hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f733b52982b45837ee3bed7113eab733edeaa179 | 59 | py | Python | geoana/em/__init__.py | simpeg/geoana | 417e23a0a689da19112e5fd361f823a2abd8785a | [
"MIT"
] | 11 | 2017-11-14T12:29:42.000Z | 2022-01-17T18:36:28.000Z | geoana/em/__init__.py | simpeg/geoana | 417e23a0a689da19112e5fd361f823a2abd8785a | [
"MIT"
] | 28 | 2016-09-02T02:44:32.000Z | 2022-03-31T22:41:33.000Z | geoana/em/__init__.py | simpeg/geoana | 417e23a0a689da19112e5fd361f823a2abd8785a | [
"MIT"
] | 4 | 2017-03-07T22:07:15.000Z | 2021-05-14T20:08:33.000Z | from . import static
from . import fdem
from . import tdem
| 14.75 | 20 | 0.745763 | from . import static
from . import fdem
from . import tdem
| true | true |
f733b5d2b190a4fb5a237b202ec640a7b9402391 | 68,794 | py | Python | test/test_utils.py | sokolx1/youtube-dlc | 2e5e4f74bfbf5628974de345d4c12f06ca3494c7 | [
"Unlicense"
] | null | null | null | test/test_utils.py | sokolx1/youtube-dlc | 2e5e4f74bfbf5628974de345d4c12f06ca3494c7 | [
"Unlicense"
] | null | null | null | test/test_utils.py | sokolx1/youtube-dlc | 2e5e4f74bfbf5628974de345d4c12f06ca3494c7 | [
"Unlicense"
] | 1 | 2021-10-30T16:42:36.000Z | 2021-10-30T16:42:36.000Z | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Various small unit tests
import io
import json
import xml.etree.ElementTree
from youtube_dlc.utils import (
age_restricted,
args_to_str,
encode_base_n,
caesar,
clean_html,
clean_podcast_url,
date_from_str,
DateRange,
detect_exe_version,
determine_ext,
dict_get,
encode_compat_str,
encodeFilename,
escape_rfc3986,
escape_url,
extract_attributes,
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
float_or_none,
get_element_by_class,
get_element_by_attribute,
get_elements_by_class,
get_elements_by_attribute,
InAdvancePagedList,
int_or_none,
intlist_to_bytes,
is_html,
js_to_json,
limit_length,
merge_dicts,
mimetype2ext,
month_by_name,
multipart_encode,
ohdave_rsa_encrypt,
OnDemandPagedList,
orderedSet,
parse_age_limit,
parse_duration,
parse_filesize,
parse_count,
parse_iso8601,
parse_resolution,
parse_bitrate,
pkcs1pad,
read_batch_urls,
sanitize_filename,
sanitize_path,
sanitize_url,
expand_path,
prepend_extension,
replace_extension,
remove_start,
remove_end,
remove_quotes,
rot47,
shell_quote,
smuggle_url,
str_to_int,
strip_jsonp,
strip_or_none,
subtitles_filename,
timeconvert,
unescapeHTML,
unified_strdate,
unified_timestamp,
unsmuggle_url,
uppercase_escape,
lowercase_escape,
url_basename,
url_or_none,
base_url,
urljoin,
urlencode_postdata,
urshift,
update_url_query,
version_tuple,
xpath_with_ns,
xpath_element,
xpath_text,
xpath_attr,
render_table,
match_str,
parse_dfxp_time_expr,
dfxp2srt,
cli_option,
cli_valueless_option,
cli_bool_option,
parse_codecs,
)
from youtube_dlc.compat import (
compat_chr,
compat_etree_fromstring,
compat_getenv,
compat_os_name,
compat_setenv,
compat_urlparse,
compat_parse_qs,
)
class TestUtil(unittest.TestCase):
def test_timeconvert(self):
    """timeconvert: unparsable time strings yield None."""
    for bogus in ('', 'bougrg'):
        self.assertIsNone(timeconvert(bogus))
def test_sanitize_filename(self):
    """sanitize_filename (default mode): path separators and OS-forbidden
    punctuation are replaced, while accented and non-Latin characters are
    preserved; leading dots/dashes are normalized unless is_id=True."""
    self.assertEqual(sanitize_filename('abc'), 'abc')
    self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
    self.assertEqual(sanitize_filename('123'), '123')
    self.assertEqual('abc_de', sanitize_filename('abc/de'))
    self.assertFalse('/' in sanitize_filename('abc/de///'))
    self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
    self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
    self.assertEqual('yes no', sanitize_filename('yes? no'))
    self.assertEqual('this - that', sanitize_filename('this: that'))
    self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
    aumlaut = 'ä'
    self.assertEqual(sanitize_filename(aumlaut), aumlaut)
    tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
    self.assertEqual(sanitize_filename(tests), tests)
    self.assertEqual(
        sanitize_filename('New World record at 0:12:34'),
        'New World record at 0_12_34')
    self.assertEqual(sanitize_filename('--gasdgf'), '_-gasdgf')
    self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
    self.assertEqual(sanitize_filename('.gasdgf'), 'gasdgf')
    self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf')
    # No forbidden character may survive sanitization
    forbidden = '"\0\\/'
    for fc in forbidden:
        for fbc in forbidden:
            self.assertTrue(fbc not in sanitize_filename(fc))
def test_sanitize_filename_restricted(self):
    """sanitize_filename(restricted=True): output is ASCII-only — spaces and
    punctuation become underscores, accented characters are transliterated,
    and the result is never empty."""
    self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
    self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
    self.assertEqual(sanitize_filename('123', restricted=True), '123')
    self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
    self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))
    self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
    self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
    self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
    self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
    tests = 'aäb\u4e2d\u56fd\u7684c'
    self.assertEqual(sanitize_filename(tests, restricted=True), 'aab_c')
    self.assertTrue(sanitize_filename('\xf6', restricted=True) != '')  # No empty filename
    # Restricted mode bans a much larger character set
    forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
    for fc in forbidden:
        for fbc in forbidden:
            self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
    # Handle a common case more neatly
    self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
    self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
    # .. but make sure the file name is never empty
    self.assertTrue(sanitize_filename('-', restricted=True) != '')
    self.assertTrue(sanitize_filename(':', restricted=True) != '')
    self.assertEqual(sanitize_filename(
        'ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', restricted=True),
        'AAAAAAAECEEEEIIIIDNOOOOOOOOEUUUUUYTHssaaaaaaaeceeeeiiiionooooooooeuuuuuythy')
def test_sanitize_ids(self):
    """sanitize_filename(is_id=True): video IDs pass through unchanged,
    including leading underscores and dashes."""
    for video_id in ('_n_cd26wFpw', '_BD_eEpuzXw', 'N0Y__7-UOdI'):
        self.assertEqual(sanitize_filename(video_id, is_id=True), video_id)
def test_sanitize_path(self):
    """sanitize_path: Windows-only path cleanup — forbidden characters become
    '#', forward slashes become backslashes, and drive letters / '\\\\?\\'
    long-path prefixes are preserved.  Effectively skipped (early return)
    on non-Windows hosts."""
    if sys.platform != 'win32':
        return
    self.assertEqual(sanitize_path('abc'), 'abc')
    self.assertEqual(sanitize_path('abc/def'), 'abc\\def')
    self.assertEqual(sanitize_path('abc\\def'), 'abc\\def')
    self.assertEqual(sanitize_path('abc|def'), 'abc#def')
    self.assertEqual(sanitize_path('<>:"|?*'), '#######')
    self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def')
    self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def')
    # \\?\ long-path/UNC prefixes must survive intact
    self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc')
    self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc')
    self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
    self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc')
    self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f')
    self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
    self.assertEqual(
        sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'),
        'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s')
    self.assertEqual(
        sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'),
        'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part')
    # Trailing dots are invalid on Windows path components
    self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#')
    self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def')
    self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#')
    self.assertEqual(sanitize_path('../abc'), '..\\abc')
    self.assertEqual(sanitize_path('../../abc'), '..\\..\\abc')
    self.assertEqual(sanitize_path('./abc'), 'abc')
    self.assertEqual(sanitize_path('./../abc'), '..\\abc')
def test_sanitize_url(self):
    """sanitize_url: repair common scheme typos and default
    protocol-relative URLs to http."""
    for raw, fixed in [
        ('//foo.bar', 'http://foo.bar'),
        ('httpss://foo.bar', 'https://foo.bar'),
        ('rmtps://foo.bar', 'rtmps://foo.bar'),
        ('https://foo.bar', 'https://foo.bar'),
    ]:
        self.assertEqual(sanitize_url(raw), fixed)
def test_expand_path(self):
    """expand_path: expand environment-variable references (%VAR% on Windows,
    $VAR elsewhere) and the '~' home-directory shortcut."""
    def env(var):
        # Build the platform-appropriate env-var reference syntax
        return '%{0}%'.format(var) if sys.platform == 'win32' else '${0}'.format(var)
    # Side effect: defines this env var for the current process
    compat_setenv('youtube_dlc_EXPATH_PATH', 'expanded')
    self.assertEqual(expand_path(env('youtube_dlc_EXPATH_PATH')), 'expanded')
    self.assertEqual(expand_path(env('HOME')), compat_getenv('HOME'))
    self.assertEqual(expand_path('~'), compat_getenv('HOME'))
    self.assertEqual(
        expand_path('~/%s' % env('youtube_dlc_EXPATH_PATH')),
        '%s/expanded' % compat_getenv('HOME'))
def test_prepend_extension(self):
    """prepend_extension: insert the extra extension before the expected
    final extension, or append it when the final extension differs."""
    cases = [
        (('abc.ext', 'temp'), 'abc.temp.ext'),
        (('abc.ext', 'temp', 'ext'), 'abc.temp.ext'),
        (('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp'),
        (('abc', 'temp'), 'abc.temp'),
        (('.abc', 'temp'), '.abc.temp'),
        (('.abc.ext', 'temp'), '.abc.temp.ext'),
    ]
    for args, expected in cases:
        self.assertEqual(prepend_extension(*args), expected)
def test_replace_extension(self):
    """replace_extension: swap the final extension when it matches the
    expected one, otherwise append the new extension."""
    cases = [
        (('abc.ext', 'temp'), 'abc.temp'),
        (('abc.ext', 'temp', 'ext'), 'abc.temp'),
        (('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp'),
        (('abc', 'temp'), 'abc.temp'),
        (('.abc', 'temp'), '.abc.temp'),
        (('.abc.ext', 'temp'), '.abc.temp'),
    ]
    for args, expected in cases:
        self.assertEqual(replace_extension(*args), expected)
def test_subtitles_filename(self):
    """subtitles_filename: insert the language code and subtitle extension,
    replacing the media extension only when it matches the expected one."""
    self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt'), 'abc.en.vtt')
    self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt', 'ext'), 'abc.en.vtt')
    self.assertEqual(subtitles_filename('abc.unexpected_ext', 'en', 'vtt', 'ext'), 'abc.unexpected_ext.en.vtt')
def test_remove_start(self):
    """remove_start: strip a prefix when present; None passes through."""
    self.assertIsNone(remove_start(None, 'A - '))
    self.assertEqual(remove_start('A - B', 'A - '), 'B')
    # Prefix absent -> string unchanged
    self.assertEqual(remove_start('B - A', 'A - '), 'B - A')
def test_remove_end(self):
    """remove_end: strip a suffix when present; None passes through."""
    self.assertIsNone(remove_end(None, ' - B'))
    self.assertEqual(remove_end('A - B', ' - B'), 'A')
    # Suffix absent -> string unchanged
    self.assertEqual(remove_end('B - A', ' - B'), 'B - A')
def test_remove_quotes(self):
    """remove_quotes: strip quotes only when the same quote character wraps
    both ends of the string."""
    for raw, expected in [
        (None, None),
        ('"', '"'),
        ("'", "'"),
        (';', ';'),
        ('";', '";'),
        ('""', ''),
        ('";"', ';'),
    ]:
        self.assertEqual(remove_quotes(raw), expected)
def test_ordered_set(self):
    """orderedSet: deduplicate while keeping first-seen order."""
    for seq, deduped in [
        ([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5], [1, 2, 3, 4, 5, 6, 7]),
        ([], []),
        ([1], [1]),
        # order of first occurrence is preserved
        ([135, 1, 1, 1], [135, 1]),
    ]:
        self.assertEqual(orderedSet(seq), deduped)
def test_unescape_html(self):
    """unescapeHTML: decode numeric and named HTML entities; leave
    non-entities and invalid character references untouched.

    NOTE(review): the entity escapes below had been decoded away in a
    previous copy of this file (e.g. '&#47;' had become a literal '/'),
    which made several assertions vacuous and turned one line into a
    syntax error; the escaped inputs are restored here.
    """
    self.assertEqual(unescapeHTML('%20;'), '%20;')
    self.assertEqual(unescapeHTML('&#x2F;'), '/')
    self.assertEqual(unescapeHTML('&#47;'), '/')
    self.assertEqual(unescapeHTML('&eacute;'), 'é')
    # Out-of-range numeric reference must be left as-is
    self.assertEqual(unescapeHTML('&#2013266066;'), '&#2013266066;')
    self.assertEqual(unescapeHTML('&a&quot;'), '&a"')
    # HTML5 entities
    self.assertEqual(unescapeHTML('&period;&apos;'), '.\'')
def test_date_from_str(self):
    """date_from_str: relative-date aliases resolve to the same day as
    their day-count equivalents."""
    for alias, equivalent in [
        ('yesterday', 'now-1day'),
        ('now+7day', 'now+1week'),
        ('now+14day', 'now+2week'),
        ('now+365day', 'now+1year'),
        ('now+30day', 'now+1month'),
    ]:
        self.assertEqual(date_from_str(alias), date_from_str(equivalent))
def test_daterange(self):
    """DateRange: membership testing of YYYYMMDD strings; either bound may
    be omitted for an open-ended range."""
    _20century = DateRange("19000101", "20000101")
    self.assertFalse("17890714" in _20century)
    # Only a start bound: open-ended towards the future
    _ac = DateRange("00010101")
    self.assertTrue("19690721" in _ac)
    # Only an end bound: open-ended towards the past
    _firstmilenium = DateRange(end="10000101")
    self.assertTrue("07110427" in _firstmilenium)
def test_unified_dates(self):
    """unified_strdate: normalize many human-readable date formats to
    YYYYMMDD; unknown formats yield None."""
    self.assertEqual(unified_strdate('December 21, 2010'), '20101221')
    self.assertEqual(unified_strdate('8/7/2009'), '20090708')
    self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
    self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
    self.assertEqual(unified_strdate('1968 12 10'), '19681210')
    self.assertEqual(unified_strdate('1968-12-10'), '19681210')
    self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
    # day_first=False switches ambiguous dates to month-first (US) parsing
    self.assertEqual(
        unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
        '20141126')
    self.assertEqual(
        unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
        '20150202')
    self.assertEqual(unified_strdate('Feb 14th 2016 5:45PM'), '20160214')
    self.assertEqual(unified_strdate('25-09-2014'), '20140925')
    self.assertEqual(unified_strdate('27.02.2016 17:30'), '20160227')
    self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None)
    self.assertEqual(unified_strdate('Feb 7, 2016 at 6:35 pm'), '20160207')
    # Ordinal suffixes (st/nd/rd/th) must be handled
    self.assertEqual(unified_strdate('July 15th, 2013'), '20130715')
    self.assertEqual(unified_strdate('September 1st, 2013'), '20130901')
    self.assertEqual(unified_strdate('Sep 2nd, 2013'), '20130902')
    self.assertEqual(unified_strdate('November 3rd, 2019'), '20191103')
    self.assertEqual(unified_strdate('October 23rd, 2005'), '20051023')
def test_unified_timestamps(self):
    """unified_timestamp: parse assorted date/time strings into POSIX
    timestamps (pre-1970 dates give negative values); unknown formats
    yield None."""
    self.assertEqual(unified_timestamp('December 21, 2010'), 1292889600)
    self.assertEqual(unified_timestamp('8/7/2009'), 1247011200)
    self.assertEqual(unified_timestamp('Dec 14, 2012'), 1355443200)
    self.assertEqual(unified_timestamp('2012/10/11 01:56:38 +0000'), 1349920598)
    self.assertEqual(unified_timestamp('1968 12 10'), -33436800)
    self.assertEqual(unified_timestamp('1968-12-10'), -33436800)
    self.assertEqual(unified_timestamp('28/01/2014 21:00:00 +0100'), 1390939200)
    # day_first=False switches ambiguous dates to month-first (US) parsing
    self.assertEqual(
        unified_timestamp('11/26/2014 11:30:00 AM PST', day_first=False),
        1417001400)
    self.assertEqual(
        unified_timestamp('2/2/2015 6:47:40 PM', day_first=False),
        1422902860)
    self.assertEqual(unified_timestamp('Feb 14th 2016 5:45PM'), 1455471900)
    self.assertEqual(unified_timestamp('25-09-2014'), 1411603200)
    self.assertEqual(unified_timestamp('27.02.2016 17:30'), 1456594200)
    self.assertEqual(unified_timestamp('UNKNOWN DATE FORMAT'), None)
    self.assertEqual(unified_timestamp('May 16, 2016 11:15 PM'), 1463440500)
    self.assertEqual(unified_timestamp('Feb 7, 2016 at 6:35 pm'), 1454870100)
    self.assertEqual(unified_timestamp('2017-03-30T17:52:41Q'), 1490896361)
    self.assertEqual(unified_timestamp('Sep 11, 2013 | 5:49 AM'), 1378878540)
    self.assertEqual(unified_timestamp('December 15, 2017 at 7:49 am'), 1513324140)
    self.assertEqual(unified_timestamp('2018-03-14T08:32:43.1493874+00:00'), 1521016363)
def test_determine_ext(self):
    """determine_ext: take a recognized extension from the URL path,
    ignoring query strings and trailing slashes; unrecognized extensions
    fall back to the supplied default."""
    self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
    self.assertEqual(determine_ext('http://example.com/foo/bar/?download', None), None)
    # '.nonext' is not a known media extension -> default
    self.assertEqual(determine_ext('http://example.com/foo/bar.nonext/?download', None), None)
    self.assertEqual(determine_ext('http://example.com/foo/bar/mp4?download', None), None)
    self.assertEqual(determine_ext('http://example.com/foo/bar.m3u8//?download'), 'm3u8')
    self.assertEqual(determine_ext('foobar', None), None)
def test_find_xpath_attr(self):
    """find_xpath_attr: return the first element matching the xpath that
    also carries the given attribute (optionally with a specific value,
    including the empty string)."""
    testxml = '''<root>
<node/>
<node x="a"/>
<node x="a" y="c" />
<node x="b" y="d" />
<node x="" />
</root>'''
    doc = compat_etree_fromstring(testxml)
    # No matching tag at all
    self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
    self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
    # Tag matches but attribute missing
    self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None)
    self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None)
    # First element carrying the attribute wins
    self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1])
    self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
    self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3])
    self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2])
    self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
    self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3])
    # Empty attribute value is a valid match target
    self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4])
def test_xpath_with_ns(self):
    """xpath_with_ns: expand 'prefix:tag' xpath steps into ElementTree's
    '{namespace-uri}tag' form using the provided prefix map."""
    testxml = '''<root xmlns:media="http://example.com/">
<media:song>
<media:author>The Author</media:author>
<url>http://server.com/download.mp3</url>
</media:song>
</root>'''
    doc = compat_etree_fromstring(testxml)
    find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
    self.assertTrue(find('media:song') is not None)
    self.assertEqual(find('media:song/media:author').text, 'The Author')
    # Un-prefixed steps must still work alongside prefixed ones
    self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
def test_xpath_element(self):
    """xpath_element: first matching xpath wins when a list is given;
    'default' is returned for misses, and fatal=True raises instead."""
    doc = xml.etree.ElementTree.Element('root')
    div = xml.etree.ElementTree.SubElement(doc, 'div')
    p = xml.etree.ElementTree.SubElement(div, 'p')
    p.text = 'Foo'
    self.assertEqual(xpath_element(doc, 'div/p'), p)
    self.assertEqual(xpath_element(doc, ['div/p']), p)
    # First xpath misses, second one matches
    self.assertEqual(xpath_element(doc, ['div/bar', 'div/p']), p)
    self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
    self.assertEqual(xpath_element(doc, ['div/bar'], default='default'), 'default')
    self.assertTrue(xpath_element(doc, 'div/bar') is None)
    self.assertTrue(xpath_element(doc, ['div/bar']) is None)
    self.assertTrue(xpath_element(doc, ['div/bar'], 'div/baz') is None)
    self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)
    self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar'], fatal=True)
    self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar', 'div/baz'], fatal=True)
def test_xpath_text(self):
    """xpath_text: return the matched element's text; 'default' for misses,
    None when neither default nor fatal is given, raise when fatal=True."""
    testxml = '''<root>
<div>
<p>Foo</p>
</div>
</root>'''
    doc = compat_etree_fromstring(testxml)
    self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
    self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default')
    self.assertTrue(xpath_text(doc, 'div/bar') is None)
    self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True)
def test_xpath_attr(self):
    """xpath_attr: return the named attribute of the matched element;
    'default' when the element or attribute is missing, raise with
    fatal=True."""
    testxml = '''<root>
<div>
<p x="a">Foo</p>
</div>
</root>'''
    doc = compat_etree_fromstring(testxml)
    self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a')
    # Missing element vs. missing attribute both give None by default
    self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None)
    self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None)
    self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default')
    self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default')
    self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True)
    self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)
def test_smuggle_url(self):
    """smuggle_url/unsmuggle_url: data round-trips through the URL; a plain
    URL unsmuggles to (url, None); smuggling twice merges the data dicts."""
    data = {"ö": "ö", "abc": [3]}
    url = 'https://foo.bar/baz?x=y#a'
    smug_url = smuggle_url(url, data)
    unsmug_url, unsmug_data = unsmuggle_url(smug_url)
    self.assertEqual(url, unsmug_url)
    self.assertEqual(data, unsmug_data)
    # Unsmuggling an unsmuggled URL is a no-op with None data
    res_url, res_data = unsmuggle_url(url)
    self.assertEqual(res_url, url)
    self.assertEqual(res_data, None)
    # Nested smuggling merges both payloads
    smug_url = smuggle_url(url, {'a': 'b'})
    smug_smug_url = smuggle_url(smug_url, {'c': 'd'})
    res_url, res_data = unsmuggle_url(smug_smug_url)
    self.assertEqual(res_url, url)
    self.assertEqual(res_data, {'a': 'b', 'c': 'd'})
def test_shell_quote(self):
    """shell_quote: quote a command line containing single quotes and
    non-ASCII characters; quoting style differs between POSIX and
    Windows (compat_os_name == 'nt')."""
    args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')]
    self.assertEqual(
        shell_quote(args),
        """ffmpeg -i 'ñ€ß'"'"'.mp4'""" if compat_os_name != 'nt' else '''ffmpeg -i "ñ€ß'.mp4"''')
def test_float_or_none(self):
    """float_or_none: numeric strings parse to float; anything
    unparsable yields None."""
    self.assertEqual(float_or_none('42.42'), 42.42)
    self.assertEqual(float_or_none('42'), 42.0)
    for bad_input in ('', None, [], set()):
        self.assertIsNone(float_or_none(bad_input))
def test_int_or_none(self):
    """int_or_none: numeric strings parse to int; anything
    unparsable yields None."""
    self.assertEqual(int_or_none('42'), 42)
    for bad_input in ('', None, [], set()):
        self.assertIsNone(int_or_none(bad_input))
def test_str_to_int(self):
    """str_to_int: strip thousands separators (',' and '.') before
    converting; ints pass through; non-numeric input yields None."""
    self.assertEqual(str_to_int('123,456'), 123456)
    self.assertEqual(str_to_int('123.456'), 123456)
    self.assertEqual(str_to_int(523), 523)
    # Python 3 has no long; the 123456L literal would be a SyntaxError at
    # parse time, hence it is hidden inside eval() for the Python 2 branch
    if sys.version_info < (3, 0):
        eval('self.assertEqual(str_to_int(123456L), 123456)')
    self.assertEqual(str_to_int('noninteger'), None)
    self.assertEqual(str_to_int([]), None)
def test_url_basename(self):
    """url_basename: last path component of a URL, ignoring query and
    fragment; empty for a bare host or trailing slash handled as shown."""
    for url, basename in [
        ('http://foo.de/', ''),
        ('http://foo.de/bar/baz', 'baz'),
        ('http://foo.de/bar/baz?x=y', 'baz'),
        ('http://foo.de/bar/baz#x=y', 'baz'),
        ('http://foo.de/bar/baz/', 'baz'),
        ('http://media.w3.org/2010/05/sintel/trailer.mp4', 'trailer.mp4'),
    ]:
        self.assertEqual(url_basename(url), basename)
def test_base_url(self):
    """base_url: URL up to and including the last directory slash,
    with query strings discarded."""
    for url, base in [
        ('http://foo.de/', 'http://foo.de/'),
        ('http://foo.de/bar', 'http://foo.de/'),
        ('http://foo.de/bar/', 'http://foo.de/bar/'),
        ('http://foo.de/bar/baz', 'http://foo.de/bar/'),
        ('http://foo.de/bar/baz?x=z/x/c', 'http://foo.de/bar/'),
    ]:
        self.assertEqual(base_url(url), base)
def test_urljoin(self):
    """urljoin: join a base and path accepting str or bytes, tolerating
    missing/invalid bases when the path is already absolute, and
    returning None for unusable paths."""
    self.assertEqual(urljoin('http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    # bytes are accepted for either argument
    self.assertEqual(urljoin(b'http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin('http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin(b'http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin('//foo.de/', '/a/b/c.txt'), '//foo.de/a/b/c.txt')
    self.assertEqual(urljoin('http://foo.de/', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin('http://foo.de', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin('http://foo.de', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    # An absolute path overrides the base entirely
    self.assertEqual(urljoin('http://foo.de/', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin('http://foo.de/', '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt')
    # Invalid/missing base is fine when the path is already absolute
    self.assertEqual(urljoin(None, 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin(None, '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt')
    self.assertEqual(urljoin('', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin(['foobar'], 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    # Unusable path -> None
    self.assertEqual(urljoin('http://foo.de/', None), None)
    self.assertEqual(urljoin('http://foo.de/', ''), None)
    self.assertEqual(urljoin('http://foo.de/', ['foobar']), None)
    # Relative '..' segments are resolved
    self.assertEqual(urljoin('http://foo.de/a/b/c.txt', '.././../d.txt'), 'http://foo.de/d.txt')
    self.assertEqual(urljoin('http://foo.de/a/b/c.txt', 'rtmp://foo.de'), 'rtmp://foo.de')
    self.assertEqual(urljoin(None, 'rtmp://foo.de'), 'rtmp://foo.de')
def test_url_or_none(self):
    """url_or_none: keep only strings with a recognized URL scheme
    (or protocol-relative); everything else becomes None."""
    for candidate, expected in [
        (None, None),
        ('', None),
        ('foo', None),
        ('http://foo.de', 'http://foo.de'),
        ('https://foo.de', 'https://foo.de'),
        ('http$://foo.de', None),
        ('http://foo.de', 'http://foo.de'),
        ('//foo.de', '//foo.de'),
        ('s3://foo.de', None),
        ('rtmpte://foo.de', 'rtmpte://foo.de'),
        ('mms://foo.de', 'mms://foo.de'),
        ('rtspu://foo.de', 'rtspu://foo.de'),
        ('ftps://foo.de', 'ftps://foo.de'),
    ]:
        self.assertEqual(url_or_none(candidate), expected)
def test_parse_age_limit(self):
    """parse_age_limit: ints in 0..21 pass through; rating strings
    (MPAA/TV) map to ages; anything else yields None."""
    for rating, age in [
        (None, None),
        (False, None),
        ('invalid', None),
        (0, 0),
        (18, 18),
        (21, 21),
        (22, None),
        ('18', 18),
        ('18+', 18),
        ('PG-13', 13),
        ('TV-14', 14),
        ('TV-MA', 17),
        ('TV14', 14),
        ('TV_G', 0),
    ]:
        self.assertEqual(parse_age_limit(rating), age)
def test_parse_duration(self):
    """parse_duration: colon-separated, unit-suffixed and ISO-8601 style
    duration strings all resolve to seconds; invalid input yields None."""
    for duration, seconds in [
        (None, None),
        (False, None),
        ('invalid', None),
        ('1', 1),
        ('1337:12', 80232),
        ('9:12:43', 33163),
        ('12:00', 720),
        ('00:01:01', 61),
        ('x:y', None),
        ('3h11m53s', 11513),
        ('3h 11m 53s', 11513),
        ('3 hours 11 minutes 53 seconds', 11513),
        ('3 hours 11 mins 53 secs', 11513),
        ('62m45s', 3765),
        ('6m59s', 419),
        ('49s', 49),
        ('0h0m0s', 0),
        ('0m0s', 0),
        ('0s', 0),
        ('01:02:03.05', 3723.05),
        ('T30M38S', 1838),
        ('5 s', 5),
        ('3 min', 180),
        ('2.5 hours', 9000),
        ('02:03:04', 7384),
        ('01:02:03:04', 93784),
        ('1 hour 3 minutes', 3780),
        ('87 Min.', 5220),
        ('PT1H0.040S', 3600.04),
        ('PT00H03M30SZ', 210),
        ('P0Y0M0DT0H4M20.880S', 260.88),
    ]:
        self.assertEqual(parse_duration(duration), seconds)
def test_fix_xml_ampersands(self):
    """fix_xml_ampersands: escape bare '&' characters to '&amp;' while
    leaving valid entity references (named, decimal, hex) untouched.

    NOTE(review): the '&amp;'/'&apos;'-style escapes below had been
    entity-decoded in a previous copy of this file, collapsing input and
    expected output into the same string (making the assertions vacuous)
    and leaving one line syntactically invalid; restored here.
    """
    self.assertEqual(
        fix_xml_ampersands('"&x=y&amp;z=a'), '"&amp;x=y&amp;z=a')
    # Unknown entity-looking sequences are also escaped
    self.assertEqual(
        fix_xml_ampersands('"&amp;x=y&wrong;&amp;z=a'),
        '"&amp;x=y&amp;wrong;&amp;z=a')
    # Well-formed named entities are kept as-is
    self.assertEqual(
        fix_xml_ampersands('&amp;&apos;&gt;&lt;&quot;'),
        '&amp;&apos;&gt;&lt;&quot;')
    # Numeric (decimal and hex) references are kept as-is
    self.assertEqual(
        fix_xml_ampersands('&#1234;&#x1abC;'), '&#1234;&#x1abC;')
    self.assertEqual(fix_xml_ampersands('&#&#'), '&amp;#&amp;#')
def test_paged_list(self):
    """OnDemandPagedList / InAdvancePagedList: getslice() must yield the
    same items as slicing the full underlying sequence, for both
    lazy and precomputed-page-count implementations."""
    def testPL(size, pagesize, sliceargs, expected):
        # Generator producing one page of consecutive integers
        def get_page(pagenum):
            firstid = pagenum * pagesize
            upto = min(size, pagenum * pagesize + pagesize)
            for i in range(firstid, upto):
                yield i
        pl = OnDemandPagedList(get_page, pagesize)
        got = pl.getslice(*sliceargs)
        self.assertEqual(got, expected)
        # Same slice via the implementation that knows the page count up front
        iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
        got = iapl.getslice(*sliceargs)
        self.assertEqual(got, expected)
    testPL(5, 2, (), [0, 1, 2, 3, 4])
    testPL(5, 2, (1,), [1, 2, 3, 4])
    testPL(5, 2, (2,), [2, 3, 4])
    testPL(5, 2, (4,), [4])
    testPL(5, 2, (0, 3), [0, 1, 2])
    testPL(5, 2, (1, 4), [1, 2, 3])
    # End index past the data is clamped
    testPL(5, 2, (2, 99), [2, 3, 4])
    # Slice entirely past the data is empty
    testPL(5, 2, (20, 99), [])
def test_read_batch_urls(self):
    """read_batch_urls: skip the UTF-8 BOM, '#'/';' comment lines and
    blank lines; strip surrounding whitespace and CR line endings."""
    f = io.StringIO('''\xef\xbb\xbf foo
bar\r
baz
# More after this line\r
; or after this
bam''')
    self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam'])
def test_urlencode_postdata(self):
    """urlencode_postdata: the encoded POST body is always bytes."""
    encoded = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
    self.assertTrue(isinstance(encoded, bytes))
def test_update_url_query(self):
    """update_url_query: merge/replace query parameters on a URL; values may
    be strings, bytes, numbers, lists or tuples, and an empty list removes
    the parameter.  Comparison goes through parsed query dicts so parameter
    order does not matter."""
    def query_dict(url):
        # Normalize a URL's query string into a dict for order-insensitive comparison
        return compat_parse_qs(compat_urlparse.urlparse(url).query)
    self.assertEqual(query_dict(update_url_query(
        'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
        query_dict('http://example.com/path?quality=HD&format=mp4'))
    # A list value produces one parameter instance per element
    self.assertEqual(query_dict(update_url_query(
        'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
        query_dict('http://example.com/path?system=LINUX&system=WINDOWS'))
    self.assertEqual(query_dict(update_url_query(
        'http://example.com/path', {'fields': 'id,formats,subtitles'})),
        query_dict('http://example.com/path?fields=id,formats,subtitles'))
    self.assertEqual(query_dict(update_url_query(
        'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
        query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
    # An empty list deletes an existing parameter
    self.assertEqual(query_dict(update_url_query(
        'http://example.com/path?manifest=f4m', {'manifest': []})),
        query_dict('http://example.com/path'))
    # A scalar value replaces all existing instances of the parameter
    self.assertEqual(query_dict(update_url_query(
        'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
        query_dict('http://example.com/path?system=LINUX'))
    # bytes and numeric values are accepted
    self.assertEqual(query_dict(update_url_query(
        'http://example.com/path', {'fields': b'id,formats,subtitles'})),
        query_dict('http://example.com/path?fields=id,formats,subtitles'))
    self.assertEqual(query_dict(update_url_query(
        'http://example.com/path', {'width': 1080, 'height': 720})),
        query_dict('http://example.com/path?width=1080&height=720'))
    self.assertEqual(query_dict(update_url_query(
        'http://example.com/path', {'bitrate': 5020.43})),
        query_dict('http://example.com/path?bitrate=5020.43'))
    # Non-ASCII values are percent-encoded as UTF-8
    self.assertEqual(query_dict(update_url_query(
        'http://example.com/path', {'test': '第二行тест'})),
        query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
def test_multipart_encode(self):
    """multipart_encode: build a multipart/form-data body with the given
    boundary (bytes in, bytes out); a boundary occurring in the payload
    must raise ValueError."""
    self.assertEqual(
        multipart_encode({b'field': b'value'}, boundary='AAAAAA')[0],
        b'--AAAAAA\r\nContent-Disposition: form-data; name="field"\r\n\r\nvalue\r\n--AAAAAA--\r\n')
    # Non-ASCII field names/values are carried through as UTF-8 bytes
    self.assertEqual(
        multipart_encode({'欄位'.encode('utf-8'): '值'.encode('utf-8')}, boundary='AAAAAA')[0],
        b'--AAAAAA\r\nContent-Disposition: form-data; name="\xe6\xac\x84\xe4\xbd\x8d"\r\n\r\n\xe5\x80\xbc\r\n--AAAAAA--\r\n')
    self.assertRaises(
        ValueError, multipart_encode, {b'field': b'value'}, boundary='value')
def test_dict_get(self):
    """dict_get: look up the first of one or more keys whose value is
    non-falsy; falsy values are skipped unless skip_false_values=False."""
    FALSE_VALUES = {
        'none': None,
        'false': False,
        'zero': 0,
        'empty_string': '',
        'empty_list': [],
    }
    d = FALSE_VALUES.copy()
    d['a'] = 42
    self.assertEqual(dict_get(d, 'a'), 42)
    self.assertEqual(dict_get(d, 'b'), None)
    self.assertEqual(dict_get(d, 'b', 42), 42)
    # A tuple of keys: first present key wins
    self.assertEqual(dict_get(d, ('a', )), 42)
    self.assertEqual(dict_get(d, ('b', 'a', )), 42)
    self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42)
    self.assertEqual(dict_get(d, ('b', 'c', )), None)
    self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42)
    for key, false_value in FALSE_VALUES.items():
        # By default falsy values are treated as missing ...
        self.assertEqual(dict_get(d, ('b', 'c', key, )), None)
        # ... unless skip_false_values=False is passed
        self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
def test_merge_dicts(self):
    """merge_dicts keeps the first non-empty value for each key, left to right."""
    self.assertEqual(merge_dicts({'a': 1}, {'b': 2}), {'a': 1, 'b': 2})
    self.assertEqual(merge_dicts({'a': 1}, {'a': 2}), {'a': 1})
    self.assertEqual(merge_dicts({'a': 1}, {'a': None}), {'a': 1})
    self.assertEqual(merge_dicts({'a': 1}, {'a': ''}), {'a': 1})
    self.assertEqual(merge_dicts({'a': 1}, {}), {'a': 1})
    self.assertEqual(merge_dicts({'a': None}, {'a': 1}), {'a': 1})
    # An empty string is kept (only None is treated as "missing") ...
    self.assertEqual(merge_dicts({'a': ''}, {'a': 1}), {'a': ''})
    # ... unless a later dict supplies a non-empty string.
    self.assertEqual(merge_dicts({'a': ''}, {'a': 'abc'}), {'a': 'abc'})
    self.assertEqual(merge_dicts({'a': None}, {'a': ''}, {'a': 'abc'}), {'a': 'abc'})
def test_encode_compat_str(self):
    """encode_compat_str decodes bytes with the given encoding and passes str through."""
    for raw in (b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82', 'тест'):
        self.assertEqual(encode_compat_str(raw, 'utf-8'), 'тест')
def test_parse_iso8601(self):
    """parse_iso8601 converts ISO 8601 timestamps to Unix time, None if unparseable."""
    self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
    self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
    self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
    # Fractional seconds are accepted but truncated.
    self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
    self.assertEqual(parse_iso8601('2015-09-29T08:27:31.727'), 1443515251)
    # Dashes in the time portion are not a valid separator.
    self.assertEqual(parse_iso8601('2015-09-29T08-27-31.727'), None)
def test_strip_jsonp(self):
    """strip_jsonp unwraps various JSONP callback styles into plain JSON."""
    cases = [
        ('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);', [{'id': '532cb', 'x': 3}]),
        ('parseMetadata({"STATUS":"OK"})\n\n\n//epc', {'STATUS': 'OK'}),
        ('ps.embedHandler({"status": "success"});', {'status': 'success'}),
        ('window.cb && window.cb({"status": "success"});', {'status': 'success'}),
        ('window.cb && cb({"status": "success"});', {'status': 'success'}),
        ('({"status": "success"});', {'status': 'success'}),
    ]
    for jsonp, expected in cases:
        # The stripped result must be valid JSON with the payload intact.
        self.assertEqual(json.loads(strip_jsonp(jsonp)), expected)
def test_strip_or_none(self):
    """strip_or_none strips surrounding whitespace; non-strings map to None."""
    for padded in (' abc', 'abc ', ' abc ', '\tabc\t', '\n\tabc\n\t', 'abc'):
        self.assertEqual(strip_or_none(padded), 'abc')
    # The empty string is still a string and survives as-is.
    self.assertEqual(strip_or_none(''), '')
    for not_a_string in (None, 42, []):
        self.assertEqual(strip_or_none(not_a_string), None)
def test_uppercase_escape(self):
    """uppercase_escape decodes \\UXXXXXXXX escapes, leaving plain text untouched."""
    for escaped, plain in (('aä', 'aä'), ('\\U0001d550', '𝕐')):
        self.assertEqual(uppercase_escape(escaped), plain)
def test_lowercase_escape(self):
    """lowercase_escape decodes \\uXXXX escapes, leaving plain text untouched."""
    for escaped, plain in (('aä', 'aä'), ('\\u0026', '&')):
        self.assertEqual(lowercase_escape(escaped), plain)
def test_limit_length(self):
    """limit_length truncates long strings with an ellipsis; short/None pass through."""
    self.assertEqual(limit_length(None, 12), None)
    self.assertEqual(limit_length('foo', 12), 'foo')
    self.assertTrue(
        limit_length('foo bar baz asd', 12).startswith('foo bar'))
    self.assertTrue('...' in limit_length('foo bar baz asd', 12))
def test_mimetype2ext(self):
    """mimetype2ext maps MIME types (with optional parameters) to file extensions."""
    for mimetype, ext in (
        (None, None),
        ('video/x-flv', 'flv'),
        ('application/x-mpegURL', 'm3u8'),
        ('text/vtt', 'vtt'),
        # Parameters after ';' must be ignored, with or without a space.
        ('text/vtt;charset=utf-8', 'vtt'),
        ('text/html; charset=utf-8', 'html'),
        ('audio/x-wav', 'wav'),
        ('audio/x-wav;codec=pcm', 'wav'),
    ):
        self.assertEqual(mimetype2ext(mimetype), ext)
def test_month_by_name(self):
    """month_by_name resolves month names to 1-12 per the given language code."""
    self.assertEqual(month_by_name(None), None)
    self.assertEqual(month_by_name('December', 'en'), 12)
    self.assertEqual(month_by_name('décembre', 'fr'), 12)
    # English is the default language.
    self.assertEqual(month_by_name('December'), 12)
    self.assertEqual(month_by_name('décembre'), None)
    self.assertEqual(month_by_name('Unknown', 'unknown'), None)
def test_parse_codecs(self):
    """parse_codecs splits a codecs string into vcodec/acodec fields."""
    self.assertEqual(parse_codecs(''), {})
    self.assertEqual(parse_codecs('avc1.77.30, mp4a.40.2'), {
        'vcodec': 'avc1.77.30',
        'acodec': 'mp4a.40.2',
    })
    # Audio-only input yields vcodec 'none' (and vice versa below).
    self.assertEqual(parse_codecs('mp4a.40.2'), {
        'vcodec': 'none',
        'acodec': 'mp4a.40.2',
    })
    # Order in the input string does not matter.
    self.assertEqual(parse_codecs('mp4a.40.5,avc1.42001e'), {
        'vcodec': 'avc1.42001e',
        'acodec': 'mp4a.40.5',
    })
    self.assertEqual(parse_codecs('avc3.640028'), {
        'vcodec': 'avc3.640028',
        'acodec': 'none',
    })
    # Empty entries and unrecognized codecs are skipped when known ones exist.
    self.assertEqual(parse_codecs(', h264,,newcodec,aac'), {
        'vcodec': 'h264',
        'acodec': 'aac',
    })
    self.assertEqual(parse_codecs('av01.0.05M.08'), {
        'vcodec': 'av01.0.05M.08',
        'acodec': 'none',
    })
    self.assertEqual(parse_codecs('theora, vorbis'), {
        'vcodec': 'theora',
        'acodec': 'vorbis',
    })
    # Exactly two unknown codecs are assumed to be video + audio.
    self.assertEqual(parse_codecs('unknownvcodec, unknownacodec'), {
        'vcodec': 'unknownvcodec',
        'acodec': 'unknownacodec',
    })
    self.assertEqual(parse_codecs('unknown'), {})
def test_escape_rfc3986(self):
    """escape_rfc3986 percent-encodes only unsafe characters, never double-escaping."""
    reserved = "!*'();:@&=+$,/?#[]"
    unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
    # Reserved and unreserved RFC 3986 characters must pass through untouched.
    self.assertEqual(escape_rfc3986(reserved), reserved)
    self.assertEqual(escape_rfc3986(unreserved), unreserved)
    self.assertEqual(escape_rfc3986('тест'), '%D1%82%D0%B5%D1%81%D1%82')
    # Already-encoded sequences are left alone (no %25 double-escape).
    self.assertEqual(escape_rfc3986('%D1%82%D0%B5%D1%81%D1%82'), '%D1%82%D0%B5%D1%81%D1%82')
    self.assertEqual(escape_rfc3986('foo bar'), 'foo%20bar')
    self.assertEqual(escape_rfc3986('foo%20bar'), 'foo%20bar')
def test_escape_url(self):
    """escape_url percent-encodes non-ASCII URL parts and IDNA-encodes hostnames."""
    self.assertEqual(
        escape_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
        'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
    )
    self.assertEqual(
        escape_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
        'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
    )
    # Non-ASCII hostnames become punycode, paths become percent-escapes.
    self.assertEqual(
        escape_url('http://тест.рф/фрагмент'),
        'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
    )
    self.assertEqual(
        escape_url('http://тест.рф/абв?абв=абв#абв'),
        'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
    )
    # A fully-ASCII URL must come back unchanged.
    self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
def test_js_to_json_realworld(self):
    """js_to_json converts JS object literals seen in the wild into valid JSON."""
    # Single-quoted keys/values become double-quoted.
    inp = '''{
'clip':{'provider':'pseudo'}
}'''
    self.assertEqual(js_to_json(inp), '''{
"clip":{"provider":"pseudo"}
}''')
    json.loads(js_to_json(inp))
    inp = '''{
'playlist':[{'controls':{'all':null}}]
}'''
    self.assertEqual(js_to_json(inp), '''{
"playlist":[{"controls":{"all":null}}]
}''')
    # Escaped single quotes inside a double-quoted string are unescaped.
    inp = '''"The CW\\'s \\'Crazy Ex-Girlfriend\\'"'''
    self.assertEqual(js_to_json(inp), '''"The CW's 'Crazy Ex-Girlfriend'"''')
    inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
    json_code = js_to_json(inp)
    self.assertEqual(json.loads(json_code), json.loads(inp))
    # Unquoted numeric keys get quoted; the trailing comma is dropped.
    inp = '''{
0:{src:'skipped', type: 'application/dash+xml'},
1:{src:'skipped', type: 'application/vnd.apple.mpegURL'},
}'''
    self.assertEqual(js_to_json(inp), '''{
"0":{"src":"skipped", "type": "application/dash+xml"},
"1":{"src":"skipped", "type": "application/vnd.apple.mpegURL"}
}''')
    # Already-valid JSON must survive untouched.
    inp = '''{"foo":101}'''
    self.assertEqual(js_to_json(inp), '''{"foo":101}''')
    inp = '''{"duration": "00:01:07"}'''
    self.assertEqual(js_to_json(inp), '''{"duration": "00:01:07"}''')
    inp = '''{segments: [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}'''
    self.assertEqual(js_to_json(inp), '''{"segments": [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}''')
def test_js_to_json_edgecases(self):
    """Exercises js_to_json corner cases: escapes, comments, numerals, ! coercion."""
    on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
    self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
    on = js_to_json('{"abc": true}')
    self.assertEqual(json.loads(on), {'abc': True})
    # Ignore JavaScript code as well
    on = js_to_json('''{
"x": 1,
y: "a",
z: some.code
}''')
    d = json.loads(on)
    self.assertEqual(d['x'], 1)
    self.assertEqual(d['y'], 'a')
    # Just drop ! prefix for now though this results in a wrong value
    on = js_to_json('''{
a: !0,
b: !1,
c: !!0,
d: !!42.42,
e: !!![],
f: !"abc",
g: !"",
!42: 42
}''')
    self.assertEqual(json.loads(on), {
        'a': 0,
        'b': 1,
        'c': 0,
        'd': 42.42,
        'e': [],
        'f': "abc",
        'g': "",
        '42': 42
    })
    # Trailing commas and /* */ or // comments must be stripped everywhere.
    on = js_to_json('["abc", "def",]')
    self.assertEqual(json.loads(on), ['abc', 'def'])
    on = js_to_json('[/*comment\n*/"abc"/*comment\n*/,/*comment\n*/"def",/*comment\n*/]')
    self.assertEqual(json.loads(on), ['abc', 'def'])
    on = js_to_json('[//comment\n"abc" //comment\n,//comment\n"def",//comment\n]')
    self.assertEqual(json.loads(on), ['abc', 'def'])
    on = js_to_json('{"abc": "def",}')
    self.assertEqual(json.loads(on), {'abc': 'def'})
    on = js_to_json('{/*comment\n*/"abc"/*comment\n*/:/*comment\n*/"def"/*comment\n*/,/*comment\n*/}')
    self.assertEqual(json.loads(on), {'abc': 'def'})
    on = js_to_json('{ 0: /* " \n */ ",]" , }')
    self.assertEqual(json.loads(on), {'0': ',]'})
    on = js_to_json('{ /*comment\n*/0/*comment\n*/: /* " \n */ ",]" , }')
    self.assertEqual(json.loads(on), {'0': ',]'})
    on = js_to_json('{ 0: // comment\n1 }')
    self.assertEqual(json.loads(on), {'0': 1})
    on = js_to_json(r'["<p>x<\/p>"]')
    self.assertEqual(json.loads(on), ['<p>x</p>'])
    # \xNN escapes become \u00NN so the result is valid JSON.
    on = js_to_json(r'["\xaa"]')
    self.assertEqual(json.loads(on), ['\u00aa'])
    # Backslash-newline line continuations inside strings are collapsed.
    on = js_to_json("['a\\\nb']")
    self.assertEqual(json.loads(on), ['ab'])
    on = js_to_json("/*comment\n*/[/*comment\n*/'a\\\nb'/*comment\n*/]/*comment\n*/")
    self.assertEqual(json.loads(on), ['ab'])
    # Unquoted hex/octal numerals are converted to decimal.
    on = js_to_json('{0xff:0xff}')
    self.assertEqual(json.loads(on), {'255': 255})
    on = js_to_json('{/*comment\n*/0xff/*comment\n*/:/*comment\n*/0xff/*comment\n*/}')
    self.assertEqual(json.loads(on), {'255': 255})
    on = js_to_json('{077:077}')
    self.assertEqual(json.loads(on), {'63': 63})
    on = js_to_json('{/*comment\n*/077/*comment\n*/:/*comment\n*/077/*comment\n*/}')
    self.assertEqual(json.loads(on), {'63': 63})
    on = js_to_json('{42:42}')
    self.assertEqual(json.loads(on), {'42': 42})
    on = js_to_json('{/*comment\n*/42/*comment\n*/:/*comment\n*/42/*comment\n*/}')
    self.assertEqual(json.loads(on), {'42': 42})
    on = js_to_json('{42:4.2e1}')
    self.assertEqual(json.loads(on), {'42': 42.0})
    # Numeral-looking content inside quoted strings must NOT be converted.
    on = js_to_json('{ "0x40": "0x40" }')
    self.assertEqual(json.loads(on), {'0x40': '0x40'})
    on = js_to_json('{ "040": "040" }')
    self.assertEqual(json.loads(on), {'040': '040'})
def test_js_to_json_malformed(self):
    """Garbage trailing a numeral gets quoted rather than crashing the converter."""
    for malformed, expected in (('42a1', '42"a1"'), ('42a-1', '42"a"-1')):
        self.assertEqual(js_to_json(malformed), expected)
def test_extract_attributes(self):
    """extract_attributes parses an HTML start tag into an attribute dict."""
    self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
    self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'})
    self.assertEqual(extract_attributes('<e x=y>'), {'x': 'y'})
    self.assertEqual(extract_attributes('<e x="a \'b\' c">'), {'x': "a 'b' c"})
    self.assertEqual(extract_attributes('<e x=\'a "b" c\'>'), {'x': 'a "b" c'})
    self.assertEqual(extract_attributes('<e x="&#121;">'), {'x': 'y'})
    self.assertEqual(extract_attributes('<e x="&#x79;">'), {'x': 'y'})
    self.assertEqual(extract_attributes('<e x="&amp;">'), {'x': '&'})  # XML
    self.assertEqual(extract_attributes('<e x="&quot;">'), {'x': '"'})
    self.assertEqual(extract_attributes('<e x="&pound;">'), {'x': '£'})  # HTML 3.2
    self.assertEqual(extract_attributes('<e x="&lambda;">'), {'x': 'λ'})  # HTML 4.0
    # Unknown entities are preserved verbatim.
    self.assertEqual(extract_attributes('<e x="&foo">'), {'x': '&foo'})
    self.assertEqual(extract_attributes('<e x="\'">'), {'x': "'"})
    self.assertEqual(extract_attributes('<e x=\'"\'>'), {'x': '"'})
    # Valueless attributes map to None.
    self.assertEqual(extract_attributes('<e x >'), {'x': None})
    self.assertEqual(extract_attributes('<e x=y a>'), {'x': 'y', 'a': None})
    self.assertEqual(extract_attributes('<e x= y>'), {'x': 'y'})
    # Later duplicates win.
    self.assertEqual(extract_attributes('<e x=1 y=2 x=3>'), {'y': '2', 'x': '3'})
    self.assertEqual(extract_attributes('<e \nx=\ny\n>'), {'x': 'y'})
    self.assertEqual(extract_attributes('<e \nx=\n"y"\n>'), {'x': 'y'})
    self.assertEqual(extract_attributes("<e \nx=\n'y'\n>"), {'x': 'y'})
    self.assertEqual(extract_attributes('<e \nx="\ny\n">'), {'x': '\ny\n'})
    self.assertEqual(extract_attributes('<e CAPS=x>'), {'caps': 'x'})  # Names lowercased
    self.assertEqual(extract_attributes('<e x=1 X=2>'), {'x': '2'})
    self.assertEqual(extract_attributes('<e X=1 x=2>'), {'x': '2'})
    self.assertEqual(extract_attributes('<e _:funny-name1=1>'), {'_:funny-name1': '1'})
    self.assertEqual(extract_attributes('<e x="Fáilte 世界 \U0001f600">'), {'x': 'Fáilte 世界 \U0001f600'})
    self.assertEqual(extract_attributes('<e x="décomposé">'), {'x': 'décompose\u0301'})
    # "Narrow" Python builds don't support unicode code points outside BMP.
    try:
        compat_chr(0x10000)
        supports_outside_bmp = True
    except ValueError:
        supports_outside_bmp = False
    if supports_outside_bmp:
        self.assertEqual(extract_attributes('<e x="Smile &#128512;!">'), {'x': 'Smile \U0001f600!'})
    # Malformed HTML should not break attributes extraction on older Python
    self.assertEqual(extract_attributes('<mal"formed/>'), {})
def test_clean_html(self):
    """clean_html flattens newlines and converts markup/&nbsp; into plain text."""
    for markup, cleaned in (
        ('a:\nb', 'a: b'),
        ('a:\n "b"', 'a: "b"'),
        ('a<br>\xa0b', 'a\nb'),
    ):
        self.assertEqual(clean_html(markup), cleaned)
def test_intlist_to_bytes(self):
    """A list of 0-255 integers maps to the corresponding byte string."""
    octets = [0, 1, 127, 128, 255]
    self.assertEqual(intlist_to_bytes(octets), b'\x00\x01\x7f\x80\xff')
def test_args_to_str(self):
    """args_to_str quotes an argv list for display, per-platform shell style."""
    # POSIX shells use single quotes; cmd.exe uses double quotes.
    self.assertEqual(
        args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
        'foo ba/r -baz \'2 be\' \'\'' if compat_os_name != 'nt' else 'foo ba/r -baz "2 be" ""'
    )
def test_parse_filesize(self):
    """parse_filesize converts human-readable sizes to bytes (SI vs binary units)."""
    for text, size_bytes in (
        (None, None),
        ('', None),
        ('91 B', 91),
        ('foobar', None),
        ('2 MiB', 2097152),        # binary prefix: 2 * 1024**2
        ('5 GB', 5000000000),      # decimal prefix: 5 * 1000**3
        ('1.2Tb', 1200000000000),
        ('1.2tb', 1200000000000),
        ('1,24 KB', 1240),         # comma accepted as decimal separator
        ('1,24 kb', 1240),
        ('8.5 megabytes', 8500000),
    ):
        self.assertEqual(parse_filesize(text), size_bytes)
def test_parse_count(self):
    """parse_count turns abbreviated view-count strings into integers."""
    for text, count in (
        (None, None),
        ('', None),
        ('0', 0),
        ('1000', 1000),
        ('1.000', 1000),           # dot as thousands separator
        ('1.1k', 1100),
        ('1.1kk', 1100000),        # stacked multipliers
        ('1.1kk ', 1100000),
        ('1.1kk views', 1100000),  # trailing words are ignored
    ):
        self.assertEqual(parse_count(text), count)
def test_parse_resolution(self):
    """parse_resolution extracts width/height from WxH, Np and NK style strings."""
    for text, expected in (
        (None, {}),
        ('', {}),
        ('1920x1080', {'width': 1920, 'height': 1080}),
        ('1920×1080', {'width': 1920, 'height': 1080}),  # unicode multiplication sign
        ('1920 x 1080', {'width': 1920, 'height': 1080}),
        ('720p', {'height': 720}),
        ('4k', {'height': 2160}),
        ('8K', {'height': 4320}),
    ):
        self.assertEqual(parse_resolution(text), expected)
def test_parse_bitrate(self):
    """parse_bitrate reads 'Nkbps'-style strings into an integer kbps value."""
    for text, kbps in (
        (None, None),
        ('', None),
        ('300kbps', 300),
        ('1500kbps', 1500),
        ('300 kbps', 300),
    ):
        self.assertEqual(parse_bitrate(text), kbps)
def test_version_tuple(self):
    """version_tuple splits dotted (and avconv's dashed) versions into int tuples."""
    for version, expected in (
        ('1', (1,)),
        ('10.23.344', (10, 23, 344)),
        ('10.1-6', (10, 1, 6)),  # avconv style
    ):
        self.assertEqual(version_tuple(version), expected)
def test_detect_exe_version(self):
    """detect_exe_version pulls the version token out of an executable's banner."""
    self.assertEqual(detect_exe_version('''ffmpeg version 1.2.1
built on May 27 2013 08:37:26 with gcc 4.7 (Debian 4.7.3-4)
configuration: --prefix=/usr --extra-'''), '1.2.1')
    self.assertEqual(detect_exe_version('''ffmpeg version N-63176-g1fb4685
built on May 15 2014 22:09:06 with gcc 4.8.2 (GCC)'''), 'N-63176-g1fb4685')
    # Noise before the banner line must not confuse the detection.
    self.assertEqual(detect_exe_version('''X server found. dri2 connection failed!
Trying to open render node...
Success at /dev/dri/renderD128.
ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
def test_age_restricted(self):
    """Content is restricted only when both limits are set and content > policy."""
    for content_limit, age_limit, restricted in (
        (None, 10, False),  # unrestricted content
        (1, None, False),   # unrestricted policy
        (8, 10, False),
        (18, 14, True),
        (18, 18, False),    # equal age is still allowed
    ):
        self.assertEqual(age_restricted(content_limit, age_limit), restricted)
def test_is_html(self):
    """is_html sniffs HTML content across BOMs/encodings from raw bytes."""
    self.assertFalse(is_html(b'\x49\x44\x43<html'))
    self.assertTrue(is_html(b'<!DOCTYPE foo>\xaaa'))
    self.assertTrue(is_html(  # UTF-8 with BOM
        b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa'))
    self.assertTrue(is_html(  # UTF-16-LE
        b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00'
    ))
    self.assertTrue(is_html(  # UTF-16-BE
        b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4'
    ))
    self.assertTrue(is_html(  # UTF-32-BE
        b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4'))
    self.assertTrue(is_html(  # UTF-32-LE
        b'\xFF\xFE\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4\x00\x00\x00'))
def test_render_table(self):
    """render_table lays out rows under a header with aligned columns."""
    self.assertEqual(
        render_table(
            ['a', 'bcd'],
            [[123, 4], [9999, 51]]),
        'a bcd\n'
        '123 4\n'
        '9999 51')
def test_match_str(self):
    """match_str evaluates --match-filter expressions against an info dict."""
    # Operator before the key name is a syntax error.
    self.assertRaises(ValueError, match_str, 'xy>foobar', {})
    # Bare key tests presence; '!' negates.
    self.assertFalse(match_str('xy', {'x': 1200}))
    self.assertTrue(match_str('!xy', {'x': 1200}))
    self.assertTrue(match_str('x', {'x': 1200}))
    self.assertFalse(match_str('!x', {'x': 1200}))
    self.assertTrue(match_str('x', {'x': 0}))
    self.assertFalse(match_str('x>0', {'x': 0}))
    self.assertFalse(match_str('x>0', {}))
    # '?' makes a comparison succeed when the key is missing.
    self.assertTrue(match_str('x>?0', {}))
    # SI suffixes are understood on the right-hand side.
    self.assertTrue(match_str('x>1K', {'x': 1200}))
    self.assertFalse(match_str('x>2K', {'x': 1200}))
    # '&' combines clauses with AND semantics.
    self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
    self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
    self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
    self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
    self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
    self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
    self.assertFalse(match_str(
        'like_count > 100 & dislike_count <? 50 & description',
        {'like_count': 90, 'description': 'foo'}))
    self.assertTrue(match_str(
        'like_count > 100 & dislike_count <? 50 & description',
        {'like_count': 190, 'description': 'foo'}))
    self.assertFalse(match_str(
        'like_count > 100 & dislike_count <? 50 & description',
        {'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
    self.assertFalse(match_str(
        'like_count > 100 & dislike_count <? 50 & description',
        {'like_count': 190, 'dislike_count': 10}))
    # Boolean keys: only an explicit True counts as live.
    self.assertTrue(match_str('is_live', {'is_live': True}))
    self.assertFalse(match_str('is_live', {'is_live': False}))
    self.assertFalse(match_str('is_live', {'is_live': None}))
    self.assertFalse(match_str('is_live', {}))
    self.assertFalse(match_str('!is_live', {'is_live': True}))
    self.assertTrue(match_str('!is_live', {'is_live': False}))
    self.assertTrue(match_str('!is_live', {'is_live': None}))
    self.assertTrue(match_str('!is_live', {}))
    # String keys: any string value, even empty, counts as present.
    self.assertTrue(match_str('title', {'title': 'abc'}))
    self.assertTrue(match_str('title', {'title': ''}))
    self.assertFalse(match_str('!title', {'title': 'abc'}))
    self.assertFalse(match_str('!title', {'title': ''}))
def test_parse_dfxp_time_expr(self):
    """parse_dfxp_time_expr converts TTML time expressions to seconds."""
    for expr, seconds in (
        (None, None),
        ('', None),
        ('0.1', 0.1),
        ('0.1s', 0.1),
        ('00:00:01', 1.0),
        ('00:00:01.100', 1.1),
        ('00:00:01:100', 1.1),  # colon before the fraction is tolerated
    ):
        self.assertEqual(parse_dfxp_time_expr(expr), seconds)
def test_dfxp2srt(self):
    """dfxp2srt converts TTML/DFXP subtitle documents to SRT text."""
    dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
<p begin="1" end="2">第二行<br/>♪♪</p>
<p begin="2" dur="1"><span>Third<br/>Line</span></p>
<p begin="3" end="-1">Lines with invalid timestamps are ignored</p>
<p begin="-1" end="-1">Ignore, two</p>
<p begin="3" dur="-1">Ignored, three</p>
</div>
</body>
</tt>'''.encode('utf-8')
    srt_data = '''1
00:00:00,000 --> 00:00:01,000
The following line contains Chinese characters and special symbols
2
00:00:01,000 --> 00:00:02,000
第二行
♪♪
3
00:00:02,000 --> 00:00:03,000
Third
Line
'''
    self.assertEqual(dfxp2srt(dfxp_data), srt_data)
    # Documents without the default TTML namespace must still convert.
    dfxp_data_no_default_namespace = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The first line</p>
</div>
</body>
</tt>'''.encode('utf-8')
    srt_data = '''1
00:00:00,000 --> 00:00:01,000
The first line
'''
    self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)
    # Styles (color/bold/italic/underline) are rendered as <font>/<b>/<i>/<u> tags.
    dfxp_data_with_style = '''<?xml version="1.0" encoding="utf-8"?>
<tt xmlns="http://www.w3.org/2006/10/ttaf1" xmlns:ttp="http://www.w3.org/2006/10/ttaf1#parameter" ttp:timeBase="media" xmlns:tts="http://www.w3.org/2006/10/ttaf1#style" xml:lang="en" xmlns:ttm="http://www.w3.org/2006/10/ttaf1#metadata">
<head>
<styling>
<style id="s2" style="s0" tts:color="cyan" tts:fontWeight="bold" />
<style id="s1" style="s0" tts:color="yellow" tts:fontStyle="italic" />
<style id="s3" style="s0" tts:color="lime" tts:textDecoration="underline" />
<style id="s0" tts:backgroundColor="black" tts:fontStyle="normal" tts:fontSize="16" tts:fontFamily="sansSerif" tts:color="white" />
</styling>
</head>
<body tts:textAlign="center" style="s0">
<div>
<p begin="00:00:02.08" id="p0" end="00:00:05.84">default style<span tts:color="red">custom style</span></p>
<p style="s2" begin="00:00:02.08" id="p0" end="00:00:05.84"><span tts:color="lime">part 1<br /></span><span tts:color="cyan">part 2</span></p>
<p style="s3" begin="00:00:05.84" id="p1" end="00:00:09.56">line 3<br />part 3</p>
<p style="s1" tts:textDecoration="underline" begin="00:00:09.56" id="p2" end="00:00:12.36"><span style="s2" tts:color="lime">inner<br /> </span>style</p>
</div>
</body>
</tt>'''.encode('utf-8')
    srt_data = '''1
00:00:02,080 --> 00:00:05,839
<font color="white" face="sansSerif" size="16">default style<font color="red">custom style</font></font>
2
00:00:02,080 --> 00:00:05,839
<b><font color="cyan" face="sansSerif" size="16"><font color="lime">part 1
</font>part 2</font></b>
3
00:00:05,839 --> 00:00:09,560
<u><font color="lime">line 3
part 3</font></u>
4
00:00:09,560 --> 00:00:12,359
<i><u><font color="yellow"><font color="lime">inner
</font>style</font></u></i>
'''
    self.assertEqual(dfxp2srt(dfxp_data_with_style), srt_data)
    # The XML prolog's declared encoding (UTF-16 here) must be honoured.
    dfxp_data_non_utf8 = '''<?xml version="1.0" encoding="UTF-16"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">Line 1</p>
<p begin="1" end="2">第二行</p>
</div>
</body>
</tt>'''.encode('utf-16')
    srt_data = '''1
00:00:00,000 --> 00:00:01,000
Line 1
2
00:00:01,000 --> 00:00:02,000
第二行
'''
    self.assertEqual(dfxp2srt(dfxp_data_non_utf8), srt_data)
def test_cli_option(self):
    """cli_option maps a params entry to ['--flag', value], or [] when unset."""
    self.assertEqual(cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy'), ['--proxy', '127.0.0.1:3128'])
    self.assertEqual(cli_option({'proxy': None}, '--proxy', 'proxy'), [])
    self.assertEqual(cli_option({}, '--proxy', 'proxy'), [])
    # Non-string values are stringified.
    self.assertEqual(cli_option({'retries': 10}, '--retries', 'retries'), ['--retries', '10'])
def test_cli_valueless_option(self):
    """cli_valueless_option emits a bare flag when the param equals the expected value."""
    self.assertEqual(cli_valueless_option(
        {'downloader': 'external'}, '--external-downloader', 'downloader', 'external'), ['--external-downloader'])
    self.assertEqual(cli_valueless_option(
        {'downloader': 'internal'}, '--external-downloader', 'downloader', 'external'), [])
    # The expected value defaults to True.
    self.assertEqual(cli_valueless_option(
        {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'), ['--no-check-certificate'])
    self.assertEqual(cli_valueless_option(
        {'nocheckcertificate': False}, '--no-check-certificate', 'nocheckcertificate'), [])
    self.assertEqual(cli_valueless_option(
        {'checkcertificate': True}, '--no-check-certificate', 'checkcertificate', False), [])
    self.assertEqual(cli_valueless_option(
        {'checkcertificate': False}, '--no-check-certificate', 'checkcertificate', False), ['--no-check-certificate'])
def test_cli_bool_option(self):
    """cli_bool_option renders a bool param as flag + true/false, optionally joined."""
    self.assertEqual(
        cli_bool_option(
            {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'),
        ['--no-check-certificate', 'true'])
    # A separator joins flag and value into a single argv entry.
    self.assertEqual(
        cli_bool_option(
            {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate', separator='='),
        ['--no-check-certificate=true'])
    # Custom true/false spellings invert naturally with the param value.
    self.assertEqual(
        cli_bool_option(
            {'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
        ['--check-certificate', 'false'])
    self.assertEqual(
        cli_bool_option(
            {'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
        ['--check-certificate=false'])
    self.assertEqual(
        cli_bool_option(
            {'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
        ['--check-certificate', 'true'])
    self.assertEqual(
        cli_bool_option(
            {'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
        ['--check-certificate=true'])
    # A missing param produces no arguments at all.
    self.assertEqual(
        cli_bool_option(
            {}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
        [])
def test_ohdave_rsa_encrypt(self):
    """ohdave_rsa_encrypt performs textbook RSA and returns a lowercase hex string."""
    N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
    e = 65537
    self.assertEqual(
        ohdave_rsa_encrypt(b'aa111222', e, N),
        '726664bd9a23fd0c70f9f1b84aab5e3905ce1e45a584e9cbcf9bcc7510338fc1986d6c599ff990d923aa43c51c0d9013cd572e13bc58f4ae48f2ed8c0b0ba881')
def test_pkcs1pad(self):
    """pkcs1pad yields [0, 2, <random nonzero...>, 0, <data>] of the requested length."""
    payload = [1, 2, 3]
    padded = pkcs1pad(payload, 32)
    self.assertEqual(padded[:2], [0, 2])
    self.assertEqual(padded[28:], [0, 1, 2, 3])
    # A target size too small for the mandatory padding must be rejected.
    self.assertRaises(ValueError, pkcs1pad, payload, 8)
def test_encode_base_n(self):
    """encode_base_n renders an integer in base n, with an optional custom alphabet."""
    self.assertEqual(encode_base_n(0, 30), '0')
    self.assertEqual(encode_base_n(80, 30), '2k')
    custom_table = '9876543210ZYXWVUTSRQPONMLKJIHGFEDCBA'
    self.assertEqual(encode_base_n(0, 30, custom_table), '9')
    self.assertEqual(encode_base_n(80, 30, custom_table), '7P')
    # Bases larger than the (default or given) alphabet must be rejected.
    self.assertRaises(ValueError, encode_base_n, 0, 70)
    self.assertRaises(ValueError, encode_base_n, 0, 60, custom_table)
def test_caesar(self):
    """caesar shifts characters within the given alphabet; others pass through."""
    for text, alphabet, shift, expected in (
        ('ace', 'abcdef', 2, 'cea'),
        ('cea', 'abcdef', -2, 'ace'),   # negative shift reverses
        ('ace', 'abcdef', -2, 'eac'),
        ('eac', 'abcdef', 2, 'ace'),
        ('ace', 'abcdef', 0, 'ace'),    # zero shift is identity
        ('xyz', 'abcdef', 2, 'xyz'),    # characters outside the alphabet untouched
        ('abc', 'acegik', 2, 'ebg'),
        ('ebg', 'acegik', -2, 'abc'),
    ):
        self.assertEqual(caesar(text, alphabet, shift), expected)
def test_rot47(self):
    """rot47 applies the printable-ASCII ROT47 substitution."""
    for plain, obfuscated in (
        ('youtube-dlc', r'J@FEF36\5=4'),
        ('YOUTUBE-DLC', r'*~&%&qt\s{r'),
    ):
        self.assertEqual(rot47(plain), obfuscated)
def test_urshift(self):
    """urshift emulates JavaScript's unsigned right shift (>>>) on 32-bit ints."""
    for value, shift, expected in (
        (3, 1, 1),
        (-3, 1, 2147483646),  # negatives use their unsigned 32-bit representation
    ):
        self.assertEqual(urshift(value, shift), expected)
def test_get_element_by_class(self):
    """get_element_by_class returns the content of the first matching element."""
    html = '''
<span class="foo bar">nice</span>
'''
    # A single class token from a multi-class attribute matches.
    self.assertEqual(get_element_by_class('foo', html), 'nice')
    self.assertEqual(get_element_by_class('no-such-class', html), None)
def test_get_element_by_attribute(self):
    """get_element_by_attribute matches the full attribute value exactly."""
    html = '''
<span class="foo bar">nice</span>
'''
    self.assertEqual(get_element_by_attribute('class', 'foo bar', html), 'nice')
    # Unlike class lookup, a partial value does not match.
    self.assertEqual(get_element_by_attribute('class', 'foo', html), None)
    self.assertEqual(get_element_by_attribute('class', 'no-such-foo', html), None)
    html = '''
<div itemprop="author" itemscope>foo</div>
'''
    self.assertEqual(get_element_by_attribute('itemprop', 'author', html), 'foo')
def test_get_elements_by_class(self):
    """get_elements_by_class returns the contents of ALL matching elements."""
    html = '''
<span class="foo bar">nice</span><span class="foo bar">also nice</span>
'''
    self.assertEqual(get_elements_by_class('foo', html), ['nice', 'also nice'])
    self.assertEqual(get_elements_by_class('no-such-class', html), [])
def test_get_elements_by_attribute(self):
    """get_elements_by_attribute collects all elements whose attribute matches exactly."""
    html = '''
<span class="foo bar">nice</span><span class="foo bar">also nice</span>
'''
    self.assertEqual(get_elements_by_attribute('class', 'foo bar', html), ['nice', 'also nice'])
    # Partial attribute values do not match.
    self.assertEqual(get_elements_by_attribute('class', 'foo', html), [])
    self.assertEqual(get_elements_by_attribute('class', 'no-such-foo', html), [])
def test_clean_podcast_url(self):
    """clean_podcast_url strips known tracking-redirect prefixes from podcast URLs."""
    self.assertEqual(clean_podcast_url('https://www.podtrac.com/pts/redirect.mp3/chtbl.com/track/5899E/traffic.megaphone.fm/HSW7835899191.mp3'), 'https://traffic.megaphone.fm/HSW7835899191.mp3')
    self.assertEqual(clean_podcast_url('https://play.podtrac.com/npr-344098539/edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3'), 'https://edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3')
# Allow running this test module directly, in addition to via a test runner.
if __name__ == '__main__':
    unittest.main()
| 46.451047 | 382 | 0.60514 |
from __future__ import unicode_literals
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import io
import json
import xml.etree.ElementTree
from youtube_dlc.utils import (
age_restricted,
args_to_str,
encode_base_n,
caesar,
clean_html,
clean_podcast_url,
date_from_str,
DateRange,
detect_exe_version,
determine_ext,
dict_get,
encode_compat_str,
encodeFilename,
escape_rfc3986,
escape_url,
extract_attributes,
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
float_or_none,
get_element_by_class,
get_element_by_attribute,
get_elements_by_class,
get_elements_by_attribute,
InAdvancePagedList,
int_or_none,
intlist_to_bytes,
is_html,
js_to_json,
limit_length,
merge_dicts,
mimetype2ext,
month_by_name,
multipart_encode,
ohdave_rsa_encrypt,
OnDemandPagedList,
orderedSet,
parse_age_limit,
parse_duration,
parse_filesize,
parse_count,
parse_iso8601,
parse_resolution,
parse_bitrate,
pkcs1pad,
read_batch_urls,
sanitize_filename,
sanitize_path,
sanitize_url,
expand_path,
prepend_extension,
replace_extension,
remove_start,
remove_end,
remove_quotes,
rot47,
shell_quote,
smuggle_url,
str_to_int,
strip_jsonp,
strip_or_none,
subtitles_filename,
timeconvert,
unescapeHTML,
unified_strdate,
unified_timestamp,
unsmuggle_url,
uppercase_escape,
lowercase_escape,
url_basename,
url_or_none,
base_url,
urljoin,
urlencode_postdata,
urshift,
update_url_query,
version_tuple,
xpath_with_ns,
xpath_element,
xpath_text,
xpath_attr,
render_table,
match_str,
parse_dfxp_time_expr,
dfxp2srt,
cli_option,
cli_valueless_option,
cli_bool_option,
parse_codecs,
)
from youtube_dlc.compat import (
compat_chr,
compat_etree_fromstring,
compat_getenv,
compat_os_name,
compat_setenv,
compat_urlparse,
compat_parse_qs,
)
class TestUtil(unittest.TestCase):
def test_timeconvert(self):
    """timeconvert returns None for timestamps it cannot parse."""
    for bad_timestamp in ('', 'bougrg'):
        self.assertTrue(timeconvert(bad_timestamp) is None)
def test_sanitize_filename(self):
    """Default sanitize_filename keeps readable text while removing unsafe characters."""
    self.assertEqual(sanitize_filename('abc'), 'abc')
    self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
    self.assertEqual(sanitize_filename('123'), '123')
    # Path separators and shell-special characters are replaced.
    self.assertEqual('abc_de', sanitize_filename('abc/de'))
    self.assertFalse('/' in sanitize_filename('abc/de///'))
    self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
    self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
    self.assertEqual('yes no', sanitize_filename('yes? no'))
    self.assertEqual('this - that', sanitize_filename('this: that'))
    self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
    # Non-ASCII text is preserved in the default (non-restricted) mode.
    aumlaut = 'ä'
    self.assertEqual(sanitize_filename(aumlaut), aumlaut)
    tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
    self.assertEqual(sanitize_filename(tests), tests)
    self.assertEqual(
        sanitize_filename('New World record at 0:12:34'),
        'New World record at 0_12_34')
    # Leading dashes/dots are neutralized unless the string is a video ID.
    self.assertEqual(sanitize_filename('--gasdgf'), '_-gasdgf')
    self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
    self.assertEqual(sanitize_filename('.gasdgf'), 'gasdgf')
    self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf')
    forbidden = '"\0\\/'
    for fc in forbidden:
        for fbc in forbidden:
            self.assertTrue(fbc not in sanitize_filename(fc))
def test_sanitize_filename_restricted(self):
    """sanitize_filename(restricted=True): ASCII-only, shell-safe names."""
    self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
    self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
    self.assertEqual(sanitize_filename('123', restricted=True), '123')
    self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
    self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))
    self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
    self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
    self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
    self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
    tests = 'aäb\u4e2d\u56fd\u7684c'
    self.assertEqual(sanitize_filename(tests, restricted=True), 'aab_c')
    self.assertTrue(sanitize_filename('\xf6', restricted=True) != '')  # No empty filename
    # Restored: the trailing "#'" of this literal was lost to comment-stripping,
    # leaving an unterminated string (syntax error).
    forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
    for fc in forbidden:
        for fbc in forbidden:
            self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
    # Handle a common case more neatly
    self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
    self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
    # .. but make sure the file name is never empty
    self.assertTrue(sanitize_filename('-', restricted=True) != '')
    self.assertTrue(sanitize_filename(':', restricted=True) != '')
    self.assertEqual(sanitize_filename(
        'ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', restricted=True),
        'AAAAAAAECEEEEIIIIDNOOOOOOOOEUUUUUYTHssaaaaaaaeceeeeiiiionooooooooeuuuuuythy')
def test_sanitize_ids(self):
    """Video IDs passed with is_id=True must survive untouched."""
    for video_id in ('_n_cd26wFpw', '_BD_eEpuzXw', 'N0Y__7-UOdI'):
        self.assertEqual(sanitize_filename(video_id, is_id=True), video_id)
def test_sanitize_path(self):
    """sanitize_path replaces characters forbidden on Windows with '#' (win32 only)."""
    if sys.platform != 'win32':
        return
    self.assertEqual(sanitize_path('abc'), 'abc')
    self.assertEqual(sanitize_path('abc/def'), 'abc\\def')
    self.assertEqual(sanitize_path('abc\\def'), 'abc\\def')
    # Restored: the expectation 'abc#def' had been truncated at '#' by
    # comment-stripping, leaving an unterminated string (syntax error).
    self.assertEqual(sanitize_path('abc|def'), 'abc#def')
    self.assertEqual(sanitize_path('<>:"|?*'), '#######')
    self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def')
    self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def')
    # \\?\ long-path prefixes are preserved
    self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc')
    self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc')
    self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
    self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc')
    self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f')
    self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
    self.assertEqual(
        sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'),
        'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s')
    self.assertEqual(
        sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'),
        'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part')
    self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#')
    self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def')
    self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#')
    self.assertEqual(sanitize_path('../abc'), '..\\abc')
    self.assertEqual(sanitize_path('../../abc'), '..\\..\\abc')
    self.assertEqual(sanitize_path('./abc'), 'abc')
    self.assertEqual(sanitize_path('./../abc'), '..\\abc')
def test_sanitize_url(self):
    """sanitize_url repairs scheme typos and protocol-relative URLs."""
    for url, expected in (
        ('//foo.bar', 'http://foo.bar'),
        ('httpss://foo.bar', 'https://foo.bar'),
        ('rmtps://foo.bar', 'rtmps://foo.bar'),
        ('https://foo.bar', 'https://foo.bar'),
    ):
        self.assertEqual(sanitize_url(url), expected)
def test_expand_path(self):
    """expand_path resolves '~' and environment variables on both platforms."""
    def env(var):
        # Platform-appropriate env-var spelling: %VAR% on Windows, $VAR elsewhere.
        return '%{0}%'.format(var) if sys.platform == 'win32' else '${0}'.format(var)
    # Seed a known variable so the expansion result is deterministic.
    compat_setenv('youtube_dlc_EXPATH_PATH', 'expanded')
    self.assertEqual(expand_path(env('youtube_dlc_EXPATH_PATH')), 'expanded')
    self.assertEqual(expand_path(env('HOME')), compat_getenv('HOME'))
    self.assertEqual(expand_path('~'), compat_getenv('HOME'))
    self.assertEqual(
        expand_path('~/%s' % env('youtube_dlc_EXPATH_PATH')),
        '%s/expanded' % compat_getenv('HOME'))
def test_prepend_extension(self):
    """prepend_extension inserts `ext` before the (expected) final extension."""
    for args, expected in (
        (('abc.ext', 'temp'), 'abc.temp.ext'),
        (('abc.ext', 'temp', 'ext'), 'abc.temp.ext'),
        (('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp'),
        (('abc', 'temp'), 'abc.temp'),
        (('.abc', 'temp'), '.abc.temp'),
        (('.abc.ext', 'temp'), '.abc.temp.ext'),
    ):
        self.assertEqual(prepend_extension(*args), expected)
def test_replace_extension(self):
    """replace_extension swaps the (expected) final extension for a new one."""
    for args, expected in (
        (('abc.ext', 'temp'), 'abc.temp'),
        (('abc.ext', 'temp', 'ext'), 'abc.temp'),
        (('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp'),
        (('abc', 'temp'), 'abc.temp'),
        (('.abc', 'temp'), '.abc.temp'),
        (('.abc.ext', 'temp'), '.abc.temp'),
    ):
        self.assertEqual(replace_extension(*args), expected)
def test_subtitles_filename(self):
    """subtitles_filename splices the language code before the subtitle extension."""
    for args, expected in (
        (('abc.ext', 'en', 'vtt'), 'abc.en.vtt'),
        (('abc.ext', 'en', 'vtt', 'ext'), 'abc.en.vtt'),
        (('abc.unexpected_ext', 'en', 'vtt', 'ext'), 'abc.unexpected_ext.en.vtt'),
    ):
        self.assertEqual(subtitles_filename(*args), expected)
def test_remove_start(self):
    """remove_start strips a prefix when present; None passes through."""
    for s, expected in ((None, None), ('A - B', 'B'), ('B - A', 'B - A')):
        self.assertEqual(remove_start(s, 'A - '), expected)
def test_remove_end(self):
    """remove_end strips a suffix when present; None passes through."""
    for s, expected in ((None, None), ('A - B', 'A'), ('B - A', 'B - A')):
        self.assertEqual(remove_end(s, ' - B'), expected)
def test_remove_quotes(self):
    """remove_quotes only strips a *matching* pair of surrounding quotes."""
    for s, expected in (
        (None, None),
        ('"', '"'),
        ("'", "'"),
        (';', ';'),
        ('";', '";'),
        ('""', ''),
        ('";"', ';'),
    ):
        self.assertEqual(remove_quotes(s), expected)
def test_ordered_set(self):
    """orderedSet drops duplicates while keeping first-seen order."""
    for given, expected in (
        ([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5], [1, 2, 3, 4, 5, 6, 7]),
        ([], []),
        ([1], [1]),
        ([135, 1, 1, 1], [135, 1]),  # keep the list ordered
    ):
        self.assertEqual(orderedSet(given), expected)
def test_unescape_html(self):
    """unescapeHTML decodes numeric and named (HTML5) character references.

    Restored from upstream: the entity references inside these literals had
    been decoded in place by a broken extraction pass, which turned the last
    assertion into a syntax error.
    """
    self.assertEqual(unescapeHTML('%20;'), '%20;')
    self.assertEqual(unescapeHTML('&#x2F;'), '/')
    self.assertEqual(unescapeHTML('&#47;'), '/')
    self.assertEqual(unescapeHTML('&eacute;'), 'é')
    # out-of-range numeric reference is left untouched
    self.assertEqual(unescapeHTML('&#2013266066;'), '&#2013266066;')
    self.assertEqual(unescapeHTML('&a&quot;'), '&a"')
    # HTML5 entities
    self.assertEqual(unescapeHTML('&period;&apos;'), '.\'')
def test_date_from_str(self):
    """Relative date aliases must agree with their day-based equivalents."""
    for alias, equivalent in (
        ('yesterday', 'now-1day'),
        ('now+7day', 'now+1week'),
        ('now+14day', 'now+2week'),
        ('now+365day', 'now+1year'),
        ('now+30day', 'now+1month'),
    ):
        self.assertEqual(date_from_str(alias), date_from_str(equivalent))
def test_daterange(self):
    """DateRange membership for closed, open-start and open-end ranges."""
    twentieth_century = DateRange("19000101", "20000101")
    self.assertFalse("17890714" in twentieth_century)
    common_era = DateRange("00010101")
    self.assertTrue("19690721" in common_era)
    first_millennium = DateRange(end="10000101")
    self.assertTrue("07110427" in first_millennium)
def test_unified_dates(self):
    """unified_strdate normalises many date formats to YYYYMMDD."""
    for datestr, expected in (
        ('December 21, 2010', '20101221'),
        ('8/7/2009', '20090708'),
        ('Dec 14, 2012', '20121214'),
        ('2012/10/11 01:56:38 +0000', '20121011'),
        ('1968 12 10', '19681210'),
        ('1968-12-10', '19681210'),
        ('28/01/2014 21:00:00 +0100', '20140128'),
        ('Feb 14th 2016 5:45PM', '20160214'),
        ('25-09-2014', '20140925'),
        ('27.02.2016 17:30', '20160227'),
        ('UNKNOWN DATE FORMAT', None),
        ('Feb 7, 2016 at 6:35 pm', '20160207'),
        ('July 15th, 2013', '20130715'),
        ('September 1st, 2013', '20130901'),
        ('Sep 2nd, 2013', '20130902'),
        ('November 3rd, 2019', '20191103'),
        ('October 23rd, 2005', '20051023'),
    ):
        self.assertEqual(unified_strdate(datestr), expected)
    # month-first (US) formats require day_first=False
    self.assertEqual(
        unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
        '20141126')
    self.assertEqual(
        unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
        '20150202')
def test_unified_timestamps(self):
    """unified_timestamp converts assorted date strings to UNIX timestamps."""
    for datestr, expected in (
        ('December 21, 2010', 1292889600),
        ('8/7/2009', 1247011200),
        ('Dec 14, 2012', 1355443200),
        ('2012/10/11 01:56:38 +0000', 1349920598),
        ('1968 12 10', -33436800),
        ('1968-12-10', -33436800),
        ('28/01/2014 21:00:00 +0100', 1390939200),
        ('Feb 14th 2016 5:45PM', 1455471900),
        ('25-09-2014', 1411603200),
        ('27.02.2016 17:30', 1456594200),
        ('UNKNOWN DATE FORMAT', None),
        ('May 16, 2016 11:15 PM', 1463440500),
        ('Feb 7, 2016 at 6:35 pm', 1454870100),
        ('2017-03-30T17:52:41Q', 1490896361),
        ('Sep 11, 2013 | 5:49 AM', 1378878540),
        ('December 15, 2017 at 7:49 am', 1513324140),
        ('2018-03-14T08:32:43.1493874+00:00', 1521016363),
    ):
        self.assertEqual(unified_timestamp(datestr), expected)
    # month-first (US) formats require day_first=False
    self.assertEqual(
        unified_timestamp('11/26/2014 11:30:00 AM PST', day_first=False),
        1417001400)
    self.assertEqual(
        unified_timestamp('2/2/2015 6:47:40 PM', day_first=False),
        1422902860)
def test_determine_ext(self):
    """determine_ext recognises known extensions; unknown ones yield the default."""
    self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
    self.assertEqual(determine_ext('http://example.com/foo/bar.m3u8//?download'), 'm3u8')
    for url in (
        'http://example.com/foo/bar/?download',
        'http://example.com/foo/bar.nonext/?download',
        'http://example.com/foo/bar/mp4?download',
        'foobar',
    ):
        self.assertIsNone(determine_ext(url, None))
def test_find_xpath_attr(self):
    """find_xpath_attr: first element carrying an attribute (optionally with a given value)."""
    testxml = '''<root>
<node/>
<node x="a"/>
<node x="a" y="c" />
<node x="b" y="d" />
<node x="" />
</root>'''
    doc = compat_etree_fromstring(testxml)
    # missing tag or attribute -> None
    self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
    self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
    self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None)
    self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None)
    # attribute present -> first matching element in document order
    self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1])
    self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
    self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3])
    self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2])
    self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
    self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3])
    # an empty attribute value still counts as present
    self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4])
def test_xpath_with_ns(self):
    """xpath_with_ns expands a prefix map into namespaced XPath lookups."""
    testxml = '''<root xmlns:media="http://example.com/">
<media:song>
<media:author>The Author</media:author>
<url>http://server.com/download.mp3</url>
</media:song>
</root>'''
    doc = compat_etree_fromstring(testxml)

    def find(path):
        return doc.find(xpath_with_ns(path, {'media': 'http://example.com/'}))

    self.assertTrue(find('media:song') is not None)
    self.assertEqual(find('media:song/media:author').text, 'The Author')
    self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
def test_xpath_element(self):
    """xpath_element: first match wins; defaults apply; fatal lookups raise."""
    root = xml.etree.ElementTree.Element('root')
    div = xml.etree.ElementTree.SubElement(root, 'div')
    p = xml.etree.ElementTree.SubElement(div, 'p')
    p.text = 'Foo'
    # single path and list-of-paths lookups
    self.assertEqual(xpath_element(root, 'div/p'), p)
    self.assertEqual(xpath_element(root, ['div/p']), p)
    self.assertEqual(xpath_element(root, ['div/bar', 'div/p']), p)
    # missing nodes fall back to the default, else None
    self.assertEqual(xpath_element(root, 'div/bar', default='default'), 'default')
    self.assertEqual(xpath_element(root, ['div/bar'], default='default'), 'default')
    self.assertIsNone(xpath_element(root, 'div/bar'))
    self.assertIsNone(xpath_element(root, ['div/bar']))
    self.assertIsNone(xpath_element(root, ['div/bar'], 'div/baz'))
    # fatal=True turns a miss into an ExtractorError
    self.assertRaises(ExtractorError, xpath_element, root, 'div/bar', fatal=True)
    self.assertRaises(ExtractorError, xpath_element, root, ['div/bar'], fatal=True)
    self.assertRaises(ExtractorError, xpath_element, root, ['div/bar', 'div/baz'], fatal=True)
def test_xpath_text(self):
    """xpath_text returns node text, a default, None, or raises when fatal."""
    testxml = '''<root>
<div>
<p>Foo</p>
</div>
</root>'''
    root = compat_etree_fromstring(testxml)
    self.assertEqual(xpath_text(root, 'div/p'), 'Foo')
    self.assertEqual(xpath_text(root, 'div/bar', default='default'), 'default')
    self.assertIsNone(xpath_text(root, 'div/bar'))
    self.assertRaises(ExtractorError, xpath_text, root, 'div/bar', fatal=True)
def test_xpath_attr(self):
    """xpath_attr fetches an attribute value with default/None/fatal semantics."""
    testxml = '''<root>
<div>
<p x="a">Foo</p>
</div>
</root>'''
    root = compat_etree_fromstring(testxml)
    self.assertEqual(xpath_attr(root, 'div/p', 'x'), 'a')
    # missing node or missing attribute -> None (or the default)
    self.assertIsNone(xpath_attr(root, 'div/bar', 'x'))
    self.assertIsNone(xpath_attr(root, 'div/p', 'y'))
    self.assertEqual(xpath_attr(root, 'div/bar', 'x', default='default'), 'default')
    self.assertEqual(xpath_attr(root, 'div/p', 'y', default='default'), 'default')
    # fatal=True raises instead
    self.assertRaises(ExtractorError, xpath_attr, root, 'div/bar', 'x', fatal=True)
    self.assertRaises(ExtractorError, xpath_attr, root, 'div/p', 'y', fatal=True)
def test_smuggle_url(self):
    """smuggle_url/unsmuggle_url round-trip JSON-serialisable data via a URL."""
    payload = {"ö": "ö", "abc": [3]}
    url = 'https://foo.bar/baz?x=y#a'
    smugged = smuggle_url(url, payload)
    plain_url, extracted = unsmuggle_url(smugged)
    self.assertEqual(url, plain_url)
    self.assertEqual(payload, extracted)
    # a URL with nothing smuggled comes back unchanged, data is None
    plain_url, extracted = unsmuggle_url(url)
    self.assertEqual(plain_url, url)
    self.assertEqual(extracted, None)
    # smuggling twice merges both data dicts
    once = smuggle_url(url, {'a': 'b'})
    twice = smuggle_url(once, {'c': 'd'})
    plain_url, extracted = unsmuggle_url(twice)
    self.assertEqual(plain_url, url)
    self.assertEqual(extracted, {'a': 'b', 'c': 'd'})
def test_shell_quote(self):
    """shell_quote single-quotes args POSIX-style (double quotes on Windows)."""
    args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')]
    # expected form depends on the platform's quoting convention
    self.assertEqual(
        shell_quote(args),
        """ffmpeg -i 'ñ€ß'"'"'.mp4'""" if compat_os_name != 'nt' else '''ffmpeg -i "ñ€ß'.mp4"''')
def test_float_or_none(self):
    """float_or_none converts numeric strings; non-numeric input yields None."""
    self.assertEqual(float_or_none('42.42'), 42.42)
    self.assertEqual(float_or_none('42'), 42.0)
    for bad in ('', None, [], set()):
        self.assertIsNone(float_or_none(bad))
def test_int_or_none(self):
    """int_or_none converts integer strings; anything else yields None."""
    self.assertEqual(int_or_none('42'), 42)
    for bad in ('', None, [], set()):
        self.assertIsNone(int_or_none(bad))
def test_str_to_int(self):
    """str_to_int parses ints with thousands separators; invalid input gives None."""
    self.assertEqual(str_to_int('123,456'), 123456)
    self.assertEqual(str_to_int('123.456'), 123456)
    self.assertEqual(str_to_int(523), 523)
    # Python 3 has no long
    if sys.version_info < (3, 0):
        # 123456L is a syntax error on py3, so the py2-only case is eval'd
        eval('self.assertEqual(str_to_int(123456L), 123456)')
    self.assertEqual(str_to_int('noninteger'), None)
    self.assertEqual(str_to_int([]), None)
def test_url_basename(self):
    """url_basename returns the last path segment, ignoring query and fragment."""
    self.assertEqual(url_basename('http://foo.de/'), '')
    self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz')
    self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz')
    # Restored: the fragment '#x=y' had been truncated by comment-stripping,
    # leaving an unterminated string (syntax error).
    self.assertEqual(url_basename('http://foo.de/bar/baz#x=y'), 'baz')
    self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz')
    self.assertEqual(
        url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'),
        'trailer.mp4')
def test_base_url(self):
    """base_url strips everything after the last directory separator."""
    for url, expected in (
        ('http://foo.de/', 'http://foo.de/'),
        ('http://foo.de/bar', 'http://foo.de/'),
        ('http://foo.de/bar/', 'http://foo.de/bar/'),
        ('http://foo.de/bar/baz', 'http://foo.de/bar/'),
        ('http://foo.de/bar/baz?x=z/x/c', 'http://foo.de/bar/'),
    ):
        self.assertEqual(base_url(url), expected)
def test_urljoin(self):
    """urljoin accepts str/bytes bases and paths, rejecting unusable inputs."""
    for base, path, expected in (
        ('http://foo.de/', '/a/b/c.txt', 'http://foo.de/a/b/c.txt'),
        (b'http://foo.de/', '/a/b/c.txt', 'http://foo.de/a/b/c.txt'),
        ('http://foo.de/', b'/a/b/c.txt', 'http://foo.de/a/b/c.txt'),
        (b'http://foo.de/', b'/a/b/c.txt', 'http://foo.de/a/b/c.txt'),
        ('//foo.de/', '/a/b/c.txt', '//foo.de/a/b/c.txt'),
        ('http://foo.de/', 'a/b/c.txt', 'http://foo.de/a/b/c.txt'),
        ('http://foo.de', '/a/b/c.txt', 'http://foo.de/a/b/c.txt'),
        ('http://foo.de', 'a/b/c.txt', 'http://foo.de/a/b/c.txt'),
        ('http://foo.de/', 'http://foo.de/a/b/c.txt', 'http://foo.de/a/b/c.txt'),
        ('http://foo.de/', '//foo.de/a/b/c.txt', '//foo.de/a/b/c.txt'),
        (None, 'http://foo.de/a/b/c.txt', 'http://foo.de/a/b/c.txt'),
        (None, '//foo.de/a/b/c.txt', '//foo.de/a/b/c.txt'),
        ('', 'http://foo.de/a/b/c.txt', 'http://foo.de/a/b/c.txt'),
        (['foobar'], 'http://foo.de/a/b/c.txt', 'http://foo.de/a/b/c.txt'),
        ('http://foo.de/', None, None),
        ('http://foo.de/', '', None),
        ('http://foo.de/', ['foobar'], None),
        ('http://foo.de/a/b/c.txt', '.././../d.txt', 'http://foo.de/d.txt'),
        ('http://foo.de/a/b/c.txt', 'rtmp://foo.de', 'rtmp://foo.de'),
        (None, 'rtmp://foo.de', 'rtmp://foo.de'),
    ):
        self.assertEqual(urljoin(base, path), expected)
def test_url_or_none(self):
    """url_or_none passes through plausible URLs and rejects everything else."""
    for url, expected in (
        (None, None),
        ('', None),
        ('foo', None),
        ('http://foo.de', 'http://foo.de'),
        ('https://foo.de', 'https://foo.de'),
        ('http$://foo.de', None),
        ('http://foo.de', 'http://foo.de'),
        ('//foo.de', '//foo.de'),
        ('s3://foo.de', None),
        ('rtmpte://foo.de', 'rtmpte://foo.de'),
        ('mms://foo.de', 'mms://foo.de'),
        ('rtspu://foo.de', 'rtspu://foo.de'),
        ('ftps://foo.de', 'ftps://foo.de'),
    ):
        self.assertEqual(url_or_none(url), expected)
def test_parse_age_limit(self):
    """parse_age_limit maps ints and rating strings (PG-13, TV-MA, ...) to ages."""
    for rating, expected in (
        (None, None),
        (False, None),
        ('invalid', None),
        (0, 0),
        (18, 18),
        (21, 21),
        (22, None),
        ('18', 18),
        ('18+', 18),
        ('PG-13', 13),
        ('TV-14', 14),
        ('TV-MA', 17),
        ('TV14', 14),
        ('TV_G', 0),
    ):
        self.assertEqual(parse_age_limit(rating), expected)
def test_parse_duration(self):
    """parse_duration handles clock, verbose and ISO 8601 duration formats."""
    for duration, expected in (
        (None, None),
        (False, None),
        ('invalid', None),
        ('1', 1),
        ('1337:12', 80232),
        ('9:12:43', 33163),
        ('12:00', 720),
        ('00:01:01', 61),
        ('x:y', None),
        ('3h11m53s', 11513),
        ('3h 11m 53s', 11513),
        ('3 hours 11 minutes 53 seconds', 11513),
        ('3 hours 11 mins 53 secs', 11513),
        ('62m45s', 3765),
        ('6m59s', 419),
        ('49s', 49),
        ('0h0m0s', 0),
        ('0m0s', 0),
        ('0s', 0),
        ('01:02:03.05', 3723.05),
        ('T30M38S', 1838),
        ('5 s', 5),
        ('3 min', 180),
        ('2.5 hours', 9000),
        ('02:03:04', 7384),
        ('01:02:03:04', 93784),
        ('1 hour 3 minutes', 3780),
        ('87 Min.', 5220),
        ('PT1H0.040S', 3600.04),
        ('PT00H03M30SZ', 210),
        ('P0Y0M0DT0H4M20.880S', 260.88),
    ):
        self.assertEqual(parse_duration(duration), expected)
def test_fix_xml_ampersands(self):
    """fix_xml_ampersands escapes stray '&' but leaves valid entities alone.

    Restored from upstream: the '&amp;'/'&apos;'-style expectations had been
    entity-decoded and the last assertion truncated at '&#', producing syntax
    errors and swallowing the test_paged_list header below.
    """
    self.assertEqual(
        fix_xml_ampersands('"&x=y&z=a'), '"&amp;x=y&amp;z=a')
    self.assertEqual(
        fix_xml_ampersands('"&amp;x=y&wrong;&z=a'),
        '"&amp;x=y&amp;wrong;&amp;z=a')
    self.assertEqual(
        fix_xml_ampersands('&amp;&apos;&gt;&lt;&quot;'),
        '&amp;&apos;&gt;&lt;&quot;')
    self.assertEqual(
        fix_xml_ampersands('&#1234;&#x1abC;'), '&#1234;&#x1abC;')

def test_paged_list(self):
    """OnDemandPagedList and InAdvancePagedList slice like a materialised list."""
    def testPL(size, pagesize, sliceargs, expected):
        def get_page(pagenum):
            firstid = pagenum * pagesize
            upto = min(size, pagenum * pagesize + pagesize)
            for i in range(firstid, upto):
                yield i

        pl = OnDemandPagedList(get_page, pagesize)
        got = pl.getslice(*sliceargs)
        self.assertEqual(got, expected)

        iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
        got = iapl.getslice(*sliceargs)
        self.assertEqual(got, expected)

    testPL(5, 2, (), [0, 1, 2, 3, 4])
    testPL(5, 2, (1,), [1, 2, 3, 4])
    testPL(5, 2, (2,), [2, 3, 4])
    testPL(5, 2, (4,), [4])
    testPL(5, 2, (0, 3), [0, 1, 2])
    testPL(5, 2, (1, 4), [1, 2, 3])
    testPL(5, 2, (2, 99), [2, 3, 4])
    testPL(5, 2, (20, 99), [])
def test_read_batch_urls(self):
    """read_batch_urls skips the BOM, CR line endings and comment lines."""
    # the literal deliberately starts with a UTF-8 BOM and mixes \r endings
    f = io.StringIO('''\xef\xbb\xbf foo
bar\r
baz
# More after this line\r
; or after this
bam''')
    self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam'])
def test_urlencode_postdata(self):
    """urlencode_postdata must produce bytes suitable for a POST body."""
    encoded = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
    self.assertTrue(isinstance(encoded, bytes))
def test_update_url_query(self):
    """update_url_query merges/replaces query parameters of many value types."""
    def query_dict(url):
        return compat_parse_qs(compat_urlparse.urlparse(url).query)

    for url, query, expected in (
        ('http://example.com/path', {'quality': ['HD'], 'format': ['mp4']},
         'http://example.com/path?quality=HD&format=mp4'),
        ('http://example.com/path', {'system': ['LINUX', 'WINDOWS']},
         'http://example.com/path?system=LINUX&system=WINDOWS'),
        ('http://example.com/path', {'fields': 'id,formats,subtitles'},
         'http://example.com/path?fields=id,formats,subtitles'),
        ('http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')},
         'http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'),
        # an empty list removes the parameter
        ('http://example.com/path?manifest=f4m', {'manifest': []},
         'http://example.com/path'),
        # an existing parameter is replaced, not appended
        ('http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'},
         'http://example.com/path?system=LINUX'),
        ('http://example.com/path', {'fields': b'id,formats,subtitles'},
         'http://example.com/path?fields=id,formats,subtitles'),
        ('http://example.com/path', {'width': 1080, 'height': 720},
         'http://example.com/path?width=1080&height=720'),
        ('http://example.com/path', {'bitrate': 5020.43},
         'http://example.com/path?bitrate=5020.43'),
        ('http://example.com/path', {'test': '第二行тест'},
         'http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'),
    ):
        self.assertEqual(query_dict(update_url_query(url, query)), query_dict(expected))
def test_multipart_encode(self):
    """multipart_encode builds a multipart/form-data body; a boundary occurring in the data raises ValueError."""
    self.assertEqual(
        multipart_encode({b'field': b'value'}, boundary='AAAAAA')[0],
        b'--AAAAAA\r\nContent-Disposition: form-data; name="field"\r\n\r\nvalue\r\n--AAAAAA--\r\n')
    # non-ASCII field names/values are carried through as raw UTF-8 bytes
    self.assertEqual(
        multipart_encode({'欄位'.encode('utf-8'): '值'.encode('utf-8')}, boundary='AAAAAA')[0],
        b'--AAAAAA\r\nContent-Disposition: form-data; name="\xe6\xac\x84\xe4\xbd\x8d"\r\n\r\n\xe5\x80\xbc\r\n--AAAAAA--\r\n')
    self.assertRaises(
        ValueError, multipart_encode, {b'field': b'value'}, boundary='value')
def test_dict_get(self):
    """dict_get scans keys in order, returning the first truthy value
    (or any value when skip_false_values=False)."""
    FALSE_VALUES = {
        'none': None,
        'false': False,
        'zero': 0,
        'empty_string': '',
        'empty_list': [],
    }
    d = dict(FALSE_VALUES, a=42)
    self.assertEqual(dict_get(d, 'a'), 42)
    self.assertIsNone(dict_get(d, 'b'))
    self.assertEqual(dict_get(d, 'b', 42), 42)
    self.assertEqual(dict_get(d, ('a', )), 42)
    self.assertEqual(dict_get(d, ('b', 'a', )), 42)
    self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42)
    self.assertIsNone(dict_get(d, ('b', 'c', )))
    self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42)
    # falsy values are skipped by default but surfaced with skip_false_values=False
    for key, false_value in FALSE_VALUES.items():
        self.assertIsNone(dict_get(d, ('b', 'c', key, )))
        self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
def test_merge_dicts(self):
    """merge_dicts: earlier dicts win, except empty-string values yield to later real ones."""
    for dicts, expected in (
        (({'a': 1}, {'b': 2}), {'a': 1, 'b': 2}),
        (({'a': 1}, {'a': 2}), {'a': 1}),
        (({'a': 1}, {'a': None}), {'a': 1}),
        (({'a': 1}, {'a': ''}), {'a': 1}),
        (({'a': 1}, {}), {'a': 1}),
        (({'a': None}, {'a': 1}), {'a': 1}),
        (({'a': ''}, {'a': 1}), {'a': ''}),
        (({'a': ''}, {'a': 'abc'}), {'a': 'abc'}),
        (({'a': None}, {'a': ''}, {'a': 'abc'}), {'a': 'abc'}),
    ):
        self.assertEqual(merge_dicts(*dicts), expected)
def test_encode_compat_str(self):
    """encode_compat_str: UTF-8 bytes decode to text; text passes through."""
    expected = 'тест'
    self.assertEqual(encode_compat_str(b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82', 'utf-8'), expected)
    self.assertEqual(encode_compat_str(expected, 'utf-8'), expected)
def test_parse_iso8601(self):
    """parse_iso8601 handles offsets, 'Z', fractional seconds; malformed gives None."""
    for datestr, expected in (
        ('2014-03-23T23:04:26+0100', 1395612266),
        ('2014-03-23T22:04:26+0000', 1395612266),
        ('2014-03-23T22:04:26Z', 1395612266),
        ('2014-03-23T22:04:26.1234Z', 1395612266),
        ('2015-09-29T08:27:31.727', 1443515251),
        ('2015-09-29T08-27-31.727', None),
    ):
        self.assertEqual(parse_iso8601(datestr), expected)
def test_strip_jsonp(self):
    """strip_jsonp removes the callback wrapper, leaving parseable JSON."""
    for jsonp, expected in (
        ('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);', [{'id': '532cb', 'x': 3}]),
        ('parseMetadata({"STATUS":"OK"})\n\n\n//epc', {'STATUS': 'OK'}),
        ('ps.embedHandler({"status": "success"});', {'status': 'success'}),
        ('window.cb && window.cb({"status": "success"});', {'status': 'success'}),
        ('window.cb && cb({"status": "success"});', {'status': 'success'}),
        ('({"status": "success"});', {'status': 'success'}),
    ):
        self.assertEqual(json.loads(strip_jsonp(jsonp)), expected)
def test_strip_or_none(self):
    """strip_or_none trims whitespace from strings; non-strings become None."""
    for padded in (' abc', 'abc ', ' abc ', '\tabc\t', '\n\tabc\n\t', 'abc'):
        self.assertEqual(strip_or_none(padded), 'abc')
    self.assertEqual(strip_or_none(''), '')
    for non_string in (None, 42, []):
        self.assertIsNone(strip_or_none(non_string))
def test_uppercase_escape(self):
    """uppercase_escape expands \\UXXXXXXXX escapes, leaving plain text alone."""
    self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
    self.assertEqual(uppercase_escape('aä'), 'aä')
def test_lowercase_escape(self):
    """lowercase_escape expands \\uXXXX escapes, leaving plain text alone."""
    self.assertEqual(lowercase_escape('\\u0026'), '&')
    self.assertEqual(lowercase_escape('aä'), 'aä')
def test_limit_length(self):
    """limit_length truncates long strings with an ellipsis; short ones pass through."""
    self.assertIsNone(limit_length(None, 12))
    self.assertEqual(limit_length('foo', 12), 'foo')
    truncated = limit_length('foo bar baz asd', 12)
    self.assertTrue(truncated.startswith('foo bar'))
    self.assertIn('...', truncated)
def test_mimetype2ext(self):
    """mimetype2ext maps MIME types (with optional parameters) to extensions."""
    for mimetype, ext in (
        (None, None),
        ('video/x-flv', 'flv'),
        ('application/x-mpegURL', 'm3u8'),
        ('text/vtt', 'vtt'),
        ('text/vtt;charset=utf-8', 'vtt'),
        ('text/html; charset=utf-8', 'html'),
        ('audio/x-wav', 'wav'),
        ('audio/x-wav;codec=pcm', 'wav'),
    ):
        self.assertEqual(mimetype2ext(mimetype), ext)
def test_month_by_name(self):
    """month_by_name resolves month names per language (English by default)."""
    self.assertIsNone(month_by_name(None))
    self.assertEqual(month_by_name('December', 'en'), 12)
    self.assertEqual(month_by_name('décembre', 'fr'), 12)
    self.assertEqual(month_by_name('December'), 12)
    # French name without lang='fr' is not recognised
    self.assertIsNone(month_by_name('décembre'))
    self.assertIsNone(month_by_name('Unknown', 'unknown'))
def test_parse_codecs(self):
    """parse_codecs splits a codecs string into vcodec/acodec fields."""
    for codecs_str, expected in (
        ('', {}),
        ('avc1.77.30, mp4a.40.2', {'vcodec': 'avc1.77.30', 'acodec': 'mp4a.40.2'}),
        ('mp4a.40.2', {'vcodec': 'none', 'acodec': 'mp4a.40.2'}),
        ('mp4a.40.5,avc1.42001e', {'vcodec': 'avc1.42001e', 'acodec': 'mp4a.40.5'}),
        ('avc3.640028', {'vcodec': 'avc3.640028', 'acodec': 'none'}),
        (', h264,,newcodec,aac', {'vcodec': 'h264', 'acodec': 'aac'}),
        ('av01.0.05M.08', {'vcodec': 'av01.0.05M.08', 'acodec': 'none'}),
        ('theora, vorbis', {'vcodec': 'theora', 'acodec': 'vorbis'}),
        ('unknownvcodec, unknownacodec',
         {'vcodec': 'unknownvcodec', 'acodec': 'unknownacodec'}),
        ('unknown', {}),
    ):
        self.assertEqual(parse_codecs(codecs_str), expected)
def test_escape_rfc3986(self):
    """escape_rfc3986 percent-encodes only characters outside the RFC 3986 sets."""
    reserved = "!*'();:@&=+$,/?#[]"
    unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
    for given, expected in (
        (reserved, reserved),
        (unreserved, unreserved),
        ('тест', '%D1%82%D0%B5%D1%81%D1%82'),
        ('%D1%82%D0%B5%D1%81%D1%82', '%D1%82%D0%B5%D1%81%D1%82'),
        ('foo bar', 'foo%20bar'),
        ('foo%20bar', 'foo%20bar'),
    ):
        self.assertEqual(escape_rfc3986(given), expected)
def test_escape_url(self):
    """escape_url IDNA-encodes the host and percent-encodes non-ASCII URL parts."""
    for url, escaped in (
        ('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4',
         'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'),
        ('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290',
         'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
        ('http://тест.рф/фрагмент',
         'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'),
        ('http://тест.рф/абв?абв=абв#абв',
         'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'),
        # already-clean URLs pass through untouched
        ('http://vimeo.com/56015672#at=0', 'http://vimeo.com/56015672#at=0'),
    ):
        self.assertEqual(escape_url(url), escaped)
def test_js_to_json_realworld(self):
inp = '''{
'clip':{'provider':'pseudo'}
}'''
self.assertEqual(js_to_json(inp), '''{
"clip":{"provider":"pseudo"}
}''')
json.loads(js_to_json(inp))
inp = '''{
'playlist':[{'controls':{'all':null}}]
}'''
self.assertEqual(js_to_json(inp), '''{
"playlist":[{"controls":{"all":null}}]
}''')
inp = '''"The CW\\'s \\'Crazy Ex-Girlfriend\\'"'''
self.assertEqual(js_to_json(inp), '''"The CW's 'Crazy Ex-Girlfriend'"''')
inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
json_code = js_to_json(inp)
self.assertEqual(json.loads(json_code), json.loads(inp))
inp = '''{
0:{src:'skipped', type: 'application/dash+xml'},
1:{src:'skipped', type: 'application/vnd.apple.mpegURL'},
}'''
self.assertEqual(js_to_json(inp), '''{
"0":{"src":"skipped", "type": "application/dash+xml"},
"1":{"src":"skipped", "type": "application/vnd.apple.mpegURL"}
}''')
inp = '''{"foo":101}'''
self.assertEqual(js_to_json(inp), '''{"foo":101}''')
inp = '''{"duration": "00:01:07"}'''
self.assertEqual(js_to_json(inp), '''{"duration": "00:01:07"}''')
inp = '''{segments: [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}'''
self.assertEqual(js_to_json(inp), '''{"segments": [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}''')
def test_js_to_json_edgecases(self):
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
on = js_to_json('{"abc": true}')
self.assertEqual(json.loads(on), {'abc': True})
on = js_to_json('''{
"x": 1,
y: "a",
z: some.code
}''')
d = json.loads(on)
self.assertEqual(d['x'], 1)
self.assertEqual(d['y'], 'a')
on = js_to_json('''{
a: !0,
b: !1,
c: !!0,
d: !!42.42,
e: !!![],
f: !"abc",
g: !"",
!42: 42
}''')
self.assertEqual(json.loads(on), {
'a': 0,
'b': 1,
'c': 0,
'd': 42.42,
'e': [],
'f': "abc",
'g': "",
'42': 42
})
on = js_to_json('["abc", "def",]')
self.assertEqual(json.loads(on), ['abc', 'def'])
on = js_to_json('[/*comment\n*/"abc"/*comment\n*/,/*comment\n*/"def",/*comment\n*/]')
self.assertEqual(json.loads(on), ['abc', 'def'])
on = js_to_json('[//comment\n"abc" //comment\n,//comment\n"def",//comment\n]')
self.assertEqual(json.loads(on), ['abc', 'def'])
on = js_to_json('{"abc": "def",}')
self.assertEqual(json.loads(on), {'abc': 'def'})
on = js_to_json('{/*comment\n*/"abc"/*comment\n*/:/*comment\n*/"def"/*comment\n*/,/*comment\n*/}')
self.assertEqual(json.loads(on), {'abc': 'def'})
on = js_to_json('{ 0: /* " \n */ ",]" , }')
self.assertEqual(json.loads(on), {'0': ',]'})
on = js_to_json('{ /*comment\n*/0/*comment\n*/: /* " \n */ ",]" , }')
self.assertEqual(json.loads(on), {'0': ',]'})
on = js_to_json('{ 0: // comment\n1 }')
self.assertEqual(json.loads(on), {'0': 1})
on = js_to_json(r'["<p>x<\/p>"]')
self.assertEqual(json.loads(on), ['<p>x</p>'])
on = js_to_json(r'["\xaa"]')
self.assertEqual(json.loads(on), ['\u00aa'])
on = js_to_json("['a\\\nb']")
self.assertEqual(json.loads(on), ['ab'])
on = js_to_json("/*comment\n*/[/*comment\n*/'a\\\nb'/*comment\n*/]/*comment\n*/")
self.assertEqual(json.loads(on), ['ab'])
on = js_to_json('{0xff:0xff}')
self.assertEqual(json.loads(on), {'255': 255})
on = js_to_json('{/*comment\n*/0xff/*comment\n*/:/*comment\n*/0xff/*comment\n*/}')
self.assertEqual(json.loads(on), {'255': 255})
on = js_to_json('{077:077}')
self.assertEqual(json.loads(on), {'63': 63})
on = js_to_json('{/*comment\n*/077/*comment\n*/:/*comment\n*/077/*comment\n*/}')
self.assertEqual(json.loads(on), {'63': 63})
on = js_to_json('{42:42}')
self.assertEqual(json.loads(on), {'42': 42})
on = js_to_json('{/*comment\n*/42/*comment\n*/:/*comment\n*/42/*comment\n*/}')
self.assertEqual(json.loads(on), {'42': 42})
on = js_to_json('{42:4.2e1}')
self.assertEqual(json.loads(on), {'42': 42.0})
on = js_to_json('{ "0x40": "0x40" }')
self.assertEqual(json.loads(on), {'0x40': '0x40'})
on = js_to_json('{ "040": "040" }')
self.assertEqual(json.loads(on), {'040': '040'})
def test_js_to_json_malformed(self):
    # Malformed number/identifier runs: the identifier part gets quoted,
    # while a trailing signed number stays numeric.
    for source, expected in (('42a1', '42"a1"'), ('42a-1', '42"a"-1')):
        self.assertEqual(js_to_json(source), expected)
def test_extract_attributes(self):
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'})
self.assertEqual(extract_attributes('<e x=y>'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x="a \'b\' c">'), {'x': "a 'b' c"})
self.assertEqual(extract_attributes('<e x=\'a "b" c\'>'), {'x': 'a "b" c'})
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x="&">'), {'x': '&'})
self.assertEqual(extract_attributes('<e x=""">'), {'x': '"'})
self.assertEqual(extract_attributes('<e x="£">'), {'x': '£'}) # HTML 3.2
self.assertEqual(extract_attributes('<e x="λ">'), {'x': 'λ'}) # HTML 4.0
self.assertEqual(extract_attributes('<e x="&foo">'), {'x': '&foo'})
self.assertEqual(extract_attributes('<e x="\'">'), {'x': "'"})
self.assertEqual(extract_attributes('<e x=\'"\'>'), {'x': '"'})
self.assertEqual(extract_attributes('<e x >'), {'x': None})
self.assertEqual(extract_attributes('<e x=y a>'), {'x': 'y', 'a': None})
self.assertEqual(extract_attributes('<e x= y>'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x=1 y=2 x=3>'), {'y': '2', 'x': '3'})
self.assertEqual(extract_attributes('<e \nx=\ny\n>'), {'x': 'y'})
self.assertEqual(extract_attributes('<e \nx=\n"y"\n>'), {'x': 'y'})
self.assertEqual(extract_attributes("<e \nx=\n'y'\n>"), {'x': 'y'})
self.assertEqual(extract_attributes('<e \nx="\ny\n">'), {'x': '\ny\n'})
self.assertEqual(extract_attributes('<e CAPS=x>'), {'caps': 'x'}) # Names lowercased
self.assertEqual(extract_attributes('<e x=1 X=2>'), {'x': '2'})
self.assertEqual(extract_attributes('<e X=1 x=2>'), {'x': '2'})
self.assertEqual(extract_attributes('<e _:funny-name1=1>'), {'_:funny-name1': '1'})
self.assertEqual(extract_attributes('<e x="Fáilte 世界 \U0001f600">'), {'x': 'Fáilte 世界 \U0001f600'})
self.assertEqual(extract_attributes('<e x="décomposé">'), {'x': 'décompose\u0301'})
# "Narrow" Python builds don't support unicode code points outside BMP.
try:
compat_chr(0x10000)
supports_outside_bmp = True
except ValueError:
supports_outside_bmp = False
if supports_outside_bmp:
self.assertEqual(extract_attributes('<e x="Smile &
# Malformed HTML should not break attributes extraction on older Python
self.assertEqual(extract_attributes('<mal"formed/>'), {})
def test_clean_html(self):
    """clean_html collapses source newlines/indentation and renders <br>/&nbsp; sensibly."""
    cases = [
        ('a:\nb', 'a: b'),
        ('a:\n "b"', 'a: "b"'),
        ('a<br>\xa0b', 'a\nb'),  # <br> becomes a newline; the non-breaking space is dropped
    ]
    for markup, expected in cases:
        self.assertEqual(clean_html(markup), expected)
def test_intlist_to_bytes(self):
    # Each integer 0-255 maps to exactly one byte, in order.
    data = [0, 1, 127, 128, 255]
    self.assertEqual(intlist_to_bytes(data), b'\x00\x01\x7f\x80\xff')
def test_args_to_str(self):
    # Quoting is platform-dependent: POSIX shells use single quotes,
    # Windows cmd.exe uses double quotes.  Empty args must still be quoted.
    self.assertEqual(
        args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
        'foo ba/r -baz \'2 be\' \'\'' if compat_os_name != 'nt' else 'foo ba/r -baz "2 be" ""'
    )
def test_parse_filesize(self):
    """parse_filesize understands SI/binary suffixes and comma decimal separators."""
    cases = [
        (None, None),                     # missing input
        ('', None),                       # empty string
        ('91 B', 91),
        ('foobar', None),                 # unparseable text
        ('2 MiB', 2097152),               # binary (1024-based) unit
        ('5 GB', 5000000000),             # decimal (1000-based) unit
        ('1.2Tb', 1200000000000),
        ('1.2tb', 1200000000000),         # case-insensitive suffixes
        ('1,24 KB', 1240),                # comma as decimal separator
        ('1,24 kb', 1240),
        ('8.5 megabytes', 8500000),       # spelled-out unit
    ]
    for text, expected in cases:
        self.assertEqual(parse_filesize(text), expected)
def test_parse_count(self):
    """parse_count handles thousands separators, k-suffixes and trailing words."""
    cases = [
        (None, None),
        ('', None),
        ('0', 0),
        ('1000', 1000),
        ('1.000', 1000),           # dot as thousands separator
        ('1.1k', 1100),            # single 'k' multiplier
        ('1.1kk', 1100000),        # stacked multipliers
        ('1.1kk ', 1100000),       # trailing whitespace ignored
        ('1.1kk views', 1100000),  # trailing words ignored
    ]
    for text, expected in cases:
        self.assertEqual(parse_count(text), expected)
def test_parse_resolution(self):
    """parse_resolution accepts WxH (ASCII or Unicode ×) and named heights like 720p/4k."""
    cases = [
        (None, {}),
        ('', {}),
        ('1920x1080', {'width': 1920, 'height': 1080}),
        ('1920×1080', {'width': 1920, 'height': 1080}),   # Unicode multiplication sign
        ('1920 x 1080', {'width': 1920, 'height': 1080}),  # spaces around the separator
        ('720p', {'height': 720}),
        ('4k', {'height': 2160}),
        ('8K', {'height': 4320}),
    ]
    for text, expected in cases:
        self.assertEqual(parse_resolution(text), expected)
def test_parse_bitrate(self):
    # Bitrate strings are parsed as integer kbps; anything else yields None.
    cases = [
        (None, None),
        ('', None),
        ('300kbps', 300),
        ('1500kbps', 1500),
        ('300 kbps', 300),  # optional space before the unit
    ]
    for text, expected in cases:
        self.assertEqual(parse_bitrate(text), expected)
def test_version_tuple(self):
    # Version strings split on '.' and '-' into an int tuple.
    cases = [
        ('1', (1,)),
        ('10.23.344', (10, 23, 344)),
        ('10.1-6', (10, 1, 6)),  # avconv-style dash separator
    ]
    for version, expected in cases:
        self.assertEqual(version_tuple(version), expected)
def test_detect_exe_version(self):
self.assertEqual(detect_exe_version('''ffmpeg version 1.2.1
built on May 27 2013 08:37:26 with gcc 4.7 (Debian 4.7.3-4)
configuration: --prefix=/usr --extra-'''), '1.2.1')
self.assertEqual(detect_exe_version('''ffmpeg version N-63176-g1fb4685
built on May 15 2014 22:09:06 with gcc 4.8.2 (GCC)'''), 'N-63176-g1fb4685')
self.assertEqual(detect_exe_version('''X server found. dri2 connection failed!
Trying to open render node...
Success at /dev/dri/renderD128.
ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
def test_age_restricted(self):
    """age_restricted(content_limit, user_age): True only when the user is below the limit."""
    cases = [
        (None, 10, False),   # unrestricted content
        (1, None, False),    # unrestricted policy
        (8, 10, False),      # user old enough
        (18, 14, True),      # user too young
        (18, 18, False),     # exactly at the limit is allowed
    ]
    for content_limit, user_age, expected in cases:
        self.assertEqual(age_restricted(content_limit, user_age), expected)
def test_is_html(self):
    """is_html must sniff HTML from raw bytes, honouring Unicode BOMs."""
    # Binary junk before the tag means "not HTML".
    self.assertFalse(is_html(b'\x49\x44\x43<html'))
    # A doctype qualifies even when followed by non-UTF-8 bytes.
    self.assertTrue(is_html(b'<!DOCTYPE foo>\xaaa'))
    self.assertTrue(is_html(  # UTF-8 with BOM
        b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa'))
    self.assertTrue(is_html(  # UTF-16-LE
        b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00'
    ))
    self.assertTrue(is_html(  # UTF-16-BE
        b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4'
    ))
    self.assertTrue(is_html(  # UTF-32-BE
        b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4'))
    self.assertTrue(is_html(  # UTF-32-LE
        b'\xFF\xFE\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4\x00\x00\x00'))
def test_render_table(self):
self.assertEqual(
render_table(
['a', 'bcd'],
[[123, 4], [9999, 51]]),
'a bcd\n'
'123 4\n'
'9999 51')
def test_match_str(self):
    """Exercise the --match-filter mini-language: presence, negation, comparisons, conjunction."""
    # A string right-hand side with a numeric-only operator must raise.
    self.assertRaises(ValueError, match_str, 'xy>foobar', {})
    # Bare key matches when the key is present (even falsy values); '!' negates.
    self.assertFalse(match_str('xy', {'x': 1200}))
    self.assertTrue(match_str('!xy', {'x': 1200}))
    self.assertTrue(match_str('x', {'x': 1200}))
    self.assertFalse(match_str('!x', {'x': 1200}))
    self.assertTrue(match_str('x', {'x': 0}))
    # Numeric comparisons; a trailing '?' lets a missing value pass.
    self.assertFalse(match_str('x>0', {'x': 0}))
    self.assertFalse(match_str('x>0', {}))
    self.assertTrue(match_str('x>?0', {}))
    # SI suffixes (K = 1000) on the right-hand side.
    self.assertTrue(match_str('x>1K', {'x': 1200}))
    self.assertFalse(match_str('x>2K', {'x': 1200}))
    # '&' joins conditions conjunctively.
    self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
    self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
    # String equality / inequality.
    self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
    self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
    self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
    self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
    # Combined numeric, optional ('<?') and presence conditions.
    self.assertFalse(match_str(
        'like_count > 100 & dislike_count <? 50 & description',
        {'like_count': 90, 'description': 'foo'}))
    self.assertTrue(match_str(
        'like_count > 100 & dislike_count <? 50 & description',
        {'like_count': 190, 'description': 'foo'}))
    self.assertFalse(match_str(
        'like_count > 100 & dislike_count <? 50 & description',
        {'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
    self.assertFalse(match_str(
        'like_count > 100 & dislike_count <? 50 & description',
        {'like_count': 190, 'dislike_count': 10}))
    # Booleans: only True satisfies the bare key; '!' matches False/None/missing.
    self.assertTrue(match_str('is_live', {'is_live': True}))
    self.assertFalse(match_str('is_live', {'is_live': False}))
    self.assertFalse(match_str('is_live', {'is_live': None}))
    self.assertFalse(match_str('is_live', {}))
    self.assertFalse(match_str('!is_live', {'is_live': True}))
    self.assertTrue(match_str('!is_live', {'is_live': False}))
    self.assertTrue(match_str('!is_live', {'is_live': None}))
    self.assertTrue(match_str('!is_live', {}))
    # Strings: a present key matches even when the value is empty.
    self.assertTrue(match_str('title', {'title': 'abc'}))
    self.assertTrue(match_str('title', {'title': ''}))
    self.assertFalse(match_str('!title', {'title': 'abc'}))
    self.assertFalse(match_str('!title', {'title': ''}))
def test_parse_dfxp_time_expr(self):
    """DFXP/TTML time expressions: offset seconds (optional 's') or HH:MM:SS[.fff|:fff] clocks."""
    cases = [
        (None, None),            # missing value
        ('', None),              # empty string
        ('0.1', 0.1),
        ('0.1s', 0.1),           # explicit seconds unit
        ('00:00:01', 1.0),
        ('00:00:01.100', 1.1),
        ('00:00:01:100', 1.1),   # colon-separated fraction variant
    ]
    for expr, expected in cases:
        self.assertEqual(parse_dfxp_time_expr(expr), expected)
def test_dfxp2srt(self):
dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
<p begin="1" end="2">第二行<br/>♪♪</p>
<p begin="2" dur="1"><span>Third<br/>Line</span></p>
<p begin="3" end="-1">Lines with invalid timestamps are ignored</p>
<p begin="-1" end="-1">Ignore, two</p>
<p begin="3" dur="-1">Ignored, three</p>
</div>
</body>
</tt>'''.encode('utf-8')
srt_data = '''1
00:00:00,000 --> 00:00:01,000
The following line contains Chinese characters and special symbols
2
00:00:01,000 --> 00:00:02,000
第二行
♪♪
3
00:00:02,000 --> 00:00:03,000
Third
Line
'''
self.assertEqual(dfxp2srt(dfxp_data), srt_data)
dfxp_data_no_default_namespace = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The first line</p>
</div>
</body>
</tt>'''.encode('utf-8')
srt_data = '''1
00:00:00,000 --> 00:00:01,000
The first line
'''
self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)
dfxp_data_with_style = '''<?xml version="1.0" encoding="utf-8"?>
<tt xmlns="http://www.w3.org/2006/10/ttaf1" xmlns:ttp="http://www.w3.org/2006/10/ttaf1#parameter" ttp:timeBase="media" xmlns:tts="http://www.w3.org/2006/10/ttaf1#style" xml:lang="en" xmlns:ttm="http://www.w3.org/2006/10/ttaf1#metadata">
<head>
<styling>
<style id="s2" style="s0" tts:color="cyan" tts:fontWeight="bold" />
<style id="s1" style="s0" tts:color="yellow" tts:fontStyle="italic" />
<style id="s3" style="s0" tts:color="lime" tts:textDecoration="underline" />
<style id="s0" tts:backgroundColor="black" tts:fontStyle="normal" tts:fontSize="16" tts:fontFamily="sansSerif" tts:color="white" />
</styling>
</head>
<body tts:textAlign="center" style="s0">
<div>
<p begin="00:00:02.08" id="p0" end="00:00:05.84">default style<span tts:color="red">custom style</span></p>
<p style="s2" begin="00:00:02.08" id="p0" end="00:00:05.84"><span tts:color="lime">part 1<br /></span><span tts:color="cyan">part 2</span></p>
<p style="s3" begin="00:00:05.84" id="p1" end="00:00:09.56">line 3<br />part 3</p>
<p style="s1" tts:textDecoration="underline" begin="00:00:09.56" id="p2" end="00:00:12.36"><span style="s2" tts:color="lime">inner<br /> </span>style</p>
</div>
</body>
</tt>'''.encode('utf-8')
srt_data = '''1
00:00:02,080 --> 00:00:05,839
<font color="white" face="sansSerif" size="16">default style<font color="red">custom style</font></font>
2
00:00:02,080 --> 00:00:05,839
<b><font color="cyan" face="sansSerif" size="16"><font color="lime">part 1
</font>part 2</font></b>
3
00:00:05,839 --> 00:00:09,560
<u><font color="lime">line 3
part 3</font></u>
4
00:00:09,560 --> 00:00:12,359
<i><u><font color="yellow"><font color="lime">inner
</font>style</font></u></i>
'''
self.assertEqual(dfxp2srt(dfxp_data_with_style), srt_data)
dfxp_data_non_utf8 = '''<?xml version="1.0" encoding="UTF-16"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">Line 1</p>
<p begin="1" end="2">第二行</p>
</div>
</body>
</tt>'''.encode('utf-16')
srt_data = '''1
00:00:00,000 --> 00:00:01,000
Line 1
2
00:00:01,000 --> 00:00:02,000
第二行
'''
self.assertEqual(dfxp2srt(dfxp_data_non_utf8), srt_data)
def test_cli_option(self):
    """cli_option renders [flag, str(value)] when the key is set; nothing otherwise."""
    cases = [
        ({'proxy': '127.0.0.1:3128'}, ['--proxy', '127.0.0.1:3128']),
        ({'proxy': None}, []),   # explicit None -> option omitted
        ({}, []),                # missing key -> option omitted
    ]
    for params, expected in cases:
        self.assertEqual(cli_option(params, '--proxy', 'proxy'), expected)
    # Non-string values are stringified.
    self.assertEqual(cli_option({'retries': 10}, '--retries', 'retries'), ['--retries', '10'])
def test_cli_valueless_option(self):
    """cli_valueless_option emits the bare flag only when the param equals the expected value."""
    self.assertEqual(cli_valueless_option(
        {'downloader': 'external'}, '--external-downloader', 'downloader', 'external'), ['--external-downloader'])
    self.assertEqual(cli_valueless_option(
        {'downloader': 'internal'}, '--external-downloader', 'downloader', 'external'), [])
    # Default expected value is True.
    self.assertEqual(cli_valueless_option(
        {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'), ['--no-check-certificate'])
    self.assertEqual(cli_valueless_option(
        {'nocheckcertificate': False}, '--no-check-certificate', 'nocheckcertificate'), [])
    # An explicit expected value of False inverts the match.
    self.assertEqual(cli_valueless_option(
        {'checkcertificate': True}, '--no-check-certificate', 'checkcertificate', False), [])
    self.assertEqual(cli_valueless_option(
        {'checkcertificate': False}, '--no-check-certificate', 'checkcertificate', False), ['--no-check-certificate'])
def test_cli_bool_option(self):
    """cli_bool_option renders a boolean param as flag plus true/false text, optionally joined by a separator."""
    self.assertEqual(
        cli_bool_option(
            {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'),
        ['--no-check-certificate', 'true'])
    # A separator glues flag and value into one argument.
    self.assertEqual(
        cli_bool_option(
            {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate', separator='='),
        ['--no-check-certificate=true'])
    # Custom true/false spellings: here the flag expresses the inverse of the param.
    self.assertEqual(
        cli_bool_option(
            {'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
        ['--check-certificate', 'false'])
    self.assertEqual(
        cli_bool_option(
            {'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
        ['--check-certificate=false'])
    self.assertEqual(
        cli_bool_option(
            {'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
        ['--check-certificate', 'true'])
    self.assertEqual(
        cli_bool_option(
            {'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
        ['--check-certificate=true'])
    # A missing key yields no arguments at all.
    self.assertEqual(
        cli_bool_option(
            {}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
        [])
def test_ohdave_rsa_encrypt(self):
    # Textbook RSA as used by ohdave.com-style players: public modulus N and
    # exponent e; output is the lowercase hex ciphertext.
    N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
    e = 65537
    self.assertEqual(
        ohdave_rsa_encrypt(b'aa111222', e, N),
        '726664bd9a23fd0c70f9f1b84aab5e3905ce1e45a584e9cbcf9bcc7510338fc1986d6c599ff990d923aa43c51c0d9013cd572e13bc58f4ae48f2ed8c0b0ba881')
def test_pkcs1pad(self):
    """pkcs1pad must produce [0, 2, <non-zero padding>, 0, <data>] of the requested length."""
    payload = [1, 2, 3]
    padded = pkcs1pad(payload, 32)
    self.assertEqual(padded[:2], [0, 2])         # PKCS#1 v1.5 block-type-2 header
    self.assertEqual(padded[28:], [0, 1, 2, 3])  # zero separator followed by the payload
    # A target size too small for the padding overhead must be rejected.
    self.assertRaises(ValueError, pkcs1pad, payload, 8)
def test_encode_base_n(self):
    """encode_base_n converts ints to the given base, optionally with a custom digit table."""
    # Default digit table.
    self.assertEqual(encode_base_n(0, 30), '0')
    self.assertEqual(encode_base_n(80, 30), '2k')
    # Custom (reversed) digit table.
    reversed_table = '9876543210ZYXWVUTSRQPONMLKJIHGFEDCBA'
    self.assertEqual(encode_base_n(0, 30, reversed_table), '9')
    self.assertEqual(encode_base_n(80, 30, reversed_table), '7P')
    # A base larger than the digit table must be rejected.
    self.assertRaises(ValueError, encode_base_n, 0, 70)
    self.assertRaises(ValueError, encode_base_n, 0, 60, reversed_table)
def test_caesar(self):
    """caesar shifts characters within the given alphabet; other characters pass through."""
    cases = [
        ('ace', 'abcdef', 2, 'cea'),
        ('cea', 'abcdef', -2, 'ace'),  # the inverse shift round-trips
        ('ace', 'abcdef', -2, 'eac'),
        ('eac', 'abcdef', 2, 'ace'),
        ('ace', 'abcdef', 0, 'ace'),   # zero shift is the identity
        ('xyz', 'abcdef', 2, 'xyz'),   # characters outside the alphabet are untouched
        ('abc', 'acegik', 2, 'ebg'),   # sparse alphabet
        ('ebg', 'acegik', -2, 'abc'),
    ]
    for text, alphabet, shift, expected in cases:
        self.assertEqual(caesar(text, alphabet, shift), expected)
def test_rot47(self):
    # ROT47 maps the printable ASCII range onto itself, case-sensitively.
    for source, expected in (('youtube-dlc', r'J@FEF36\5=4'), ('YOUTUBE-DLC', r'*~&%&qt\s{r')):
        self.assertEqual(rot47(source), expected)
def test_urshift(self):
    # urshift emulates JavaScript's unsigned right shift (>>>): negative ints
    # are treated as their unsigned 32-bit representation before shifting.
    self.assertEqual(urshift(3, 1), 1)
    self.assertEqual(urshift(-3, 1), 2147483646)
def test_get_element_by_class(self):
html = '''
<span class="foo bar">nice</span>
'''
self.assertEqual(get_element_by_class('foo', html), 'nice')
self.assertEqual(get_element_by_class('no-such-class', html), None)
def test_get_element_by_attribute(self):
html = '''
<span class="foo bar">nice</span>
'''
self.assertEqual(get_element_by_attribute('class', 'foo bar', html), 'nice')
self.assertEqual(get_element_by_attribute('class', 'foo', html), None)
self.assertEqual(get_element_by_attribute('class', 'no-such-foo', html), None)
html = '''
<div itemprop="author" itemscope>foo</div>
'''
self.assertEqual(get_element_by_attribute('itemprop', 'author', html), 'foo')
def test_get_elements_by_class(self):
html = '''
<span class="foo bar">nice</span><span class="foo bar">also nice</span>
'''
self.assertEqual(get_elements_by_class('foo', html), ['nice', 'also nice'])
self.assertEqual(get_elements_by_class('no-such-class', html), [])
def test_get_elements_by_attribute(self):
html = '''
<span class="foo bar">nice</span><span class="foo bar">also nice</span>
'''
self.assertEqual(get_elements_by_attribute('class', 'foo bar', html), ['nice', 'also nice'])
self.assertEqual(get_elements_by_attribute('class', 'foo', html), [])
self.assertEqual(get_elements_by_attribute('class', 'no-such-foo', html), [])
def test_clean_podcast_url(self):
    # Tracking redirector prefixes (podtrac, chtbl, npr stats) are stripped,
    # leaving the direct media URL.
    self.assertEqual(clean_podcast_url('https://www.podtrac.com/pts/redirect.mp3/chtbl.com/track/5899E/traffic.megaphone.fm/HSW7835899191.mp3'), 'https://traffic.megaphone.fm/HSW7835899191.mp3')
    self.assertEqual(clean_podcast_url('https://play.podtrac.com/npr-344098539/edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3'), 'https://edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| true | true |
f733b6ccde55f9cb79d205da7c3b84dd6fb0217b | 3,542 | py | Python | scripts/utils/resort_bam_karyotype.py | a113n/bcbio-nextgen | 1d4afef27ad2e84a4ecb6145ccc5058f2abb4616 | [
"MIT"
] | 339 | 2015-01-04T13:23:04.000Z | 2022-03-25T23:09:09.000Z | scripts/utils/resort_bam_karyotype.py | a113n/bcbio-nextgen | 1d4afef27ad2e84a4ecb6145ccc5058f2abb4616 | [
"MIT"
] | 39 | 2015-01-14T21:31:09.000Z | 2021-11-18T15:15:33.000Z | scripts/utils/resort_bam_karyotype.py | a113n/bcbio-nextgen | 1d4afef27ad2e84a4ecb6145ccc5058f2abb4616 | [
"MIT"
] | 176 | 2015-01-10T17:40:44.000Z | 2022-03-25T05:14:21.000Z | #!/usr/bin/env python
"""Resort a BAM file karyotypically to match GATK's preferred file order.
Broad's GATK and associated resources prefer BAM files sorted as:
chr1, chr2... chr10, chr11... chrX
instead of the simple alphabetic sort:
chr1, chr10, chr2 ...
This takes a sorted BAM files with an alternative ordering of chromosomes
and re-sorts it the karyotypic way.
Usage:
resort_bam_karyotype.py <reference dict> [<one or more> <BAM files>]
<reference dict> is a *.dict file produced by Picard that identifies the order
of chromosomes to sort by:
java -jar CreateSequenceDictionary.jar REFERENCE=your.fasta OUTPUT=your.dict
Requires:
pysam -- http://code.google.com/p/pysam/
"""
import os
import sys
import pysam
def main(ref_file, *in_bams):
    """Re-sort every input BAM to match the chromosome order of the reference header."""
    reference = pysam.Samfile(ref_file, "r")
    sorter = SortByHeader(reference.header)
    for bam_file in in_bams:
        sort_bam(bam_file, sorter.header_cmp, sorter.to_include)
def sort_bam(in_bam, sort_fn, to_include=None):
    """Write a re-sorted copy of `in_bam` (suffix `-ksort`) next to the original.

    in_bam     -- path to a coordinate-sorted, indexed (or indexable) BAM file.
    sort_fn    -- Python-2-style cmp function ordering (chrom_name, header) tuples.
    to_include -- optional collection of chromosome names to keep; reads whose
                  mate maps to an excluded chromosome are dropped.
    """
    out_file = "%s-ksort%s" % os.path.splitext(in_bam)
    index_file = "%s.bai" % in_bam
    # fetch() below requires an index; build one if it is missing.
    if not os.path.exists(index_file):
        pysam.index(in_bam)
    orig = pysam.Samfile(in_bam, "rb")
    chroms = [(c["SN"], c) for c in orig.header["SQ"]]
    new_chroms = chroms[:]
    if to_include:
        new_chroms = [(c, x) for (c, x) in new_chroms if c in to_include]
    # NOTE(review): list.sort(cmp_function) is Python 2 only syntax.
    new_chroms.sort(sort_fn)
    remapper = _id_remapper(chroms, new_chroms)
    new_header = orig.header
    new_header["SQ"] = [h for (_, h) in new_chroms]
    new = pysam.Samfile(out_file, "wb", header=new_header)
    # Emit reads chromosome by chromosome in the new order, re-pointing the
    # numeric reference ids at the re-ordered header.
    for (chrom, _) in new_chroms:
        for read in orig.fetch(chrom):
            write = True
            read.rname = remapper[read.rname]
            try:
                read.mrnm = remapper[read.mrnm]
            # read pair is on a chromosome we are not using
            except KeyError:
                assert to_include is not None
                write = False
            if write:
                new.write(read)
def _id_remapper(orig, new):
"""Provide a dictionary remapping original read indexes to new indexes.
When re-ordering the header, the individual read identifiers need to be
updated as well.
"""
new_chrom_to_index = {}
for i_n, (chr_n, _) in enumerate(new):
new_chrom_to_index[chr_n] = i_n
remap_indexes = {}
for i_o, (chr_o, _) in enumerate(orig):
if chr_o in new_chrom_to_index.keys():
remap_indexes[i_o] = new_chrom_to_index[chr_o]
remap_indexes[None] = None
return remap_indexes
class SortByHeader:
    """Provide chromosome sorting to match an existing header.

    Built from a pysam-style header dict whose "SQ" entries list reference
    sequences in the desired order.
    """
    def __init__(self, base_header):
        # Map chromosome name -> position in the reference header, and record
        # the names themselves for filtering.
        self._chrom_indexes = {}
        self.to_include = []
        for i, item in enumerate(base_header["SQ"]):
            self._chrom_indexes[item["SN"]] = i
            self.to_include.append(item["SN"])

    def header_cmp(self, one, two):
        """cmp-style comparison of (chrom_name, header) tuples by header position."""
        first = self._chrom_indexes[one[0]]
        second = self._chrom_indexes[two[0]]
        # (a > b) - (a < b) replicates the Python 2 cmp() builtin, which was
        # removed in Python 3; behavior is identical on both versions.
        return (first > second) - (first < second)
def sort_by_karyotype(one, two):
"""Sort function to order reads by karyotype.
"""
return cmp(_split_to_karyotype(one[0]),
_split_to_karyotype(two[0]))
def _split_to_karyotype(name):
parts = name.replace("chr", "").split("_")
try:
parts[0] = int(parts[0])
except ValueError:
pass
# anything with an extension (_random) goes at the end
if len(parts) > 1:
parts.insert(0, "z")
return parts
if __name__ == "__main__":
    # CLI entry point: resort_bam_karyotype.py <reference dict> <BAM> [...]
    main(*sys.argv[1:])
| 30.273504 | 78 | 0.640034 |
import os
import sys
import pysam
def main(ref_file, *in_bams):
ref = pysam.Samfile(ref_file, "r")
sorter = SortByHeader(ref.header)
for bam in in_bams:
sort_bam(bam, sorter.header_cmp, sorter.to_include)
def sort_bam(in_bam, sort_fn, to_include=None):
    """Write a re-sorted copy of `in_bam` (suffix `-ksort`) next to the original.

    in_bam     -- path to a coordinate-sorted BAM file.
    sort_fn    -- Python-2-style cmp function ordering (chrom_name, header) tuples.
    to_include -- optional collection of chromosome names to keep.
    """
    out_file = "%s-ksort%s" % os.path.splitext(in_bam)
    index_file = "%s.bai" % in_bam
    # fetch() below requires an index; build one if it is missing.
    if not os.path.exists(index_file):
        pysam.index(in_bam)
    orig = pysam.Samfile(in_bam, "rb")
    chroms = [(c["SN"], c) for c in orig.header["SQ"]]
    new_chroms = chroms[:]
    if to_include:
        new_chroms = [(c, x) for (c, x) in new_chroms if c in to_include]
    # NOTE(review): list.sort(cmp_function) is Python 2 only syntax.
    new_chroms.sort(sort_fn)
    remapper = _id_remapper(chroms, new_chroms)
    new_header = orig.header
    new_header["SQ"] = [h for (_, h) in new_chroms]
    new = pysam.Samfile(out_file, "wb", header=new_header)
    for (chrom, _) in new_chroms:
        for read in orig.fetch(chrom):
            write = True
            # Re-point numeric reference ids at the re-ordered header.
            read.rname = remapper[read.rname]
            try:
                read.mrnm = remapper[read.mrnm]
            except KeyError:
                # Mate maps to a chromosome excluded by to_include: drop the read.
                assert to_include is not None
                write = False
            if write:
                new.write(read)
def _id_remapper(orig, new):
    """Map original header indexes to their positions in the re-ordered header.

    Chromosomes missing from `new` are omitted so callers can detect and drop
    reads referencing them; None maps to None for unmapped mate references.
    """
    new_chrom_to_index = {}
    for i_n, (chr_n, _) in enumerate(new):
        new_chrom_to_index[chr_n] = i_n
    remap_indexes = {}
    for i_o, (chr_o, _) in enumerate(orig):
        if chr_o in new_chrom_to_index.keys():
            remap_indexes[i_o] = new_chrom_to_index[chr_o]
    remap_indexes[None] = None
    return remap_indexes
class SortByHeader:
    """Provide chromosome sorting to match an existing header."""
    def __init__(self, base_header):
        # base_header: pysam-style header dict; "SQ" lists reference
        # sequences in the desired order.
        self._chrom_indexes = {}
        self.to_include = []
        for i, item in enumerate(base_header["SQ"]):
            self._chrom_indexes[item["SN"]] = i
            self.to_include.append(item["SN"])
    def header_cmp(self, one, two):
        # cmp-style comparison of (chrom_name, header) tuples by header
        # position.  NOTE(review): cmp() is a Python 2 only builtin.
        return cmp(self._chrom_indexes[one[0]],
                   self._chrom_indexes[two[0]])
def sort_by_karyotype(one, two):
    # cmp-style function ordering (chrom_name, header) tuples karyotypically.
    # NOTE(review): cmp() is a Python 2 only builtin.
    return cmp(_split_to_karyotype(one[0]),
               _split_to_karyotype(two[0]))
def _split_to_karyotype(name):
    """Turn a chromosome name into a sortable key: chr10 -> [10], chr1_random -> ['z', 1, 'random']."""
    parts = name.replace("chr", "").split("_")
    try:
        parts[0] = int(parts[0])
    except ValueError:
        pass  # non-numeric chromosome (X, Y, M, ...) stays a string
    # Names with an extension (e.g. _random) sort after everything else.
    if len(parts) > 1:
        parts.insert(0, "z")
    return parts
if __name__ == "__main__":
    # CLI entry point: resort_bam_karyotype.py <reference dict> <BAM> [...]
    main(*sys.argv[1:])
| true | true |
f733b77946c83379d75bb1a55541de68f72301a8 | 6,831 | py | Python | tests/test_utilities.py | ashwinvin/Tanjun | e16e28a3be7b809762e2cdc583ae9fe9edf8a0ab | [
"BSD-3-Clause"
] | null | null | null | tests/test_utilities.py | ashwinvin/Tanjun | e16e28a3be7b809762e2cdc583ae9fe9edf8a0ab | [
"BSD-3-Clause"
] | null | null | null | tests/test_utilities.py | ashwinvin/Tanjun | e16e28a3be7b809762e2cdc583ae9fe9edf8a0ab | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# cython: language_level=3
# BSD 3-Clause License
#
# Copyright (c) 2020-2021, Faster Speeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pyright: reportUnknownMemberType=none
# This leads to too many false-positives around mocks.
import typing
from collections import abc as collections
from unittest import mock
import pytest
import tanjun
from tanjun import utilities
_T = typing.TypeVar("_T")
def async_iter_mock(*values: _T) -> collections.AsyncIterable[_T]:
    """Build a mock async iterable whose iterator yields ``values`` in order."""
    iterator = mock.Mock(__anext__=mock.AsyncMock(side_effect=values))
    return mock.Mock(__aiter__=mock.Mock(return_value=iterator))
@pytest.mark.asyncio()
async def test_async_chain():
    """async_chain should yield every value of each async iterable, in order."""
    resources = (
        async_iter_mock(1, 2, 3),
        async_iter_mock(99, 55, 44),
        async_iter_mock(444, 333, 222),
    )
    results = [result async for result in utilities.async_chain(resources)]
    # Sub-iterables are consumed sequentially, not interleaved.
    assert results == [1, 2, 3, 99, 55, 44, 444, 333, 222]
@pytest.mark.asyncio()
async def test_await_if_async_handles_async_callback():
    callback = mock.AsyncMock()
    # An async callback is awaited and its awaited result returned.
    assert await utilities.await_if_async(callback) is callback.return_value
@pytest.mark.asyncio()
async def test_await_if_async_handles_sync_callback():
    callback = mock.Mock()
    # A plain callback's return value is passed through unchanged.
    assert await utilities.await_if_async(callback) is callback.return_value
@pytest.mark.asyncio()
async def test_gather_checks_handles_no_checks():
    # With no checks to run, gather_checks vacuously passes.
    assert await utilities.gather_checks(mock.Mock(), ()) is True
@pytest.mark.asyncio()
async def test_gather_checks_handles_faiedl_check():
    # NOTE(review): "faiedl" is presumably a typo for "failed"; renaming
    # would change the test id, so it is kept as-is.
    mock_ctx = mock.Mock(tanjun.abc.Context)
    check_1 = mock.AsyncMock()
    check_2 = mock.AsyncMock(side_effect=tanjun.FailedCheck)
    check_3 = mock.AsyncMock()
    # One check raising FailedCheck fails the gather, but every check is
    # still awaited exactly once with the context.
    assert await utilities.gather_checks(mock_ctx, (check_1, check_2, check_3)) is False
    check_1.assert_awaited_once_with(mock_ctx)
    check_2.assert_awaited_once_with(mock_ctx)
    check_3.assert_awaited_once_with(mock_ctx)
@pytest.mark.asyncio()
async def test_gather_checks():
    mock_ctx = mock.Mock()
    check_1 = mock.AsyncMock()
    check_2 = mock.AsyncMock()
    check_3 = mock.AsyncMock()
    # All checks passing makes the gather succeed; each is awaited once.
    assert await utilities.gather_checks(mock_ctx, (check_1, check_2, check_3)) is True
    check_1.assert_awaited_once_with(mock_ctx)
    check_2.assert_awaited_once_with(mock_ctx)
    check_3.assert_awaited_once_with(mock_ctx)
# Placeholder: fetch_resource has no test yet; skipped until implemented.
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_resource():
    ...
@pytest.mark.parametrize(
    ("content", "prefix", "expected_result"),
    [
        ("no go sir", ("no", "home", "blow"), "no"),
        ("hime", ("hi", "hime", "boomer"), "hime"),
        ("boomer", ("boo", "boomer", "no u"), "boomer"),
        ("ok boomer", ("no", "nani"), None),
        ("", ("nannnnni",), None),
        ("ok ok ok", (), None),
    ],
)
def test_match_prefix_names(content: str, prefix: str, expected_result: typing.Optional[str]):
    """match_prefix_names returns the first prefix *content* starts with, else None."""
    matched = utilities.match_prefix_names(content, prefix)
    assert matched == expected_result
# Placeholder suite: each stub below marks an untested permission-calculation
# code path in tanjun.utilities and is skipped until a real test is written.
@pytest.mark.skip(reason="Not implemented")
def test_calculate_permissions():
    ...
@pytest.mark.skip(reason="Not implemented")
def test_calculate_permissions_when_guild_owner():
    ...
@pytest.mark.skip(reason="Not implemented")
def test_calculate_permissions_when_admin_role():
    ...
@pytest.mark.skip(reason="Not implemented")
def test_calculate_permissions_when_no_channel():
    ...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_permissions():
    ...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_permissions_when_guild_owner():
    ...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_permissions_when_admin_role():
    ...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_permissions_when_no_channel():
    ...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_permissions_when_channel_object_provided():
    ...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_permissions_for_uncached_entities():
    ...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_permissions_for_no_cache():
    ...
@pytest.mark.skip(reason="Not implemented")
def test_calculate_everyone_permissions():
    ...
@pytest.mark.skip(reason="Not implemented")
def test_calculate_everyone_permissions_admin_role():
    ...
@pytest.mark.skip(reason="Not implemented")
def test_calculate_everyone_permissions_no_channel():
    ...
# Consistency fix: this was the only unimplemented stub without a skip
# marker, so it silently reported as passing. Mark it skipped like its
# siblings until a real test exists.
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_everyone_permissions():
    ...
# Placeholder suite: untested fetch_everyone_permissions paths, skipped
# until implemented.
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_everyone_permissions_admin_role():
    ...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_everyone_permissions_for_uncached_entities():
    ...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_everyone_permissions_for_no_cache():
    ...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_everyone_permissions_no_channel():
    ...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_everyone_permissions_channel_object_provided():
    ...
| 28.111111 | 111 | 0.743083 |
import typing
from collections import abc as collections
from unittest import mock
import pytest
import tanjun
from tanjun import utilities
_T = typing.TypeVar("_T")
def async_iter_mock(*values: _T) -> collections.AsyncIterable[_T]:
return mock.Mock(__aiter__=mock.Mock(return_value=mock.Mock(__anext__=mock.AsyncMock(side_effect=values))))
@pytest.mark.asyncio()
async def test_async_chain():
resources = (
async_iter_mock(1, 2, 3),
async_iter_mock(99, 55, 44),
async_iter_mock(444, 333, 222),
)
results = [result async for result in utilities.async_chain(resources)]
assert results == [1, 2, 3, 99, 55, 44, 444, 333, 222]
@pytest.mark.asyncio()
async def test_await_if_async_handles_async_callback():
callback = mock.AsyncMock()
assert await utilities.await_if_async(callback) is callback.return_value
@pytest.mark.asyncio()
async def test_await_if_async_handles_sync_callback():
callback = mock.Mock()
assert await utilities.await_if_async(callback) is callback.return_value
@pytest.mark.asyncio()
async def test_gather_checks_handles_no_checks():
assert await utilities.gather_checks(mock.Mock(), ()) is True
@pytest.mark.asyncio()
async def test_gather_checks_handles_faiedl_check():
mock_ctx = mock.Mock(tanjun.abc.Context)
check_1 = mock.AsyncMock()
check_2 = mock.AsyncMock(side_effect=tanjun.FailedCheck)
check_3 = mock.AsyncMock()
assert await utilities.gather_checks(mock_ctx, (check_1, check_2, check_3)) is False
check_1.assert_awaited_once_with(mock_ctx)
check_2.assert_awaited_once_with(mock_ctx)
check_3.assert_awaited_once_with(mock_ctx)
@pytest.mark.asyncio()
async def test_gather_checks():
mock_ctx = mock.Mock()
check_1 = mock.AsyncMock()
check_2 = mock.AsyncMock()
check_3 = mock.AsyncMock()
assert await utilities.gather_checks(mock_ctx, (check_1, check_2, check_3)) is True
check_1.assert_awaited_once_with(mock_ctx)
check_2.assert_awaited_once_with(mock_ctx)
check_3.assert_awaited_once_with(mock_ctx)
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_resource():
...
@pytest.mark.parametrize(
("content", "prefix", "expected_result"),
[
("no go sir", ("no", "home", "blow"), "no"),
("hime", ("hi", "hime", "boomer"), "hime"),
("boomer", ("boo", "boomer", "no u"), "boomer"),
("ok boomer", ("no", "nani"), None),
("", ("nannnnni",), None),
("ok ok ok", (), None),
],
)
def test_match_prefix_names(content: str, prefix: str, expected_result: typing.Optional[str]):
assert utilities.match_prefix_names(content, prefix) == expected_result
@pytest.mark.skip(reason="Not implemented")
def test_calculate_permissions():
...
@pytest.mark.skip(reason="Not implemented")
def test_calculate_permissions_when_guild_owner():
...
@pytest.mark.skip(reason="Not implemented")
def test_calculate_permissions_when_admin_role():
...
@pytest.mark.skip(reason="Not implemented")
def test_calculate_permissions_when_no_channel():
...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_permissions():
...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_permissions_when_guild_owner():
...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_permissions_when_admin_role():
...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_permissions_when_no_channel():
...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_permissions_when_channel_object_provided():
...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_permissions_for_uncached_entities():
...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_permissions_for_no_cache():
...
@pytest.mark.skip(reason="Not implemented")
def test_calculate_everyone_permissions():
...
@pytest.mark.skip(reason="Not implemented")
def test_calculate_everyone_permissions_admin_role():
...
@pytest.mark.skip(reason="Not implemented")
def test_calculate_everyone_permissions_no_channel():
...
@pytest.mark.asyncio()
async def test_fetch_everyone_permissions():
...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_everyone_permissions_admin_role():
...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_everyone_permissions_for_uncached_entities():
...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_everyone_permissions_for_no_cache():
...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_everyone_permissions_no_channel():
...
@pytest.mark.skip(reason="Not implemented")
@pytest.mark.asyncio()
async def test_fetch_everyone_permissions_channel_object_provided():
...
| true | true |
f733b7c0c0c9f572e92bab90f1e7680ae03f0e5f | 17,807 | py | Python | pororo/tasks/constituency_parsing.py | jayten42/pororo | 0b02e6a633b9a32ec4241b8ed96745e6592db317 | [
"Apache-2.0"
] | 1,137 | 2021-02-02T02:09:06.000Z | 2022-03-29T03:10:40.000Z | pororo/tasks/constituency_parsing.py | jayten42/pororo | 0b02e6a633b9a32ec4241b8ed96745e6592db317 | [
"Apache-2.0"
] | 57 | 2021-02-02T03:29:54.000Z | 2022-03-31T16:20:00.000Z | pororo/tasks/constituency_parsing.py | jayten42/pororo | 0b02e6a633b9a32ec4241b8ed96745e6592db317 | [
"Apache-2.0"
] | 216 | 2021-02-02T02:49:02.000Z | 2022-03-28T01:19:58.000Z | """Constituency Parsing related modeling class"""
import re
from typing import List, Optional, Tuple
from lxml import etree
from pororo.tasks.utils.base import PororoFactoryBase, PororoTaskBase
from pororo.tasks.utils.download_utils import download_or_load
class PororoConstFactory(PororoFactoryBase):
    """
    Constituency parsing using Transformer model

    English (`transformer.base.en.const`)
        - dataset: OntoNotes 5.0
        - metric: TBU
    Korean (`transformer.base.ko.const`)
        - dataset: Sejong Corpus
        - metric: TBU
    Chinese (`transformer.base.zh.const`)
        - dataset: OntoNotes 5.0
        - metric: TBU

    Args:
        text (str): input text
        beam (int): size of beam search
        pos (bool): contains PoS tagging or not

    Returns:
        result: result of constituency parsing

    Examples:
        >>> const = Pororo(task="const", lang="en")
        >>> const("I love this place")
        <TOP>
          <S>
            <NP>I</NP>
            <VP>
              love
              <NP>this place</NP>
            </VP>
          </S>
        </TOP>
        >>> const = Pororo(task="const", lang="zh")
        >>> const("我喜欢饼干")
        <TOP>
          <IP>
            <NP>我</NP>
            <VP>
              喜欢
              <NP>饼干</NP>
            </VP>
          </IP>
        </TOP>
        >>> const = Pororo(task="const", lang="ko")
        >>> const("미국에서도 같은 우려가 나오고 있다.")
        <S>
          <NP_AJT>미국/NNP+에서/JKB+도/JX</NP_AJT>
          <S>
            <NP_SBJ>
              <VP_MOD>같/VA+은/ETM</VP_MOD>
              <NP_SBJ>우려/NNG+가/JKS</NP_SBJ>
            </NP_SBJ>
            <VP>
              <VP>나오/VV+고/EC</VP>
              <VP>있/VX+다/EF+./SF</VP>
            </VP>
          </S>
        </S>

    """

    def __init__(self, task: str, lang: str, model: Optional[str]):
        super().__init__(task, lang, model)

    @staticmethod
    def get_available_langs():
        return ["en", "ko", "zh"]

    @staticmethod
    def get_available_models():
        return {
            "en": ["transformer.base.en.const"],
            "ko": ["transformer.base.ko.const"],
            "zh": ["transformer.base.zh.const"],
        }

    def load(self, device: str):
        """
        Load user-selected task-specific model

        Args:
            device (str): device information

        Returns:
            object: User-selected task-specific model

        """
        if "transformer" in self.config.n_model:
            from fairseq.models.transformer import TransformerModel

            from pororo.tasks import PororoPosFactory

            load_dict = download_or_load(
                f"transformer/{self.config.n_model}",
                self.config.lang,
            )

            model = (TransformerModel.from_pretrained(
                model_name_or_path=load_dict.path,
                checkpoint_file=f"{self.config.n_model}.pt",
                data_name_or_path=load_dict.dict_path,
                source_lang=load_dict.src_dict,
                target_lang=load_dict.tgt_dict,
            ).eval().to(device))

            # language -> (PoS tagger backend, task wrapper class); avoids
            # repeating the identical tagger-construction block per language.
            wrappers = {
                "ko": ("mecab-ko", PororoTransConstKo),
                "en": ("nltk", PororoTransConstEn),
                "zh": ("jieba", PororoTransConstZh),
            }
            entry = wrappers.get(self.config.lang)
            if entry is None:
                # Preserve the original implicit-None behavior for an
                # unsupported language.
                return None

            tagger_model, wrapper_cls = entry
            tagger = PororoPosFactory(
                task="pos",
                model=tagger_model,
                lang=self.config.lang,
            ).load(device)
            return wrapper_cls(model, tagger, self.config)
class PororoConstBase(PororoTaskBase):
    """Constituency Parsing base class containing various methods related to Const. Parsing"""

    def _fix_tree(self, output: str):
        """
        Fix tree when XML conversion is not conducted

        Args:
            output (str): string to fix

        Returns:
            text: fixed tree string

        """
        tag_ptn = "[A-Z][A-Z_]*"
        output = re.sub(r"\s", "", output)
        # Rewrite XML-ish tags into bracket form: <NP> ... </NP> -> [NP ... ]
        xml = re.sub(rf"<({tag_ptn})>", r"[\1 ", output)
        xml = re.sub(rf"</{tag_ptn}>", r"] ", xml)

        def _convert_to_xml(text):
            # One substitution pass per potential nesting level: each pass
            # converts the innermost [TAG body] spans back to <TAG>body</TAG>.
            # NOTE: the '[' / ']' metacharacters must be escaped here — the
            # unescaped patterns in the previous revision raised re.error
            # (unbalanced parenthesis) or matched character classes instead.
            for _ in range(max(text.count("["), text.count("]"))):
                text = re.sub(
                    rf"(?s)\[({tag_ptn})([^\[\]]+?)\]",
                    r"<\1>\2 </\1>",
                    text,
                )
            return text

        xml = _convert_to_xml(xml)
        # Drop unmatched opening/closing tag fragments and any brackets or
        # whitespace that survived the conversion.
        xml = re.sub(rf"\[{tag_ptn}", "", xml)
        xml = re.sub(rf"{tag_ptn}\]", "", xml)
        xml = re.sub(r"[\[\]\s]", "", xml)
        return xml

    def _prettify(self, output: str):
        """
        Prettify model result using XML tree

        Args:
            output (str): string to make tree

        Returns:
            pretty: tree style output

        """
        output = re.sub("> +", ">", output)
        output = re.sub(" +<", "<", output)
        # Wrap bare text sitting between sibling tags in <temp> so lxml
        # accepts the mixed content; the markers are stripped afterwards.
        output = re.sub(
            r"(<[A-Za-z_\d]+>) *([^< ]+) *(<[^/])",
            r"\1<temp>\2</temp>\3",
            output,
        )
        output = re.sub(
            r"(</[A-Za-z_\d]+>) *([^< ]+) *(</)",
            r"\1<temp>\2</temp>\3",
            output,
        )
        try:
            root = etree.fromstring(output)
        except etree.XMLSyntaxError:
            # Model output was not well-formed XML; rebuild it first.
            root = etree.fromstring(self._fix_tree(output))
        tree = etree.ElementTree(root)
        pretty = etree.tostring(tree, pretty_print=True, encoding="unicode")
        pretty = pretty.replace("<temp>", "").replace("</temp>", "")
        # NOTE(review): replacing every single space with a tab also affects
        # spaces inside text nodes — confirm whether "  " (lxml's two-space
        # indent) was intended here.
        return pretty.replace(" ", "\t")

    def __call__(
        self,
        text: str,
        beam: int = 5,
        pos: bool = False,
        **kwargs,
    ):
        """
        Conduct constituency parsing

        Args:
            text (str): input text
            beam (int): size of beam search
            pos (bool): contains PoS tagging or not

        Returns:
            result: result of constituency parsing

        """
        assert isinstance(text, str), "Input text should be string type"
        text = self._normalize(text)
        return self.predict(text, beam, pos, **kwargs)
class PororoTransConstKo(PororoConstBase):
    # Korean constituency parser: translates a PoS-tag sequence into a
    # bracketed tree with a fairseq Transformer, then re-inserts the words.

    def __init__(self, model, tagger, config):
        super().__init__(config)
        self._model = model    # fairseq TransformerModel (tags -> tree)
        self._tagger = tagger  # PoS tagger (mecab-ko backend)

    def _postprocess(
        self,
        result: List[str],
        eojeols: List[str],
        poses: Optional[List[str]],
    ):
        """
        Postprocess method to make XML format

        Args:
            result (List[str]): constituency parsing result
            eojeols (List): list of eojeol
            poses (List): list of pos tag (or None to omit PoS info)

        Returns:
            str: result of postprocess

        """
        # Group consecutive non-tag token positions; each group of leaf
        # positions corresponds to one eojeol slot in the tree.
        token_indices = []
        temp_group = []
        for i, res in enumerate(result):
            if ("<" in res) or (">" in res):
                continue
            if not temp_group:
                temp_group.append(i)
            else:
                if i == (temp_group[-1] + 1):
                    temp_group.append(i)
                else:
                    token_indices.append(temp_group)
                    temp_group = [i]
        token_indices.append(temp_group)

        # `lucrative` counts how many positions earlier merges removed, so
        # each slice below is shifted left by the tokens already collapsed.
        lucrative = 0
        for i, li_index in enumerate(token_indices):
            if poses:
                # Emit "morph/POS+morph/POS" for this eojeol.
                eojeol = eojeols[i].split("+")
                pos = poses[i].split("+")
                tagged = []
                for e, p in zip(eojeol, pos):
                    tagged.append(f"{e}/{p}")
                result[li_index[0] - lucrative:li_index[-1] + 1 -
                       lucrative] = ["+".join(tagged)]
            else:
                result[li_index[0] - lucrative:li_index[-1] + 1 -
                       lucrative] = [eojeols[i]]
            lucrative += len(li_index) - 1
        return result

    def _check_sanity(self, cands: List[str], n_space: int):
        """
        Check sanity for valid xml structure

        Args:
            cands (List[str]): candidates
            n_space (int): number of space

        Returns:
            return valid or not

        """
        # Returns the first candidate that both preserves the space count
        # and parses as XML; False if none qualifies.
        for cand in cands:
            # Count the number of space special character
            if cand.count("▁") != n_space:
                continue
            # Check whether candidate XML is valid
            try:
                etree.fromstring(cand)
                return cand
            except:
                continue
        return False

    def predict(
        self,
        text: str,
        beam: int = 5,
        pos: bool = False,
        **kwargs,
    ):
        """
        Conduct constituency parsing

        Args:
            text (str): input text
            beam (int): size of beam search
            pos (bool): contains PoS tagging or not

        Returns:
            result of constituency parsing

        """
        eojeols = self._tagger(text)
        n_space = len([m for m in eojeols if m[1] == "SPACE"])

        # Model input is the PoS-tag sequence; "▁" marks word boundaries.
        pairs = self._tagger(text, return_string=False)
        src = " ".join(
            [pair[1] if pair[1] != "SPACE" else "▁" for pair in pairs])

        outputs = self._model.translate(
            src,
            beam=beam,
            max_len_a=1,
            max_len_b=50,
        )

        result = self._check_sanity([outputs], n_space)
        if not result:
            return f"<ERROR> {text} </ERROR>"

        result = [res for res in result.split() if res != "▁"]

        # Rebuild per-eojeol surface forms and PoS strings ("a+b" joined).
        words = []
        poses = []
        tmp_word = ""
        tmp_pos = ""
        for eojeol in eojeols:
            if eojeol[1] != "SPACE":
                tmp_word += f"{eojeol[0]}+"
                tmp_pos += f"{eojeol[1]}+"
            else:
                words.append(tmp_word[:-1])
                poses.append(tmp_pos[:-1])
                tmp_word = ""
                tmp_pos = ""
        words.append(tmp_word[:-1])
        poses.append(tmp_pos[:-1])

        if not pos:
            poses = None

        result = " ".join(self._postprocess(result, words, poses))
        return self._prettify(result).strip()
class PororoTransConstEn(PororoConstBase):
    """English constituency parser built on a tags-to-tree Transformer."""

    def __init__(self, model, tagger, config):
        super().__init__(config)
        self._model = model
        self._tagger = tagger

    def _check_sanity(self, tags: List[str], n_words: int):
        """
        Check sanity for valid xml structure

        Args:
            tags (List[str]): list of tags
            n_words (int): number of words

        Returns:
            return valid or not

        """
        # The tree is sane iff its leaf count equals the input word count.
        n_leaves = sum(1 for tag in tags
                       if ("<" not in tag) and (">" not in tag))
        return n_leaves == n_words

    def _preprocess(self, tagged: List[Tuple]) -> str:
        """
        Preprocess input sentence to replace whitespace token with whitespace

        Args:
            tagged (List[str]): list of tagges

        Returns:
            preprocessed sentence, original input

        """
        non_space = [pair for pair in tagged if pair[1] != "SPACE"]
        ori = " ".join(word for word, _ in non_space)
        sent = " ".join(tag for _, tag in non_space)
        sent = sent.replace("-LRB-", "(").replace("-RRB-", ")")
        return sent, ori

    def _postprocess(self, tags: List[str], words: List[str], pos: List[str]):
        """
        Postprocess result of parsing

        Args:
            tags (List[str]): list of parsing tag
            words (List[str]): list of word
            pos (List[str]): list of PoS tag

        Returns:
            postprocessed result string

        """
        pieces = []
        word_idx = 0
        for tag in tags:
            if ("<" in tag) or (">" in tag):
                pieces.append(tag)
                continue
            # Leaf position: substitute the original word (with optional PoS).
            token = words[word_idx]
            if pos:
                token = f"{token}/{pos[word_idx]}"
            pieces.append(token)
            word_idx += 1
        return " ".join(pieces)

    def predict(
        self,
        text: str,
        beam: int = 5,
        pos: bool = False,
        **kwargs,
    ):
        """
        Conduct constituency parsing

        Args:
            text (str): input sentence
            beam (int): size of beam search
            pos (bool): contains PoS tagging or not

        Returns:
            result of constituency parsing

        """
        tag_str, original = self._preprocess(self._tagger(text))
        n_words = len(tag_str.split())

        parsed = self._model.translate(
            tag_str,
            beam=beam,
            max_len_a=1,
            max_len_b=50,
        )

        if not self._check_sanity(parsed.split(), n_words):
            return f"<ERROR> {text} </ERROR>"

        pos_tags = tag_str.split() if pos else None
        combined = self._postprocess(parsed.split(), original.split(), pos_tags)
        return self._prettify(combined).strip()
class PororoTransConstZh(PororoConstBase):
    """Chinese constituency parser built on a tags-to-tree Transformer."""

    def __init__(self, model, tagger, config):
        super().__init__(config)
        self._model = model    # fairseq TransformerModel (tags -> tree)
        self._tagger = tagger  # PoS tagger (jieba backend)
        # jieba tagset -> coarse tagset the parser was trained on;
        # anything unmapped falls back to "X" in _preprocess.
        self._map = {
            "a": "ADJ",
            "ad": "ADJ",
            "ag": "ADJ",
            "an": "ADJ",
            "b": "NOUN",
            "c": "CONJ",
            "d": "ADV",
            "df": "ADV",
            "dg": "ADV",
            "e": "INTJ",
            "f": "NOUN",
            "g": "MORPHEME",
            "h": "PREFIX",
            "i": "IDIOM",
            "j": "NOUN",
            "k": "SUFFIX",
            "l": "IDIOM",
            "m": "NUM",
            "mg": "NUM",
            "mq": "NUM",
            "n": "NOUN",
            "ng": "NOUN",
            "nr": "NOUN",
            "nrfg": "NOUN",
            "nrt": "NOUN",
            "ns": "NOUN",
            "nt": "NOUN",
            "nz": "NOUN",
            "o": "ONOM",
            "p": "PREP",
            "q": "CLASSIFIER",
            "r": "PRON",
            "rg": "PRON",
            "rr": "PRON",
            "rz": "PRON",
            "s": "NOUN",
            "t": "NOUN",
            "tg": "NOUN",
            "u": "PART",
            "ud": "PART",
            "ug": "PART",
            "uj": "PART",
            "ul": "PART",
            "uv": "PART",
            "uz": "PART",
            "v": "VERB",
            "vd": "VERB",
            "vg": "VERB",
            "vi": "VERB",
            "vn": "VERB",
            "vq": "VERB",
            "x": "X",
            "y": "PART",
            "z": "ADJ",
            "zg": "ADJ",
            "eng": "X",
        }

    def _check_sanity(self, tags: List[str], n_words: int):
        """
        Check sanity for valid xml structure

        Args:
            tags (List[str]): list of tag
            n_words (int): number of word

        Returns:
            return valid or not

        """
        # The tree is sane iff its leaf count equals the input word count.
        n_out = 0
        for tag in tags:
            if ("<" not in tag) and (">" not in tag):
                n_out += 1
        return n_out == n_words

    def _preprocess(self, tagged: List[Tuple]) -> Tuple:
        """
        Preprocess input sentence to map PoS tags into the parser's tagset

        Args:
            tagged (List[Tuple]): list of (word, tag) tuples

        Returns:
            result of preprocess: (tag string, original word string)

        """
        ori = " ".join([tag[0] for tag in tagged])
        tags = [tag[1] for tag in tagged]

        # Mapping into general tagset; unknown tags degrade to "X".
        tags = [self._map[tag] if tag in self._map else "X" for tag in tags]
        return " ".join(tags), ori

    def _postprocess(
        self,
        tags: List[str],
        words: List[str],
        pos: Optional[List[str]] = None,
    ):
        """
        Postprocess result of parsing

        Args:
            tags (List[str]): list of parsing tag
            words (List[str]): list of word
            pos (Optional[List[str]]): list of PoS tag, or None to omit
                PoS info. (Previous annotation ``pos: bool = False`` was
                wrong — the value is indexed as a sequence below.)

        Returns:
            postprocessed result string

        """
        result = list()
        i = 0
        for tag in tags:
            if ("<" not in tag) and (">" not in tag):
                # Leaf position: substitute the original word.
                if pos:
                    result.append(f"{words[i]}/{pos[i]}")
                else:
                    result.append(words[i])
                i += 1
            else:
                result.append(tag)
        return " ".join(result)

    def predict(
        self,
        text: str,
        beam: int = 5,
        pos: bool = False,
        **kwargs,
    ):
        """
        Conduct constituency parsing

        Args:
            text (str): input sentence
            beam (int): size of beam search
            pos (bool): contains PoS tagging or not

        Returns:
            result of constituency parsing

        """
        tags, ori = self._preprocess(self._tagger(text))
        n_words = len(tags.split())

        outputs = self._model.translate(
            tags,
            beam=beam,
            max_len_a=1,
            max_len_b=50,
        )

        result = self._check_sanity(outputs.split(), n_words)
        if not result:
            return f"<ERROR> {text} </ERROR>"

        poses = None
        if pos:
            poses = tags.split()

        outputs = self._postprocess(outputs.split(), ori.split(), poses)
        return self._prettify(outputs).strip()
| 26.617339 | 95 | 0.461167 |
import re
from typing import List, Optional, Tuple
from lxml import etree
from pororo.tasks.utils.base import PororoFactoryBase, PororoTaskBase
from pororo.tasks.utils.download_utils import download_or_load
class PororoConstFactory(PororoFactoryBase):
def __init__(self, task: str, lang: str, model: Optional[str]):
super().__init__(task, lang, model)
@staticmethod
def get_available_langs():
return ["en", "ko", "zh"]
@staticmethod
def get_available_models():
return {
"en": ["transformer.base.en.const"],
"ko": ["transformer.base.ko.const"],
"zh": ["transformer.base.zh.const"],
}
def load(self, device: str):
if "transformer" in self.config.n_model:
from fairseq.models.transformer import TransformerModel
from pororo.tasks import PororoPosFactory
load_dict = download_or_load(
f"transformer/{self.config.n_model}",
self.config.lang,
)
model = (TransformerModel.from_pretrained(
model_name_or_path=load_dict.path,
checkpoint_file=f"{self.config.n_model}.pt",
data_name_or_path=load_dict.dict_path,
source_lang=load_dict.src_dict,
target_lang=load_dict.tgt_dict,
).eval().to(device))
if self.config.lang == "ko":
tagger = PororoPosFactory(
task="pos",
model="mecab-ko",
lang=self.config.lang,
).load(device)
return PororoTransConstKo(model, tagger, self.config)
if self.config.lang == "en":
tagger = PororoPosFactory(
task="pos",
model="nltk",
lang=self.config.lang,
).load(device)
return PororoTransConstEn(model, tagger, self.config)
if self.config.lang == "zh":
tagger = PororoPosFactory(
task="pos",
model="jieba",
lang=self.config.lang,
).load(device)
return PororoTransConstZh(model, tagger, self.config)
class PororoConstBase(PororoTaskBase):
def _fix_tree(self, output: str):
tag_ptn = "[A-Z][A-Z_]*"
output = re.sub("\s", "", output)
xml = re.sub(f"<({tag_ptn})>", r"[\1 ", output)
xml = re.sub(f"</{tag_ptn}>", r"] ", xml)
def _convert_to_xml(text):
for _ in range(max(text.count("["), text.count("]"))):
text = re.sub(
f"(?s)[({tag_ptn})([^[]]+?)]",
r"<\1>\2 </\1>",
text,
)
return text
xml = _convert_to_xml(xml)
xml = re.sub(f"[{tag_ptn}", "", xml)
xml = re.sub(f"{tag_ptn}]", "", xml)
xml = re.sub("[[]\s]", "", xml)
return xml
def _prettify(self, output: str):
output = re.sub("> +", ">", output)
output = re.sub(" +<", "<", output)
output = re.sub(
"(<[A-Za-z_\d]+>) *([^< ]+) *(<[^/])",
r"\1<temp>\2</temp>\3",
output,
)
output = re.sub(
"(</[A-Za-z_\d]+>) *([^< ]+) *(</)",
r"\1<temp>\2</temp>\3",
output,
)
try:
root = etree.fromstring(output)
except:
root = etree.fromstring(self._fix_tree(output))
tree = etree.ElementTree(root)
pretty = etree.tostring(tree, pretty_print=True, encoding="unicode")
pretty = pretty.replace("<temp>", "").replace("</temp>", "")
return pretty.replace(" ", "\t")
def __call__(
self,
text: str,
beam: int = 5,
pos: bool = False,
**kwargs,
):
assert isinstance(text, str), "Input text should be string type"
text = self._normalize(text)
return self.predict(text, beam, pos, **kwargs)
class PororoTransConstKo(PororoConstBase):
def __init__(self, model, tagger, config):
super().__init__(config)
self._model = model
self._tagger = tagger
def _postprocess(
self,
result: List[str],
eojeols: List[str],
poses: List[str],
):
token_indices = []
temp_group = []
for i, res in enumerate(result):
if ("<" in res) or (">" in res):
continue
if not temp_group:
temp_group.append(i)
else:
if i == (temp_group[-1] + 1):
temp_group.append(i)
else:
token_indices.append(temp_group)
temp_group = [i]
token_indices.append(temp_group)
lucrative = 0
for i, li_index in enumerate(token_indices):
if poses:
eojeol = eojeols[i].split("+")
pos = poses[i].split("+")
tagged = []
for e, p in zip(eojeol, pos):
tagged.append(f"{e}/{p}")
result[li_index[0] - lucrative:li_index[-1] + 1 -
lucrative] = ["+".join(tagged)]
else:
result[li_index[0] - lucrative:li_index[-1] + 1 -
lucrative] = [eojeols[i]]
lucrative += len(li_index) - 1
return result
def _check_sanity(self, cands: List[str], n_space: int):
for cand in cands:
if cand.count("▁") != n_space:
continue
try:
etree.fromstring(cand)
return cand
except:
continue
return False
def predict(
self,
text: str,
beam: int = 5,
pos: bool = False,
**kwargs,
):
eojeols = self._tagger(text)
n_space = len([m for m in eojeols if m[1] == "SPACE"])
pairs = self._tagger(text, return_string=False)
src = " ".join(
[pair[1] if pair[1] != "SPACE" else "▁" for pair in pairs])
outputs = self._model.translate(
src,
beam=beam,
max_len_a=1,
max_len_b=50,
)
result = self._check_sanity([outputs], n_space)
if not result:
return f"<ERROR> {text} </ERROR>"
result = [res for res in result.split() if res != "▁"]
words = []
poses = []
tmp_word = ""
tmp_pos = ""
for eojeol in eojeols:
if eojeol[1] != "SPACE":
tmp_word += f"{eojeol[0]}+"
tmp_pos += f"{eojeol[1]}+"
else:
words.append(tmp_word[:-1])
poses.append(tmp_pos[:-1])
tmp_word = ""
tmp_pos = ""
words.append(tmp_word[:-1])
poses.append(tmp_pos[:-1])
if not pos:
poses = None
result = " ".join(self._postprocess(result, words, poses))
return self._prettify(result).strip()
class PororoTransConstEn(PororoConstBase):
def __init__(self, model, tagger, config):
super().__init__(config)
self._model = model
self._tagger = tagger
def _check_sanity(self, tags: List[str], n_words: int):
n_out = 0
for tag in tags:
if ("<" not in tag) and (">" not in tag):
n_out += 1
return n_out == n_words
def _preprocess(self, tagged: List[Tuple]) -> str:
ori = " ".join([tag[0] for tag in tagged if tag[1] != "SPACE"])
sent = " ".join([tag[1] for tag in tagged if tag[1] != "SPACE"])
sent = sent.replace("-LRB-", "(")
sent = sent.replace("-RRB-", ")")
return sent, ori
def _postprocess(self, tags: List[str], words: List[str], pos: List[str]):
result = list()
i = 0
for tag in tags:
if ("<" not in tag) and (">" not in tag):
if pos:
result.append(f"{words[i]}/{pos[i]}")
else:
result.append(words[i])
i += 1
else:
result.append(tag)
return " ".join(result)
def predict(
self,
text: str,
beam: int = 5,
pos: bool = False,
**kwargs,
):
tags, ori = self._preprocess(self._tagger(text))
n_words = len(tags.split())
outputs = self._model.translate(
tags,
beam=beam,
max_len_a=1,
max_len_b=50,
)
result = self._check_sanity(outputs.split(), n_words)
if not result:
return f"<ERROR> {text} </ERROR>"
poses = None
if pos:
poses = tags.split()
outputs = self._postprocess(outputs.split(), ori.split(), poses)
return self._prettify(outputs).strip()
class PororoTransConstZh(PororoConstBase):
def __init__(self, model, tagger, config):
super().__init__(config)
self._model = model
self._tagger = tagger
self._map = {
"a": "ADJ",
"ad": "ADJ",
"ag": "ADJ",
"an": "ADJ",
"b": "NOUN",
"c": "CONJ",
"d": "ADV",
"df": "ADV",
"dg": "ADV",
"e": "INTJ",
"f": "NOUN",
"g": "MORPHEME",
"h": "PREFIX",
"i": "IDIOM",
"j": "NOUN",
"k": "SUFFIX",
"l": "IDIOM",
"m": "NUM",
"mg": "NUM",
"mq": "NUM",
"n": "NOUN",
"ng": "NOUN",
"nr": "NOUN",
"nrfg": "NOUN",
"nrt": "NOUN",
"ns": "NOUN",
"nt": "NOUN",
"nz": "NOUN",
"o": "ONOM",
"p": "PREP",
"q": "CLASSIFIER",
"r": "PRON",
"rg": "PRON",
"rr": "PRON",
"rz": "PRON",
"s": "NOUN",
"t": "NOUN",
"tg": "NOUN",
"u": "PART",
"ud": "PART",
"ug": "PART",
"uj": "PART",
"ul": "PART",
"uv": "PART",
"uz": "PART",
"v": "VERB",
"vd": "VERB",
"vg": "VERB",
"vi": "VERB",
"vn": "VERB",
"vq": "VERB",
"x": "X",
"y": "PART",
"z": "ADJ",
"zg": "ADJ",
"eng": "X",
}
def _check_sanity(self, tags: List[str], n_words: int):
n_out = 0
for tag in tags:
if ("<" not in tag) and (">" not in tag):
n_out += 1
return n_out == n_words
def _preprocess(self, tagged: List[Tuple]) -> Tuple:
ori = " ".join([tag[0] for tag in tagged])
tags = [tag[1] for tag in tagged]
tags = [self._map[tag] if tag in self._map else "X" for tag in tags]
return " ".join(tags), ori
def _postprocess(
self,
tags: List[str],
words: List[str],
pos: bool = False,
):
result = list()
i = 0
for tag in tags:
if ("<" not in tag) and (">" not in tag):
if pos:
result.append(f"{words[i]}/{pos[i]}")
else:
result.append(words[i])
i += 1
else:
result.append(tag)
return " ".join(result)
def predict(
self,
text: str,
beam: int = 5,
pos: bool = False,
**kwargs,
):
tags, ori = self._preprocess(self._tagger(text))
n_words = len(tags.split())
outputs = self._model.translate(
tags,
beam=beam,
max_len_a=1,
max_len_b=50,
)
result = self._check_sanity(outputs.split(), n_words)
if not result:
return f"<ERROR> {text} </ERROR>"
poses = None
if pos:
poses = tags.split()
outputs = self._postprocess(outputs.split(), ori.split(), poses)
return self._prettify(outputs).strip()
| true | true |
f733b84cfac911bd5dbd39f81e8c042070fb2d3f | 4,370 | py | Python | musica3.py | Oliviaffc/BOMusic | e272c50f93892f20b6210d1fa06ba1c6f44d0a62 | [
"MIT"
] | 1 | 2021-07-08T14:27:37.000Z | 2021-07-08T14:27:37.000Z | musica3.py | Oliviaffc/BOMusic | e272c50f93892f20b6210d1fa06ba1c6f44d0a62 | [
"MIT"
] | null | null | null | musica3.py | Oliviaffc/BOMusic | e272c50f93892f20b6210d1fa06ba1c6f44d0a62 | [
"MIT"
] | null | null | null | '''BOMusic'''
from tkinter import *
import pygame
class App3(Toplevel):
cor1 = '#171717'
cor2 = '#58009D'
cor3 = '#efefef'
def __init__(self, original):
self.frame_original = original
Toplevel.__init__(self)
self.config()
self.frames()
self.widgetsButton1()
self.widgetsButton2()
self.widgetsButton3()
self.widgetsimg()
self.widgetstitulo()
def config(self):
self.title('BoMusic')
self.geometry('380x380+700+350')
self.resizable(False, False)
self.configure(bg = self.cor1)
pygame.mixer.init()
self.iconbitmap('provaDevSistemas\icone.ico')
def som(self):
pygame.mixer.music.load('provaDevSistemas\musica3.mp3')
pygame.mixer.music.play()
StopIteration
print('tocando')
print('')
def stop(self):
pygame.mixer.music.pause()
print('parando')
print('')
StopIteration
def clickbtn(self):
self.withdraw()
#self.subFrame = Musicas(self)
self.stop()
def onClose(self):
self.stop()
self.destroy()
self.frame_original.show()
def frames(self):
self.titulo = Frame(
self,
bg = self.cor1,
)
self.titulo.place(
x = 0,
y = 20,
width = 380,
height = 100
)
self.logo = Frame(
self,
bg = self.cor1,
)
self.logo.place(
x = 0,
y = 140,
width = 380,
height = 100
)
self.voltar = Frame(
self,
bg = self.cor3,
)
self.voltar.place(
x = 12.5,
y = 280,
width = 110,
height = 50
)
self.play = Frame(
self,
bg = self.cor1,
)
self.play.place(
x = 135,
y = 280,
width = 110,
height = 50
)
self.parar = Frame(
self,
bg = self.cor3,
)
self.parar.place(
x = 257.5,
y = 280,
width = 110,
height = 50
)
def widgetstitulo(self):
title = Label(self.titulo,
text='Twenty One Pilots\nShy Away',
font=('Poppins', 20, 'bold'),
bg = self.cor1,
fg = self.cor2,
)
title.pack()
def widgetsimg(self):
self.album = PhotoImage(file = r'provaDevSistemas\album3.png')
self.img2 = Label(
self.logo,
image = self.album,
bd = 0
)
self.img2.pack()
def widgetsButton1(self):
self.botao3 = Button(
self.voltar,
text = 'Voltar',
font = ('Poppins', 25),
fg = self.cor3,
activeforeground = self.cor3,
bg = self.cor2,
activebackground = self.cor2,
command=self.onClose
)
self.botao3.place(
relx = 0,
rely = 0,
relwidth = 1,
relheight = 1
)
def widgetsButton2(self):
self.botao = Button(
self.play,
text = 'Play',
font = ('Poppins', 25),
fg = self.cor3,
activeforeground = self.cor3,
bg = self.cor2,
activebackground = self.cor2,
command=self.som
)
self.botao.place(
relx = 0,
rely = 0,
relwidth = 1,
relheight = 1
)
    def widgetsButton3(self):
        # Build the 'Stop' button; pressing it pauses playback via self.stop.
        self.botao2 = Button(
            self.parar,
            text = 'Stop',
            font = ('Poppins', 25),
            fg = self.cor3,
            activeforeground = self.cor3,
            bg = self.cor2,
            activebackground = self.cor2,
            command=self.stop
        )
        # Stretch the button to fill the whole 'parar' frame.
        self.botao2.place(
            relx = 0,
            rely = 0,
            relwidth = 1,
            relheight = 1
) | 22.879581 | 71 | 0.422197 |
from tkinter import *
import pygame
class App3(Toplevel):
cor1 = '#171717'
cor2 = '#58009D'
cor3 = '#efefef'
def __init__(self, original):
self.frame_original = original
Toplevel.__init__(self)
self.config()
self.frames()
self.widgetsButton1()
self.widgetsButton2()
self.widgetsButton3()
self.widgetsimg()
self.widgetstitulo()
def config(self):
self.title('BoMusic')
self.geometry('380x380+700+350')
self.resizable(False, False)
self.configure(bg = self.cor1)
pygame.mixer.init()
self.iconbitmap('provaDevSistemas\icone.ico')
def som(self):
pygame.mixer.music.load('provaDevSistemas\musica3.mp3')
pygame.mixer.music.play()
StopIteration
print('tocando')
print('')
def stop(self):
pygame.mixer.music.pause()
print('parando')
print('')
StopIteration
def clickbtn(self):
self.withdraw()
self.stop()
def onClose(self):
self.stop()
self.destroy()
self.frame_original.show()
def frames(self):
self.titulo = Frame(
self,
bg = self.cor1,
)
self.titulo.place(
x = 0,
y = 20,
width = 380,
height = 100
)
self.logo = Frame(
self,
bg = self.cor1,
)
self.logo.place(
x = 0,
y = 140,
width = 380,
height = 100
)
self.voltar = Frame(
self,
bg = self.cor3,
)
self.voltar.place(
x = 12.5,
y = 280,
width = 110,
height = 50
)
self.play = Frame(
self,
bg = self.cor1,
)
self.play.place(
x = 135,
y = 280,
width = 110,
height = 50
)
self.parar = Frame(
self,
bg = self.cor3,
)
self.parar.place(
x = 257.5,
y = 280,
width = 110,
height = 50
)
def widgetstitulo(self):
title = Label(self.titulo,
text='Twenty One Pilots\nShy Away',
font=('Poppins', 20, 'bold'),
bg = self.cor1,
fg = self.cor2,
)
title.pack()
def widgetsimg(self):
self.album = PhotoImage(file = r'provaDevSistemas\album3.png')
self.img2 = Label(
self.logo,
image = self.album,
bd = 0
)
self.img2.pack()
def widgetsButton1(self):
self.botao3 = Button(
self.voltar,
text = 'Voltar',
font = ('Poppins', 25),
fg = self.cor3,
activeforeground = self.cor3,
bg = self.cor2,
activebackground = self.cor2,
command=self.onClose
)
self.botao3.place(
relx = 0,
rely = 0,
relwidth = 1,
relheight = 1
)
def widgetsButton2(self):
self.botao = Button(
self.play,
text = 'Play',
font = ('Poppins', 25),
fg = self.cor3,
activeforeground = self.cor3,
bg = self.cor2,
activebackground = self.cor2,
command=self.som
)
self.botao.place(
relx = 0,
rely = 0,
relwidth = 1,
relheight = 1
)
def widgetsButton3(self):
self.botao2 = Button(
self.parar,
text = 'Stop',
font = ('Poppins', 25),
fg = self.cor3,
activeforeground = self.cor3,
bg = self.cor2,
activebackground = self.cor2,
command=self.stop
)
self.botao2.place(
relx = 0,
rely = 0,
relwidth = 1,
relheight = 1
) | true | true |
f733b8a006818fdb403ccfa922ba9f3b43ed2efe | 3,297 | py | Python | cs_tools/tools/rtql/const.py | thoughtspot/cs_tools | 7b516476be94adf7f121645b7c3fc7206fdae4ca | [
"MIT"
] | 1 | 2022-03-14T19:04:53.000Z | 2022-03-14T19:04:53.000Z | cs_tools/tools/rtql/const.py | thoughtspot/cs_tools | 7b516476be94adf7f121645b7c3fc7206fdae4ca | [
"MIT"
] | 10 | 2021-06-01T14:34:52.000Z | 2022-03-24T00:47:47.000Z | cs_tools/tools/rtql/const.py | thoughtspot/cs_tools | 7b516476be94adf7f121645b7c3fc7206fdae4ca | [
"MIT"
] | null | null | null |
# Multiline help text listing common TQL commands; the variable is the only
# definition in this module and is presumably printed to users of the
# interactive TQL shell.
TQL_HELP = """
Commands can optionally be multi-line.
Few common commands
-----------------------
  show databases;      -> list all available databases
  use db;              -> switches context to specified database
                          'db' this must be done if queries do
                          not use full names (db.schema.table)
                          for tables.
  show schemas;        -> list all schemas within current
                          database (set by use db;)
  show tables;         -> list all tables within current
                          database (set by use db;)
  show table tab;      -> list all columns for table 'tab'
  show views;          -> list all views within current
                          database (set by use db;)
  show view vw;        -> list all columns for view 'vw'
  script server;       -> generates SQL for all databases
  script database db;  -> generates create SQL for all tables in
                          database 'db'
  script table tab;    -> generates create SQL for table 'tab'
  create database db;  -> create database 'db'
  drop database db;    -> drop database 'db'
  create table tab ...; -> create table 'tab'. Example ...
                           create table t1 (c1 int, c2 int);
                           create table t2 (d1 int, d2 int,
                           constraint primary key(d1));
  drop table tab;      -> drop table 'tab'
  alter table tab ...; -> alter table 'tab'. Examples ...
                           alter table t add column c int
                             default 5;
                           alter table t rename column c to d
                           alter table t drop column c
                           alter table t1 add constraint
                             foreign key (c1, c2) references
                             t2 (d1, d2);
                           alter table t1 drop constraint foreign
                             key t2;
  select from tab ...; -> select query against the specified
                          set of tables. Example queries:
                           select TOP 10 c1 from t1;
                           select c1, sum(c2) from tab1;
                           select c11, sum(c22) as X from t1, t2
                             where t11.c12 = t2.c21 and c13 = 10
                             group by c11
                             order by X desc
                           select c1, sum(c2) from tab1 limit 10;
  insert into tab ...; -> insert values into 'tab'
  update tab ...;      -> update rows in 'tbl' that match
                          optionally provided predicates.
                          Predicates are of form column = value
                          connected by 'and' keyword. Set values
                          in 'columns' to specified values.
  delete from tab ...; -> delete rows from 'tbl' that match
                          optionally provided predicates.
                          Predicates are of form column = value
                          connected by 'and' keyword.
  compact table tab;   -> compact table 'tab' data version
                          chain to a single DML file.
  compact all_tables;  -> compact all tables in current db
  exit;                -> exit.
For a list of all commands, type "help;" after invoking tql
For a list of all available flags, type tql --helpfull
"""
| 48.485294 | 63 | 0.506824 |
TQL_HELP = """
Commands can optionally be multi-line.
Few common commands
-----------------------
show databases; -> list all available databases
use db; -> switches context to specified database
'db' this must be done if queries do
not use full names (db.schema.table)
for tables.
show schemas; -> list all schemas within current
database (set by use db;)
show tables; -> list all tables within current
database (set by use db;)
show table tab; -> list all columns for table 'tab'
show views; -> list all views within current
database (set by use db;)
show view vw; -> list all columns for view 'vw'
script server; -> generates SQL for all databases
script database db; -> generates create SQL for all tables in
database 'db'
script table tab; -> generates create SQL for table 'tab'
create database db; -> create database 'db'
drop database db; -> drop database 'db'
create table tab ...; -> create table 'tab'. Example ...
create table t1 (c1 int, c2 int);
create table t2 (d1 int, d2 int,
constraint primary key(d1));
drop table tab; -> drop table 'tab'
alter table tab ...; -> alter table 'tab'. Examples ...
alter table t add column c int
default 5;
alter table t rename column c to d
alter table t drop column c
alter table t1 add constraint
foreign key (c1, c2) references
t2 (d1, d2);
alter table t1 drop constraint foreign
key t2;
select from tab ...; -> select query against the specified
set of tables. Example queries:
select TOP 10 c1 from t1;
select c1, sum(c2) from tab1;
select c11, sum(c22) as X from t1, t2
where t11.c12 = t2.c21 and c13 = 10
group by c11
order by X desc
select c1, sum(c2) from tab1 limit 10;
insert into tab ...; -> insert values into 'tab'
update tab ...; -> update rows in 'tbl' that match
optionally provided predicates.
Predicates are of form column = value
connected by 'and' keyword. Set values
in 'columns' to specified values.
delete from tab ...; -> delete rows from 'tbl' that match
optionally provided predicates.
Predicates are of form column = value
connected by 'and' keyword.
compact table tab; -> compact table 'tab' data version
chain to a single DML file.
compact all_tables; -> compact all tables in current db
exit; -> exit.
For a list of all commands, type "help;" after invoking tql
For a list of all available flags, type tql --helpfull
"""
| true | true |
f733ba9f177b5d7b058338a61d67df96d228937e | 1,884 | py | Python | uproot/const.py | ryuwd/uproot4 | 20d8575e941c32559c7b5e62b0ed5f92bc4927d0 | [
"BSD-3-Clause"
] | null | null | null | uproot/const.py | ryuwd/uproot4 | 20d8575e941c32559c7b5e62b0ed5f92bc4927d0 | [
"BSD-3-Clause"
] | null | null | null | uproot/const.py | ryuwd/uproot4 | 20d8575e941c32559c7b5e62b0ed5f92bc4927d0 | [
"BSD-3-Clause"
] | null | null | null | # BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
"""
This module defines integer constants used by serialization and deserialization routines.
"""
from __future__ import absolute_import
import numpy
# used in unmarshaling
kByteCountMask = numpy.int64(0x40000000)
kByteCountVMask = numpy.int64(0x4000)
kClassMask = numpy.int64(0x80000000)
kNewClassTag = numpy.int64(0xFFFFFFFF)
kIsOnHeap = numpy.uint32(0x01000000)
kIsReferenced = numpy.uint32(1 << 4)
kMapOffset = 2
# not used?
kNullTag = 0
kNotDeleted = numpy.uint32(0x02000000)
kZombie = numpy.uint32(0x04000000)
kBitMask = numpy.uint32(0x00FFFFFF)
kDisplacementMask = numpy.uint32(0xFF000000)
############# core/zip/inc/Compression.h
kZLIB = 1
kLZMA = 2
kOldCompressionAlgo = 3
kLZ4 = 4
kZSTD = 5
kUndefinedCompressionAlgorithm = 6
############# constants for streamers
kBase = 0
kChar = 1
kShort = 2
kInt = 3
kLong = 4
kFloat = 5
kCounter = 6
kCharStar = 7
kDouble = 8
kDouble32 = 9
kLegacyChar = 10
kUChar = 11
kUShort = 12
kUInt = 13
kULong = 14
kBits = 15
kLong64 = 16
kULong64 = 17
kBool = 18
kFloat16 = 19
kOffsetL = 20
kOffsetP = 40
kObject = 61
kAny = 62
kObjectp = 63
kObjectP = 64
kTString = 65
kTObject = 66
kTNamed = 67
kAnyp = 68
kAnyP = 69
kAnyPnoVT = 70
kSTLp = 71
kSkip = 100
kSkipL = 120
kSkipP = 140
kConv = 200
kConvL = 220
kConvP = 240
kSTL = 300
kSTLstring = 365
kStreamer = 500
kStreamLoop = 501
############# constants from core/foundation/inc/ESTLType.h
kNotSTL = 0
kSTLvector = 1
kSTLlist = 2
kSTLdeque = 3
kSTLmap = 4
kSTLmultimap = 5
kSTLset = 6
kSTLmultiset = 7
kSTLbitset = 8
kSTLforwardlist = 9
kSTLunorderedset = 10
kSTLunorderedmultiset = 11
kSTLunorderedmap = 12
kSTLunorderedmultimap = 13
kSTLend = 14
kSTLany = 300
############# IOFeatures
kGenerateOffsetMap = numpy.uint8(1)
############# other
kStreamedMemberWise = numpy.uint16(1 << 14)
| 16.526316 | 89 | 0.722399 |
from __future__ import absolute_import
import numpy
kByteCountMask = numpy.int64(0x40000000)
kByteCountVMask = numpy.int64(0x4000)
kClassMask = numpy.int64(0x80000000)
kNewClassTag = numpy.int64(0xFFFFFFFF)
kIsOnHeap = numpy.uint32(0x01000000)
kIsReferenced = numpy.uint32(1 << 4)
kMapOffset = 2
kNullTag = 0
kNotDeleted = numpy.uint32(0x02000000)
kZombie = numpy.uint32(0x04000000)
kBitMask = numpy.uint32(0x00FFFFFF)
kDisplacementMask = numpy.uint32(0xFF000000)
| true | true |
f733bd046f29b564247a1e27481442b8e98ac1dd | 9,526 | py | Python | pysnmp-with-texts/CISCOSB-PHY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/CISCOSB-PHY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/CISCOSB-PHY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CISCOSB-PHY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCOSB-PHY-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:23:02 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
switch001, = mibBuilder.importSymbols("CISCOSB-MIB", "switch001")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Unsigned32, Counter64, Counter32, TimeTicks, IpAddress, Bits, Integer32, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, ObjectIdentity, ModuleIdentity, NotificationType, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "Counter64", "Counter32", "TimeTicks", "IpAddress", "Bits", "Integer32", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "ObjectIdentity", "ModuleIdentity", "NotificationType", "iso")
DisplayString, TextualConvention, TimeStamp = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "TimeStamp")
rlPhy = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90))
rlPhy.setRevisions(('2002-09-30 00:24', '2003-09-21 00:24',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: rlPhy.setRevisionsDescriptions(('Initial revision', 'Added MODULE-IDENTITY and TEXTUAL-CONVENTION IMPORTS.',))
if mibBuilder.loadTexts: rlPhy.setLastUpdated('200209300024Z')
if mibBuilder.loadTexts: rlPhy.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: rlPhy.setContactInfo('Postal: 170 West Tasman Drive San Jose , CA 95134-1706 USA Website: Cisco Small Business Support Community <http://www.cisco.com/go/smallbizsupport>')
if mibBuilder.loadTexts: rlPhy.setDescription("The MIB module describes the private MIB for testing Layer1 interfaces supported by CISCOSB's software and products.")
class RlPhyTestType(TextualConvention, Integer32):
description = 'A value indicating the test to perform.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24))
namedValues = NamedValues(("rlPhyTestTableNoTest", 1), ("rlPhyTestTableCableStatus", 2), ("rlPhyTestTableCableFault", 3), ("rlPhyTestTableCableLength", 4), ("rlPhyTestTableTransceiverTemp", 5), ("rlPhyTestTableTransceiverSupply", 6), ("rlPhyTestTableTxBias", 7), ("rlPhyTestTableTxOutput", 8), ("rlPhyTestTableRxOpticalPower", 9), ("rlPhyTestTableDataReady", 10), ("rlPhyTestTableLOS", 11), ("rlPhyTestTableTxFault", 12), ("rlPhyTestTableCableChannel1", 13), ("rlPhyTestTableCableChannel2", 14), ("rlPhyTestTableCableChannel3", 15), ("rlPhyTestTableCableChannel4", 16), ("rlPhyTestTableCablePolarity1", 17), ("rlPhyTestTableCablePolarity2", 18), ("rlPhyTestTableCablePolarity3", 19), ("rlPhyTestTableCablePolarity4", 20), ("rlPhyTestTableCablePairSkew1", 21), ("rlPhyTestTableCablePairSkew2", 22), ("rlPhyTestTableCablePairSkew3", 23), ("rlPhyTestTableCablePairSkew4", 24))
rlPhyTest = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1))
rlPhyTestSetTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 1), )
if mibBuilder.loadTexts: rlPhyTestSetTable.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestSetTable.setDescription('')
rlPhyTestSetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: rlPhyTestSetEntry.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestSetEntry.setDescription('An entry containing objects for invoking tests on an interface.')
rlPhyTestSetType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 1, 1, 1), RlPhyTestType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlPhyTestSetType.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestSetType.setDescription('A control variable used to start operator initiated interface tests.1 indicates that no test has been initiated. Only operator initiated interface tests can be set to this variable.')
rlPhyTestGetTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2), )
if mibBuilder.loadTexts: rlPhyTestGetTable.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetTable.setDescription('')
rlPhyTestGetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCOSB-PHY-MIB", "rlPhyTestGetType"))
if mibBuilder.loadTexts: rlPhyTestGetEntry.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetEntry.setDescription('An entry containing results of tests on an interface.')
rlPhyTestGetType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2, 1, 1), RlPhyTestType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPhyTestGetType.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetType.setDescription('A control variable used to, 1 to indicate that this test can be done on the specified port, 2 to initiate the test whenever the user wishes')
rlPhyTestGetStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("none", 1), ("success", 2), ("inProgress", 3), ("notSupported", 4), ("unAbleToRun", 5), ("aborted", 6), ("failed", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPhyTestGetStatus.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetStatus.setDescription('This object contains the status of the most recently requested test for operator initiated tests or the value none(1) if no tests have been requested since the last reset. For non operator initiated tests the value is always none(1). Note that this facility provides no provision for saving the results of one test when starting another, as could be required if used by multiple managers concurrently.')
rlPhyTestGetResult = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPhyTestGetResult.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetResult.setDescription('This object holds the test result')
rlPhyTestGetUnits = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19))).clone(namedValues=NamedValues(("integer", 1), ("boolean", 2), ("downUP", 3), ("reverseNormal", 4), ("mdiMdix", 5), ("meter", 6), ("degree", 7), ("microVolt", 8), ("microOham", 9), ("microAmper", 10), ("microWatt", 11), ("millisecond", 12), ("alaskaPhyLength", 13), ("alaskaPhyStatus", 14), ("dbm", 15), ("decidbm", 16), ("milidbm", 17), ("abcd", 18), ("nanosecond", 19)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPhyTestGetUnits.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetUnits.setDescription('The test result unit of measure. The units can be standard unit or special units that are designed for special test. The alaskaPhyLength unit is design for the VCT diagnostic and its values are: less_than_50M(1), 50-80M(2), 80-110M(3), 110-140M(4), more_than_140M(5). The alaskaPhyStatus unit is design for the VCT diagnostic and its values are: 4_pair_cable(1), 2_pair_cable(2), no_cable(3), open_cable(4), short_cable(5), bad_cable(6), impedance_mismatch(7).')
rlPhyTestGetAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("notRelevant", 1), ("noAlarmSet", 2), ("lowWarning", 3), ("highWarning", 4), ("lowAlarm", 5), ("highAlarm", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPhyTestGetAlarm.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetAlarm.setDescription('This object hold the Alarm for this Entry. only Test that have can have alarms use this field, other holds the Value notRelevant(1) ')
rlPhyTestGetTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPhyTestGetTimeStamp.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetTimeStamp.setDescription('The time in string (formated DD-MMM-YYYY HH:MM:SS e.g 14-Apr-2002 10:33:31)')
mibBuilder.exportSymbols("CISCOSB-PHY-MIB", rlPhyTestGetTimeStamp=rlPhyTestGetTimeStamp, rlPhyTestSetTable=rlPhyTestSetTable, rlPhyTestGetAlarm=rlPhyTestGetAlarm, rlPhyTestGetEntry=rlPhyTestGetEntry, rlPhyTestGetTable=rlPhyTestGetTable, rlPhy=rlPhy, rlPhyTest=rlPhyTest, rlPhyTestSetEntry=rlPhyTestSetEntry, rlPhyTestSetType=rlPhyTestSetType, RlPhyTestType=RlPhyTestType, rlPhyTestGetType=rlPhyTestGetType, rlPhyTestGetStatus=rlPhyTestGetStatus, rlPhyTestGetResult=rlPhyTestGetResult, rlPhyTestGetUnits=rlPhyTestGetUnits, PYSNMP_MODULE_ID=rlPhy)
| 144.333333 | 877 | 0.757401 |
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
switch001, = mibBuilder.importSymbols("CISCOSB-MIB", "switch001")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Unsigned32, Counter64, Counter32, TimeTicks, IpAddress, Bits, Integer32, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, ObjectIdentity, ModuleIdentity, NotificationType, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "Counter64", "Counter32", "TimeTicks", "IpAddress", "Bits", "Integer32", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "ObjectIdentity", "ModuleIdentity", "NotificationType", "iso")
DisplayString, TextualConvention, TimeStamp = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "TimeStamp")
rlPhy = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90))
rlPhy.setRevisions(('2002-09-30 00:24', '2003-09-21 00:24',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: rlPhy.setRevisionsDescriptions(('Initial revision', 'Added MODULE-IDENTITY and TEXTUAL-CONVENTION IMPORTS.',))
if mibBuilder.loadTexts: rlPhy.setLastUpdated('200209300024Z')
if mibBuilder.loadTexts: rlPhy.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: rlPhy.setContactInfo('Postal: 170 West Tasman Drive San Jose , CA 95134-1706 USA Website: Cisco Small Business Support Community <http://www.cisco.com/go/smallbizsupport>')
if mibBuilder.loadTexts: rlPhy.setDescription("The MIB module describes the private MIB for testing Layer1 interfaces supported by CISCOSB's software and products.")
class RlPhyTestType(TextualConvention, Integer32):
description = 'A value indicating the test to perform.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24))
namedValues = NamedValues(("rlPhyTestTableNoTest", 1), ("rlPhyTestTableCableStatus", 2), ("rlPhyTestTableCableFault", 3), ("rlPhyTestTableCableLength", 4), ("rlPhyTestTableTransceiverTemp", 5), ("rlPhyTestTableTransceiverSupply", 6), ("rlPhyTestTableTxBias", 7), ("rlPhyTestTableTxOutput", 8), ("rlPhyTestTableRxOpticalPower", 9), ("rlPhyTestTableDataReady", 10), ("rlPhyTestTableLOS", 11), ("rlPhyTestTableTxFault", 12), ("rlPhyTestTableCableChannel1", 13), ("rlPhyTestTableCableChannel2", 14), ("rlPhyTestTableCableChannel3", 15), ("rlPhyTestTableCableChannel4", 16), ("rlPhyTestTableCablePolarity1", 17), ("rlPhyTestTableCablePolarity2", 18), ("rlPhyTestTableCablePolarity3", 19), ("rlPhyTestTableCablePolarity4", 20), ("rlPhyTestTableCablePairSkew1", 21), ("rlPhyTestTableCablePairSkew2", 22), ("rlPhyTestTableCablePairSkew3", 23), ("rlPhyTestTableCablePairSkew4", 24))
rlPhyTest = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1))
rlPhyTestSetTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 1), )
if mibBuilder.loadTexts: rlPhyTestSetTable.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestSetTable.setDescription('')
rlPhyTestSetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: rlPhyTestSetEntry.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestSetEntry.setDescription('An entry containing objects for invoking tests on an interface.')
rlPhyTestSetType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 1, 1, 1), RlPhyTestType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlPhyTestSetType.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestSetType.setDescription('A control variable used to start operator initiated interface tests.1 indicates that no test has been initiated. Only operator initiated interface tests can be set to this variable.')
rlPhyTestGetTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2), )
if mibBuilder.loadTexts: rlPhyTestGetTable.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetTable.setDescription('')
rlPhyTestGetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCOSB-PHY-MIB", "rlPhyTestGetType"))
if mibBuilder.loadTexts: rlPhyTestGetEntry.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetEntry.setDescription('An entry containing results of tests on an interface.')
rlPhyTestGetType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2, 1, 1), RlPhyTestType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPhyTestGetType.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetType.setDescription('A control variable used to, 1 to indicate that this test can be done on the specified port, 2 to initiate the test whenever the user wishes')
rlPhyTestGetStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("none", 1), ("success", 2), ("inProgress", 3), ("notSupported", 4), ("unAbleToRun", 5), ("aborted", 6), ("failed", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPhyTestGetStatus.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetStatus.setDescription('This object contains the status of the most recently requested test for operator initiated tests or the value none(1) if no tests have been requested since the last reset. For non operator initiated tests the value is always none(1). Note that this facility provides no provision for saving the results of one test when starting another, as could be required if used by multiple managers concurrently.')
rlPhyTestGetResult = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPhyTestGetResult.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetResult.setDescription('This object holds the test result')
rlPhyTestGetUnits = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19))).clone(namedValues=NamedValues(("integer", 1), ("boolean", 2), ("downUP", 3), ("reverseNormal", 4), ("mdiMdix", 5), ("meter", 6), ("degree", 7), ("microVolt", 8), ("microOham", 9), ("microAmper", 10), ("microWatt", 11), ("millisecond", 12), ("alaskaPhyLength", 13), ("alaskaPhyStatus", 14), ("dbm", 15), ("decidbm", 16), ("milidbm", 17), ("abcd", 18), ("nanosecond", 19)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPhyTestGetUnits.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetUnits.setDescription('The test result unit of measure. The units can be standard unit or special units that are designed for special test. The alaskaPhyLength unit is design for the VCT diagnostic and its values are: less_than_50M(1), 50-80M(2), 80-110M(3), 110-140M(4), more_than_140M(5). The alaskaPhyStatus unit is design for the VCT diagnostic and its values are: 4_pair_cable(1), 2_pair_cable(2), no_cable(3), open_cable(4), short_cable(5), bad_cable(6), impedance_mismatch(7).')
rlPhyTestGetAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("notRelevant", 1), ("noAlarmSet", 2), ("lowWarning", 3), ("highWarning", 4), ("lowAlarm", 5), ("highAlarm", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPhyTestGetAlarm.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetAlarm.setDescription('This object hold the Alarm for this Entry. only Test that have can have alarms use this field, other holds the Value notRelevant(1) ')
rlPhyTestGetTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 90, 1, 2, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPhyTestGetTimeStamp.setStatus('current')
if mibBuilder.loadTexts: rlPhyTestGetTimeStamp.setDescription('The time in string (formated DD-MMM-YYYY HH:MM:SS e.g 14-Apr-2002 10:33:31)')
mibBuilder.exportSymbols("CISCOSB-PHY-MIB", rlPhyTestGetTimeStamp=rlPhyTestGetTimeStamp, rlPhyTestSetTable=rlPhyTestSetTable, rlPhyTestGetAlarm=rlPhyTestGetAlarm, rlPhyTestGetEntry=rlPhyTestGetEntry, rlPhyTestGetTable=rlPhyTestGetTable, rlPhy=rlPhy, rlPhyTest=rlPhyTest, rlPhyTestSetEntry=rlPhyTestSetEntry, rlPhyTestSetType=rlPhyTestSetType, RlPhyTestType=RlPhyTestType, rlPhyTestGetType=rlPhyTestGetType, rlPhyTestGetStatus=rlPhyTestGetStatus, rlPhyTestGetResult=rlPhyTestGetResult, rlPhyTestGetUnits=rlPhyTestGetUnits, PYSNMP_MODULE_ID=rlPhy)
| true | true |
f733bd9a6eef5c6c113449021854e09351067ad5 | 857 | py | Python | cartographer/field_types/date_attribute.py | Patreon/cartographer | fe5c03decf01c9f7894bb9cf1f839af435143527 | [
"Apache-2.0"
] | 29 | 2016-03-30T00:53:42.000Z | 2022-03-02T23:45:12.000Z | cartographer/field_types/date_attribute.py | Patreon/cartographer | fe5c03decf01c9f7894bb9cf1f839af435143527 | [
"Apache-2.0"
] | 20 | 2016-04-19T18:34:05.000Z | 2022-02-14T14:18:33.000Z | cartographer/field_types/date_attribute.py | Patreon/cartographer | fe5c03decf01c9f7894bb9cf1f839af435143527 | [
"Apache-2.0"
] | 5 | 2016-04-28T00:44:24.000Z | 2019-10-26T08:09:17.000Z | import ciso8601
import dateutil.parser
from cartographer.field_types import SchemaAttribute
from cartographer.utils.datetime import as_utc, make_naive
class DateAttribute(SchemaAttribute):
    """Schema attribute that (de)serializes datetimes as ISO 8601 strings."""

    @classmethod
    def format_value_for_json(cls, value):
        """Return ``value`` converted to UTC as an ISO 8601 string."""
        return as_utc(value).isoformat()

    def from_json(self, serialized_value):
        """Parse a serialized date string into a naive datetime.

        Returns ``None`` when the attribute is nullable and the input is None.
        """
        if self.is_nullable and serialized_value is None:
            return None
        # ciso8601 is significantly faster than dateutil.parser for parsing
        # iso8601 strings, so we try it first. Some ciso8601 versions return
        # None instead of raising on unparseable input, so both a raised error
        # and a None result fall back to dateutil. (The original used
        # ``assert parsed_value is not None``, which is stripped under
        # ``python -O`` and would let None reach make_naive().)
        try:
            parsed_value = ciso8601.parse_datetime(serialized_value)
        except Exception:
            parsed_value = None
        if parsed_value is None:
            parsed_value = dateutil.parser.parse(serialized_value)
        return make_naive(parsed_value)
| 32.961538 | 115 | 0.71762 | import ciso8601
import dateutil.parser
from cartographer.field_types import SchemaAttribute
from cartographer.utils.datetime import as_utc, make_naive
class DateAttribute(SchemaAttribute):
    """Attribute type that round-trips datetime values through JSON strings."""

    @classmethod
    def format_value_for_json(cls, value):
        """Serialize *value* as a UTC ISO 8601 string."""
        utc_value = as_utc(value)
        return utc_value.isoformat()

    def from_json(self, serialized_value):
        """Deserialize an ISO 8601 string into a naive datetime."""
        if self.is_nullable and serialized_value is None:
            return None
        try:
            # Fast path: ciso8601's C parser.
            parsed = ciso8601.parse_datetime(serialized_value)
            assert parsed is not None
        except Exception:
            # Slow path: dateutil accepts formats ciso8601 rejects.
            parsed = dateutil.parser.parse(serialized_value)
        return make_naive(parsed)
| true | true |
f733be7ac9647ed984d090af8a46970329966652 | 747 | py | Python | apps/sushi/migrations/0015_help_text.py | techlib/celus | f32a7a22be5f4613dcac10b8e02c5c5a9bc297cb | [
"MIT"
] | 7 | 2020-02-20T13:24:40.000Z | 2022-01-28T19:36:04.000Z | apps/sushi/migrations/0015_help_text.py | techlib/czechelib-stats | ca132e326af0924740a525710474870b1fb5fd37 | [
"MIT"
] | 15 | 2020-04-28T13:09:02.000Z | 2021-11-03T15:21:24.000Z | apps/sushi/migrations/0015_help_text.py | techlib/czechelib-stats | ca132e326af0924740a525710474870b1fb5fd37 | [
"MIT"
] | 4 | 2020-02-20T13:48:30.000Z | 2021-03-19T00:33:34.000Z | # Generated by Django 2.2.4 on 2019-09-02 07:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add help_text to SushiFetchAttempt.download_success and .processing_success."""
    # NOTE(review): only help_text changes here — presumably no DB schema
    # impact, but confirm with `sqlmigrate` before assuming it's a no-op.
    dependencies = [
        ('sushi', '0014_sushifetchattempt_processing_success'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sushifetchattempt',
            name='download_success',
            field=models.BooleanField(help_text='True if there was no error downloading data'),
        ),
        migrations.AlterField(
            model_name='sushifetchattempt',
            name='processing_success',
            field=models.BooleanField(
                help_text='True if there was no error extracting data from the downloaded material'
            ),
        ),
    ]
| 28.730769 | 99 | 0.62249 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Attach help_text to the two success flags on SushiFetchAttempt."""
    # NOTE(review): this alters field metadata only (help_text); verify with
    # `sqlmigrate` that no SQL is emitted if that matters for deployment.
    dependencies = [
        ('sushi', '0014_sushifetchattempt_processing_success'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sushifetchattempt',
            name='download_success',
            field=models.BooleanField(help_text='True if there was no error downloading data'),
        ),
        migrations.AlterField(
            model_name='sushifetchattempt',
            name='processing_success',
            field=models.BooleanField(
                help_text='True if there was no error extracting data from the downloaded material'
            ),
        ),
    ]
| true | true |
f733bfb438d40a8a18f025df4f707cd21176ab8d | 1,888 | py | Python | app/core/tests/test_models.py | Adamakk10/recepie-app-api | f0a989e571e8e9de8eed81768be3c11bff949832 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | Adamakk10/recepie-app-api | f0a989e571e8e9de8eed81768be3c11bff949832 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | Adamakk10/recepie-app-api | f0a989e571e8e9de8eed81768be3c11bff949832 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@gmail.com', password='test1234'):
    """Create and return a throwaway user for use as a test fixture."""
    user_model = get_user_model()
    return user_model.objects.create_user(email, password)
class ModelTests(TestCase):
    """Unit tests for the custom user model and the recipe-related models."""

    def test_create_user_with_email_successful(self):
        """A user created with an email and password stores both correctly."""
        address = 'test@gmail.com'
        secret = 'test1234'
        new_user = get_user_model().objects.create_user(
            email=address,
            password=secret,
        )
        self.assertEqual(new_user.email, address)
        self.assertTrue(new_user.check_password(secret))

    def test_new_user_email_normalized(self):
        """The domain part of a new user's email is lower-cased."""
        mixed_case = 'test@GMAIL.COM'
        new_user = get_user_model().objects.create_user(mixed_case, 'test1234')
        self.assertEqual(new_user.email, mixed_case.lower())

    def test_new_user_invalid_email(self):
        """Creating a user without an email raises ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test1234')

    def test_create_new_superuser(self):
        """A superuser is flagged as both superuser and staff."""
        admin = get_user_model().objects.create_superuser(
            'test@gmail.com',
            'test1234',
        )
        self.assertTrue(admin.is_superuser)
        self.assertTrue(admin.is_staff)

    def test_tag_str(self):
        """Tag's string representation is its name."""
        vegan_tag = models.Tag.objects.create(user=sample_user(), name='Vegan')
        self.assertEqual(str(vegan_tag), vegan_tag.name)

    def test_ingredient_str(self):
        """Ingredient's string representation is its name."""
        cucumber = models.Ingredient.objects.create(
            user=sample_user(),
            name='Cucumber',
        )
        self.assertEqual(str(cucumber), cucumber.name)

    def test_recipe_str(self):
        """Recipe's string representation is its title."""
        # Original fixture title (including its typo) kept byte-for-byte.
        steak = models.Recipe.objects.create(
            user=sample_user(),
            title='Staek and mushroom sauce',
            time_minutes=5,
            price=5.00,
        )
        self.assertEqual(str(steak), steak.title)
| 24.842105 | 70 | 0.631886 | from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@gmail.com', password='test1234'):
    """Create a user with default test credentials and return it."""
    manager = get_user_model().objects
    return manager.create_user(email, password)
class ModelTests(TestCase):
    """Tests covering the custom user manager and model __str__ methods."""

    def test_create_user_with_email_successful(self):
        """Users can be created with an email address and a password."""
        credentials = {'email': 'test@gmail.com', 'password': 'test1234'}
        created = get_user_model().objects.create_user(**credentials)
        self.assertEqual(created.email, credentials['email'])
        self.assertTrue(created.check_password(credentials['password']))

    def test_new_user_email_normalized(self):
        """Email domains are normalized to lower case on creation."""
        raw_email = 'test@GMAIL.COM'
        created = get_user_model().objects.create_user(raw_email, 'test1234')
        self.assertEqual(created.email, raw_email.lower())

    def test_new_user_invalid_email(self):
        """A missing email address is rejected with ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test1234')

    def test_create_new_superuser(self):
        """create_superuser sets both is_superuser and is_staff."""
        superuser = get_user_model().objects.create_superuser(
            'test@gmail.com', 'test1234')
        self.assertTrue(superuser.is_superuser)
        self.assertTrue(superuser.is_staff)

    def test_tag_str(self):
        """str(Tag) yields the tag name."""
        tag_obj = models.Tag.objects.create(user=sample_user(), name='Vegan')
        self.assertEqual(str(tag_obj), tag_obj.name)

    def test_ingredient_str(self):
        """str(Ingredient) yields the ingredient name."""
        ingredient_obj = models.Ingredient.objects.create(
            user=sample_user(), name='Cucumber')
        self.assertEqual(str(ingredient_obj), ingredient_obj.name)

    def test_recipe_str(self):
        """str(Recipe) yields the recipe title."""
        # Fixture title reproduced exactly as in the original (typo included).
        recipe_obj = models.Recipe.objects.create(
            user=sample_user(),
            title='Staek and mushroom sauce',
            time_minutes=5,
            price=5.00,
        )
        self.assertEqual(str(recipe_obj), recipe_obj.title)
| true | true |
f733bfe0e81565b3822d50f9e385174c31a40427 | 47,558 | py | Python | sdk/python/pulumi_azure_native/network/v20210201/virtual_network_gateway_connection.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20210201/virtual_network_gateway_connection.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20210201/virtual_network_gateway_connection.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['VirtualNetworkGatewayConnectionArgs', 'VirtualNetworkGatewayConnection']
@pulumi.input_type
class VirtualNetworkGatewayConnectionArgs:
    """The set of arguments for constructing a VirtualNetworkGatewayConnection resource.

    NOTE: this class is emitted by the Pulumi SDK generator (see the file
    header's "do not edit by hand" warning); the property names and the
    keys passed to pulumi.set/pulumi.get must stay exactly in sync, so the
    code below is intentionally left untouched.
    """
    def __init__(__self__, *,
                 connection_type: pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionType']],
                 resource_group_name: pulumi.Input[str],
                 virtual_network_gateway1: pulumi.Input['VirtualNetworkGatewayArgs'],
                 authorization_key: Optional[pulumi.Input[str]] = None,
                 connection_mode: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionMode']]] = None,
                 connection_protocol: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]] = None,
                 dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,
                 egress_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 enable_bgp: Optional[pulumi.Input[bool]] = None,
                 express_route_gateway_bypass: Optional[pulumi.Input[bool]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 ingress_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 ipsec_policies: Optional[pulumi.Input[Sequence[pulumi.Input['IpsecPolicyArgs']]]] = None,
                 local_network_gateway2: Optional[pulumi.Input['LocalNetworkGatewayArgs']] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 peer: Optional[pulumi.Input['SubResourceArgs']] = None,
                 routing_weight: Optional[pulumi.Input[int]] = None,
                 shared_key: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 traffic_selector_policies: Optional[pulumi.Input[Sequence[pulumi.Input['TrafficSelectorPolicyArgs']]]] = None,
                 use_local_azure_ip_address: Optional[pulumi.Input[bool]] = None,
                 use_policy_based_traffic_selectors: Optional[pulumi.Input[bool]] = None,
                 virtual_network_gateway2: Optional[pulumi.Input['VirtualNetworkGatewayArgs']] = None,
                 virtual_network_gateway_connection_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a VirtualNetworkGatewayConnection resource.
        :param pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionType']] connection_type: Gateway connection type.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input['VirtualNetworkGatewayArgs'] virtual_network_gateway1: The reference to virtual network gateway resource.
        :param pulumi.Input[str] authorization_key: The authorizationKey.
        :param pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionMode']] connection_mode: The connection mode for this connection.
        :param pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']] connection_protocol: Connection protocol used for this connection.
        :param pulumi.Input[int] dpd_timeout_seconds: The dead peer detection timeout of this connection in seconds.
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] egress_nat_rules: List of egress NatRules.
        :param pulumi.Input[bool] enable_bgp: EnableBgp flag.
        :param pulumi.Input[bool] express_route_gateway_bypass: Bypass ExpressRoute Gateway for data forwarding.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] ingress_nat_rules: List of ingress NatRules.
        :param pulumi.Input[Sequence[pulumi.Input['IpsecPolicyArgs']]] ipsec_policies: The IPSec Policies to be considered by this connection.
        :param pulumi.Input['LocalNetworkGatewayArgs'] local_network_gateway2: The reference to local network gateway resource.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input['SubResourceArgs'] peer: The reference to peerings resource.
        :param pulumi.Input[int] routing_weight: The routing weight.
        :param pulumi.Input[str] shared_key: The IPSec shared key.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[Sequence[pulumi.Input['TrafficSelectorPolicyArgs']]] traffic_selector_policies: The Traffic Selector Policies to be considered by this connection.
        :param pulumi.Input[bool] use_local_azure_ip_address: Use private local Azure IP for the connection.
        :param pulumi.Input[bool] use_policy_based_traffic_selectors: Enable policy-based traffic selectors.
        :param pulumi.Input['VirtualNetworkGatewayArgs'] virtual_network_gateway2: The reference to virtual network gateway resource.
        :param pulumi.Input[str] virtual_network_gateway_connection_name: The name of the virtual network gateway connection.
        """
        pulumi.set(__self__, "connection_type", connection_type)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "virtual_network_gateway1", virtual_network_gateway1)
        if authorization_key is not None:
            pulumi.set(__self__, "authorization_key", authorization_key)
        if connection_mode is not None:
            pulumi.set(__self__, "connection_mode", connection_mode)
        if connection_protocol is not None:
            pulumi.set(__self__, "connection_protocol", connection_protocol)
        if dpd_timeout_seconds is not None:
            pulumi.set(__self__, "dpd_timeout_seconds", dpd_timeout_seconds)
        if egress_nat_rules is not None:
            pulumi.set(__self__, "egress_nat_rules", egress_nat_rules)
        if enable_bgp is not None:
            pulumi.set(__self__, "enable_bgp", enable_bgp)
        if express_route_gateway_bypass is not None:
            pulumi.set(__self__, "express_route_gateway_bypass", express_route_gateway_bypass)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if ingress_nat_rules is not None:
            pulumi.set(__self__, "ingress_nat_rules", ingress_nat_rules)
        if ipsec_policies is not None:
            pulumi.set(__self__, "ipsec_policies", ipsec_policies)
        if local_network_gateway2 is not None:
            pulumi.set(__self__, "local_network_gateway2", local_network_gateway2)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if peer is not None:
            pulumi.set(__self__, "peer", peer)
        if routing_weight is not None:
            pulumi.set(__self__, "routing_weight", routing_weight)
        if shared_key is not None:
            pulumi.set(__self__, "shared_key", shared_key)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if traffic_selector_policies is not None:
            pulumi.set(__self__, "traffic_selector_policies", traffic_selector_policies)
        if use_local_azure_ip_address is not None:
            pulumi.set(__self__, "use_local_azure_ip_address", use_local_azure_ip_address)
        if use_policy_based_traffic_selectors is not None:
            pulumi.set(__self__, "use_policy_based_traffic_selectors", use_policy_based_traffic_selectors)
        if virtual_network_gateway2 is not None:
            pulumi.set(__self__, "virtual_network_gateway2", virtual_network_gateway2)
        if virtual_network_gateway_connection_name is not None:
            pulumi.set(__self__, "virtual_network_gateway_connection_name", virtual_network_gateway_connection_name)
    @property
    @pulumi.getter(name="connectionType")
    def connection_type(self) -> pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionType']]:
        """
        Gateway connection type.
        """
        return pulumi.get(self, "connection_type")
    @connection_type.setter
    def connection_type(self, value: pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionType']]):
        pulumi.set(self, "connection_type", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="virtualNetworkGateway1")
    def virtual_network_gateway1(self) -> pulumi.Input['VirtualNetworkGatewayArgs']:
        """
        The reference to virtual network gateway resource.
        """
        return pulumi.get(self, "virtual_network_gateway1")
    @virtual_network_gateway1.setter
    def virtual_network_gateway1(self, value: pulumi.Input['VirtualNetworkGatewayArgs']):
        pulumi.set(self, "virtual_network_gateway1", value)
    @property
    @pulumi.getter(name="authorizationKey")
    def authorization_key(self) -> Optional[pulumi.Input[str]]:
        """
        The authorizationKey.
        """
        return pulumi.get(self, "authorization_key")
    @authorization_key.setter
    def authorization_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "authorization_key", value)
    @property
    @pulumi.getter(name="connectionMode")
    def connection_mode(self) -> Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionMode']]]:
        """
        The connection mode for this connection.
        """
        return pulumi.get(self, "connection_mode")
    @connection_mode.setter
    def connection_mode(self, value: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionMode']]]):
        pulumi.set(self, "connection_mode", value)
    @property
    @pulumi.getter(name="connectionProtocol")
    def connection_protocol(self) -> Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]]:
        """
        Connection protocol used for this connection.
        """
        return pulumi.get(self, "connection_protocol")
    @connection_protocol.setter
    def connection_protocol(self, value: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]]):
        pulumi.set(self, "connection_protocol", value)
    @property
    @pulumi.getter(name="dpdTimeoutSeconds")
    def dpd_timeout_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The dead peer detection timeout of this connection in seconds.
        """
        return pulumi.get(self, "dpd_timeout_seconds")
    @dpd_timeout_seconds.setter
    def dpd_timeout_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "dpd_timeout_seconds", value)
    @property
    @pulumi.getter(name="egressNatRules")
    def egress_nat_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """
        List of egress NatRules.
        """
        return pulumi.get(self, "egress_nat_rules")
    @egress_nat_rules.setter
    def egress_nat_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "egress_nat_rules", value)
    @property
    @pulumi.getter(name="enableBgp")
    def enable_bgp(self) -> Optional[pulumi.Input[bool]]:
        """
        EnableBgp flag.
        """
        return pulumi.get(self, "enable_bgp")
    @enable_bgp.setter
    def enable_bgp(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_bgp", value)
    @property
    @pulumi.getter(name="expressRouteGatewayBypass")
    def express_route_gateway_bypass(self) -> Optional[pulumi.Input[bool]]:
        """
        Bypass ExpressRoute Gateway for data forwarding.
        """
        return pulumi.get(self, "express_route_gateway_bypass")
    @express_route_gateway_bypass.setter
    def express_route_gateway_bypass(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "express_route_gateway_bypass", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="ingressNatRules")
    def ingress_nat_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """
        List of ingress NatRules.
        """
        return pulumi.get(self, "ingress_nat_rules")
    @ingress_nat_rules.setter
    def ingress_nat_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "ingress_nat_rules", value)
    @property
    @pulumi.getter(name="ipsecPolicies")
    def ipsec_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpsecPolicyArgs']]]]:
        """
        The IPSec Policies to be considered by this connection.
        """
        return pulumi.get(self, "ipsec_policies")
    @ipsec_policies.setter
    def ipsec_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IpsecPolicyArgs']]]]):
        pulumi.set(self, "ipsec_policies", value)
    @property
    @pulumi.getter(name="localNetworkGateway2")
    def local_network_gateway2(self) -> Optional[pulumi.Input['LocalNetworkGatewayArgs']]:
        """
        The reference to local network gateway resource.
        """
        return pulumi.get(self, "local_network_gateway2")
    @local_network_gateway2.setter
    def local_network_gateway2(self, value: Optional[pulumi.Input['LocalNetworkGatewayArgs']]):
        pulumi.set(self, "local_network_gateway2", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def peer(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        The reference to peerings resource.
        """
        return pulumi.get(self, "peer")
    @peer.setter
    def peer(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "peer", value)
    @property
    @pulumi.getter(name="routingWeight")
    def routing_weight(self) -> Optional[pulumi.Input[int]]:
        """
        The routing weight.
        """
        return pulumi.get(self, "routing_weight")
    @routing_weight.setter
    def routing_weight(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "routing_weight", value)
    @property
    @pulumi.getter(name="sharedKey")
    def shared_key(self) -> Optional[pulumi.Input[str]]:
        """
        The IPSec shared key.
        """
        return pulumi.get(self, "shared_key")
    @shared_key.setter
    def shared_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "shared_key", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="trafficSelectorPolicies")
    def traffic_selector_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TrafficSelectorPolicyArgs']]]]:
        """
        The Traffic Selector Policies to be considered by this connection.
        """
        return pulumi.get(self, "traffic_selector_policies")
    @traffic_selector_policies.setter
    def traffic_selector_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TrafficSelectorPolicyArgs']]]]):
        pulumi.set(self, "traffic_selector_policies", value)
    @property
    @pulumi.getter(name="useLocalAzureIpAddress")
    def use_local_azure_ip_address(self) -> Optional[pulumi.Input[bool]]:
        """
        Use private local Azure IP for the connection.
        """
        return pulumi.get(self, "use_local_azure_ip_address")
    @use_local_azure_ip_address.setter
    def use_local_azure_ip_address(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_local_azure_ip_address", value)
    @property
    @pulumi.getter(name="usePolicyBasedTrafficSelectors")
    def use_policy_based_traffic_selectors(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable policy-based traffic selectors.
        """
        return pulumi.get(self, "use_policy_based_traffic_selectors")
    @use_policy_based_traffic_selectors.setter
    def use_policy_based_traffic_selectors(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_policy_based_traffic_selectors", value)
    @property
    @pulumi.getter(name="virtualNetworkGateway2")
    def virtual_network_gateway2(self) -> Optional[pulumi.Input['VirtualNetworkGatewayArgs']]:
        """
        The reference to virtual network gateway resource.
        """
        return pulumi.get(self, "virtual_network_gateway2")
    @virtual_network_gateway2.setter
    def virtual_network_gateway2(self, value: Optional[pulumi.Input['VirtualNetworkGatewayArgs']]):
        pulumi.set(self, "virtual_network_gateway2", value)
    @property
    @pulumi.getter(name="virtualNetworkGatewayConnectionName")
    def virtual_network_gateway_connection_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the virtual network gateway connection.
        """
        return pulumi.get(self, "virtual_network_gateway_connection_name")
    @virtual_network_gateway_connection_name.setter
    def virtual_network_gateway_connection_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_gateway_connection_name", value)
class VirtualNetworkGatewayConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
connection_mode: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionMode']]] = None,
connection_protocol: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]] = None,
connection_type: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionType']]] = None,
dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,
egress_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
enable_bgp: Optional[pulumi.Input[bool]] = None,
express_route_gateway_bypass: Optional[pulumi.Input[bool]] = None,
id: Optional[pulumi.Input[str]] = None,
ingress_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
ipsec_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpsecPolicyArgs']]]]] = None,
local_network_gateway2: Optional[pulumi.Input[pulumi.InputType['LocalNetworkGatewayArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
peer: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
routing_weight: Optional[pulumi.Input[int]] = None,
shared_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
traffic_selector_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrafficSelectorPolicyArgs']]]]] = None,
use_local_azure_ip_address: Optional[pulumi.Input[bool]] = None,
use_policy_based_traffic_selectors: Optional[pulumi.Input[bool]] = None,
virtual_network_gateway1: Optional[pulumi.Input[pulumi.InputType['VirtualNetworkGatewayArgs']]] = None,
virtual_network_gateway2: Optional[pulumi.Input[pulumi.InputType['VirtualNetworkGatewayArgs']]] = None,
virtual_network_gateway_connection_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A common class for general resource information.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authorization_key: The authorizationKey.
:param pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionMode']] connection_mode: The connection mode for this connection.
:param pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']] connection_protocol: Connection protocol used for this connection.
:param pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionType']] connection_type: Gateway connection type.
:param pulumi.Input[int] dpd_timeout_seconds: The dead peer detection timeout of this connection in seconds.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]] egress_nat_rules: List of egress NatRules.
:param pulumi.Input[bool] enable_bgp: EnableBgp flag.
:param pulumi.Input[bool] express_route_gateway_bypass: Bypass ExpressRoute Gateway for data forwarding.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]] ingress_nat_rules: List of ingress NatRules.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpsecPolicyArgs']]]] ipsec_policies: The IPSec Policies to be considered by this connection.
:param pulumi.Input[pulumi.InputType['LocalNetworkGatewayArgs']] local_network_gateway2: The reference to local network gateway resource.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] peer: The reference to peerings resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[int] routing_weight: The routing weight.
:param pulumi.Input[str] shared_key: The IPSec shared key.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrafficSelectorPolicyArgs']]]] traffic_selector_policies: The Traffic Selector Policies to be considered by this connection.
:param pulumi.Input[bool] use_local_azure_ip_address: Use private local Azure IP for the connection.
:param pulumi.Input[bool] use_policy_based_traffic_selectors: Enable policy-based traffic selectors.
:param pulumi.Input[pulumi.InputType['VirtualNetworkGatewayArgs']] virtual_network_gateway1: The reference to virtual network gateway resource.
:param pulumi.Input[pulumi.InputType['VirtualNetworkGatewayArgs']] virtual_network_gateway2: The reference to virtual network gateway resource.
:param pulumi.Input[str] virtual_network_gateway_connection_name: The name of the virtual network gateway connection.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VirtualNetworkGatewayConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A common class for general resource information.
:param str resource_name: The name of the resource.
:param VirtualNetworkGatewayConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VirtualNetworkGatewayConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
connection_mode: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionMode']]] = None,
connection_protocol: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]] = None,
connection_type: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionType']]] = None,
dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,
egress_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
enable_bgp: Optional[pulumi.Input[bool]] = None,
express_route_gateway_bypass: Optional[pulumi.Input[bool]] = None,
id: Optional[pulumi.Input[str]] = None,
ingress_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
ipsec_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpsecPolicyArgs']]]]] = None,
local_network_gateway2: Optional[pulumi.Input[pulumi.InputType['LocalNetworkGatewayArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
peer: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
routing_weight: Optional[pulumi.Input[int]] = None,
shared_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
traffic_selector_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrafficSelectorPolicyArgs']]]]] = None,
use_local_azure_ip_address: Optional[pulumi.Input[bool]] = None,
use_policy_based_traffic_selectors: Optional[pulumi.Input[bool]] = None,
virtual_network_gateway1: Optional[pulumi.Input[pulumi.InputType['VirtualNetworkGatewayArgs']]] = None,
virtual_network_gateway2: Optional[pulumi.Input[pulumi.InputType['VirtualNetworkGatewayArgs']]] = None,
virtual_network_gateway_connection_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VirtualNetworkGatewayConnectionArgs.__new__(VirtualNetworkGatewayConnectionArgs)
__props__.__dict__["authorization_key"] = authorization_key
__props__.__dict__["connection_mode"] = connection_mode
__props__.__dict__["connection_protocol"] = connection_protocol
if connection_type is None and not opts.urn:
raise TypeError("Missing required property 'connection_type'")
__props__.__dict__["connection_type"] = connection_type
__props__.__dict__["dpd_timeout_seconds"] = dpd_timeout_seconds
__props__.__dict__["egress_nat_rules"] = egress_nat_rules
__props__.__dict__["enable_bgp"] = enable_bgp
__props__.__dict__["express_route_gateway_bypass"] = express_route_gateway_bypass
__props__.__dict__["id"] = id
__props__.__dict__["ingress_nat_rules"] = ingress_nat_rules
__props__.__dict__["ipsec_policies"] = ipsec_policies
__props__.__dict__["local_network_gateway2"] = local_network_gateway2
__props__.__dict__["location"] = location
__props__.__dict__["peer"] = peer
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["routing_weight"] = routing_weight
__props__.__dict__["shared_key"] = shared_key
__props__.__dict__["tags"] = tags
__props__.__dict__["traffic_selector_policies"] = traffic_selector_policies
__props__.__dict__["use_local_azure_ip_address"] = use_local_azure_ip_address
__props__.__dict__["use_policy_based_traffic_selectors"] = use_policy_based_traffic_selectors
if virtual_network_gateway1 is None and not opts.urn:
raise TypeError("Missing required property 'virtual_network_gateway1'")
__props__.__dict__["virtual_network_gateway1"] = virtual_network_gateway1
__props__.__dict__["virtual_network_gateway2"] = virtual_network_gateway2
__props__.__dict__["virtual_network_gateway_connection_name"] = virtual_network_gateway_connection_name
__props__.__dict__["connection_status"] = None
__props__.__dict__["egress_bytes_transferred"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["ingress_bytes_transferred"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_guid"] = None
__props__.__dict__["tunnel_connection_status"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20210201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20150615:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20150615:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20160330:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20160330:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20160601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20160601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20160901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20160901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20161201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20161201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20170301:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20170301:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20170601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20170601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20170801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20170801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20170901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20170901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20171001:VirtualNetworkGatewayConnection"), 
pulumi.Alias(type_="azure-nextgen:network/v20171001:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20171101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20171101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20181001:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20181101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20181201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VirtualNetworkGatewayConnection"), 
pulumi.Alias(type_="azure-native:network/v20190401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20191101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20191201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200301:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200301:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200501:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200701:VirtualNetworkGatewayConnection"), 
pulumi.Alias(type_="azure-nextgen:network/v20200701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20201101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20201101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20210301:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20210301:VirtualNetworkGatewayConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VirtualNetworkGatewayConnection, __self__).__init__(
'azure-native:network/v20210201:VirtualNetworkGatewayConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualNetworkGatewayConnection':
"""
Get an existing VirtualNetworkGatewayConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VirtualNetworkGatewayConnectionArgs.__new__(VirtualNetworkGatewayConnectionArgs)
__props__.__dict__["authorization_key"] = None
__props__.__dict__["connection_mode"] = None
__props__.__dict__["connection_protocol"] = None
__props__.__dict__["connection_status"] = None
__props__.__dict__["connection_type"] = None
__props__.__dict__["dpd_timeout_seconds"] = None
__props__.__dict__["egress_bytes_transferred"] = None
__props__.__dict__["egress_nat_rules"] = None
__props__.__dict__["enable_bgp"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["express_route_gateway_bypass"] = None
__props__.__dict__["ingress_bytes_transferred"] = None
__props__.__dict__["ingress_nat_rules"] = None
__props__.__dict__["ipsec_policies"] = None
__props__.__dict__["local_network_gateway2"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["peer"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_guid"] = None
__props__.__dict__["routing_weight"] = None
__props__.__dict__["shared_key"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["traffic_selector_policies"] = None
__props__.__dict__["tunnel_connection_status"] = None
__props__.__dict__["type"] = None
__props__.__dict__["use_local_azure_ip_address"] = None
__props__.__dict__["use_policy_based_traffic_selectors"] = None
__props__.__dict__["virtual_network_gateway1"] = None
__props__.__dict__["virtual_network_gateway2"] = None
return VirtualNetworkGatewayConnection(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="authorizationKey")
    def authorization_key(self) -> pulumi.Output[Optional[str]]:
        """
        The authorization key.
        """
        return pulumi.get(self, "authorization_key")
    @property
    @pulumi.getter(name="connectionMode")
    def connection_mode(self) -> pulumi.Output[Optional[str]]:
        """
        The connection mode for this connection.
        """
        return pulumi.get(self, "connection_mode")
    @property
    @pulumi.getter(name="connectionProtocol")
    def connection_protocol(self) -> pulumi.Output[Optional[str]]:
        """
        Connection protocol used for this connection.
        """
        return pulumi.get(self, "connection_protocol")
    # Read-only output: not a constructor input; populated by the provider after deployment.
    @property
    @pulumi.getter(name="connectionStatus")
    def connection_status(self) -> pulumi.Output[str]:
        """
        Virtual Network Gateway connection status.
        """
        return pulumi.get(self, "connection_status")
    @property
    @pulumi.getter(name="connectionType")
    def connection_type(self) -> pulumi.Output[str]:
        """
        Gateway connection type.
        """
        return pulumi.get(self, "connection_type")
    @property
    @pulumi.getter(name="dpdTimeoutSeconds")
    def dpd_timeout_seconds(self) -> pulumi.Output[Optional[int]]:
        """
        The dead peer detection timeout of this connection in seconds.
        """
        return pulumi.get(self, "dpd_timeout_seconds")
    # Read-only output: not a constructor input; populated by the provider after deployment.
    @property
    @pulumi.getter(name="egressBytesTransferred")
    def egress_bytes_transferred(self) -> pulumi.Output[float]:
        """
        The egress bytes transferred in this connection.
        """
        return pulumi.get(self, "egress_bytes_transferred")
    @property
    @pulumi.getter(name="egressNatRules")
    def egress_nat_rules(self) -> pulumi.Output[Optional[Sequence['outputs.SubResourceResponse']]]:
        """
        List of egress NatRules.
        """
        return pulumi.get(self, "egress_nat_rules")
    @property
    @pulumi.getter(name="enableBgp")
    def enable_bgp(self) -> pulumi.Output[Optional[bool]]:
        """
        EnableBgp flag.
        """
        return pulumi.get(self, "enable_bgp")
    # Read-only output: not a constructor input; populated by the provider after deployment.
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="expressRouteGatewayBypass")
    def express_route_gateway_bypass(self) -> pulumi.Output[Optional[bool]]:
        """
        Bypass ExpressRoute Gateway for data forwarding.
        """
        return pulumi.get(self, "express_route_gateway_bypass")
    # Read-only output: not a constructor input; populated by the provider after deployment.
    @property
    @pulumi.getter(name="ingressBytesTransferred")
    def ingress_bytes_transferred(self) -> pulumi.Output[float]:
        """
        The ingress bytes transferred in this connection.
        """
        return pulumi.get(self, "ingress_bytes_transferred")
    @property
    @pulumi.getter(name="ingressNatRules")
    def ingress_nat_rules(self) -> pulumi.Output[Optional[Sequence['outputs.SubResourceResponse']]]:
        """
        List of ingress NatRules.
        """
        return pulumi.get(self, "ingress_nat_rules")
    @property
    @pulumi.getter(name="ipsecPolicies")
    def ipsec_policies(self) -> pulumi.Output[Optional[Sequence['outputs.IpsecPolicyResponse']]]:
        """
        The IPSec Policies to be considered by this connection.
        """
        return pulumi.get(self, "ipsec_policies")
    @property
    @pulumi.getter(name="localNetworkGateway2")
    def local_network_gateway2(self) -> pulumi.Output[Optional['outputs.LocalNetworkGatewayResponse']]:
        """
        The reference to local network gateway resource.
        """
        return pulumi.get(self, "local_network_gateway2")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    # Read-only output: not a constructor input; populated by the provider after deployment.
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def peer(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """
        The reference to peerings resource.
        """
        return pulumi.get(self, "peer")
    # Read-only output: not a constructor input; populated by the provider after deployment.
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the virtual network gateway connection resource.
        """
        return pulumi.get(self, "provisioning_state")
    # Read-only output: not a constructor input; populated by the provider after deployment.
    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> pulumi.Output[str]:
        """
        The resource GUID property of the virtual network gateway connection resource.
        """
        return pulumi.get(self, "resource_guid")
    @property
    @pulumi.getter(name="routingWeight")
    def routing_weight(self) -> pulumi.Output[Optional[int]]:
        """
        The routing weight.
        """
        return pulumi.get(self, "routing_weight")
    @property
    @pulumi.getter(name="sharedKey")
    def shared_key(self) -> pulumi.Output[Optional[str]]:
        """
        The IPSec shared key.
        """
        return pulumi.get(self, "shared_key")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="trafficSelectorPolicies")
    def traffic_selector_policies(self) -> pulumi.Output[Optional[Sequence['outputs.TrafficSelectorPolicyResponse']]]:
        """
        The Traffic Selector Policies to be considered by this connection.
        """
        return pulumi.get(self, "traffic_selector_policies")
    # Read-only output: not a constructor input; populated by the provider after deployment.
    @property
    @pulumi.getter(name="tunnelConnectionStatus")
    def tunnel_connection_status(self) -> pulumi.Output[Sequence['outputs.TunnelConnectionHealthResponse']]:
        """
        Collection of all tunnels' connection health status.
        """
        return pulumi.get(self, "tunnel_connection_status")
    # Read-only output: not a constructor input; populated by the provider after deployment.
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="useLocalAzureIpAddress")
    def use_local_azure_ip_address(self) -> pulumi.Output[Optional[bool]]:
        """
        Use private local Azure IP for the connection.
        """
        return pulumi.get(self, "use_local_azure_ip_address")
    @property
    @pulumi.getter(name="usePolicyBasedTrafficSelectors")
    def use_policy_based_traffic_selectors(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable policy-based traffic selectors.
        """
        return pulumi.get(self, "use_policy_based_traffic_selectors")
    @property
    @pulumi.getter(name="virtualNetworkGateway1")
    def virtual_network_gateway1(self) -> pulumi.Output['outputs.VirtualNetworkGatewayResponse']:
        """
        The reference to virtual network gateway resource.
        """
        return pulumi.get(self, "virtual_network_gateway1")
    @property
    @pulumi.getter(name="virtualNetworkGateway2")
    def virtual_network_gateway2(self) -> pulumi.Output[Optional['outputs.VirtualNetworkGatewayResponse']]:
        """
        The reference to virtual network gateway resource.
        """
        return pulumi.get(self, "virtual_network_gateway2")
| 55.3 | 6,521 | 0.705055 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['VirtualNetworkGatewayConnectionArgs', 'VirtualNetworkGatewayConnection']
@pulumi.input_type
class VirtualNetworkGatewayConnectionArgs:
    """The set of arguments for constructing a VirtualNetworkGatewayConnection resource."""
    def __init__(__self__, *,
                 connection_type: pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionType']],
                 resource_group_name: pulumi.Input[str],
                 virtual_network_gateway1: pulumi.Input['VirtualNetworkGatewayArgs'],
                 authorization_key: Optional[pulumi.Input[str]] = None,
                 connection_mode: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionMode']]] = None,
                 connection_protocol: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]] = None,
                 dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,
                 egress_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 enable_bgp: Optional[pulumi.Input[bool]] = None,
                 express_route_gateway_bypass: Optional[pulumi.Input[bool]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 ingress_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 ipsec_policies: Optional[pulumi.Input[Sequence[pulumi.Input['IpsecPolicyArgs']]]] = None,
                 local_network_gateway2: Optional[pulumi.Input['LocalNetworkGatewayArgs']] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 peer: Optional[pulumi.Input['SubResourceArgs']] = None,
                 routing_weight: Optional[pulumi.Input[int]] = None,
                 shared_key: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 traffic_selector_policies: Optional[pulumi.Input[Sequence[pulumi.Input['TrafficSelectorPolicyArgs']]]] = None,
                 use_local_azure_ip_address: Optional[pulumi.Input[bool]] = None,
                 use_policy_based_traffic_selectors: Optional[pulumi.Input[bool]] = None,
                 virtual_network_gateway2: Optional[pulumi.Input['VirtualNetworkGatewayArgs']] = None,
                 virtual_network_gateway_connection_name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionType']] connection_type: Gateway connection type.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input['VirtualNetworkGatewayArgs'] virtual_network_gateway1: The reference to virtual network gateway resource.
        :param pulumi.Input[str] authorization_key: The authorization key.
        :param pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionMode']] connection_mode: The connection mode for this connection.
        :param pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']] connection_protocol: Connection protocol used for this connection.
        :param pulumi.Input[int] dpd_timeout_seconds: The dead peer detection timeout of this connection in seconds.
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] egress_nat_rules: List of egress NatRules.
        :param pulumi.Input[bool] enable_bgp: EnableBgp flag.
        :param pulumi.Input[bool] express_route_gateway_bypass: Bypass ExpressRoute Gateway for data forwarding.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] ingress_nat_rules: List of ingress NatRules.
        :param pulumi.Input[Sequence[pulumi.Input['IpsecPolicyArgs']]] ipsec_policies: The IPSec Policies to be considered by this connection.
        :param pulumi.Input['LocalNetworkGatewayArgs'] local_network_gateway2: The reference to local network gateway resource.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input['SubResourceArgs'] peer: The reference to peerings resource.
        :param pulumi.Input[int] routing_weight: The routing weight.
        :param pulumi.Input[str] shared_key: The IPSec shared key.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[Sequence[pulumi.Input['TrafficSelectorPolicyArgs']]] traffic_selector_policies: The Traffic Selector Policies to be considered by this connection.
        :param pulumi.Input[bool] use_local_azure_ip_address: Use private local Azure IP for the connection.
        :param pulumi.Input[bool] use_policy_based_traffic_selectors: Enable policy-based traffic selectors.
        :param pulumi.Input['VirtualNetworkGatewayArgs'] virtual_network_gateway2: The reference to virtual network gateway resource.
        :param pulumi.Input[str] virtual_network_gateway_connection_name: The name of the virtual network gateway connection.
        """
        pulumi.set(__self__, "connection_type", connection_type)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "virtual_network_gateway1", virtual_network_gateway1)
        # Optional arguments are only recorded when supplied, so unset inputs
        # stay absent rather than becoming explicit None values.
        if authorization_key is not None:
            pulumi.set(__self__, "authorization_key", authorization_key)
        if connection_mode is not None:
            pulumi.set(__self__, "connection_mode", connection_mode)
        if connection_protocol is not None:
            pulumi.set(__self__, "connection_protocol", connection_protocol)
        if dpd_timeout_seconds is not None:
            pulumi.set(__self__, "dpd_timeout_seconds", dpd_timeout_seconds)
        if egress_nat_rules is not None:
            pulumi.set(__self__, "egress_nat_rules", egress_nat_rules)
        if enable_bgp is not None:
            pulumi.set(__self__, "enable_bgp", enable_bgp)
        if express_route_gateway_bypass is not None:
            pulumi.set(__self__, "express_route_gateway_bypass", express_route_gateway_bypass)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if ingress_nat_rules is not None:
            pulumi.set(__self__, "ingress_nat_rules", ingress_nat_rules)
        if ipsec_policies is not None:
            pulumi.set(__self__, "ipsec_policies", ipsec_policies)
        if local_network_gateway2 is not None:
            pulumi.set(__self__, "local_network_gateway2", local_network_gateway2)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if peer is not None:
            pulumi.set(__self__, "peer", peer)
        if routing_weight is not None:
            pulumi.set(__self__, "routing_weight", routing_weight)
        if shared_key is not None:
            pulumi.set(__self__, "shared_key", shared_key)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if traffic_selector_policies is not None:
            pulumi.set(__self__, "traffic_selector_policies", traffic_selector_policies)
        if use_local_azure_ip_address is not None:
            pulumi.set(__self__, "use_local_azure_ip_address", use_local_azure_ip_address)
        if use_policy_based_traffic_selectors is not None:
            pulumi.set(__self__, "use_policy_based_traffic_selectors", use_policy_based_traffic_selectors)
        if virtual_network_gateway2 is not None:
            pulumi.set(__self__, "virtual_network_gateway2", virtual_network_gateway2)
        if virtual_network_gateway_connection_name is not None:
            pulumi.set(__self__, "virtual_network_gateway_connection_name", virtual_network_gateway_connection_name)
    @property
    @pulumi.getter(name="connectionType")
    def connection_type(self) -> pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionType']]:
        """Gateway connection type."""
        return pulumi.get(self, "connection_type")
    @connection_type.setter
    def connection_type(self, value: pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionType']]):
        pulumi.set(self, "connection_type", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """The name of the resource group."""
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="virtualNetworkGateway1")
    def virtual_network_gateway1(self) -> pulumi.Input['VirtualNetworkGatewayArgs']:
        """The reference to virtual network gateway resource."""
        return pulumi.get(self, "virtual_network_gateway1")
    @virtual_network_gateway1.setter
    def virtual_network_gateway1(self, value: pulumi.Input['VirtualNetworkGatewayArgs']):
        pulumi.set(self, "virtual_network_gateway1", value)
    @property
    @pulumi.getter(name="authorizationKey")
    def authorization_key(self) -> Optional[pulumi.Input[str]]:
        """The authorization key."""
        return pulumi.get(self, "authorization_key")
    @authorization_key.setter
    def authorization_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "authorization_key", value)
    @property
    @pulumi.getter(name="connectionMode")
    def connection_mode(self) -> Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionMode']]]:
        """The connection mode for this connection."""
        return pulumi.get(self, "connection_mode")
    @connection_mode.setter
    def connection_mode(self, value: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionMode']]]):
        pulumi.set(self, "connection_mode", value)
    @property
    @pulumi.getter(name="connectionProtocol")
    def connection_protocol(self) -> Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]]:
        """Connection protocol used for this connection."""
        return pulumi.get(self, "connection_protocol")
    @connection_protocol.setter
    def connection_protocol(self, value: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]]):
        pulumi.set(self, "connection_protocol", value)
    @property
    @pulumi.getter(name="dpdTimeoutSeconds")
    def dpd_timeout_seconds(self) -> Optional[pulumi.Input[int]]:
        """The dead peer detection timeout of this connection in seconds."""
        return pulumi.get(self, "dpd_timeout_seconds")
    @dpd_timeout_seconds.setter
    def dpd_timeout_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "dpd_timeout_seconds", value)
    @property
    @pulumi.getter(name="egressNatRules")
    def egress_nat_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """List of egress NatRules."""
        return pulumi.get(self, "egress_nat_rules")
    @egress_nat_rules.setter
    def egress_nat_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "egress_nat_rules", value)
    @property
    @pulumi.getter(name="enableBgp")
    def enable_bgp(self) -> Optional[pulumi.Input[bool]]:
        """EnableBgp flag."""
        return pulumi.get(self, "enable_bgp")
    @enable_bgp.setter
    def enable_bgp(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_bgp", value)
    @property
    @pulumi.getter(name="expressRouteGatewayBypass")
    def express_route_gateway_bypass(self) -> Optional[pulumi.Input[bool]]:
        """Bypass ExpressRoute Gateway for data forwarding."""
        return pulumi.get(self, "express_route_gateway_bypass")
    @express_route_gateway_bypass.setter
    def express_route_gateway_bypass(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "express_route_gateway_bypass", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """Resource ID."""
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="ingressNatRules")
    def ingress_nat_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """List of ingress NatRules."""
        return pulumi.get(self, "ingress_nat_rules")
    @ingress_nat_rules.setter
    def ingress_nat_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "ingress_nat_rules", value)
    @property
    @pulumi.getter(name="ipsecPolicies")
    def ipsec_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpsecPolicyArgs']]]]:
        """The IPSec Policies to be considered by this connection."""
        return pulumi.get(self, "ipsec_policies")
    @ipsec_policies.setter
    def ipsec_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IpsecPolicyArgs']]]]):
        pulumi.set(self, "ipsec_policies", value)
    @property
    @pulumi.getter(name="localNetworkGateway2")
    def local_network_gateway2(self) -> Optional[pulumi.Input['LocalNetworkGatewayArgs']]:
        """The reference to local network gateway resource."""
        return pulumi.get(self, "local_network_gateway2")
    @local_network_gateway2.setter
    def local_network_gateway2(self, value: Optional[pulumi.Input['LocalNetworkGatewayArgs']]):
        pulumi.set(self, "local_network_gateway2", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """Resource location."""
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def peer(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """The reference to peerings resource."""
        return pulumi.get(self, "peer")
    @peer.setter
    def peer(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "peer", value)
    @property
    @pulumi.getter(name="routingWeight")
    def routing_weight(self) -> Optional[pulumi.Input[int]]:
        """The routing weight."""
        return pulumi.get(self, "routing_weight")
    @routing_weight.setter
    def routing_weight(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "routing_weight", value)
    @property
    @pulumi.getter(name="sharedKey")
    def shared_key(self) -> Optional[pulumi.Input[str]]:
        """The IPSec shared key."""
        return pulumi.get(self, "shared_key")
    @shared_key.setter
    def shared_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "shared_key", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Resource tags."""
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="trafficSelectorPolicies")
    def traffic_selector_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TrafficSelectorPolicyArgs']]]]:
        """The Traffic Selector Policies to be considered by this connection."""
        return pulumi.get(self, "traffic_selector_policies")
    @traffic_selector_policies.setter
    def traffic_selector_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TrafficSelectorPolicyArgs']]]]):
        pulumi.set(self, "traffic_selector_policies", value)
    @property
    @pulumi.getter(name="useLocalAzureIpAddress")
    def use_local_azure_ip_address(self) -> Optional[pulumi.Input[bool]]:
        """Use private local Azure IP for the connection."""
        return pulumi.get(self, "use_local_azure_ip_address")
    @use_local_azure_ip_address.setter
    def use_local_azure_ip_address(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_local_azure_ip_address", value)
    @property
    @pulumi.getter(name="usePolicyBasedTrafficSelectors")
    def use_policy_based_traffic_selectors(self) -> Optional[pulumi.Input[bool]]:
        """Enable policy-based traffic selectors."""
        return pulumi.get(self, "use_policy_based_traffic_selectors")
    @use_policy_based_traffic_selectors.setter
    def use_policy_based_traffic_selectors(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_policy_based_traffic_selectors", value)
    @property
    @pulumi.getter(name="virtualNetworkGateway2")
    def virtual_network_gateway2(self) -> Optional[pulumi.Input['VirtualNetworkGatewayArgs']]:
        """The reference to virtual network gateway resource."""
        return pulumi.get(self, "virtual_network_gateway2")
    @virtual_network_gateway2.setter
    def virtual_network_gateway2(self, value: Optional[pulumi.Input['VirtualNetworkGatewayArgs']]):
        pulumi.set(self, "virtual_network_gateway2", value)
    @property
    @pulumi.getter(name="virtualNetworkGatewayConnectionName")
    def virtual_network_gateway_connection_name(self) -> Optional[pulumi.Input[str]]:
        """The name of the virtual network gateway connection."""
        return pulumi.get(self, "virtual_network_gateway_connection_name")
    @virtual_network_gateway_connection_name.setter
    def virtual_network_gateway_connection_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_gateway_connection_name", value)
class VirtualNetworkGatewayConnection(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 authorization_key: Optional[pulumi.Input[str]] = None,
                 connection_mode: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionMode']]] = None,
                 connection_protocol: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]] = None,
                 connection_type: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionType']]] = None,
                 dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,
                 egress_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
                 enable_bgp: Optional[pulumi.Input[bool]] = None,
                 express_route_gateway_bypass: Optional[pulumi.Input[bool]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 ingress_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
                 ipsec_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpsecPolicyArgs']]]]] = None,
                 local_network_gateway2: Optional[pulumi.Input[pulumi.InputType['LocalNetworkGatewayArgs']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 peer: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 routing_weight: Optional[pulumi.Input[int]] = None,
                 shared_key: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 traffic_selector_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrafficSelectorPolicyArgs']]]]] = None,
                 use_local_azure_ip_address: Optional[pulumi.Input[bool]] = None,
                 use_policy_based_traffic_selectors: Optional[pulumi.Input[bool]] = None,
                 virtual_network_gateway1: Optional[pulumi.Input[pulumi.InputType['VirtualNetworkGatewayArgs']]] = None,
                 virtual_network_gateway2: Optional[pulumi.Input[pulumi.InputType['VirtualNetworkGatewayArgs']]] = None,
                 virtual_network_gateway_connection_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Create a VirtualNetworkGatewayConnection resource with the given unique name,
        keyword properties, and options.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: VirtualNetworkGatewayConnectionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a VirtualNetworkGatewayConnection resource from a pre-built args object.

        :param str resource_name: The name of the resource.
        :param VirtualNetworkGatewayConnectionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two @overload signatures above.
        resource_args, opts = _utilities.get_resource_args_opts(VirtualNetworkGatewayConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            # Args-object form: expand its fields into keyword arguments.
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            # Keyword-property form: forward everything unchanged.
            __self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
connection_mode: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionMode']]] = None,
connection_protocol: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]] = None,
connection_type: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionType']]] = None,
dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,
egress_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
enable_bgp: Optional[pulumi.Input[bool]] = None,
express_route_gateway_bypass: Optional[pulumi.Input[bool]] = None,
id: Optional[pulumi.Input[str]] = None,
ingress_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
ipsec_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpsecPolicyArgs']]]]] = None,
local_network_gateway2: Optional[pulumi.Input[pulumi.InputType['LocalNetworkGatewayArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
peer: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
routing_weight: Optional[pulumi.Input[int]] = None,
shared_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
traffic_selector_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrafficSelectorPolicyArgs']]]]] = None,
use_local_azure_ip_address: Optional[pulumi.Input[bool]] = None,
use_policy_based_traffic_selectors: Optional[pulumi.Input[bool]] = None,
virtual_network_gateway1: Optional[pulumi.Input[pulumi.InputType['VirtualNetworkGatewayArgs']]] = None,
virtual_network_gateway2: Optional[pulumi.Input[pulumi.InputType['VirtualNetworkGatewayArgs']]] = None,
virtual_network_gateway_connection_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VirtualNetworkGatewayConnectionArgs.__new__(VirtualNetworkGatewayConnectionArgs)
__props__.__dict__["authorization_key"] = authorization_key
__props__.__dict__["connection_mode"] = connection_mode
__props__.__dict__["connection_protocol"] = connection_protocol
if connection_type is None and not opts.urn:
raise TypeError("Missing required property 'connection_type'")
__props__.__dict__["connection_type"] = connection_type
__props__.__dict__["dpd_timeout_seconds"] = dpd_timeout_seconds
__props__.__dict__["egress_nat_rules"] = egress_nat_rules
__props__.__dict__["enable_bgp"] = enable_bgp
__props__.__dict__["express_route_gateway_bypass"] = express_route_gateway_bypass
__props__.__dict__["id"] = id
__props__.__dict__["ingress_nat_rules"] = ingress_nat_rules
__props__.__dict__["ipsec_policies"] = ipsec_policies
__props__.__dict__["local_network_gateway2"] = local_network_gateway2
__props__.__dict__["location"] = location
__props__.__dict__["peer"] = peer
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["routing_weight"] = routing_weight
__props__.__dict__["shared_key"] = shared_key
__props__.__dict__["tags"] = tags
__props__.__dict__["traffic_selector_policies"] = traffic_selector_policies
__props__.__dict__["use_local_azure_ip_address"] = use_local_azure_ip_address
__props__.__dict__["use_policy_based_traffic_selectors"] = use_policy_based_traffic_selectors
if virtual_network_gateway1 is None and not opts.urn:
raise TypeError("Missing required property 'virtual_network_gateway1'")
__props__.__dict__["virtual_network_gateway1"] = virtual_network_gateway1
__props__.__dict__["virtual_network_gateway2"] = virtual_network_gateway2
__props__.__dict__["virtual_network_gateway_connection_name"] = virtual_network_gateway_connection_name
__props__.__dict__["connection_status"] = None
__props__.__dict__["egress_bytes_transferred"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["ingress_bytes_transferred"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_guid"] = None
__props__.__dict__["tunnel_connection_status"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20210201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20150615:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20150615:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20160330:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20160330:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20160601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20160601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20160901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20160901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20161201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20161201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20170301:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20170301:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20170601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20170601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20170801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20170801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20170901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20170901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20171001:VirtualNetworkGatewayConnection"), 
pulumi.Alias(type_="azure-nextgen:network/v20171001:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20171101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20171101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20181001:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20181101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20181201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VirtualNetworkGatewayConnection"), 
pulumi.Alias(type_="azure-native:network/v20190401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20191101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20191201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200301:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200301:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200501:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200701:VirtualNetworkGatewayConnection"), 
pulumi.Alias(type_="azure-nextgen:network/v20200701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20201101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20201101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20210301:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20210301:VirtualNetworkGatewayConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VirtualNetworkGatewayConnection, __self__).__init__(
'azure-native:network/v20210201:VirtualNetworkGatewayConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualNetworkGatewayConnection':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VirtualNetworkGatewayConnectionArgs.__new__(VirtualNetworkGatewayConnectionArgs)
__props__.__dict__["authorization_key"] = None
__props__.__dict__["connection_mode"] = None
__props__.__dict__["connection_protocol"] = None
__props__.__dict__["connection_status"] = None
__props__.__dict__["connection_type"] = None
__props__.__dict__["dpd_timeout_seconds"] = None
__props__.__dict__["egress_bytes_transferred"] = None
__props__.__dict__["egress_nat_rules"] = None
__props__.__dict__["enable_bgp"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["express_route_gateway_bypass"] = None
__props__.__dict__["ingress_bytes_transferred"] = None
__props__.__dict__["ingress_nat_rules"] = None
__props__.__dict__["ipsec_policies"] = None
__props__.__dict__["local_network_gateway2"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["peer"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_guid"] = None
__props__.__dict__["routing_weight"] = None
__props__.__dict__["shared_key"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["traffic_selector_policies"] = None
__props__.__dict__["tunnel_connection_status"] = None
__props__.__dict__["type"] = None
__props__.__dict__["use_local_azure_ip_address"] = None
__props__.__dict__["use_policy_based_traffic_selectors"] = None
__props__.__dict__["virtual_network_gateway1"] = None
__props__.__dict__["virtual_network_gateway2"] = None
return VirtualNetworkGatewayConnection(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="authorizationKey")
    def authorization_key(self) -> pulumi.Output[Optional[str]]:
        """Output property ``authorizationKey`` (optional str)."""
        return pulumi.get(self, "authorization_key")
    @property
    @pulumi.getter(name="connectionMode")
    def connection_mode(self) -> pulumi.Output[Optional[str]]:
        """Output property ``connectionMode`` (optional str)."""
        return pulumi.get(self, "connection_mode")
    @property
    @pulumi.getter(name="connectionProtocol")
    def connection_protocol(self) -> pulumi.Output[Optional[str]]:
        """Output property ``connectionProtocol`` (optional str)."""
        return pulumi.get(self, "connection_protocol")
    @property
    @pulumi.getter(name="connectionStatus")
    def connection_status(self) -> pulumi.Output[str]:
        """Output property ``connectionStatus`` (str)."""
        return pulumi.get(self, "connection_status")
    @property
    @pulumi.getter(name="connectionType")
    def connection_type(self) -> pulumi.Output[str]:
        """Output property ``connectionType`` (str)."""
        return pulumi.get(self, "connection_type")
    @property
    @pulumi.getter(name="dpdTimeoutSeconds")
    def dpd_timeout_seconds(self) -> pulumi.Output[Optional[int]]:
        """Output property ``dpdTimeoutSeconds`` (optional int)."""
        return pulumi.get(self, "dpd_timeout_seconds")
    @property
    @pulumi.getter(name="egressBytesTransferred")
    def egress_bytes_transferred(self) -> pulumi.Output[float]:
        """Output property ``egressBytesTransferred`` (float)."""
        return pulumi.get(self, "egress_bytes_transferred")
    @property
    @pulumi.getter(name="egressNatRules")
    def egress_nat_rules(self) -> pulumi.Output[Optional[Sequence['outputs.SubResourceResponse']]]:
        """Output property ``egressNatRules`` (optional sequence of SubResourceResponse)."""
        return pulumi.get(self, "egress_nat_rules")
    @property
    @pulumi.getter(name="enableBgp")
    def enable_bgp(self) -> pulumi.Output[Optional[bool]]:
        """Output property ``enableBgp`` (optional bool)."""
        return pulumi.get(self, "enable_bgp")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """Output property ``etag`` (str)."""
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="expressRouteGatewayBypass")
    def express_route_gateway_bypass(self) -> pulumi.Output[Optional[bool]]:
        """Output property ``expressRouteGatewayBypass`` (optional bool)."""
        return pulumi.get(self, "express_route_gateway_bypass")
    @property
    @pulumi.getter(name="ingressBytesTransferred")
    def ingress_bytes_transferred(self) -> pulumi.Output[float]:
        """Output property ``ingressBytesTransferred`` (float)."""
        return pulumi.get(self, "ingress_bytes_transferred")
    @property
    @pulumi.getter(name="ingressNatRules")
    def ingress_nat_rules(self) -> pulumi.Output[Optional[Sequence['outputs.SubResourceResponse']]]:
        """Output property ``ingressNatRules`` (optional sequence of SubResourceResponse)."""
        return pulumi.get(self, "ingress_nat_rules")
    @property
    @pulumi.getter(name="ipsecPolicies")
    def ipsec_policies(self) -> pulumi.Output[Optional[Sequence['outputs.IpsecPolicyResponse']]]:
        """Output property ``ipsecPolicies`` (optional sequence of IpsecPolicyResponse)."""
        return pulumi.get(self, "ipsec_policies")
    @property
    @pulumi.getter(name="localNetworkGateway2")
    def local_network_gateway2(self) -> pulumi.Output[Optional['outputs.LocalNetworkGatewayResponse']]:
        """Output property ``localNetworkGateway2`` (optional LocalNetworkGatewayResponse)."""
        return pulumi.get(self, "local_network_gateway2")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """Output property ``location`` (optional str)."""
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """Output property ``name`` (str)."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def peer(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """Output property ``peer`` (optional SubResourceResponse)."""
        return pulumi.get(self, "peer")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """Output property ``provisioningState`` (str)."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> pulumi.Output[str]:
        """Output property ``resourceGuid`` (str)."""
        return pulumi.get(self, "resource_guid")
    @property
    @pulumi.getter(name="routingWeight")
    def routing_weight(self) -> pulumi.Output[Optional[int]]:
        """Output property ``routingWeight`` (optional int)."""
        return pulumi.get(self, "routing_weight")
    @property
    @pulumi.getter(name="sharedKey")
    def shared_key(self) -> pulumi.Output[Optional[str]]:
        """Output property ``sharedKey`` (optional str)."""
        return pulumi.get(self, "shared_key")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """Output property ``tags`` (optional str-to-str mapping)."""
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="trafficSelectorPolicies")
    def traffic_selector_policies(self) -> pulumi.Output[Optional[Sequence['outputs.TrafficSelectorPolicyResponse']]]:
        """Output property ``trafficSelectorPolicies`` (optional sequence of TrafficSelectorPolicyResponse)."""
        return pulumi.get(self, "traffic_selector_policies")
    @property
    @pulumi.getter(name="tunnelConnectionStatus")
    def tunnel_connection_status(self) -> pulumi.Output[Sequence['outputs.TunnelConnectionHealthResponse']]:
        """Output property ``tunnelConnectionStatus`` (sequence of TunnelConnectionHealthResponse)."""
        return pulumi.get(self, "tunnel_connection_status")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """Output property ``type`` (str)."""
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="useLocalAzureIpAddress")
    def use_local_azure_ip_address(self) -> pulumi.Output[Optional[bool]]:
        """Output property ``useLocalAzureIpAddress`` (optional bool)."""
        return pulumi.get(self, "use_local_azure_ip_address")
    @property
    @pulumi.getter(name="usePolicyBasedTrafficSelectors")
    def use_policy_based_traffic_selectors(self) -> pulumi.Output[Optional[bool]]:
        """Output property ``usePolicyBasedTrafficSelectors`` (optional bool)."""
        return pulumi.get(self, "use_policy_based_traffic_selectors")
    @property
    @pulumi.getter(name="virtualNetworkGateway1")
    def virtual_network_gateway1(self) -> pulumi.Output['outputs.VirtualNetworkGatewayResponse']:
        """Output property ``virtualNetworkGateway1`` (VirtualNetworkGatewayResponse)."""
        return pulumi.get(self, "virtual_network_gateway1")
    @property
    @pulumi.getter(name="virtualNetworkGateway2")
    def virtual_network_gateway2(self) -> pulumi.Output[Optional['outputs.VirtualNetworkGatewayResponse']]:
        """Output property ``virtualNetworkGateway2`` (optional VirtualNetworkGatewayResponse)."""
        return pulumi.get(self, "virtual_network_gateway2")
| true | true |
f733c151669c48589cccb6f5acb4d5b2e3e2031e | 1,309 | py | Python | src/prostatex/normalization.py | piotrsobecki/PCa-CNNs2 | 01504db2037c67dc6832c2c8aaf4b3d5e4f2808f | [
"MIT"
] | 1 | 2022-03-05T06:05:53.000Z | 2022-03-05T06:05:53.000Z | src/prostatex/normalization.py | piotrsobecki/PCa-CNNs2 | 01504db2037c67dc6832c2c8aaf4b3d5e4f2808f | [
"MIT"
] | 1 | 2021-01-03T02:25:31.000Z | 2021-01-03T02:25:31.000Z | src/prostatex/normalization.py | piotrsobecki/PCa-CNNs | 01504db2037c67dc6832c2c8aaf4b3d5e4f2808f | [
"MIT"
] | null | null | null | import numpy
# Normalization functions
class NormalizationNo():
def normalize(self, img, settings=None):
if settings is None:
settings = {}
return img
class NormalizationMean(NormalizationNo):
def normalize(self, img, settings=None):
if settings is None:
settings = {}
if img.std() == 0:
return img
return (img - img.mean()) / img.std()
class NormalizationMedian(NormalizationNo):
    """Scale the image by ``median(img) + 2 * std(img)``."""

    def normalize(self, img, settings=None):
        """Divide *img* by its median plus two standard deviations.

        When that scale factor is zero the image is returned unchanged
        (guards against division by zero).
        """
        if settings is None:
            settings = {}
        scale = numpy.median(img) + 2 * img.std()
        return img if scale == 0.0 else img / scale
class NormalizationFeatureScaling(NormalizationNo):
    """Min-max feature scaling of the image into the [vmin, vmax] range."""

    def __init__(self, vmin=0, vmax=1):
        # Target range bounds for the rescaled values.
        self.vmin = vmin
        self.vmax = vmax

    def normalize(self, img, settings=None):
        """Linearly rescale *img* so its minimum maps to vmin and its maximum to vmax.

        A constant image (zero value range) is returned unchanged to avoid
        dividing by zero.
        """
        if settings is None:
            settings = {}
        old_min = img.min()
        old_range = img.max() - old_min
        if old_range == 0.0:
            return img
        new_range = self.vmax - self.vmin
        return (((img - old_min) * new_range) / old_range) + self.vmin
| 26.18 | 73 | 0.574484 | import numpy
class NormalizationNo():
def normalize(self, img, settings=None):
if settings is None:
settings = {}
return img
class NormalizationMean(NormalizationNo):
def normalize(self, img, settings=None):
if settings is None:
settings = {}
if img.std() == 0:
return img
return (img - img.mean()) / img.std()
class NormalizationMedian(NormalizationNo):
def normalize(self, img, settings=None):
if settings is None:
settings = {}
denominator = numpy.median(img) + 2 * img.std()
if denominator == 0.0:
return img
return img / denominator
class NormalizationFeatureScaling(NormalizationNo):
def __init__(self, vmin=0, vmax=1):
self.vmin=vmin
self.vmax=vmax
def normalize(self, img, settings=None):
if settings is None:
settings = {}
OldValue = img
OldMin = img.min()
OldMax = img.max()
NewMax = self.vmax
NewMin = self.vmin
OldRange = (OldMax - OldMin)
NewRange = (NewMax - NewMin)
if OldRange == 0.0:
return img
NewValue = (((OldValue - OldMin) * NewRange) / OldRange) + NewMin
return NewValue
| true | true |
f733c3024f5de2da9f1a4e167beb1c1d4d8b6c34 | 3,845 | py | Python | src/api/datamanage/tests/lifecycle/lifecycle_metric/test_lifecycle_trend.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 84 | 2021-06-30T06:20:23.000Z | 2022-03-22T03:05:49.000Z | src/api/datamanage/tests/lifecycle/lifecycle_metric/test_lifecycle_trend.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 7 | 2021-06-30T06:21:16.000Z | 2022-03-29T07:36:13.000Z | src/api/datamanage/tests/lifecycle/lifecycle_metric/test_lifecycle_trend.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 40 | 2021-06-30T06:21:26.000Z | 2022-03-29T12:42:26.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.test import TestCase
from rest_framework.reverse import reverse
from tests.utils import UnittestClient
class TestLifecycleTrend(TestCase):
    """Tests for the lifecycle trend endpoints (asset value, value-to-cost,
    importance, range and heat)."""

    # Every endpoint is queried for the same sample dataset.
    DATASET_PARAMS = {"dataset_id": "591_durant1115", "dataset_type": "result_table"}

    def setUp(self):
        pass

    def _get_trend(self, url_name):
        """GET the named trend endpoint for the sample dataset and check the
        common response shape (success flag, non-empty score/time series).

        Returns the response so callers can assert endpoint-specific fields.
        """
        client = UnittestClient()
        response = client.get(reverse(url_name), dict(self.DATASET_PARAMS))
        assert response.is_success()
        assert len(response.data['score']) > 0
        assert len(response.data['time']) > 0
        return response

    def test_asset_value_trend(self):
        """Asset-value trend endpoint returns score/time series."""
        self._get_trend('asset_value-trend/asset-value')

    def test_assetvalue_to_cost_trend(self):
        """Value-to-cost ratio trend endpoint returns score/time series."""
        self._get_trend('asset_value-trend/assetvalue-to-cost')

    def test_importance_trend(self):
        """Importance trend endpoint returns score/time series."""
        self._get_trend('asset_value-trend/importance')

    def test_range_trend(self):
        """Range (breadth) trend endpoint also returns biz/project counts."""
        response = self._get_trend('range-list-range-metric-by-influxdb')
        assert len(response.data['biz_count']) > 0
        assert len(response.data['proj_count']) > 0

    def test_heat_trend(self):
        """Heat trend endpoint also returns query-count series."""
        response = self._get_trend('heat-list-heat-metric-by-influxdb')
        assert len(response.data['query_count']) > 0
        assert len(response.data['day_query_count']) > 0
| 43.202247 | 111 | 0.671001 |
from django.test import TestCase
from rest_framework.reverse import reverse
from tests.utils import UnittestClient
class TestLifecycleTrend(TestCase):
def setUp(self):
pass
def test_asset_value_trend(self):
params = {"dataset_id": "591_durant1115", "dataset_type": "result_table"}
client = UnittestClient()
url = reverse('asset_value-trend/asset-value')
response = client.get(url, params)
assert response.is_success()
assert len(response.data['score']) > 0
assert len(response.data['time']) > 0
def test_assetvalue_to_cost_trend(self):
params = {"dataset_id": "591_durant1115", "dataset_type": "result_table"}
client = UnittestClient()
url = reverse('asset_value-trend/assetvalue-to-cost')
response = client.get(url, params)
assert response.is_success()
assert len(response.data['score']) > 0
assert len(response.data['time']) > 0
def test_importance_trend(self):
params = {"dataset_id": "591_durant1115", "dataset_type": "result_table"}
client = UnittestClient()
url = reverse('asset_value-trend/importance')
response = client.get(url, params)
assert response.is_success()
assert len(response.data['score']) > 0
assert len(response.data['time']) > 0
def test_range_trend(self):
params = {"dataset_id": "591_durant1115", "dataset_type": "result_table"}
client = UnittestClient()
url = reverse('range-list-range-metric-by-influxdb')
response = client.get(url, params)
assert response.is_success()
assert len(response.data['score']) > 0
assert len(response.data['time']) > 0
assert len(response.data['biz_count']) > 0
assert len(response.data['proj_count']) > 0
def test_heat_trend(self):
params = {"dataset_id": "591_durant1115", "dataset_type": "result_table"}
client = UnittestClient()
url = reverse('heat-list-heat-metric-by-influxdb')
response = client.get(url, params)
assert response.is_success()
assert len(response.data['score']) > 0
assert len(response.data['time']) > 0
assert len(response.data['query_count']) > 0
assert len(response.data['day_query_count']) > 0
| true | true |
f733c5a465faf703b08dcc3e28497c53a696bb95 | 1,657 | py | Python | pp/import_phidl_component.py | flaport/gdsfactory | 1f2e844c1fe27b9c6340e2d51500fd3358fa16e5 | [
"MIT"
] | 8 | 2020-08-25T11:25:18.000Z | 2022-03-27T11:32:11.000Z | pp/import_phidl_component.py | flaport/gdsfactory | 1f2e844c1fe27b9c6340e2d51500fd3358fa16e5 | [
"MIT"
] | null | null | null | pp/import_phidl_component.py | flaport/gdsfactory | 1f2e844c1fe27b9c6340e2d51500fd3358fa16e5 | [
"MIT"
] | 1 | 2022-03-04T07:03:29.000Z | 2022-03-04T07:03:29.000Z | import copy
from phidl.device_layout import Device
from pp.cell import cell
from pp.component import Component, ComponentReference, Port
from pp.config import call_if_func
@cell
def import_phidl_component(component: Device, **kwargs) -> Component:
    """Return a gdsfactory ``Component`` copied from a phidl ``Device``.

    Args:
        component: a phidl ``Device`` instance, or a callable producing one
            (``call_if_func`` invokes callables with ``**kwargs``).
        kwargs: forwarded to ``component`` when it is a callable.

    Copies references (with their aliases), ports, polygons and labels into a
    fresh ``Component``. The referenced child devices themselves are shared,
    not deep-copied; only ``info`` is deep-copied.
    """
    D = call_if_func(component, **kwargs)
    D_copy = Component(name=D._internal_name)
    D_copy.info = copy.deepcopy(D.info)
    # Re-create each reference on the copy, preserving its placement transform.
    for ref in D.references:
        new_ref = ComponentReference(
            device=ref.parent,
            origin=ref.origin,
            rotation=ref.rotation,
            magnification=ref.magnification,
            x_reflection=ref.x_reflection,
        )
        new_ref.owner = D_copy
        D_copy.add(new_ref)
        # Carry over any aliases that pointed at this reference.
        for alias_name, alias_ref in D.aliases.items():
            if alias_ref == ref:
                D_copy.aliases[alias_name] = new_ref
    # Clone the ports. NOTE(review): ``parent`` still points at the original
    # port's parent, not D_copy -- presumably intentional; confirm upstream.
    for p in D.ports.values():
        D_copy.add_port(
            port=Port(
                name=p.name,
                midpoint=p.midpoint,
                width=p.width,
                orientation=p.orientation,
                parent=p.parent,
            )
        )
    for poly in D.polygons:
        D_copy.add_polygon(poly)
    # Labels keep their text/position; layer is the (layer, texttype) pair.
    for label in D.labels:
        D_copy.add_label(
            text=label.text,
            position=label.position,
            layer=(label.layer, label.texttype),
        )
    return D_copy
if __name__ == "__main__":
    import phidl.geometry as pg

    import pp

    # Demo: convert a phidl Device into a gdsfactory Component and display it.
    # (Removed a dead `c = pg.rectangle()` that was immediately overwritten.)
    c = pg.snspd()
    c2 = import_phidl_component(component=c)
    print(c2.ports)
    pp.show(c2)
| 26.301587 | 70 | 0.598672 | import copy
from phidl.device_layout import Device
from pp.cell import cell
from pp.component import Component, ComponentReference, Port
from pp.config import call_if_func
@cell
def import_phidl_component(component: Device, **kwargs) -> Component:
D = call_if_func(component, **kwargs)
D_copy = Component(name=D._internal_name)
D_copy.info = copy.deepcopy(D.info)
for ref in D.references:
new_ref = ComponentReference(
device=ref.parent,
origin=ref.origin,
rotation=ref.rotation,
magnification=ref.magnification,
x_reflection=ref.x_reflection,
)
new_ref.owner = D_copy
D_copy.add(new_ref)
for alias_name, alias_ref in D.aliases.items():
if alias_ref == ref:
D_copy.aliases[alias_name] = new_ref
for p in D.ports.values():
D_copy.add_port(
port=Port(
name=p.name,
midpoint=p.midpoint,
width=p.width,
orientation=p.orientation,
parent=p.parent,
)
)
for poly in D.polygons:
D_copy.add_polygon(poly)
for label in D.labels:
D_copy.add_label(
text=label.text,
position=label.position,
layer=(label.layer, label.texttype),
)
return D_copy
if __name__ == "__main__":
import phidl.geometry as pg
import pp
c = pg.rectangle()
c = pg.snspd()
c2 = import_phidl_component(component=c)
print(c2.ports)
pp.show(c2)
| true | true |
f733c5f9917b7f270a25970745b6764b9b5fd752 | 773 | py | Python | ocdskingfisherviews/cli/commands/field_counts.py | CDSPY/kingfisher-views | f6d7d4896f5ab714376ff819db71f90ed2ba5488 | [
"BSD-3-Clause"
] | null | null | null | ocdskingfisherviews/cli/commands/field_counts.py | CDSPY/kingfisher-views | f6d7d4896f5ab714376ff819db71f90ed2ba5488 | [
"BSD-3-Clause"
] | null | null | null | ocdskingfisherviews/cli/commands/field_counts.py | CDSPY/kingfisher-views | f6d7d4896f5ab714376ff819db71f90ed2ba5488 | [
"BSD-3-Clause"
] | null | null | null | import sqlalchemy as sa
import ocdskingfisherviews.cli.commands.base
from ocdskingfisherviews.field_counts import FieldCounts
class FieldCountsCommand(ocdskingfisherviews.cli.commands.base.CLICommand):
    """CLI command that computes (or removes) the field_counts table for a view."""

    command = 'field-counts'

    def configure_subparser(self, subparser):
        """Register this command's positional and optional arguments."""
        subparser.add_argument("viewname", help="Name Of View")
        subparser.add_argument(
            "--remove", action='store_true',
            help="Remove the field_counts table")
        subparser.add_argument(
            "--threads", type=int, default=1,
            help="Amount of threads to use")

    def run_logged_command(self, args):
        """Connect to the database and run the field-count computation."""
        db_engine = sa.create_engine(self.database_uri)
        FieldCounts(engine=db_engine).run(
            args.viewname, remove=args.remove, threads=args.threads)
| 36.809524 | 101 | 0.746442 | import sqlalchemy as sa
import ocdskingfisherviews.cli.commands.base
from ocdskingfisherviews.field_counts import FieldCounts
class FieldCountsCommand(ocdskingfisherviews.cli.commands.base.CLICommand):
command = 'field-counts'
def configure_subparser(self, subparser):
subparser.add_argument("viewname", help="Name Of View")
subparser.add_argument("--remove", help="Remove the field_counts table", action='store_true')
subparser.add_argument("--threads", help="Amount of threads to use", type=int, default=1)
def run_logged_command(self, args):
engine = sa.create_engine(self.database_uri)
field_counts = FieldCounts(engine=engine)
field_counts.run(args.viewname, remove=args.remove, threads=args.threads)
| true | true |
f733c6c6ec7585e4d981a7ead7cb6497d1fbd585 | 1,947 | py | Python | tests/mail.py | freifunkh/ansible | ff2f734a033d37d9a602b659e57d3e5f4c0d21a8 | [
"MIT"
] | 5 | 2017-03-01T06:41:02.000Z | 2021-12-10T15:44:57.000Z | tests/mail.py | freifunkh/ansible | ff2f734a033d37d9a602b659e57d3e5f4c0d21a8 | [
"MIT"
] | 213 | 2017-01-29T01:23:50.000Z | 2022-03-12T08:18:44.000Z | tests/mail.py | freifunkh/ansible | ff2f734a033d37d9a602b659e57d3e5f4c0d21a8 | [
"MIT"
] | 10 | 2017-01-22T17:53:14.000Z | 2021-12-18T15:46:22.000Z | #!/usr/bin/env python3
import datetime
import pytz
import smtplib, ssl
import re
from email.utils import make_msgid
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
from email.charset import Charset, QP
import os
import sys
sys.path.insert(0, os.path.dirname(__file__))
from read_play import *
SMTP_HOST = 'mail.ffh.zone'
SMTP_PORT = 25
SMTP_USE_STARTTLS = False
SMTP_FROM = "auto@ruettgers.ffh.zone"
SMTP_REPLY_TO_EMAIL = "monitoring@hannover.freifunk.net"
SMTP_TO = 'monitoring@hannover.freifunk.net'
def send_mail(subject, message, message_html, to):
msgid = make_msgid()
msg = MIMEMultipart('alternative')
msg['Subject'] = str(Header(subject, 'utf-8'))
msg['From'] = str(Header(SMTP_FROM, 'utf-8'))
msg['To'] = str(Header(to, 'utf-8'))
msg['Message-ID'] = msgid
msg['Reply-To'] = SMTP_REPLY_TO_EMAIL
msg['Date'] = datetime.datetime.now(pytz.utc).strftime("%a, %e %b %Y %T %z")
# add message
charset = Charset('utf-8')
# QP = quoted printable; this is better readable instead of base64, when
# the mail is read in plaintext!
charset.body_encoding = QP
message_part = MIMEText(message.encode('utf-8'), 'plain', charset)
msg.attach(message_part)
message_part2 = MIMEText(message_html.encode('utf-8'), 'html', charset)
msg.attach(message_part2)
with smtplib.SMTP(SMTP_HOST, SMTP_PORT) as server:
server.ehlo()
if SMTP_USE_STARTTLS:
context = ssl.create_default_context()
server.starttls(context=context)
server.sendmail(SMTP_FROM, to, msg.as_string())
if __name__ == '__main__':
filename = sys.argv[1]
machine = re.findall('^.*-(.*?)\.json$', filename)[0]
msg_txt = read_play(filename)
msg_html = read_play(filename, 'html')
if msg_txt:
send_mail('Daily Report of Ansible Run on '+machine, msg_txt, msg_html, SMTP_TO)
| 29.5 | 88 | 0.69132 |
import datetime
import pytz
import smtplib, ssl
import re
from email.utils import make_msgid
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
from email.charset import Charset, QP
import os
import sys
sys.path.insert(0, os.path.dirname(__file__))
from read_play import *
SMTP_HOST = 'mail.ffh.zone'
SMTP_PORT = 25
SMTP_USE_STARTTLS = False
SMTP_FROM = "auto@ruettgers.ffh.zone"
SMTP_REPLY_TO_EMAIL = "monitoring@hannover.freifunk.net"
SMTP_TO = 'monitoring@hannover.freifunk.net'
def send_mail(subject, message, message_html, to):
msgid = make_msgid()
msg = MIMEMultipart('alternative')
msg['Subject'] = str(Header(subject, 'utf-8'))
msg['From'] = str(Header(SMTP_FROM, 'utf-8'))
msg['To'] = str(Header(to, 'utf-8'))
msg['Message-ID'] = msgid
msg['Reply-To'] = SMTP_REPLY_TO_EMAIL
msg['Date'] = datetime.datetime.now(pytz.utc).strftime("%a, %e %b %Y %T %z")
charset = Charset('utf-8')
charset.body_encoding = QP
message_part = MIMEText(message.encode('utf-8'), 'plain', charset)
msg.attach(message_part)
message_part2 = MIMEText(message_html.encode('utf-8'), 'html', charset)
msg.attach(message_part2)
with smtplib.SMTP(SMTP_HOST, SMTP_PORT) as server:
server.ehlo()
if SMTP_USE_STARTTLS:
context = ssl.create_default_context()
server.starttls(context=context)
server.sendmail(SMTP_FROM, to, msg.as_string())
if __name__ == '__main__':
filename = sys.argv[1]
machine = re.findall('^.*-(.*?)\.json$', filename)[0]
msg_txt = read_play(filename)
msg_html = read_play(filename, 'html')
if msg_txt:
send_mail('Daily Report of Ansible Run on '+machine, msg_txt, msg_html, SMTP_TO)
| true | true |
f733c6cdb0d4908cc999ae0ed1d9aa2b5cc89bd9 | 5,951 | py | Python | flaskblog/routesapi.py | phamtho123/Project_python | 9ce442ed0ba100420e1a9d903d94a922f06abd47 | [
"MIT"
] | null | null | null | flaskblog/routesapi.py | phamtho123/Project_python | 9ce442ed0ba100420e1a9d903d94a922f06abd47 | [
"MIT"
] | null | null | null | flaskblog/routesapi.py | phamtho123/Project_python | 9ce442ed0ba100420e1a9d903d94a922f06abd47 | [
"MIT"
] | null | null | null | import sys
from flask import request, jsonify, abort
from flaskblog import app, db, bcrypt
from flaskblog.models import Token, Post, User
import datetime
# method used to create a token that can be used for some time defined by the delta
@app.route('/api/token/public', methods=['POST'])
def get_token():
data = request.form # gets the JSON sent by the user
if 'email' not in data or 'password' not in data:
# in this case, we do not have enough information to perform a login
return abort(400) # HTTP code 400: bad request
user = User.query.filter_by(email=data['email']).first()
if user and bcrypt.check_password_hash(user.password, data['password']):
# if login info is correct, create a new token
expired = datetime.datetime.now() + datetime.timedelta(minutes=60)
token_string = bcrypt.generate_password_hash(str(expired)).decode('utf-8')
new_token = Token(token=token_string, date_expired=expired, user_id=user.id)
db.session.add(new_token)
try:
db.session.commit()
return jsonify({'token': token_string,
'message': 'Login successful!',
'user_id': user.id,
'expire': expired.strftime('%Y-%m-%d %H:%M:%S')})
except:
db.session.rollback()
return abort(400) # HTTP code 400: bad request
else:
info = dict(message='Login Unsuccessful. Please check email and password.')
return jsonify(info)
# method used to inform the user of the webservice regarding its capabilities
@app.route('/api/', methods=['GET'])
def api():
info = dict()
info['message'] = 'This is the API to consume blog posts'
info['services'] = []
info['services'].append({'url': '/api/posts', 'method': 'GET', 'description': 'Gets a list of posts'})
return jsonify(info)
# method that returns all the posts
@app.route('/api/posts', methods=['GET'])
def api_get_posts():
posts = Post.query.all()
return jsonify(posts)
# method that returns a specific post
@app.route('/api/post/<int:post_id>', methods=['GET'])
def api_get_post(post_id):
post = Post.query.get_or_404(post_id)
return jsonify(post)
# method that inserts a new post
# note that the JSON received should have the key 'user' containing the user_id
@app.route('/api/posts', methods=['POST'])
def api_create_post():
data = request.json # gets the JSON sent by the user
token_string = request.headers['Authorization'].split(' ')[1]
token = Token.query.filter_by(token=token_string).first()
# the conditional should make sure that all the non-null attributes are present in the
# data sent by the call
if 'title' in data and 'content_type' in data and 'content' in data:
post = Post(title=data['title'],
content_type=data['content_type'],
content=data['content'],
user_id=token.user_id)
db.session.add(post)
try:
db.session.commit()
return jsonify(post), 201 # status 201 means "CREATED"
except Exception as e:
print('The WebService API experienced an error: ', e, file=sys.stderr)
# to have more detailed exception messages, check the content of lecture 7
db.session.rollback()
abort(400)
else:
return abort(400) # HTTP code 400: bad request
# method PUT replaces the entire object, i.e., changes all the attributes
@app.route('/api/post/<int:post_id>', methods=['PUT'])
def api_update_post(post_id):
post = Post.query.get_or_404(post_id) # makes sure that the post_id exists
data = request.json
# verifying if the token used is of the user that is author of the post
token_string = request.headers['Authorization'].split(' ')[1]
cur_token = Token.query.filter_by(token=token_string).first()
if cur_token.user_id != post.user_id:
abort(401)
# the conditional should make sure that all the non-null attributes are present in the
# data sent by the call
if 'title' in data and 'content_type' in data and 'content' in data and 'user' in data:
post.title = data['title']
post.content_type = data['content_type']
post.content = data['content']
try:
db.session.commit()
return jsonify(post), 200
except:
# to have more detailed exception messages, check the content of lecture 7
db.sesion.rollback()
abort(400)
else:
return abort(400) # HTTP code 400: bad request
# method PATCH changes only a few (not always all) the attributes of the object
@app.route('/api/post/<int:post_id>', methods=['PATCH'])
def api_replace_post(post_id):
post = Post.query.get_or_404(post_id)
data = request.json
# verifying if the token used is of the user that is author of the post
token_string = request.headers['Authorization'].split(' ')[1]
cur_token = Token.query.filter_by(token=token_string).first()
if cur_token.user_id != post.user_id:
abort(401)
# you should have at least one of the columns to be able to perform an update
if 'title' in data or 'content_type' in data or 'content' in data:
# the conditionals below check each of the possible attributes to be modified
if 'title' in data:
post.title = data['title']
if 'content_type' in data:
post.content_type = data['content_type']
if 'content' in data:
post.content = data['content']
try:
db.session.commit()
return jsonify(post), 200
except:
# to have more detailed exception messages, check the content of lecture 7
db.sesion.rollback()
abort(400)
else:
return abort(400) # HTTP code 400: bad request
@app.route('/api/post/<int:post_id>', methods=['DELETE'])
def api_delete_post(post_id):
post = Post.query.get_or_404(post_id)
# verifying if the token used is of the user that is author of the post
token_string = request.headers['Authorization'].split(' ')[1]
cur_token = Token.query.filter_by(token=token_string).first()
if cur_token.user_id != post.user_id:
abort(401)
db.session.delete(post)
try:
db.session.commit()
return jsonify({'message': f'Post {post_id} deleted'}), 200
except:
# to have more detailed exception messages, check the content of lecture 7
db.session.rollback()
abort(400) # HTTP code 400: bad request | 34.80117 | 103 | 0.715678 | import sys
from flask import request, jsonify, abort
from flaskblog import app, db, bcrypt
from flaskblog.models import Token, Post, User
import datetime
@app.route('/api/token/public', methods=['POST'])
def get_token():
data = request.form
if 'email' not in data or 'password' not in data:
return abort(400)
user = User.query.filter_by(email=data['email']).first()
if user and bcrypt.check_password_hash(user.password, data['password']):
expired = datetime.datetime.now() + datetime.timedelta(minutes=60)
token_string = bcrypt.generate_password_hash(str(expired)).decode('utf-8')
new_token = Token(token=token_string, date_expired=expired, user_id=user.id)
db.session.add(new_token)
try:
db.session.commit()
return jsonify({'token': token_string,
'message': 'Login successful!',
'user_id': user.id,
'expire': expired.strftime('%Y-%m-%d %H:%M:%S')})
except:
db.session.rollback()
return abort(400)
else:
info = dict(message='Login Unsuccessful. Please check email and password.')
return jsonify(info)
@app.route('/api/', methods=['GET'])
def api():
info = dict()
info['message'] = 'This is the API to consume blog posts'
info['services'] = []
info['services'].append({'url': '/api/posts', 'method': 'GET', 'description': 'Gets a list of posts'})
return jsonify(info)
@app.route('/api/posts', methods=['GET'])
def api_get_posts():
posts = Post.query.all()
return jsonify(posts)
@app.route('/api/post/<int:post_id>', methods=['GET'])
def api_get_post(post_id):
post = Post.query.get_or_404(post_id)
return jsonify(post)
@app.route('/api/posts', methods=['POST'])
def api_create_post():
data = request.json
token_string = request.headers['Authorization'].split(' ')[1]
token = Token.query.filter_by(token=token_string).first()
if 'title' in data and 'content_type' in data and 'content' in data:
post = Post(title=data['title'],
content_type=data['content_type'],
content=data['content'],
user_id=token.user_id)
db.session.add(post)
try:
db.session.commit()
return jsonify(post), 201
except Exception as e:
print('The WebService API experienced an error: ', e, file=sys.stderr)
db.session.rollback()
abort(400)
else:
return abort(400)
@app.route('/api/post/<int:post_id>', methods=['PUT'])
def api_update_post(post_id):
post = Post.query.get_or_404(post_id)
data = request.json
token_string = request.headers['Authorization'].split(' ')[1]
cur_token = Token.query.filter_by(token=token_string).first()
if cur_token.user_id != post.user_id:
abort(401)
if 'title' in data and 'content_type' in data and 'content' in data and 'user' in data:
post.title = data['title']
post.content_type = data['content_type']
post.content = data['content']
try:
db.session.commit()
return jsonify(post), 200
except:
db.sesion.rollback()
abort(400)
else:
return abort(400)
@app.route('/api/post/<int:post_id>', methods=['PATCH'])
def api_replace_post(post_id):
post = Post.query.get_or_404(post_id)
data = request.json
token_string = request.headers['Authorization'].split(' ')[1]
cur_token = Token.query.filter_by(token=token_string).first()
if cur_token.user_id != post.user_id:
abort(401)
if 'title' in data or 'content_type' in data or 'content' in data:
if 'title' in data:
post.title = data['title']
if 'content_type' in data:
post.content_type = data['content_type']
if 'content' in data:
post.content = data['content']
try:
db.session.commit()
return jsonify(post), 200
except:
db.sesion.rollback()
abort(400)
else:
return abort(400)
@app.route('/api/post/<int:post_id>', methods=['DELETE'])
def api_delete_post(post_id):
post = Post.query.get_or_404(post_id)
token_string = request.headers['Authorization'].split(' ')[1]
cur_token = Token.query.filter_by(token=token_string).first()
if cur_token.user_id != post.user_id:
abort(401)
db.session.delete(post)
try:
db.session.commit()
return jsonify({'message': f'Post {post_id} deleted'}), 200
except:
db.session.rollback()
abort(400) | true | true |
f733c7a98385296b9a9f7f67eb769d4bebef7170 | 15,880 | py | Python | lib/googlecloudsdk/api_lib/compute/base_classes_resource_registry.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/api_lib/compute/base_classes_resource_registry.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/api_lib/compute/base_classes_resource_registry.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A list of resources and their canonical format. This is deprecated."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.core.resource import resource_info
RESOURCE_REGISTRY = {
'compute.addresses':
resource_info.ResourceInfo(
cache_command='compute addresses list',
list_format="""
table(
name,
region.basename(),
address,
status
)
""",),
'compute.autoscalers':
resource_info.ResourceInfo(
async_collection='compute.operations',
cache_command='compute autoscaler list',
list_format="""
table(
name,
target.basename(),
autoscalingPolicy.policy():label=POLICY
)
""",),
'compute.backendBuckets':
resource_info.ResourceInfo(
list_format="""
table(
name,
bucketName:label=GCS_BUCKET_NAME,
enableCdn
)
""",),
'compute.backendServiceGroupHealth':
resource_info.ResourceInfo(
list_format="""
default
""",),
'compute.backendServices':
resource_info.ResourceInfo(
cache_command='compute backend-services list',
list_format="""
table(
name,
backends[].group.list():label=BACKENDS,
protocol
)
""",),
'compute.backendServices.alpha':
resource_info.ResourceInfo(
cache_command='compute backend-services list',
list_format="""
table(
name,
backends[].group.list():label=BACKENDS,
protocol,
loadBalancingScheme,
healthChecks.map().basename().list()
)
""",),
'compute.regionBackendServices':
resource_info.ResourceInfo(
cache_command='compute backend-services list',
list_format="""
table(
name,
backends[].group.list():label=BACKENDS,
protocol,
loadBalancingScheme,
healthChecks.map().basename().list()
)
""",),
'compute.commitments':
resource_info.ResourceInfo(
cache_command='compute commitments list',
list_format="""
table(name,
region.basename(),
endTimestamp,
status)
""",),
'compute.disks':
resource_info.ResourceInfo(
cache_command='compute disks list',
list_format="""
table(
name,
zone.basename(),
sizeGb,
type.basename(),
status
)
""",),
'compute.diskTypes':
resource_info.ResourceInfo(
cache_command='compute disk-types list',
list_format="""
table(
name,
zone.basename(),
validDiskSize:label=VALID_DISK_SIZES
)
""",),
'compute.diskTypes.alpha':
resource_info.ResourceInfo(
cache_command='compute disk-types list',
list_format="""
table(
name,
location():label=LOCATION,
location_scope():label=SCOPE,
validDiskSize:label=VALID_DISK_SIZES
)
""",),
'compute.firewalls':
resource_info.ResourceInfo(
cache_command='compute firewall-rules list',
list_format="""
table(
name,
network.basename(),
direction,
priority,
allowed[].map().firewall_rule().list():label=ALLOW,
denied[].map().firewall_rule().list():label=DENY
)
""",),
'compute.forwardingRules':
resource_info.ResourceInfo(
cache_command='compute forwarding-rules list',
list_format="""
table(
name,
region.basename(),
IPAddress,
IPProtocol,
firstof(
target,
backendService).scope():label=TARGET
)
""",),
'compute.groups':
resource_info.ResourceInfo(
cache_command='compute groups list',
list_format="""
table(
name,
members.len():label=NUM_MEMBERS,
description
)
""",),
'compute.healthChecks':
resource_info.ResourceInfo(
cache_command='compute health-checks list',
list_format="""
table(
name,
type:label=PROTOCOL
)
""",),
'compute.httpHealthChecks':
resource_info.ResourceInfo(
cache_command='compute http-health-checks list',
list_format="""
table(
name,
host,
port,
requestPath
)
""",),
'compute.httpsHealthChecks':
resource_info.ResourceInfo(
cache_command='compute https-health-checks list',
list_format="""
table(
name,
host,
port,
requestPath
)
""",),
'compute.images':
resource_info.ResourceInfo(
cache_command='compute images list',
list_format="""
table(
name,
selfLink.map().scope(projects).segment(0):label=PROJECT,
family,
deprecated.state:label=DEPRECATED,
status
)
""",),
'compute.instanceGroups':
resource_info.ResourceInfo(
cache_command='compute instance-groups list',
list_format="""
table(
name,
location():label=LOCATION,
location_scope():label=SCOPE,
network.basename(),
isManaged:label=MANAGED,
size:label=INSTANCES
)
""",),
'compute.instanceGroupManagers':
resource_info.ResourceInfo(
cache_command='compute instance-groups managed list',
list_format="""
table(
name,
location():label=LOCATION,
location_scope():label=SCOPE,
baseInstanceName,
size,
targetSize,
instanceTemplate.basename(),
autoscaled
)
""",),
'compute.instances':
resource_info.ResourceInfo(
async_collection='compute.operations',
cache_command='compute instances list',
list_format="""
table(
name,
zone.basename(),
machineType.machine_type().basename(),
scheduling.preemptible.yesno(yes=true, no=''),
networkInterfaces[].networkIP.notnull().list():label=INTERNAL_IP,
networkInterfaces[].accessConfigs[0].natIP.notnull().list()\
:label=EXTERNAL_IP,
status
)
""",),
'compute.instanceTemplates':
resource_info.ResourceInfo(
cache_command='compute instance-templates list',
list_format="""
table(
name,
properties.machineType.machine_type(),
properties.scheduling.preemptible.yesno(yes=true, no=''),
creationTimestamp
)
""",),
'compute.invalidations':
resource_info.ResourceInfo(
cache_command='beta compute url-maps list-cdn-cache-invalidations',
list_format="""
table(
description,
operation_http_status():label=HTTP_STATUS,
status,
insertTime:label=TIMESTAMP
)
""",),
'compute.machineTypes':
resource_info.ResourceInfo(
cache_command='compute machine-types list',
list_format="""
table(
name,
zone.basename(),
guestCpus:label=CPUS,
memoryMb.size(units_in=MiB, units_out=GiB, precision=2):label=MEMORY_GB,
deprecated.state:label=DEPRECATED
)
""",),
'compute.networks':
resource_info.ResourceInfo(
cache_command='compute networks list',
list_format="""
table(
name,
x_gcloud_mode:label=MODE,
IPv4Range:label=IPV4_RANGE,
gatewayIPv4
)
""",),
'compute.operations':
resource_info.ResourceInfo(
list_format="""
table(
name,
operationType:label=TYPE,
targetLink.scope():label=TARGET,
operation_http_status():label=HTTP_STATUS,
status,
insertTime:label=TIMESTAMP
)
""",),
'compute.peerings':
resource_info.ResourceInfo(
cache_command='compute networks peerings list',
list_format="""
table(
name,
source_network.basename():label=NETWORK,
network.map().scope(projects).segment(0):label=PEER_PROJECT,
network.basename():label=PEER_NETWORK,
autoCreateRoutes,
state,
stateDetails
)
""",),
'compute.projects':
resource_info.ResourceInfo(
list_format="""
value(
format("There is no API support yet.")
)
""",),
'compute.xpnProjects':
resource_info.ResourceInfo(
list_format="""
table(
name,
creationTimestamp,
xpnProjectStatus
)
""",),
'compute.xpnResourceId':
resource_info.ResourceInfo(
list_format="""
table(
id:label=RESOURCE_ID,
type:label=RESOURCE_TYPE)
""",),
'compute.regions':
resource_info.ResourceInfo(
cache_command='compute regions list',
list_format="""
table(
name,
quotas.metric.CPUS.quota():label=CPUS,
quotas.metric.DISKS_TOTAL_GB.quota():label=DISKS_GB,
quotas.metric.IN_USE_ADDRESSES.quota():label=ADDRESSES,
quotas.metric.STATIC_ADDRESSES.quota():label=RESERVED_ADDRESSES,
status():label=STATUS,
deprecated.deleted:label=TURNDOWN_DATE
)
""",),
'compute.routers':
resource_info.ResourceInfo(
cache_command='compute routers list',
list_format="""
table(
name,
region.basename(),
network.basename()
)
""",),
'compute.routes':
resource_info.ResourceInfo(
cache_command='compute routes list',
list_format="""
table(
name,
network.basename(),
destRange,
firstof(
nextHopInstance,
nextHopGateway,
nextHopIp,
nextHopVpnTunnel,
nextHopPeering).scope()
:label=NEXT_HOP,
priority
)
""",),
'compute.snapshots':
resource_info.ResourceInfo(
cache_command='compute snapshots list',
list_format="""
table(
name,
diskSizeGb,
sourceDisk.scope():label=SRC_DISK,
status
)
""",),
'compute.sslCertificates':
resource_info.ResourceInfo(
cache_command='compute ssl-certificates list',
list_format="""
table(
name,
creationTimestamp
)
""",),
'compute.subnetworks':
resource_info.ResourceInfo(
cache_command='compute networks subnets list',
list_format="""
table(
name,
region.basename(),
network.basename(),
ipCidrRange:label=RANGE
)
""",),
'compute.targetHttpProxies':
resource_info.ResourceInfo(
cache_command='compute target-http-proxies list',
list_format="""
table(
name,
urlMap.basename()
)
""",),
'compute.targetHttpsProxies':
resource_info.ResourceInfo(
cache_command='compute target-https-proxies list',
list_format="""
table(
name,
sslCertificates.map().basename().list():label=SSL_CERTIFICATES,
urlMap.basename()
)
""",),
'compute.targetInstances':
resource_info.ResourceInfo(
cache_command='compute target-instances list',
list_format="""
table(
name,
zone.basename(),
instance.basename(),
natPolicy
)
""",),
'compute.targetPoolInstanceHealth':
resource_info.ResourceInfo(
list_format="""
default
""",),
'compute.targetPools':
resource_info.ResourceInfo(
cache_command='compute target-pools list',
list_format="""
table(
name,
region.basename(),
sessionAffinity,
backupPool.basename():label=BACKUP,
healthChecks[].map().basename().list():label=HEALTH_CHECKS
)
""",),
'compute.targetSslProxies':
resource_info.ResourceInfo(
cache_command='compute target-ssl-proxies list',),
'compute.targetTcpProxies':
resource_info.ResourceInfo(
cache_command='compute target-tcp-proxies list',),
'compute.targetVpnGateways':
resource_info.ResourceInfo(
cache_command='compute target-vpn-gateways list',
list_format="""
table(
name,
network.basename(),
region.basename()
)
""",),
'compute.urlMaps':
resource_info.ResourceInfo(
cache_command='compute url-maps list',
list_format="""
table(
name,
defaultService
)
""",),
'compute.users':
resource_info.ResourceInfo(
cache_command='compute users list',
list_format="""
table(
name,
owner,
description
)
""",),
'compute.vpnTunnels':
resource_info.ResourceInfo(
cache_command='compute vpn-tunnels list',
list_format="""
table(
name,
region.basename(),
targetVpnGateway.basename():label=GATEWAY,
peerIp:label=PEER_ADDRESS
)
""",),
'compute.zones':
resource_info.ResourceInfo(
cache_command='compute zones list',
list_format="""
table(
name,
region.basename(),
status():label=STATUS,
maintenanceWindows.next_maintenance():label=NEXT_MAINTENANCE,
deprecated.deleted:label=TURNDOWN_DATE
)
""",),
}
| 29.626866 | 84 | 0.518577 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.core.resource import resource_info
RESOURCE_REGISTRY = {
'compute.addresses':
resource_info.ResourceInfo(
cache_command='compute addresses list',
list_format="""
table(
name,
region.basename(),
address,
status
)
""",),
'compute.autoscalers':
resource_info.ResourceInfo(
async_collection='compute.operations',
cache_command='compute autoscaler list',
list_format="""
table(
name,
target.basename(),
autoscalingPolicy.policy():label=POLICY
)
""",),
'compute.backendBuckets':
resource_info.ResourceInfo(
list_format="""
table(
name,
bucketName:label=GCS_BUCKET_NAME,
enableCdn
)
""",),
'compute.backendServiceGroupHealth':
resource_info.ResourceInfo(
list_format="""
default
""",),
'compute.backendServices':
resource_info.ResourceInfo(
cache_command='compute backend-services list',
list_format="""
table(
name,
backends[].group.list():label=BACKENDS,
protocol
)
""",),
'compute.backendServices.alpha':
resource_info.ResourceInfo(
cache_command='compute backend-services list',
list_format="""
table(
name,
backends[].group.list():label=BACKENDS,
protocol,
loadBalancingScheme,
healthChecks.map().basename().list()
)
""",),
'compute.regionBackendServices':
resource_info.ResourceInfo(
cache_command='compute backend-services list',
list_format="""
table(
name,
backends[].group.list():label=BACKENDS,
protocol,
loadBalancingScheme,
healthChecks.map().basename().list()
)
""",),
'compute.commitments':
resource_info.ResourceInfo(
cache_command='compute commitments list',
list_format="""
table(name,
region.basename(),
endTimestamp,
status)
""",),
'compute.disks':
resource_info.ResourceInfo(
cache_command='compute disks list',
list_format="""
table(
name,
zone.basename(),
sizeGb,
type.basename(),
status
)
""",),
'compute.diskTypes':
resource_info.ResourceInfo(
cache_command='compute disk-types list',
list_format="""
table(
name,
zone.basename(),
validDiskSize:label=VALID_DISK_SIZES
)
""",),
'compute.diskTypes.alpha':
resource_info.ResourceInfo(
cache_command='compute disk-types list',
list_format="""
table(
name,
location():label=LOCATION,
location_scope():label=SCOPE,
validDiskSize:label=VALID_DISK_SIZES
)
""",),
'compute.firewalls':
resource_info.ResourceInfo(
cache_command='compute firewall-rules list',
list_format="""
table(
name,
network.basename(),
direction,
priority,
allowed[].map().firewall_rule().list():label=ALLOW,
denied[].map().firewall_rule().list():label=DENY
)
""",),
'compute.forwardingRules':
resource_info.ResourceInfo(
cache_command='compute forwarding-rules list',
list_format="""
table(
name,
region.basename(),
IPAddress,
IPProtocol,
firstof(
target,
backendService).scope():label=TARGET
)
""",),
'compute.groups':
resource_info.ResourceInfo(
cache_command='compute groups list',
list_format="""
table(
name,
members.len():label=NUM_MEMBERS,
description
)
""",),
'compute.healthChecks':
resource_info.ResourceInfo(
cache_command='compute health-checks list',
list_format="""
table(
name,
type:label=PROTOCOL
)
""",),
'compute.httpHealthChecks':
resource_info.ResourceInfo(
cache_command='compute http-health-checks list',
list_format="""
table(
name,
host,
port,
requestPath
)
""",),
'compute.httpsHealthChecks':
resource_info.ResourceInfo(
cache_command='compute https-health-checks list',
list_format="""
table(
name,
host,
port,
requestPath
)
""",),
'compute.images':
resource_info.ResourceInfo(
cache_command='compute images list',
list_format="""
table(
name,
selfLink.map().scope(projects).segment(0):label=PROJECT,
family,
deprecated.state:label=DEPRECATED,
status
)
""",),
'compute.instanceGroups':
resource_info.ResourceInfo(
cache_command='compute instance-groups list',
list_format="""
table(
name,
location():label=LOCATION,
location_scope():label=SCOPE,
network.basename(),
isManaged:label=MANAGED,
size:label=INSTANCES
)
""",),
'compute.instanceGroupManagers':
resource_info.ResourceInfo(
cache_command='compute instance-groups managed list',
list_format="""
table(
name,
location():label=LOCATION,
location_scope():label=SCOPE,
baseInstanceName,
size,
targetSize,
instanceTemplate.basename(),
autoscaled
)
""",),
'compute.instances':
resource_info.ResourceInfo(
async_collection='compute.operations',
cache_command='compute instances list',
list_format="""
table(
name,
zone.basename(),
machineType.machine_type().basename(),
scheduling.preemptible.yesno(yes=true, no=''),
networkInterfaces[].networkIP.notnull().list():label=INTERNAL_IP,
networkInterfaces[].accessConfigs[0].natIP.notnull().list()\
:label=EXTERNAL_IP,
status
)
""",),
'compute.instanceTemplates':
resource_info.ResourceInfo(
cache_command='compute instance-templates list',
list_format="""
table(
name,
properties.machineType.machine_type(),
properties.scheduling.preemptible.yesno(yes=true, no=''),
creationTimestamp
)
""",),
'compute.invalidations':
resource_info.ResourceInfo(
cache_command='beta compute url-maps list-cdn-cache-invalidations',
list_format="""
table(
description,
operation_http_status():label=HTTP_STATUS,
status,
insertTime:label=TIMESTAMP
)
""",),
'compute.machineTypes':
resource_info.ResourceInfo(
cache_command='compute machine-types list',
list_format="""
table(
name,
zone.basename(),
guestCpus:label=CPUS,
memoryMb.size(units_in=MiB, units_out=GiB, precision=2):label=MEMORY_GB,
deprecated.state:label=DEPRECATED
)
""",),
'compute.networks':
resource_info.ResourceInfo(
cache_command='compute networks list',
list_format="""
table(
name,
x_gcloud_mode:label=MODE,
IPv4Range:label=IPV4_RANGE,
gatewayIPv4
)
""",),
'compute.operations':
resource_info.ResourceInfo(
list_format="""
table(
name,
operationType:label=TYPE,
targetLink.scope():label=TARGET,
operation_http_status():label=HTTP_STATUS,
status,
insertTime:label=TIMESTAMP
)
""",),
'compute.peerings':
resource_info.ResourceInfo(
cache_command='compute networks peerings list',
list_format="""
table(
name,
source_network.basename():label=NETWORK,
network.map().scope(projects).segment(0):label=PEER_PROJECT,
network.basename():label=PEER_NETWORK,
autoCreateRoutes,
state,
stateDetails
)
""",),
'compute.projects':
resource_info.ResourceInfo(
list_format="""
value(
format("There is no API support yet.")
)
""",),
'compute.xpnProjects':
resource_info.ResourceInfo(
list_format="""
table(
name,
creationTimestamp,
xpnProjectStatus
)
""",),
'compute.xpnResourceId':
resource_info.ResourceInfo(
list_format="""
table(
id:label=RESOURCE_ID,
type:label=RESOURCE_TYPE)
""",),
'compute.regions':
resource_info.ResourceInfo(
cache_command='compute regions list',
list_format="""
table(
name,
quotas.metric.CPUS.quota():label=CPUS,
quotas.metric.DISKS_TOTAL_GB.quota():label=DISKS_GB,
quotas.metric.IN_USE_ADDRESSES.quota():label=ADDRESSES,
quotas.metric.STATIC_ADDRESSES.quota():label=RESERVED_ADDRESSES,
status():label=STATUS,
deprecated.deleted:label=TURNDOWN_DATE
)
""",),
'compute.routers':
resource_info.ResourceInfo(
cache_command='compute routers list',
list_format="""
table(
name,
region.basename(),
network.basename()
)
""",),
'compute.routes':
resource_info.ResourceInfo(
cache_command='compute routes list',
list_format="""
table(
name,
network.basename(),
destRange,
firstof(
nextHopInstance,
nextHopGateway,
nextHopIp,
nextHopVpnTunnel,
nextHopPeering).scope()
:label=NEXT_HOP,
priority
)
""",),
'compute.snapshots':
resource_info.ResourceInfo(
cache_command='compute snapshots list',
list_format="""
table(
name,
diskSizeGb,
sourceDisk.scope():label=SRC_DISK,
status
)
""",),
'compute.sslCertificates':
resource_info.ResourceInfo(
cache_command='compute ssl-certificates list',
list_format="""
table(
name,
creationTimestamp
)
""",),
'compute.subnetworks':
resource_info.ResourceInfo(
cache_command='compute networks subnets list',
list_format="""
table(
name,
region.basename(),
network.basename(),
ipCidrRange:label=RANGE
)
""",),
'compute.targetHttpProxies':
resource_info.ResourceInfo(
cache_command='compute target-http-proxies list',
list_format="""
table(
name,
urlMap.basename()
)
""",),
'compute.targetHttpsProxies':
resource_info.ResourceInfo(
cache_command='compute target-https-proxies list',
list_format="""
table(
name,
sslCertificates.map().basename().list():label=SSL_CERTIFICATES,
urlMap.basename()
)
""",),
'compute.targetInstances':
resource_info.ResourceInfo(
cache_command='compute target-instances list',
list_format="""
table(
name,
zone.basename(),
instance.basename(),
natPolicy
)
""",),
'compute.targetPoolInstanceHealth':
resource_info.ResourceInfo(
list_format="""
default
""",),
'compute.targetPools':
resource_info.ResourceInfo(
cache_command='compute target-pools list',
list_format="""
table(
name,
region.basename(),
sessionAffinity,
backupPool.basename():label=BACKUP,
healthChecks[].map().basename().list():label=HEALTH_CHECKS
)
""",),
'compute.targetSslProxies':
resource_info.ResourceInfo(
cache_command='compute target-ssl-proxies list',),
'compute.targetTcpProxies':
resource_info.ResourceInfo(
cache_command='compute target-tcp-proxies list',),
'compute.targetVpnGateways':
resource_info.ResourceInfo(
cache_command='compute target-vpn-gateways list',
list_format="""
table(
name,
network.basename(),
region.basename()
)
""",),
'compute.urlMaps':
resource_info.ResourceInfo(
cache_command='compute url-maps list',
list_format="""
table(
name,
defaultService
)
""",),
'compute.users':
resource_info.ResourceInfo(
cache_command='compute users list',
list_format="""
table(
name,
owner,
description
)
""",),
'compute.vpnTunnels':
resource_info.ResourceInfo(
cache_command='compute vpn-tunnels list',
list_format="""
table(
name,
region.basename(),
targetVpnGateway.basename():label=GATEWAY,
peerIp:label=PEER_ADDRESS
)
""",),
'compute.zones':
resource_info.ResourceInfo(
cache_command='compute zones list',
list_format="""
table(
name,
region.basename(),
status():label=STATUS,
maintenanceWindows.next_maintenance():label=NEXT_MAINTENANCE,
deprecated.deleted:label=TURNDOWN_DATE
)
""",),
}
| true | true |
f733c7ea5059279ce6cab52793d4cf2eae1e4752 | 12,355 | py | Python | tests/testflows/rbac/tests/syntax/create_row_policy.py | lizhichao/ClickHouse | 3f5dc37095ccca18de490fab162d6e3cb99756aa | [
"Apache-2.0"
] | 8 | 2019-06-04T02:50:13.000Z | 2022-02-10T06:46:51.000Z | tests/testflows/rbac/tests/syntax/create_row_policy.py | lizhichao/ClickHouse | 3f5dc37095ccca18de490fab162d6e3cb99756aa | [
"Apache-2.0"
] | 16 | 2021-06-07T21:32:30.000Z | 2022-03-31T21:08:29.000Z | tests/testflows/rbac/tests/syntax/create_row_policy.py | lizhichao/ClickHouse | 3f5dc37095ccca18de490fab162d6e3cb99756aa | [
"Apache-2.0"
] | null | null | null | from contextlib import contextmanager
from testflows.core import *
from rbac.requirements import *
import rbac.tests.errors as errors
@TestFeature
@Name("create row policy")
@Args(format_description=False)
def feature(self, node="clickhouse1"):
"""Check create row policy query syntax.
```sql
CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name [ON CLUSTER cluster_name] ON [db.]table
[AS {PERMISSIVE | RESTRICTIVE}]
[FOR SELECT]
[USING condition]
[TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
```
"""
node = self.context.cluster.node(node)
@contextmanager
def cleanup(policy, on="default.foo"):
try:
with Given(f"I ensure the row policy does not already exist on {on}"):
node.query(f"DROP ROW POLICY IF EXISTS {policy} ON {on}")
yield
finally:
with Finally(f"I drop the row policy on {on}"):
node.query(f"DROP ROW POLICY IF EXISTS {policy} ON {on}")
def create_policy(policy, on="default.foo"):
with Given(f"I ensure I do have policy {policy} on {on}"):
node.query(f"CREATE ROW POLICY OR REPLACE {policy} ON {on}")
try:
with Given("I have a table and some roles"):
node.query(f"CREATE TABLE default.foo (x UInt64, y String) Engine=Memory")
node.query(f"CREATE ROLE role0")
node.query(f"CREATE ROLE role1")
with Scenario("I create row policy with no options", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy0"):
with When("I create row policy"):
node.query("CREATE ROW POLICY policy0 ON default.foo")
with Scenario("I create row policy using short syntax with no options", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy1"):
with When("I create row policy short form"):
node.query("CREATE POLICY policy1 ON default.foo")
with Scenario("I create row policy that already exists, throws exception", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
policy = "policy0"
with cleanup(policy):
create_policy(policy)
with When(f"I create row policy {policy}"):
exitcode, message = errors.cannot_insert_row_policy(name=f"{policy} ON default.foo")
node.query(f"CREATE ROW POLICY {policy} ON default.foo", exitcode=exitcode, message=message)
del policy
with Scenario("I create row policy if not exists, policy does not exist", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_IfNotExists("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy2"):
with When("I create row policy with if not exists"):
node.query("CREATE ROW POLICY IF NOT EXISTS policy2 ON default.foo")
with Scenario("I create row policy if not exists, policy does exist", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_IfNotExists("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
policy = "policy2"
with cleanup(policy):
create_policy(policy)
with When(f"I create row policy {policy} with if not exists"):
node.query(f"CREATE ROW POLICY IF NOT EXISTS {policy} ON default.foo")
del policy
with Scenario("I create row policy or replace, policy does not exist", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Replace("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy3"):
with When("I create row policy with or replace"):
node.query("CREATE ROW POLICY OR REPLACE policy3 ON default.foo")
with Scenario("I create row policy or replace, policy does exist", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Replace("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
policy = "policy3"
with cleanup(policy):
create_policy(policy)
with When(f"I create row policy {policy} with or replace"):
node.query(f"CREATE ROW POLICY OR REPLACE {policy} ON default.foo")
del policy
with Scenario("I create row policy as permissive", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Access_Permissive("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy4"):
with When("I create row policy as permissive"):
node.query("CREATE ROW POLICY policy4 ON default.foo AS PERMISSIVE")
with Scenario("I create row policy as restrictive", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Access_Restrictive("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy5"):
with When("I create row policy as restrictive"):
node.query("CREATE ROW POLICY policy5 ON default.foo AS RESTRICTIVE")
with Scenario("I create row policy for select", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_ForSelect("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_Condition("1.0")]):
with cleanup("policy6"):
with When("I create row policy with for select"):
node.query("CREATE ROW POLICY policy6 ON default.foo FOR SELECT USING x > 10")
with Scenario("I create row policy using condition", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Condition("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy6"):
with When("I create row policy with condition"):
node.query("CREATE ROW POLICY policy6 ON default.foo USING x > 10")
with Scenario("I create row policy assigned to one role", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy7"):
with When("I create row policy for one role"):
node.query("CREATE ROW POLICY policy7 ON default.foo TO role0")
with Scenario("I create row policy to assign to role that does not exist, throws exception", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0")]):
role = "role2"
with cleanup("policy8a"):
with Given(f"I drop {role} if it exists"):
node.query(f"DROP ROLE IF EXISTS {role}")
with Then(f"I create a row policy, assign to role {role}, which does not exist"):
exitcode, message = errors.role_not_found_in_disk(name=role)
node.query(f"CREATE ROW POLICY policy8a ON default.foo TO {role}", exitcode=exitcode, message=message)
del role
with Scenario("I create row policy to assign to all excpet role that does not exist, throws exception", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0")]):
role = "role2"
with cleanup("policy8a"):
with Given(f"I drop {role} if it exists"):
node.query(f"DROP ROLE IF EXISTS {role}")
with Then(f"I create a row policy, assign to all except role {role}, which does not exist"):
exitcode, message = errors.role_not_found_in_disk(name=role)
node.query(f"CREATE ROW POLICY policy8a ON default.foo TO ALL EXCEPT {role}", exitcode=exitcode, message=message)
del role
with Scenario("I create row policy assigned to multiple roles", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy8b"):
with When("I create row policy for multiple roles"):
node.query("CREATE ROW POLICY policy8b ON default.foo TO role0, role1")
with Scenario("I create row policy assigned to all", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_All("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy9"):
with When("I create row policy for all"):
node.query("CREATE ROW POLICY policy9 ON default.foo TO ALL")
with Scenario("I create row policy assigned to all except one role", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_AllExcept("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy10"):
with When("I create row policy for all except one"):
node.query("CREATE ROW POLICY policy10 ON default.foo TO ALL EXCEPT role0")
with Scenario("I create row policy assigned to all except multiple roles", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_AllExcept("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy11"):
with When("I create row policy for all except multiple roles"):
node.query("CREATE ROW POLICY policy11 ON default.foo TO ALL EXCEPT role0, role1")
with Scenario("I create row policy assigned to none", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_None("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy11"):
with When("I create row policy for none"):
node.query("CREATE ROW POLICY policy11 ON default.foo TO NONE")
with Scenario("I create row policy on cluster", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
try:
with When("I run create row policy command on cluster"):
node.query("CREATE ROW POLICY policy12 ON CLUSTER sharded_cluster ON default.foo")
finally:
with Finally("I drop the row policy from cluster"):
node.query("DROP ROW POLICY IF EXISTS policy12 ON default.foo ON CLUSTER sharded_cluster")
with Scenario("I create row policy on fake cluster, throws exception", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with When("I run create row policy command"):
exitcode, message = errors.cluster_not_found("fake_cluster")
node.query("CREATE ROW POLICY policy13 ON CLUSTER fake_cluster ON default.foo", exitcode=exitcode, message=message)
with Scenario("I create row policy on cluster after table", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
try:
with When("I run create row policy command on cluster"):
node.query("CREATE ROW POLICY policy12 ON default.foo ON CLUSTER sharded_cluster")
finally:
with Finally("I drop the row policy from cluster"):
node.query("DROP ROW POLICY IF EXISTS policy12 ON default.foo ON CLUSTER sharded_cluster")
finally:
with Finally("I drop the table and the roles"):
node.query(f"DROP TABLE IF EXISTS default.foo")
node.query(f"DROP ROLE IF EXISTS role0, role1") | 54.911111 | 136 | 0.615055 | from contextlib import contextmanager
from testflows.core import *
from rbac.requirements import *
import rbac.tests.errors as errors
@TestFeature
@Name("create row policy")
@Args(format_description=False)
def feature(self, node="clickhouse1"):
node = self.context.cluster.node(node)
@contextmanager
def cleanup(policy, on="default.foo"):
try:
with Given(f"I ensure the row policy does not already exist on {on}"):
node.query(f"DROP ROW POLICY IF EXISTS {policy} ON {on}")
yield
finally:
with Finally(f"I drop the row policy on {on}"):
node.query(f"DROP ROW POLICY IF EXISTS {policy} ON {on}")
def create_policy(policy, on="default.foo"):
with Given(f"I ensure I do have policy {policy} on {on}"):
node.query(f"CREATE ROW POLICY OR REPLACE {policy} ON {on}")
try:
with Given("I have a table and some roles"):
node.query(f"CREATE TABLE default.foo (x UInt64, y String) Engine=Memory")
node.query(f"CREATE ROLE role0")
node.query(f"CREATE ROLE role1")
with Scenario("I create row policy with no options", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy0"):
with When("I create row policy"):
node.query("CREATE ROW POLICY policy0 ON default.foo")
with Scenario("I create row policy using short syntax with no options", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy1"):
with When("I create row policy short form"):
node.query("CREATE POLICY policy1 ON default.foo")
with Scenario("I create row policy that already exists, throws exception", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
policy = "policy0"
with cleanup(policy):
create_policy(policy)
with When(f"I create row policy {policy}"):
exitcode, message = errors.cannot_insert_row_policy(name=f"{policy} ON default.foo")
node.query(f"CREATE ROW POLICY {policy} ON default.foo", exitcode=exitcode, message=message)
del policy
with Scenario("I create row policy if not exists, policy does not exist", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_IfNotExists("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy2"):
with When("I create row policy with if not exists"):
node.query("CREATE ROW POLICY IF NOT EXISTS policy2 ON default.foo")
with Scenario("I create row policy if not exists, policy does exist", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_IfNotExists("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
policy = "policy2"
with cleanup(policy):
create_policy(policy)
with When(f"I create row policy {policy} with if not exists"):
node.query(f"CREATE ROW POLICY IF NOT EXISTS {policy} ON default.foo")
del policy
with Scenario("I create row policy or replace, policy does not exist", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Replace("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy3"):
with When("I create row policy with or replace"):
node.query("CREATE ROW POLICY OR REPLACE policy3 ON default.foo")
with Scenario("I create row policy or replace, policy does exist", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Replace("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
policy = "policy3"
with cleanup(policy):
create_policy(policy)
with When(f"I create row policy {policy} with or replace"):
node.query(f"CREATE ROW POLICY OR REPLACE {policy} ON default.foo")
del policy
with Scenario("I create row policy as permissive", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Access_Permissive("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy4"):
with When("I create row policy as permissive"):
node.query("CREATE ROW POLICY policy4 ON default.foo AS PERMISSIVE")
with Scenario("I create row policy as restrictive", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Access_Restrictive("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy5"):
with When("I create row policy as restrictive"):
node.query("CREATE ROW POLICY policy5 ON default.foo AS RESTRICTIVE")
with Scenario("I create row policy for select", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_ForSelect("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_Condition("1.0")]):
with cleanup("policy6"):
with When("I create row policy with for select"):
node.query("CREATE ROW POLICY policy6 ON default.foo FOR SELECT USING x > 10")
with Scenario("I create row policy using condition", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Condition("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy6"):
with When("I create row policy with condition"):
node.query("CREATE ROW POLICY policy6 ON default.foo USING x > 10")
with Scenario("I create row policy assigned to one role", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy7"):
with When("I create row policy for one role"):
node.query("CREATE ROW POLICY policy7 ON default.foo TO role0")
with Scenario("I create row policy to assign to role that does not exist, throws exception", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0")]):
role = "role2"
with cleanup("policy8a"):
with Given(f"I drop {role} if it exists"):
node.query(f"DROP ROLE IF EXISTS {role}")
with Then(f"I create a row policy, assign to role {role}, which does not exist"):
exitcode, message = errors.role_not_found_in_disk(name=role)
node.query(f"CREATE ROW POLICY policy8a ON default.foo TO {role}", exitcode=exitcode, message=message)
del role
with Scenario("I create row policy to assign to all excpet role that does not exist, throws exception", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0")]):
role = "role2"
with cleanup("policy8a"):
with Given(f"I drop {role} if it exists"):
node.query(f"DROP ROLE IF EXISTS {role}")
with Then(f"I create a row policy, assign to all except role {role}, which does not exist"):
exitcode, message = errors.role_not_found_in_disk(name=role)
node.query(f"CREATE ROW POLICY policy8a ON default.foo TO ALL EXCEPT {role}", exitcode=exitcode, message=message)
del role
with Scenario("I create row policy assigned to multiple roles", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy8b"):
with When("I create row policy for multiple roles"):
node.query("CREATE ROW POLICY policy8b ON default.foo TO role0, role1")
with Scenario("I create row policy assigned to all", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_All("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy9"):
with When("I create row policy for all"):
node.query("CREATE ROW POLICY policy9 ON default.foo TO ALL")
with Scenario("I create row policy assigned to all except one role", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_AllExcept("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy10"):
with When("I create row policy for all except one"):
node.query("CREATE ROW POLICY policy10 ON default.foo TO ALL EXCEPT role0")
with Scenario("I create row policy assigned to all except multiple roles", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_AllExcept("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy11"):
with When("I create row policy for all except multiple roles"):
node.query("CREATE ROW POLICY policy11 ON default.foo TO ALL EXCEPT role0, role1")
with Scenario("I create row policy assigned to none", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_None("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with cleanup("policy11"):
with When("I create row policy for none"):
node.query("CREATE ROW POLICY policy11 ON default.foo TO NONE")
with Scenario("I create row policy on cluster", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
try:
with When("I run create row policy command on cluster"):
node.query("CREATE ROW POLICY policy12 ON CLUSTER sharded_cluster ON default.foo")
finally:
with Finally("I drop the row policy from cluster"):
node.query("DROP ROW POLICY IF EXISTS policy12 ON default.foo ON CLUSTER sharded_cluster")
with Scenario("I create row policy on fake cluster, throws exception", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
with When("I run create row policy command"):
exitcode, message = errors.cluster_not_found("fake_cluster")
node.query("CREATE ROW POLICY policy13 ON CLUSTER fake_cluster ON default.foo", exitcode=exitcode, message=message)
with Scenario("I create row policy on cluster after table", flags=TE, requirements=[
RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster("1.0"),
RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]):
try:
with When("I run create row policy command on cluster"):
node.query("CREATE ROW POLICY policy12 ON default.foo ON CLUSTER sharded_cluster")
finally:
with Finally("I drop the row policy from cluster"):
node.query("DROP ROW POLICY IF EXISTS policy12 ON default.foo ON CLUSTER sharded_cluster")
finally:
with Finally("I drop the table and the roles"):
node.query(f"DROP TABLE IF EXISTS default.foo")
node.query(f"DROP ROLE IF EXISTS role0, role1") | true | true |
f733c7fa71b298ed03e3a73c484f00bb55bfbbc1 | 1,602 | py | Python | config/urls.py | manuelen12/test_sale | 1d199fcfca8361edf704e0bb138a07e7d924f327 | [
"MIT"
] | null | null | null | config/urls.py | manuelen12/test_sale | 1d199fcfca8361edf704e0bb138a07e7d924f327 | [
"MIT"
] | null | null | null | config/urls.py | manuelen12/test_sale | 1d199fcfca8361edf704e0bb138a07e7d924f327 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from test_venta.sales.v0.urls import router as sales
from django.views.generic.base import RedirectView
from common.utils import DefaultRouter
from users.v0.urls import router as users
router = DefaultRouter()
# router.extend(upload)
router.extend(sales)
router.extend(users)
urlpatterns = [
url(r'^api/v0/', include(router.urls, namespace='api')),
url(r'^$', RedirectView.as_view(url='/api/v0/', permanent=False), name='home'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| 39.073171 | 110 | 0.717228 | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from test_venta.sales.v0.urls import router as sales
from django.views.generic.base import RedirectView
from common.utils import DefaultRouter
from users.v0.urls import router as users
router = DefaultRouter()
router.extend(sales)
router.extend(users)
urlpatterns = [
url(r'^api/v0/', include(router.urls, namespace='api')),
url(r'^$', RedirectView.as_view(url='/api/v0/', permanent=False), name='home'),
url(settings.ADMIN_URL, admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| true | true |
f733ca4ff17ab22da60d82f75c1251396d4c316f | 1,180 | py | Python | ai/monitoring/cleanup_dd.py | jolibrain/recognition | 5ad2cc7b94f65e72f68c11eb3b7e1066ef907cbe | [
"Apache-2.0"
] | 6 | 2018-03-16T15:27:22.000Z | 2020-07-16T09:45:24.000Z | ai/monitoring/cleanup_dd.py | jolibrain/recognition | 5ad2cc7b94f65e72f68c11eb3b7e1066ef907cbe | [
"Apache-2.0"
] | null | null | null | ai/monitoring/cleanup_dd.py | jolibrain/recognition | 5ad2cc7b94f65e72f68c11eb3b7e1066ef907cbe | [
"Apache-2.0"
] | 1 | 2020-12-24T20:05:35.000Z | 2020-12-24T20:05:35.000Z | """
Copyright 2016 Fabric S.P.A, Emmanuel Benazera, Alexandre Girard
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from dd_client import DD
# Location of the DeepDetect server this script cleans up.
HOST = 'localhost'
PORT = 8080
# Shared client handle; responses are returned in the client's Python
# format (dd.RETURN_PYTHON) rather than raw JSON.
dd = DD(HOST,PORT)
dd.set_return_format(dd.RETURN_PYTHON)
def delete_dd_service(sname):
    """Delete the DeepDetect service named *sname*.

    Passes ``clear=''`` to the client — presumably this deletes the service
    without clearing its model repository; TODO confirm against the
    dd_client ``delete_service`` API.
    """
    dd.delete_service(sname,clear='')
# main: in case there are remaining services on the server, remove them all.
server_info = dd.info()
for service in server_info['head']['services']:
    delete_dd_service(service['name'])
| 28.780488 | 64 | 0.769492 |
from dd_client import DD
HOST = 'localhost'
PORT = 8080
dd = DD(HOST,PORT)
dd.set_return_format(dd.RETURN_PYTHON)
def delete_dd_service(sname):
dd.delete_service(sname,clear='')
info = dd.info()
for s in info['head']['services']:
sname = s['name']
delete_dd_service(sname)
| true | true |
f733ca75c9d6e2276134f106b397b8a95855ffff | 18,336 | py | Python | environment/lib/python3.8/site-packages/sklearn/decomposition/tests/test_dict_learning.py | 123972/PCA-nutricion | aff3c51a71c887c3fa367dbf9d599be5915c80cc | [
"MIT"
] | 1 | 2021-05-23T16:07:49.000Z | 2021-05-23T16:07:49.000Z | environment/lib/python3.8/site-packages/sklearn/decomposition/tests/test_dict_learning.py | 123972/PCA-nutricion | aff3c51a71c887c3fa367dbf9d599be5915c80cc | [
"MIT"
] | 2 | 2021-06-08T22:19:17.000Z | 2021-09-08T02:27:57.000Z | environment/lib/python3.8/site-packages/sklearn/decomposition/tests/test_dict_learning.py | 123972/PCA-nutricion | aff3c51a71c887c3fa367dbf9d599be5915c80cc | [
"MIT"
] | 1 | 2021-10-02T07:23:08.000Z | 2021-10-02T07:23:08.000Z | import pytest
import numpy as np
import itertools
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_array
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
# Module-level fixture shared by the tests below: a reproducible
# 10x8 Gaussian data matrix (seeded so every test run sees the same data).
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_sparse_encode_shapes_omp():
    """sparse_encode returns an (n_samples, n_components) code matrix for
    every coding algorithm, both serially and with joblib parallelism."""
    rng = np.random.RandomState(0)
    coding_methods = ('omp', 'lasso_lars', 'lasso_cd', 'lars', 'threshold')
    # Same (n_components, n_samples) grid and RNG draw order as before.
    for n_atoms in (1, 5):
        for n_obs in (1, 9):
            data = rng.randn(n_obs, n_features)
            dictionary = rng.randn(n_atoms, n_features)
            for method in coding_methods:
                for jobs in (1, 3):
                    code = sparse_encode(data, dictionary,
                                         algorithm=method, n_jobs=jobs)
                    assert code.shape == (n_obs, n_atoms)
def test_dict_learning_shapes():
    """Fitted dictionaries and transformed codes have the advertised shapes."""
    for n_atoms in (5, 1):
        estimator = DictionaryLearning(n_atoms, random_state=0).fit(X)
        assert estimator.components_.shape == (n_atoms, n_features)
    # transform() codes are (n_samples, n_components); checked for the last
    # fitted estimator, i.e. n_components == 1, exactly as before.
    assert estimator.transform(X).shape == (X.shape[0], 1)
def test_dict_learning_overcomplete():
    """An overcomplete dictionary (more atoms than features) can be fitted."""
    n_atoms = 12  # > n_features == 8, hence overcomplete
    learner = DictionaryLearning(n_atoms, random_state=0)
    fitted = learner.fit(X)
    assert fitted.components_.shape == (n_atoms, n_features)
def test_max_iter():
    """`transform_max_iter` bounds the SparseCoder's lasso solve: a budget of
    one iteration must raise ConvergenceWarning on a hard problem, while a
    large budget must converge with no warnings at all."""
    def ricker_function(resolution, center, width):
        """Discrete sub-sampled Ricker (Mexican hat) wavelet"""
        x = np.linspace(0, resolution - 1, resolution)
        x = ((2 / (np.sqrt(3 * width) * np.pi ** .25))
             * (1 - (x - center) ** 2 / width ** 2)
             * np.exp(-(x - center) ** 2 / (2 * width ** 2)))
        return x
    def ricker_matrix(width, resolution, n_components):
        """Dictionary of Ricker (Mexican hat) wavelets"""
        centers = np.linspace(0, resolution - 1, n_components)
        D = np.empty((n_components, resolution))
        for i, center in enumerate(centers):
            D[i] = ricker_function(resolution, center, width)
        # L2-normalize every atom (row) of the dictionary.
        D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
        return D
    transform_algorithm = 'lasso_cd'
    resolution = 1024
    subsampling = 3  # subsampling factor
    n_components = resolution // subsampling
    # Compute a wavelet dictionary: stack sub-dictionaries built at five
    # different wavelet widths, n_components // 5 atoms each.
    D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                        n_components=n_components // 5)
                    for w in (10, 50, 100, 500, 1000))]
    # A piecewise-constant signal (3 on the first quarter, -1 elsewhere),
    # reshaped into a single (1, resolution) sample.
    X = np.linspace(0, resolution - 1, resolution)
    first_quarter = X < resolution / 4
    X[first_quarter] = 3.
    X[np.logical_not(first_quarter)] = -1.
    X = X.reshape(1, -1)
    # check that the underlying model fails to converge
    with pytest.warns(ConvergenceWarning):
        model = SparseCoder(D_multi, transform_algorithm=transform_algorithm,
                            transform_max_iter=1)
        model.fit_transform(X)
    # check that the underlying model converges w/o warnings
    # NOTE(review): `pytest.warns(None)` is deprecated in pytest >= 7 —
    # confirm the pinned pytest version still accepts it.
    with pytest.warns(None) as record:
        model = SparseCoder(D_multi, transform_algorithm=transform_algorithm,
                            transform_max_iter=2000)
        model.fit_transform(X)
    assert not record.list
def test_dict_learning_lars_positive_parameter():
    """The default 'lars' method rejects positive_code=True with a clear
    ValueError (positivity is not supported by LARS)."""
    n_components = 5
    alpha = 1
    err_msg = "Positive constraint not supported for 'lars' coding method."
    with pytest.raises(ValueError, match=err_msg):
        dict_learning(X, n_components, alpha=alpha, positive_code=True)
@pytest.mark.parametrize("transform_algorithm", [
    "lasso_lars",
    "lasso_cd",
    "threshold",
])
@pytest.mark.parametrize("positive_code", [False, True])
@pytest.mark.parametrize("positive_dict", [False, True])
def test_dict_learning_positivity(transform_algorithm,
                                  positive_code,
                                  positive_dict):
    """positive_dict / positive_code constrain the dictionary / code to be
    non-negative; without the constraint negative entries must occur."""
    n_components = 5
    dico = DictionaryLearning(
        n_components, transform_algorithm=transform_algorithm, random_state=0,
        positive_code=positive_code, positive_dict=positive_dict,
        fit_algorithm="cd").fit(X)
    code = dico.transform(X)
    if positive_dict:
        assert (dico.components_ >= 0).all()
    else:
        assert (dico.components_ < 0).any()
    if positive_code:
        assert (code >= 0).all()
    else:
        assert (code < 0).any()
@pytest.mark.parametrize("positive_dict", [False, True])
def test_dict_learning_lars_dict_positivity(positive_dict):
    """positive_dict works with the 'lars' transform (the positivity
    constraint applies to the dictionary fit, which uses 'cd' here)."""
    n_components = 5
    dico = DictionaryLearning(
        n_components, transform_algorithm="lars", random_state=0,
        positive_dict=positive_dict, fit_algorithm="cd").fit(X)
    if positive_dict:
        assert (dico.components_ >= 0).all()
    else:
        assert (dico.components_ < 0).any()
def test_dict_learning_lars_code_positivity():
    """positive_code=True combined with the 'lars' transform is rejected at
    transform time (fit itself uses 'cd' and succeeds)."""
    n_components = 5
    dico = DictionaryLearning(
        n_components, transform_algorithm="lars", random_state=0,
        positive_code=True, fit_algorithm="cd").fit(X)
    err_msg = "Positive constraint not supported for '{}' coding method."
    err_msg = err_msg.format("lars")
    with pytest.raises(ValueError, match=err_msg):
        dico.transform(X)
def test_dict_learning_reconstruction():
    """With a tiny transform_alpha, code @ components_ reconstructs X almost
    exactly with 'omp' and approximately (2 decimals) with 'lasso_lars'."""
    n_components = 12
    dico = DictionaryLearning(n_components, transform_algorithm='omp',
                              transform_alpha=0.001, random_state=0)
    code = dico.fit(X).transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X)
    dico.set_params(transform_algorithm='lasso_lars')
    code = dico.transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
    # used to test lars here too, but there's no guarantee the number of
    # nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
    """Regression test: reconstruction also works with parallel transform
    (n_jobs=4), matching the serial results of the test above."""
    # regression test that parallel reconstruction works with n_jobs>1
    n_components = 12
    dico = DictionaryLearning(n_components, transform_algorithm='omp',
                              transform_alpha=0.001, random_state=0, n_jobs=4)
    code = dico.fit(X).transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X)
    dico.set_params(transform_algorithm='lasso_lars')
    code = dico.transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
    """'lasso_cd' must accept read-only (memory-mapped) input; TempMemmap
    exposes X as a read-only memmap like joblib does with large arrays."""
    n_components = 12
    with TempMemmap(X) as X_read_only:
        dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
                                  transform_alpha=0.001, random_state=0,
                                  n_jobs=4)
        with ignore_warnings(category=ConvergenceWarning):
            code = dico.fit(X_read_only).transform(X_read_only)
        assert_array_almost_equal(np.dot(code, dico.components_), X_read_only,
                                  decimal=2)
def test_dict_learning_nonzero_coefs():
    """transform_n_nonzero_coefs=3 yields exactly 3 nonzero coefficients for
    both 'lars' and 'omp'. X[np.newaxis, 1] selects row 1 as a (1, n_features)
    2-D sample."""
    n_components = 4
    dico = DictionaryLearning(n_components, transform_algorithm='lars',
                              transform_n_nonzero_coefs=3, random_state=0)
    code = dico.fit(X).transform(X[np.newaxis, 1])
    assert len(np.flatnonzero(code)) == 3
    dico.set_params(transform_algorithm='omp')
    code = dico.transform(X[np.newaxis, 1])
    assert len(np.flatnonzero(code)) == 3
def test_dict_learning_unknown_fit_algorithm():
    """An unrecognized fit_algorithm is rejected with ValueError at fit."""
    estimator = DictionaryLearning(5, fit_algorithm='<unknown>')
    with pytest.raises(ValueError):
        estimator.fit(X)
def test_dict_learning_split():
    """split_sign=True splits the code into positive and negative parts;
    their difference recovers the unsplit code."""
    n_components = 5
    dico = DictionaryLearning(n_components, transform_algorithm='threshold',
                              random_state=0)
    code = dico.fit(X).transform(X)
    dico.split_sign = True
    split_code = dico.transform(X)
    assert_array_almost_equal(split_code[:, :n_components] -
                              split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
    """dict_learning_online returns code and dictionary with shapes that are
    consistent with each other and with X."""
    rng = np.random.RandomState(0)
    n_components = 8
    code, dictionary = dict_learning_online(X, n_components=n_components,
                                            alpha=1, random_state=rng)
    assert code.shape == (n_samples, n_components)
    assert dictionary.shape == (n_components, n_features)
    assert np.dot(code, dictionary).shape == X.shape
def test_dict_learning_online_lars_positive_parameter():
    """The online variant also rejects positive_code=True with the default
    'lars' method."""
    alpha = 1
    err_msg = "Positive constraint not supported for 'lars' coding method."
    with pytest.raises(ValueError, match=err_msg):
        dict_learning_online(X, alpha=alpha, positive_code=True)
@pytest.mark.parametrize("transform_algorithm", [
    "lasso_lars",
    "lasso_cd",
    "threshold",
])
@pytest.mark.parametrize("positive_code", [False, True])
@pytest.mark.parametrize("positive_dict", [False, True])
def test_minibatch_dictionary_learning_positivity(transform_algorithm,
                                                  positive_code,
                                                  positive_dict):
    """Same positivity contract as DictionaryLearning, for the minibatch
    estimator: constrained outputs are non-negative, unconstrained ones
    must contain negative entries."""
    n_components = 8
    dico = MiniBatchDictionaryLearning(
        n_components, transform_algorithm=transform_algorithm, random_state=0,
        positive_code=positive_code, positive_dict=positive_dict,
        fit_algorithm='cd').fit(X)
    code = dico.transform(X)
    if positive_dict:
        assert (dico.components_ >= 0).all()
    else:
        assert (dico.components_ < 0).any()
    if positive_code:
        assert (code >= 0).all()
    else:
        assert (code < 0).any()
@pytest.mark.parametrize("positive_dict", [False, True])
def test_minibatch_dictionary_learning_lars(positive_dict):
    """positive_dict works with the 'lars' transform on the minibatch
    estimator (the dictionary fit uses 'cd')."""
    n_components = 8
    dico = MiniBatchDictionaryLearning(
        n_components, transform_algorithm="lars", random_state=0,
        positive_dict=positive_dict, fit_algorithm='cd').fit(X)
    if positive_dict:
        assert (dico.components_ >= 0).all()
    else:
        assert (dico.components_ < 0).any()
@pytest.mark.parametrize("positive_code", [False, True])
@pytest.mark.parametrize("positive_dict", [False, True])
def test_dict_learning_online_positivity(positive_code,
                                         positive_dict):
    """The functional online API honors positive_dict / positive_code the
    same way the estimators do."""
    rng = np.random.RandomState(0)
    n_components = 8
    code, dictionary = dict_learning_online(X, n_components=n_components,
                                            method="cd",
                                            alpha=1, random_state=rng,
                                            positive_dict=positive_dict,
                                            positive_code=positive_code)
    if positive_dict:
        assert (dictionary >= 0).all()
    else:
        assert (dictionary < 0).any()
    if positive_code:
        assert (code >= 0).all()
    else:
        assert (code < 0).any()
def test_dict_learning_online_verbosity():
    """Smoke-test verbose=1/2 for both the estimator and the functional API.

    stdout is captured so the test output stays quiet, and restored in the
    finally block even if a fit raises.
    """
    n_components = 5
    # test verbosity
    from io import StringIO
    import sys
    old_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
                                           random_state=0)
        dico.fit(X)
        dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
                                           random_state=0)
        dico.fit(X)
        dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
                             random_state=0)
        dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
                             random_state=0)
    finally:
        sys.stdout = old_stdout
    # Sanity check on the last fitted estimator (verbose=2 run).
    assert dico.components_.shape == (n_components, n_features)
def test_dict_learning_online_estimator_shapes():
    """The minibatch estimator learns components of the expected shape."""
    n_atoms = 5
    model = MiniBatchDictionaryLearning(n_atoms, n_iter=20, random_state=0)
    model.fit(X)
    assert model.components_.shape == (n_atoms, n_features)
def test_dict_learning_online_overcomplete():
    """The minibatch estimator supports overcomplete dictionaries
    (n_components > n_features)."""
    n_components = 12
    dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
                                       random_state=0).fit(X)
    assert dico.components_.shape == (n_components, n_features)
def test_dict_learning_online_initialization():
    """With n_iter=0 the dictionary is exactly dict_init: no updates are
    applied, so components_ equals the provided initialization."""
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)
    dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
                                       dict_init=V, random_state=0).fit(X)
    assert_array_equal(dico.components_, V)
def test_dict_learning_online_readonly_initialization():
    """Fitting must not fail (nor try to write) when dict_init is a
    read-only array; only the absence of an exception is checked."""
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)
    V.setflags(write=False)
    MiniBatchDictionaryLearning(n_components, n_iter=1, dict_init=V,
                                random_state=0, shuffle=False).fit(X)
def test_dict_learning_online_partial_fit():
    """10 passes of sample-by-sample partial_fit approximately match a
    single fit() with batch_size=1 and the same initial dictionary."""
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
                                        batch_size=1,
                                        alpha=1, shuffle=False, dict_init=V,
                                        random_state=0).fit(X)
    dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
                                        n_iter=1, dict_init=V,
                                        random_state=0)
    for i in range(10):
        for sample in X:
            dict2.partial_fit(sample[np.newaxis, :])
    # The learned dictionary must actually encode X (not an all-zero code).
    assert not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)
    assert_array_almost_equal(dict1.components_, dict2.components_,
                              decimal=2)
def test_sparse_encode_shapes():
    """Every coding algorithm yields an (n_samples, n_components) code."""
    n_atoms = 12
    rng = np.random.RandomState(0)
    dictionary = rng.randn(n_atoms, n_features)
    # L2-normalize each atom of the random dictionary.
    dictionary /= np.sum(dictionary ** 2, axis=1)[:, np.newaxis]
    for algorithm in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
        code = sparse_encode(X, dictionary, algorithm=algorithm)
        assert code.shape == (n_samples, n_atoms)
@pytest.mark.parametrize("algo", [
    'lasso_lars',
    'lasso_cd',
    'threshold'
])
@pytest.mark.parametrize("positive", [False, True])
def test_sparse_encode_positivity(algo, positive):
    """positive=True forces a non-negative code; without it negative
    coefficients must appear."""
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    code = sparse_encode(X, V, algorithm=algo, positive=positive)
    if positive:
        assert (code >= 0).all()
    else:
        assert (code < 0).any()
@pytest.mark.parametrize("algo", ['lars', 'omp'])
def test_sparse_encode_unavailable_positivity(algo):
    """'lars' and 'omp' do not support positivity and must raise a
    ValueError naming the offending method."""
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    err_msg = "Positive constraint not supported for '{}' coding method."
    err_msg = err_msg.format(algo)
    with pytest.raises(ValueError, match=err_msg):
        sparse_encode(X, V, algorithm=algo, positive=True)
def test_sparse_encode_input():
    """Fortran-ordered input must encode identically to C-ordered input."""
    n_atoms = 100
    rng = np.random.RandomState(0)
    dictionary = rng.randn(n_atoms, n_features)
    # L2-normalize each atom of the random dictionary.
    dictionary /= np.sum(dictionary ** 2, axis=1)[:, np.newaxis]
    X_fortran = check_array(X, order='F')
    for algorithm in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
        code_c = sparse_encode(X, dictionary, algorithm=algorithm)
        code_f = sparse_encode(X_fortran, dictionary, algorithm=algorithm)
        assert_array_almost_equal(code_c, code_f)
def test_sparse_encode_error():
    """With a small alpha the code is non-trivial and reconstructs X with
    small Frobenius error."""
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    code = sparse_encode(X, V, alpha=0.001)
    assert not np.all(code == 0)
    assert np.sqrt(np.sum((np.dot(code, V) - X) ** 2)) < 0.1
def test_sparse_encode_error_default_sparsity():
    """With n_nonzero_coefs=None, 'omp' picks a default sparsity and still
    returns a code of the expected shape (local X shadows the module X)."""
    rng = np.random.RandomState(0)
    X = rng.randn(100, 64)
    D = rng.randn(2, 64)
    code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
                                          n_nonzero_coefs=None)
    assert code.shape == (100, 2)
def test_unknown_method():
    """sparse_encode rejects unrecognized algorithm names with ValueError."""
    rng = np.random.RandomState(0)
    dictionary = rng.randn(12, n_features)
    with pytest.raises(ValueError):
        sparse_encode(X, dictionary, algorithm="<unknown>")
def test_sparse_coder_estimator():
    """The SparseCoder estimator wrapper produces a non-trivial code that
    reconstructs X with small Frobenius error."""
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
                       transform_alpha=0.001).transform(X)
    assert not np.all(code == 0)
    assert np.sqrt(np.sum((np.dot(code, V) - X) ** 2)) < 0.1
def test_sparse_coder_parallel_mmap():
    """Non-regression test for
    https://github.com/scikit-learn/scikit-learn/issues/5956

    SparseCoder must not error when joblib passes read-only, memory-mapped
    arrays to child processes (n_jobs=2).
    """
    rng = np.random.RandomState(777)
    n_components, n_features = 40, 64
    init_dict = rng.rand(n_components, n_features)
    # Ensure that `data` is >2M. Joblib memory maps arrays
    # if they are larger than 1MB. The 4 accounts for float32
    # data type
    n_samples = int(2e6) // (4 * n_features)
    # Fix: draw from the seeded RNG instead of the unseeded global
    # np.random state, so the test data is reproducible across runs.
    data = rng.rand(n_samples, n_features).astype(np.float32)
    sc = SparseCoder(init_dict, transform_algorithm='omp', n_jobs=2)
    sc.fit_transform(data)
def test_sparse_coder_n_features_in():
    """n_features_in_ mirrors the dictionary's second dimension."""
    dictionary = np.array([[1, 2, 3], [1, 2, 3]])
    coder = SparseCoder(dictionary)
    assert coder.n_features_in_ == dictionary.shape[1]
| 36.16568 | 79 | 0.650305 | import pytest
import numpy as np
import itertools
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_array
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_sparse_encode_shapes_omp():
rng = np.random.RandomState(0)
algorithms = ['omp', 'lasso_lars', 'lasso_cd', 'lars', 'threshold']
for n_components, n_samples in itertools.product([1, 5], [1, 9]):
X_ = rng.randn(n_samples, n_features)
dictionary = rng.randn(n_components, n_features)
for algorithm, n_jobs in itertools.product(algorithms, [1, 3]):
code = sparse_encode(X_, dictionary, algorithm=algorithm,
n_jobs=n_jobs)
assert code.shape == (n_samples, n_components)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert dico.components_.shape == (n_components, n_features)
n_components = 1
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert dico.components_.shape == (n_components, n_features)
assert dico.transform(X).shape == (X.shape[0], n_components)
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert dico.components_.shape == (n_components, n_features)
def test_max_iter():
def ricker_function(resolution, center, width):
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / (np.sqrt(3 * width) * np.pi ** .25))
* (1 - (x - center) ** 2 / width ** 2)
* np.exp(-(x - center) ** 2 / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
transform_algorithm = 'lasso_cd'
resolution = 1024
subsampling = 3
n_components = resolution // subsampling
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
X = np.linspace(0, resolution - 1, resolution)
first_quarter = X < resolution / 4
X[first_quarter] = 3.
X[np.logical_not(first_quarter)] = -1.
X = X.reshape(1, -1)
with pytest.warns(ConvergenceWarning):
model = SparseCoder(D_multi, transform_algorithm=transform_algorithm,
transform_max_iter=1)
model.fit_transform(X)
with pytest.warns(None) as record:
model = SparseCoder(D_multi, transform_algorithm=transform_algorithm,
transform_max_iter=2000)
model.fit_transform(X)
assert not record.list
def test_dict_learning_lars_positive_parameter():
n_components = 5
alpha = 1
err_msg = "Positive constraint not supported for 'lars' coding method."
with pytest.raises(ValueError, match=err_msg):
dict_learning(X, n_components, alpha=alpha, positive_code=True)
@pytest.mark.parametrize("transform_algorithm", [
"lasso_lars",
"lasso_cd",
"threshold",
])
@pytest.mark.parametrize("positive_code", [False, True])
@pytest.mark.parametrize("positive_dict", [False, True])
def test_dict_learning_positivity(transform_algorithm,
positive_code,
positive_dict):
n_components = 5
dico = DictionaryLearning(
n_components, transform_algorithm=transform_algorithm, random_state=0,
positive_code=positive_code, positive_dict=positive_dict,
fit_algorithm="cd").fit(X)
code = dico.transform(X)
if positive_dict:
assert (dico.components_ >= 0).all()
else:
assert (dico.components_ < 0).any()
if positive_code:
assert (code >= 0).all()
else:
assert (code < 0).any()
@pytest.mark.parametrize("positive_dict", [False, True])
def test_dict_learning_lars_dict_positivity(positive_dict):
n_components = 5
dico = DictionaryLearning(
n_components, transform_algorithm="lars", random_state=0,
positive_dict=positive_dict, fit_algorithm="cd").fit(X)
if positive_dict:
assert (dico.components_ >= 0).all()
else:
assert (dico.components_ < 0).any()
def test_dict_learning_lars_code_positivity():
n_components = 5
dico = DictionaryLearning(
n_components, transform_algorithm="lars", random_state=0,
positive_code=True, fit_algorithm="cd").fit(X)
err_msg = "Positive constraint not supported for '{}' coding method."
err_msg = err_msg.format("lars")
with pytest.raises(ValueError, match=err_msg):
dico.transform(X)
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs>1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=4)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0,
n_jobs=4)
with ignore_warnings(category=ConvergenceWarning):
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only,
decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert len(np.flatnonzero(code)) == 3
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[np.newaxis, 1])
assert len(np.flatnonzero(code)) == 3
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
with pytest.raises(ValueError):
dico.fit(X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_almost_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert code.shape == (n_samples, n_components)
assert dictionary.shape == (n_components, n_features)
assert np.dot(code, dictionary).shape == X.shape
def test_dict_learning_online_lars_positive_parameter():
alpha = 1
err_msg = "Positive constraint not supported for 'lars' coding method."
with pytest.raises(ValueError, match=err_msg):
dict_learning_online(X, alpha=alpha, positive_code=True)
@pytest.mark.parametrize("transform_algorithm", [
"lasso_lars",
"lasso_cd",
"threshold",
])
@pytest.mark.parametrize("positive_code", [False, True])
@pytest.mark.parametrize("positive_dict", [False, True])
def test_minibatch_dictionary_learning_positivity(transform_algorithm,
positive_code,
positive_dict):
n_components = 8
dico = MiniBatchDictionaryLearning(
n_components, transform_algorithm=transform_algorithm, random_state=0,
positive_code=positive_code, positive_dict=positive_dict,
fit_algorithm='cd').fit(X)
code = dico.transform(X)
if positive_dict:
assert (dico.components_ >= 0).all()
else:
assert (dico.components_ < 0).any()
if positive_code:
assert (code >= 0).all()
else:
assert (code < 0).any()
@pytest.mark.parametrize("positive_dict", [False, True])
def test_minibatch_dictionary_learning_lars(positive_dict):
n_components = 8
dico = MiniBatchDictionaryLearning(
n_components, transform_algorithm="lars", random_state=0,
positive_dict=positive_dict, fit_algorithm='cd').fit(X)
if positive_dict:
assert (dico.components_ >= 0).all()
else:
assert (dico.components_ < 0).any()
@pytest.mark.parametrize("positive_code", [False, True])
@pytest.mark.parametrize("positive_dict", [False, True])
def test_dict_learning_online_positivity(positive_code,
positive_dict):
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
method="cd",
alpha=1, random_state=rng,
positive_dict=positive_dict,
positive_code=positive_code)
if positive_dict:
assert (dictionary >= 0).all()
else:
assert (dictionary < 0).any()
if positive_code:
assert (code >= 0).all()
else:
assert (code < 0).any()
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from io import StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert dico.components_.shape == (n_components, n_features)
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert dico.components_.shape == (n_components, n_features)
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert dico.components_.shape == (n_components, n_features)
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_readonly_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
V.setflags(write=False)
MiniBatchDictionaryLearning(n_components, n_iter=1, dict_init=V,
random_state=0, shuffle=False).fit(X)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert code.shape == (n_samples, n_components)
@pytest.mark.parametrize("algo", [
'lasso_lars',
'lasso_cd',
'threshold'
])
@pytest.mark.parametrize("positive", [False, True])
def test_sparse_encode_positivity(algo, positive):
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, algorithm=algo, positive=positive)
if positive:
assert (code >= 0).all()
else:
assert (code < 0).any()
@pytest.mark.parametrize("algo", ['lars', 'omp'])
def test_sparse_encode_unavailable_positivity(algo):
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
err_msg = "Positive constraint not supported for '{}' coding method."
err_msg = err_msg.format(algo)
with pytest.raises(ValueError, match=err_msg):
sparse_encode(X, V, algorithm=algo, positive=True)
def test_sparse_encode_input():
n_components = 100
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
Xf = check_array(X, order='F')
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
a = sparse_encode(X, V, algorithm=algo)
b = sparse_encode(Xf, V, algorithm=algo)
assert_array_almost_equal(a, b)
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert not np.all(code == 0)
assert np.sqrt(np.sum((np.dot(code, V) - X) ** 2)) < 0.1
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert code.shape == (100, 2)
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
with pytest.raises(ValueError):
sparse_encode(X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert not np.all(code == 0)
assert np.sqrt(np.sum((np.dot(code, V) - X) ** 2)) < 0.1
def test_sparse_coder_parallel_mmap():
# Non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/5956
# Test that SparseCoder does not error by passing reading only
# arrays to child processes
rng = np.random.RandomState(777)
n_components, n_features = 40, 64
init_dict = rng.rand(n_components, n_features)
# Ensure that `data` is >2M. Joblib memory maps arrays
# if they are larger than 1MB. The 4 accounts for float32
# data type
n_samples = int(2e6) // (4 * n_features)
data = np.random.rand(n_samples, n_features).astype(np.float32)
sc = SparseCoder(init_dict, transform_algorithm='omp', n_jobs=2)
sc.fit_transform(data)
def test_sparse_coder_n_features_in():
d = np.array([[1, 2, 3], [1, 2, 3]])
sc = SparseCoder(d)
assert sc.n_features_in_ == d.shape[1]
| true | true |
f733ca883ab59016f210e4e497aed74a462493ca | 3,486 | py | Python | tools/generate_taint_models/tests/get_graphql_sources_test.py | MedRedha/pyre-check | 1e1aaceb1bfd98de5fabe67d3839e20e5ed0cd31 | [
"MIT"
] | 1 | 2019-12-31T01:08:13.000Z | 2019-12-31T01:08:13.000Z | tools/generate_taint_models/tests/get_graphql_sources_test.py | MedRedha/pyre-check | 1e1aaceb1bfd98de5fabe67d3839e20e5ed0cd31 | [
"MIT"
] | null | null | null | tools/generate_taint_models/tests/get_graphql_sources_test.py | MedRedha/pyre-check | 1e1aaceb1bfd98de5fabe67d3839e20e5ed0cd31 | [
"MIT"
] | null | null | null | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import os # noqa
import unittest
from typing import Callable
from unittest.mock import patch
from graphql.type import (
GraphQLBoolean,
GraphQLField,
GraphQLID,
GraphQLNonNull,
GraphQLObjectType,
)
from graphql.type.definition import GraphQLType
from tools.pyre.tools.generate_taint_models import get_graphql_sources
from tools.pyre.tools.generate_taint_models.get_graphql_sources import (
GraphQLSourceGenerator,
)
from .test_functions import __name__ as qualifier, all_functions
class GetGraphQLSourcesTest(unittest.TestCase):
    """Tests for GraphQLSourceGenerator's taint-model generation."""
    @patch.object(get_graphql_sources, "Configuration")
    def test_gather_functions_to_model(self, configuration) -> None:
        """Resolvers attached to GraphQLObjectType instances in the configured
        module are collected; lambda resolvers are not (only function_1 and
        function_2 are expected)."""
        configuration.graphql_module = "tools.pyre.tools.generate_taint_models.tests"
        configuration.graphql_object_type = GraphQLObjectType
        functions = GraphQLSourceGenerator().gather_functions_to_model()
        self.assertSetEqual(set(functions), {function_1, function_2})
        # Run the same test again, passing in a list for 'graphql_module', to
        # ensure both work
        configuration.graphql_module = ["tools.pyre.tools.generate_taint_models.tests"]
        configuration.graphql_object_type = GraphQLObjectType
        functions = GraphQLSourceGenerator().gather_functions_to_model()
        self.assertSetEqual(set(functions), {function_1, function_2})
    def test_compute_models(self) -> None:
        """Generated stubs mark *args/**kwargs as UserControlled sources and
        every return as a ReturnedToUser sink."""
        source = "TaintSource[UserControlled]"
        sink = "TaintSink[ReturnedToUser]"
        self.assertEqual(
            [*map(str, GraphQLSourceGenerator().compute_models(all_functions))],
            [
                f"def {qualifier}.TestClass.methodA(self, x) -> {sink}: ...",
                f"def {qualifier}.TestClass.methodB(self, *args: {source}) -> {sink}: ...",
                f"def {qualifier}.testA() -> {sink}: ...",
                f"def {qualifier}.testB(x) -> {sink}: ...",
                f"def {qualifier}.testC(x) -> {sink}: ...",
                f"def {qualifier}.testD(x, *args: {source}) -> {sink}: ...",
                f"def {qualifier}.testE(x, **kwargs: {source}) -> {sink}: ...",
            ],
        )
# Defined for testing purposes (see 'test_gather_functions_to_model')
# These functions are not used otherwise.
def function_1() -> None:
    """No-op resolver; registered directly on DirectObjectType below."""
    pass
def function_2() -> None:
    """No-op resolver; attached indirectly via add_field below."""
    pass
# Create an object directly at the top level of the file so that
# 'test_gather_functions_to_model' can verify that we correctly identify the
# resolver
DirectObjectType = GraphQLObjectType(
    name="DirectObjectType",
    description="GraphQLObject directly created at top level",
    fields={
        # Field with no resolver: nothing to model.
        "no_resolver": GraphQLField(GraphQLNonNull(GraphQLID)),
        # Plain function resolver: expected to be gathered (function_1).
        "resolver": GraphQLField(GraphQLBoolean, resolver=function_1),
        # Lambda resolver: not expected in the gathered set (see the test).
        "lambda_resolver": GraphQLField(GraphQLBoolean, resolver=lambda x: x),
    },
)
def add_field(type: GraphQLType, name: str, resolver: Callable) -> None:
    """Attach a non-null ID field with the given resolver to `type`,
    mutating it in place; returns None."""
    # pyre-ignore[16]: Undefined attribute
    type._fields[name] = GraphQLField(GraphQLNonNull(GraphQLID), resolver=resolver)
# Indirectly add in an additional resolver, so that
# 'test_gather_functions_to_model' can verify that that resolver is detected
# NOTE: add_field returns None, so IndirectObjectType is bound to None; the
# assignment exists only for its side effect of mutating DirectObjectType.
IndirectObjectType = add_field(
    type=DirectObjectType, name="indirect", resolver=function_2
)
| 35.938144 | 91 | 0.697361 |
import os
import unittest
from typing import Callable
from unittest.mock import patch
from graphql.type import (
GraphQLBoolean,
GraphQLField,
GraphQLID,
GraphQLNonNull,
GraphQLObjectType,
)
from graphql.type.definition import GraphQLType
from tools.pyre.tools.generate_taint_models import get_graphql_sources
from tools.pyre.tools.generate_taint_models.get_graphql_sources import (
GraphQLSourceGenerator,
)
from .test_functions import __name__ as qualifier, all_functions
class GetGraphQLSourcesTest(unittest.TestCase):
    """Unit tests for GraphQLSourceGenerator taint-model generation."""

    @patch.object(get_graphql_sources, "Configuration")
    def test_gather_functions_to_model(self, configuration) -> None:
        """Resolvers registered on GraphQLObjectType fields are discovered,
        whether graphql_module is a single module path or a list of them."""
        configuration.graphql_module = "tools.pyre.tools.generate_taint_models.tests"
        configuration.graphql_object_type = GraphQLObjectType
        functions = GraphQLSourceGenerator().gather_functions_to_model()
        self.assertSetEqual(set(functions), {function_1, function_2})
        # Same discovery must also work when graphql_module is a list.
        configuration.graphql_module = ["tools.pyre.tools.generate_taint_models.tests"]
        configuration.graphql_object_type = GraphQLObjectType
        functions = GraphQLSourceGenerator().gather_functions_to_model()
        self.assertSetEqual(set(functions), {function_1, function_2})
    def test_compute_models(self) -> None:
        """Generated model stubs carry UserControlled sources on *args/**kwargs
        and a ReturnedToUser sink on every return."""
        source = "TaintSource[UserControlled]"
        sink = "TaintSink[ReturnedToUser]"
        self.assertEqual(
            [*map(str, GraphQLSourceGenerator().compute_models(all_functions))],
            [
                f"def {qualifier}.TestClass.methodA(self, x) -> {sink}: ...",
                f"def {qualifier}.TestClass.methodB(self, *args: {source}) -> {sink}: ...",
                f"def {qualifier}.testA() -> {sink}: ...",
                f"def {qualifier}.testB(x) -> {sink}: ...",
                f"def {qualifier}.testC(x) -> {sink}: ...",
                f"def {qualifier}.testD(x, *args: {source}) -> {sink}: ...",
                f"def {qualifier}.testE(x, **kwargs: {source}) -> {sink}: ...",
            ],
        )
# Defined for testing purposes (see 'test_gather_functions_to_model');
# not used otherwise.
def function_1() -> None:
    """No-op resolver wired to DirectObjectType's 'resolver' field below."""
    pass
def function_2() -> None:
    """No-op resolver attached indirectly via add_field below."""
    pass
# Object created at module top level so 'test_gather_functions_to_model'
# can verify the resolver is identified.
DirectObjectType = GraphQLObjectType(
    name="DirectObjectType",
    description="GraphQLObject directly created at top level",
    fields={
        # No resolver; not in the expected gathered set.
        "no_resolver": GraphQLField(GraphQLNonNull(GraphQLID)),
        # Named-function resolver; expected to be gathered as function_1.
        "resolver": GraphQLField(GraphQLBoolean, resolver=function_1),
        # Anonymous (lambda) resolver; also not in the expected gathered set.
        "lambda_resolver": GraphQLField(GraphQLBoolean, resolver=lambda x: x),
    },
)
def add_field(type: GraphQLType, name: str, resolver: Callable) -> None:
    """Attach `resolver` as a new non-null-ID field on the given object type.

    The parameter is deliberately named `type` (shadowing the builtin) because
    the call site below passes it by keyword.
    """
    type._fields[name] = GraphQLField(GraphQLNonNull(GraphQLID), resolver=resolver)
# Indirectly registered resolver; NOTE add_field returns None, so this name is
# bound to None — the call matters only for its side effect.
IndirectObjectType = add_field(
    type=DirectObjectType, name="indirect", resolver=function_2
)
| true | true |
f733cc1d46074ff7e7976f96027b8df60d1a9565 | 1,177 | py | Python | application/routes/leads/views.py | dejbug/full-stack-python-test-1 | c5256e24d33ef5f8e1cc9dc9330507c15421f944 | [
"MIT"
] | null | null | null | application/routes/leads/views.py | dejbug/full-stack-python-test-1 | c5256e24d33ef5f8e1cc9dc9330507c15421f944 | [
"MIT"
] | null | null | null | application/routes/leads/views.py | dejbug/full-stack-python-test-1 | c5256e24d33ef5f8e1cc9dc9330507c15421f944 | [
"MIT"
] | null | null | null | from flask import Blueprint, render_template, redirect, url_for, flash, jsonify
from sqlalchemy import exc
from application import db
from application.routes.leads.models import Lead
from application.routes.leads.forms import AddLeadForm
leads = Blueprint("leads", __name__)
@leads.route("/")
def index():
return render_template("leads_index.html", leads=Lead.query.all())
@leads.route("/add", methods=['GET', 'POST'])
def add():
form = AddLeadForm()
if form.validate_on_submit():
print(form)
item = Lead(**form.to_dict())
db.session.add(item)
try:
db.session.commit()
except exc.IntegrityError as e:
flash("Lead already exists for this email.")
print(e)
except exc.SQLAlchemyError as e:
flash("An unknown error occurred while adding Lead.")
print(e)
else:
return redirect(url_for("leads.index"))
elif form.errors:
flash(form.errors)
return render_template("leads_add.html", form=form)
@leads.route("/json/names")
def json_names():
names = tuple("#%d - %s <%s> {%s}" % (int(lead.id), lead.name, lead.email, lead.company) for lead in Lead.query.all())
return jsonify(names)
| 24.020408 | 120 | 0.682243 | from flask import Blueprint, render_template, redirect, url_for, flash, jsonify
from sqlalchemy import exc
from application import db
from application.routes.leads.models import Lead
from application.routes.leads.forms import AddLeadForm
leads = Blueprint("leads", __name__)
@leads.route("/")
def index():
return render_template("leads_index.html", leads=Lead.query.all())
@leads.route("/add", methods=['GET', 'POST'])
def add():
form = AddLeadForm()
if form.validate_on_submit():
print(form)
item = Lead(**form.to_dict())
db.session.add(item)
try:
db.session.commit()
except exc.IntegrityError as e:
flash("Lead already exists for this email.")
print(e)
except exc.SQLAlchemyError as e:
flash("An unknown error occurred while adding Lead.")
print(e)
else:
return redirect(url_for("leads.index"))
elif form.errors:
flash(form.errors)
return render_template("leads_add.html", form=form)
@leads.route("/json/names")
def json_names():
names = tuple("#%d - %s <%s> {%s}" % (int(lead.id), lead.name, lead.email, lead.company) for lead in Lead.query.all())
return jsonify(names)
| true | true |
f733cc585e3bd598315b5ed42c06657f598db3ad | 13,520 | py | Python | struct2tensor/expression_impl/map_prensor.py | jay90099/struct2tensor | 47d651757efa27586bf75f991b2174d8173a750b | [
"Apache-2.0"
] | 30 | 2019-10-07T21:31:44.000Z | 2022-03-30T17:11:44.000Z | struct2tensor/expression_impl/map_prensor.py | jay90099/struct2tensor | 47d651757efa27586bf75f991b2174d8173a750b | [
"Apache-2.0"
] | 2 | 2020-03-23T20:48:14.000Z | 2021-04-16T15:05:33.000Z | struct2tensor/expression_impl/map_prensor.py | jay90099/struct2tensor | 47d651757efa27586bf75f991b2174d8173a750b | [
"Apache-2.0"
] | 30 | 2019-07-16T13:01:53.000Z | 2022-03-01T22:04:36.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Arbitrary operations from sparse and ragged tensors to a leaf field.
There are two public methods of note right now: map_sparse_tensor
and map_ragged_tensor.
Assume expr is:
```
session: {
event: {
val_a: 10
val_b: 1
}
event: {
val_a: 20
val_b: 2
}
event: {
}
event: {
val_a: 40
}
event: {
val_b: 5
}
}
```
Either of the following alternatives will add val_a and val_b
to create val_sum.
map_sparse_tensor converts val_a and val_b to sparse tensors,
and then add them to produce val_sum.
```
new_root = map_prensor.map_sparse_tensor(
expr,
path.Path(["event"]),
[path.Path(["val_a"]), path.Path(["val_b"])],
lambda x,y: x + y,
False,
tf.int32,
"val_sum")
```
map_ragged_tensor converts val_a and val_b to ragged tensors,
and then add them to produce val_sum.
```
new_root = map_prensor.map_ragged_tensor(
expr,
path.Path(["event"]),
[path.Path(["val_a"]), path.Path(["val_b"])],
lambda x,y: x + y,
False,
tf.int32,
"val_sum")
```
The result of either is:
```
session: {
event: {
val_a: 10
val_b: 1
val_sum: 11
}
event: {
val_a: 20
val_b: 2
val_sum: 22
}
event: {
}
event: {
val_a: 40
val_sum: 40
}
event: {
val_b: 5
val_sum: 5
}
}
```
"""
from typing import Callable, FrozenSet, Optional, Sequence, Tuple
from struct2tensor import calculate_options
from struct2tensor import expression
from struct2tensor import expression_add
from struct2tensor import path
from struct2tensor import prensor
from struct2tensor.expression_impl import project
import tensorflow as tf
def map_sparse_tensor(root: expression.Expression, root_path: path.Path,
                      paths: Sequence[path.Path],
                      operation: Callable[..., tf.SparseTensor],
                      is_repeated: bool, dtype: tf.DType,
                      new_field_name: path.Step) -> expression.Expression:
  """Maps a sparse tensor.

  Args:
    root: the root of the expression.
    root_path: the path relative to which the sparse tensors are calculated.
    paths: the input paths relative to the root_path
    operation: a method that takes the list of sparse tensors as input and
      returns a sparse tensor.
    is_repeated: true if the result of operation is repeated.
    dtype: dtype of the result of the operation.
    new_field_name: root_path.get_child(new_field_name) is the path of the
      result.

  Returns:
    A new root expression containing the old root expression plus the new path,
    root_path.get_child(new_field_name), with the result of the operation.
  """
  # The impl also returns the new path; only the root is needed here.
  new_root, _ = _map_sparse_tensor_impl(root, root_path, paths, operation,
                                        is_repeated, dtype, new_field_name)
  return new_root
def map_ragged_tensor(root: expression.Expression, root_path: path.Path,
                      paths: Sequence[path.Path],
                      operation: Callable[..., tf.RaggedTensor],
                      is_repeated: bool, dtype: tf.DType,
                      new_field_name: path.Step) -> expression.Expression:
  """Map a ragged tensor.

  Args:
    root: the root of the expression.
    root_path: the path relative to which the ragged tensors are calculated.
    paths: the input paths relative to the root_path
    operation: a method that takes the list of ragged tensors as input and
      returns a ragged tensor.
    is_repeated: true if the result of operation is repeated.
    dtype: dtype of the result of the operation.
    new_field_name: root_path.get_child(new_field_name) is the path of the
      result.

  Returns:
    A new root expression containing the old root expression plus the new path,
    root_path.get_child(new_field_name), with the result of the operation.
  """
  # The impl also returns the new path; only the root is needed here.
  new_root, _ = _map_ragged_tensor_impl(root, root_path, paths, operation,
                                        is_repeated, dtype, new_field_name)
  return new_root
class _MapPrensorExpression(expression.Expression):
  """Maps the values of the given expression.

  It maps the value of a sub-tree (i.e. a Prensor) to a single prensor
  LeafNodeTensor. Therefore its sources are all the (known) descendants of
  `origin`: it usually should follow a project(...) to make known descendants
  clear.

  _MapPrensorExpression is intended to be a child of the origin. See
  map_prensor_impl for example usage.
  """

  def __init__(self, origin: expression.Expression,
               operation: Callable[[prensor.Prensor, calculate_options
                                    .Options], prensor.LeafNodeTensor],
               is_repeated: bool, dtype: tf.DType):
    super().__init__(is_repeated, dtype)
    # Subtree whose known descendants feed `operation`.
    self._origin = origin
    # (Prensor, Options) -> LeafNodeTensor; applied in calculate().
    self._operation = operation
  def _get_source_paths(self) -> Sequence[path.Path]:
    """Returns the source paths in a deterministic order."""
    # Sorted so get_source_expressions() and calculate() agree on ordering.
    result = [k for k in self._origin.get_known_descendants().keys()]
    result.sort()
    return result
  def get_source_expressions(self) -> Sequence[expression.Expression]:
    """Returns the origin's known descendants, ordered by path."""
    subtree = self._origin.get_known_descendants()
    source_paths = self._get_source_paths()
    return [subtree[k] for k in source_paths]
  def calculate(
      self,
      sources: Sequence[prensor.NodeTensor],
      destinations: Sequence[expression.Expression],
      options: calculate_options.Options,
      side_info: Optional[prensor.Prensor] = None) -> prensor.LeafNodeTensor:
    """Rebuilds the source Prensor from node tensors and applies the op."""
    # `sources` arrives in the same order as _get_source_paths(), so zipping
    # reconstructs the original subtree shape.
    source_tree = prensor.create_prensor_from_descendant_nodes(
        {k: v for k, v in zip(self._get_source_paths(), sources)})
    return self._operation(source_tree, options)
  def calculation_is_identity(self) -> bool:
    return False
  def calculation_equal(self, expr: expression.Expression) -> bool:
    # Operations are opaque callables, so only object identity is comparable.
    return self is expr
  def _get_child_impl(self,
                      field_name: path.Step) -> Optional[expression.Expression]:
    # Leaf result: no children.
    return None
  def known_field_names(self) -> FrozenSet[path.Step]:
    return frozenset()
def _as_leaf_node_no_checks(sparse_tensor: tf.SparseTensor,
                            is_repeated: bool) -> prensor.LeafNodeTensor:
  """Convert a SparseTensor into a LeafNodeTensor without any validation."""
  indices = sparse_tensor.indices
  # A repeated field has rank-2 indices (row, position): the row component is
  # the parent index. An optional field's rank-1 indices flatten directly.
  parent_index = (
      tf.transpose(indices)[0] if is_repeated else tf.reshape(indices, [-1]))
  return prensor.LeafNodeTensor(parent_index, sparse_tensor.values, is_repeated)
def _as_leaf_node_with_checks(sparse_tensor: tf.SparseTensor, is_repeated: bool,
                              required_batch_size: tf.Tensor
                             ) -> prensor.LeafNodeTensor:
  """Convert a SparseTensor into a LeafNodeTensor, asserting its shape first."""
  # Repeated fields must carry rank-2 indices; optional fields rank-1.
  expected_index_width = 2 if is_repeated else 1
  assertions = [
      tf.assert_equal(sparse_tensor.dense_shape[0], required_batch_size),
      tf.assert_equal(tf.shape(sparse_tensor.indices)[1],
                      expected_index_width),
  ]
  with tf.control_dependencies(assertions):
    # TODO(b/72947444): also verify the tensor is canonical — indices in
    # lexicographical order and fitting the dense shape; for repeated fields,
    # that it forms a "ragged array".
    return _as_leaf_node_no_checks(sparse_tensor, is_repeated)
def _as_leaf_node(sparse_tensor: tf.SparseTensor, is_repeated: bool,
                  required_batch_size: tf.Tensor,
                  options: calculate_options.Options) -> prensor.LeafNodeTensor:
  """Dispatch to the checked or unchecked conversion per `options`."""
  if not options.sparse_checks:
    return _as_leaf_node_no_checks(sparse_tensor, is_repeated)
  return _as_leaf_node_with_checks(sparse_tensor, is_repeated,
                                   required_batch_size)
def _map_prensor_impl(
    root: expression.Expression, root_path: path.Path,
    paths_needed: Sequence[path.Path],
    operation: Callable[[prensor.Prensor, calculate_options.Options], prensor
                        .LeafNodeTensor], is_repeated: bool, dtype: tf.DType,
    new_field_name: path.Step) -> Tuple[expression.Expression, path.Path]:
  """Map prensor implementation."""
  # Project the needed paths so the map expression's sources are explicit.
  origin = root.get_descendant_or_error(root_path)
  projected = project.project(origin, paths_needed)
  map_expr = _MapPrensorExpression(projected, operation, is_repeated, dtype)
  result_path = root_path.get_child(new_field_name)
  new_root = expression_add.add_paths(root, {result_path: map_expr})
  return new_root, result_path
def _map_sparse_tensor_impl(root: expression.Expression, root_path: path.Path,
                            paths: Sequence[path.Path],
                            operation: Callable[..., tf.SparseTensor],
                            is_repeated: bool, dtype: tf.DType,
                            new_field_name: path.Step
                           ) -> Tuple[expression.Expression, path.Path]:
  """Helper method for map_sparse_tensor.

  Wraps `operation` as a Prensor-to-LeafNodeTensor op and delegates to
  _map_prensor_impl. Returns the new root expression together with the
  path of the added field.
  """
  def new_op(pren: prensor.Prensor,
             options: calculate_options.Options) -> prensor.LeafNodeTensor:
    """Op for mapping prensor using the operation."""
    sparse_tensor_map = pren.get_sparse_tensors(options)
    sparse_tensors = [sparse_tensor_map[p] for p in paths]
    result_as_tensor = operation(*sparse_tensors)
    # The first input's batch size is the required batch size of the result.
    result = _as_leaf_node(result_as_tensor, is_repeated,
                           sparse_tensors[0].dense_shape[0], options)
    if result.values.dtype != dtype:
      raise ValueError("Type unmatched: actual ({})!= expected ({})".format(
          str(result.values.dtype), str(dtype)))
    return result
  return _map_prensor_impl(root, root_path, paths, new_op, is_repeated, dtype,
                           new_field_name)
def _ragged_as_leaf_node(ragged_tensor: tf.RaggedTensor, is_repeated: bool,
                         reference_ragged_tensor: tf.RaggedTensor,
                         options: calculate_options.Options
                        ) -> prensor.LeafNodeTensor:
  """Creates a LeafNodeTensor from a ragged tensor.

  Args:
    ragged_tensor: the result of the user-supplied operation.
    is_repeated: true if the output field is repeated.
    reference_ragged_tensor: an input ragged tensor whose row count the
      result must match.
    options: calculation options controlling runtime checks.

  Returns:
    A LeafNodeTensor whose parent index is the ragged tensor's row ids.

  Raises:
    ValueError: if both static row counts are known and do not match.
  """
  assertions = []
  size_dim = tf.compat.dimension_at_index(ragged_tensor.shape, 0).value
  reference_size_dim = tf.compat.dimension_at_index(
      reference_ragged_tensor.shape, 0).value
  if (size_dim is not None and reference_size_dim is not None):
    # Both row counts are statically known: check at graph-construction time.
    if size_dim != reference_size_dim:
      raise ValueError("Returned ragged tensor is not the right size.")
  elif options.ragged_checks:
    assertions.append(
        tf.assert_equal(ragged_tensor.nrows(), reference_ragged_tensor.nrows()))
  if not is_repeated:
    rowids = ragged_tensor.value_rowids()
    if options.ragged_checks:
      # Optional fields may have at most one value per row: row ids must be
      # strictly increasing.
      assertions.append(tf.compat.v1.assert_positive(rowids[1:] - rowids[:-1]))

  def _build_leaf() -> prensor.LeafNodeTensor:
    # Built inside the control-dependency scope (when there is one) so the
    # runtime assertions are guaranteed to execute first.
    return prensor.LeafNodeTensor(ragged_tensor.value_rowids(),
                                  ragged_tensor.values, is_repeated)

  # Previously the construction was duplicated in both branches; factored out.
  if assertions:
    with tf.control_dependencies(assertions):
      return _build_leaf()
  return _build_leaf()
def _map_ragged_tensor_impl(root: expression.Expression, root_path: path.Path,
                            paths: Sequence[path.Path],
                            operation: Callable[..., tf.RaggedTensor],
                            is_repeated: bool, dtype: tf.DType,
                            new_field_name: path.Step
                           ) -> Tuple[expression.Expression, path.Path]:
  """Maps a ragged tensor.

  Args:
    root: the root of the expression.
    root_path: the path relative to which the ragged tensors are calculated.
    paths: the input paths relative to the root_path
    operation: a method that takes the list of ragged tensors as input and
      returns a ragged tensor.
    is_repeated: true if the result of operation is repeated.
    dtype: dtype of the result of the operation.
    new_field_name: root_path.get_child(new_field_name) is the path of the
      result.

  Returns:
    An expression/path pair (expr,p) with a new root expression containing
    the old root expression plus the new path,
    root_path.get_child(new_field_name), with the result of the operation.
  """
  def new_op(tree: prensor.Prensor,
             options: calculate_options.Options) -> prensor.LeafNodeTensor:
    """Apply operation to tree."""
    ragged_tensor_map = tree.get_ragged_tensors(options)
    ragged_tensors = [ragged_tensor_map[p] for p in paths]
    result_as_tensor = operation(*ragged_tensors)
    # The first input serves as the row-count reference for the result.
    result = _ragged_as_leaf_node(result_as_tensor, is_repeated,
                                  ragged_tensors[0], options)
    if result.values.dtype != dtype:
      raise ValueError("Type unmatched: actual ({})!= expected ({})".format(
          str(result.values.dtype), str(dtype)))
    return result
  return _map_prensor_impl(root, root_path, paths, new_op, is_repeated, dtype,
                           new_field_name)
| 36.246649 | 80 | 0.678624 |
from typing import Callable, FrozenSet, Optional, Sequence, Tuple
from struct2tensor import calculate_options
from struct2tensor import expression
from struct2tensor import expression_add
from struct2tensor import path
from struct2tensor import prensor
from struct2tensor.expression_impl import project
import tensorflow as tf
def map_sparse_tensor(root: expression.Expression, root_path: path.Path,
                      paths: Sequence[path.Path],
                      operation: Callable[..., tf.SparseTensor],
                      is_repeated: bool, dtype: tf.DType,
                      new_field_name: path.Step) -> expression.Expression:
  """Applies `operation` to the sparse tensors at `paths` (relative to
  `root_path`) and stores the result at root_path.get_child(new_field_name).
  Returns the new root expression."""
  return _map_sparse_tensor_impl(root, root_path, paths, operation, is_repeated,
                                 dtype, new_field_name)[0]
def map_ragged_tensor(root: expression.Expression, root_path: path.Path,
                      paths: Sequence[path.Path],
                      operation: Callable[..., tf.RaggedTensor],
                      is_repeated: bool, dtype: tf.DType,
                      new_field_name: path.Step) -> expression.Expression:
  """Applies `operation` to the ragged tensors at `paths` (relative to
  `root_path`) and stores the result at root_path.get_child(new_field_name).
  Returns the new root expression."""
  return _map_ragged_tensor_impl(root, root_path, paths, operation, is_repeated,
                                 dtype, new_field_name)[0]
class _MapPrensorExpression(expression.Expression):
  """Maps the value of a sub-tree (a Prensor) to a single LeafNodeTensor.

  Its sources are all the known descendants of `origin`, so it usually
  follows a project(...) that makes those descendants explicit.
  """

  def __init__(self, origin: expression.Expression,
               operation: Callable[[prensor.Prensor, calculate_options
                                    .Options], prensor.LeafNodeTensor],
               is_repeated: bool, dtype: tf.DType):
    super().__init__(is_repeated, dtype)
    # Subtree whose known descendants feed `operation`.
    self._origin = origin
    # (Prensor, Options) -> LeafNodeTensor; applied in calculate().
    self._operation = operation
  def _get_source_paths(self) -> Sequence[path.Path]:
    """Returns the source paths, sorted for a deterministic order."""
    result = [k for k in self._origin.get_known_descendants().keys()]
    result.sort()
    return result
  def get_source_expressions(self) -> Sequence[expression.Expression]:
    """Returns the origin's known descendants, ordered by path."""
    subtree = self._origin.get_known_descendants()
    source_paths = self._get_source_paths()
    return [subtree[k] for k in source_paths]
  def calculate(
      self,
      sources: Sequence[prensor.NodeTensor],
      destinations: Sequence[expression.Expression],
      options: calculate_options.Options,
      side_info: Optional[prensor.Prensor] = None) -> prensor.LeafNodeTensor:
    """Rebuilds the source Prensor from node tensors and applies the op."""
    # `sources` arrives in the same order as _get_source_paths().
    source_tree = prensor.create_prensor_from_descendant_nodes(
        {k: v for k, v in zip(self._get_source_paths(), sources)})
    return self._operation(source_tree, options)
  def calculation_is_identity(self) -> bool:
    return False
  def calculation_equal(self, expr: expression.Expression) -> bool:
    # Operations are opaque callables; only identity is comparable.
    return self is expr
  def _get_child_impl(self,
                      field_name: path.Step) -> Optional[expression.Expression]:
    # Leaf result: no children.
    return None
  def known_field_names(self) -> FrozenSet[path.Step]:
    return frozenset()
def _as_leaf_node_no_checks(sparse_tensor: tf.SparseTensor,
                            is_repeated: bool) -> prensor.LeafNodeTensor:
  """Converts a SparseTensor into a LeafNodeTensor without validation."""
  if is_repeated:
    # Rank-2 indices (row, position): the row component is the parent index.
    parent_index = tf.transpose(sparse_tensor.indices)[0]
  else:
    parent_index = tf.reshape(sparse_tensor.indices, [-1])
  return prensor.LeafNodeTensor(parent_index, sparse_tensor.values, is_repeated)
def _as_leaf_node_with_checks(sparse_tensor: tf.SparseTensor, is_repeated: bool,
                              required_batch_size: tf.Tensor
                             ) -> prensor.LeafNodeTensor:
  """Converts a SparseTensor into a LeafNodeTensor, asserting its shape first."""
  assertions = [
      tf.assert_equal(sparse_tensor.dense_shape[0], required_batch_size)
  ]
  # Repeated fields must carry rank-2 indices; optional fields rank-1.
  if is_repeated:
    assertions.append(tf.assert_equal(tf.shape(sparse_tensor.indices)[1], 2))
  else:
    assertions.append(tf.assert_equal(tf.shape(sparse_tensor.indices)[1], 1))
  with tf.control_dependencies(assertions):
    return _as_leaf_node_no_checks(sparse_tensor, is_repeated)
def _as_leaf_node(sparse_tensor: tf.SparseTensor, is_repeated: bool,
                  required_batch_size: tf.Tensor,
                  options: calculate_options.Options) -> prensor.LeafNodeTensor:
  """Dispatches to the checked or unchecked conversion per `options`."""
  if options.sparse_checks:
    return _as_leaf_node_with_checks(sparse_tensor, is_repeated,
                                     required_batch_size)
  else:
    return _as_leaf_node_no_checks(sparse_tensor, is_repeated)
def _map_prensor_impl(
    root: expression.Expression, root_path: path.Path,
    paths_needed: Sequence[path.Path],
    operation: Callable[[prensor.Prensor, calculate_options.Options], prensor
                        .LeafNodeTensor], is_repeated: bool, dtype: tf.DType,
    new_field_name: path.Step) -> Tuple[expression.Expression, path.Path]:
  """Adds operation's result at root_path.get_child(new_field_name);
  returns (new root, new path)."""
  child_expr = root.get_descendant_or_error(root_path)
  # Project so the map expression's sources are explicit.
  sibling_child_expr = project.project(child_expr, paths_needed)
  new_field_expr = _MapPrensorExpression(sibling_child_expr, operation,
                                         is_repeated, dtype)
  new_path = root_path.get_child(new_field_name)
  return expression_add.add_paths(root, {new_path: new_field_expr}), new_path
def _map_sparse_tensor_impl(root: expression.Expression, root_path: path.Path,
                            paths: Sequence[path.Path],
                            operation: Callable[..., tf.SparseTensor],
                            is_repeated: bool, dtype: tf.DType,
                            new_field_name: path.Step
                           ) -> Tuple[expression.Expression, path.Path]:
  """Helper for map_sparse_tensor; returns (new root, new path)."""
  def new_op(pren: prensor.Prensor,
             options: calculate_options.Options) -> prensor.LeafNodeTensor:
    """Applies `operation` to the sparse tensors of the projected prensor."""
    sparse_tensor_map = pren.get_sparse_tensors(options)
    sparse_tensors = [sparse_tensor_map[p] for p in paths]
    result_as_tensor = operation(*sparse_tensors)
    # The first input's batch size is the required batch size of the result.
    result = _as_leaf_node(result_as_tensor, is_repeated,
                           sparse_tensors[0].dense_shape[0], options)
    if result.values.dtype != dtype:
      raise ValueError("Type unmatched: actual ({})!= expected ({})".format(
          str(result.values.dtype), str(dtype)))
    return result
  return _map_prensor_impl(root, root_path, paths, new_op, is_repeated, dtype,
                           new_field_name)
def _ragged_as_leaf_node(ragged_tensor: tf.RaggedTensor, is_repeated: bool,
                         reference_ragged_tensor: tf.RaggedTensor,
                         options: calculate_options.Options
                        ) -> prensor.LeafNodeTensor:
  """Creates a LeafNodeTensor from a ragged tensor, optionally checked."""
  assertions = []
  size_dim = tf.compat.dimension_at_index(ragged_tensor.shape, 0).value
  reference_size_dim = tf.compat.dimension_at_index(
      reference_ragged_tensor.shape, 0).value
  if (size_dim is not None and reference_size_dim is not None):
    # Both row counts statically known: check at graph-construction time.
    if size_dim != reference_size_dim:
      raise ValueError("Returned ragged tensor is not the right size.")
  elif options.ragged_checks:
    assertions.append(
        tf.assert_equal(ragged_tensor.nrows(), reference_ragged_tensor.nrows()))
  if not is_repeated:
    rowids = ragged_tensor.value_rowids()
    if options.ragged_checks:
      # Optional fields: at most one value per row (strictly increasing ids).
      assertions.append(tf.compat.v1.assert_positive(rowids[1:] - rowids[:-1]))
  if assertions:
    with tf.control_dependencies(assertions):
      parent_index = ragged_tensor.value_rowids()
      return prensor.LeafNodeTensor(parent_index, ragged_tensor.values,
                                    is_repeated)
  else:
    parent_index = ragged_tensor.value_rowids()
    return prensor.LeafNodeTensor(parent_index, ragged_tensor.values,
                                  is_repeated)
def _map_ragged_tensor_impl(root: expression.Expression, root_path: path.Path,
                            paths: Sequence[path.Path],
                            operation: Callable[..., tf.RaggedTensor],
                            is_repeated: bool, dtype: tf.DType,
                            new_field_name: path.Step
                           ) -> Tuple[expression.Expression, path.Path]:
  """Helper for map_ragged_tensor; returns (new root, new path)."""
  def new_op(tree: prensor.Prensor,
             options: calculate_options.Options) -> prensor.LeafNodeTensor:
    """Applies `operation` to the ragged tensors of the projected prensor."""
    ragged_tensor_map = tree.get_ragged_tensors(options)
    ragged_tensors = [ragged_tensor_map[p] for p in paths]
    result_as_tensor = operation(*ragged_tensors)
    # The first input serves as the row-count reference for the result.
    result = _ragged_as_leaf_node(result_as_tensor, is_repeated,
                                  ragged_tensors[0], options)
    if result.values.dtype != dtype:
      raise ValueError("Type unmatched: actual ({})!= expected ({})".format(
          str(result.values.dtype), str(dtype)))
    return result
  return _map_prensor_impl(root, root_path, paths, new_op, is_repeated, dtype,
                           new_field_name)
| true | true |
f733cd5cd7b26eff24b7f0922e58c6c60946ddc8 | 3,642 | py | Python | scripts/bitcoin/mlp_bitcoin_wf.py | Judene/fts_predictors | de018526579d7bce9f6619cf18fa2a634f29bede | [
"MIT"
] | null | null | null | scripts/bitcoin/mlp_bitcoin_wf.py | Judene/fts_predictors | de018526579d7bce9f6619cf18fa2a634f29bede | [
"MIT"
] | null | null | null | scripts/bitcoin/mlp_bitcoin_wf.py | Judene/fts_predictors | de018526579d7bce9f6619cf18fa2a634f29bede | [
"MIT"
] | null | null | null | import os
import pathlib
import pandas as pd
import matplotlib.pyplot as plt
from src.models.walk_forward_predictor import WalkForwardPredictor
from src.models.mlp import MultiLayerPerceptron
from src.utils import series_to_supervised
# TODO: Add description! Mention datasources
# Get data path or create a directory if it does not exist
# TODO: This is hacky. Need to fix
pathlib.Path(os.path.join(os.path.dirname(os.getcwd()), "..", "data")).mkdir(parents=True, exist_ok=True)
data_path = os.path.join(os.path.dirname(os.getcwd()), "..", "data")
# Check if file exists
if not os.path.exists(os.path.join(data_path, "bitcoin.csv")):
    raise ValueError("No data in data folder!")
# Get bitcoin data; first CSV column becomes the index.
bitcoin_data = pd.read_csv(os.path.join(data_path, "bitcoin.csv"), index_col=0)
# NOTE(review): read_csv returns a DataFrame, which has no .to_frame(); this
# line appears to assume a Series (single-column CSV with an older pandas
# squeeze behavior) — confirm against the actual bitcoin.csv.
bitcoin_data = bitcoin_data.to_frame().ffill().dropna()
# NOTE(review): `dates` is an alias of the full frame, not just the dates;
# it is not used again below.
dates = bitcoin_data
n_features = 2
# Reframe the series as supervised learning: lags t-2, t-1 -> target t.
bitcoin_data = series_to_supervised(bitcoin_data, n_in=n_features, n_out=1)
input_data = bitcoin_data.drop(['var1(t)'], axis=1)
output_data = bitcoin_data.drop(['var1(t-2)', 'var1(t-1)'], axis=1)
# Create MLP model: 3 hidden layers of 512 ReLU units with Gaussian-noise
# dropout, trained on MSE.
mlp_model = MultiLayerPerceptron(
    name="mlp_bitcoin_wf",
    num_inputs=n_features,
    num_outputs=1,
    # If true, training info is outputted to stdout
    keras_verbose=False,
    # A summary of the NN is printed to stdout
    print_model_summary=True,
    # ff_layers = [units, activation, regularization, dropout, use_bias, noise]
    ff_layers=[
        [512, "relu", 0.0, 0.2, True, "gaussian"],
        [512, "relu", 0.0, 0.2, True, "gaussian"],
        [512, "relu", 0.0, 0.2, True, "gaussian"]
    ],
    # The final output layer's activation function
    final_activation="tanh",
    # The objective function for the NN
    objective="mse",
    # The maximum number of epochs to run
    epochs=5,
    # The batch size to use in the NN
    batch_size=64,
    # The learning rate used in optimization
    learning_rate=0.001,
    # If this many stagnant epochs are seen, stop training
    stopping_patience=50
)
# Initiate our model: walk-forward evaluation over a 252-step sliding window,
# predicting 10 steps ahead with a 21-step validation slice.
# NOTE(review): start_date 2004-11-08 predates bitcoin price history — confirm
# the predictor clips to the data's actual range.
wf_model = WalkForwardPredictor(model=mlp_model, start_date="2004-11-08", end_date="2021-06-01",
                                input_pct_change=1, output_pct_change=1, window_size=252, frequency=7,
                                prediction_length=10, validation_size=21, sliding_window=True,
                                random_validation=False, train_from_scratch=False)
# Train our model through time, and obtain the predictions and errors
mlp_predictions, mlp_error = wf_model.train_and_predict(input_data, output_data)
print("MLP Walk Forward")
print(mlp_predictions)
print(mlp_error)
# sav_dates = pd.DataFrame(mlp_error)
# sav_dates = sav_dates.reset_index()
#
# saved = pd.read_csv(r'C:/Users/ELNA SIMONIS/Documents/Results/TESTING.csv')
# saved = saved.drop(['Unnamed: 0'], axis=1)
#
# saved['Dates'] = sav_dates['Date']
# saved = saved.set_index('Dates')
# saved['error'] = saved['TRUE'] - saved['PRED']
# saved = saved.dropna()
#
# # Calculate RMSE
# from sklearn.metrics import mean_squared_error, mean_absolute_error
# from math import sqrt
#
# mse = mean_squared_error(saved['TRUE'], saved['PRED'])
# rmse = sqrt(mean_squared_error(saved['TRUE'], saved['PRED']))
# mae = mean_absolute_error(saved['TRUE'], saved['PRED'])
#
# # Create a plot of our errors through time
#
# plt.figure(figsize=(10, 5))
# figuur = saved['error'] ** 2.0
# figuur.plot(color='blue')
# plt.xlabel('Dates', fontsize=15, fontweight='bold', color='black')
# plt.ylabel('Error', fontsize=15, fontweight='bold', color='black')
# plt.yticks(fontsize=10)
# plt.xticks(fontsize=10)
# plt.show()
# plt.close()
| 33.722222 | 105 | 0.699341 | import os
import pathlib
import pandas as pd
import matplotlib.pyplot as plt
from src.models.walk_forward_predictor import WalkForwardPredictor
from src.models.mlp import MultiLayerPerceptron
from src.utils import series_to_supervised
# Ensure the data directory exists, then locate it relative to the cwd.
pathlib.Path(os.path.join(os.path.dirname(os.getcwd()), "..", "data")).mkdir(parents=True, exist_ok=True)
data_path = os.path.join(os.path.dirname(os.getcwd()), "..", "data")
if not os.path.exists(os.path.join(data_path, "bitcoin.csv")):
    raise ValueError("No data in data folder!")
bitcoin_data = pd.read_csv(os.path.join(data_path, "bitcoin.csv"), index_col=0)
# NOTE(review): .to_frame() only exists on a Series — confirm bitcoin.csv has
# a single data column / relies on an older pandas squeeze behavior.
bitcoin_data = bitcoin_data.to_frame().ffill().dropna()
# NOTE(review): alias of the full frame, not just the dates; unused below.
dates = bitcoin_data
n_features = 2
# Reframe as supervised learning: lags t-2, t-1 -> target t.
bitcoin_data = series_to_supervised(bitcoin_data, n_in=n_features, n_out=1)
input_data = bitcoin_data.drop(['var1(t)'], axis=1)
output_data = bitcoin_data.drop(['var1(t-2)', 'var1(t-1)'], axis=1)
# MLP: 3 hidden layers of 512 ReLU units with Gaussian-noise dropout.
mlp_model = MultiLayerPerceptron(
    name="mlp_bitcoin_wf",
    num_inputs=n_features,
    num_outputs=1,
    keras_verbose=False,
    print_model_summary=True,
    # ff_layers = [units, activation, regularization, dropout, use_bias, noise]
    ff_layers=[
        [512, "relu", 0.0, 0.2, True, "gaussian"],
        [512, "relu", 0.0, 0.2, True, "gaussian"],
        [512, "relu", 0.0, 0.2, True, "gaussian"]
    ],
    final_activation="tanh",
    # The objective function for the NN
    objective="mse",
    # The maximum number of epochs to run
    epochs=5,
    # The batch size to use in the NN
    batch_size=64,
    # The learning rate used in optimization
    learning_rate=0.001,
    # If this many stagnant epochs are seen, stop training
    stopping_patience=50
)
# Initiate our model
wf_model = WalkForwardPredictor(model=mlp_model, start_date="2004-11-08", end_date="2021-06-01",
                                input_pct_change=1, output_pct_change=1, window_size=252, frequency=7,
                                prediction_length=10, validation_size=21, sliding_window=True,
                                random_validation=False, train_from_scratch=False)
# Train our model through time, and obtain the predictions and errors
mlp_predictions, mlp_error = wf_model.train_and_predict(input_data, output_data)
print("MLP Walk Forward")
print(mlp_predictions)
print(mlp_error)
# sav_dates = pd.DataFrame(mlp_error)
# sav_dates = sav_dates.reset_index()
#
# saved = pd.read_csv(r'C:/Users/ELNA SIMONIS/Documents/Results/TESTING.csv')
# saved = saved.drop(['Unnamed: 0'], axis=1)
#
# saved['Dates'] = sav_dates['Date']
# saved = saved.set_index('Dates')
# saved['error'] = saved['TRUE'] - saved['PRED']
# saved = saved.dropna()
#
# # Calculate RMSE
# from sklearn.metrics import mean_squared_error, mean_absolute_error
# from math import sqrt
#
# mse = mean_squared_error(saved['TRUE'], saved['PRED'])
# rmse = sqrt(mean_squared_error(saved['TRUE'], saved['PRED']))
# mae = mean_absolute_error(saved['TRUE'], saved['PRED'])
#
# # Create a plot of our errors through time
#
# plt.figure(figsize=(10, 5))
# figuur = saved['error'] ** 2.0
# figuur.plot(color='blue')
# plt.xlabel('Dates', fontsize=15, fontweight='bold', color='black')
# plt.ylabel('Error', fontsize=15, fontweight='bold', color='black')
# plt.yticks(fontsize=10)
# plt.xticks(fontsize=10)
# plt.show()
# plt.close()
| true | true |
f733ce30454b45b5468fa20788bec4378abc6679 | 20,309 | py | Python | test/merge_sharding.py | paralin/vitess | 7b048c5442679ce6cf48773cf17a184c1ce91295 | [
"Apache-2.0"
] | 8 | 2017-08-14T15:19:04.000Z | 2021-06-07T10:36:52.000Z | test/merge_sharding.py | paralin/vitess | 7b048c5442679ce6cf48773cf17a184c1ce91295 | [
"Apache-2.0"
] | 19 | 2020-09-25T15:41:41.000Z | 2022-03-25T23:06:54.000Z | test/merge_sharding.py | paralin/vitess | 7b048c5442679ce6cf48773cf17a184c1ce91295 | [
"Apache-2.0"
] | 7 | 2021-03-07T03:24:39.000Z | 2022-02-16T06:46:10.000Z | #!/usr/bin/env python
#
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This test covers the workflow for a sharding merge.
We start with 3 shards: -40, 40-80, and 80-. We then merge -40 and 40-80
into -80.
Note this test is just testing the full workflow, not corner cases or error
cases. These are mostly done by the other resharding tests.
"""
import logging
import unittest
from vtdb import keyrange_constants
import base_sharding
import environment
import tablet
import utils
# initial shards
# shard -40
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()
# shard 40-80
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly = tablet.Tablet()
# shard 80-
shard_2_master = tablet.Tablet()
shard_2_replica = tablet.Tablet()
shard_2_rdonly = tablet.Tablet()
# merged shard -80
shard_dest_master = tablet.Tablet()
shard_dest_replica = tablet.Tablet()
shard_dest_rdonly = tablet.Tablet()
all_tablets = [shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly,
shard_2_master, shard_2_replica, shard_2_rdonly,
shard_dest_master, shard_dest_replica, shard_dest_rdonly]
def setUpModule():
try:
environment.topo_server().setup()
setup_procs = [t.init_mysql() for t in all_tablets]
utils.Vtctld().start()
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
teardown_procs = [t.teardown_mysql() for t in all_tablets]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
for t in all_tablets:
t.remove_tree()
class TestMergeSharding(unittest.TestCase, base_sharding.BaseShardingTest):
# create_schema will create the same schema on the keyspace
# then insert some values
def _create_schema(self):
if base_sharding.keyspace_id_type == keyrange_constants.KIT_BYTES:
t = 'varbinary(64)'
else:
t = 'bigint(20) unsigned'
# Note that the primary key columns are not defined first on purpose to test
# that a reordered column list is correctly used everywhere in vtworker.
create_table_template = '''create table %s(
msg varchar(64),
custom_ksid_col ''' + t + ''' not null,
id bigint not null,
parent_id bigint not null,
primary key (parent_id, id),
index by_msg (msg)
) Engine=InnoDB'''
create_view_template = (
'create view %s'
'(id, msg, custom_ksid_col) as select id, msg, custom_ksid_col '
'from %s')
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ('resharding1'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ('resharding2'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_view_template % ('view1', 'resharding1'),
'test_keyspace'],
auto_log=True)
def _insert_startup_values(self):
# row covered by shard -40 (should be merged).
self._insert_value(shard_0_master, 'resharding1', 1, 'msg1',
0x1000000000000000)
# row covered by shard 40-80 (should be merged).
self._insert_value(shard_1_master, 'resharding1', 2, 'msg2',
0x5000000000000000)
# row covered by shard 80- (must not be merged).
self._insert_value(shard_2_master, 'resharding1', 3, 'msg3',
0xD000000000000000)
def _check_startup_values(self):
# check first two values are in the right shard
self._check_value(shard_dest_master, 'resharding1', 1, 'msg1',
0x1000000000000000)
self._check_value(shard_dest_replica, 'resharding1', 1, 'msg1',
0x1000000000000000)
self._check_value(shard_dest_rdonly, 'resharding1', 1, 'msg1',
0x1000000000000000)
self._check_value(shard_dest_master, 'resharding1', 2, 'msg2',
0x5000000000000000)
self._check_value(shard_dest_replica, 'resharding1', 2, 'msg2',
0x5000000000000000)
self._check_value(shard_dest_rdonly, 'resharding1', 2, 'msg2',
0x5000000000000000)
def _insert_lots(self, count, base=0):
if count > 10000:
self.assertFail('bad count passed in, only support up to 10000')
for i in xrange(count):
self._insert_value(shard_0_master, 'resharding1', 1000000 + base + i,
'msg-range0-%d' % i, 0x2000000000000000 + base + i)
self._insert_value(shard_1_master, 'resharding1', 1010000 + base + i,
'msg-range1-%d' % i, 0x6000000000000000 + base + i)
# _check_lots returns how many of the values we have, in percents.
def _check_lots(self, count, base=0):
found = 0
for i in xrange(count):
if self._is_value_present_and_correct(shard_dest_replica, 'resharding1',
1000000 + base + i,
'msg-range0-%d' % i,
0x2000000000000000 + base + i):
found += 1
if self._is_value_present_and_correct(shard_dest_replica, 'resharding1',
1010000 + base + i,
'msg-range1-%d' % i,
0x6000000000000000 + base + i):
found += 1
percent = found * 100 / count / 2
logging.debug('I have %d%% of the data', percent)
return percent
def _check_lots_timeout(self, count, threshold, timeout, base=0):
while True:
value = self._check_lots(count, base=base)
if value >= threshold:
return value
timeout = utils.wait_step('waiting for %d%% of the data' % threshold,
timeout, sleep_time=1)
def test_merge_sharding(self):
utils.run_vtctl(['CreateKeyspace',
'--sharding_column_name', 'custom_ksid_col',
'--sharding_column_type', base_sharding.keyspace_id_type,
'test_keyspace'])
shard_0_master.init_tablet('replica', 'test_keyspace', '-40')
shard_0_replica.init_tablet('replica', 'test_keyspace', '-40')
shard_0_rdonly.init_tablet('rdonly', 'test_keyspace', '-40')
shard_1_master.init_tablet('replica', 'test_keyspace', '40-80')
shard_1_replica.init_tablet('replica', 'test_keyspace', '40-80')
shard_1_rdonly.init_tablet('rdonly', 'test_keyspace', '40-80')
shard_2_master.init_tablet('replica', 'test_keyspace', '80-')
shard_2_replica.init_tablet('replica', 'test_keyspace', '80-')
shard_2_rdonly.init_tablet('rdonly', 'test_keyspace', '80-')
# rebuild and check SrvKeyspace
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
self.assertEqual(ks['sharding_column_name'], 'custom_ksid_col')
# create databases so vttablet can start behaving normally
for t in [shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly,
shard_2_master, shard_2_replica, shard_2_rdonly]:
t.create_db('vt_test_keyspace')
t.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
# won't be serving, no replication state
for t in [shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly,
shard_2_master, shard_2_replica, shard_2_rdonly]:
t.wait_for_vttablet_state('NOT_SERVING')
# reparent to make the tablets work
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-40',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/40-80',
shard_1_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-',
shard_2_master.tablet_alias], auto_log=True)
# create the tables
self._create_schema()
self._insert_startup_values()
# run a health check on source replicas so they respond to discovery
# (for binlog players) and on the source rdonlys (for workers)
for t in [shard_0_replica, shard_1_replica]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
for t in [shard_0_rdonly, shard_1_rdonly]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
# create the merge shards
shard_dest_master.init_tablet('replica', 'test_keyspace', '-80')
shard_dest_replica.init_tablet('replica', 'test_keyspace', '-80')
shard_dest_rdonly.init_tablet('rdonly', 'test_keyspace', '-80')
# start vttablet on the destination shard (no db created,
# so they're all not serving)
for t in [shard_dest_master, shard_dest_replica, shard_dest_rdonly]:
t.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
for t in [shard_dest_master, shard_dest_replica, shard_dest_rdonly]:
t.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80',
shard_dest_master.tablet_alias], auto_log=True)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
auto_log=True)
utils.check_srv_keyspace(
'test_nj', 'test_keyspace',
'Partitions(master): -40 40-80 80-\n'
'Partitions(rdonly): -40 40-80 80-\n'
'Partitions(replica): -40 40-80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# copy the schema
utils.run_vtctl(['CopySchemaShard', shard_0_rdonly.tablet_alias,
'test_keyspace/-80'], auto_log=True)
# copy the data (will also start filtered replication), reset source
# Run vtworker as daemon for the following SplitClone commands.
worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
['--cell', 'test_nj', '--command_display_interval', '10ms',
'--use_v3_resharding_mode=false'],
auto_log=True)
# Initial clone (online).
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--offline=false',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/-80'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
2, 0, 0, 0)
# Reset vtworker such that we can run the next command.
workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Modify the destination shard. SplitClone will revert the changes.
# Delete row 1 (provokes an insert).
shard_dest_master.mquery('vt_test_keyspace',
'delete from resharding1 where id=1', write=True)
# Update row 2 (provokes an update).
shard_dest_master.mquery(
'vt_test_keyspace', "update resharding1 set msg='msg-not-2' where id=2",
write=True)
# Insert row 0 (provokes a delete).
self._insert_value(shard_dest_master, 'resharding1', 0, 'msg0',
0x5000000000000000)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/-80'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Change tablets, which were taken offline, back to rdonly.
utils.run_vtctl(['ChangeSlaveType', shard_0_rdonly.tablet_alias,
'rdonly'], auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly.tablet_alias,
'rdonly'], auto_log=True)
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
1, 1, 1, 0)
self.verify_reconciliation_counters(worker_port, 'Offline', 'resharding1',
0, 0, 0, 2)
# Terminate worker daemon because it is no longer needed.
utils.kill_sub_process(worker_proc, soft=True)
# check the startup values are in the right place
self._check_startup_values()
# check the schema too
utils.run_vtctl(['ValidateSchemaKeyspace', 'test_keyspace'], auto_log=True)
# check binlog player variables
self.check_destination_master(shard_dest_master,
['test_keyspace/-40', 'test_keyspace/40-80'])
# check that binlog server exported the stats vars
self.check_binlog_server_vars(shard_0_replica, horizontal=True)
self.check_binlog_server_vars(shard_1_replica, horizontal=True)
# testing filtered replication: insert a bunch of data on shard 0 and 1,
# check we get most of it after a few seconds, wait for binlog server
# timeout, check we get all of it.
logging.debug('Inserting lots of data on source shards')
self._insert_lots(1000)
logging.debug('Checking 80 percent of data is sent quickly')
v = self._check_lots_timeout(1000, 80, 10)
if v != 100:
# small optimization: only do this check if we don't have all the data
# already anyway.
logging.debug('Checking all data goes through eventually')
self._check_lots_timeout(1000, 100, 30)
self.check_binlog_player_vars(shard_dest_master,
['test_keyspace/-40', 'test_keyspace/40-80'],
seconds_behind_master_max=30)
self.check_binlog_server_vars(shard_0_replica, horizontal=True,
min_statements=1000, min_transactions=1000)
self.check_binlog_server_vars(shard_1_replica, horizontal=True,
min_statements=1000, min_transactions=1000)
# use vtworker to compare the data (after health-checking the destination
# rdonly tablets so discovery works)
utils.run_vtctl(['RunHealthCheck', shard_dest_rdonly.tablet_alias])
logging.debug('Running vtworker SplitDiff on first half')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'--source_uid', '1',
'test_keyspace/-80'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_0_rdonly.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_dest_rdonly.tablet_alias,
'rdonly'], auto_log=True)
logging.debug('Running vtworker SplitDiff on second half')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'--source_uid', '2',
'test_keyspace/-80'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_dest_rdonly.tablet_alias,
'rdonly'], auto_log=True)
# get status for the destination master tablet, make sure we have it all
self.check_running_binlog_player(shard_dest_master, 3000, 1000)
# check destination master query service is not running
utils.check_tablet_query_service(self, shard_dest_master, False, False)
stream_health = utils.run_vtctl_json(['VtTabletStreamHealth',
'-count', '1',
shard_dest_master.tablet_alias])
logging.debug('Got health: %s', str(stream_health))
self.assertIn('realtime_stats', stream_health)
self.assertNotIn('serving', stream_health)
# check the destination master 3 is healthy, even though its query
# service is not running (if not healthy this would exception out)
shard_dest_master.get_healthz()
# now serve rdonly from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/-80', 'rdonly'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -40 40-80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -40 40-80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# now serve replica from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/-80', 'replica'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -40 40-80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# now serve master from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/-80', 'master'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_0_master, False, True)
utils.check_tablet_query_service(self, shard_1_master, False, True)
# check the binlog players are gone now
self.check_no_binlog_player(shard_dest_master)
# kill the original tablets in the original shards
tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly])
for t in [shard_0_replica, shard_0_rdonly,
shard_1_replica, shard_1_rdonly]:
utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
for t in [shard_0_master, shard_1_master]:
utils.run_vtctl(['DeleteTablet', '-allow_master', t.tablet_alias],
auto_log=True)
# delete the original shards
utils.run_vtctl(['DeleteShard', 'test_keyspace/-40'], auto_log=True)
utils.run_vtctl(['DeleteShard', 'test_keyspace/40-80'], auto_log=True)
# rebuild the serving graph, all mentions of the old shards should be gone
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# kill everything else
tablet.kill_tablets([shard_2_master, shard_2_replica, shard_2_rdonly,
shard_dest_master, shard_dest_replica,
shard_dest_rdonly])
if __name__ == '__main__':
utils.main()
| 43.581545 | 80 | 0.639766 |
import logging
import unittest
from vtdb import keyrange_constants
import base_sharding
import environment
import tablet
import utils
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly = tablet.Tablet()
shard_2_master = tablet.Tablet()
shard_2_replica = tablet.Tablet()
shard_2_rdonly = tablet.Tablet()
shard_dest_master = tablet.Tablet()
shard_dest_replica = tablet.Tablet()
shard_dest_rdonly = tablet.Tablet()
all_tablets = [shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly,
shard_2_master, shard_2_replica, shard_2_rdonly,
shard_dest_master, shard_dest_replica, shard_dest_rdonly]
def setUpModule():
try:
environment.topo_server().setup()
setup_procs = [t.init_mysql() for t in all_tablets]
utils.Vtctld().start()
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
teardown_procs = [t.teardown_mysql() for t in all_tablets]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
for t in all_tablets:
t.remove_tree()
class TestMergeSharding(unittest.TestCase, base_sharding.BaseShardingTest):
def _create_schema(self):
if base_sharding.keyspace_id_type == keyrange_constants.KIT_BYTES:
t = 'varbinary(64)'
else:
t = 'bigint(20) unsigned'
create_table_template = '''create table %s(
msg varchar(64),
custom_ksid_col ''' + t + ''' not null,
id bigint not null,
parent_id bigint not null,
primary key (parent_id, id),
index by_msg (msg)
) Engine=InnoDB'''
create_view_template = (
'create view %s'
'(id, msg, custom_ksid_col) as select id, msg, custom_ksid_col '
'from %s')
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ('resharding1'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ('resharding2'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_view_template % ('view1', 'resharding1'),
'test_keyspace'],
auto_log=True)
def _insert_startup_values(self):
self._insert_value(shard_0_master, 'resharding1', 1, 'msg1',
0x1000000000000000)
self._insert_value(shard_1_master, 'resharding1', 2, 'msg2',
0x5000000000000000)
self._insert_value(shard_2_master, 'resharding1', 3, 'msg3',
0xD000000000000000)
def _check_startup_values(self):
self._check_value(shard_dest_master, 'resharding1', 1, 'msg1',
0x1000000000000000)
self._check_value(shard_dest_replica, 'resharding1', 1, 'msg1',
0x1000000000000000)
self._check_value(shard_dest_rdonly, 'resharding1', 1, 'msg1',
0x1000000000000000)
self._check_value(shard_dest_master, 'resharding1', 2, 'msg2',
0x5000000000000000)
self._check_value(shard_dest_replica, 'resharding1', 2, 'msg2',
0x5000000000000000)
self._check_value(shard_dest_rdonly, 'resharding1', 2, 'msg2',
0x5000000000000000)
def _insert_lots(self, count, base=0):
if count > 10000:
self.assertFail('bad count passed in, only support up to 10000')
for i in xrange(count):
self._insert_value(shard_0_master, 'resharding1', 1000000 + base + i,
'msg-range0-%d' % i, 0x2000000000000000 + base + i)
self._insert_value(shard_1_master, 'resharding1', 1010000 + base + i,
'msg-range1-%d' % i, 0x6000000000000000 + base + i)
def _check_lots(self, count, base=0):
found = 0
for i in xrange(count):
if self._is_value_present_and_correct(shard_dest_replica, 'resharding1',
1000000 + base + i,
'msg-range0-%d' % i,
0x2000000000000000 + base + i):
found += 1
if self._is_value_present_and_correct(shard_dest_replica, 'resharding1',
1010000 + base + i,
'msg-range1-%d' % i,
0x6000000000000000 + base + i):
found += 1
percent = found * 100 / count / 2
logging.debug('I have %d%% of the data', percent)
return percent
def _check_lots_timeout(self, count, threshold, timeout, base=0):
while True:
value = self._check_lots(count, base=base)
if value >= threshold:
return value
timeout = utils.wait_step('waiting for %d%% of the data' % threshold,
timeout, sleep_time=1)
def test_merge_sharding(self):
utils.run_vtctl(['CreateKeyspace',
'--sharding_column_name', 'custom_ksid_col',
'--sharding_column_type', base_sharding.keyspace_id_type,
'test_keyspace'])
shard_0_master.init_tablet('replica', 'test_keyspace', '-40')
shard_0_replica.init_tablet('replica', 'test_keyspace', '-40')
shard_0_rdonly.init_tablet('rdonly', 'test_keyspace', '-40')
shard_1_master.init_tablet('replica', 'test_keyspace', '40-80')
shard_1_replica.init_tablet('replica', 'test_keyspace', '40-80')
shard_1_rdonly.init_tablet('rdonly', 'test_keyspace', '40-80')
shard_2_master.init_tablet('replica', 'test_keyspace', '80-')
shard_2_replica.init_tablet('replica', 'test_keyspace', '80-')
shard_2_rdonly.init_tablet('rdonly', 'test_keyspace', '80-')
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
self.assertEqual(ks['sharding_column_name'], 'custom_ksid_col')
for t in [shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly,
shard_2_master, shard_2_replica, shard_2_rdonly]:
t.create_db('vt_test_keyspace')
t.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
for t in [shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly,
shard_2_master, shard_2_replica, shard_2_rdonly]:
t.wait_for_vttablet_state('NOT_SERVING')
# reparent to make the tablets work
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-40',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/40-80',
shard_1_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-',
shard_2_master.tablet_alias], auto_log=True)
# create the tables
self._create_schema()
self._insert_startup_values()
# run a health check on source replicas so they respond to discovery
# (for binlog players) and on the source rdonlys (for workers)
for t in [shard_0_replica, shard_1_replica]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
for t in [shard_0_rdonly, shard_1_rdonly]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
# create the merge shards
shard_dest_master.init_tablet('replica', 'test_keyspace', '-80')
shard_dest_replica.init_tablet('replica', 'test_keyspace', '-80')
shard_dest_rdonly.init_tablet('rdonly', 'test_keyspace', '-80')
# start vttablet on the destination shard (no db created,
# so they're all not serving)
for t in [shard_dest_master, shard_dest_replica, shard_dest_rdonly]:
t.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
for t in [shard_dest_master, shard_dest_replica, shard_dest_rdonly]:
t.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80',
shard_dest_master.tablet_alias], auto_log=True)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
auto_log=True)
utils.check_srv_keyspace(
'test_nj', 'test_keyspace',
'Partitions(master): -40 40-80 80-\n'
'Partitions(rdonly): -40 40-80 80-\n'
'Partitions(replica): -40 40-80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.run_vtctl(['CopySchemaShard', shard_0_rdonly.tablet_alias,
'test_keyspace/-80'], auto_log=True)
worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
['--cell', 'test_nj', '--command_display_interval', '10ms',
'--use_v3_resharding_mode=false'],
auto_log=True)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--offline=false',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/-80'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
2, 0, 0, 0)
workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
utils.wait_procs([workerclient_proc])
shard_dest_master.mquery('vt_test_keyspace',
'delete from resharding1 where id=1', write=True)
shard_dest_master.mquery(
'vt_test_keyspace', "update resharding1 set msg='msg-not-2' where id=2",
write=True)
self._insert_value(shard_dest_master, 'resharding1', 0, 'msg0',
0x5000000000000000)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/-80'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
utils.run_vtctl(['ChangeSlaveType', shard_0_rdonly.tablet_alias,
'rdonly'], auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly.tablet_alias,
'rdonly'], auto_log=True)
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
1, 1, 1, 0)
self.verify_reconciliation_counters(worker_port, 'Offline', 'resharding1',
0, 0, 0, 2)
utils.kill_sub_process(worker_proc, soft=True)
self._check_startup_values()
utils.run_vtctl(['ValidateSchemaKeyspace', 'test_keyspace'], auto_log=True)
self.check_destination_master(shard_dest_master,
['test_keyspace/-40', 'test_keyspace/40-80'])
self.check_binlog_server_vars(shard_0_replica, horizontal=True)
self.check_binlog_server_vars(shard_1_replica, horizontal=True)
logging.debug('Inserting lots of data on source shards')
self._insert_lots(1000)
logging.debug('Checking 80 percent of data is sent quickly')
v = self._check_lots_timeout(1000, 80, 10)
if v != 100:
# already anyway.
logging.debug('Checking all data goes through eventually')
self._check_lots_timeout(1000, 100, 30)
self.check_binlog_player_vars(shard_dest_master,
['test_keyspace/-40', 'test_keyspace/40-80'],
seconds_behind_master_max=30)
self.check_binlog_server_vars(shard_0_replica, horizontal=True,
min_statements=1000, min_transactions=1000)
self.check_binlog_server_vars(shard_1_replica, horizontal=True,
min_statements=1000, min_transactions=1000)
# use vtworker to compare the data (after health-checking the destination
# rdonly tablets so discovery works)
utils.run_vtctl(['RunHealthCheck', shard_dest_rdonly.tablet_alias])
logging.debug('Running vtworker SplitDiff on first half')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'--source_uid', '1',
'test_keyspace/-80'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_0_rdonly.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_dest_rdonly.tablet_alias,
'rdonly'], auto_log=True)
logging.debug('Running vtworker SplitDiff on second half')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'--source_uid', '2',
'test_keyspace/-80'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_dest_rdonly.tablet_alias,
'rdonly'], auto_log=True)
# get status for the destination master tablet, make sure we have it all
self.check_running_binlog_player(shard_dest_master, 3000, 1000)
# check destination master query service is not running
utils.check_tablet_query_service(self, shard_dest_master, False, False)
stream_health = utils.run_vtctl_json(['VtTabletStreamHealth',
'-count', '1',
shard_dest_master.tablet_alias])
logging.debug('Got health: %s', str(stream_health))
self.assertIn('realtime_stats', stream_health)
self.assertNotIn('serving', stream_health)
# check the destination master 3 is healthy, even though its query
# service is not running (if not healthy this would exception out)
shard_dest_master.get_healthz()
# now serve rdonly from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/-80', 'rdonly'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -40 40-80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -40 40-80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# now serve replica from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/-80', 'replica'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -40 40-80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# now serve master from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/-80', 'master'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_0_master, False, True)
utils.check_tablet_query_service(self, shard_1_master, False, True)
# check the binlog players are gone now
self.check_no_binlog_player(shard_dest_master)
# kill the original tablets in the original shards
tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly])
for t in [shard_0_replica, shard_0_rdonly,
shard_1_replica, shard_1_rdonly]:
utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
for t in [shard_0_master, shard_1_master]:
utils.run_vtctl(['DeleteTablet', '-allow_master', t.tablet_alias],
auto_log=True)
# delete the original shards
utils.run_vtctl(['DeleteShard', 'test_keyspace/-40'], auto_log=True)
utils.run_vtctl(['DeleteShard', 'test_keyspace/40-80'], auto_log=True)
# rebuild the serving graph, all mentions of the old shards should be gone
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# kill everything else
tablet.kill_tablets([shard_2_master, shard_2_replica, shard_2_rdonly,
shard_dest_master, shard_dest_replica,
shard_dest_rdonly])
if __name__ == '__main__':
utils.main()
| true | true |
f733cec75b8c2ac115c69e7f96db4eb07a055e76 | 1,348 | py | Python | CPL/__main__.py | Krzem5/Python-Launguage | 92f6cf9a331b5d52c5507eac291f1f51b9e97f3b | [
"BSD-3-Clause"
] | null | null | null | CPL/__main__.py | Krzem5/Python-Launguage | 92f6cf9a331b5d52c5507eac291f1f51b9e97f3b | [
"BSD-3-Clause"
] | null | null | null | CPL/__main__.py | Krzem5/Python-Launguage | 92f6cf9a331b5d52c5507eac291f1f51b9e97f3b | [
"BSD-3-Clause"
] | null | null | null | from .compiler import Compiler
import colorama
import ctypes
import glob
import os
import sys
colorama.init()
def write(t):
print("\033[22;37m"+t,end="")
def write_warn(w):
print("\033[2;33m"+w,end="")
def write_error(e):
print("\033[2;31m"+e,end="")
if ("--compile" in sys.argv):
sys.argv=sys.argv[1:]
D=("--debug" in sys.argv)
pl=[]
for a in sys.argv:
if (a[0]!="-"):
pl+=[a]
if (len(pl)==0):
pl=glob.glob("*.cpl")
for p in pl:
p=os.path.abspath(os.path.join(os.getcwd(),p))
if (not os.path.isfile(p)):
continue
ctypes.windll.kernel32.SetConsoleTitleW("Compiling \u2012 "+os.path.abspath(p))
op=os.path.abspath(p).rsplit(".",1)[0]+".ccpl"
with open(p,"r") as f:
e=Compiler.compile(f.read(),S=[os.path.abspath(p)],D=D)
if (hasattr(e,"ERROR") and e.ERROR==True):
write_error(e.print())
else:
with open(op,"wb") as f:
f.write(e)
elif ("--run" in sys.argv):
sys.argv=sys.argv[1:]
p=None
for a in sys.argv:
if (a[0]!="-"):
p=a+".ccpl"
if (p is None):
p=glob.glob(os.path.join(os.getcwd(),"*.ccpl"))[0]
p=os.path.abspath(os.path.join(os.getcwd(),p))
ctypes.windll.kernel32.SetConsoleTitleW("Running \u2012 "+os.path.abspath(p))
with open(os.path.abspath(p),"rb") as f:
Compiler.run(f.read())
else:
write(f"cpl v{Compiler.RE_VERSION}\n")
write(f"cplc v{Compiler.C_VERSION}")
| 22.466667 | 81 | 0.629822 | from .compiler import Compiler
import colorama
import ctypes
import glob
import os
import sys
colorama.init()
def write(t):
print("\033[22;37m"+t,end="")
def write_warn(w):
print("\033[2;33m"+w,end="")
def write_error(e):
print("\033[2;31m"+e,end="")
if ("--compile" in sys.argv):
sys.argv=sys.argv[1:]
D=("--debug" in sys.argv)
pl=[]
for a in sys.argv:
if (a[0]!="-"):
pl+=[a]
if (len(pl)==0):
pl=glob.glob("*.cpl")
for p in pl:
p=os.path.abspath(os.path.join(os.getcwd(),p))
if (not os.path.isfile(p)):
continue
ctypes.windll.kernel32.SetConsoleTitleW("Compiling \u2012 "+os.path.abspath(p))
op=os.path.abspath(p).rsplit(".",1)[0]+".ccpl"
with open(p,"r") as f:
e=Compiler.compile(f.read(),S=[os.path.abspath(p)],D=D)
if (hasattr(e,"ERROR") and e.ERROR==True):
write_error(e.print())
else:
with open(op,"wb") as f:
f.write(e)
elif ("--run" in sys.argv):
sys.argv=sys.argv[1:]
p=None
for a in sys.argv:
if (a[0]!="-"):
p=a+".ccpl"
if (p is None):
p=glob.glob(os.path.join(os.getcwd(),"*.ccpl"))[0]
p=os.path.abspath(os.path.join(os.getcwd(),p))
ctypes.windll.kernel32.SetConsoleTitleW("Running \u2012 "+os.path.abspath(p))
with open(os.path.abspath(p),"rb") as f:
Compiler.run(f.read())
else:
write(f"cpl v{Compiler.RE_VERSION}\n")
write(f"cplc v{Compiler.C_VERSION}")
| true | true |
f733d08bbbf58ea5f8b9e29fdcb3d39f309b195d | 7,029 | py | Python | pruning/cifar10_fbnet/supernet_main_file.py | sunghern/Auto-Compression | 7c1123e5ffb63b0c34bef2db40dbfb560cb25c2e | [
"MIT"
] | 11 | 2019-11-26T04:33:31.000Z | 2022-03-28T11:35:54.000Z | pruning/cifar10_fbnet/supernet_main_file.py | sunghern/Auto-Compression | 7c1123e5ffb63b0c34bef2db40dbfb560cb25c2e | [
"MIT"
] | 22 | 2019-11-26T06:48:07.000Z | 2021-12-20T12:50:16.000Z | pruning/cifar10_fbnet/supernet_main_file.py | sunghern/Auto-Compression | 7c1123e5ffb63b0c34bef2db40dbfb560cb25c2e | [
"MIT"
] | 10 | 2019-11-26T04:33:57.000Z | 2021-10-12T04:30:48.000Z | import numpy as np
import torch
from torch import nn
from tensorboardX import SummaryWriter
from scipy.special import softmax
import argparse
from general_functions.dataloaders import get_loaders, get_test_loader
from general_functions.utils import get_logger, weights_init, load, create_directories_from_list, \
check_tensor_in_list, writh_new_ARCH_to_fbnet_modeldef
from supernet_functions.lookup_table_builder import LookUpTable_HIGH
from supernet_functions.model_supernet import FBNet_Stochastic_SuperNet, SupernetLoss
from supernet_functions.training_functions_supernet import TrainerSupernet
from supernet_functions.config_for_supernet import CONFIG_SUPERNET
from fbnet_building_blocks.fbnet_modeldef import MODEL_ARCH
import copy
import torch.nn.utils.prune as prune
# Command line interface, parsed at import time so that `args` is available
# to both train_supernet() and the sampling entry point below.
parser = argparse.ArgumentParser("action")
parser.add_argument('--train_or_sample', type=str, default='', \
                    help='train means training of the SuperNet, sample means sample from SuperNet\'s results')
parser.add_argument('--architecture_name', type=str, default='', \
                    help='Name of an architecture to be sampled')
parser.add_argument('--hardsampling_bool_value', type=str, default='True', \
                    help='If not False or 0 -> do hardsampling, else - softmax sampling')
parser.add_argument('--prune', type=str, default='channel', \
                    help='channel or group')
args = parser.parse_args()
def train_supernet():
    """Train the FBNet stochastic supernet on CIFAR-10.

    Builds the data loaders, the supernet, and two optimizers -- SGD for the
    regular network weights and Adam for the architecture parameters
    ("thetas") -- then runs the bi-level training loop.  Afterwards the
    argmax operation of every searched layer is printed and written to
    ``result.txt``.
    """
    # Fix every RNG seed for reproducibility.
    manual_seed = 1
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)
    torch.backends.cudnn.benchmark = True

    create_directories_from_list([CONFIG_SUPERNET['logging']['path_to_tensorboard_logs']])

    logger = get_logger(CONFIG_SUPERNET['logging']['path_to_log_file'])
    writer = SummaryWriter(log_dir=CONFIG_SUPERNET['logging']['path_to_tensorboard_logs'])

    #### DataLoading
    # The training set is split: one part updates the weights (w), the other
    # updates the architecture thetas.
    train_w_loader, train_thetas_loader = get_loaders(CONFIG_SUPERNET['dataloading']['w_share_in_train'],
                                                      CONFIG_SUPERNET['dataloading']['batch_size'],
                                                      CONFIG_SUPERNET['dataloading']['path_to_save_data'],
                                                      logger)
    test_loader = get_test_loader(CONFIG_SUPERNET['dataloading']['batch_size'],
                                  CONFIG_SUPERNET['dataloading']['path_to_save_data'])

    lookup_table = LookUpTable_HIGH(calulate_latency=CONFIG_SUPERNET['lookup_table']['create_from_scratch'], prune_type=args.prune)

    ###MODEL
    model = FBNet_Stochastic_SuperNet(lookup_table, cnt_classes=10).cuda()
    model = model.apply(weights_init)
    model = nn.DataParallel(model, device_ids=[0])

    # Make any pruning reparametrization permanent before training.
    # NOTE(review): prune.remove() raises if no pruning named 'weight' is
    # attached to the module -- confirm FBNet_Stochastic_SuperNet applies
    # pruning during construction.
    for m in model.modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
            prune.remove(m, 'weight')

    #### Loss, Optimizer and Scheduler
    criterion = SupernetLoss().cuda()

    # Architecture parameters (thetas) get their own Adam optimizer; all
    # remaining parameters are trained with SGD.
    thetas_params = [param for name, param in model.named_parameters() if 'thetas' in name]
    params_except_thetas = [param for param in model.parameters() if not check_tensor_in_list(param, thetas_params)]

    w_optimizer = torch.optim.SGD(params=params_except_thetas,
                                  lr=CONFIG_SUPERNET['optimizer']['w_lr'],
                                  momentum=CONFIG_SUPERNET['optimizer']['w_momentum'],
                                  weight_decay=CONFIG_SUPERNET['optimizer']['w_weight_decay'])

    theta_optimizer = torch.optim.Adam(params=thetas_params,
                                       lr=CONFIG_SUPERNET['optimizer']['thetas_lr'],
                                       weight_decay=CONFIG_SUPERNET['optimizer']['thetas_weight_decay'])

    last_epoch = -1
    w_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(w_optimizer,
                                                             T_max=CONFIG_SUPERNET['train_settings']['cnt_epochs'],
                                                             last_epoch=last_epoch)

    #### Training Loop
    trainer = TrainerSupernet(criterion, w_optimizer, theta_optimizer, w_scheduler, logger, writer, True)
    trainer.train_loop(train_w_loader, train_thetas_loader, test_loader, model)
    ops_names = [op_name for op_name in lookup_table.lookup_table_operations]
    '''
    for layer in model.module.stages_to_search:
        #layer.thetas = nn.Parameter(torch.Tensor([1.0 / 1 for i in range(1)]).cuda())
        print(layer.thetas)
    '''
    # Dump the argmax (most likely) operation of every searched layer.
    f = open("result.txt", "w")
    for i, layer in enumerate(model.module.stages_to_search):
        print('Layer {}: '.format(i) + ops_names[np.argmax(layer.thetas.detach().cpu().numpy())], end=" ")
        f.write('Layer {}: '.format(i) + ops_names[np.argmax(layer.thetas.detach().cpu().numpy())]+'\n')
    f.close()
    print()
def sample_architecture_from_the_supernet(unique_name_of_arch, hardsampling=True):
    """Sample a concrete architecture from a trained supernet checkpoint.

    Args:
        unique_name_of_arch: name under which the sampled architecture is
            written into fbnet_building_blocks/fbnet_modeldef.py; it can then
            be trained with train_architecture_main_file.py.
        hardsampling: if True, take per layer the operation with the largest
            theta; if False, apply softmax to the thetas and sample from the
            resulting distribution.
    """
    logger = get_logger(CONFIG_SUPERNET['logging']['path_to_log_file'])

    # Bugfix: this module only imports LookUpTable_HIGH -- there is no plain
    # `LookUpTable` in scope, so the original `LookUpTable()` call raised
    # NameError before any sampling could happen.
    lookup_table = LookUpTable_HIGH()
    model = FBNet_Stochastic_SuperNet(lookup_table, cnt_classes=10).cuda()
    model = nn.DataParallel(model)

    load(model, CONFIG_SUPERNET['train_settings']['path_to_save_model'])

    ops_names = [op_name for op_name in lookup_table.lookup_table_operations]
    cnt_ops = len(ops_names)

    arch_operations = []
    if hardsampling:
        # Deterministic: pick the highest-weighted operation per layer.
        for layer in model.module.stages_to_search:
            arch_operations.append(ops_names[np.argmax(layer.thetas.detach().cpu().numpy())])
    else:
        # Stochastic: sample an operation index from softmax(thetas).
        rng = np.linspace(0, cnt_ops - 1, cnt_ops, dtype=int)
        for layer in model.module.stages_to_search:
            distribution = softmax(layer.thetas.detach().cpu().numpy())
            arch_operations.append(ops_names[np.random.choice(rng, p=distribution)])

    logger.info("Sampled Architecture: " + " - ".join(arch_operations))
    writh_new_ARCH_to_fbnet_modeldef(arch_operations, my_unique_name_for_ARCH=unique_name_of_arch)
    logger.info("CONGRATULATIONS! New architecture " + unique_name_of_arch \
                + " was written into fbnet_building_blocks/fbnet_modeldef.py")
if __name__ == "__main__":
    # Dispatch on the --train_or_sample CLI argument.
    assert args.train_or_sample in ['train', 'sample']
    if args.train_or_sample == 'train':
        train_supernet()
    elif args.train_or_sample == 'sample':
        # A fresh, not-yet-registered architecture name is required.
        assert args.architecture_name != '' and args.architecture_name not in MODEL_ARCH
        # Any value other than 'False'/'0' enables hard (argmax) sampling.
        hardsampling = False if args.hardsampling_bool_value in ['False', '0'] else True
        sample_architecture_from_the_supernet(unique_name_of_arch=args.architecture_name, hardsampling=hardsampling)
| 52.849624 | 131 | 0.683454 | import numpy as np
import torch
from torch import nn
from tensorboardX import SummaryWriter
from scipy.special import softmax
import argparse
from general_functions.dataloaders import get_loaders, get_test_loader
from general_functions.utils import get_logger, weights_init, load, create_directories_from_list, \
check_tensor_in_list, writh_new_ARCH_to_fbnet_modeldef
from supernet_functions.lookup_table_builder import LookUpTable_HIGH
from supernet_functions.model_supernet import FBNet_Stochastic_SuperNet, SupernetLoss
from supernet_functions.training_functions_supernet import TrainerSupernet
from supernet_functions.config_for_supernet import CONFIG_SUPERNET
from fbnet_building_blocks.fbnet_modeldef import MODEL_ARCH
import copy
import torch.nn.utils.prune as prune
parser = argparse.ArgumentParser("action")
parser.add_argument('--train_or_sample', type=str, default='', \
help='train means training of the SuperNet, sample means sample from SuperNet\'s results')
parser.add_argument('--architecture_name', type=str, default='', \
help='Name of an architecture to be sampled')
parser.add_argument('--hardsampling_bool_value', type=str, default='True', \
help='If not False or 0 -> do hardsampling, else - softmax sampling')
parser.add_argument('--prune', type=str, default='channel', \
help='channel or group')
args = parser.parse_args()
def train_supernet():
manual_seed = 1
np.random.seed(manual_seed)
torch.manual_seed(manual_seed)
torch.cuda.manual_seed_all(manual_seed)
torch.backends.cudnn.benchmark = True
create_directories_from_list([CONFIG_SUPERNET['logging']['path_to_tensorboard_logs']])
logger = get_logger(CONFIG_SUPERNET['logging']['path_to_log_file'])
writer = SummaryWriter(log_dir=CONFIG_SUPERNET['logging']['path_to_tensorboard_logs'])
#### DataLoading
train_w_loader, train_thetas_loader = get_loaders(CONFIG_SUPERNET['dataloading']['w_share_in_train'],
CONFIG_SUPERNET['dataloading']['batch_size'],
CONFIG_SUPERNET['dataloading']['path_to_save_data'],
logger)
test_loader = get_test_loader(CONFIG_SUPERNET['dataloading']['batch_size'],
CONFIG_SUPERNET['dataloading']['path_to_save_data'])
lookup_table = LookUpTable_HIGH(calulate_latency=CONFIG_SUPERNET['lookup_table']['create_from_scratch'], prune_type=args.prune)
###MODEL
model = FBNet_Stochastic_SuperNet(lookup_table, cnt_classes=10).cuda()
model = model.apply(weights_init)
model = nn.DataParallel(model, device_ids=[0])
for m in model.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
prune.remove(m, 'weight')
#### Loss, Optimizer and Scheduler
criterion = SupernetLoss().cuda()
thetas_params = [param for name, param in model.named_parameters() if 'thetas' in name]
params_except_thetas = [param for param in model.parameters() if not check_tensor_in_list(param, thetas_params)]
w_optimizer = torch.optim.SGD(params=params_except_thetas,
lr=CONFIG_SUPERNET['optimizer']['w_lr'],
momentum=CONFIG_SUPERNET['optimizer']['w_momentum'],
weight_decay=CONFIG_SUPERNET['optimizer']['w_weight_decay'])
theta_optimizer = torch.optim.Adam(params=thetas_params,
lr=CONFIG_SUPERNET['optimizer']['thetas_lr'],
weight_decay=CONFIG_SUPERNET['optimizer']['thetas_weight_decay'])
last_epoch = -1
w_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(w_optimizer,
T_max=CONFIG_SUPERNET['train_settings']['cnt_epochs'],
last_epoch=last_epoch)
#### Training Loop
trainer = TrainerSupernet(criterion, w_optimizer, theta_optimizer, w_scheduler, logger, writer, True)
trainer.train_loop(train_w_loader, train_thetas_loader, test_loader, model)
ops_names = [op_name for op_name in lookup_table.lookup_table_operations]
f = open("result.txt", "w")
for i, layer in enumerate(model.module.stages_to_search):
print('Layer {}: '.format(i) + ops_names[np.argmax(layer.thetas.detach().cpu().numpy())], end=" ")
f.write('Layer {}: '.format(i) + ops_names[np.argmax(layer.thetas.detach().cpu().numpy())]+'\n')
f.close()
print()
# Arguments:
# hardsampling=True means get operations with the largest weights
# =False means apply softmax to weights and sample from the distribution
# unique_name_of_arch - name of architecture. will be written into fbnet_building_blocks/fbnet_modeldef.py
# and can be used in the training by train_architecture_main_file.py
def sample_architecture_from_the_supernet(unique_name_of_arch, hardsampling=True):
logger = get_logger(CONFIG_SUPERNET['logging']['path_to_log_file'])
lookup_table = LookUpTable()
model = FBNet_Stochastic_SuperNet(lookup_table, cnt_classes=10).cuda()
model = nn.DataParallel(model)
load(model, CONFIG_SUPERNET['train_settings']['path_to_save_model'])
ops_names = [op_name for op_name in lookup_table.lookup_table_operations]
cnt_ops = len(ops_names)
arch_operations=[]
if hardsampling:
for layer in model.module.stages_to_search:
arch_operations.append(ops_names[np.argmax(layer.thetas.detach().cpu().numpy())])
else:
rng = np.linspace(0, cnt_ops - 1, cnt_ops, dtype=int)
for layer in model.module.stages_to_search:
distribution = softmax(layer.thetas.detach().cpu().numpy())
arch_operations.append(ops_names[np.random.choice(rng, p=distribution)])
logger.info("Sampled Architecture: " + " - ".join(arch_operations))
writh_new_ARCH_to_fbnet_modeldef(arch_operations, my_unique_name_for_ARCH=unique_name_of_arch)
logger.info("CONGRATULATIONS! New architecture " + unique_name_of_arch \
+ " was written into fbnet_building_blocks/fbnet_modeldef.py")
if __name__ == "__main__":
assert args.train_or_sample in ['train', 'sample']
if args.train_or_sample == 'train':
train_supernet()
elif args.train_or_sample == 'sample':
assert args.architecture_name != '' and args.architecture_name not in MODEL_ARCH
hardsampling = False if args.hardsampling_bool_value in ['False', '0'] else True
sample_architecture_from_the_supernet(unique_name_of_arch=args.architecture_name, hardsampling=hardsampling)
| true | true |
f733d0a6b325a0935dd9642c3cc568d46f001f7a | 2,823 | py | Python | main/test.py | levishai/3DMPPE_POSENET_RELEASE | e364053b5a4e51f4a84eb50abb26026094931d90 | [
"MIT"
] | 608 | 2019-07-28T11:58:51.000Z | 2022-03-31T03:34:24.000Z | main/test.py | levishai/3DMPPE_POSENET_RELEASE | e364053b5a4e51f4a84eb50abb26026094931d90 | [
"MIT"
] | 111 | 2019-07-31T08:58:22.000Z | 2022-03-07T08:00:00.000Z | main/test.py | levishai/3DMPPE_POSENET_RELEASE | e364053b5a4e51f4a84eb50abb26026094931d90 | [
"MIT"
] | 142 | 2019-08-05T14:34:59.000Z | 2022-03-31T03:34:25.000Z | import argparse
from tqdm import tqdm
import numpy as np
import cv2
from config import cfg
import torch
from base import Tester
from utils.vis import vis_keypoints
from utils.pose_utils import flip
import torch.backends.cudnn as cudnn
def parse_args():
    """Parse the command line.

    Recognised options:
        --gpu         GPU ids: either a single id ("0") or an inclusive
                      range "a-b", expanded to "a,a+1,...,b".
        --test_epoch  epoch number of the checkpoint to evaluate.

    Both options are mandatory; missing values trigger an assertion.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=str, dest='gpu_ids')
    parser.add_argument('--test_epoch', type=str, dest='test_epoch')
    args = parser.parse_args()

    if not args.gpu_ids:
        assert 0, "Please set proper gpu ids"

    if '-' in args.gpu_ids:
        # Expand "a-b" into the explicit comma separated id list.
        gpu_range = args.gpu_ids.split('-')
        gpu_range[0] = int(gpu_range[0])
        gpu_range[1] = int(gpu_range[1]) + 1
        args.gpu_ids = ','.join(str(g) for g in range(*gpu_range))

    assert args.test_epoch, 'Test epoch is required.'
    return args
def main():
    """Evaluate a trained pose-estimation checkpoint on the test set.

    Loads the checkpoint selected by --test_epoch, runs inference over the
    test loader (optionally averaging with a horizontally flipped pass when
    cfg.flip_test is set) and hands the stacked predictions to the tester's
    evaluation routine.
    """
    args = parse_args()
    cfg.set_args(args.gpu_ids)
    cudnn.fastest = True
    cudnn.benchmark = True
    cudnn.deterministic = False
    cudnn.enabled = True

    tester = Tester(args.test_epoch)
    tester._make_batch_generator()
    tester._make_model()

    preds = []

    with torch.no_grad():
        for itr, input_img in enumerate(tqdm(tester.batch_generator)):

            # forward
            coord_out = tester.model(input_img)

            if cfg.flip_test:
                # Test-time augmentation: run the horizontally flipped image,
                # mirror the x coordinates back, swap left/right joint pairs,
                # then average with the unflipped prediction.
                flipped_input_img = flip(input_img, dims=3)
                flipped_coord_out = tester.model(flipped_input_img)
                flipped_coord_out[:, :, 0] = cfg.output_shape[1] - flipped_coord_out[:, :, 0] - 1
                for pair in tester.flip_pairs:
                    flipped_coord_out[:, pair[0], :], flipped_coord_out[:, pair[1], :] = flipped_coord_out[:, pair[1], :].clone(), flipped_coord_out[:, pair[0], :].clone()
                coord_out = (coord_out + flipped_coord_out)/2.

            # Debug visualisation, disabled by default.
            vis = False
            if vis:
                filename = str(itr)
                tmpimg = input_img[0].cpu().numpy()
                # Undo the mean/std normalisation; channel order is reversed
                # (presumably RGB->BGR for OpenCV -- confirm) and layout is
                # transposed from CHW to HWC.
                tmpimg = tmpimg * np.array(cfg.pixel_std).reshape(3,1,1) + np.array(cfg.pixel_mean).reshape(3,1,1)
                tmpimg = tmpimg.astype(np.uint8)
                tmpimg = tmpimg[::-1, :, :]
                tmpimg = np.transpose(tmpimg,(1,2,0)).copy()
                tmpkps = np.zeros((3,tester.joint_num))
                # Rescale heatmap coordinates to input-image pixels.
                tmpkps[:2,:] = coord_out[0,:,:2].cpu().numpy().transpose(1,0) / cfg.output_shape[0] * cfg.input_shape[0]
                tmpkps[2,:] = 1
                tmpimg = vis_keypoints(tmpimg, tmpkps, tester.skeleton)
                cv2.imwrite(filename + '_output.jpg', tmpimg)

            coord_out = coord_out.cpu().numpy()
            preds.append(coord_out)

    # evaluate
    preds = np.concatenate(preds, axis=0)
    tester._evaluate(preds, cfg.result_dir)

if __name__ == "__main__":
    main()
| 34.012048 | 171 | 0.588735 | import argparse
from tqdm import tqdm
import numpy as np
import cv2
from config import cfg
import torch
from base import Tester
from utils.vis import vis_keypoints
from utils.pose_utils import flip
import torch.backends.cudnn as cudnn
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str, dest='gpu_ids')
parser.add_argument('--test_epoch', type=str, dest='test_epoch')
args = parser.parse_args()
if not args.gpu_ids:
assert 0, "Please set proper gpu ids"
if '-' in args.gpu_ids:
gpus = args.gpu_ids.split('-')
gpus[0] = int(gpus[0])
gpus[1] = int(gpus[1]) + 1
args.gpu_ids = ','.join(map(lambda x: str(x), list(range(*gpus))))
assert args.test_epoch, 'Test epoch is required.'
return args
def main():
args = parse_args()
cfg.set_args(args.gpu_ids)
cudnn.fastest = True
cudnn.benchmark = True
cudnn.deterministic = False
cudnn.enabled = True
tester = Tester(args.test_epoch)
tester._make_batch_generator()
tester._make_model()
preds = []
with torch.no_grad():
for itr, input_img in enumerate(tqdm(tester.batch_generator)):
coord_out = tester.model(input_img)
if cfg.flip_test:
flipped_input_img = flip(input_img, dims=3)
flipped_coord_out = tester.model(flipped_input_img)
flipped_coord_out[:, :, 0] = cfg.output_shape[1] - flipped_coord_out[:, :, 0] - 1
for pair in tester.flip_pairs:
flipped_coord_out[:, pair[0], :], flipped_coord_out[:, pair[1], :] = flipped_coord_out[:, pair[1], :].clone(), flipped_coord_out[:, pair[0], :].clone()
coord_out = (coord_out + flipped_coord_out)/2.
vis = False
if vis:
filename = str(itr)
tmpimg = input_img[0].cpu().numpy()
tmpimg = tmpimg * np.array(cfg.pixel_std).reshape(3,1,1) + np.array(cfg.pixel_mean).reshape(3,1,1)
tmpimg = tmpimg.astype(np.uint8)
tmpimg = tmpimg[::-1, :, :]
tmpimg = np.transpose(tmpimg,(1,2,0)).copy()
tmpkps = np.zeros((3,tester.joint_num))
tmpkps[:2,:] = coord_out[0,:,:2].cpu().numpy().transpose(1,0) / cfg.output_shape[0] * cfg.input_shape[0]
tmpkps[2,:] = 1
tmpimg = vis_keypoints(tmpimg, tmpkps, tester.skeleton)
cv2.imwrite(filename + '_output.jpg', tmpimg)
coord_out = coord_out.cpu().numpy()
preds.append(coord_out)
preds = np.concatenate(preds, axis=0)
tester._evaluate(preds, cfg.result_dir)
if __name__ == "__main__":
main()
| true | true |
f733d0e522fb8b9cb4cf1725290ec7f8123a4c29 | 12,842 | py | Python | test/test_public_bindings.py | jaketae/pytorch | 5654e6339879e438efb7cf50e88e356472eb0545 | [
"Intel"
] | null | null | null | test/test_public_bindings.py | jaketae/pytorch | 5654e6339879e438efb7cf50e88e356472eb0545 | [
"Intel"
] | null | null | null | test/test_public_bindings.py | jaketae/pytorch | 5654e6339879e438efb7cf50e88e356472eb0545 | [
"Intel"
] | null | null | null | # -*- coding: utf-8 -*-
# Owner(s): ["module: autograd"]
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
import pkgutil
import torch
import sys
from typing import Callable
import inspect
import json
import os
import unittest
class TestPublicBindings(TestCase):
def test_no_new_bindings(self):
    """
    This test aims to stop the introduction of new JIT bindings into torch._C
    whose names do not start with _. Such bindings are made available as
    torch.XXX, which may not be desirable.

    If your change causes this test to fail, add your new binding to a relevant
    submodule of torch._C, such as torch._C._jit (or other relevant submodule of
    torch._C). If your binding really needs to be available as torch.XXX, add it
    to torch._C and add it to the allowlist below.

    If you have removed a binding, remove it from the allowlist as well.
    """
    # This allowlist contains every binding in torch._C that is copied into torch at
    # the time of writing. It was generated with
    #
    #   {elem for elem in dir(torch._C) if not elem.startswith("_")}
    #
    # Note: being a set literal, the duplicated entries below are harmless.
    torch_C_allowlist_superset = {
        "AggregationType",
        "AliasDb",
        "AnyType",
        "Argument",
        "ArgumentSpec",
        "autocast_decrement_nesting",
        "autocast_increment_nesting",
        "AVG",
        "BenchmarkConfig",
        "BenchmarkExecutionStats",
        "BFloat16StorageBase",
        "Block",
        "BoolStorageBase",
        "BoolType",
        "BufferDict",
        "ByteStorageBase",
        "CallStack",
        "Capsule",
        "CharStorageBase",
        "ClassType",
        "clear_autocast_cache",
        "Code",
        "CompilationUnit",
        "CompleteArgumentSpec",
        "ComplexDoubleStorageBase",
        "ComplexFloatStorageBase",
        "ComplexType",
        "ConcreteModuleType",
        "ConcreteModuleTypeBuilder",
        "CONV_BN_FUSION",
        "cpp",
        "CudaBFloat16StorageBase",
        "CudaBFloat16TensorBase",
        "CudaBFloat16TensorBase",
        "CudaBoolStorageBase",
        "CudaBoolTensorBase",
        "CudaBoolTensorBase",
        "CudaByteStorageBase",
        "CudaByteTensorBase",
        "CudaByteTensorBase",
        "CudaCharStorageBase",
        "CudaCharTensorBase",
        "CudaCharTensorBase",
        "CudaComplexDoubleStorageBase",
        "CudaComplexDoubleTensorBase",
        "CudaComplexDoubleTensorBase",
        "CudaComplexFloatStorageBase",
        "CudaComplexFloatTensorBase",
        "CudaComplexFloatTensorBase",
        "CudaDoubleStorageBase",
        "CudaDoubleTensorBase",
        "CudaDoubleTensorBase",
        "CudaFloatStorageBase",
        "CudaFloatTensorBase",
        "CudaHalfStorageBase",
        "CudaHalfTensorBase",
        "CudaIntStorageBase",
        "CudaIntTensorBase",
        "CudaIntTensorBase",
        "CudaLongStorageBase",
        "CudaLongTensorBase",
        "CudaLongTensorBase",
        "CudaShortStorageBase",
        "CudaShortTensorBase",
        "CudaShortTensorBase",
        "DeepCopyMemoTable",
        "default_generator",
        "DeserializationStorageContext",
        "device",
        "DeviceObjType",
        "DictType",
        "DisableTorchFunction",
        "DoubleStorageBase",
        "dtype",
        "EnumType",
        "ErrorReport",
        "ExecutionPlan",
        "FatalError",
        "FileCheck",
        "finfo",
        "FloatStorageBase",
        "FloatType",
        "fork",
        "FunctionSchema",
        "FUSE_ADD_RELU",
        "Future",
        "FutureType",
        "Generator",
        "get_autocast_cpu_dtype",
        "get_default_dtype",
        "get_num_interop_threads",
        "get_num_threads",
        "Gradient",
        "Graph",
        "GraphExecutorState",
        "HalfStorageBase",
        "has_cuda",
        "has_cudnn",
        "has_lapack",
        "has_mkl",
        "has_mkldnn",
        "has_mlc",
        "has_openmp",
        "has_spectral",
        "HOIST_CONV_PACKED_PARAMS",
        "iinfo",
        "import_ir_module_from_buffer",
        "import_ir_module",
        "InferredType",
        "init_num_threads",
        "INSERT_FOLD_PREPACK_OPS",
        "InterfaceType",
        "IntStorageBase",
        "IntType",
        "SymIntType",
        "IODescriptor",
        "is_anomaly_enabled",
        "is_autocast_cache_enabled",
        "is_autocast_cpu_enabled",
        "is_autocast_enabled",
        "is_grad_enabled",
        "is_inference_mode_enabled",
        "JITException",
        "layout",
        "ListType",
        "LiteScriptModule",
        "LockingLogger",
        "LoggerBase",
        "LongStorageBase",
        "memory_format",
        "merge_type_from_type_comment",
        "MobileOptimizerType",
        "ModuleDict",
        "Node",
        "NoneType",
        "NoopLogger",
        "NumberType",
        "OperatorInfo",
        "OptionalType",
        "ParameterDict",
        "parse_ir",
        "parse_schema",
        "parse_type_comment",
        "PyObjectType",
        "PyTorchFileReader",
        "PyTorchFileWriter",
        "QInt32StorageBase",
        "QInt8StorageBase",
        "qscheme",
        "QUInt4x2StorageBase",
        "QUInt2x4StorageBase",
        "QUInt8StorageBase",
        "read_vitals",
        "REMOVE_DROPOUT",
        "RRefType",
        "ScriptClass",
        "ScriptClassFunction",
        "ScriptDict",
        "ScriptDictIterator",
        "ScriptDictKeyIterator",
        "ScriptList",
        "ScriptListIterator",
        "ScriptFunction",
        "ScriptMethod",
        "ScriptModule",
        "ScriptModuleSerializer",
        "ScriptObject",
        "ScriptObjectProperty",
        "SerializationStorageContext",
        "set_anomaly_enabled",
        "set_autocast_cache_enabled",
        "set_autocast_cpu_dtype",
        "set_autocast_cpu_enabled",
        "set_autocast_enabled",
        "set_flush_denormal",
        "set_num_interop_threads",
        "set_num_threads",
        "set_vital",
        "ShortStorageBase",
        "Size",
        "StaticModule",
        "Stream",
        "StreamObjType",
        "StringType",
        "SUM",
        "TensorType",
        "ThroughputBenchmark",
        "TracingState",
        "TupleType",
        "Type",
        "unify_type_list",
        "UnionType",
        "Use",
        "Value",
        "autocast_decrement_nesting",
        "autocast_increment_nesting",
        "clear_autocast_cache",
        "cpp",
        "default_generator",
        "device",
        "dtype",
        "finfo",
        "fork",
        "get_default_dtype",
        "get_num_interop_threads",
        "get_num_threads",
        "has_cuda",
        "has_cudnn",
        "has_lapack",
        "has_mkl",
        "has_mkldnn",
        "has_mlc",
        "has_openmp",
        "iinfo",
        "import_ir_module",
        "import_ir_module_from_buffer",
        "init_num_threads",
        "is_anomaly_enabled",
        "is_autocast_enabled",
        "is_grad_enabled",
        "layout",
        "memory_format",
        "merge_type_from_type_comment",
        "parse_ir",
        "parse_schema",
        "parse_type_comment",
        "qscheme",
        "set_anomaly_enabled",
        "set_autocast_enabled",
        'set_autocast_gpu_dtype',
        'get_autocast_gpu_dtype',
        "set_flush_denormal",
        "set_num_interop_threads",
        "set_num_threads",
        "unify_type_list",
        "vitals_enabled",
        "wait",
    }
    # The bindings actually exported by this build of torch._C.
    torch_C_bindings = {elem for elem in dir(torch._C) if not elem.startswith("_")}

    # Check that the torch._C bindings are all in the allowlist. Since
    # bindings can change based on how PyTorch was compiled (e.g. with/without
    # CUDA), the two may not be an exact match but the bindings should be
    # a subset of the allowlist.
    difference = torch_C_bindings.difference(torch_C_allowlist_superset)
    msg = f"torch._C had bindings that are not present in the allowlist:\n{difference}"
    self.assertTrue(torch_C_bindings.issubset(torch_C_allowlist_superset), msg)
# AttributeError: module 'torch.distributed' has no attribute '_shard'
@unittest.skipIf(IS_WINDOWS, "Distributed Attribute Error")
def test_correct_module_names(self):
    '''
    An API is considered public, if  its  `__module__` starts with `torch.`
    and there is no name in `__module__` or the object itself that starts with "_".
    Each public package should either:
    - (preferred) Define `__all__` and all callables and classes in there must have their
      `__module__` start with the current submodule's path. Things not in `__all__` should
      NOT have their `__module__` start with the current submodule.
    - (for simple python-only modules) Not define `__all__` and all the elements in
      `dir(submod)` must have their `__module__` that start with the current submodule.
    '''
    failure_list = []
    with open(os.path.join(os.path.dirname(__file__), 'allowlist_for_publicAPI.json')) as json_file:
        # no new entries should be added to this allow_dict.
        # New APIs must follow the public API guidelines.
        allow_dict = json.load(json_file)

    def test_module(modname):
        # Skip private modules (any path component starting with "_").
        split_strs = modname.split('.')
        mod = sys.modules.get(modname)
        for elem in split_strs:
            if elem.startswith("_"):
                return

        def add_to_failure_list_if_not_in_allow_dict(modname, elem, elem_module):
            if modname in allow_dict and elem in allow_dict[modname]:
                return
            failure_list.append((modname, elem, elem_module))

        # verifies that each public API has the correct module name and naming semantics
        def looks_public_or_not(elem, modname, mod, is_public=True):
            obj = getattr(mod, elem)
            if not (isinstance(obj, Callable) or inspect.isclass(obj)):
                return
            elem_module = getattr(obj, '__module__', None)
            elem_modname_starts_with_mod = elem_module is not None and \
                elem_module.startswith(modname) and '._' not in elem_module
            # elem's name must NOT begin with an `_` and it's module name
            # SHOULD start with it's current module since it's a public API
            looks_public = not elem.startswith('_') and elem_modname_starts_with_mod
            if is_public != looks_public:
                add_to_failure_list_if_not_in_allow_dict(modname, elem, elem_module)

        # Bugfix: the original checked `hasattr(modname, '__all__')` and
        # called `dir(modname)` on the module *name* (a str), so the
        # `__all__` branch was unreachable; it also omitted the required
        # `mod` argument to looks_public_or_not in that branch.  Operate on
        # the module object instead.
        if hasattr(mod, '__all__'):
            public_api = mod.__all__
            all_api = dir(mod)
            for elem in all_api:
                looks_public_or_not(elem, modname, mod, is_public=elem in public_api)
        else:
            all_api = dir(mod)
            for elem in all_api:
                if not elem.startswith('_'):
                    looks_public_or_not(elem, modname, mod, is_public=True)

    for _, modname, ispkg in pkgutil.walk_packages(path=torch.__path__, prefix=torch.__name__ + '.'):
        test_module(modname)
    test_module('torch')

    msg = "Following new APIs ( displayed in the form (module, element, element module) )" \
          " were added that do not meet our guidelines for public API" \
          " Please review https://docs.google.com/document/d/10yx2-4gs0gTMOimVS403MnoAWkqitS8TUHX73PN8EjE/edit?pli=1#" \
          " for more information:\n" + "\n".join(map(str, failure_list))

    # empty lists are considered false in python
    self.assertTrue(not failure_list, msg)
if __name__ == '__main__':
    # Delegate to the common PyTorch test runner.
    run_tests()
| 36.276836 | 125 | 0.548746 |
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
import pkgutil
import torch
import sys
from typing import Callable
import inspect
import json
import os
import unittest
class TestPublicBindings(TestCase):
def test_no_new_bindings(self):
torch_C_allowlist_superset = {
"AggregationType",
"AliasDb",
"AnyType",
"Argument",
"ArgumentSpec",
"autocast_decrement_nesting",
"autocast_increment_nesting",
"AVG",
"BenchmarkConfig",
"BenchmarkExecutionStats",
"BFloat16StorageBase",
"Block",
"BoolStorageBase",
"BoolType",
"BufferDict",
"ByteStorageBase",
"CallStack",
"Capsule",
"CharStorageBase",
"ClassType",
"clear_autocast_cache",
"Code",
"CompilationUnit",
"CompleteArgumentSpec",
"ComplexDoubleStorageBase",
"ComplexFloatStorageBase",
"ComplexType",
"ConcreteModuleType",
"ConcreteModuleTypeBuilder",
"CONV_BN_FUSION",
"cpp",
"CudaBFloat16StorageBase",
"CudaBFloat16TensorBase",
"CudaBFloat16TensorBase",
"CudaBoolStorageBase",
"CudaBoolTensorBase",
"CudaBoolTensorBase",
"CudaByteStorageBase",
"CudaByteTensorBase",
"CudaByteTensorBase",
"CudaCharStorageBase",
"CudaCharTensorBase",
"CudaCharTensorBase",
"CudaComplexDoubleStorageBase",
"CudaComplexDoubleTensorBase",
"CudaComplexDoubleTensorBase",
"CudaComplexFloatStorageBase",
"CudaComplexFloatTensorBase",
"CudaComplexFloatTensorBase",
"CudaDoubleStorageBase",
"CudaDoubleTensorBase",
"CudaDoubleTensorBase",
"CudaFloatStorageBase",
"CudaFloatTensorBase",
"CudaHalfStorageBase",
"CudaHalfTensorBase",
"CudaIntStorageBase",
"CudaIntTensorBase",
"CudaIntTensorBase",
"CudaLongStorageBase",
"CudaLongTensorBase",
"CudaLongTensorBase",
"CudaShortStorageBase",
"CudaShortTensorBase",
"CudaShortTensorBase",
"DeepCopyMemoTable",
"default_generator",
"DeserializationStorageContext",
"device",
"DeviceObjType",
"DictType",
"DisableTorchFunction",
"DoubleStorageBase",
"dtype",
"EnumType",
"ErrorReport",
"ExecutionPlan",
"FatalError",
"FileCheck",
"finfo",
"FloatStorageBase",
"FloatType",
"fork",
"FunctionSchema",
"FUSE_ADD_RELU",
"Future",
"FutureType",
"Generator",
"get_autocast_cpu_dtype",
"get_default_dtype",
"get_num_interop_threads",
"get_num_threads",
"Gradient",
"Graph",
"GraphExecutorState",
"HalfStorageBase",
"has_cuda",
"has_cudnn",
"has_lapack",
"has_mkl",
"has_mkldnn",
"has_mlc",
"has_openmp",
"has_spectral",
"HOIST_CONV_PACKED_PARAMS",
"iinfo",
"import_ir_module_from_buffer",
"import_ir_module",
"InferredType",
"init_num_threads",
"INSERT_FOLD_PREPACK_OPS",
"InterfaceType",
"IntStorageBase",
"IntType",
"SymIntType",
"IODescriptor",
"is_anomaly_enabled",
"is_autocast_cache_enabled",
"is_autocast_cpu_enabled",
"is_autocast_enabled",
"is_grad_enabled",
"is_inference_mode_enabled",
"JITException",
"layout",
"ListType",
"LiteScriptModule",
"LockingLogger",
"LoggerBase",
"LongStorageBase",
"memory_format",
"merge_type_from_type_comment",
"MobileOptimizerType",
"ModuleDict",
"Node",
"NoneType",
"NoopLogger",
"NumberType",
"OperatorInfo",
"OptionalType",
"ParameterDict",
"parse_ir",
"parse_schema",
"parse_type_comment",
"PyObjectType",
"PyTorchFileReader",
"PyTorchFileWriter",
"QInt32StorageBase",
"QInt8StorageBase",
"qscheme",
"QUInt4x2StorageBase",
"QUInt2x4StorageBase",
"QUInt8StorageBase",
"read_vitals",
"REMOVE_DROPOUT",
"RRefType",
"ScriptClass",
"ScriptClassFunction",
"ScriptDict",
"ScriptDictIterator",
"ScriptDictKeyIterator",
"ScriptList",
"ScriptListIterator",
"ScriptFunction",
"ScriptMethod",
"ScriptModule",
"ScriptModuleSerializer",
"ScriptObject",
"ScriptObjectProperty",
"SerializationStorageContext",
"set_anomaly_enabled",
"set_autocast_cache_enabled",
"set_autocast_cpu_dtype",
"set_autocast_cpu_enabled",
"set_autocast_enabled",
"set_flush_denormal",
"set_num_interop_threads",
"set_num_threads",
"set_vital",
"ShortStorageBase",
"Size",
"StaticModule",
"Stream",
"StreamObjType",
"StringType",
"SUM",
"TensorType",
"ThroughputBenchmark",
"TracingState",
"TupleType",
"Type",
"unify_type_list",
"UnionType",
"Use",
"Value",
"autocast_decrement_nesting",
"autocast_increment_nesting",
"clear_autocast_cache",
"cpp",
"default_generator",
"device",
"dtype",
"finfo",
"fork",
"get_default_dtype",
"get_num_interop_threads",
"get_num_threads",
"has_cuda",
"has_cudnn",
"has_lapack",
"has_mkl",
"has_mkldnn",
"has_mlc",
"has_openmp",
"iinfo",
"import_ir_module",
"import_ir_module_from_buffer",
"init_num_threads",
"is_anomaly_enabled",
"is_autocast_enabled",
"is_grad_enabled",
"layout",
"memory_format",
"merge_type_from_type_comment",
"parse_ir",
"parse_schema",
"parse_type_comment",
"qscheme",
"set_anomaly_enabled",
"set_autocast_enabled",
'set_autocast_gpu_dtype',
'get_autocast_gpu_dtype',
"set_flush_denormal",
"set_num_interop_threads",
"set_num_threads",
"unify_type_list",
"vitals_enabled",
"wait",
}
torch_C_bindings = {elem for elem in dir(torch._C) if not elem.startswith("_")}
difference = torch_C_bindings.difference(torch_C_allowlist_superset)
msg = f"torch._C had bindings that are not present in the allowlist:\n{difference}"
self.assertTrue(torch_C_bindings.issubset(torch_C_allowlist_superset), msg)
@unittest.skipIf(IS_WINDOWS, "Distributed Attribute Error")
def test_correct_module_names(self):
failure_list = []
with open(os.path.join(os.path.dirname(__file__), 'allowlist_for_publicAPI.json')) as json_file:
allow_dict = json.load(json_file)
def test_module(modname):
split_strs = modname.split('.')
mod = sys.modules.get(modname)
for elem in split_strs:
if elem.startswith("_"):
return
def add_to_failure_list_if_not_in_allow_dict(modname, elem, elem_module):
if modname in allow_dict and elem in allow_dict[modname]:
return
failure_list.append((modname, elem, elem_module))
def looks_public_or_not(elem, modname, mod, is_public=True):
obj = getattr(mod, elem)
if not (isinstance(obj, Callable) or inspect.isclass(obj)):
return
elem_module = getattr(obj, '__module__', None)
elem_modname_starts_with_mod = elem_module is not None and \
elem_module.startswith(modname) and '._' not in elem_module
looks_public = not elem.startswith('_') and elem_modname_starts_with_mod
if is_public != looks_public:
add_to_failure_list_if_not_in_allow_dict(modname, elem, elem_module)
if hasattr(modname, '__all__'):
public_api = mod.__all__
all_api = dir(modname)
for elem in all_api:
looks_public_or_not(elem, modname, is_public=elem in public_api)
else:
all_api = dir(mod)
for elem in all_api:
if not elem.startswith('_'):
looks_public_or_not(elem, modname, mod, is_public=True)
for _, modname, ispkg in pkgutil.walk_packages(path=torch.__path__, prefix=torch.__name__ + '.'):
test_module(modname)
test_module('torch')
msg = "Following new APIs ( displayed in the form (module, element, element module) )" \
" were added that do not meet our guidelines for public API" \
" Please review https://docs.google.com/document/d/10yx2-4gs0gTMOimVS403MnoAWkqitS8TUHX73PN8EjE/edit?pli=1#" \
" for more information:\n" + "\n".join(map(str, failure_list))
self.assertTrue(not failure_list, msg)
# Script entry point: discover and run the test cases defined above.
# NOTE(review): ``run_tests`` is imported earlier in this file (outside this
# view) — presumably torch.testing._internal.common_utils.run_tests; verify.
if __name__ == '__main__':
    run_tests()
| true | true |
f733d1a2aac450996bfafba7f201d147ff23592a | 56,410 | py | Python | silx/math/fit/fittheories.py | sanjaymsh/silx | 50c2b4820d4786abcce866645b1d3c138891a25f | [
"CC0-1.0",
"MIT"
] | 2 | 2020-03-09T15:50:17.000Z | 2020-03-09T15:50:23.000Z | silx/math/fit/fittheories.py | sanjaymsh/silx | 50c2b4820d4786abcce866645b1d3c138891a25f | [
"CC0-1.0",
"MIT"
] | 1 | 2020-03-12T13:11:59.000Z | 2020-03-12T13:53:55.000Z | silx/math/fit/fittheories.py | JuliusHarald/silx | 3f9bcda88c074438fdb30cde29fec314d26f471c | [
"CC0-1.0",
"MIT"
] | null | null | null | # coding: utf-8
#/*##########################################################################
#
# Copyright (c) 2004-2020 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
########################################################################### */
"""This modules provides a set of fit functions and associated
estimation functions in a format that can be imported into a
:class:`silx.math.fit.FitManager` instance.
These functions are well suited for fitting multiple gaussian shaped peaks
typically found in spectroscopy data. The estimation functions are designed
to detect how many peaks are present in the data, and provide an initial
estimate for their height, their center location and their full-width
at half maximum (fwhm).
The limitation of these estimation algorithms is that only gaussians having a
similar fwhm can be detected by the peak search algorithm.
This *search fwhm* can be defined by the user, if
he knows the characteristics of his data, or can be automatically estimated
based on the fwhm of the largest peak in the data.
The source code of this module can serve as template for defining your own
fit functions.
The functions to be imported by :meth:`FitManager.loadtheories` are defined by
a dictionary :const:`THEORY`: with the following structure::
from silx.math.fit.fittheory import FitTheory
THEORY = {
'theory_name_1': FitTheory(
description='Description of theory 1',
function=fitfunction1,
parameters=('param name 1', 'param name 2', …),
estimate=estimation_function1,
configure=configuration_function1,
derivative=derivative_function1),
'theory_name_2': FitTheory(…),
}
.. note::
Consider using an OrderedDict instead of a regular dictionary, when
defining your own theory dictionary, if the order matters to you.
This will likely be the case if you intend to load a selection of
functions in a GUI such as :class:`silx.gui.fit.FitManager`.
Theory names can be customized (e.g. ``gauss, lorentz, splitgauss``…).
The mandatory parameters for :class:`FitTheory` are ``function`` and
``parameters``.
You can also define an ``INIT`` function that will be executed by
:meth:`FitManager.loadtheories`.
See the documentation of :class:`silx.math.fit.fittheory.FitTheory`
for more information.
Module members:
---------------
"""
import numpy
from collections import OrderedDict
import logging
from silx.math.fit import functions
from silx.math.fit.peaks import peak_search, guess_fwhm
from silx.math.fit.filters import strip, savitsky_golay
from silx.math.fit.leastsq import leastsq
from silx.math.fit.fittheory import FitTheory
_logger = logging.getLogger(__name__)
__authors__ = ["V.A. Sole", "P. Knobel"]
__license__ = "MIT"
__date__ = "15/05/2017"
# Default values for every tunable knob read by the fit/estimation methods of
# :class:`FitTheories` below.  A (possibly modified) copy of this dict becomes
# the instance attribute :attr:`FitTheories.config`.
DEFAULT_CONFIG = {
    # Fit constraint switches
    'NoConstraintsFlag': False,
    'PositiveFwhmFlag': True,
    'PositiveHeightAreaFlag': True,
    'SameFwhmFlag': False,
    'QuotedPositionFlag': False,  # peak not outside data range
    'QuotedEtaFlag': False,  # force 0 < eta < 1
    # Peak detection
    'AutoScaling': False,
    'Yscaling': 1.0,
    'FwhmPoints': 8,
    'AutoFwhm': True,
    'Sensitivity': 2.5,
    'ForcePeakPresence': True,
    # Hypermet
    'HypermetTails': 15,
    'QuotedFwhmFlag': 0,
    'MaxFwhm2InputRatio': 1.5,
    'MinFwhm2InputRatio': 0.4,
    # short tail parameters
    'MinGaussArea4ShortTail': 50000.,
    'InitialShortTailAreaRatio': 0.050,
    'MaxShortTailAreaRatio': 0.100,
    'MinShortTailAreaRatio': 0.0010,
    'InitialShortTailSlopeRatio': 0.70,
    'MaxShortTailSlopeRatio': 2.00,
    'MinShortTailSlopeRatio': 0.50,
    # long tail parameters
    'MinGaussArea4LongTail': 1000.0,
    'InitialLongTailAreaRatio': 0.050,
    'MaxLongTailAreaRatio': 0.300,
    'MinLongTailAreaRatio': 0.010,
    'InitialLongTailSlopeRatio': 20.0,
    'MaxLongTailSlopeRatio': 50.0,
    'MinLongTailSlopeRatio': 5.0,
    # step tail
    'MinGaussHeight4StepTail': 5000.,
    'InitialStepTailHeightRatio': 0.002,
    'MaxStepTailHeightRatio': 0.0100,
    'MinStepTailHeightRatio': 0.0001,
    # Hypermet constraints
    # position in range [estimated position +- estimated fwhm/2]
    'HypermetQuotedPositionFlag': True,
    'DeltaPositionFwhmUnits': 0.5,
    'SameSlopeRatioFlag': 1,
    'SameAreaRatioFlag': 1,
    # Strip bg removal
    'StripBackgroundFlag': True,
    'SmoothingFlag': True,
    'SmoothingWidth': 5,
    'StripWidth': 2,
    'StripIterations': 5000,
    'StripThresholdFactor': 1.0}
"""This dictionary defines default configuration parameters that have effects
on fit functions and estimation functions, mainly on fit constraints.
This dictionary is accessible as attribute :attr:`FitTheories.config`,
which can be modified by configuration functions defined in
:const:`CONFIGURE`.
"""
# Constraint codes stored in column 0 of the constraints arrays built by the
# estimation methods below and handed to :func:`silx.math.fit.leastsq`.
CFREE = 0       # no constraint on the parameter
CPOSITIVE = 1   # parameter must stay positive
CQUOTED = 2     # parameter bounded by [min, max] stored in columns 1 and 2
CFIXED = 3      # parameter fixed to its initial value
CFACTOR = 4     # parameter tied to another one (index in col 1, factor in col 2)
CDELTA = 5      # tied to another parameter by an offset (not used in this module)
CSUM = 6        # tied to another parameter by a sum (not used in this module)
CIGNORED = 7    # parameter ignored by the fit
class FitTheories(object):
"""Class wrapping functions from :class:`silx.math.fit.functions`
and providing estimate functions for all of these fit functions."""
def __init__(self, config=None):
if config is None:
self.config = DEFAULT_CONFIG
else:
self.config = config
def ahypermet(self, x, *pars):
"""
Wrapping of :func:`silx.math.fit.functions.sum_ahypermet` without
the tail flags in the function signature.
Depending on the value of `self.config['HypermetTails']`, one can
activate or deactivate the various terms of the hypermet function.
`self.config['HypermetTails']` must be an integer between 0 and 15.
It is a set of 4 binary flags, one for activating each one of the
hypermet terms: *gaussian function, short tail, long tail, step*.
For example, 15 can be expressed as ``1111`` in base 2, so a flag of
15 means all terms are active.
"""
g_term = self.config['HypermetTails'] & 1
st_term = (self.config['HypermetTails'] >> 1) & 1
lt_term = (self.config['HypermetTails'] >> 2) & 1
step_term = (self.config['HypermetTails'] >> 3) & 1
return functions.sum_ahypermet(x, *pars,
gaussian_term=g_term, st_term=st_term,
lt_term=lt_term, step_term=step_term)
def poly(self, x, *pars):
"""Order n polynomial.
The order of the polynomial is defined by the number of
coefficients (``*pars``).
"""
p = numpy.poly1d(pars)
return p(x)
@staticmethod
def estimate_poly(x, y, n=2):
"""Estimate polynomial coefficients for a degree n polynomial.
"""
pcoeffs = numpy.polyfit(x, y, n)
constraints = numpy.zeros((n + 1, 3), numpy.float)
return pcoeffs, constraints
def estimate_quadratic(self, x, y):
"""Estimate quadratic coefficients
"""
return self.estimate_poly(x, y, n=2)
def estimate_cubic(self, x, y):
"""Estimate coefficients for a degree 3 polynomial
"""
return self.estimate_poly(x, y, n=3)
def estimate_quartic(self, x, y):
"""Estimate coefficients for a degree 4 polynomial
"""
return self.estimate_poly(x, y, n=4)
def estimate_quintic(self, x, y):
"""Estimate coefficients for a degree 5 polynomial
"""
return self.estimate_poly(x, y, n=5)
def strip_bg(self, y):
"""Return the strip background of y, using parameters from
:attr:`config` dictionary (*StripBackgroundFlag, StripWidth,
StripIterations, StripThresholdFactor*)"""
remove_strip_bg = self.config.get('StripBackgroundFlag', False)
if remove_strip_bg:
if self.config['SmoothingFlag']:
y = savitsky_golay(y, self.config['SmoothingWidth'])
strip_width = self.config['StripWidth']
strip_niterations = self.config['StripIterations']
strip_thr_factor = self.config['StripThresholdFactor']
return strip(y, w=strip_width,
niterations=strip_niterations,
factor=strip_thr_factor)
else:
return numpy.zeros_like(y)
def guess_yscaling(self, y):
"""Estimate scaling for y prior to peak search.
A smoothing filter is applied to y to estimate the noise level
(chi-squared)
:param y: Data array
:return: Scaling factor
"""
# ensure y is an array
yy = numpy.array(y, copy=False)
# smooth
convolution_kernel = numpy.ones(shape=(3,)) / 3.
ysmooth = numpy.convolve(y, convolution_kernel, mode="same")
# remove zeros
idx_array = numpy.fabs(y) > 0.0
yy = yy[idx_array]
ysmooth = ysmooth[idx_array]
# compute scaling factor
chisq = numpy.mean((yy - ysmooth)**2 / numpy.fabs(yy))
if chisq > 0:
return 1. / chisq
else:
return 1.0
def peak_search(self, y, fwhm, sensitivity):
"""Search for peaks in y array, after padding the array and
multiplying its value by a scaling factor.
:param y: 1-D data array
:param int fwhm: Typical full width at half maximum for peaks,
in number of points. This parameter is used for to discriminate between
true peaks and background fluctuations.
:param float sensitivity: Sensitivity parameter. This is a threshold factor
for peak detection. Only peaks larger than the standard deviation
of the noise multiplied by this sensitivity parameter are detected.
:return: List of peak indices
"""
# add padding
ysearch = numpy.ones((len(y) + 2 * fwhm,), numpy.float)
ysearch[0:fwhm] = y[0]
ysearch[-1:-fwhm - 1:-1] = y[len(y)-1]
ysearch[fwhm:fwhm + len(y)] = y[:]
scaling = self.guess_yscaling(y) if self.config["AutoScaling"] else self.config["Yscaling"]
if len(ysearch) > 1.5 * fwhm:
peaks = peak_search(scaling * ysearch,
fwhm=fwhm, sensitivity=sensitivity)
return [peak_index - fwhm for peak_index in peaks
if 0 <= peak_index - fwhm < len(y)]
else:
return []
def estimate_height_position_fwhm(self, x, y):
"""Estimation of *Height, Position, FWHM* of peaks, for gaussian-like
curves.
This functions finds how many parameters are needed, based on the
number of peaks detected. Then it estimates the fit parameters
with a few iterations of fitting gaussian functions.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*Height, Position, FWHM*.
Fit constraints depend on :attr:`config`.
"""
fittedpar = []
bg = self.strip_bg(y)
if self.config['AutoFwhm']:
search_fwhm = guess_fwhm(y)
else:
search_fwhm = int(float(self.config['FwhmPoints']))
search_sens = float(self.config['Sensitivity'])
if search_fwhm < 3:
_logger.warning("Setting peak fwhm to 3 (lower limit)")
search_fwhm = 3
self.config['FwhmPoints'] = 3
if search_sens < 1:
_logger.warning("Setting peak search sensitivity to 1. " +
"(lower limit to filter out noise peaks)")
search_sens = 1
self.config['Sensitivity'] = 1
npoints = len(y)
# Find indices of peaks in data array
peaks = self.peak_search(y,
fwhm=search_fwhm,
sensitivity=search_sens)
if not len(peaks):
forcepeak = int(float(self.config.get('ForcePeakPresence', 0)))
if forcepeak:
delta = y - bg
# get index of global maximum
# (first one if several samples are equal to this value)
peaks = [numpy.nonzero(delta == delta.max())[0][0]]
# Find index of largest peak in peaks array
index_largest_peak = 0
if len(peaks) > 0:
# estimate fwhm as 5 * sampling interval
sig = 5 * abs(x[npoints - 1] - x[0]) / npoints
peakpos = x[int(peaks[0])]
if abs(peakpos) < 1.0e-16:
peakpos = 0.0
param = numpy.array(
[y[int(peaks[0])] - bg[int(peaks[0])], peakpos, sig])
height_largest_peak = param[0]
peak_index = 1
for i in peaks[1:]:
param2 = numpy.array(
[y[int(i)] - bg[int(i)], x[int(i)], sig])
param = numpy.concatenate((param, param2))
if param2[0] > height_largest_peak:
height_largest_peak = param2[0]
index_largest_peak = peak_index
peak_index += 1
# Subtract background
xw = x
yw = y - bg
cons = numpy.zeros((len(param), 3), numpy.float)
# peak height must be positive
cons[0:len(param):3, 0] = CPOSITIVE
# force peaks to stay around their position
cons[1:len(param):3, 0] = CQUOTED
# set possible peak range to estimated peak +- guessed fwhm
if len(xw) > search_fwhm:
fwhmx = numpy.fabs(xw[int(search_fwhm)] - xw[0])
cons[1:len(param):3, 1] = param[1:len(param):3] - 0.5 * fwhmx
cons[1:len(param):3, 2] = param[1:len(param):3] + 0.5 * fwhmx
else:
shape = [max(1, int(x)) for x in (param[1:len(param):3])]
cons[1:len(param):3, 1] = min(xw) * numpy.ones(
shape,
numpy.float)
cons[1:len(param):3, 2] = max(xw) * numpy.ones(
shape,
numpy.float)
# ensure fwhm is positive
cons[2:len(param):3, 0] = CPOSITIVE
# run a quick iterative fit (4 iterations) to improve
# estimations
fittedpar, _, _ = leastsq(functions.sum_gauss, xw, yw, param,
max_iter=4, constraints=cons.tolist(),
full_output=True)
# set final constraints based on config parameters
cons = numpy.zeros((len(fittedpar), 3), numpy.float)
peak_index = 0
for i in range(len(peaks)):
# Setup height area constrains
if not self.config['NoConstraintsFlag']:
if self.config['PositiveHeightAreaFlag']:
cons[peak_index, 0] = CPOSITIVE
cons[peak_index, 1] = 0
cons[peak_index, 2] = 0
peak_index += 1
# Setup position constrains
if not self.config['NoConstraintsFlag']:
if self.config['QuotedPositionFlag']:
cons[peak_index, 0] = CQUOTED
cons[peak_index, 1] = min(x)
cons[peak_index, 2] = max(x)
peak_index += 1
# Setup positive FWHM constrains
if not self.config['NoConstraintsFlag']:
if self.config['PositiveFwhmFlag']:
cons[peak_index, 0] = CPOSITIVE
cons[peak_index, 1] = 0
cons[peak_index, 2] = 0
if self.config['SameFwhmFlag']:
if i != index_largest_peak:
cons[peak_index, 0] = CFACTOR
cons[peak_index, 1] = 3 * index_largest_peak + 2
cons[peak_index, 2] = 1.0
peak_index += 1
return fittedpar, cons
def estimate_agauss(self, x, y):
"""Estimation of *Area, Position, FWHM* of peaks, for gaussian-like
curves.
This functions uses :meth:`estimate_height_position_fwhm`, then
converts the height parameters to area under the curve with the
formula ``area = sqrt(2*pi) * height * fwhm / (2 * sqrt(2 * log(2))``
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*Area, Position, FWHM*.
Fit constraints depend on :attr:`config`.
"""
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
# get the number of found peaks
npeaks = len(fittedpar) // 3
for i in range(npeaks):
height = fittedpar[3 * i]
fwhm = fittedpar[3 * i + 2]
# Replace height with area in fittedpar
fittedpar[3 * i] = numpy.sqrt(2 * numpy.pi) * height * fwhm / (
2.0 * numpy.sqrt(2 * numpy.log(2)))
return fittedpar, cons
def estimate_alorentz(self, x, y):
"""Estimation of *Area, Position, FWHM* of peaks, for Lorentzian
curves.
This functions uses :meth:`estimate_height_position_fwhm`, then
converts the height parameters to area under the curve with the
formula ``area = height * fwhm * 0.5 * pi``
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*Area, Position, FWHM*.
Fit constraints depend on :attr:`config`.
"""
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
# get the number of found peaks
npeaks = len(fittedpar) // 3
for i in range(npeaks):
height = fittedpar[3 * i]
fwhm = fittedpar[3 * i + 2]
# Replace height with area in fittedpar
fittedpar[3 * i] = (height * fwhm * 0.5 * numpy.pi)
return fittedpar, cons
def estimate_splitgauss(self, x, y):
"""Estimation of *Height, Position, FWHM1, FWHM2* of peaks, for
asymmetric gaussian-like curves.
This functions uses :meth:`estimate_height_position_fwhm`, then
adds a second (identical) estimation of FWHM to the fit parameters
for each peak, and the corresponding constraint.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*Height, Position, FWHM1, FWHM2*.
Fit constraints depend on :attr:`config`.
"""
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
# get the number of found peaks
npeaks = len(fittedpar) // 3
estimated_parameters = []
estimated_constraints = numpy.zeros((4 * npeaks, 3), numpy.float)
for i in range(npeaks):
for j in range(3):
estimated_parameters.append(fittedpar[3 * i + j])
# fwhm2 estimate = fwhm1
estimated_parameters.append(fittedpar[3 * i + 2])
# height
estimated_constraints[4 * i, 0] = cons[3 * i, 0]
estimated_constraints[4 * i, 1] = cons[3 * i, 1]
estimated_constraints[4 * i, 2] = cons[3 * i, 2]
# position
estimated_constraints[4 * i + 1, 0] = cons[3 * i + 1, 0]
estimated_constraints[4 * i + 1, 1] = cons[3 * i + 1, 1]
estimated_constraints[4 * i + 1, 2] = cons[3 * i + 1, 2]
# fwhm1
estimated_constraints[4 * i + 2, 0] = cons[3 * i + 2, 0]
estimated_constraints[4 * i + 2, 1] = cons[3 * i + 2, 1]
estimated_constraints[4 * i + 2, 2] = cons[3 * i + 2, 2]
# fwhm2
estimated_constraints[4 * i + 3, 0] = cons[3 * i + 2, 0]
estimated_constraints[4 * i + 3, 1] = cons[3 * i + 2, 1]
estimated_constraints[4 * i + 3, 2] = cons[3 * i + 2, 2]
if cons[3 * i + 2, 0] == CFACTOR:
# convert indices of related parameters
# (this happens if SameFwhmFlag == True)
estimated_constraints[4 * i + 2, 1] = \
int(cons[3 * i + 2, 1] / 3) * 4 + 2
estimated_constraints[4 * i + 3, 1] = \
int(cons[3 * i + 2, 1] / 3) * 4 + 3
return estimated_parameters, estimated_constraints
def estimate_pvoigt(self, x, y):
"""Estimation of *Height, Position, FWHM, eta* of peaks, for
pseudo-Voigt curves.
Pseudo-Voigt are a sum of a gaussian curve *G(x)* and a lorentzian
curve *L(x)* with the same height, center, fwhm parameters:
``y(x) = eta * G(x) + (1-eta) * L(x)``
This functions uses :meth:`estimate_height_position_fwhm`, then
adds a constant estimation of *eta* (0.5) to the fit parameters
for each peak, and the corresponding constraint.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*Height, Position, FWHM, eta*.
Constraint for the eta parameter can be set to QUOTED (0.--1.)
by setting :attr:`config`['QuotedEtaFlag'] to ``True``.
If this is not the case, the constraint code is set to FREE.
"""
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
npeaks = len(fittedpar) // 3
newpar = []
newcons = numpy.zeros((4 * npeaks, 3), numpy.float)
# find out related parameters proper index
if not self.config['NoConstraintsFlag']:
if self.config['SameFwhmFlag']:
j = 0
# get the index of the free FWHM
for i in range(npeaks):
if cons[3 * i + 2, 0] != 4:
j = i
for i in range(npeaks):
if i != j:
cons[3 * i + 2, 1] = 4 * j + 2
for i in range(npeaks):
newpar.append(fittedpar[3 * i])
newpar.append(fittedpar[3 * i + 1])
newpar.append(fittedpar[3 * i + 2])
newpar.append(0.5)
# height
newcons[4 * i, 0] = cons[3 * i, 0]
newcons[4 * i, 1] = cons[3 * i, 1]
newcons[4 * i, 2] = cons[3 * i, 2]
# position
newcons[4 * i + 1, 0] = cons[3 * i + 1, 0]
newcons[4 * i + 1, 1] = cons[3 * i + 1, 1]
newcons[4 * i + 1, 2] = cons[3 * i + 1, 2]
# fwhm
newcons[4 * i + 2, 0] = cons[3 * i + 2, 0]
newcons[4 * i + 2, 1] = cons[3 * i + 2, 1]
newcons[4 * i + 2, 2] = cons[3 * i + 2, 2]
# Eta constrains
newcons[4 * i + 3, 0] = CFREE
newcons[4 * i + 3, 1] = 0
newcons[4 * i + 3, 2] = 0
if self.config['QuotedEtaFlag']:
newcons[4 * i + 3, 0] = CQUOTED
newcons[4 * i + 3, 1] = 0.0
newcons[4 * i + 3, 2] = 1.0
return newpar, newcons
def estimate_splitpvoigt(self, x, y):
"""Estimation of *Height, Position, FWHM1, FWHM2, eta* of peaks, for
asymmetric pseudo-Voigt curves.
This functions uses :meth:`estimate_height_position_fwhm`, then
adds an identical FWHM2 parameter and a constant estimation of
*eta* (0.5) to the fit parameters for each peak, and the corresponding
constraints.
Constraint for the eta parameter can be set to QUOTED (0.--1.)
by setting :attr:`config`['QuotedEtaFlag'] to ``True``.
If this is not the case, the constraint code is set to FREE.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*Height, Position, FWHM1, FWHM2, eta*.
"""
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
npeaks = len(fittedpar) // 3
newpar = []
newcons = numpy.zeros((5 * npeaks, 3), numpy.float)
# find out related parameters proper index
if not self.config['NoConstraintsFlag']:
if self.config['SameFwhmFlag']:
j = 0
# get the index of the free FWHM
for i in range(npeaks):
if cons[3 * i + 2, 0] != 4:
j = i
for i in range(npeaks):
if i != j:
cons[3 * i + 2, 1] = 4 * j + 2
for i in range(npeaks):
# height
newpar.append(fittedpar[3 * i])
# position
newpar.append(fittedpar[3 * i + 1])
# fwhm1
newpar.append(fittedpar[3 * i + 2])
# fwhm2 estimate equal to fwhm1
newpar.append(fittedpar[3 * i + 2])
# eta
newpar.append(0.5)
# constraint codes
# ----------------
# height
newcons[5 * i, 0] = cons[3 * i, 0]
# position
newcons[5 * i + 1, 0] = cons[3 * i + 1, 0]
# fwhm1
newcons[5 * i + 2, 0] = cons[3 * i + 2, 0]
# fwhm2
newcons[5 * i + 3, 0] = cons[3 * i + 2, 0]
# cons 1
# ------
newcons[5 * i, 1] = cons[3 * i, 1]
newcons[5 * i + 1, 1] = cons[3 * i + 1, 1]
newcons[5 * i + 2, 1] = cons[3 * i + 2, 1]
newcons[5 * i + 3, 1] = cons[3 * i + 2, 1]
# cons 2
# ------
newcons[5 * i, 2] = cons[3 * i, 2]
newcons[5 * i + 1, 2] = cons[3 * i + 1, 2]
newcons[5 * i + 2, 2] = cons[3 * i + 2, 2]
newcons[5 * i + 3, 2] = cons[3 * i + 2, 2]
if cons[3 * i + 2, 0] == CFACTOR:
# fwhm2 connstraint depends on fwhm1
newcons[5 * i + 3, 1] = newcons[5 * i + 2, 1] + 1
# eta constraints
newcons[5 * i + 4, 0] = CFREE
newcons[5 * i + 4, 1] = 0
newcons[5 * i + 4, 2] = 0
if self.config['QuotedEtaFlag']:
newcons[5 * i + 4, 0] = CQUOTED
newcons[5 * i + 4, 1] = 0.0
newcons[5 * i + 4, 2] = 1.0
return newpar, newcons
def estimate_apvoigt(self, x, y):
"""Estimation of *Area, Position, FWHM1, eta* of peaks, for
pseudo-Voigt curves.
This functions uses :meth:`estimate_pvoigt`, then converts the height
parameter to area.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*Area, Position, FWHM, eta*.
"""
fittedpar, cons = self.estimate_pvoigt(x, y)
npeaks = len(fittedpar) // 4
# Assume 50% of the area is determined by the gaussian and 50% by
# the Lorentzian.
for i in range(npeaks):
height = fittedpar[4 * i]
fwhm = fittedpar[4 * i + 2]
fittedpar[4 * i] = 0.5 * (height * fwhm * 0.5 * numpy.pi) +\
0.5 * (height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))
) * numpy.sqrt(2 * numpy.pi)
return fittedpar, cons
def estimate_ahypermet(self, x, y):
"""Estimation of *area, position, fwhm, st_area_r, st_slope_r,
lt_area_r, lt_slope_r, step_height_r* of peaks, for hypermet curves.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*area, position, fwhm, st_area_r, st_slope_r,
lt_area_r, lt_slope_r, step_height_r* .
"""
yscaling = self.config.get('Yscaling', 1.0)
if yscaling == 0:
yscaling = 1.0
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
npeaks = len(fittedpar) // 3
newpar = []
newcons = numpy.zeros((8 * npeaks, 3), numpy.float)
main_peak = 0
# find out related parameters proper index
if not self.config['NoConstraintsFlag']:
if self.config['SameFwhmFlag']:
j = 0
# get the index of the free FWHM
for i in range(npeaks):
if cons[3 * i + 2, 0] != 4:
j = i
for i in range(npeaks):
if i != j:
cons[3 * i + 2, 1] = 8 * j + 2
main_peak = j
for i in range(npeaks):
if fittedpar[3 * i] > fittedpar[3 * main_peak]:
main_peak = i
for i in range(npeaks):
height = fittedpar[3 * i]
position = fittedpar[3 * i + 1]
fwhm = fittedpar[3 * i + 2]
area = (height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))
) * numpy.sqrt(2 * numpy.pi)
# the gaussian parameters
newpar.append(area)
newpar.append(position)
newpar.append(fwhm)
# print "area, pos , fwhm = ",area,position,fwhm
# Avoid zero derivatives because of not calculating contribution
g_term = 1
st_term = 1
lt_term = 1
step_term = 1
if self.config['HypermetTails'] != 0:
g_term = self.config['HypermetTails'] & 1
st_term = (self.config['HypermetTails'] >> 1) & 1
lt_term = (self.config['HypermetTails'] >> 2) & 1
step_term = (self.config['HypermetTails'] >> 3) & 1
if g_term == 0:
# fix the gaussian parameters
newcons[8 * i, 0] = CFIXED
newcons[8 * i + 1, 0] = CFIXED
newcons[8 * i + 2, 0] = CFIXED
# the short tail parameters
if ((area * yscaling) <
self.config['MinGaussArea4ShortTail']) | \
(st_term == 0):
newpar.append(0.0)
newpar.append(0.0)
newcons[8 * i + 3, 0] = CFIXED
newcons[8 * i + 3, 1] = 0.0
newcons[8 * i + 3, 2] = 0.0
newcons[8 * i + 4, 0] = CFIXED
newcons[8 * i + 4, 1] = 0.0
newcons[8 * i + 4, 2] = 0.0
else:
newpar.append(self.config['InitialShortTailAreaRatio'])
newpar.append(self.config['InitialShortTailSlopeRatio'])
newcons[8 * i + 3, 0] = CQUOTED
newcons[8 * i + 3, 1] = self.config['MinShortTailAreaRatio']
newcons[8 * i + 3, 2] = self.config['MaxShortTailAreaRatio']
newcons[8 * i + 4, 0] = CQUOTED
newcons[8 * i + 4, 1] = self.config['MinShortTailSlopeRatio']
newcons[8 * i + 4, 2] = self.config['MaxShortTailSlopeRatio']
# the long tail parameters
if ((area * yscaling) <
self.config['MinGaussArea4LongTail']) | \
(lt_term == 0):
newpar.append(0.0)
newpar.append(0.0)
newcons[8 * i + 5, 0] = CFIXED
newcons[8 * i + 5, 1] = 0.0
newcons[8 * i + 5, 2] = 0.0
newcons[8 * i + 6, 0] = CFIXED
newcons[8 * i + 6, 1] = 0.0
newcons[8 * i + 6, 2] = 0.0
else:
newpar.append(self.config['InitialLongTailAreaRatio'])
newpar.append(self.config['InitialLongTailSlopeRatio'])
newcons[8 * i + 5, 0] = CQUOTED
newcons[8 * i + 5, 1] = self.config['MinLongTailAreaRatio']
newcons[8 * i + 5, 2] = self.config['MaxLongTailAreaRatio']
newcons[8 * i + 6, 0] = CQUOTED
newcons[8 * i + 6, 1] = self.config['MinLongTailSlopeRatio']
newcons[8 * i + 6, 2] = self.config['MaxLongTailSlopeRatio']
# the step parameters
if ((height * yscaling) <
self.config['MinGaussHeight4StepTail']) | \
(step_term == 0):
newpar.append(0.0)
newcons[8 * i + 7, 0] = CFIXED
newcons[8 * i + 7, 1] = 0.0
newcons[8 * i + 7, 2] = 0.0
else:
newpar.append(self.config['InitialStepTailHeightRatio'])
newcons[8 * i + 7, 0] = CQUOTED
newcons[8 * i + 7, 1] = self.config['MinStepTailHeightRatio']
newcons[8 * i + 7, 2] = self.config['MaxStepTailHeightRatio']
# if self.config['NoConstraintsFlag'] == 1:
# newcons=numpy.zeros((8*npeaks, 3),numpy.float)
if npeaks > 0:
if g_term:
if self.config['PositiveHeightAreaFlag']:
for i in range(npeaks):
newcons[8 * i, 0] = CPOSITIVE
if self.config['PositiveFwhmFlag']:
for i in range(npeaks):
newcons[8 * i + 2, 0] = CPOSITIVE
if self.config['SameFwhmFlag']:
for i in range(npeaks):
if i != main_peak:
newcons[8 * i + 2, 0] = CFACTOR
newcons[8 * i + 2, 1] = 8 * main_peak + 2
newcons[8 * i + 2, 2] = 1.0
if self.config['HypermetQuotedPositionFlag']:
for i in range(npeaks):
delta = self.config['DeltaPositionFwhmUnits'] * fwhm
newcons[8 * i + 1, 0] = CQUOTED
newcons[8 * i + 1, 1] = newpar[8 * i + 1] - delta
newcons[8 * i + 1, 2] = newpar[8 * i + 1] + delta
if self.config['SameSlopeRatioFlag']:
for i in range(npeaks):
if i != main_peak:
newcons[8 * i + 4, 0] = CFACTOR
newcons[8 * i + 4, 1] = 8 * main_peak + 4
newcons[8 * i + 4, 2] = 1.0
newcons[8 * i + 6, 0] = CFACTOR
newcons[8 * i + 6, 1] = 8 * main_peak + 6
newcons[8 * i + 6, 2] = 1.0
if self.config['SameAreaRatioFlag']:
for i in range(npeaks):
if i != main_peak:
newcons[8 * i + 3, 0] = CFACTOR
newcons[8 * i + 3, 1] = 8 * main_peak + 3
newcons[8 * i + 3, 2] = 1.0
newcons[8 * i + 5, 0] = CFACTOR
newcons[8 * i + 5, 1] = 8 * main_peak + 5
newcons[8 * i + 5, 2] = 1.0
return newpar, newcons
def estimate_stepdown(self, x, y):
"""Estimation of parameters for stepdown curves.
The functions estimates gaussian parameters for the derivative of
the data, takes the largest gaussian peak and uses its estimated
parameters to define the center of the step and its fwhm. The
estimated amplitude returned is simply ``max(y) - min(y)``.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit newconstraints.
Parameters to be estimated for each stepdown are:
*height, centroid, fwhm* .
"""
crappyfilter = [-0.25, -0.75, 0.0, 0.75, 0.25]
cutoff = len(crappyfilter) // 2
y_deriv = numpy.convolve(y,
crappyfilter,
mode="valid")
# make the derivative's peak have the same amplitude as the step
if max(y_deriv) > 0:
y_deriv = y_deriv * max(y) / max(y_deriv)
fittedpar, newcons = self.estimate_height_position_fwhm(
x[cutoff:-cutoff], y_deriv)
data_amplitude = max(y) - min(y)
# use parameters from largest gaussian found
if len(fittedpar):
npeaks = len(fittedpar) // 3
largest_index = 0
largest = [data_amplitude,
fittedpar[3 * largest_index + 1],
fittedpar[3 * largest_index + 2]]
for i in range(npeaks):
if fittedpar[3 * i] > largest[0]:
largest_index = i
largest = [data_amplitude,
fittedpar[3 * largest_index + 1],
fittedpar[3 * largest_index + 2]]
else:
# no peak was found
largest = [data_amplitude, # height
x[len(x)//2], # center: middle of x range
self.config["FwhmPoints"] * (x[1] - x[0])] # fwhm: default value
# Setup constrains
newcons = numpy.zeros((3, 3), numpy.float)
if not self.config['NoConstraintsFlag']:
# Setup height constrains
if self.config['PositiveHeightAreaFlag']:
newcons[0, 0] = CPOSITIVE
newcons[0, 1] = 0
newcons[0, 2] = 0
# Setup position constrains
if self.config['QuotedPositionFlag']:
newcons[1, 0] = CQUOTED
newcons[1, 1] = min(x)
newcons[1, 2] = max(x)
# Setup positive FWHM constrains
if self.config['PositiveFwhmFlag']:
newcons[2, 0] = CPOSITIVE
newcons[2, 1] = 0
newcons[2, 2] = 0
return largest, newcons
def estimate_slit(self, x, y):
"""Estimation of parameters for slit curves.
The functions estimates stepup and stepdown parameters for the largest
steps, and uses them for calculating the center (middle between stepup
and stepdown), the height (maximum amplitude in data), the fwhm
(distance between the up- and down-step centers) and the beamfwhm
(average of FWHM for up- and down-step).
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each slit are:
*height, position, fwhm, beamfwhm* .
"""
largestup, cons = self.estimate_stepup(x, y)
largestdown, cons = self.estimate_stepdown(x, y)
fwhm = numpy.fabs(largestdown[1] - largestup[1])
beamfwhm = 0.5 * (largestup[2] + largestdown[1])
beamfwhm = min(beamfwhm, fwhm / 10.0)
beamfwhm = max(beamfwhm, (max(x) - min(x)) * 3.0 / len(x))
y_minus_bg = y - self.strip_bg(y)
height = max(y_minus_bg)
i1 = numpy.nonzero(y_minus_bg >= 0.5 * height)[0]
xx = numpy.take(x, i1)
position = (xx[0] + xx[-1]) / 2.0
fwhm = xx[-1] - xx[0]
largest = [height, position, fwhm, beamfwhm]
cons = numpy.zeros((4, 3), numpy.float)
# Setup constrains
if not self.config['NoConstraintsFlag']:
# Setup height constrains
if self.config['PositiveHeightAreaFlag']:
cons[0, 0] = CPOSITIVE
cons[0, 1] = 0
cons[0, 2] = 0
# Setup position constrains
if self.config['QuotedPositionFlag']:
cons[1, 0] = CQUOTED
cons[1, 1] = min(x)
cons[1, 2] = max(x)
# Setup positive FWHM constrains
if self.config['PositiveFwhmFlag']:
cons[2, 0] = CPOSITIVE
cons[2, 1] = 0
cons[2, 2] = 0
# Setup positive FWHM constrains
if self.config['PositiveFwhmFlag']:
cons[3, 0] = CPOSITIVE
cons[3, 1] = 0
cons[3, 2] = 0
return largest, cons
def estimate_stepup(self, x, y):
"""Estimation of parameters for a single step up curve.
The functions estimates gaussian parameters for the derivative of
the data, takes the largest gaussian peak and uses its estimated
parameters to define the center of the step and its fwhm. The
estimated amplitude returned is simply ``max(y) - min(y)``.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each stepup are:
*height, centroid, fwhm* .
"""
crappyfilter = [0.25, 0.75, 0.0, -0.75, -0.25]
cutoff = len(crappyfilter) // 2
y_deriv = numpy.convolve(y, crappyfilter, mode="valid")
if max(y_deriv) > 0:
y_deriv = y_deriv * max(y) / max(y_deriv)
fittedpar, cons = self.estimate_height_position_fwhm(
x[cutoff:-cutoff], y_deriv)
# for height, use the data amplitude after removing the background
data_amplitude = max(y) - min(y)
# find params of the largest gaussian found
if len(fittedpar):
npeaks = len(fittedpar) // 3
largest_index = 0
largest = [data_amplitude,
fittedpar[3 * largest_index + 1],
fittedpar[3 * largest_index + 2]]
for i in range(npeaks):
if fittedpar[3 * i] > largest[0]:
largest_index = i
largest = [fittedpar[3 * largest_index],
fittedpar[3 * largest_index + 1],
fittedpar[3 * largest_index + 2]]
else:
# no peak was found
largest = [data_amplitude, # height
x[len(x)//2], # center: middle of x range
self.config["FwhmPoints"] * (x[1] - x[0])] # fwhm: default value
newcons = numpy.zeros((3, 3), numpy.float)
# Setup constrains
if not self.config['NoConstraintsFlag']:
# Setup height constraints
if self.config['PositiveHeightAreaFlag']:
newcons[0, 0] = CPOSITIVE
newcons[0, 1] = 0
newcons[0, 2] = 0
# Setup position constraints
if self.config['QuotedPositionFlag']:
newcons[1, 0] = CQUOTED
newcons[1, 1] = min(x)
newcons[1, 2] = max(x)
# Setup positive FWHM constraints
if self.config['PositiveFwhmFlag']:
newcons[2, 0] = CPOSITIVE
newcons[2, 1] = 0
newcons[2, 2] = 0
return largest, newcons
def estimate_periodic_gauss(self, x, y):
"""Estimation of parameters for periodic gaussian curves:
*number of peaks, distance between peaks, height, position of the
first peak, fwhm*
The functions detects all peaks, then computes the parameters the
following way:
- *distance*: average of distances between detected peaks
- *height*: average height of detected peaks
- *fwhm*: fwhm of the highest peak (in number of samples) if
field ``'AutoFwhm'`` in :attr:`config` is ``True``, else take
the default value (field ``'FwhmPoints'`` in :attr:`config`)
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
"""
yscaling = self.config.get('Yscaling', 1.0)
if yscaling == 0:
yscaling = 1.0
bg = self.strip_bg(y)
if self.config['AutoFwhm']:
search_fwhm = guess_fwhm(y)
else:
search_fwhm = int(float(self.config['FwhmPoints']))
search_sens = float(self.config['Sensitivity'])
if search_fwhm < 3:
search_fwhm = 3
if search_sens < 1:
search_sens = 1
if len(y) > 1.5 * search_fwhm:
peaks = peak_search(yscaling * y, fwhm=search_fwhm,
sensitivity=search_sens)
else:
peaks = []
npeaks = len(peaks)
if not npeaks:
fittedpar = []
cons = numpy.zeros((len(fittedpar), 3), numpy.float)
return fittedpar, cons
fittedpar = [0.0, 0.0, 0.0, 0.0, 0.0]
# The number of peaks
fittedpar[0] = npeaks
# The separation between peaks in x units
delta = 0.0
height = 0.0
for i in range(npeaks):
height += y[int(peaks[i])] - bg[int(peaks[i])]
if i != npeaks - 1:
delta += (x[int(peaks[i + 1])] - x[int(peaks[i])])
# delta between peaks
if npeaks > 1:
fittedpar[1] = delta / (npeaks - 1)
# starting height
fittedpar[2] = height / npeaks
# position of the first peak
fittedpar[3] = x[int(peaks[0])]
# Estimate the fwhm
fittedpar[4] = search_fwhm
# setup constraints
cons = numpy.zeros((5, 3), numpy.float)
cons[0, 0] = CFIXED # the number of gaussians
if npeaks == 1:
cons[1, 0] = CFIXED # the delta between peaks
else:
cons[1, 0] = CFREE
j = 2
# Setup height area constrains
if not self.config['NoConstraintsFlag']:
if self.config['PositiveHeightAreaFlag']:
# POSITIVE = 1
cons[j, 0] = CPOSITIVE
cons[j, 1] = 0
cons[j, 2] = 0
j += 1
# Setup position constrains
if not self.config['NoConstraintsFlag']:
if self.config['QuotedPositionFlag']:
# QUOTED = 2
cons[j, 0] = CQUOTED
cons[j, 1] = min(x)
cons[j, 2] = max(x)
j += 1
# Setup positive FWHM constrains
if not self.config['NoConstraintsFlag']:
if self.config['PositiveFwhmFlag']:
# POSITIVE=1
cons[j, 0] = CPOSITIVE
cons[j, 1] = 0
cons[j, 2] = 0
j += 1
return fittedpar, cons
def configure(self, **kw):
"""Add new / unknown keyword arguments to :attr:`config`,
update entries in :attr:`config` if the parameter name is a existing
key.
:param kw: Dictionary of keyword arguments.
:return: Configuration dictionary :attr:`config`
"""
if not kw.keys():
return self.config
for key in kw.keys():
notdone = 1
# take care of lower / upper case problems ...
for config_key in self.config.keys():
if config_key.lower() == key.lower():
self.config[config_key] = kw[key]
notdone = 0
if notdone:
self.config[key] = kw[key]
return self.config
fitfuns = FitTheories()
THEORY = OrderedDict((
('Gaussians',
FitTheory(description='Gaussian functions',
function=functions.sum_gauss,
parameters=('Height', 'Position', 'FWHM'),
estimate=fitfuns.estimate_height_position_fwhm,
configure=fitfuns.configure)),
('Lorentz',
FitTheory(description='Lorentzian functions',
function=functions.sum_lorentz,
parameters=('Height', 'Position', 'FWHM'),
estimate=fitfuns.estimate_height_position_fwhm,
configure=fitfuns.configure)),
('Area Gaussians',
FitTheory(description='Gaussian functions (area)',
function=functions.sum_agauss,
parameters=('Area', 'Position', 'FWHM'),
estimate=fitfuns.estimate_agauss,
configure=fitfuns.configure)),
('Area Lorentz',
FitTheory(description='Lorentzian functions (area)',
function=functions.sum_alorentz,
parameters=('Area', 'Position', 'FWHM'),
estimate=fitfuns.estimate_alorentz,
configure=fitfuns.configure)),
('Pseudo-Voigt Line',
FitTheory(description='Pseudo-Voigt functions',
function=functions.sum_pvoigt,
parameters=('Height', 'Position', 'FWHM', 'Eta'),
estimate=fitfuns.estimate_pvoigt,
configure=fitfuns.configure)),
('Area Pseudo-Voigt',
FitTheory(description='Pseudo-Voigt functions (area)',
function=functions.sum_apvoigt,
parameters=('Area', 'Position', 'FWHM', 'Eta'),
estimate=fitfuns.estimate_apvoigt,
configure=fitfuns.configure)),
('Split Gaussian',
FitTheory(description='Asymmetric gaussian functions',
function=functions.sum_splitgauss,
parameters=('Height', 'Position', 'LowFWHM',
'HighFWHM'),
estimate=fitfuns.estimate_splitgauss,
configure=fitfuns.configure)),
('Split Lorentz',
FitTheory(description='Asymmetric lorentzian functions',
function=functions.sum_splitlorentz,
parameters=('Height', 'Position', 'LowFWHM', 'HighFWHM'),
estimate=fitfuns.estimate_splitgauss,
configure=fitfuns.configure)),
('Split Pseudo-Voigt',
FitTheory(description='Asymmetric pseudo-Voigt functions',
function=functions.sum_splitpvoigt,
parameters=('Height', 'Position', 'LowFWHM',
'HighFWHM', 'Eta'),
estimate=fitfuns.estimate_splitpvoigt,
configure=fitfuns.configure)),
('Step Down',
FitTheory(description='Step down function',
function=functions.sum_stepdown,
parameters=('Height', 'Position', 'FWHM'),
estimate=fitfuns.estimate_stepdown,
configure=fitfuns.configure)),
('Step Up',
FitTheory(description='Step up function',
function=functions.sum_stepup,
parameters=('Height', 'Position', 'FWHM'),
estimate=fitfuns.estimate_stepup,
configure=fitfuns.configure)),
('Slit',
FitTheory(description='Slit function',
function=functions.sum_slit,
parameters=('Height', 'Position', 'FWHM', 'BeamFWHM'),
estimate=fitfuns.estimate_slit,
configure=fitfuns.configure)),
('Atan',
FitTheory(description='Arctan step up function',
function=functions.atan_stepup,
parameters=('Height', 'Position', 'Width'),
estimate=fitfuns.estimate_stepup,
configure=fitfuns.configure)),
('Hypermet',
FitTheory(description='Hypermet functions',
function=fitfuns.ahypermet, # customized version of functions.sum_ahypermet
parameters=('G_Area', 'Position', 'FWHM', 'ST_Area',
'ST_Slope', 'LT_Area', 'LT_Slope', 'Step_H'),
estimate=fitfuns.estimate_ahypermet,
configure=fitfuns.configure)),
# ('Periodic Gaussians',
# FitTheory(description='Periodic gaussian functions',
# function=functions.periodic_gauss,
# parameters=('N', 'Delta', 'Height', 'Position', 'FWHM'),
# estimate=fitfuns.estimate_periodic_gauss,
# configure=fitfuns.configure))
('Degree 2 Polynomial',
FitTheory(description='Degree 2 polynomial'
'\ny = a*x^2 + b*x +c',
function=fitfuns.poly,
parameters=['a', 'b', 'c'],
estimate=fitfuns.estimate_quadratic)),
('Degree 3 Polynomial',
FitTheory(description='Degree 3 polynomial'
'\ny = a*x^3 + b*x^2 + c*x + d',
function=fitfuns.poly,
parameters=['a', 'b', 'c', 'd'],
estimate=fitfuns.estimate_cubic)),
('Degree 4 Polynomial',
FitTheory(description='Degree 4 polynomial'
'\ny = a*x^4 + b*x^3 + c*x^2 + d*x + e',
function=fitfuns.poly,
parameters=['a', 'b', 'c', 'd', 'e'],
estimate=fitfuns.estimate_quartic)),
('Degree 5 Polynomial',
FitTheory(description='Degree 5 polynomial'
'\ny = a*x^5 + b*x^4 + c*x^3 + d*x^2 + e*x + f',
function=fitfuns.poly,
parameters=['a', 'b', 'c', 'd', 'e', 'f'],
estimate=fitfuns.estimate_quintic)),
))
"""Dictionary of fit theories: fit functions and their associated estimation
function, parameters list, configuration function and description.
"""
def test(a):
from silx.math.fit import fitmanager
x = numpy.arange(1000).astype(numpy.float)
p = [1500, 100., 50.0,
1500, 700., 50.0]
y_synthetic = functions.sum_gauss(x, *p) + 1
fit = fitmanager.FitManager(x, y_synthetic)
fit.addtheory('Gaussians', functions.sum_gauss, ['Height', 'Position', 'FWHM'],
a.estimate_height_position_fwhm)
fit.settheory('Gaussians')
fit.setbackground('Linear')
fit.estimate()
fit.runfit()
y_fit = fit.gendata()
print("Fit parameter names: %s" % str(fit.get_names()))
print("Theoretical parameters: %s" % str(numpy.append([1, 0], p)))
print("Fitted parameters: %s" % str(fit.get_fitted_parameters()))
try:
from silx.gui import qt
from silx.gui.plot import plot1D
app = qt.QApplication([])
# Offset of 1 to see the difference in log scale
plot1D(x, (y_synthetic + 1, y_fit), "Input data + 1, Fit")
app.exec_()
except ImportError:
_logger.warning("Unable to load qt binding, can't plot results.")
if __name__ == "__main__":
test(fitfuns)
| 41.025455 | 99 | 0.537192 |
y'] = 1
npoints = len(y)
peaks = self.peak_search(y,
fwhm=search_fwhm,
sensitivity=search_sens)
if not len(peaks):
forcepeak = int(float(self.config.get('ForcePeakPresence', 0)))
if forcepeak:
delta = y - bg
peaks = [numpy.nonzero(delta == delta.max())[0][0]]
index_largest_peak = 0
if len(peaks) > 0:
sig = 5 * abs(x[npoints - 1] - x[0]) / npoints
peakpos = x[int(peaks[0])]
if abs(peakpos) < 1.0e-16:
peakpos = 0.0
param = numpy.array(
[y[int(peaks[0])] - bg[int(peaks[0])], peakpos, sig])
height_largest_peak = param[0]
peak_index = 1
for i in peaks[1:]:
param2 = numpy.array(
[y[int(i)] - bg[int(i)], x[int(i)], sig])
param = numpy.concatenate((param, param2))
if param2[0] > height_largest_peak:
height_largest_peak = param2[0]
index_largest_peak = peak_index
peak_index += 1
xw = x
yw = y - bg
cons = numpy.zeros((len(param), 3), numpy.float)
cons[0:len(param):3, 0] = CPOSITIVE
cons[1:len(param):3, 0] = CQUOTED
if len(xw) > search_fwhm:
fwhmx = numpy.fabs(xw[int(search_fwhm)] - xw[0])
cons[1:len(param):3, 1] = param[1:len(param):3] - 0.5 * fwhmx
cons[1:len(param):3, 2] = param[1:len(param):3] + 0.5 * fwhmx
else:
shape = [max(1, int(x)) for x in (param[1:len(param):3])]
cons[1:len(param):3, 1] = min(xw) * numpy.ones(
shape,
numpy.float)
cons[1:len(param):3, 2] = max(xw) * numpy.ones(
shape,
numpy.float)
cons[2:len(param):3, 0] = CPOSITIVE
fittedpar, _, _ = leastsq(functions.sum_gauss, xw, yw, param,
max_iter=4, constraints=cons.tolist(),
full_output=True)
cons = numpy.zeros((len(fittedpar), 3), numpy.float)
peak_index = 0
for i in range(len(peaks)):
if not self.config['NoConstraintsFlag']:
if self.config['PositiveHeightAreaFlag']:
cons[peak_index, 0] = CPOSITIVE
cons[peak_index, 1] = 0
cons[peak_index, 2] = 0
peak_index += 1
if not self.config['NoConstraintsFlag']:
if self.config['QuotedPositionFlag']:
cons[peak_index, 0] = CQUOTED
cons[peak_index, 1] = min(x)
cons[peak_index, 2] = max(x)
peak_index += 1
if not self.config['NoConstraintsFlag']:
if self.config['PositiveFwhmFlag']:
cons[peak_index, 0] = CPOSITIVE
cons[peak_index, 1] = 0
cons[peak_index, 2] = 0
if self.config['SameFwhmFlag']:
if i != index_largest_peak:
cons[peak_index, 0] = CFACTOR
cons[peak_index, 1] = 3 * index_largest_peak + 2
cons[peak_index, 2] = 1.0
peak_index += 1
return fittedpar, cons
def estimate_agauss(self, x, y):
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
npeaks = len(fittedpar) // 3
for i in range(npeaks):
height = fittedpar[3 * i]
fwhm = fittedpar[3 * i + 2]
fittedpar[3 * i] = numpy.sqrt(2 * numpy.pi) * height * fwhm / (
2.0 * numpy.sqrt(2 * numpy.log(2)))
return fittedpar, cons
def estimate_alorentz(self, x, y):
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
npeaks = len(fittedpar) // 3
for i in range(npeaks):
height = fittedpar[3 * i]
fwhm = fittedpar[3 * i + 2]
fittedpar[3 * i] = (height * fwhm * 0.5 * numpy.pi)
return fittedpar, cons
def estimate_splitgauss(self, x, y):
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
npeaks = len(fittedpar) // 3
estimated_parameters = []
estimated_constraints = numpy.zeros((4 * npeaks, 3), numpy.float)
for i in range(npeaks):
for j in range(3):
estimated_parameters.append(fittedpar[3 * i + j])
estimated_parameters.append(fittedpar[3 * i + 2])
estimated_constraints[4 * i, 0] = cons[3 * i, 0]
estimated_constraints[4 * i, 1] = cons[3 * i, 1]
estimated_constraints[4 * i, 2] = cons[3 * i, 2]
estimated_constraints[4 * i + 1, 0] = cons[3 * i + 1, 0]
estimated_constraints[4 * i + 1, 1] = cons[3 * i + 1, 1]
estimated_constraints[4 * i + 1, 2] = cons[3 * i + 1, 2]
estimated_constraints[4 * i + 2, 0] = cons[3 * i + 2, 0]
estimated_constraints[4 * i + 2, 1] = cons[3 * i + 2, 1]
estimated_constraints[4 * i + 2, 2] = cons[3 * i + 2, 2]
estimated_constraints[4 * i + 3, 0] = cons[3 * i + 2, 0]
estimated_constraints[4 * i + 3, 1] = cons[3 * i + 2, 1]
estimated_constraints[4 * i + 3, 2] = cons[3 * i + 2, 2]
if cons[3 * i + 2, 0] == CFACTOR:
estimated_constraints[4 * i + 2, 1] = \
int(cons[3 * i + 2, 1] / 3) * 4 + 2
estimated_constraints[4 * i + 3, 1] = \
int(cons[3 * i + 2, 1] / 3) * 4 + 3
return estimated_parameters, estimated_constraints
def estimate_pvoigt(self, x, y):
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
npeaks = len(fittedpar) // 3
newpar = []
newcons = numpy.zeros((4 * npeaks, 3), numpy.float)
if not self.config['NoConstraintsFlag']:
if self.config['SameFwhmFlag']:
j = 0
for i in range(npeaks):
if cons[3 * i + 2, 0] != 4:
j = i
for i in range(npeaks):
if i != j:
cons[3 * i + 2, 1] = 4 * j + 2
for i in range(npeaks):
newpar.append(fittedpar[3 * i])
newpar.append(fittedpar[3 * i + 1])
newpar.append(fittedpar[3 * i + 2])
newpar.append(0.5)
newcons[4 * i, 0] = cons[3 * i, 0]
newcons[4 * i, 1] = cons[3 * i, 1]
newcons[4 * i, 2] = cons[3 * i, 2]
newcons[4 * i + 1, 0] = cons[3 * i + 1, 0]
newcons[4 * i + 1, 1] = cons[3 * i + 1, 1]
newcons[4 * i + 1, 2] = cons[3 * i + 1, 2]
newcons[4 * i + 2, 0] = cons[3 * i + 2, 0]
newcons[4 * i + 2, 1] = cons[3 * i + 2, 1]
newcons[4 * i + 2, 2] = cons[3 * i + 2, 2]
newcons[4 * i + 3, 0] = CFREE
newcons[4 * i + 3, 1] = 0
newcons[4 * i + 3, 2] = 0
if self.config['QuotedEtaFlag']:
newcons[4 * i + 3, 0] = CQUOTED
newcons[4 * i + 3, 1] = 0.0
newcons[4 * i + 3, 2] = 1.0
return newpar, newcons
def estimate_splitpvoigt(self, x, y):
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
npeaks = len(fittedpar) // 3
newpar = []
newcons = numpy.zeros((5 * npeaks, 3), numpy.float)
if not self.config['NoConstraintsFlag']:
if self.config['SameFwhmFlag']:
j = 0
for i in range(npeaks):
if cons[3 * i + 2, 0] != 4:
j = i
for i in range(npeaks):
if i != j:
cons[3 * i + 2, 1] = 4 * j + 2
for i in range(npeaks):
newpar.append(fittedpar[3 * i])
newpar.append(fittedpar[3 * i + 1])
newpar.append(fittedpar[3 * i + 2])
newpar.append(fittedpar[3 * i + 2])
newpar.append(0.5)
newcons[5 * i, 0] = cons[3 * i, 0]
newcons[5 * i + 1, 0] = cons[3 * i + 1, 0]
newcons[5 * i + 2, 0] = cons[3 * i + 2, 0]
newcons[5 * i + 3, 0] = cons[3 * i + 2, 0]
newcons[5 * i, 1] = cons[3 * i, 1]
newcons[5 * i + 1, 1] = cons[3 * i + 1, 1]
newcons[5 * i + 2, 1] = cons[3 * i + 2, 1]
newcons[5 * i + 3, 1] = cons[3 * i + 2, 1]
newcons[5 * i, 2] = cons[3 * i, 2]
newcons[5 * i + 1, 2] = cons[3 * i + 1, 2]
newcons[5 * i + 2, 2] = cons[3 * i + 2, 2]
newcons[5 * i + 3, 2] = cons[3 * i + 2, 2]
if cons[3 * i + 2, 0] == CFACTOR:
newcons[5 * i + 3, 1] = newcons[5 * i + 2, 1] + 1
newcons[5 * i + 4, 0] = CFREE
newcons[5 * i + 4, 1] = 0
newcons[5 * i + 4, 2] = 0
if self.config['QuotedEtaFlag']:
newcons[5 * i + 4, 0] = CQUOTED
newcons[5 * i + 4, 1] = 0.0
newcons[5 * i + 4, 2] = 1.0
return newpar, newcons
def estimate_apvoigt(self, x, y):
fittedpar, cons = self.estimate_pvoigt(x, y)
npeaks = len(fittedpar) // 4
for i in range(npeaks):
height = fittedpar[4 * i]
fwhm = fittedpar[4 * i + 2]
fittedpar[4 * i] = 0.5 * (height * fwhm * 0.5 * numpy.pi) +\
0.5 * (height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))
) * numpy.sqrt(2 * numpy.pi)
return fittedpar, cons
def estimate_ahypermet(self, x, y):
yscaling = self.config.get('Yscaling', 1.0)
if yscaling == 0:
yscaling = 1.0
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
npeaks = len(fittedpar) // 3
newpar = []
newcons = numpy.zeros((8 * npeaks, 3), numpy.float)
main_peak = 0
if not self.config['NoConstraintsFlag']:
if self.config['SameFwhmFlag']:
j = 0
for i in range(npeaks):
if cons[3 * i + 2, 0] != 4:
j = i
for i in range(npeaks):
if i != j:
cons[3 * i + 2, 1] = 8 * j + 2
main_peak = j
for i in range(npeaks):
if fittedpar[3 * i] > fittedpar[3 * main_peak]:
main_peak = i
for i in range(npeaks):
height = fittedpar[3 * i]
position = fittedpar[3 * i + 1]
fwhm = fittedpar[3 * i + 2]
area = (height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))
) * numpy.sqrt(2 * numpy.pi)
newpar.append(area)
newpar.append(position)
newpar.append(fwhm)
g_term = 1
st_term = 1
lt_term = 1
step_term = 1
if self.config['HypermetTails'] != 0:
g_term = self.config['HypermetTails'] & 1
st_term = (self.config['HypermetTails'] >> 1) & 1
lt_term = (self.config['HypermetTails'] >> 2) & 1
step_term = (self.config['HypermetTails'] >> 3) & 1
if g_term == 0:
newcons[8 * i, 0] = CFIXED
newcons[8 * i + 1, 0] = CFIXED
newcons[8 * i + 2, 0] = CFIXED
if ((area * yscaling) <
self.config['MinGaussArea4ShortTail']) | \
(st_term == 0):
newpar.append(0.0)
newpar.append(0.0)
newcons[8 * i + 3, 0] = CFIXED
newcons[8 * i + 3, 1] = 0.0
newcons[8 * i + 3, 2] = 0.0
newcons[8 * i + 4, 0] = CFIXED
newcons[8 * i + 4, 1] = 0.0
newcons[8 * i + 4, 2] = 0.0
else:
newpar.append(self.config['InitialShortTailAreaRatio'])
newpar.append(self.config['InitialShortTailSlopeRatio'])
newcons[8 * i + 3, 0] = CQUOTED
newcons[8 * i + 3, 1] = self.config['MinShortTailAreaRatio']
newcons[8 * i + 3, 2] = self.config['MaxShortTailAreaRatio']
newcons[8 * i + 4, 0] = CQUOTED
newcons[8 * i + 4, 1] = self.config['MinShortTailSlopeRatio']
newcons[8 * i + 4, 2] = self.config['MaxShortTailSlopeRatio']
if ((area * yscaling) <
self.config['MinGaussArea4LongTail']) | \
(lt_term == 0):
newpar.append(0.0)
newpar.append(0.0)
newcons[8 * i + 5, 0] = CFIXED
newcons[8 * i + 5, 1] = 0.0
newcons[8 * i + 5, 2] = 0.0
newcons[8 * i + 6, 0] = CFIXED
newcons[8 * i + 6, 1] = 0.0
newcons[8 * i + 6, 2] = 0.0
else:
newpar.append(self.config['InitialLongTailAreaRatio'])
newpar.append(self.config['InitialLongTailSlopeRatio'])
newcons[8 * i + 5, 0] = CQUOTED
newcons[8 * i + 5, 1] = self.config['MinLongTailAreaRatio']
newcons[8 * i + 5, 2] = self.config['MaxLongTailAreaRatio']
newcons[8 * i + 6, 0] = CQUOTED
newcons[8 * i + 6, 1] = self.config['MinLongTailSlopeRatio']
newcons[8 * i + 6, 2] = self.config['MaxLongTailSlopeRatio']
if ((height * yscaling) <
self.config['MinGaussHeight4StepTail']) | \
(step_term == 0):
newpar.append(0.0)
newcons[8 * i + 7, 0] = CFIXED
newcons[8 * i + 7, 1] = 0.0
newcons[8 * i + 7, 2] = 0.0
else:
newpar.append(self.config['InitialStepTailHeightRatio'])
newcons[8 * i + 7, 0] = CQUOTED
newcons[8 * i + 7, 1] = self.config['MinStepTailHeightRatio']
newcons[8 * i + 7, 2] = self.config['MaxStepTailHeightRatio']
if npeaks > 0:
if g_term:
if self.config['PositiveHeightAreaFlag']:
for i in range(npeaks):
newcons[8 * i, 0] = CPOSITIVE
if self.config['PositiveFwhmFlag']:
for i in range(npeaks):
newcons[8 * i + 2, 0] = CPOSITIVE
if self.config['SameFwhmFlag']:
for i in range(npeaks):
if i != main_peak:
newcons[8 * i + 2, 0] = CFACTOR
newcons[8 * i + 2, 1] = 8 * main_peak + 2
newcons[8 * i + 2, 2] = 1.0
if self.config['HypermetQuotedPositionFlag']:
for i in range(npeaks):
delta = self.config['DeltaPositionFwhmUnits'] * fwhm
newcons[8 * i + 1, 0] = CQUOTED
newcons[8 * i + 1, 1] = newpar[8 * i + 1] - delta
newcons[8 * i + 1, 2] = newpar[8 * i + 1] + delta
if self.config['SameSlopeRatioFlag']:
for i in range(npeaks):
if i != main_peak:
newcons[8 * i + 4, 0] = CFACTOR
newcons[8 * i + 4, 1] = 8 * main_peak + 4
newcons[8 * i + 4, 2] = 1.0
newcons[8 * i + 6, 0] = CFACTOR
newcons[8 * i + 6, 1] = 8 * main_peak + 6
newcons[8 * i + 6, 2] = 1.0
if self.config['SameAreaRatioFlag']:
for i in range(npeaks):
if i != main_peak:
newcons[8 * i + 3, 0] = CFACTOR
newcons[8 * i + 3, 1] = 8 * main_peak + 3
newcons[8 * i + 3, 2] = 1.0
newcons[8 * i + 5, 0] = CFACTOR
newcons[8 * i + 5, 1] = 8 * main_peak + 5
newcons[8 * i + 5, 2] = 1.0
return newpar, newcons
def estimate_stepdown(self, x, y):
crappyfilter = [-0.25, -0.75, 0.0, 0.75, 0.25]
cutoff = len(crappyfilter) // 2
y_deriv = numpy.convolve(y,
crappyfilter,
mode="valid")
if max(y_deriv) > 0:
y_deriv = y_deriv * max(y) / max(y_deriv)
fittedpar, newcons = self.estimate_height_position_fwhm(
x[cutoff:-cutoff], y_deriv)
data_amplitude = max(y) - min(y)
# use parameters from largest gaussian found
if len(fittedpar):
npeaks = len(fittedpar) // 3
largest_index = 0
largest = [data_amplitude,
fittedpar[3 * largest_index + 1],
fittedpar[3 * largest_index + 2]]
for i in range(npeaks):
if fittedpar[3 * i] > largest[0]:
largest_index = i
largest = [data_amplitude,
fittedpar[3 * largest_index + 1],
fittedpar[3 * largest_index + 2]]
else:
# no peak was found
largest = [data_amplitude, # height
x[len(x)//2], # center: middle of x range
self.config["FwhmPoints"] * (x[1] - x[0])] # fwhm: default value
# Setup constrains
newcons = numpy.zeros((3, 3), numpy.float)
if not self.config['NoConstraintsFlag']:
# Setup height constrains
if self.config['PositiveHeightAreaFlag']:
newcons[0, 0] = CPOSITIVE
newcons[0, 1] = 0
newcons[0, 2] = 0
# Setup position constrains
if self.config['QuotedPositionFlag']:
newcons[1, 0] = CQUOTED
newcons[1, 1] = min(x)
newcons[1, 2] = max(x)
# Setup positive FWHM constrains
if self.config['PositiveFwhmFlag']:
newcons[2, 0] = CPOSITIVE
newcons[2, 1] = 0
newcons[2, 2] = 0
return largest, newcons
def estimate_slit(self, x, y):
largestup, cons = self.estimate_stepup(x, y)
largestdown, cons = self.estimate_stepdown(x, y)
fwhm = numpy.fabs(largestdown[1] - largestup[1])
beamfwhm = 0.5 * (largestup[2] + largestdown[1])
beamfwhm = min(beamfwhm, fwhm / 10.0)
beamfwhm = max(beamfwhm, (max(x) - min(x)) * 3.0 / len(x))
y_minus_bg = y - self.strip_bg(y)
height = max(y_minus_bg)
i1 = numpy.nonzero(y_minus_bg >= 0.5 * height)[0]
xx = numpy.take(x, i1)
position = (xx[0] + xx[-1]) / 2.0
fwhm = xx[-1] - xx[0]
largest = [height, position, fwhm, beamfwhm]
cons = numpy.zeros((4, 3), numpy.float)
# Setup constrains
if not self.config['NoConstraintsFlag']:
# Setup height constrains
if self.config['PositiveHeightAreaFlag']:
cons[0, 0] = CPOSITIVE
cons[0, 1] = 0
cons[0, 2] = 0
# Setup position constrains
if self.config['QuotedPositionFlag']:
cons[1, 0] = CQUOTED
cons[1, 1] = min(x)
cons[1, 2] = max(x)
# Setup positive FWHM constrains
if self.config['PositiveFwhmFlag']:
cons[2, 0] = CPOSITIVE
cons[2, 1] = 0
cons[2, 2] = 0
# Setup positive FWHM constrains
if self.config['PositiveFwhmFlag']:
cons[3, 0] = CPOSITIVE
cons[3, 1] = 0
cons[3, 2] = 0
return largest, cons
def estimate_stepup(self, x, y):
crappyfilter = [0.25, 0.75, 0.0, -0.75, -0.25]
cutoff = len(crappyfilter) // 2
y_deriv = numpy.convolve(y, crappyfilter, mode="valid")
if max(y_deriv) > 0:
y_deriv = y_deriv * max(y) / max(y_deriv)
fittedpar, cons = self.estimate_height_position_fwhm(
x[cutoff:-cutoff], y_deriv)
# for height, use the data amplitude after removing the background
data_amplitude = max(y) - min(y)
# find params of the largest gaussian found
if len(fittedpar):
npeaks = len(fittedpar) // 3
largest_index = 0
largest = [data_amplitude,
fittedpar[3 * largest_index + 1],
fittedpar[3 * largest_index + 2]]
for i in range(npeaks):
if fittedpar[3 * i] > largest[0]:
largest_index = i
largest = [fittedpar[3 * largest_index],
fittedpar[3 * largest_index + 1],
fittedpar[3 * largest_index + 2]]
else:
# no peak was found
largest = [data_amplitude, # height
x[len(x)//2], # center: middle of x range
self.config["FwhmPoints"] * (x[1] - x[0])] # fwhm: default value
newcons = numpy.zeros((3, 3), numpy.float)
# Setup constrains
if not self.config['NoConstraintsFlag']:
# Setup height constraints
if self.config['PositiveHeightAreaFlag']:
newcons[0, 0] = CPOSITIVE
newcons[0, 1] = 0
newcons[0, 2] = 0
# Setup position constraints
if self.config['QuotedPositionFlag']:
newcons[1, 0] = CQUOTED
newcons[1, 1] = min(x)
newcons[1, 2] = max(x)
# Setup positive FWHM constraints
if self.config['PositiveFwhmFlag']:
newcons[2, 0] = CPOSITIVE
newcons[2, 1] = 0
newcons[2, 2] = 0
return largest, newcons
def estimate_periodic_gauss(self, x, y):
yscaling = self.config.get('Yscaling', 1.0)
if yscaling == 0:
yscaling = 1.0
bg = self.strip_bg(y)
if self.config['AutoFwhm']:
search_fwhm = guess_fwhm(y)
else:
search_fwhm = int(float(self.config['FwhmPoints']))
search_sens = float(self.config['Sensitivity'])
if search_fwhm < 3:
search_fwhm = 3
if search_sens < 1:
search_sens = 1
if len(y) > 1.5 * search_fwhm:
peaks = peak_search(yscaling * y, fwhm=search_fwhm,
sensitivity=search_sens)
else:
peaks = []
npeaks = len(peaks)
if not npeaks:
fittedpar = []
cons = numpy.zeros((len(fittedpar), 3), numpy.float)
return fittedpar, cons
fittedpar = [0.0, 0.0, 0.0, 0.0, 0.0]
# The number of peaks
fittedpar[0] = npeaks
# The separation between peaks in x units
delta = 0.0
height = 0.0
for i in range(npeaks):
height += y[int(peaks[i])] - bg[int(peaks[i])]
if i != npeaks - 1:
delta += (x[int(peaks[i + 1])] - x[int(peaks[i])])
# delta between peaks
if npeaks > 1:
fittedpar[1] = delta / (npeaks - 1)
# starting height
fittedpar[2] = height / npeaks
# position of the first peak
fittedpar[3] = x[int(peaks[0])]
# Estimate the fwhm
fittedpar[4] = search_fwhm
# setup constraints
cons = numpy.zeros((5, 3), numpy.float)
cons[0, 0] = CFIXED # the number of gaussians
if npeaks == 1:
cons[1, 0] = CFIXED # the delta between peaks
else:
cons[1, 0] = CFREE
j = 2
# Setup height area constrains
if not self.config['NoConstraintsFlag']:
if self.config['PositiveHeightAreaFlag']:
# POSITIVE = 1
cons[j, 0] = CPOSITIVE
cons[j, 1] = 0
cons[j, 2] = 0
j += 1
# Setup position constrains
if not self.config['NoConstraintsFlag']:
if self.config['QuotedPositionFlag']:
# QUOTED = 2
cons[j, 0] = CQUOTED
cons[j, 1] = min(x)
cons[j, 2] = max(x)
j += 1
# Setup positive FWHM constrains
if not self.config['NoConstraintsFlag']:
if self.config['PositiveFwhmFlag']:
# POSITIVE=1
cons[j, 0] = CPOSITIVE
cons[j, 1] = 0
cons[j, 2] = 0
j += 1
return fittedpar, cons
def configure(self, **kw):
if not kw.keys():
return self.config
for key in kw.keys():
notdone = 1
# take care of lower / upper case problems ...
for config_key in self.config.keys():
if config_key.lower() == key.lower():
self.config[config_key] = kw[key]
notdone = 0
if notdone:
self.config[key] = kw[key]
return self.config
fitfuns = FitTheories()
THEORY = OrderedDict((
('Gaussians',
FitTheory(description='Gaussian functions',
function=functions.sum_gauss,
parameters=('Height', 'Position', 'FWHM'),
estimate=fitfuns.estimate_height_position_fwhm,
configure=fitfuns.configure)),
('Lorentz',
FitTheory(description='Lorentzian functions',
function=functions.sum_lorentz,
parameters=('Height', 'Position', 'FWHM'),
estimate=fitfuns.estimate_height_position_fwhm,
configure=fitfuns.configure)),
('Area Gaussians',
FitTheory(description='Gaussian functions (area)',
function=functions.sum_agauss,
parameters=('Area', 'Position', 'FWHM'),
estimate=fitfuns.estimate_agauss,
configure=fitfuns.configure)),
('Area Lorentz',
FitTheory(description='Lorentzian functions (area)',
function=functions.sum_alorentz,
parameters=('Area', 'Position', 'FWHM'),
estimate=fitfuns.estimate_alorentz,
configure=fitfuns.configure)),
('Pseudo-Voigt Line',
FitTheory(description='Pseudo-Voigt functions',
function=functions.sum_pvoigt,
parameters=('Height', 'Position', 'FWHM', 'Eta'),
estimate=fitfuns.estimate_pvoigt,
configure=fitfuns.configure)),
('Area Pseudo-Voigt',
FitTheory(description='Pseudo-Voigt functions (area)',
function=functions.sum_apvoigt,
parameters=('Area', 'Position', 'FWHM', 'Eta'),
estimate=fitfuns.estimate_apvoigt,
configure=fitfuns.configure)),
('Split Gaussian',
FitTheory(description='Asymmetric gaussian functions',
function=functions.sum_splitgauss,
parameters=('Height', 'Position', 'LowFWHM',
'HighFWHM'),
estimate=fitfuns.estimate_splitgauss,
configure=fitfuns.configure)),
('Split Lorentz',
FitTheory(description='Asymmetric lorentzian functions',
function=functions.sum_splitlorentz,
parameters=('Height', 'Position', 'LowFWHM', 'HighFWHM'),
estimate=fitfuns.estimate_splitgauss,
configure=fitfuns.configure)),
('Split Pseudo-Voigt',
FitTheory(description='Asymmetric pseudo-Voigt functions',
function=functions.sum_splitpvoigt,
parameters=('Height', 'Position', 'LowFWHM',
'HighFWHM', 'Eta'),
estimate=fitfuns.estimate_splitpvoigt,
configure=fitfuns.configure)),
('Step Down',
FitTheory(description='Step down function',
function=functions.sum_stepdown,
parameters=('Height', 'Position', 'FWHM'),
estimate=fitfuns.estimate_stepdown,
configure=fitfuns.configure)),
('Step Up',
FitTheory(description='Step up function',
function=functions.sum_stepup,
parameters=('Height', 'Position', 'FWHM'),
estimate=fitfuns.estimate_stepup,
configure=fitfuns.configure)),
('Slit',
FitTheory(description='Slit function',
function=functions.sum_slit,
parameters=('Height', 'Position', 'FWHM', 'BeamFWHM'),
estimate=fitfuns.estimate_slit,
configure=fitfuns.configure)),
('Atan',
FitTheory(description='Arctan step up function',
function=functions.atan_stepup,
parameters=('Height', 'Position', 'Width'),
estimate=fitfuns.estimate_stepup,
configure=fitfuns.configure)),
('Hypermet',
FitTheory(description='Hypermet functions',
function=fitfuns.ahypermet, # customized version of functions.sum_ahypermet
parameters=('G_Area', 'Position', 'FWHM', 'ST_Area',
'ST_Slope', 'LT_Area', 'LT_Slope', 'Step_H'),
estimate=fitfuns.estimate_ahypermet,
configure=fitfuns.configure)),
# ('Periodic Gaussians',
# FitTheory(description='Periodic gaussian functions',
# function=functions.periodic_gauss,
# parameters=('N', 'Delta', 'Height', 'Position', 'FWHM'),
# estimate=fitfuns.estimate_periodic_gauss,
# configure=fitfuns.configure))
('Degree 2 Polynomial',
FitTheory(description='Degree 2 polynomial'
'\ny = a*x^2 + b*x +c',
function=fitfuns.poly,
parameters=['a', 'b', 'c'],
estimate=fitfuns.estimate_quadratic)),
('Degree 3 Polynomial',
FitTheory(description='Degree 3 polynomial'
'\ny = a*x^3 + b*x^2 + c*x + d',
function=fitfuns.poly,
parameters=['a', 'b', 'c', 'd'],
estimate=fitfuns.estimate_cubic)),
('Degree 4 Polynomial',
FitTheory(description='Degree 4 polynomial'
'\ny = a*x^4 + b*x^3 + c*x^2 + d*x + e',
function=fitfuns.poly,
parameters=['a', 'b', 'c', 'd', 'e'],
estimate=fitfuns.estimate_quartic)),
('Degree 5 Polynomial',
FitTheory(description='Degree 5 polynomial'
'\ny = a*x^5 + b*x^4 + c*x^3 + d*x^2 + e*x + f',
function=fitfuns.poly,
parameters=['a', 'b', 'c', 'd', 'e', 'f'],
estimate=fitfuns.estimate_quintic)),
))
def test(a):
    """Smoke-test the custom estimation functions against synthetic data.

    Fits two synthetic gaussians (plus a constant background of 1) with
    FitManager, using ``a.estimate_height_position_fwhm`` as the estimator,
    prints the fitted parameters and, when a Qt binding is importable,
    plots the input data against the fit.

    :param a: module-like object exposing ``estimate_height_position_fwhm``
        (the ``fitfuns`` module in practice)
    """
    from silx.math.fit import fitmanager
    # numpy.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    x = numpy.arange(1000).astype(float)
    p = [1500, 100., 50.0,
         1500, 700., 50.0]
    y_synthetic = functions.sum_gauss(x, *p) + 1
    fit = fitmanager.FitManager(x, y_synthetic)
    fit.addtheory('Gaussians', functions.sum_gauss, ['Height', 'Position', 'FWHM'],
                  a.estimate_height_position_fwhm)
    fit.settheory('Gaussians')
    fit.setbackground('Linear')
    fit.estimate()
    fit.runfit()
    y_fit = fit.gendata()
    print("Fit parameter names: %s" % str(fit.get_names()))
    # Theoretical parameters are prefixed with the linear background (slope 1? no:
    # constant 1, slope 0) followed by the gaussian parameters.
    print("Theoretical parameters: %s" % str(numpy.append([1, 0], p)))
    print("Fitted parameters: %s" % str(fit.get_fitted_parameters()))
    try:
        from silx.gui import qt
        from silx.gui.plot import plot1D
        app = qt.QApplication([])
        # Offset of 1 to see the difference in log scale
        plot1D(x, (y_synthetic + 1, y_fit), "Input data + 1, Fit")
        app.exec_()
    except ImportError:
        _logger.warning("Unable to load qt binding, can't plot results.")
if __name__ == "__main__":
    test(fitfuns)
| true | true |
f733d1aa834e2b814bc2040bb2f67d5110cbe269 | 7,083 | py | Python | Maquina_Virtual/lectura.py | alegayndra/KillerQueen | 87dd65597cbc6e0a678a5beadff5013f66dd919a | [
"MIT"
] | null | null | null | Maquina_Virtual/lectura.py | alegayndra/KillerQueen | 87dd65597cbc6e0a678a5beadff5013f66dd919a | [
"MIT"
] | 1 | 2021-06-02T16:00:49.000Z | 2021-06-02T16:00:49.000Z | Maquina_Virtual/lectura.py | alegayndra/KillerQueen | 87dd65597cbc6e0a678a5beadff5013f66dd919a | [
"MIT"
] | null | null | null | from os import DirEntry, read, getcwd
from pathlib import Path
import io
import sys
from globales import *
# Reads the quadruples section of the object file.
def leerCuadruplos(txt_cuadruplos):
  """Parse every "(op, left, right, result)" line and append the four
  integers as a tuple to the global ``lista_cuadruplos`` (from globales).
  """
  readStr = 0
  while readStr < len(txt_cuadruplos):
    # Grab one "(a, b, c, d)" line, then peel off each comma-separated field.
    cantidades = txt_cuadruplos[ readStr:txt_cuadruplos.find( '\n', readStr ) ]
    operador = int( cantidades[ 1:cantidades.find( ',' ) ] )
    cantidades = cantidades[ cantidades.find( ',' ) + 2: ]
    varIzq = int(cantidades[ :cantidades.find(',')] )
    cantidades = cantidades[ cantidades.find(',') + 2: ]
    varDer = int(cantidades[ :cantidades.find( ',' ) ] )
    cantidades = cantidades[ cantidades.find(',') + 2:]
    guardar = int(cantidades[ :cantidades.find( ')' ) ] )
    lista_cuadruplos.append((operador, varIzq, varDer, guardar)) # store the quadruple
    readStr = txt_cuadruplos.find('\n', readStr) + 1 # advance to the next line
# Reads the parameters of one function.
def guardarParams(direcciones_params, i):
  """Parse "(address, type)" parameter lines and append the resulting list
  to ``funciones[i]`` (global function directory from globales).

  NOTE(review): ``count`` is never incremented, so the ``count < 6`` guard
  can never stop the loop — confirm whether a six-parameter cap was intended.
  """
  readStr = 0
  count = 0
  parm = []
  while readStr < len(direcciones_params) and count < 6:
    cantidades = direcciones_params[readStr:direcciones_params.find('\n', readStr)]
    direccion = int(cantidades[1:cantidades.find(',')] )
    cantidades = cantidades[cantidades.find(',') + 2:]
    tipo = cantidades[:cantidades.find(')')]
    readStr = direcciones_params.find('\n', readStr) + 1
    parm.append((direccion, tipo))
  funciones[i].append(parm) # store the function's parameter info alongside its other entries
# Reads the functions section (function directory).
def guardarFunciones(direcciones_funcs):
  """Parse one function record at a time and append its metadata to the
  global ``funciones`` list.  Each record contributes four tuples:
  (name, address, first-quadruple), (#int normal, #int temp),
  (#float normal, #float temp), (#char normal, #char temp), followed by
  the parameter list parsed by ``guardarParams``.
  """
  readStr = 0
  i = 0
  while readStr < len(direcciones_funcs):
    funciones.append([])
    # Header line: "(name, address, firstQuadruple)"
    cantidades = direcciones_funcs[readStr:direcciones_funcs.find('\n', readStr)]
    # NOTE(review): slices direcciones_funcs instead of cantidades; equivalent
    # here because readStr is 0 at the top of every iteration — confirm.
    nombreFunc = direcciones_funcs[1:cantidades.find(',')]
    cantidades = cantidades[cantidades.find(',') + 2:]
    direcFunc = int(cantidades[:cantidades.find(',')])
    cantidades = cantidades[cantidades.find(',') + 2:]
    numCuadruplo = int(cantidades[:cantidades.find(')')])
    readStr = direcciones_funcs.find('\n', readStr) + 1
    # Int counters line: "(normal, temp)"
    cantidades = direcciones_funcs[readStr:direcciones_funcs.find('\n', readStr)]
    cntIntNormal = int(cantidades[1:cantidades.find(',')])
    cantidades = cantidades[cantidades.find(',') + 2:]
    cntIntTemp = int(cantidades[:cantidades.find(')')])
    readStr = direcciones_funcs.find('\n', readStr) + 1
    # Float counters line: "(normal, temp)"
    cantidades = direcciones_funcs[readStr:direcciones_funcs.find('\n', readStr)]
    readStr = direcciones_funcs.find('\n', readStr) + 1
    cntFloatNormal = int(cantidades[1:cantidades.find(',')])
    cantidades = cantidades[cantidades.find(',') + 2:]
    cntFloatTemp = int(cantidades[:cantidades.find(')')])
    # Char counters line: "(normal, temp)"
    cantidades = direcciones_funcs[readStr:direcciones_funcs.find('\n', readStr)]
    readStr = direcciones_funcs.find('\n', readStr) + 1
    cntCharNormal = int(cantidades[1:cantidades.find(',')])
    cantidades = cantidades[cantidades.find(',') + 2:]
    cntCharTemp = int(cantidades[:cantidades.find(')')])
    funciones[i].append((nombreFunc, direcFunc, numCuadruplo))
    funciones[i].append((cntIntNormal, cntIntTemp))
    funciones[i].append((cntFloatNormal, cntFloatTemp))
    funciones[i].append((cntCharNormal, cntCharTemp))
    # Register this function's parameters from its PARAMS...FIN_PARAMS section
    guardarParams(direcciones_funcs[direcciones_funcs.find("PARAMS") + 7:direcciones_funcs.find("FIN_PARAMS")], i)
    # Drop everything up to (and including) FIN_PARAMS and restart at 0
    direcciones_funcs = direcciones_funcs[direcciones_funcs.find("FIN_PARAMS", readStr) + 11:]
    readStr = 0
    i += 1
# Reads the global-variable counters section.
def guardarMapaGlobs(direcciones_globs):
  """Parse three "(normal, temp)" counter lines — int, float and char, in
  that order — and pre-size the global segment (index 0) of the global
  ``mapa_memoria`` with None placeholders.
  """
  readStr = 0
  # Int counters
  cantidades = direcciones_globs[readStr:direcciones_globs.find('\n', readStr)]
  readStr = direcciones_globs.find('\n', readStr) + 1
  cntIntNormal = cantidades[1:cantidades.find(',')]
  cantidades = cantidades[cantidades.find(',') + 2:]
  cntIntTemp = cantidades[:cantidades.find(')')]
  # Float counters
  cantidades = direcciones_globs[readStr:direcciones_globs.find('\n', readStr)]
  readStr = direcciones_globs.find('\n', readStr) + 1
  cntFloatNormal = cantidades[1:cantidades.find(',')]
  cantidades = cantidades[cantidades.find(',') + 2:]
  cntFloatTemp = cantidades[:cantidades.find(')')]
  # Char counters
  cantidades = direcciones_globs[readStr:direcciones_globs.find('\n', readStr)]
  readStr = direcciones_globs.find('\n', readStr) + 1
  cntCharNormal = cantidades[1:cantidades.find(',')]
  cantidades = cantidades[cantidades.find(',') + 2:]
  cntCharTemp = cantidades[:cantidades.find(')')]
  # mapa_memoria[0][type][0|1]: type 0=int, 1=float, 2=char; 0=normal, 1=temp
  mapa_memoria[0][0][0] = [None] * int(cntIntNormal)
  mapa_memoria[0][0][1] = [None] * int(cntIntTemp)
  mapa_memoria[0][1][0] = [None] * int(cntFloatNormal)
  mapa_memoria[0][1][1] = [None] * int(cntFloatTemp)
  mapa_memoria[0][2][0] = [None] * int(cntCharNormal)
  mapa_memoria[0][2][1] = [None] * int(cntCharTemp)
# Reads the constants section.
def guardarMapaCons(direcciones_const):
  """Parse the constants header "(#int, #float, #char, ...)" to size the
  constant segment (index 2) of the global ``mapa_memoria``, then store
  each "(value, address, type)" constant at its offset inside the segment,
  casting the value according to its declared type.
  """
  readStr = 0
  # Header line with the three counters
  cantidades = direcciones_const[readStr:direcciones_const.find('\n', readStr)]
  cntInt = cantidades[1:cantidades.find(',')]
  cantidades = cantidades[cantidades.find(',') + 2:]
  cntFloat = cantidades[:cantidades.find(',')]
  cantidades = cantidades[cantidades.find(',') + 2:]
  cntChar = cantidades[:cantidades.find(',')]
  mapa_memoria[2][0][0] = [None] * int(cntInt)
  mapa_memoria[2][1][0] = [None] * int(cntFloat)
  mapa_memoria[2][2][0] = [None] * int(cntChar)
  # Skip past the header before reading individual constants
  direcciones_const = direcciones_const[direcciones_const.find('\n') + 1:]
  while readStr < len(direcciones_const):
    valYdir = direcciones_const[readStr:direcciones_const.find('\n', readStr)]
    value = valYdir[1:valYdir.find(',')]
    valYdir = valYdir[valYdir.find(',') + 2:]
    direccion = valYdir[:valYdir.find(',')]
    valYdir = valYdir[valYdir.find(',') + 2:]
    tipo = valYdir[:valYdir.find(')')]
    # "entero" -> int, "flotante" -> float; anything else stays a string
    if tipo == "entero":
      value = int(value)
    elif tipo == "flotante":
      value = float(value)
    direccion = int(direccion)
    # Walk the base addresses from highest to lowest to find the row that
    # contains this address, then store at the offset from that base.
    i = len(dir_memoria[2])-1
    while i >= 0:
      if(direccion >= dir_memoria[2][i][0]):
        mapa_memoria[2][i][0][direccion - dir_memoria[2][i][0]] = value
        break
      i -= 1
    readStr = direcciones_const.find('\n', readStr) + 1
# Main entry point that reads the object (.txt) file.
def leer_obj():
  """Open Compilador/cuadruplos/killer_queen.txt (relative to the current
  working directory) and populate the virtual machine's global structures:
  constants map, globals map, function directory and quadruple list
  (all defined in globales).
  """
  # Read and normalize the file
  cwd = getcwd()
  path = Path(cwd + "/Compilador/cuadruplos/killer_queen.txt")
  # archivo = input("Nombre programa ")
  # abrir = prueba_cuadruplos / (archivo + ".txt")
  file_opened = open(path, 'r')
  stringTxt = file_opened.read()
  # Register constant values in the memory map
  guardarMapaCons(stringTxt[stringTxt.find("CONSTANTES") + 11:stringTxt.find("FIN_CONSTANTES")])
  # Register global-variable counters in the memory map
  guardarMapaGlobs(stringTxt[stringTxt.find("GLOBALES") + 9:stringTxt.find("FIN_GLOBALES")])
  # Register the function directory
  guardarFunciones(stringTxt[stringTxt.find("FUNCIONES") + 10:stringTxt.find("FIN_FUNCIONES")])
  # Load the quadruple list
  leerCuadruplos(stringTxt[stringTxt.find("CUADRUPLOS") + 11:stringTxt.find("FIN_CUADRUPLOS")])
| 42.413174 | 114 | 0.690809 | from os import DirEntry, read, getcwd
from pathlib import Path
import io
import sys
from globales import *
def leerCuadruplos(txt_cuadruplos):
readStr = 0
while readStr < len(txt_cuadruplos):
cantidades = txt_cuadruplos[ readStr:txt_cuadruplos.find( '\n', readStr ) ]
operador = int( cantidades[ 1:cantidades.find( ',' ) ] )
cantidades = cantidades[ cantidades.find( ',' ) + 2: ]
varIzq = int(cantidades[ :cantidades.find(',')] )
cantidades = cantidades[ cantidades.find(',') + 2: ]
varDer = int(cantidades[ :cantidades.find( ',' ) ] )
cantidades = cantidades[ cantidades.find(',') + 2:]
guardar = int(cantidades[ :cantidades.find( ')' ) ] )
lista_cuadruplos.append((operador, varIzq, varDer, guardar))
readStr = txt_cuadruplos.find('\n', readStr) + 1
def guardarParams(direcciones_params, i):
readStr = 0
count = 0
parm = []
while readStr < len(direcciones_params) and count < 6:
cantidades = direcciones_params[readStr:direcciones_params.find('\n', readStr)]
direccion = int(cantidades[1:cantidades.find(',')] )
cantidades = cantidades[cantidades.find(',') + 2:]
tipo = cantidades[:cantidades.find(')')]
readStr = direcciones_params.find('\n', readStr) + 1
parm.append((direccion, tipo))
funciones[i].append(parm)
def guardarFunciones(direcciones_funcs):
readStr = 0
i = 0
while readStr < len(direcciones_funcs):
funciones.append([])
cantidades = direcciones_funcs[readStr:direcciones_funcs.find('\n', readStr)]
nombreFunc = direcciones_funcs[1:cantidades.find(',')]
cantidades = cantidades[cantidades.find(',') + 2:]
direcFunc = int(cantidades[:cantidades.find(',')])
cantidades = cantidades[cantidades.find(',') + 2:]
numCuadruplo = int(cantidades[:cantidades.find(')')])
readStr = direcciones_funcs.find('\n', readStr) + 1
cantidades = direcciones_funcs[readStr:direcciones_funcs.find('\n', readStr)]
cntIntNormal = int(cantidades[1:cantidades.find(',')])
cantidades = cantidades[cantidades.find(',') + 2:]
cntIntTemp = int(cantidades[:cantidades.find(')')])
readStr = direcciones_funcs.find('\n', readStr) + 1
cantidades = direcciones_funcs[readStr:direcciones_funcs.find('\n', readStr)]
readStr = direcciones_funcs.find('\n', readStr) + 1
cntFloatNormal = int(cantidades[1:cantidades.find(',')])
cantidades = cantidades[cantidades.find(',') + 2:]
cntFloatTemp = int(cantidades[:cantidades.find(')')])
cantidades = direcciones_funcs[readStr:direcciones_funcs.find('\n', readStr)]
readStr = direcciones_funcs.find('\n', readStr) + 1
cntCharNormal = int(cantidades[1:cantidades.find(',')])
cantidades = cantidades[cantidades.find(',') + 2:]
cntCharTemp = int(cantidades[:cantidades.find(')')])
funciones[i].append((nombreFunc, direcFunc, numCuadruplo))
funciones[i].append((cntIntNormal, cntIntTemp))
funciones[i].append((cntFloatNormal, cntFloatTemp))
funciones[i].append((cntCharNormal, cntCharTemp))
guardarParams(direcciones_funcs[direcciones_funcs.find("PARAMS") + 7:direcciones_funcs.find("FIN_PARAMS")], i)
direcciones_funcs = direcciones_funcs[direcciones_funcs.find("FIN_PARAMS", readStr) + 11:]
readStr = 0
i += 1
def guardarMapaGlobs(direcciones_globs):
readStr = 0
cantidades = direcciones_globs[readStr:direcciones_globs.find('\n', readStr)]
readStr = direcciones_globs.find('\n', readStr) + 1
cntIntNormal = cantidades[1:cantidades.find(',')]
cantidades = cantidades[cantidades.find(',') + 2:]
cntIntTemp = cantidades[:cantidades.find(')')]
cantidades = direcciones_globs[readStr:direcciones_globs.find('\n', readStr)]
readStr = direcciones_globs.find('\n', readStr) + 1
cntFloatNormal = cantidades[1:cantidades.find(',')]
cantidades = cantidades[cantidades.find(',') + 2:]
cntFloatTemp = cantidades[:cantidades.find(')')]
cantidades = direcciones_globs[readStr:direcciones_globs.find('\n', readStr)]
readStr = direcciones_globs.find('\n', readStr) + 1
cntCharNormal = cantidades[1:cantidades.find(',')]
cantidades = cantidades[cantidades.find(',') + 2:]
cntCharTemp = cantidades[:cantidades.find(')')]
mapa_memoria[0][0][0] = [None] * int(cntIntNormal)
mapa_memoria[0][0][1] = [None] * int(cntIntTemp)
mapa_memoria[0][1][0] = [None] * int(cntFloatNormal)
mapa_memoria[0][1][1] = [None] * int(cntFloatTemp)
mapa_memoria[0][2][0] = [None] * int(cntCharNormal)
mapa_memoria[0][2][1] = [None] * int(cntCharTemp)
def guardarMapaCons(direcciones_const):
readStr = 0
cantidades = direcciones_const[readStr:direcciones_const.find('\n', readStr)]
cntInt = cantidades[1:cantidades.find(',')]
cantidades = cantidades[cantidades.find(',') + 2:]
cntFloat = cantidades[:cantidades.find(',')]
cantidades = cantidades[cantidades.find(',') + 2:]
cntChar = cantidades[:cantidades.find(',')]
mapa_memoria[2][0][0] = [None] * int(cntInt)
mapa_memoria[2][1][0] = [None] * int(cntFloat)
mapa_memoria[2][2][0] = [None] * int(cntChar)
direcciones_const = direcciones_const[direcciones_const.find('\n') + 1:]
while readStr < len(direcciones_const):
valYdir = direcciones_const[readStr:direcciones_const.find('\n', readStr)]
value = valYdir[1:valYdir.find(',')]
valYdir = valYdir[valYdir.find(',') + 2:]
direccion = valYdir[:valYdir.find(',')]
valYdir = valYdir[valYdir.find(',') + 2:]
tipo = valYdir[:valYdir.find(')')]
if tipo == "entero":
value = int(value)
elif tipo == "flotante":
value = float(value)
direccion = int(direccion)
i = len(dir_memoria[2])-1
while i >= 0:
if(direccion >= dir_memoria[2][i][0]):
mapa_memoria[2][i][0][direccion - dir_memoria[2][i][0]] = value
break
i -= 1
readStr = direcciones_const.find('\n', readStr) + 1
def leer_obj():
cwd = getcwd()
path = Path(cwd + "/Compilador/cuadruplos/killer_queen.txt")
file_opened = open(path, 'r')
stringTxt = file_opened.read()
guardarMapaCons(stringTxt[stringTxt.find("CONSTANTES") + 11:stringTxt.find("FIN_CONSTANTES")])
guardarMapaGlobs(stringTxt[stringTxt.find("GLOBALES") + 9:stringTxt.find("FIN_GLOBALES")])
guardarFunciones(stringTxt[stringTxt.find("FUNCIONES") + 10:stringTxt.find("FIN_FUNCIONES")])
leerCuadruplos(stringTxt[stringTxt.find("CUADRUPLOS") + 11:stringTxt.find("FIN_CUADRUPLOS")])
| true | true |
f733d2a80a87cc7d115bb5c9f11d55731f8b4819 | 417 | py | Python | solutions/python/2017/arrayElementsProduct.py | lucifer1198/Codesignal | 07d6d6457b8b3a9f1c51118b0e8e44cce66ee039 | [
"MIT"
] | 2 | 2020-12-21T22:09:26.000Z | 2021-01-01T15:40:01.000Z | solutions/python/2017/arrayElementsProduct.py | nsu1210/Codesignal | 07d6d6457b8b3a9f1c51118b0e8e44cce66ee039 | [
"MIT"
] | null | null | null | solutions/python/2017/arrayElementsProduct.py | nsu1210/Codesignal | 07d6d6457b8b3a9f1c51118b0e8e44cce66ee039 | [
"MIT"
] | 1 | 2021-01-28T18:15:02.000Z | 2021-01-28T18:15:02.000Z | #Question: https://python.web.id/blog/given-an-array-of-integers-cf/
def arrayElementsProduct(inputArray):
product = 1
for numb in inputArray:
product *= numb
return product
'''
>>> inputArray = [1, 3, 2, 10]
>>> arrayElementsProduct(inputArray)
60
>>>
>>> inputArray = [2, 4, 10, 1]
>>> arrayElementsProduct(inputArray)
80
>>> inputArray = [1, 1]
>>> arrayElementsProduct(inputArray)
1
>>>
'''
| 18.954545 | 68 | 0.654676 |
def arrayElementsProduct(inputArray):
product = 1
for numb in inputArray:
product *= numb
return product
| true | true |
f733d33ac1d65a98c426fa5e37840a72560da8e6 | 1,415 | py | Python | agent/src/agent/modules/constants.py | eacherkan-aternity/daria | 7c77a2f52c09c852017b16949a848fa51f0fb579 | [
"Apache-2.0"
] | null | null | null | agent/src/agent/modules/constants.py | eacherkan-aternity/daria | 7c77a2f52c09c852017b16949a848fa51f0fb579 | [
"Apache-2.0"
] | null | null | null | agent/src/agent/modules/constants.py | eacherkan-aternity/daria | 7c77a2f52c09c852017b16949a848fa51f0fb579 | [
"Apache-2.0"
] | null | null | null | import os
ANODOT_API_URL = os.environ.get('ANODOT_API_URL', 'https://api.anodot.com')
ENV_PROD = True if os.environ.get('ENV_PROD') == 'true' else False
HOSTNAME = os.environ.get('HOSTNAME', 'agent')
STREAMSETS_PREVIEW_TIMEOUT = os.environ.get('STREAMSETS_PREVIEW_TIMEOUT', 30000)
VALIDATION_ENABLED = os.environ.get('VALIDATION_ENABLED', 'true') == 'true'
AGENT_DEFAULT_URL = os.environ.get('AGENT_URL', 'http://anodot-agent')
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
AGENT_DB_HOST = os.environ.get('AGENT_DB_HOST', 'db')
AGENT_DB_USER = os.environ.get('AGENT_DB_USER', 'agent')
AGENT_DB_PASSWORD = os.environ.get('AGENT_DB_USER', 'agent')
AGENT_DB = os.environ.get('AGENT_DB', 'agent')
BACKUP_DIRECTORY = os.environ.get('BACKUP_DIRECTORY', '/usr/src/app/backup-data')
DEFAULT_STREAMSETS_URL = os.environ.get('STREAMSETS_URL', 'http://dc:18630')
DEFAULT_STREAMSETS_USERNAME = os.environ.get('STREAMSETS_USERNAME', 'admin')
DEFAULT_STREAMSETS_PASSWORD = os.environ.get('STREAMSETS_PASSWORD', 'admin')
MONITORING_URL = os.environ.get('MONITORING_URL')
MONITORING_SEND_TO_CLIENT = True if os.environ.get('MONITORING_SEND_TO_CLIENT', 'true') == 'true' else False
MONITORING_SEND_TO_ANODOT = True if os.environ.get('MONITORING_SEND_TO_ANODOT', 'true') == 'true' else False
AGENT_MONITORING_ENDPOINT = os.environ.get('AGENT_MONITORING_ENDPOINT', 'http://localhost/monitoring')
| 45.645161 | 108 | 0.763958 | import os
ANODOT_API_URL = os.environ.get('ANODOT_API_URL', 'https://api.anodot.com')
ENV_PROD = True if os.environ.get('ENV_PROD') == 'true' else False
HOSTNAME = os.environ.get('HOSTNAME', 'agent')
STREAMSETS_PREVIEW_TIMEOUT = os.environ.get('STREAMSETS_PREVIEW_TIMEOUT', 30000)
VALIDATION_ENABLED = os.environ.get('VALIDATION_ENABLED', 'true') == 'true'
AGENT_DEFAULT_URL = os.environ.get('AGENT_URL', 'http://anodot-agent')
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
AGENT_DB_HOST = os.environ.get('AGENT_DB_HOST', 'db')
AGENT_DB_USER = os.environ.get('AGENT_DB_USER', 'agent')
AGENT_DB_PASSWORD = os.environ.get('AGENT_DB_USER', 'agent')
AGENT_DB = os.environ.get('AGENT_DB', 'agent')
BACKUP_DIRECTORY = os.environ.get('BACKUP_DIRECTORY', '/usr/src/app/backup-data')
DEFAULT_STREAMSETS_URL = os.environ.get('STREAMSETS_URL', 'http://dc:18630')
DEFAULT_STREAMSETS_USERNAME = os.environ.get('STREAMSETS_USERNAME', 'admin')
DEFAULT_STREAMSETS_PASSWORD = os.environ.get('STREAMSETS_PASSWORD', 'admin')
MONITORING_URL = os.environ.get('MONITORING_URL')
MONITORING_SEND_TO_CLIENT = True if os.environ.get('MONITORING_SEND_TO_CLIENT', 'true') == 'true' else False
MONITORING_SEND_TO_ANODOT = True if os.environ.get('MONITORING_SEND_TO_ANODOT', 'true') == 'true' else False
AGENT_MONITORING_ENDPOINT = os.environ.get('AGENT_MONITORING_ENDPOINT', 'http://localhost/monitoring')
| true | true |
f733d370c880ad82c68efc8ae724bfeb7fc4f204 | 6,819 | py | Python | starcheck/pcad_att_check.py | sot/starcheck | 33d9e4976c815ca0dca59bcdcb1bad61bcf7f023 | [
"BSD-3-Clause"
] | 2 | 2021-12-20T16:28:32.000Z | 2021-12-20T16:28:34.000Z | starcheck/pcad_att_check.py | sot/starcheck | 33d9e4976c815ca0dca59bcdcb1bad61bcf7f023 | [
"BSD-3-Clause"
] | 245 | 2015-02-19T16:19:44.000Z | 2022-02-03T18:25:18.000Z | starcheck/pcad_att_check.py | sot/starcheck | 33d9e4976c815ca0dca59bcdcb1bad61bcf7f023 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
from astropy.table import Table
import Quaternion
from parse_cm import read_backstop, read_or_list
from Chandra.Time import DateTime
import hopper
def check_characteristics_date(ofls_characteristics_file, ref_date=None):
    """Return True when the OFLS characteristics file is at most 30 days
    older than ``ref_date``; False otherwise, or when the file name has no
    CHARACTERIS_DDMONYY date stamp.

    :param ofls_characteristics_file: file path as bytes (sent from Perl)
    :param ref_date: reference date as bytes, or None (DateTime default)
    """
    # de_bytestring these inputs from Perl -> Python 3
    ofls_characteristics_file = ofls_characteristics_file.decode()
    if ref_date is not None:
        ref_date = ref_date.decode()
    match = re.search(r'CHARACTERIS_(\d\d)([A-Z]{3})(\d\d)', ofls_characteristics_file)
    if not match:
        return False
    day, mon, yr = match.groups()
    yr = int(yr)
    # Two-digit year: 91-99 -> 19xx, 00-90 -> 20xx
    yr += 1900 if (yr > 90) else 2000
    mon = mon.lower().capitalize()
    file_date = DateTime('{}{}{} at 00:00:00.000'.format(yr, mon, day))
    # DateTime difference is presumably in days (Chandra.Time) — verify.
    return False if (DateTime(ref_date) - file_date > 30) else True
def test_check_characteristics_date():
    """Unit test for check_characteristics_date.

    Bug fix: the checker unconditionally decodes its arguments (they
    arrive as bytes from the Perl bridge), so byte strings must be passed
    here; the previous str arguments raised AttributeError on .decode().
    """
    ok = check_characteristics_date(b'blah/blah/L_blah_CHARACTERIS_01OCT15',
                                    b'2015Oct30 at 00:00:00.000')
    assert ok is True
    ok = check_characteristics_date(b'blah/blah/L_blah_CHARACTERIS_01OCT15',
                                    b'2015Nov02 at 00:00:00.000')
    assert ok is False
    ok = check_characteristics_date(b'blah/blah/L_blah_CHARACTERIS_99OCT15',
                                    b'1999Oct20 at 00:00:00.000')
    assert ok is True
    ok = check_characteristics_date(b'blah/blah/L_blah',
                                    b'2015Nov02 at 00:00:00.000')
    assert ok is False
def recent_sim_history(time, file):
    """Scan a SIM history file backwards and return the most recent
    ``(greta_time, value)`` entry recorded strictly before ``time``
    (seconds).  The value is cast to int (SIM focus / translation
    history).  Falls through (returns None) when no entry qualifies.
    """
    pattern = re.compile('^(\d+\.\d+)\s+\|\s+(\S+)\s*$')
    for entry in reversed(open(file).readlines()):
        hit = pattern.match(entry)
        if hit is None:
            continue
        greta_time, value = hit.groups()
        if DateTime(greta_time, format='greta').secs < time:
            return greta_time, int(value)
def recent_attitude_history(time, file):
    """Scan an ATTITUDE history file backwards and return the most recent
    ``(greta_time, q1, q2, q3, q4)`` quaternion entry recorded strictly
    before ``time`` (seconds); the quaternion components are floats.
    Falls through (returns None) when no entry qualifies.
    """
    pattern = re.compile('^(\d+\.\d+)\s+\|\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s*')
    for entry in reversed(open(file).readlines()):
        hit = pattern.match(entry)
        if hit is None:
            continue
        greta_time, q1, q2, q3, q4 = hit.groups()
        if DateTime(greta_time, format='greta').secs < time:
            return greta_time, float(q1), float(q2), float(q3), float(q4)
def make_pcad_attitude_check_report(backstop_file, or_list_file=None, attitude_file=None,
                                    simtrans_file=None, simfocus_file=None,
                                    ofls_characteristics_file=None, out=None,
                                    dynamic_offsets_file=None,
                                    ):
    """
    Make a report for checking PCAD attitudes.

    :param backstop_file: backstop command file
    :param or_list_file: OR list file (None => attitude checks are skipped
        with an ERROR line)
    :param attitude_file: ATTITUDE history file (see recent_attitude_history)
    :param simtrans_file: SIM translation history file
    :param simfocus_file: SIM focus history file
    :param ofls_characteristics_file: OFLS characteristics file; ignored
        when a dynamic offsets file is supplied
    :param out: optional path to write the report lines to
    :param dynamic_offsets_file: ACA dynamic offsets table (ascii.basic)
    :returns: True when every applicable attitude check succeeded
    """
    all_ok = True
    lines = []  # output report lines
    bs = read_backstop(backstop_file)
    # Get initial state attitude and sim position from history
    att_time, q1, q2, q3, q4 = recent_attitude_history(DateTime(bs[0]['date']).secs,
                                                       attitude_file)
    q = Quaternion.normalize([q1, q2, q3, q4])
    simfa_time, simfa = recent_sim_history(DateTime(bs[0]['date']).secs,
                                           simfocus_file)
    simpos_time, simpos = recent_sim_history(DateTime(bs[0]['date']).secs,
                                             simtrans_file)
    initial_state = {'q1': q[0],
                     'q2': q[1],
                     'q3': q[2],
                     'q4': q[3],
                     'simpos': simpos,
                     'simfa_pos': simfa}
    or_list = None if or_list_file is None else read_or_list(or_list_file)
    if or_list is None:
        lines.append('ERROR: No OR list provided, cannot check attitudes')
        all_ok = False
    # If dynamical offsets file is available then load was planned using
    # Matlab tools 2016_210 later, which implements the "Cycle 18 aimpoint
    # transition plan". This code injects new OR list attributes for the
    # dynamical offset.
    if dynamic_offsets_file is not None and or_list is not None:
        # Existing OFLS characteristics file is not relevant for post 2016_210.
        # Products are planned using the Matlab tools SI align which matches the
        # baseline mission align matrix from pre-November 2015.
        ofls_characteristics_file = None
        lines.append('INFO: using dynamic offsets file {}'.format(dynamic_offsets_file))
        or_map = {or_['obsid']: or_ for or_ in or_list}
        doffs = Table.read(dynamic_offsets_file, format='ascii.basic', guess=False)
        for doff in doffs:
            obsid = doff['obsid']
            if obsid in or_map:
                # Table offsets are arcsec; OR list attributes are degrees.
                or_map[obsid]['aca_offset_y'] = doff['aca_offset_y'] / 3600.
                or_map[obsid]['aca_offset_z'] = doff['aca_offset_z'] / 3600.
        # Check that obsids in dynamic offsets table are all in OR list
        if not set(doffs['obsid']).issubset(set(or_map)):
            all_ok = False
            obsid_mismatch = set(doffs['obsid']) - set(or_map)
            lines.append('WARNING: Obsid in dynamic offsets table but missing in OR list {}'
                         .format(list(obsid_mismatch)))
    # Run the commands and populate attributes in `sc`, the spacecraft state.
    # In particular sc.checks is a dict of checks by obsid.
    # Any state value (e.g. obsid or q_att) has a corresponding plural that
    # gives the history of updates as a dict with a `value` and `date` key.
    sc = hopper.run_cmds(backstop_file, or_list, ofls_characteristics_file,
                         initial_state=initial_state, starcheck=True)
    # Iterate through checks by obsid to print status
    checks = sc.get_checks_by_obsid()
    for obsid in sc.obsids:
        for check in checks[obsid]:
            if check.name == 'attitude_consistent_with_obsreq':
                ok = check.success
                all_ok &= ok
                if check.not_applicable:
                    message = 'SKIPPED: {}'.format(":".join(check.infos))
                else:
                    message = 'OK' if ok else "ERROR: {}".format(":".join(check.errors))
                line = '{:5d}: {}'.format(obsid, message)
                lines.append(line)
    if out is not None:
        with open(out, 'w') as fh:
            fh.writelines("\n".join(lines))
    return all_ok
| 41.327273 | 92 | 0.600088 |
import re
from astropy.table import Table
import Quaternion
from parse_cm import read_backstop, read_or_list
from Chandra.Time import DateTime
import hopper
def check_characteristics_date(ofls_characteristics_file, ref_date=None):
ofls_characteristics_file = ofls_characteristics_file.decode()
if ref_date is not None:
ref_date = ref_date.decode()
match = re.search(r'CHARACTERIS_(\d\d)([A-Z]{3})(\d\d)', ofls_characteristics_file)
if not match:
return False
day, mon, yr = match.groups()
yr = int(yr)
yr += 1900 if (yr > 90) else 2000
mon = mon.lower().capitalize()
file_date = DateTime('{}{}{} at 00:00:00.000'.format(yr, mon, day))
return False if (DateTime(ref_date) - file_date > 30) else True
def test_check_characteristics_date():
ok = check_characteristics_date('blah/blah/L_blah_CHARACTERIS_01OCT15',
'2015Oct30 at 00:00:00.000')
assert ok is True
ok = check_characteristics_date('blah/blah/L_blah_CHARACTERIS_01OCT15',
'2015Nov02 at 00:00:00.000')
assert ok is False
ok = check_characteristics_date('blah/blah/L_blah_CHARACTERIS_99OCT15',
'1999Oct20 at 00:00:00.000')
assert ok is True
ok = check_characteristics_date('blah/blah/L_blah',
'2015Nov02 at 00:00:00.000')
assert ok is False
def recent_sim_history(time, file):
for line in reversed(open(file).readlines()):
match = re.match('^(\d+\.\d+)\s+\|\s+(\S+)\s*$',
line)
if match:
greta_time, value = match.groups()
if (DateTime(greta_time, format='greta').secs < time):
return greta_time, int(value)
def recent_attitude_history(time, file):
for line in reversed(open(file).readlines()):
match = re.match('^(\d+\.\d+)\s+\|\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s*', line)
if match:
greta_time, q1, q2, q3, q4 = match.groups()
if (DateTime(greta_time, format='greta').secs < time):
return greta_time, float(q1), float(q2), float(q3), float(q4)
def make_pcad_attitude_check_report(backstop_file, or_list_file=None, attitude_file=None,
simtrans_file=None, simfocus_file=None,
ofls_characteristics_file=None, out=None,
dynamic_offsets_file=None,
):
all_ok = True
lines = []
bs = read_backstop(backstop_file)
att_time, q1, q2, q3, q4 = recent_attitude_history(DateTime(bs[0]['date']).secs,
attitude_file)
q = Quaternion.normalize([q1, q2, q3, q4])
simfa_time, simfa = recent_sim_history(DateTime(bs[0]['date']).secs,
simfocus_file)
simpos_time, simpos = recent_sim_history(DateTime(bs[0]['date']).secs,
simtrans_file)
initial_state = {'q1': q[0],
'q2': q[1],
'q3': q[2],
'q4': q[3],
'simpos': simpos,
'simfa_pos': simfa}
or_list = None if or_list_file is None else read_or_list(or_list_file)
if or_list is None:
lines.append('ERROR: No OR list provided, cannot check attitudes')
all_ok = False
# transition plan". This code injects new OR list attributes for the
if dynamic_offsets_file is not None and or_list is not None:
ofls_characteristics_file = None
lines.append('INFO: using dynamic offsets file {}'.format(dynamic_offsets_file))
or_map = {or_['obsid']: or_ for or_ in or_list}
doffs = Table.read(dynamic_offsets_file, format='ascii.basic', guess=False)
for doff in doffs:
obsid = doff['obsid']
if obsid in or_map:
or_map[obsid]['aca_offset_y'] = doff['aca_offset_y'] / 3600.
or_map[obsid]['aca_offset_z'] = doff['aca_offset_z'] / 3600.
if not set(doffs['obsid']).issubset(set(or_map)):
all_ok = False
obsid_mismatch = set(doffs['obsid']) - set(or_map)
lines.append('WARNING: Obsid in dynamic offsets table but missing in OR list {}'
.format(list(obsid_mismatch)))
sc = hopper.run_cmds(backstop_file, or_list, ofls_characteristics_file,
initial_state=initial_state, starcheck=True)
checks = sc.get_checks_by_obsid()
for obsid in sc.obsids:
for check in checks[obsid]:
if check.name == 'attitude_consistent_with_obsreq':
ok = check.success
all_ok &= ok
if check.not_applicable:
message = 'SKIPPED: {}'.format(":".join(check.infos))
else:
message = 'OK' if ok else "ERROR: {}".format(":".join(check.errors))
line = '{:5d}: {}'.format(obsid, message)
lines.append(line)
if out is not None:
with open(out, 'w') as fh:
fh.writelines("\n".join(lines))
return all_ok
| true | true |
f733d3850979941ec7e3cd0e71b8be674b67dd74 | 2,415 | py | Python | tests/test_hydrothermal_venture.py | walshification/advent-of-code-2021 | 84c604d6f1468bc789515b767a1317e173feb43f | [
"MIT"
] | null | null | null | tests/test_hydrothermal_venture.py | walshification/advent-of-code-2021 | 84c604d6f1468bc789515b767a1317e173feb43f | [
"MIT"
] | null | null | null | tests/test_hydrothermal_venture.py | walshification/advent-of-code-2021 | 84c604d6f1468bc789515b767a1317e173feb43f | [
"MIT"
] | null | null | null | from solutions.hydrothermal_venture import Grid, Line, Point
def test_lines_from_strings():
    """Given a string of a certain format, lines can be made."""
    parsed = Line.from_string("0,9 -> 3,9")
    assert parsed.points == [Point(x, 9) for x in range(4)]
def test_line_can_draw_horizontally_with_points():
    """Given a start and an end, a line can draw the intermediary
    points.
    """
    line = Line(Point(0, 0), Point(2, 0))
    assert line.points == [Point(0, 0), Point(1, 0), Point(2, 0)]
def test_line_can_draw_backwards():
    """Given a start and an end that goes left, the line can still draw
    itself.
    """
    line = Line(Point(2, 0), Point(0, 0))
    assert line.points == [Point(2, 0), Point(1, 0), Point(0, 0)]
def test_line_can_draw_vertically_with_points():
    """Given a start and an end, a line can draw the intermediary
    points.
    """
    line = Line(Point(0, 0), Point(0, 2))
    assert line.points == [Point(0, 0), Point(0, 1), Point(0, 2)]
def test_line_can_draw_backwards_vertically():
    """Given a start and an end that goes left, the line can still draw
    itself.
    """
    line = Line(Point(0, 2), Point(0, 0))
    assert line.points == [Point(0, 2), Point(0, 1), Point(0, 0)]
def test_grid_tracks_double_points():
    """Grid returns the number of points marked twice."""
    crossing = [Line(Point(0, 0), Point(2, 0)), Line(Point(0, 2), Point(0, 0))]
    assert Grid.map(crossing) == 1
def test_the_test_input():
    """Test the test input."""
    raw_input = [
        "0,9 -> 5,9",
        "8,0 -> 0,8",
        "9,4 -> 3,4",
        "2,2 -> 2,1",
        "7,0 -> 7,4",
        "6,4 -> 2,0",
        "0,9 -> 2,9",
        "3,4 -> 1,4",
        "0,0 -> 8,8",
        "5,5 -> 8,2",
    ]
    lines = list(map(Line.from_string, raw_input))
    assert Grid.map_horizontal_and_vertical(lines) == 5
def test_the_full_test_input():
    """Test the full test input."""
    raw_input = [
        "0,9 -> 5,9",
        "8,0 -> 0,8",
        "9,4 -> 3,4",
        "2,2 -> 2,1",
        "7,0 -> 7,4",
        "6,4 -> 2,0",
        "0,9 -> 2,9",
        "3,4 -> 1,4",
        "0,0 -> 8,8",
        "5,5 -> 8,2",
    ]
    lines = list(map(Line.from_string, raw_input))
    assert Grid.map(lines) == 12
| 25.421053 | 78 | 0.556522 | from solutions.hydrothermal_venture import Grid, Line, Point
def test_lines_from_strings():
line = Line.from_string("0,9 -> 3,9")
assert line.points == [Point(0, 9), Point(1, 9), Point(2, 9), Point(3, 9)]
def test_line_can_draw_horizontally_with_points():
start = Point(0, 0)
end = Point(2, 0)
line = Line(start, end)
assert line.points == [start, Point(1, 0), end]
def test_line_can_draw_backwards():
start = Point(2, 0)
end = Point(0, 0)
line = Line(start, end)
assert line.points == [start, Point(1, 0), end]
def test_line_can_draw_vertically_with_points():
start = Point(0, 0)
end = Point(0, 2)
line = Line(start, end)
assert line.points == [start, Point(0, 1), end]
def test_line_can_draw_backwards_vertically():
start = Point(0, 2)
end = Point(0, 0)
line = Line(start, end)
assert line.points == [start, Point(0, 1), end]
def test_grid_tracks_double_points():
lines = [Line(Point(0, 0), Point(2, 0)), Line(Point(0, 2), Point(0, 0))]
double_count = Grid.map(lines)
assert double_count == 1
def test_the_test_input():
raw_input = [
"0,9 -> 5,9",
"8,0 -> 0,8",
"9,4 -> 3,4",
"2,2 -> 2,1",
"7,0 -> 7,4",
"6,4 -> 2,0",
"0,9 -> 2,9",
"3,4 -> 1,4",
"0,0 -> 8,8",
"5,5 -> 8,2",
]
lines = [Line.from_string(raw_line) for raw_line in raw_input]
assert Grid.map_horizontal_and_vertical(lines) == 5
def test_the_full_test_input():
raw_input = [
"0,9 -> 5,9",
"8,0 -> 0,8",
"9,4 -> 3,4",
"2,2 -> 2,1",
"7,0 -> 7,4",
"6,4 -> 2,0",
"0,9 -> 2,9",
"3,4 -> 1,4",
"0,0 -> 8,8",
"5,5 -> 8,2",
]
lines = [Line.from_string(raw_line) for raw_line in raw_input]
assert Grid.map(lines) == 12
| true | true |
f733d3a946b4798fdaaed5bc828c68039904194f | 5,124 | py | Python | assets/paint/image.py | quantumjim/Ganymede | 2e3285f397ef43c356f634539aa1a89009cc624d | [
"Apache-2.0"
] | null | null | null | assets/paint/image.py | quantumjim/Ganymede | 2e3285f397ef43c356f634539aa1a89009cc624d | [
"Apache-2.0"
] | null | null | null | assets/paint/image.py | quantumjim/Ganymede | 2e3285f397ef43c356f634539aa1a89009cc624d | [
"Apache-2.0"
] | null | null | null | {(0, 0): [4, False], (0, 1): [4, False], (0, 2): [4, False], (0, 3): [4, False], (0, 4): [4, False], (0, 5): [4, False], (0, 6): [4, False], (0, 7): [4, False], (0, 8): [4, False], (0, 9): [4, False], (0, 10): [4, False], (0, 11): [4, False], (0, 12): [4, False], (0, 13): [4, False], (0, 14): [4, False], (0, 15): [4, False], (1, 0): [4, False], (1, 1): [4, False], (1, 2): [4, True], (1, 3): [4, True], (1, 4): [4, True], (1, 5): [4, True], (1, 6): [4, True], (1, 7): [4, True], (1, 8): [4, True], (1, 9): [4, True], (1, 10): [4, True], (1, 11): [4, True], (1, 12): [4, True], (1, 13): [4, True], (1, 14): [4, True], (1, 15): [4, False], (2, 0): [4, False], (2, 1): [4, False], (2, 2): [4, True], (2, 3): [1, True], (2, 4): [1, True], (2, 5): [4, True], (2, 6): [3, True], (2, 7): [3, True], (2, 8): [4, True], (2, 9): [2, True], (2, 10): [2, True], (2, 11): [4, True], (2, 12): [1, True], (2, 13): [1, True], (2, 14): [4, True], (2, 15): [4, False], (3, 0): [4, False], (3, 1): [4, False], (3, 2): [4, True], (3, 3): [1, True], (3, 4): [1, True], (3, 5): [4, True], (3, 6): [3, True], (3, 7): [3, True], (3, 8): [4, True], (3, 9): [2, True], (3, 10): [2, True], (3, 11): [4, True], (3, 12): [1, True], (3, 13): [1, True], (3, 14): [4, True], (3, 15): [4, False], (4, 0): [4, False], (4, 1): [4, False], (4, 2): [4, True], (4, 3): [4, True], (4, 4): [4, True], (4, 5): [4, True], (4, 6): [4, True], (4, 7): [4, True], (4, 8): [4, True], (4, 9): [4, True], (4, 10): [4, True], (4, 11): [4, True], (4, 12): [4, True], (4, 13): [4, True], (4, 14): [4, True], (4, 15): [4, False], (5, 0): [4, False], (5, 1): [4, False], (5, 2): [4, True], (5, 3): [3, True], (5, 4): [3, True], (5, 5): [4, True], (5, 6): [4, True], (5, 7): [4, True], (5, 8): [4, True], (5, 9): [4, True], (5, 10): [4, True], (5, 11): [4, True], (5, 12): [2, True], (5, 13): [2, True], (5, 14): [4, True], (5, 15): [4, False], (6, 0): [4, False], (6, 1): [4, False], (6, 2): [4, True], (6, 3): [3, True], (6, 
4): [3, True], (6, 5): [4, True], (6, 6): [4, True], (6, 7): [4, True], (6, 8): [4, True], (6, 9): [4, True], (6, 10): [4, True], (6, 11): [4, True], (6, 12): [2, True], (6, 13): [2, True], (6, 14): [4, True], (6, 15): [4, False], (7, 0): [4, True], (7, 1): [4, True], (7, 2): [4, True], (7, 3): [4, True], (7, 4): [4, True], (7, 5): [4, True], (7, 6): [4, True], (7, 7): [4, True], (7, 8): [4, True], (7, 9): [4, True], (7, 10): [4, True], (7, 11): [4, True], (7, 12): [4, True], (7, 13): [4, True], (7, 14): [4, True], (7, 15): [4, True], (8, 0): [4, True], (8, 1): [4, True], (8, 2): [4, True], (8, 3): [4, True], (8, 4): [4, True], (8, 5): [4, True], (8, 6): [4, True], (8, 7): [4, True], (8, 8): [4, True], (8, 9): [4, True], (8, 10): [4, True], (8, 11): [4, True], (8, 12): [4, True], (8, 13): [4, True], (8, 14): [4, True], (8, 15): [4, True], (9, 0): [4, False], (9, 1): [4, False], (9, 2): [4, True], (9, 3): [2, True], (9, 4): [2, True], (9, 5): [4, True], (9, 6): [4, True], (9, 7): [4, True], (9, 8): [4, True], (9, 9): [4, True], (9, 10): [4, True], (9, 11): [4, True], (9, 12): [3, True], (9, 13): [3, True], (9, 14): [4, True], (9, 15): [4, False], (10, 0): [4, False], (10, 1): [4, False], (10, 2): [4, True], (10, 3): [2, True], (10, 4): [2, True], (10, 5): [4, True], (10, 6): [4, True], (10, 7): [4, True], (10, 8): [4, True], (10, 9): [4, True], (10, 10): [4, True], (10, 11): [4, True], (10, 12): [3, True], (10, 13): [3, True], (10, 14): [4, True], (10, 15): [4, False], (11, 0): [4, False], (11, 1): [4, False], (11, 2): [4, True], (11, 3): [4, True], (11, 4): [4, True], (11, 5): [4, True], (11, 6): [4, True], (11, 7): [4, True], (11, 8): [4, True], (11, 9): [4, True], (11, 10): [4, True], (11, 11): [4, True], (11, 12): [4, True], (11, 13): [4, True], (11, 14): [4, True], (11, 15): [4, False], (12, 0): [4, False], (12, 1): [4, False], (12, 2): [4, True], (12, 3): [1, True], (12, 4): [1, True], (12, 5): [4, True], (12, 6): [2, True], (12, 7): [2, True], (12, 8): [4, 
True], (12, 9): [3, True], (12, 10): [3, True], (12, 11): [4, True], (12, 12): [1, True], (12, 13): [1, True], (12, 14): [4, True], (12, 15): [4, False], (13, 0): [4, False], (13, 1): [4, False], (13, 2): [4, True], (13, 3): [1, True], (13, 4): [1, True], (13, 5): [4, True], (13, 6): [2, True], (13, 7): [2, True], (13, 8): [4, True], (13, 9): [3, True], (13, 10): [3, True], (13, 11): [4, True], (13, 12): [1, True], (13, 13): [1, True], (13, 14): [4, True], (13, 15): [4, False], (14, 0): [4, False], (14, 1): [4, False], (14, 2): [4, True], (14, 3): [4, True], (14, 4): [4, True], (14, 5): [4, True], (14, 6): [4, True], (14, 7): [4, True], (14, 8): [4, True], (14, 9): [4, True], (14, 10): [4, True], (14, 11): [4, True], (14, 12): [4, True], (14, 13): [4, True], (14, 14): [4, True], (14, 15): [4, False], (15, 0): [4, False], (15, 1): [4, False], (15, 2): [4, False], (15, 3): [4, False], (15, 4): [4, False], (15, 5): [4, False], (15, 6): [4, False], (15, 7): [4, False], (15, 8): [4, False], (15, 9): [4, False], (15, 10): [4, False], (15, 11): [4, False], (15, 12): [4, False], (15, 13): [4, False], (15, 14): [4, False], (15, 15): [4, False]} | 5,124 | 5,124 | 0.400468 | {(0, 0): [4, False], (0, 1): [4, False], (0, 2): [4, False], (0, 3): [4, False], (0, 4): [4, False], (0, 5): [4, False], (0, 6): [4, False], (0, 7): [4, False], (0, 8): [4, False], (0, 9): [4, False], (0, 10): [4, False], (0, 11): [4, False], (0, 12): [4, False], (0, 13): [4, False], (0, 14): [4, False], (0, 15): [4, False], (1, 0): [4, False], (1, 1): [4, False], (1, 2): [4, True], (1, 3): [4, True], (1, 4): [4, True], (1, 5): [4, True], (1, 6): [4, True], (1, 7): [4, True], (1, 8): [4, True], (1, 9): [4, True], (1, 10): [4, True], (1, 11): [4, True], (1, 12): [4, True], (1, 13): [4, True], (1, 14): [4, True], (1, 15): [4, False], (2, 0): [4, False], (2, 1): [4, False], (2, 2): [4, True], (2, 3): [1, True], (2, 4): [1, True], (2, 5): [4, True], (2, 6): [3, True], (2, 7): [3, True], (2, 8): [4, True], (2, 
9): [2, True], (2, 10): [2, True], (2, 11): [4, True], (2, 12): [1, True], (2, 13): [1, True], (2, 14): [4, True], (2, 15): [4, False], (3, 0): [4, False], (3, 1): [4, False], (3, 2): [4, True], (3, 3): [1, True], (3, 4): [1, True], (3, 5): [4, True], (3, 6): [3, True], (3, 7): [3, True], (3, 8): [4, True], (3, 9): [2, True], (3, 10): [2, True], (3, 11): [4, True], (3, 12): [1, True], (3, 13): [1, True], (3, 14): [4, True], (3, 15): [4, False], (4, 0): [4, False], (4, 1): [4, False], (4, 2): [4, True], (4, 3): [4, True], (4, 4): [4, True], (4, 5): [4, True], (4, 6): [4, True], (4, 7): [4, True], (4, 8): [4, True], (4, 9): [4, True], (4, 10): [4, True], (4, 11): [4, True], (4, 12): [4, True], (4, 13): [4, True], (4, 14): [4, True], (4, 15): [4, False], (5, 0): [4, False], (5, 1): [4, False], (5, 2): [4, True], (5, 3): [3, True], (5, 4): [3, True], (5, 5): [4, True], (5, 6): [4, True], (5, 7): [4, True], (5, 8): [4, True], (5, 9): [4, True], (5, 10): [4, True], (5, 11): [4, True], (5, 12): [2, True], (5, 13): [2, True], (5, 14): [4, True], (5, 15): [4, False], (6, 0): [4, False], (6, 1): [4, False], (6, 2): [4, True], (6, 3): [3, True], (6, 4): [3, True], (6, 5): [4, True], (6, 6): [4, True], (6, 7): [4, True], (6, 8): [4, True], (6, 9): [4, True], (6, 10): [4, True], (6, 11): [4, True], (6, 12): [2, True], (6, 13): [2, True], (6, 14): [4, True], (6, 15): [4, False], (7, 0): [4, True], (7, 1): [4, True], (7, 2): [4, True], (7, 3): [4, True], (7, 4): [4, True], (7, 5): [4, True], (7, 6): [4, True], (7, 7): [4, True], (7, 8): [4, True], (7, 9): [4, True], (7, 10): [4, True], (7, 11): [4, True], (7, 12): [4, True], (7, 13): [4, True], (7, 14): [4, True], (7, 15): [4, True], (8, 0): [4, True], (8, 1): [4, True], (8, 2): [4, True], (8, 3): [4, True], (8, 4): [4, True], (8, 5): [4, True], (8, 6): [4, True], (8, 7): [4, True], (8, 8): [4, True], (8, 9): [4, True], (8, 10): [4, True], (8, 11): [4, True], (8, 12): [4, True], (8, 13): [4, True], (8, 14): [4, True], (8, 15): 
[4, True], (9, 0): [4, False], (9, 1): [4, False], (9, 2): [4, True], (9, 3): [2, True], (9, 4): [2, True], (9, 5): [4, True], (9, 6): [4, True], (9, 7): [4, True], (9, 8): [4, True], (9, 9): [4, True], (9, 10): [4, True], (9, 11): [4, True], (9, 12): [3, True], (9, 13): [3, True], (9, 14): [4, True], (9, 15): [4, False], (10, 0): [4, False], (10, 1): [4, False], (10, 2): [4, True], (10, 3): [2, True], (10, 4): [2, True], (10, 5): [4, True], (10, 6): [4, True], (10, 7): [4, True], (10, 8): [4, True], (10, 9): [4, True], (10, 10): [4, True], (10, 11): [4, True], (10, 12): [3, True], (10, 13): [3, True], (10, 14): [4, True], (10, 15): [4, False], (11, 0): [4, False], (11, 1): [4, False], (11, 2): [4, True], (11, 3): [4, True], (11, 4): [4, True], (11, 5): [4, True], (11, 6): [4, True], (11, 7): [4, True], (11, 8): [4, True], (11, 9): [4, True], (11, 10): [4, True], (11, 11): [4, True], (11, 12): [4, True], (11, 13): [4, True], (11, 14): [4, True], (11, 15): [4, False], (12, 0): [4, False], (12, 1): [4, False], (12, 2): [4, True], (12, 3): [1, True], (12, 4): [1, True], (12, 5): [4, True], (12, 6): [2, True], (12, 7): [2, True], (12, 8): [4, True], (12, 9): [3, True], (12, 10): [3, True], (12, 11): [4, True], (12, 12): [1, True], (12, 13): [1, True], (12, 14): [4, True], (12, 15): [4, False], (13, 0): [4, False], (13, 1): [4, False], (13, 2): [4, True], (13, 3): [1, True], (13, 4): [1, True], (13, 5): [4, True], (13, 6): [2, True], (13, 7): [2, True], (13, 8): [4, True], (13, 9): [3, True], (13, 10): [3, True], (13, 11): [4, True], (13, 12): [1, True], (13, 13): [1, True], (13, 14): [4, True], (13, 15): [4, False], (14, 0): [4, False], (14, 1): [4, False], (14, 2): [4, True], (14, 3): [4, True], (14, 4): [4, True], (14, 5): [4, True], (14, 6): [4, True], (14, 7): [4, True], (14, 8): [4, True], (14, 9): [4, True], (14, 10): [4, True], (14, 11): [4, True], (14, 12): [4, True], (14, 13): [4, True], (14, 14): [4, True], (14, 15): [4, False], (15, 0): [4, False], (15, 1): 
[4, False], (15, 2): [4, False], (15, 3): [4, False], (15, 4): [4, False], (15, 5): [4, False], (15, 6): [4, False], (15, 7): [4, False], (15, 8): [4, False], (15, 9): [4, False], (15, 10): [4, False], (15, 11): [4, False], (15, 12): [4, False], (15, 13): [4, False], (15, 14): [4, False], (15, 15): [4, False]} | true | true |
f733d5c48341e5e6cf202789ac858a4546fd80c5 | 4,855 | py | Python | verificator/Maps.py | jwallnoefer/multisat_qrepeater_sim_archive | 69b4c242fb760cf195871f38b3172d4dfd26c01a | [
"MIT"
] | null | null | null | verificator/Maps.py | jwallnoefer/multisat_qrepeater_sim_archive | 69b4c242fb760cf195871f38b3172d4dfd26c01a | [
"MIT"
] | null | null | null | verificator/Maps.py | jwallnoefer/multisat_qrepeater_sim_archive | 69b4c242fb760cf195871f38b3172d4dfd26c01a | [
"MIT"
] | null | null | null | """
The maps that model the different processes in the QKD return for input that is diagonal in Bell-basis a diagonal output.
To reduce calculations I determined in the scipt "How many numbers for state" the effect of the maps on the diagonal elements
"""
import numpy as np
import functools
"""These are some helper functions. a-d represents the diagonal elements of the first state, e-h the ones of the second state"""
z_rot = lambda a, b, c, d: np.array([b, a, d, c])
y_rot = lambda a, b, c, d: np.array([d, c, b, a])
perf_dist = lambda a, b, c, d, e, f, g, h: np.array(
[a * e, d * h, a * g, d * f, d * e, a * h, d * g, a * f, c * g, b * f, c * e, b * h, b * g, c * f, b * e, c * h])
dc0 = lambda ae, af, ag, ah, be, bf, bg, bh, ce, cf, cg, ch, de, df, dg, dh: np.array(
[ae + af, be + bf, ce + cf, de + df])
dc1 = lambda ae, af, ag, ah, be, bf, bg, bh, ce, cf, cg, ch, de, df, dg, dh: np.array(
[ae + af + ag + ah, be + bf + bg + bh, ce + cf + cg + ch, de + df + dg + dh])
"""p is the ideality of the map, q = 1-p"""
mixnswap = lambda p, q, a, b, c, d, e, f, g, h: np.array([a * e * p + b * f * p + c * g * p + d * h * p + q / 4,
a * f * p + b * e * p + c * h * p + d * g * p + q / 4,
a * g * p + b * h * p + c * e * p + d * f * p + q / 4,
a * h * p + b * g * p + c * f * p + d * e * p + q / 4])
def dp_sing(t, T, a, b, c, d):
""" Calculate the state after dephasing for one memory for time t.
Parameters
----------
t : float
time of dephasig
T : float
dephasing time of the memory
a-d: float
diagonal elements of the state
Returns
-------
list of diagonal elements of the state after dephasing
"""
lam = (1 - np.exp(-t / (2 * T))) / 2
return ((1 - lam) * np.array([a, b, c, d]) + lam * z_rot(a, b, c, d)).tolist()
def dp_doub(t, T, a, b, c, d):
""" Calculate the state after dephasing for time t1 for one memory and t2 for the other memory.
Parameters
----------
t : float
time of dephasig
T : float
dephasing time of the memories
a-d: float
diagonal elements of the state
Returns
-------
list of diagonal elements of the state after dephasing
"""
lam = (1 - np.exp(- t / (2 * T))) / 2
lam = lam + lam - 2 * lam**2
return ((1 - lam) * np.array([a, b, c, d]) + lam * z_rot(a, b, c, d)).tolist()
def coupl(em, a, b, c, d):
""" Calculate the state after imperfect coupling to the fibre.
Parameters
----------
em1, em2 : float
misalignment errors of the stations (0-1)
a-d: float
diagonal elements of the state
Returns
-------
list of diagonal element of the state after coupling
"""
p = 1 - em
q = em
return (p * np.array([a, b, c, d]) + q * y_rot(a, b, c, d)).tolist()
@functools.lru_cache(maxsize=2048)
def distil(lam, pd1, pd2, a, b, c, d, e, f, g, h):
""" Calculate the state after imperfect entanglement distillation and dephasing.
Parameters
----------
lam1, lam2 : float
idealities of the distillation process of the stations
pd1, pd2 : float
probabilities for dark counts in the measurement for the stations
a-d: float
diagonal elements of the fist state
e-h: float
diagonal elements of the second state
Returns
-------
list of diagonal element of the state after dephasing, probability for acceptance of the distillation result
"""
p0 = (1 - pd1) * (1 - pd2) # probability for zero dark counts
# probability for one or two dark counts
p1 = 0.5 * (pd1 + pd2 - pd1 * pd2)
mixed = (lam * perf_dist(a, b, c, d, e, f, g, h) + (1 - lam) * np.ones((16)) /
16).tolist() # mixing the result of the perfect map with abs mixed state
# state times the accapance probability
unnormed = p0 * dc0(*mixed) + p1 * dc1(*mixed)
trace = np.sum(unnormed) # acceptance probability
normed = (unnormed / trace).tolist() # normalising the state
return normed, trace
def swap(lam, a, b, c, d, e, f, g, h):
""" Calculate the state after imperfect entanglement swapping and dephasing.
Parameters
----------
lam: float
idealities of the swapping process of the middle station
a-d: float
diagonal elements of the fist state
e-h: float
diagonal elements of the second state
Returns
-------
list of diagonal element of the state after swapping
"""
swapped = mixnswap(lam, 1 - lam, a, b, c, d, e, f, g, h)
normed = swapped / np.sum(swapped) # normalising the state
return np.array(normed).tolist()
| 35.698529 | 128 | 0.547065 | import numpy as np
import functools
z_rot = lambda a, b, c, d: np.array([b, a, d, c])
y_rot = lambda a, b, c, d: np.array([d, c, b, a])
perf_dist = lambda a, b, c, d, e, f, g, h: np.array(
[a * e, d * h, a * g, d * f, d * e, a * h, d * g, a * f, c * g, b * f, c * e, b * h, b * g, c * f, b * e, c * h])
dc0 = lambda ae, af, ag, ah, be, bf, bg, bh, ce, cf, cg, ch, de, df, dg, dh: np.array(
[ae + af, be + bf, ce + cf, de + df])
dc1 = lambda ae, af, ag, ah, be, bf, bg, bh, ce, cf, cg, ch, de, df, dg, dh: np.array(
[ae + af + ag + ah, be + bf + bg + bh, ce + cf + cg + ch, de + df + dg + dh])
mixnswap = lambda p, q, a, b, c, d, e, f, g, h: np.array([a * e * p + b * f * p + c * g * p + d * h * p + q / 4,
a * f * p + b * e * p + c * h * p + d * g * p + q / 4,
a * g * p + b * h * p + c * e * p + d * f * p + q / 4,
a * h * p + b * g * p + c * f * p + d * e * p + q / 4])
def dp_sing(t, T, a, b, c, d):
lam = (1 - np.exp(-t / (2 * T))) / 2
return ((1 - lam) * np.array([a, b, c, d]) + lam * z_rot(a, b, c, d)).tolist()
def dp_doub(t, T, a, b, c, d):
lam = (1 - np.exp(- t / (2 * T))) / 2
lam = lam + lam - 2 * lam**2
return ((1 - lam) * np.array([a, b, c, d]) + lam * z_rot(a, b, c, d)).tolist()
def coupl(em, a, b, c, d):
p = 1 - em
q = em
return (p * np.array([a, b, c, d]) + q * y_rot(a, b, c, d)).tolist()
@functools.lru_cache(maxsize=2048)
def distil(lam, pd1, pd2, a, b, c, d, e, f, g, h):
p0 = (1 - pd1) * (1 - pd2)
p1 = 0.5 * (pd1 + pd2 - pd1 * pd2)
mixed = (lam * perf_dist(a, b, c, d, e, f, g, h) + (1 - lam) * np.ones((16)) /
16).tolist()
unnormed = p0 * dc0(*mixed) + p1 * dc1(*mixed)
trace = np.sum(unnormed)
normed = (unnormed / trace).tolist()
return normed, trace
def swap(lam, a, b, c, d, e, f, g, h):
swapped = mixnswap(lam, 1 - lam, a, b, c, d, e, f, g, h)
normed = swapped / np.sum(swapped)
return np.array(normed).tolist()
| true | true |
f733d6092e2e51d54d4bfc79178097397b2a12b8 | 3,849 | py | Python | insights/parsers/zdump_v.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 121 | 2017-05-30T20:23:25.000Z | 2022-03-23T12:52:15.000Z | insights/parsers/zdump_v.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 1,977 | 2017-05-26T14:36:03.000Z | 2022-03-31T10:38:53.000Z | insights/parsers/zdump_v.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 244 | 2017-05-30T20:22:57.000Z | 2022-03-26T10:09:39.000Z | """
ZdumpV - command ``/usr/sbin/zdump -v /etc/localtime -c 2019,2039``
===================================================================
The ``/usr/sbin/zdump -v /etc/localtime -c 2019,2039`` command provides information about
'Daylight Saving Time' in file /etc/localtime from 2019 to 2039.
Sample content from command ``zdump -v /etc/localtime -c 2019,2039`` is::
/etc/localtime Sun Mar 10 06:59:59 2019 UTC = Sun Mar 10 01:59:59 2019 EST isdst=0 gmtoff=-18000
/etc/localtime Sun Mar 10 07:00:00 2019 UTC = Sun Mar 10 03:00:00 2019 EDT isdst=1 gmtoff=-14400
/etc/localtime Sun Nov 7 05:59:59 2038 UTC = Sun Nov 7 01:59:59 2038 EDT isdst=1 gmtoff=-14400
/etc/localtime Sun Nov 7 06:00:00 2038 UTC = Sun Nov 7 01:00:00 2038 EST isdst=0 gmtoff=-18000
Examples:
>>> dst = zdump[0]
>>> dst.get('utc_time')
datetime.datetime(2019, 3, 10, 6, 59, 59)
>>> dst.get('utc_time_raw')
'Sun Mar 10 06:59:59 2019 UTC'
>>> dst.get('local_time')
datetime.datetime(2019, 3, 10, 1, 59, 59)
>>> dst.get('local_time_raw')
'Sun Mar 10 01:59:59 2019 EST'
>>> dst.get('isdst')
0
>>> dst.get('gmtoff')
-18000
"""
from datetime import datetime
from insights.specs import Specs
from insights.parsers import SkipException
from insights import parser, CommandParser
def str2datetime(timestamp, tz=False):
"""
This function translates the time stamp into a datetime object.
Args:
timestamp (str): the time stamp from command `zdump -v`
tz (bool): True if it's UTC TimeZone.
Returns:
time (datetime): the datetime object about the time stamp
time_string (str): the formatted time stamp
"""
time, time_string = None, timestamp.strip()
# Fixed the problem that the program running this python code doesn't
# has the corresponding TimeZone where strptime will raise ValueError.
# So, we skip the `TimeZone`
time_s = time_string.rsplit(None, 1)[0]
time_f = "%a %b %d %H:%M:%S %Y"
if tz:
# In some version, `zdump` prints 'UT' instead of 'UTC'
# 'UC' is an invalid TimeZone for function `strptime`
time_s = time_s + " UTC"
time_f = "%a %b %d %H:%M:%S %Y %Z"
try:
time = datetime.strptime(time_s, time_f)
except ValueError:
pass
return time, time_string
@parser(Specs.zdump_v)
class ZdumpV(CommandParser, list):
"""
Parse the output from the ``/usr/sbin/zdump -v /etc/localtime -c 2019,2039`` command
and store the 'Daylight Saving Time' information into a list.
Raises:
SkipException: When nothing is parsed.
.. warning:: The value in key `local_time` doesn't include the TimeZone information
"""
def parse_content(self, content):
if not content:
raise SkipException("No Data from command: /usr/sbin/zdump -v /etc/localtime -c 2019,2039")
for line in content:
dst = {}
if 'isdst' not in line:
# skip the line that does not include a time stamp
continue
utc_time, remains = line.strip('/etc/localtime').split(' = ')
dst['utc_time'], dst['utc_time_raw'] = str2datetime(utc_time, True)
if dst['utc_time'] is None:
continue
local_time, _ = remains.split("isdst")
dst['local_time'], dst['local_time_raw'] = str2datetime(local_time)
if dst['local_time'] is None:
continue
isdst = [s.split('=')[1] for s in remains.split() if 'isdst' in s and '=' in s]
if isdst:
dst['isdst'] = int(isdst[0])
gmtoff = [s.split('=')[1] for s in remains.split() if 'gmtoff' in s and '=' in s]
if gmtoff:
dst['gmtoff'] = int(gmtoff[0])
self.append(dst)
| 34.675676 | 103 | 0.600935 |
from datetime import datetime
from insights.specs import Specs
from insights.parsers import SkipException
from insights import parser, CommandParser
def str2datetime(timestamp, tz=False):
time, time_string = None, timestamp.strip()
# has the corresponding TimeZone where strptime will raise ValueError.
# So, we skip the `TimeZone`
time_s = time_string.rsplit(None, 1)[0]
time_f = "%a %b %d %H:%M:%S %Y"
if tz:
# In some version, `zdump` prints 'UT' instead of 'UTC'
# 'UC' is an invalid TimeZone for function `strptime`
time_s = time_s + " UTC"
time_f = "%a %b %d %H:%M:%S %Y %Z"
try:
time = datetime.strptime(time_s, time_f)
except ValueError:
pass
return time, time_string
@parser(Specs.zdump_v)
class ZdumpV(CommandParser, list):
def parse_content(self, content):
if not content:
raise SkipException("No Data from command: /usr/sbin/zdump -v /etc/localtime -c 2019,2039")
for line in content:
dst = {}
if 'isdst' not in line:
# skip the line that does not include a time stamp
continue
utc_time, remains = line.strip('/etc/localtime').split(' = ')
dst['utc_time'], dst['utc_time_raw'] = str2datetime(utc_time, True)
if dst['utc_time'] is None:
continue
local_time, _ = remains.split("isdst")
dst['local_time'], dst['local_time_raw'] = str2datetime(local_time)
if dst['local_time'] is None:
continue
isdst = [s.split('=')[1] for s in remains.split() if 'isdst' in s and '=' in s]
if isdst:
dst['isdst'] = int(isdst[0])
gmtoff = [s.split('=')[1] for s in remains.split() if 'gmtoff' in s and '=' in s]
if gmtoff:
dst['gmtoff'] = int(gmtoff[0])
self.append(dst)
| true | true |
f733d650885dd636e6fa4c71fac7eeffbd094e0f | 3,147 | py | Python | ai/domain_adaptation/utils/vis.py | aayushkafle/implicit_alignment | 4835a8a5acc4b30daf7e1c95195f160e76306cd1 | [
"Apache-2.0"
] | null | null | null | ai/domain_adaptation/utils/vis.py | aayushkafle/implicit_alignment | 4835a8a5acc4b30daf7e1c95195f160e76306cd1 | [
"Apache-2.0"
] | null | null | null | ai/domain_adaptation/utils/vis.py | aayushkafle/implicit_alignment | 4835a8a5acc4b30daf7e1c95195f160e76306cd1 | [
"Apache-2.0"
] | 1 | 2021-04-15T13:29:34.000Z | 2021-04-15T13:29:34.000Z | import numpy as np
from ai.domain_adaptation.datasets import image_index
from ai.domain_adaptation.utils import np_utils
from IPython.display import display, Image
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
def load_data_for_vis(prob_path, target_domain_file, dataset_dir):
domain_info = image_index.parse_domain_file(target_domain_file, dataset_dir)
yhat_info = np_utils.parse_predictions_from_pickle(prob_path)
return domain_info, yhat_info
def visulize_confidence(prob_path, target_domain_file, dataset_dir, cls_id):
domain_info, yhat_info = load_data_for_vis(prob_path, target_domain_file, dataset_dir)
vis_confident_predictions(cls_id, None, domain_info, yhat_info)
def vis_confident_predictions(cls_id, top_k=20, domain_info=None, yhat_info=None):
sorted_id_indices = np_utils.retrieve_sorted_indices_for_one_cls(cls_id, yhat_info)
for ith, example_id in enumerate(sorted_id_indices):
filename, label = domain_info.image_path_label_tuples[example_id]
print(f'{domain_info.label_description_dict[label]}, P {yhat_info.prob[example_id, cls_id]:.3}')
img = Image(filename=filename, width=150, height=150)
display(img)
if top_k is not None and ith > top_k:
break
def plot_confusion_matrix(y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
# classes = classes[unique_labels(y_true, y_pred)]
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# np.set_printoptions(precision=3)
fig, ax = plt.subplots(figsize=(20, 20))
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
# ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
fig.savefig(f'./plots/confusion_matrix{title}.pdf')
return ax
| 37.464286 | 104 | 0.668891 | import numpy as np
from ai.domain_adaptation.datasets import image_index
from ai.domain_adaptation.utils import np_utils
from IPython.display import display, Image
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
def load_data_for_vis(prob_path, target_domain_file, dataset_dir):
domain_info = image_index.parse_domain_file(target_domain_file, dataset_dir)
yhat_info = np_utils.parse_predictions_from_pickle(prob_path)
return domain_info, yhat_info
def visulize_confidence(prob_path, target_domain_file, dataset_dir, cls_id):
domain_info, yhat_info = load_data_for_vis(prob_path, target_domain_file, dataset_dir)
vis_confident_predictions(cls_id, None, domain_info, yhat_info)
def vis_confident_predictions(cls_id, top_k=20, domain_info=None, yhat_info=None):
sorted_id_indices = np_utils.retrieve_sorted_indices_for_one_cls(cls_id, yhat_info)
for ith, example_id in enumerate(sorted_id_indices):
filename, label = domain_info.image_path_label_tuples[example_id]
print(f'{domain_info.label_description_dict[label]}, P {yhat_info.prob[example_id, cls_id]:.3}')
img = Image(filename=filename, width=150, height=150)
display(img)
if top_k is not None and ith > top_k:
break
def plot_confusion_matrix(y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
cm = confusion_matrix(y_true, y_pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig, ax = plt.subplots(figsize=(20, 20))
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
fig.savefig(f'./plots/confusion_matrix{title}.pdf')
return ax
| true | true |
f733d723ad0fc3dc2db8276b1a146d59c01444bc | 7,116 | py | Python | tests/test_debug.py | LoveIsGrief/coveragepy | fdadbe8d442a0957d15b4e14035b73d9321cbdff | [
"Apache-2.0"
] | null | null | null | tests/test_debug.py | LoveIsGrief/coveragepy | fdadbe8d442a0957d15b4e14035b73d9321cbdff | [
"Apache-2.0"
] | null | null | null | tests/test_debug.py | LoveIsGrief/coveragepy | fdadbe8d442a0957d15b4e14035b73d9321cbdff | [
"Apache-2.0"
] | 2 | 2018-02-27T08:56:41.000Z | 2020-12-22T22:10:38.000Z | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Tests of coverage/debug.py"""
import os
import pytest
import coverage
from coverage.backward import StringIO
from coverage.debug import filter_text, info_formatter, info_header, short_id, short_stack
from tests.coveragetest import CoverageTest
from tests.helpers import re_lines
class InfoFormatterTest(CoverageTest):
"""Tests of misc.info_formatter."""
run_in_temp_dir = False
def test_info_formatter(self):
lines = list(info_formatter([
('x', 'hello there'),
('very long label', ['one element']),
('regular', ['abc', 'def', 'ghi', 'jkl']),
('nothing', []),
]))
self.assertEqual(lines, [
' x: hello there',
'very long label: one element',
' regular: abc',
' def',
' ghi',
' jkl',
' nothing: -none-',
])
def test_info_formatter_with_generator(self):
lines = list(info_formatter(('info%d' % i, i) for i in range(3)))
self.assertEqual(lines, ['info0: 0', 'info1: 1', 'info2: 2'])
@pytest.mark.parametrize("label, header", [
("x", "-- x ---------------------------------------------------------"),
("hello there", "-- hello there -----------------------------------------------"),
])
def test_info_header(label, header):
assert info_header(label) == header
@pytest.mark.parametrize("id64, id16", [
(0x1234, 0x1234),
(0x12340000, 0x1234),
(0xA5A55A5A, 0xFFFF),
(0x1234cba956780fed, 0x8008),
])
def test_short_id(id64, id16):
assert short_id(id64) == id16
@pytest.mark.parametrize("text, filters, result", [
("hello", [], "hello"),
("hello\n", [], "hello\n"),
("hello\nhello\n", [], "hello\nhello\n"),
("hello\nbye\n", [lambda x: "="+x], "=hello\n=bye\n"),
("hello\nbye\n", [lambda x: "="+x, lambda x: x+"\ndone\n"], "=hello\ndone\n=bye\ndone\n"),
])
def test_filter_text(text, filters, result):
assert filter_text(text, filters) == result
class DebugTraceTest(CoverageTest):
"""Tests of debug output."""
def f1_debug_output(self, debug):
"""Runs some code with `debug` option, returns the debug output."""
# Make code to run.
self.make_file("f1.py", """\
def f1(x):
return x+1
for i in range(5):
f1(i)
""")
debug_out = StringIO()
cov = coverage.Coverage(debug=debug)
cov._debug_file = debug_out
self.start_import_stop(cov, "f1")
cov.save()
out_lines = debug_out.getvalue()
return out_lines
def test_debug_no_trace(self):
out_lines = self.f1_debug_output([])
# We should have no output at all.
self.assertFalse(out_lines)
def test_debug_trace(self):
out_lines = self.f1_debug_output(["trace"])
# We should have a line like "Tracing 'f1.py'"
self.assertIn("Tracing 'f1.py'", out_lines)
# We should have lines like "Not tracing 'collector.py'..."
coverage_lines = re_lines(
out_lines,
r"^Not tracing .*: is part of coverage.py$"
)
self.assertTrue(coverage_lines)
def test_debug_trace_pid(self):
out_lines = self.f1_debug_output(["trace", "pid"])
# Now our lines are always prefixed with the process id.
pid_prefix = r"^%5d\.[0-9a-f]{4}: " % os.getpid()
pid_lines = re_lines(out_lines, pid_prefix)
self.assertEqual(pid_lines, out_lines)
# We still have some tracing, and some not tracing.
self.assertTrue(re_lines(out_lines, pid_prefix + "Tracing "))
self.assertTrue(re_lines(out_lines, pid_prefix + "Not tracing "))
def test_debug_callers(self):
out_lines = self.f1_debug_output(["pid", "dataop", "dataio", "callers"])
print(out_lines)
# For every real message, there should be a stack
# trace with a line like "f1_debug_output : /Users/ned/coverage/tests/test_debug.py @71"
real_messages = re_lines(out_lines, r" @\d+", match=False).splitlines()
frame_pattern = r"\s+f1_debug_output : .*tests[/\\]test_debug.py @\d+$"
frames = re_lines(out_lines, frame_pattern).splitlines()
self.assertEqual(len(real_messages), len(frames))
# The last message should be "Writing data", and the last frame should
# be write_file in data.py.
self.assertRegex(real_messages[-1], r"^\s*\d+\.\w{4}: Writing data")
last_line = out_lines.splitlines()[-1]
self.assertRegex(last_line, r"\s+write_file : .*coverage[/\\]data.py @\d+$")
def test_debug_config(self):
out_lines = self.f1_debug_output(["config"])
labels = """
attempted_config_files branch config_files cover_pylib data_file
debug exclude_list extra_css html_dir html_title ignore_errors
include omit parallel partial_always_list partial_list paths
precision show_missing source timid xml_output
""".split()
for label in labels:
label_pat = r"^\s*%s: " % label
self.assertEqual(
len(re_lines(out_lines, label_pat).splitlines()),
1
)
def test_debug_sys(self):
out_lines = self.f1_debug_output(["sys"])
labels = """
version coverage cover_paths pylib_paths tracer config_files
configs_read data_path python platform implementation executable
cwd path environment command_line cover_match pylib_match
""".split()
for label in labels:
label_pat = r"^\s*%s: " % label
self.assertEqual(
len(re_lines(out_lines, label_pat).splitlines()),
1,
msg="Incorrect lines for %r" % label,
)
def f_one(*args, **kwargs):
"""First of the chain of functions for testing `short_stack`."""
return f_two(*args, **kwargs)
def f_two(*args, **kwargs):
"""Second of the chain of functions for testing `short_stack`."""
return f_three(*args, **kwargs)
def f_three(*args, **kwargs):
"""Third of the chain of functions for testing `short_stack`."""
return short_stack(*args, **kwargs)
class ShortStackTest(CoverageTest):
"""Tests of coverage.debug.short_stack."""
run_in_temp_dir = False
def test_short_stack(self):
stack = f_one().splitlines()
self.assertGreater(len(stack), 10)
self.assertIn("f_three", stack[-1])
self.assertIn("f_two", stack[-2])
self.assertIn("f_one", stack[-3])
def test_short_stack_limit(self):
stack = f_one(limit=5).splitlines()
self.assertEqual(len(stack), 5)
def test_short_stack_skip(self):
stack = f_one(skip=1).splitlines()
self.assertIn("f_two", stack[-1])
| 34.047847 | 96 | 0.590219 |
import os
import pytest
import coverage
from coverage.backward import StringIO
from coverage.debug import filter_text, info_formatter, info_header, short_id, short_stack
from tests.coveragetest import CoverageTest
from tests.helpers import re_lines
class InfoFormatterTest(CoverageTest):
run_in_temp_dir = False
def test_info_formatter(self):
lines = list(info_formatter([
('x', 'hello there'),
('very long label', ['one element']),
('regular', ['abc', 'def', 'ghi', 'jkl']),
('nothing', []),
]))
self.assertEqual(lines, [
' x: hello there',
'very long label: one element',
' regular: abc',
' def',
' ghi',
' jkl',
' nothing: -none-',
])
def test_info_formatter_with_generator(self):
lines = list(info_formatter(('info%d' % i, i) for i in range(3)))
self.assertEqual(lines, ['info0: 0', 'info1: 1', 'info2: 2'])
@pytest.mark.parametrize("label, header", [
("x", "-- x ---------------------------------------------------------"),
("hello there", "-- hello there -----------------------------------------------"),
])
def test_info_header(label, header):
assert info_header(label) == header
@pytest.mark.parametrize("id64, id16", [
(0x1234, 0x1234),
(0x12340000, 0x1234),
(0xA5A55A5A, 0xFFFF),
(0x1234cba956780fed, 0x8008),
])
def test_short_id(id64, id16):
assert short_id(id64) == id16
@pytest.mark.parametrize("text, filters, result", [
("hello", [], "hello"),
("hello\n", [], "hello\n"),
("hello\nhello\n", [], "hello\nhello\n"),
("hello\nbye\n", [lambda x: "="+x], "=hello\n=bye\n"),
("hello\nbye\n", [lambda x: "="+x, lambda x: x+"\ndone\n"], "=hello\ndone\n=bye\ndone\n"),
])
def test_filter_text(text, filters, result):
assert filter_text(text, filters) == result
class DebugTraceTest(CoverageTest):
def f1_debug_output(self, debug):
self.make_file("f1.py", """\
def f1(x):
return x+1
for i in range(5):
f1(i)
""")
debug_out = StringIO()
cov = coverage.Coverage(debug=debug)
cov._debug_file = debug_out
self.start_import_stop(cov, "f1")
cov.save()
out_lines = debug_out.getvalue()
return out_lines
def test_debug_no_trace(self):
out_lines = self.f1_debug_output([])
self.assertFalse(out_lines)
def test_debug_trace(self):
out_lines = self.f1_debug_output(["trace"])
self.assertIn("Tracing 'f1.py'", out_lines)
coverage_lines = re_lines(
out_lines,
r"^Not tracing .*: is part of coverage.py$"
)
self.assertTrue(coverage_lines)
def test_debug_trace_pid(self):
out_lines = self.f1_debug_output(["trace", "pid"])
pid_prefix = r"^%5d\.[0-9a-f]{4}: " % os.getpid()
pid_lines = re_lines(out_lines, pid_prefix)
self.assertEqual(pid_lines, out_lines)
self.assertTrue(re_lines(out_lines, pid_prefix + "Tracing "))
self.assertTrue(re_lines(out_lines, pid_prefix + "Not tracing "))
def test_debug_callers(self):
out_lines = self.f1_debug_output(["pid", "dataop", "dataio", "callers"])
print(out_lines)
real_messages = re_lines(out_lines, r" @\d+", match=False).splitlines()
frame_pattern = r"\s+f1_debug_output : .*tests[/\\]test_debug.py @\d+$"
frames = re_lines(out_lines, frame_pattern).splitlines()
self.assertEqual(len(real_messages), len(frames))
self.assertRegex(real_messages[-1], r"^\s*\d+\.\w{4}: Writing data")
last_line = out_lines.splitlines()[-1]
self.assertRegex(last_line, r"\s+write_file : .*coverage[/\\]data.py @\d+$")
def test_debug_config(self):
out_lines = self.f1_debug_output(["config"])
labels = """
attempted_config_files branch config_files cover_pylib data_file
debug exclude_list extra_css html_dir html_title ignore_errors
include omit parallel partial_always_list partial_list paths
precision show_missing source timid xml_output
""".split()
for label in labels:
label_pat = r"^\s*%s: " % label
self.assertEqual(
len(re_lines(out_lines, label_pat).splitlines()),
1
)
def test_debug_sys(self):
out_lines = self.f1_debug_output(["sys"])
labels = """
version coverage cover_paths pylib_paths tracer config_files
configs_read data_path python platform implementation executable
cwd path environment command_line cover_match pylib_match
""".split()
for label in labels:
label_pat = r"^\s*%s: " % label
self.assertEqual(
len(re_lines(out_lines, label_pat).splitlines()),
1,
msg="Incorrect lines for %r" % label,
)
def f_one(*args, **kwargs):
return f_two(*args, **kwargs)
def f_two(*args, **kwargs):
return f_three(*args, **kwargs)
def f_three(*args, **kwargs):
return short_stack(*args, **kwargs)
class ShortStackTest(CoverageTest):
run_in_temp_dir = False
def test_short_stack(self):
stack = f_one().splitlines()
self.assertGreater(len(stack), 10)
self.assertIn("f_three", stack[-1])
self.assertIn("f_two", stack[-2])
self.assertIn("f_one", stack[-3])
def test_short_stack_limit(self):
stack = f_one(limit=5).splitlines()
self.assertEqual(len(stack), 5)
def test_short_stack_skip(self):
stack = f_one(skip=1).splitlines()
self.assertIn("f_two", stack[-1])
| true | true |
f733d82bc3f183b9585071a3be6b935d974669d6 | 2,620 | py | Python | hacks/stepper-mover-1-curses.py | joadavis/rpi-coding | ea60e393e0595dac03ef2594ad5aa4077718d28e | [
"MIT"
] | null | null | null | hacks/stepper-mover-1-curses.py | joadavis/rpi-coding | ea60e393e0595dac03ef2594ad5aa4077718d28e | [
"MIT"
] | null | null | null | hacks/stepper-mover-1-curses.py | joadavis/rpi-coding | ea60e393e0595dac03ef2594ad5aa4077718d28e | [
"MIT"
] | null | null | null | #!/usr/bin/python
# from raspberrypi-spy.co.uk
import sys
import time
import RPi.GPIO as GPIO
import curses
# set up curses
stdscr = curses.initscr()
#curses.noecho()
curses.cbreak()
stdscr.keypad(1)
# use BCM GPIO refs
GPIO.setmode(GPIO.BCM)
# define pins
#StepPins = [17, 22, 23, 24]
#StepPins = [35, 36, 37, 38] # order on board
#StepPins = [19, 16, 26, 20]
# motor 2
##StepPins = [19, 6, 16, 12]
# motor 1
#StepPins = [17, 18, 22, 23] # GPIO numbering
#StepPins = [18, 17, 22, 23] # GPIO numbering
# lined up on left of header
StepPins = [19, 13, 6, 5]
# and on right down to last, skipping ground on phys 34
StepPins2 = [12, 16, 20, 21]
# set all pins as ouput
for pin in StepPins:
#print "Setup pins"
stdscr.addstr(2,2, "Setup pins " + str(pin))
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, False)
#define sequence for halfstepping
HalfSeq = [[1,0,0,1],
[1,0,0,0],
[1,1,0,0],
[0,1,0,0],
[0,1,1,0],
[0,0,1,0],
[0,0,1,1],
[0,0,0,1]]
# full stepping
FullSeq = [[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1]]
Seq = FullSeq
StepCount = len(Seq)
StepDir = 1 # positive clockwise, negative counterclockwise
# read wait time from command line
if len(sys.argv) > 1:
WaitTime = int(sys.argv[1])/float(1000)
else:
WaitTime = 10/float(1000)
# init
StepCounter = 0
# main loop
#while True:
key = ''
while key != ord('q'):
key = stdscr.getch()
stdscr.addch(2, 25, key)
stdscr.refresh()
if key == curses.KEY_UP:
StepDir = 1
elif key == curses.KEY_DOWN:
StepDir = -1
if key == ord('h'):
# go to half steps
Seq = HalfSeq
#print len(HalfSeq)
StepCount = len(Seq)
elif key == ord('f'):
Seq = FullSeq
#print("full is " + str(len(Seq)))
StepCount = len(Seq)
# fewer steps in full than half, so chop down
if (StepCounter >= StepCount):
StepCounter = 0
#print "counting ",
#print StepCounter,
#print Seq[StepCounter]
stdscr.addstr(3, 3, "counting " + str(StepCounter) + " " + str(Seq[StepCounter]))
# 4 pins
for pin in range(0, 4):
xpin = StepPins[pin]
if Seq[StepCounter][pin] != 0:
#print " enable GPIO %i" % (xpin)
stdscr.addstr(4 + pin, 4, "enable GPIO %i" % (xpin))
GPIO.output(xpin, True)
else:
GPIO.output(xpin, False)
StepCounter += StepDir
# if end, start again
if (StepCounter >= StepCount):
StepCounter = 0
if (StepCounter < 0):
StepCounter = StepCount + StepDir
time.sleep(WaitTime)
GPIO.cleanup()
# curses cleanup
curses.nocbreak(); stdscr.keypad(0)
#curses.echo()
curses.endwin()
| 19.552239 | 83 | 0.607634 |
import sys
import time
import RPi.GPIO as GPIO
import curses
stdscr = curses.initscr()
curses.cbreak()
stdscr.keypad(1)
GPIO.setmode(GPIO.BCM)
pin in StepPins:
stdscr.addstr(2,2, "Setup pins " + str(pin))
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, False)
HalfSeq = [[1,0,0,1],
[1,0,0,0],
[1,1,0,0],
[0,1,0,0],
[0,1,1,0],
[0,0,1,0],
[0,0,1,1],
[0,0,0,1]]
FullSeq = [[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1]]
Seq = FullSeq
StepCount = len(Seq)
StepDir = 1
if len(sys.argv) > 1:
WaitTime = int(sys.argv[1])/float(1000)
else:
WaitTime = 10/float(1000)
StepCounter = 0
key = ''
while key != ord('q'):
key = stdscr.getch()
stdscr.addch(2, 25, key)
stdscr.refresh()
if key == curses.KEY_UP:
StepDir = 1
elif key == curses.KEY_DOWN:
StepDir = -1
if key == ord('h'):
Seq = HalfSeq
StepCount = len(Seq)
elif key == ord('f'):
Seq = FullSeq
StepCount = len(Seq)
if (StepCounter >= StepCount):
StepCounter = 0
stdscr.addstr(3, 3, "counting " + str(StepCounter) + " " + str(Seq[StepCounter]))
for pin in range(0, 4):
xpin = StepPins[pin]
if Seq[StepCounter][pin] != 0:
stdscr.addstr(4 + pin, 4, "enable GPIO %i" % (xpin))
GPIO.output(xpin, True)
else:
GPIO.output(xpin, False)
StepCounter += StepDir
if (StepCounter >= StepCount):
StepCounter = 0
if (StepCounter < 0):
StepCounter = StepCount + StepDir
time.sleep(WaitTime)
GPIO.cleanup()
curses.nocbreak(); stdscr.keypad(0)
curses.endwin()
| true | true |
f733d99d59edf672ec3ad0db376c6ee54adec6c2 | 2,405 | py | Python | examples/stream-job-examples/in-progress.py | prernaagarwal/cs6235Project | 9e7c5717ea2afdb5f1afef9f9b632b5a765fce4e | [
"Apache-2.0"
] | 3 | 2020-10-04T22:40:27.000Z | 2021-12-14T14:28:11.000Z | examples/stream-job-examples/in-progress.py | prernaagarwal/cs6235Project | 9e7c5717ea2afdb5f1afef9f9b632b5a765fce4e | [
"Apache-2.0"
] | 1 | 2020-10-22T19:58:53.000Z | 2020-10-22T19:58:53.000Z | examples/stream-job-examples/in-progress.py | prernaagarwal/cs6235Project | 9e7c5717ea2afdb5f1afef9f9b632b5a765fce4e | [
"Apache-2.0"
] | 4 | 2020-10-15T03:12:52.000Z | 2021-03-25T20:03:54.000Z | from edna.core.execution.context import StreamingContext
from edna.api import StreamBuilder
from edna.ingest.streaming import SimulatedIngest
from edna.serializers.EmptySerializer import EmptyStringSerializer
from edna.process.map import JsonToObject
from edna.process.filter import KeyedFilter
from edna.emit import StdoutEmit
import pdb
def filteractorid(actorid):
return True if actorid>205 else False
def main():
list_of_inserts = ['{"actor_id":210, "first_name":"jess", "last_name":"st. german", "additional":"unneeded1"}',
'{"actor_id":201, "first_name":"jess", "last_name":"courtney", "additional":"unneeded2"}',
'{"actor_id":202, "first_name":"jess", "last_name":"mishra", "additional":"unneeded3"}',
'{"actor_id":203, "first_name":"jess", "last_name":"novinha", "additional":"unneeded4"}',
'{"actor_id":204, "first_name":"jess", "last_name":"changed", "additional":"unneeded5"}',
'{"actor_id":205, "first_name":"jess", "last_name":"ael-rayya", "additional":"unneeded6"}',
'{"actor_id":206, "first_name":"jess", "last_name":"zuma", "additional":"unneeded7"}',
'{"actor_id":207, "first_name":"jess", "last_name":"changed", "additional":"unneeded8"}',
'{"actor_id":208, "first_name":"jess", "last_name":"changed", "additional":"unneeded9"}',
'{"actor_id":209, "first_name":"jess", "last_name":"changed", "additional":"unneeded10"}']
context = StreamingContext()
# Ok, so we have a stream
stream = StreamBuilder().build(ingest=SimulatedIngest(serializer=EmptyStringSerializer, stream_list=list_of_inserts), streaming_context=context)
stream = stream.map(map_process=JsonToObject()).filter(filter_process=KeyedFilter(filter_callable=filteractorid, key="actor_id")).emit(emit_process=StdoutEmit(serializer=EmptyStringSerializer))
stream1 = StreamBuilder().build(ingest=SimulatedIngest(serializer=EmptyStringSerializer, stream_list=list_of_inserts), streaming_context=context)
stream1 = stream1.map(map_process=JsonToObject()).filter(filter_process=KeyedFilter(filter_callable=filteractorid, key="actor_id")).emit(emit_process=StdoutEmit(serializer=EmptyStringSerializer))
context.addStream(stream=stream)
context.addStream(stream=stream1)
context.execute()
#pdb.set_trace()
if __name__ == "__main__":
main() | 51.170213 | 199 | 0.707692 | from edna.core.execution.context import StreamingContext
from edna.api import StreamBuilder
from edna.ingest.streaming import SimulatedIngest
from edna.serializers.EmptySerializer import EmptyStringSerializer
from edna.process.map import JsonToObject
from edna.process.filter import KeyedFilter
from edna.emit import StdoutEmit
import pdb
def filteractorid(actorid):
return True if actorid>205 else False
def main():
list_of_inserts = ['{"actor_id":210, "first_name":"jess", "last_name":"st. german", "additional":"unneeded1"}',
'{"actor_id":201, "first_name":"jess", "last_name":"courtney", "additional":"unneeded2"}',
'{"actor_id":202, "first_name":"jess", "last_name":"mishra", "additional":"unneeded3"}',
'{"actor_id":203, "first_name":"jess", "last_name":"novinha", "additional":"unneeded4"}',
'{"actor_id":204, "first_name":"jess", "last_name":"changed", "additional":"unneeded5"}',
'{"actor_id":205, "first_name":"jess", "last_name":"ael-rayya", "additional":"unneeded6"}',
'{"actor_id":206, "first_name":"jess", "last_name":"zuma", "additional":"unneeded7"}',
'{"actor_id":207, "first_name":"jess", "last_name":"changed", "additional":"unneeded8"}',
'{"actor_id":208, "first_name":"jess", "last_name":"changed", "additional":"unneeded9"}',
'{"actor_id":209, "first_name":"jess", "last_name":"changed", "additional":"unneeded10"}']
context = StreamingContext()
stream = StreamBuilder().build(ingest=SimulatedIngest(serializer=EmptyStringSerializer, stream_list=list_of_inserts), streaming_context=context)
stream = stream.map(map_process=JsonToObject()).filter(filter_process=KeyedFilter(filter_callable=filteractorid, key="actor_id")).emit(emit_process=StdoutEmit(serializer=EmptyStringSerializer))
stream1 = StreamBuilder().build(ingest=SimulatedIngest(serializer=EmptyStringSerializer, stream_list=list_of_inserts), streaming_context=context)
stream1 = stream1.map(map_process=JsonToObject()).filter(filter_process=KeyedFilter(filter_callable=filteractorid, key="actor_id")).emit(emit_process=StdoutEmit(serializer=EmptyStringSerializer))
context.addStream(stream=stream)
context.addStream(stream=stream1)
context.execute()
if __name__ == "__main__":
main() | true | true |
f733da9b2e6fd7da239b30ca0de5545ee2e75c94 | 7,971 | py | Python | docs/conf.py | SohilaBogdadyNagdy/HitchHikerDemo | d0f0e1607d87d8eefc32688adf680ee0c45a8b42 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | SohilaBogdadyNagdy/HitchHikerDemo | d0f0e1607d87d8eefc32688adf680ee0c45a8b42 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | SohilaBogdadyNagdy/HitchHikerDemo | d0f0e1607d87d8eefc32688adf680ee0c45a8b42 | [
"BSD-3-Clause"
] | null | null | null | # HitchHikerDemo documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "HitchHikerDemo"
copyright = """2018, Sohila Boghdady"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "HitchHikerDemodoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"HitchHikerDemo.tex",
"HitchHikerDemo Documentation",
"""Sohila Boghdady""",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"HitchHikerDemo",
"HitchHikerDemo Documentation",
["""Sohila Boghdady"""],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"HitchHikerDemo",
"HitchHikerDemo Documentation",
"""Sohila Boghdady""",
"HitchHikerDemo",
"""Behold My Awesome Project!""",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.136719 | 80 | 0.697152 |
import os
import sys
extensions = []
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = "HitchHikerDemo"
copyright = """2018, Sohila Boghdady"""
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "HitchHikerDemodoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"HitchHikerDemo.tex",
"HitchHikerDemo Documentation",
"""Sohila Boghdady""",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"HitchHikerDemo",
"HitchHikerDemo Documentation",
["""Sohila Boghdady"""],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"HitchHikerDemo",
"HitchHikerDemo Documentation",
"""Sohila Boghdady""",
"HitchHikerDemo",
"""Behold My Awesome Project!""",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| true | true |
f733de1fcaee1027767be7d22688fda17296c23d | 4,896 | py | Python | tests/src/smiley/smiley/tests/test_db_linecache.py | incognitoRepo/hdlogger | c738161ef3144469ba0f47caf89770613031e96e | [
"BSD-2-Clause"
] | null | null | null | tests/src/smiley/smiley/tests/test_db_linecache.py | incognitoRepo/hdlogger | c738161ef3144469ba0f47caf89770613031e96e | [
"BSD-2-Clause"
] | null | null | null | tests/src/smiley/smiley/tests/test_db_linecache.py | incognitoRepo/hdlogger | c738161ef3144469ba0f47caf89770613031e96e | [
"BSD-2-Clause"
] | null | null | null | import fixtures
import testtools
from smiley import db
from smiley import db_linecache
class DBFileCacheTest(testtools.TestCase):
    """Exercise DBLineCache lookups against files cached in an in-memory DB."""

    def setUp(self):
        super(DBFileCacheTest, self).setUp()
        self.useFixture(fixtures.FakeLogger())
        self.db = db.DB(':memory:')
        self.db.start_run(
            '12345',
            '/no/such/dir',
            ['command', 'line', 'would', 'go', 'here'],
            1370436103.65,
        )
        # A plain file with no comment lines at all.
        simple_body = 'this would be the body\nline two\nline three'
        # A file mixing single-line comments, blank lines and code lines.
        commented_body = '\n'.join([
            '# start comment',
            'line two',
            '',
            '    # middle comment',
            '',
            'end line',
            '# comment',
            'last line',
        ])
        # Same shape, but every comment block spans two lines.
        multiline_body = '\n'.join([
            '# start comment',
            '# comment line 2',
            'non-comment 1',
            '',
            '    # middle comment',
            '    # middle comment line 2',
            '',
            'middle line',
            '# last comment',
            '# last comment line 2',
            'last line',
        ])
        self.db.cache_file_for_run('12345', 'test-file.txt', simple_body)
        self.db.cache_file_for_run('12345', 'with-comments.txt',
                                   commented_body)
        self.db.cache_file_for_run('12345', 'multi-line-comments.txt',
                                   multiline_body)
        self.cache = db_linecache.DBLineCache(self.db, '12345')

    def test_file_and_line_exist(self):
        self.assertEqual(self.cache.getline('test-file.txt', 2), 'line two')

    def test_file_does_not_exist(self):
        # Unknown files yield an empty string, not an error.
        self.assertEqual(self.cache.getline('no-such-test-file.txt', 2), '')

    def test_line_does_not_exist(self):
        self.assertEqual(self.cache.getline('test-file.txt', 99), '')

    def test_range_exists(self):
        got = self.cache.getlines('test-file.txt', 2, 3)
        self.assertEqual(got, 'line two\nline three')

    def test_range_underflow(self):
        # Line numbers start at 1; asking for line 0 is an error.
        self.assertRaises(IndexError,
                          self.cache.getlines,
                          'test-file.txt', 0, 2,
                          )

    def test_range_overflow(self):
        # An end line past EOF returns only the lines that exist.
        got = self.cache.getlines('test-file.txt', 2, 4)
        self.assertEqual(got, 'line two\nline three')

    def test_find_comments_adjascent(self):
        self.assertEqual(
            self.cache.find_comment_block_start('with-comments.txt', 2),
            1,
        )

    def test_comments_adjascent(self):
        got = self.cache.getlines('with-comments.txt', 2, 2,
                                  include_comments=True)
        self.assertEqual(got, '# start comment\nline two')

    def test_multi_line_comments_adjascent(self):
        got = self.cache.getlines('multi-line-comments.txt', 3, 3,
                                  include_comments=True)
        self.assertEqual(
            got,
            '# start comment\n# comment line 2\nnon-comment 1',
        )

    def test_find_comments_none(self):
        self.assertEqual(
            self.cache.find_comment_block_start('test-file.txt', 2),
            2,
        )

    def test_comments_none(self):
        got = self.cache.getlines('test-file.txt', 2, 3,
                                  include_comments=True)
        self.assertEqual(got, 'line two\nline three')

    def test_find_comments_and_blank_line(self):
        self.assertEqual(
            self.cache.find_comment_block_start('with-comments.txt', 6),
            4,
        )

    def test_comments_and_blank_line(self):
        got = self.cache.getlines('with-comments.txt', 6, 6,
                                  include_comments=True)
        self.assertEqual(got, '    # middle comment\n\nend line')

    def test_multi_line_comments_and_blank_line(self):
        got = self.cache.getlines('multi-line-comments.txt', 8, 8,
                                  include_comments=True)
        self.assertEqual(
            got,
            '    # middle comment\n    # middle comment line 2\n\nmiddle line',
        )

    def test_find_comments_without_blank_line(self):
        self.assertEqual(
            self.cache.find_comment_block_start('with-comments.txt', 8),
            7,
        )

    def test_comments_without_blank_line(self):
        got = self.cache.getlines('with-comments.txt', 8, 8,
                                  include_comments=True)
        self.assertEqual(got, '# comment\nlast line')

    def test_multi_line_comments_without_blank_line(self):
        got = self.cache.getlines('multi-line-comments.txt', 11, 11,
                                  include_comments=True)
        self.assertEqual(
            got,
            '# last comment\n# last comment line 2\nlast line',
        )
| 34.237762 | 75 | 0.542892 | import fixtures
import testtools
from smiley import db
from smiley import db_linecache
class DBFileCacheTest(testtools.TestCase):
def setUp(self):
super(DBFileCacheTest, self).setUp()
self.useFixture(fixtures.FakeLogger())
self.db = db.DB(':memory:')
self.db.start_run(
'12345',
'/no/such/dir',
['command', 'line', 'would', 'go', 'here'],
1370436103.65,
)
self.db.cache_file_for_run(
'12345',
'test-file.txt',
'this would be the body\nline two\nline three',
)
self.db.cache_file_for_run(
'12345',
'with-comments.txt',
'\n'.join([
'# start comment',
'line two',
'',
' # middle comment',
'',
'end line',
'# comment',
'last line',
]),
)
self.db.cache_file_for_run(
'12345',
'multi-line-comments.txt',
'\n'.join([
'# start comment',
'# comment line 2',
'non-comment 1',
'',
' # middle comment',
' # middle comment line 2',
'',
'middle line',
'# last comment',
'# last comment line 2',
'last line',
]),
)
self.cache = db_linecache.DBLineCache(self.db, '12345')
def test_file_and_line_exist(self):
line = self.cache.getline('test-file.txt', 2)
self.assertEqual(line, 'line two')
def test_file_does_not_exist(self):
line = self.cache.getline('no-such-test-file.txt', 2)
self.assertEqual(line, '')
def test_line_does_not_exist(self):
line = self.cache.getline('test-file.txt', 99)
self.assertEqual(line, '')
def test_range_exists(self):
lines = self.cache.getlines('test-file.txt', 2, 3)
self.assertEqual(lines, 'line two\nline three')
def test_range_underflow(self):
self.assertRaises(IndexError,
self.cache.getlines,
'test-file.txt', 0, 2,
)
def test_range_overflow(self):
lines = self.cache.getlines('test-file.txt', 2, 4)
self.assertEqual(lines, 'line two\nline three')
def test_find_comments_adjascent(self):
start = self.cache.find_comment_block_start('with-comments.txt', 2)
self.assertEqual(start, 1)
def test_comments_adjascent(self):
lines = self.cache.getlines('with-comments.txt', 2, 2,
include_comments=True)
self.assertEqual(lines, '# start comment\nline two')
def test_multi_line_comments_adjascent(self):
lines = self.cache.getlines('multi-line-comments.txt', 3, 3,
include_comments=True)
self.assertEqual(
lines,
'# start comment\n# comment line 2\nnon-comment 1',
)
def test_find_comments_none(self):
start = self.cache.find_comment_block_start('test-file.txt', 2)
self.assertEqual(start, 2)
def test_comments_none(self):
lines = self.cache.getlines('test-file.txt', 2, 3,
include_comments=True)
self.assertEqual(lines, 'line two\nline three')
def test_find_comments_and_blank_line(self):
start = self.cache.find_comment_block_start('with-comments.txt', 6)
self.assertEqual(start, 4)
def test_comments_and_blank_line(self):
lines = self.cache.getlines('with-comments.txt', 6, 6,
include_comments=True)
self.assertEqual(lines, ' # middle comment\n\nend line')
def test_multi_line_comments_and_blank_line(self):
lines = self.cache.getlines('multi-line-comments.txt', 8, 8,
include_comments=True)
self.assertEqual(
lines,
' # middle comment\n # middle comment line 2\n\nmiddle line',
)
def test_find_comments_without_blank_line(self):
start = self.cache.find_comment_block_start('with-comments.txt', 8)
self.assertEqual(start, 7)
def test_comments_without_blank_line(self):
lines = self.cache.getlines('with-comments.txt', 8, 8,
include_comments=True)
self.assertEqual(lines, '# comment\nlast line')
def test_multi_line_comments_without_blank_line(self):
lines = self.cache.getlines('multi-line-comments.txt', 11, 11,
include_comments=True)
self.assertEqual(
lines,
'# last comment\n# last comment line 2\nlast line',
)
| true | true |
f733df3416c3f4e79a3c542d6f4d715b01756f38 | 123,058 | py | Python | salt/cloud/clouds/ec2.py | DoubleNegativeVisualEffects/salt | 91b963274a46829454f19c729a56799573654807 | [
"Apache-2.0"
] | 1 | 2015-05-20T16:55:50.000Z | 2015-05-20T16:55:50.000Z | salt/cloud/clouds/ec2.py | DoubleNegativeVisualEffects/salt | 91b963274a46829454f19c729a56799573654807 | [
"Apache-2.0"
] | null | null | null | salt/cloud/clouds/ec2.py | DoubleNegativeVisualEffects/salt | 91b963274a46829454f19c729a56799573654807 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
The EC2 Cloud Module
====================
The EC2 cloud module is used to interact with the Amazon Elastic Cloud
Computing.
To use the EC2 cloud module, set up the cloud configuration at
``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/ec2.conf``:
.. code-block:: yaml
my-ec2-config:
# The EC2 API authentication id
id: GKTADJGHEIQSXMKKRBJ08H
# The EC2 API authentication key
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
# The ssh keyname to use
keyname: default
# The amazon security group
securitygroup: ssh_open
# The location of the private key which corresponds to the keyname
private_key: /root/default.pem
# Be default, service_url is set to amazonaws.com. If you are using this
# driver for something other than Amazon EC2, change it here:
service_url: amazonaws.com
# The endpoint that is ultimately used is usually formed using the region
# and the service_url. If you would like to override that entirely, you
# can explicitly define the endpoint:
endpoint: myendpoint.example.com:1138/services/Cloud
# SSH Gateways can be used with this provider. Gateways can be used
# when a salt-master is not on the same private network as the instance
# that is being deployed.
# Defaults to None
# Required
ssh_gateway: gateway.example.com
# Defaults to port 22
# Optional
ssh_gateway_port: 22
# Defaults to root
# Optional
ssh_gateway_username: root
# One authentication method is required. If both
# are specified, Private key wins.
# Private key defaults to None
ssh_gateway_private_key: /path/to/key.pem
# Password defaults to None
ssh_gateway_password: ExamplePasswordHere
provider: ec2
:depends: requests
'''
# pylint: disable=E0102
from __future__ import absolute_import
# Import python libs
import os
import sys
import stat
import time
import uuid
import pprint
import logging
import yaml
# Import 3rd-party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import requests
import salt.ext.six as six
from salt.ext.six.moves import map, range, zip
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse, urlencode as _urlencode
# pylint: enable=import-error,no-name-in-module
# Import libs for talking to the EC2 API
import hmac
import hashlib
import binascii
import datetime
import base64
# Import salt libs
import salt.utils
from salt.utils import namespaced_function
from salt.cloud.libcloudfuncs import get_salt_interface
from salt._compat import ElementTree as ET
import salt.utils.aws as aws
# Import salt.cloud libs
import salt.utils.cloud
import salt.config as config
from salt.exceptions import (
SaltCloudException,
SaltCloudSystemExit,
SaltCloudConfigError,
SaltCloudExecutionTimeout,
SaltCloudExecutionFailure
)
# Try to import PyCrypto, which may not be installed on a RAET-based system
try:
import Crypto
# PKCS1_v1_5 was added in PyCrypto 2.5
from Crypto.Cipher import PKCS1_v1_5 # pylint: disable=E0611
HAS_PYCRYPTO = True
except ImportError:
HAS_PYCRYPTO = False
# Get logging started
log = logging.getLogger(__name__)
# namespace libcloudfuncs
get_salt_interface = namespaced_function(get_salt_interface, globals())
# Map of human-readable (libcloud-style) size names to EC2 instance type ids.
SIZE_MAP = {
    'Micro Instance': 't1.micro',
    'Small Instance': 'm1.small',
    'Medium Instance': 'm1.medium',
    'Large Instance': 'm1.large',
    'Extra Large Instance': 'm1.xlarge',
    'High-CPU Medium Instance': 'c1.medium',
    'High-CPU Extra Large Instance': 'c1.xlarge',
    'High-Memory Extra Large Instance': 'm2.xlarge',
    'High-Memory Double Extra Large Instance': 'm2.2xlarge',
    'High-Memory Quadruple Extra Large Instance': 'm2.4xlarge',
    'Cluster GPU Quadruple Extra Large Instance': 'cg1.4xlarge',
    'Cluster Compute Quadruple Extra Large Instance': 'cc1.4xlarge',
    'Cluster Compute Eight Extra Large Instance': 'cc2.8xlarge',
}
# Map of EC2 region names to libcloud driver identifiers.
EC2_LOCATIONS = {
    'ap-northeast-1': 'ec2_ap_northeast',
    'ap-southeast-1': 'ec2_ap_southeast',
    'ap-southeast-2': 'ec2_ap_southeast_2',
    'eu-west-1': 'ec2_eu_west',
    'sa-east-1': 'ec2_sa_east',
    'us-east-1': 'ec2_us_east',
    'us-west-1': 'ec2_us_west',
    'us-west-2': 'ec2_us_west_oregon',
}
# Region used when no 'location' is configured anywhere.
DEFAULT_LOCATION = 'us-east-1'
# EC2 API version used unless the provider sets 'ec2_api_version'.
DEFAULT_EC2_API_VERSION = '2014-10-01'
# Error codes considered transient; query() retries requests that fail
# with one of these (e.g. throttling or temporary capacity problems).
EC2_RETRY_CODES = [
    'RequestLimitExceeded',
    'InsufficientInstanceCapacity',
    'InternalError',
    'Unavailable',
    'InsufficientAddressCapacity',
    'InsufficientReservedInstanceCapacity',
]
# Only load in this module if the EC2 configurations are in place
def __virtual__():
    '''
    Set up the libcloud functions and check for EC2 configurations.

    Returns False when no EC2 provider is configured.  For every
    configured EC2 provider, validates that the private key file exists
    and is only readable by its owner; raises SaltCloudException
    otherwise.
    '''
    if get_configured_provider() is False:
        return False

    for provider, details in six.iteritems(__opts__['providers']):
        if 'provider' not in details or details['provider'] != 'ec2':
            continue

        # The configured private key file must exist on disk.
        if not os.path.exists(details['private_key']):
            raise SaltCloudException(
                'The EC2 key file {0!r} used in the {1!r} provider '
                'configuration does not exist\n'.format(
                    details['private_key'],
                    provider
                )
            )

        # The key must be owner-read-only.  Python 2's oct() renders the
        # mode as '0400' while Python 3 renders it as '0o400'; accept
        # both spellings so valid keys are not rejected on Python 3.
        keymode = str(
            oct(stat.S_IMODE(os.stat(details['private_key']).st_mode))
        )
        if keymode not in ('0400', '0600', '0o400', '0o600'):
            raise SaltCloudException(
                'The EC2 key file {0!r} used in the {1!r} provider '
                'configuration needs to be set to mode 0400 or 0600\n'.format(
                    details['private_key'],
                    provider
                )
            )

    return True
def get_configured_provider():
    '''
    Return the first configured EC2 provider instance, or False when the
    required configuration keys are not all present.
    '''
    required_keys = ('id', 'key', 'keyname', 'private_key')
    provider_name = __active_provider_name__ or 'ec2'
    return config.is_provider_configured(__opts__, provider_name,
                                         required_keys)
def _child_count(node):
    '''
    Return the number of child elements of *node*.

    Pre-2.7 interpreters need the old ``getchildren()`` API; everywhere
    else ``len(node)`` works directly.
    '''
    if sys.version_info < (2, 7):
        return len(node.getchildren())
    return len(node)


def _xml_to_dict(xmltree):
    '''
    Convert an XML tree into a dict.

    Leaf elements map their (namespace-stripped) tag to their text.
    Repeated sibling tags are collected into a list; note that the first
    occurrence is stored as its text/dict value while later occurrences
    are stored as recursively-converted dicts, matching the historical
    behavior callers rely on.
    '''
    if _child_count(xmltree) < 1:
        name = xmltree.tag
        if '}' in name:
            # Strip the '{namespace}' prefix ElementTree prepends.
            name = name.split('}')[1]
        return {name: xmltree.text}

    xmldict = {}
    for item in xmltree:
        name = item.tag
        if '}' in name:
            name = name.split('}')[1]
        if name not in xmldict:
            if _child_count(item) > 0:
                xmldict[name] = _xml_to_dict(item)
            else:
                xmldict[name] = item.text
        else:
            # Second and later occurrences of a tag: switch to a list.
            if not isinstance(xmldict[name], list):
                xmldict[name] = [xmldict[name]]
            xmldict[name].append(_xml_to_dict(item))
    return xmldict
def optimize_providers(providers):
    '''
    Return a de-duplicated mapping of providers.

    Multiple provider entries that share both a region and a set of
    credentials would all return the same data from EC2, so only the
    first entry per (location, credentials) pair is kept; this avoids
    redundant API calls.  Entries without an explicit location are
    assigned DEFAULT_LOCATION (note: the input dicts are updated in
    place).
    '''
    by_location = {}
    deduped = {}

    for provider_name, provider_data in six.iteritems(providers):
        provider_data.setdefault('location', DEFAULT_LOCATION)
        location = provider_data['location']
        credentials = (provider_data['id'], provider_data['key'])
        bucket = by_location.setdefault(location, {})
        if credentials not in bucket:
            bucket[credentials] = {'name': provider_name,
                                   'data': provider_data,
                                   }

    for bucket in six.itervalues(by_location):
        for entry in six.itervalues(bucket):
            deduped.setdefault(entry['name'], entry['data'])

    return deduped
def query(params=None, setname=None, requesturl=None, location=None,
          return_url=False, return_root=False):
    '''
    Make a signed (signature version 2) query against the EC2 API.

    params      -- dict of API parameters (e.g. {'Action': 'DescribeRegions'})
    setname     -- name of the XML result-set element to extract
    requesturl  -- full URL to query; built from the provider endpoint
                   when omitted
    location    -- EC2 region; defaults to the configured location
    return_url  -- if True, return a (result, requesturl) tuple
    return_root -- if True, iterate the whole XML root instead of root[1]

    Returns a list of dicts parsed from the XML response, or a
    ``{'error': ...}`` dict (plus the URL when *return_url* is set) on
    failure.  Errors with a code in EC2_RETRY_CODES are retried up to
    five times with a short delay between attempts.
    '''
    provider = get_configured_provider()
    service_url = provider.get('service_url', 'amazonaws.com')

    attempts = 5
    while attempts > 0:
        params_with_headers = params.copy()
        timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')

        if not location:
            location = get_location()

        if not requesturl:
            endpoint = provider.get(
                'endpoint',
                'ec2.{0}.{1}'.format(location, service_url)
            )
            requesturl = 'https://{0}/'.format(endpoint)
        else:
            endpoint = _urlparse(requesturl).netloc
            if endpoint == '':
                endpoint_err = (
                    'Could not find a valid endpoint in the '
                    'requesturl: {0}. Looking for something '
                    'like https://some.ec2.endpoint/?args').format(requesturl)
                log.error(endpoint_err)
                if return_url is True:
                    return {'error': endpoint_err}, requesturl
                return {'error': endpoint_err}

        log.debug('Using EC2 endpoint: {0}'.format(endpoint))
        method = 'GET'

        ec2_api_version = provider.get(
            'ec2_api_version',
            DEFAULT_EC2_API_VERSION
        )

        params_with_headers['AWSAccessKeyId'] = provider['id']
        params_with_headers['SignatureVersion'] = '2'
        params_with_headers['SignatureMethod'] = 'HmacSHA256'
        params_with_headers['Timestamp'] = '{0}'.format(timestamp)
        params_with_headers['Version'] = ec2_api_version
        keys = sorted(params_with_headers)
        values = list(map(params_with_headers.get, keys))
        querystring = _urlencode(list(zip(keys, values)))

        # AWS signature version 2 requires that spaces be encoded as
        # %20, however urlencode uses '+'. So replace pluses with %20.
        querystring = querystring.replace('+', '%20')

        uri = '{0}\n{1}\n/\n{2}'.format(method.encode('utf-8'),
                                        endpoint.encode('utf-8'),
                                        querystring.encode('utf-8'))

        hashed = hmac.new(provider['key'], uri, hashlib.sha256)
        sig = binascii.b2a_base64(hashed.digest())
        params_with_headers['Signature'] = sig.strip()

        log.debug('EC2 Request: {0}'.format(requesturl))
        log.trace('EC2 Request Parameters: {0}'.format(params_with_headers))
        try:
            result = requests.get(requesturl, params=params_with_headers)
            log.debug(
                'EC2 Response Status Code: {0}'.format(
                    result.status_code
                )
            )
            log.trace(
                'EC2 Response Text: {0}'.format(
                    result.text
                )
            )
            result.raise_for_status()
            break
        except requests.exceptions.HTTPError as exc:
            root = ET.fromstring(exc.response.content)
            data = _xml_to_dict(root)

            # Check whether the failure is transient and worth retrying.
            err_code = data.get('Errors', {}).get('Error', {}).get('Code', '')
            attempts -= 1
            if err_code in EC2_RETRY_CODES and attempts > 0:
                log.error(
                    'EC2 Response Status Code and Error: [{0} {1}] {2}; '
                    'Attempts remaining: {3}'.format(
                        exc.response.status_code, exc, data, attempts
                    )
                )
                # Wait a bit before continuing to prevent throttling
                time.sleep(2)
                continue

            # Non-retryable error, or retries exhausted: return the error
            # instead of falling through and parsing the failed response
            # as if the request had succeeded.
            log.error(
                'EC2 Response Status Code and Error: [{0} {1}] {2}'.format(
                    exc.response.status_code, exc, data
                )
            )
            if return_url is True:
                return {'error': data}, requesturl
            return {'error': data}

    response = result.text

    root = ET.fromstring(response)
    items = root[1]
    if return_root is True:
        items = root

    if setname:
        if sys.version_info < (2, 7):
            children_len = len(root.getchildren())
        else:
            children_len = len(root)

        for item in range(0, children_len):
            comps = root[item].tag.split('}')
            if comps[1] == setname:
                items = root[item]

    ret = []
    for item in items:
        ret.append(_xml_to_dict(item))

    if return_url is True:
        return ret, requesturl

    return ret
def _wait_for_spot_instance(update_callback,
                            update_args=None,
                            update_kwargs=None,
                            timeout=10 * 60,
                            interval=30,
                            interval_multiplier=1,
                            max_failures=10):
    '''
    Poll until a spot instance request becomes active, or give up.

    update_callback -- callable that queries the cloud provider for the
                       spot request.  It returns None while the data
                       (running instance included) is not yet available,
                       False on a query failure, and the data itself
                       once available.
    update_args / update_kwargs -- arguments passed to the callback
    timeout -- maximum number of seconds to keep polling
    interval -- seconds to sleep between polls
    interval_multiplier -- grow the interval by this factor after each
                           poll (helps with API throttling)
    max_failures -- number of False results tolerated before giving up

    Returns whatever the callback returned.  Raises
    SaltCloudExecutionFailure after too many failed queries and
    SaltCloudExecutionTimeout when the timeout is exhausted.
    '''
    call_args = () if update_args is None else update_args
    call_kwargs = {} if update_kwargs is None else update_kwargs

    duration = timeout
    failures_left = max_failures
    while True:
        log.debug(
            'Waiting for spot instance reservation. Giving up in '
            '00:{0:02d}:{1:02d}'.format(
                int(timeout // 60),
                int(timeout % 60)
            )
        )

        data = update_callback(*call_args, **call_kwargs)
        if data is not None and data is not False:
            # The reservation is active; hand the data back.
            return data

        if data is False:
            log.debug(
                'update_callback has returned False which is considered a '
                'failure. Remaining Failures: {0}'.format(failures_left)
            )
            failures_left -= 1
            if failures_left <= 0:
                raise SaltCloudExecutionFailure(
                    'Too many failures occurred while waiting for '
                    'the spot instance reservation to become active.'
                )

        if timeout < 0:
            raise SaltCloudExecutionTimeout(
                'Unable to get an active spot instance request for '
                '00:{0:02d}:{1:02d}'.format(
                    int(duration // 60),
                    int(duration % 60)
                )
            )

        time.sleep(interval)
        timeout -= interval

        if interval_multiplier > 1:
            interval *= interval_multiplier
            if interval > timeout:
                interval = timeout + 1
            log.info('Interval multiplier in effect; interval is '
                     'now {0}s'.format(interval))
def avail_sizes(call=None):
    '''
    Return a dict of all available VM sizes on the cloud provider with
    relevant data. Latest version can be found at:
    http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html

    The data is a hard-coded catalogue keyed by instance family, then by
    instance type id; it is not fetched from the EC2 API.

    Raises SaltCloudSystemExit when invoked as an instance action.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )
    # Static catalogue: family name -> {type id -> {id, cores, disk, ram}}.
    sizes = {
        'Cluster Compute': {
            'cc2.8xlarge': {
                'id': 'cc2.8xlarge',
                'cores': '16 (2 x Intel Xeon E5-2670, eight-core with '
                         'hyperthread)',
                'disk': '3360 GiB (4 x 840 GiB)',
                'ram': '60.5 GiB'
            },
            'cc1.4xlarge': {
                'id': 'cc1.4xlarge',
                'cores': '8 (2 x Intel Xeon X5570, quad-core with '
                         'hyperthread)',
                'disk': '1690 GiB (2 x 840 GiB)',
                'ram': '22.5 GiB'
            },
        },
        'Cluster CPU': {
            'cg1.4xlarge': {
                'id': 'cg1.4xlarge',
                'cores': '8 (2 x Intel Xeon X5570, quad-core with '
                         'hyperthread), plus 2 NVIDIA Tesla M2050 GPUs',
                'disk': '1680 GiB (2 x 840 GiB)',
                'ram': '22.5 GiB'
            },
        },
        'High CPU': {
            'c1.xlarge': {
                'id': 'c1.xlarge',
                'cores': '8 (with 2.5 ECUs each)',
                'disk': '1680 GiB (4 x 420 GiB)',
                'ram': '8 GiB'
            },
            'c1.medium': {
                'id': 'c1.medium',
                'cores': '2 (with 2.5 ECUs each)',
                'disk': '340 GiB (1 x 340 GiB)',
                'ram': '1.7 GiB'
            },
            'c3.large': {
                'id': 'c3.large',
                'cores': '2 (with 3.5 ECUs each)',
                'disk': '32 GiB (2 x 16 GiB SSD)',
                'ram': '3.75 GiB'
            },
            'c3.xlarge': {
                'id': 'c3.xlarge',
                'cores': '4 (with 3.5 ECUs each)',
                'disk': '80 GiB (2 x 40 GiB SSD)',
                'ram': '7.5 GiB'
            },
            'c3.2xlarge': {
                'id': 'c3.2xlarge',
                'cores': '8 (with 3.5 ECUs each)',
                'disk': '160 GiB (2 x 80 GiB SSD)',
                'ram': '15 GiB'
            },
            'c3.4xlarge': {
                'id': 'c3.4xlarge',
                'cores': '16 (with 3.5 ECUs each)',
                'disk': '320 GiB (2 x 80 GiB SSD)',
                'ram': '30 GiB'
            },
            'c3.8xlarge': {
                'id': 'c3.8xlarge',
                'cores': '32 (with 3.5 ECUs each)',
                'disk': '320 GiB (2 x 160 GiB SSD)',
                'ram': '60 GiB'
            }
        },
        'High I/O': {
            'hi1.4xlarge': {
                'id': 'hi1.4xlarge',
                'cores': '8 (with 4.37 ECUs each)',
                'disk': '2 TiB',
                'ram': '60.5 GiB'
            },
        },
        'High Memory': {
            'm2.2xlarge': {
                'id': 'm2.2xlarge',
                'cores': '4 (with 3.25 ECUs each)',
                'disk': '840 GiB (1 x 840 GiB)',
                'ram': '34.2 GiB'
            },
            'm2.xlarge': {
                'id': 'm2.xlarge',
                'cores': '2 (with 3.25 ECUs each)',
                'disk': '410 GiB (1 x 410 GiB)',
                'ram': '17.1 GiB'
            },
            'm2.4xlarge': {
                'id': 'm2.4xlarge',
                'cores': '8 (with 3.25 ECUs each)',
                'disk': '1680 GiB (2 x 840 GiB)',
                'ram': '68.4 GiB'
            },
            'r3.large': {
                'id': 'r3.large',
                'cores': '2 (with 3.25 ECUs each)',
                'disk': '32 GiB (1 x 32 GiB SSD)',
                'ram': '15 GiB'
            },
            'r3.xlarge': {
                'id': 'r3.xlarge',
                'cores': '4 (with 3.25 ECUs each)',
                'disk': '80 GiB (1 x 80 GiB SSD)',
                'ram': '30.5 GiB'
            },
            'r3.2xlarge': {
                'id': 'r3.2xlarge',
                'cores': '8 (with 3.25 ECUs each)',
                'disk': '160 GiB (1 x 160 GiB SSD)',
                'ram': '61 GiB'
            },
            'r3.4xlarge': {
                'id': 'r3.4xlarge',
                'cores': '16 (with 3.25 ECUs each)',
                'disk': '320 GiB (1 x 320 GiB SSD)',
                'ram': '122 GiB'
            },
            'r3.8xlarge': {
                'id': 'r3.8xlarge',
                'cores': '32 (with 3.25 ECUs each)',
                'disk': '640 GiB (2 x 320 GiB SSD)',
                'ram': '244 GiB'
            }
        },
        'High-Memory Cluster': {
            'cr1.8xlarge': {
                'id': 'cr1.8xlarge',
                'cores': '16 (2 x Intel Xeon E5-2670, eight-core)',
                'disk': '240 GiB (2 x 120 GiB SSD)',
                'ram': '244 GiB'
            },
        },
        'High Storage': {
            'hs1.8xlarge': {
                'id': 'hs1.8xlarge',
                'cores': '16 (8 cores + 8 hyperthreads)',
                'disk': '48 TiB (24 x 2 TiB hard disk drives)',
                'ram': '117 GiB'
            },
        },
        'Micro': {
            't1.micro': {
                'id': 't1.micro',
                'cores': '1',
                'disk': 'EBS',
                'ram': '615 MiB'
            },
        },
        'Standard': {
            'm1.xlarge': {
                'id': 'm1.xlarge',
                'cores': '4 (with 2 ECUs each)',
                'disk': '1680 GB (4 x 420 GiB)',
                'ram': '15 GiB'
            },
            'm1.large': {
                'id': 'm1.large',
                'cores': '2 (with 2 ECUs each)',
                'disk': '840 GiB (2 x 420 GiB)',
                'ram': '7.5 GiB'
            },
            'm1.medium': {
                'id': 'm1.medium',
                'cores': '1',
                'disk': '400 GiB',
                'ram': '3.75 GiB'
            },
            'm1.small': {
                'id': 'm1.small',
                'cores': '1',
                'disk': '150 GiB',
                'ram': '1.7 GiB'
            },
            'm3.2xlarge': {
                'id': 'm3.2xlarge',
                'cores': '8 (with 3.25 ECUs each)',
                'disk': 'EBS',
                'ram': '30 GiB'
            },
            'm3.xlarge': {
                'id': 'm3.xlarge',
                'cores': '4 (with 3.25 ECUs each)',
                'disk': 'EBS',
                'ram': '15 GiB'
            },
        }
    }
    return sizes
def avail_images(kwargs=None, call=None):
    '''
    Return a dict of all available VM images on the cloud provider,
    keyed by image id.  The image owner defaults to the provider's
    configured ``owner`` setting (or 'amazon'), and can be overridden
    via ``kwargs['owner']``.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    if 'owner' in kwargs:
        owner = kwargs['owner']
    else:
        owner = config.get_cloud_config_value(
            'owner', get_configured_provider(), __opts__, default='amazon'
        )

    query_params = {'Action': 'DescribeImages',
                    'Owner': owner}
    images = aws.query(query_params,
                       location=get_location(),
                       provider=get_provider(),
                       opts=__opts__,
                       sigver='4')

    return dict((image['imageId'], image) for image in images)
def script(vm_):
    '''
    Return the script deployment object for the given VM profile.

    Builds the OS deployment script named by the 'script' cloud config
    value, passing the minion configuration rendered as YAML.
    '''
    return salt.utils.cloud.os_script(
        config.get_cloud_config_value('script', vm_, __opts__),
        vm_,
        __opts__,
        salt.utils.cloud.salt_config_to_yaml(
            salt.utils.cloud.minion_config(__opts__, vm_)
        )
    )
def keyname(vm_):
    '''
    Return the 'keyname' (EC2 SSH key pair name) from the VM's cloud
    configuration.  Not searched globally.
    '''
    return config.get_cloud_config_value(
        'keyname', vm_, __opts__, search_global=False
    )
def securitygroup(vm_):
    '''
    Return the 'securitygroup' value from the VM's cloud configuration.
    Not searched globally.
    '''
    return config.get_cloud_config_value(
        'securitygroup', vm_, __opts__, search_global=False
    )
def iam_profile(vm_):
    '''
    Return the IAM profile configured for the VM (cloud config value
    'iam_profile'; not searched globally).

    The IAM instance profile to associate with the instances.
    This is either the Amazon Resource Name (ARN) of the instance profile
    or the name of the role.

    Type: String

    Default: None

    Required: No

    Example: arn:aws:iam::111111111111:instance-profile/s3access

    Example: s3access

    '''
    return config.get_cloud_config_value(
        'iam_profile', vm_, __opts__, search_global=False
    )
def ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.

    Read from the VM's cloud configuration; not searched globally.
    '''
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
def get_ssh_gateway_config(vm_):
    '''
    Return the ssh_gateway configuration dict for *vm_*, or None when no
    SSH gateway is configured.

    The returned dict contains the keys ``ssh_gateway``,
    ``ssh_gateway_port``, ``ssh_gateway_user``, ``ssh_gateway_key`` and
    ``ssh_gateway_password``.

    Raises SaltCloudConfigError when the configured private key file
    does not exist, or when neither a private key nor a password is
    supplied.
    '''
    ssh_gateway = config.get_cloud_config_value(
        'ssh_gateway', vm_, __opts__, default=None,
        search_global=False
    )

    # Check to see if a SSH Gateway will be used.  Use six.string_types
    # so that unicode values (e.g. loaded from YAML on Python 2) are
    # accepted as well as byte strings.
    if not isinstance(ssh_gateway, six.string_types):
        return None

    # Create dictionary of configuration items

    # ssh_gateway
    ssh_gateway_config = {'ssh_gateway': ssh_gateway}

    # ssh_gateway_port
    ssh_gateway_config['ssh_gateway_port'] = config.get_cloud_config_value(
        'ssh_gateway_port', vm_, __opts__, default=None,
        search_global=False
    )

    # ssh_gateway_username
    ssh_gateway_config['ssh_gateway_user'] = config.get_cloud_config_value(
        'ssh_gateway_username', vm_, __opts__, default=None,
        search_global=False
    )

    # ssh_gateway_private_key
    ssh_gateway_config['ssh_gateway_key'] = config.get_cloud_config_value(
        'ssh_gateway_private_key', vm_, __opts__, default=None,
        search_global=False
    )

    # ssh_gateway_password
    ssh_gateway_config['ssh_gateway_password'] = config.get_cloud_config_value(
        'ssh_gateway_password', vm_, __opts__, default=None,
        search_global=False
    )

    # Check if private key exists
    key_filename = ssh_gateway_config['ssh_gateway_key']
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_gateway_private_key {0!r} does not exist'.format(
                key_filename
            )
        )
    elif (
        key_filename is None and
        not ssh_gateway_config['ssh_gateway_password']
    ):
        raise SaltCloudConfigError(
            'No authentication method. Please define: '
            ' ssh_gateway_password or ssh_gateway_private_key'
        )

    return ssh_gateway_config
def get_location(vm_=None):
    '''
    Return the EC2 region to use, in this order:
        - CLI parameter
        - VM parameter
        - Cloud profile setting
    '''
    profile_location = config.get_cloud_config_value(
        'location',
        vm_ or get_configured_provider(),
        __opts__,
        default=DEFAULT_LOCATION,
        search_global=False
    )
    # A 'location' passed on the CLI (stored in __opts__) wins.
    return __opts__.get('location', profile_location)
def avail_locations(call=None):
    '''
    List all available locations (EC2 regions), keyed by region name.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    regions = aws.query({'Action': 'DescribeRegions'},
                        location=get_location(),
                        provider=get_provider(),
                        opts=__opts__,
                        sigver='4')

    return dict(
        (region['regionName'],
         {'name': region['regionName'],
          'endpoint': region['regionEndpoint']})
        for region in regions
    )
def get_availability_zone(vm_):
    '''
    Return the availability zone to use for *vm_*, or None when none is
    configured.  Raises SaltCloudException when the configured zone is
    unknown in this region or not currently available.
    '''
    avz = config.get_cloud_config_value(
        'availability_zone', vm_, __opts__, search_global=False
    )
    if avz is None:
        return None

    zones = _list_availability_zones()

    # Validate user-specified AZ
    if avz not in zones:
        raise SaltCloudException(
            'The specified availability zone isn\'t valid in this region: '
            '{0}\n'.format(
                avz
            )
        )

    # The zone exists in this region; make sure it is currently usable.
    if zones[avz] != 'available':
        raise SaltCloudException(
            'The specified availability zone isn\'t currently available: '
            '{0}\n'.format(
                avz
            )
        )

    return avz
def get_tenancy(vm_):
    '''
    Returns the Tenancy to use.

    Can be "dedicated" or "default". Cannot be present for spot instances.

    Read from the VM's cloud configuration ('tenancy'); not searched
    globally.
    '''
    return config.get_cloud_config_value(
        'tenancy', vm_, __opts__, search_global=False
    )
def get_subnetid(vm_):
    '''
    Returns the SubnetId to use, read from the VM's cloud configuration
    ('subnetid').  Not searched globally.
    '''
    return config.get_cloud_config_value(
        'subnetid', vm_, __opts__, search_global=False
    )
def securitygroupid(vm_):
    '''
    Returns the SecurityGroupId, read from the VM's cloud configuration
    ('securitygroupid').  Not searched globally.
    '''
    return config.get_cloud_config_value(
        'securitygroupid', vm_, __opts__, search_global=False
    )
def get_placementgroup(vm_):
    '''
    Returns the PlacementGroup to use, read from the VM's cloud
    configuration ('placementgroup').  Not searched globally.
    '''
    return config.get_cloud_config_value(
        'placementgroup', vm_, __opts__, search_global=False
    )
def get_spot_config(vm_):
    '''
    Returns the spot instance configuration for the provided vm, read
    from the VM's cloud configuration ('spot_config').  Not searched
    globally.
    '''
    return config.get_cloud_config_value(
        'spot_config', vm_, __opts__, search_global=False
    )
def get_provider(vm_=None):
    '''
    Extract the provider name from a vm dict, falling back to the
    active provider (or 'ec2') when no vm is given.  A colon-separated
    'alias:driver' value is reduced to its first component.
    '''
    if vm_ is None:
        provider = __active_provider_name__ or 'ec2'
    else:
        provider = vm_.get('provider', 'ec2')

    # 'alias:driver' -> 'alias'; plain names pass through unchanged.
    return provider.split(':')[0]
def _list_availability_zones():
    '''
    Return a dict mapping each availability zone name in the current
    region to its state (e.g. 'available').
    '''
    query_params = {'Action': 'DescribeAvailabilityZones',
                    'Filter.0.Name': 'region-name',
                    'Filter.0.Value.0': get_location()}
    zones = aws.query(query_params,
                      location=get_location(),
                      provider=get_provider(),
                      opts=__opts__,
                      sigver='4')

    return dict((zone['zoneName'], zone['zoneState']) for zone in zones)
def block_device_mappings(vm_):
    '''
    Return the block device mapping from the cloud configuration
    (searched globally):

    .. code-block:: python

        [{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
          {'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}]
    '''
    return config.get_cloud_config_value(
        'block_device_mappings', vm_, __opts__, search_global=True
    )
def _request_eip(interface):
    '''
    Request an Elastic IP and return its allocationId, or None when the
    API response contains no allocation.
    '''
    params = {'Action': 'AllocateAddress'}
    # NOTE: setdefault also writes 'domain' back into the caller's
    # interface dict when the key was missing.
    params['Domain'] = interface.setdefault('domain', 'vpc')
    eips = aws.query(params,
                     return_root=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    for eip in eips:
        if 'allocationId' in eip:
            return eip['allocationId']
    # No allocationId element found in the response.
    return None
def _create_eni(interface):
    '''
    Create and return an Elastic Network Interface.

    Validates that the requested SubnetId exists, creates the interface,
    and then optionally associates Elastic IPs with it according to the
    ``associate_eip``, ``allocate_new_eip`` and ``allocate_new_eips``
    keys of the interface profile.

    Returns a dict with ``DeviceIndex`` and ``NetworkInterfaceId`` keys,
    suitable for use as EC2 request parameters.

    Raises SaltCloudConfigError when the subnet does not exist and
    SaltCloudException when interface creation fails.
    '''
    params = {'Action': 'DescribeSubnets'}
    subnet_query = aws.query(params,
                             return_root=True,
                             location=get_location(),
                             provider=get_provider(),
                             opts=__opts__,
                             sigver='4')
    found = False
    for subnet_query_result in subnet_query:
        if 'item' in subnet_query_result:
            subnets = subnet_query_result['item']
            # FIX: a single subnet is returned as a plain dict rather than
            # a list (same normalization performed in _update_enis);
            # iterating the dict directly would yield string keys.
            if not isinstance(subnets, list):
                subnets = [subnets]
            for subnet in subnets:
                if subnet['subnetId'] == interface['SubnetId']:
                    found = True
                    break
        if found:
            break
    if not found:
        raise SaltCloudConfigError(
            'No such subnet <{0}>'.format(interface['SubnetId'])
        )
    params = {'Action': 'CreateNetworkInterface',
              'SubnetId': interface['SubnetId']}
    # Scalar options copy straight through ...
    for k in ('Description', 'PrivateIpAddress',
              'SecondaryPrivateIpAddressCount'):
        if k in interface:
            params[k] = interface[k]
    # ... while list/dict options are flattened to dotted API parameters.
    for k in ('PrivateIpAddresses', 'SecurityGroupId'):
        if k in interface:
            params.update(_param_from_config(k, interface[k]))
    result = aws.query(params,
                       return_root=True,
                       location=get_location(),
                       provider=get_provider(),
                       opts=__opts__,
                       sigver='4')
    # NOTE(review): result[1] assumes a fixed positional layout from
    # aws.query with return_root=True -- confirm against the helper.
    eni_desc = result[1]
    if not eni_desc or not eni_desc.get('networkInterfaceId'):
        raise SaltCloudException('Failed to create interface: {0}'.format(result))
    eni_id = eni_desc.get('networkInterfaceId')
    log.debug(
        'Created network interface {0} inst {1}'.format(
            eni_id, interface['DeviceIndex']
        )
    )
    if interface.get('associate_eip'):
        # Associate a pre-existing Elastic IP with the new interface
        _associate_eip_with_interface(eni_id, interface.get('associate_eip'))
    elif interface.get('allocate_new_eip'):
        # Allocate one fresh Elastic IP and attach it
        _new_eip = _request_eip(interface)
        _associate_eip_with_interface(eni_id, _new_eip)
    elif interface.get('allocate_new_eips'):
        # Allocate one fresh Elastic IP per private address on the ENI
        addr_list = _list_interface_private_addresses(eni_desc)
        eip_list = []
        for idx, addr in enumerate(addr_list):
            eip_list.append(_request_eip(interface))
        for idx, addr in enumerate(addr_list):
            _associate_eip_with_interface(eni_id, eip_list[idx], addr)
    return {'DeviceIndex': interface['DeviceIndex'],
            'NetworkInterfaceId': eni_id}
def _list_interface_private_addresses(eni_desc):
'''
Returns a list of all of the private IP addresses attached to a
network interface. The 'primary' address will be listed first.
'''
primary = eni_desc.get('privateIpAddress')
if not primary:
return None
addresses = [primary]
lst = eni_desc.get('privateIpAddressesSet', {}).get('item', [])
if not isinstance(lst, list):
return addresses
for entry in lst:
if entry.get('primary') == 'true':
continue
if entry.get('privateIpAddress'):
addresses.append(entry.get('privateIpAddress'))
return addresses
def _associate_eip_with_interface(eni_id, eip_id, private_ip=None):
    '''
    Accept the id of a network interface, and the id of an elastic ip
    address, and associate the two of them, such that traffic sent to the
    elastic ip address will be forwarded (NATted) to this network interface.

    Optionally specify the private (10.x.x.x) IP address that traffic should
    be NATted to - useful if you have multiple IP addresses assigned to an
    interface.

    Retries the AssociateAddress call up to 5 times, sleeping 1 second
    after each error response. Returns the associationId on success;
    raises SaltCloudException when all attempts fail or the response
    carries no associationId.
    '''
    retries = 5
    while retries > 0:
        params = {'Action': 'AssociateAddress',
                  'NetworkInterfaceId': eni_id,
                  'AllocationId': eip_id}
        if private_ip:
            params['PrivateIpAddress'] = private_ip
        retries = retries - 1
        result = aws.query(params,
                           return_root=True,
                           location=get_location(),
                           provider=get_provider(),
                           opts=__opts__,
                           sigver='4')
        # Error response: back off briefly and retry (until retries run out)
        if isinstance(result, dict) and result.get('error'):
            time.sleep(1)
            continue
        # NOTE(review): result[2] assumes a fixed positional layout from
        # aws.query with return_root=True -- confirm against the helper.
        # No associationId in the response: give up and raise below.
        if not result[2].get('associationId'):
            break
        log.debug(
            'Associated ElasticIP address {0} with interface {1}'.format(
                eip_id, eni_id
            )
        )
        return result[2].get('associationId')
    # Reached on break above or when retries are exhausted
    raise SaltCloudException(
        'Could not associate elastic ip address '
        '<{0}> with network interface <{1}>'.format(
            eip_id, eni_id
        )
    )
def _update_enis(interfaces, instance):
    '''
    Update the attachment attributes of an instance's Elastic Network
    Interfaces from the ``network_interfaces`` profile entries.

    For every ENI attached to the instance, sets DeleteOnTermination
    from the matching profile entry's ``delete_interface_on_terminate``
    key (defaulting the key to True in place).

    Returns None. Logs an error and aborts when the profile contains a
    duplicate DeviceIndex.
    '''
    config_enis = {}
    instance_enis = []
    for interface in interfaces:
        if 'DeviceIndex' in interface:
            # FIX: normalize to str before the duplicate check as well.
            # The previous code compared the raw (possibly int) value
            # against str() keys, so duplicate int indices were never
            # detected. AWS reports deviceIndex as a string.
            device_index = str(interface['DeviceIndex'])
            if device_index in config_enis:
                log.error(
                    'Duplicate DeviceIndex in profile. Cannot update ENIs.'
                )
                return None
            config_enis[device_index] = interface
    # A single attached ENI comes back as a dict rather than a list
    query_enis = instance[0]['instancesSet']['item']['networkInterfaceSet']['item']
    if not isinstance(query_enis, list):
        query_enis = [query_enis]
    for query_eni in query_enis:
        instance_enis.append((query_eni['networkInterfaceId'], query_eni['attachment']))
    for eni_id, eni_data in instance_enis:
        delete_on_terminate = config_enis[eni_data['deviceIndex']].setdefault(
            'delete_interface_on_terminate', True
        )
        params = {'Action': 'ModifyNetworkInterfaceAttribute',
                  'NetworkInterfaceId': eni_id,
                  'Attachment.AttachmentId': eni_data['attachmentId'],
                  'Attachment.DeleteOnTermination': delete_on_terminate}
        # Fire-and-forget; the response was never inspected before either.
        aws.query(params,
                  return_root=True,
                  location=get_location(),
                  provider=get_provider(),
                  opts=__opts__,
                  sigver='4')
    return None
def _param_from_config(key, data):
'''
Return EC2 API parameters based on the given config data.
Examples:
1. List of dictionaries
>>> data = [
... {'DeviceIndex': 0, 'SubnetId': 'subid0',
... 'AssociatePublicIpAddress': True},
... {'DeviceIndex': 1,
... 'SubnetId': 'subid1',
... 'PrivateIpAddress': '192.168.1.128'}
... ]
>>> _param_from_config('NetworkInterface', data)
... {'NetworkInterface.0.SubnetId': 'subid0',
... 'NetworkInterface.0.DeviceIndex': 0,
... 'NetworkInterface.1.SubnetId': 'subid1',
... 'NetworkInterface.1.PrivateIpAddress': '192.168.1.128',
... 'NetworkInterface.0.AssociatePublicIpAddress': 'true',
... 'NetworkInterface.1.DeviceIndex': 1}
2. List of nested dictionaries
>>> data = [
... {'DeviceName': '/dev/sdf',
... 'Ebs': {
... 'SnapshotId': 'dummy0',
... 'VolumeSize': 200,
... 'VolumeType': 'standard'}},
... {'DeviceName': '/dev/sdg',
... 'Ebs': {
... 'SnapshotId': 'dummy1',
... 'VolumeSize': 100,
... 'VolumeType': 'standard'}}
... ]
>>> _param_from_config('BlockDeviceMapping', data)
... {'BlockDeviceMapping.0.Ebs.VolumeType': 'standard',
... 'BlockDeviceMapping.1.Ebs.SnapshotId': 'dummy1',
... 'BlockDeviceMapping.0.Ebs.VolumeSize': 200,
... 'BlockDeviceMapping.0.Ebs.SnapshotId': 'dummy0',
... 'BlockDeviceMapping.1.Ebs.VolumeType': 'standard',
... 'BlockDeviceMapping.1.DeviceName': '/dev/sdg',
... 'BlockDeviceMapping.1.Ebs.VolumeSize': 100,
... 'BlockDeviceMapping.0.DeviceName': '/dev/sdf'}
3. Dictionary of dictionaries
>>> data = { 'Arn': 'dummyarn', 'Name': 'Tester' }
>>> _param_from_config('IamInstanceProfile', data)
{'IamInstanceProfile.Arn': 'dummyarn', 'IamInstanceProfile.Name': 'Tester'}
'''
param = {}
if isinstance(data, dict):
for k, v in data.items():
param.update(_param_from_config('{0}.{1}'.format(key, k), v))
elif isinstance(data, list) or isinstance(data, tuple):
for idx, conf_item in enumerate(data):
prefix = '{0}.{1}'.format(key, idx)
param.update(_param_from_config(prefix, conf_item))
else:
if isinstance(data, bool):
# convert boolean Trur/False to 'true'/'false'
param.update({key: str(data).lower()})
else:
param.update({key: data})
return param
def request_instance(vm_=None, call=None):
    '''
    Put together all of the information necessary to request an instance
    on EC2, and then fire off the request for the instance.

    Builds either a RunInstances or (when ``spot_config`` is present) a
    RequestSpotInstances parameter set from the VM/profile configuration,
    submits it, and -- for spot requests -- waits for the request to
    become active before returning.

    Returns a tuple of (data, vm_) where data is the instancesSet payload
    from EC2, or an error dict/string on failure.
    '''
    if call == 'function':
        # Technically this function may be called other ways too, but it
        # definitely cannot be called with --function.
        raise SaltCloudSystemExit(
            'The request_instance action must be called with -a or --action.'
        )

    location = vm_.get('location', get_location(vm_))

    # do we launch a regular vm or a spot instance?
    # see http://goo.gl/hYZ13f for more information on EC2 API
    spot_config = get_spot_config(vm_)
    if spot_config is not None:
        if 'spot_price' not in spot_config:
            raise SaltCloudSystemExit(
                'Spot instance config for {0} requires a spot_price '
                'attribute.'.format(vm_['name'])
            )

        params = {'Action': 'RequestSpotInstances',
                  'InstanceCount': '1',
                  'Type': spot_config['type']
                  if 'type' in spot_config else 'one-time',
                  'SpotPrice': spot_config['spot_price']}

        # All of the necessary launch parameters for a VM when using
        # spot instances are the same except for the prefix below
        # being tacked on.
        spot_prefix = 'LaunchSpecification.'

    # regular EC2 instance
    else:
        # WARNING! EXPERIMENTAL!
        # This allows more than one instance to be spun up in a single call.
        # The first instance will be called by the name provided, but all other
        # instances will be nameless (or more specifically, they will use the
        # InstanceId as the name). This interface is expected to change, so
        # use at your own risk.
        min_instance = config.get_cloud_config_value(
            'min_instance', vm_, __opts__, search_global=False, default=1
        )
        max_instance = config.get_cloud_config_value(
            'max_instance', vm_, __opts__, search_global=False, default=1
        )
        params = {'Action': 'RunInstances',
                  'MinCount': min_instance,
                  'MaxCount': max_instance}

        # Normal instances should have no prefix.
        spot_prefix = ''

    image_id = vm_['image']
    params[spot_prefix + 'ImageId'] = image_id

    # FIX: initialize userdata so it is always bound. Previously, when a
    # userdata_file was configured but did not exist on disk, the
    # 'if userdata is not None' check below raised NameError.
    userdata = None
    userdata_file = config.get_cloud_config_value(
        'userdata_file', vm_, __opts__, search_global=False, default=None
    )
    if userdata_file is None:
        userdata = config.get_cloud_config_value(
            'userdata', vm_, __opts__, search_global=False, default=None
        )
    else:
        log.trace('userdata_file: {0}'.format(userdata_file))
        if os.path.exists(userdata_file):
            with salt.utils.fopen(userdata_file, 'r') as fh_:
                userdata = fh_.read()

    if userdata is not None:
        params['UserData'] = base64.b64encode(userdata)

    vm_size = config.get_cloud_config_value(
        'size', vm_, __opts__, search_global=False
    )
    params[spot_prefix + 'InstanceType'] = vm_size

    ex_keyname = keyname(vm_)
    if ex_keyname:
        params[spot_prefix + 'KeyName'] = ex_keyname

    ex_securitygroup = securitygroup(vm_)
    if ex_securitygroup:
        if not isinstance(ex_securitygroup, list):
            params[spot_prefix + 'SecurityGroup.1'] = ex_securitygroup
        else:
            # NOTE(review): the list case indexes from 0 while the scalar
            # case uses '.1' -- confirm the EC2 API accepts 0-based list
            # parameter indices before changing this.
            for counter, sg_ in enumerate(ex_securitygroup):
                params[spot_prefix + 'SecurityGroup.{0}'.format(counter)] = sg_

    ex_iam_profile = iam_profile(vm_)
    if ex_iam_profile:
        try:
            # ARNs are passed via .Arn, plain names via .Name
            if ex_iam_profile.startswith('arn:aws:iam:'):
                params[
                    spot_prefix + 'IamInstanceProfile.Arn'
                ] = ex_iam_profile
            else:
                params[
                    spot_prefix + 'IamInstanceProfile.Name'
                ] = ex_iam_profile
        except AttributeError:
            raise SaltCloudConfigError(
                '\'iam_profile\' should be a string value.'
            )

    az_ = get_availability_zone(vm_)
    if az_ is not None:
        params[spot_prefix + 'Placement.AvailabilityZone'] = az_

    tenancy_ = get_tenancy(vm_)
    if tenancy_ is not None:
        if spot_config is not None:
            raise SaltCloudConfigError(
                'Spot instance config for {0} does not support '
                'specifying tenancy.'.format(vm_['name'])
            )
        params['Placement.Tenancy'] = tenancy_

    subnetid_ = get_subnetid(vm_)
    if subnetid_ is not None:
        params[spot_prefix + 'SubnetId'] = subnetid_

    ex_securitygroupid = securitygroupid(vm_)
    if ex_securitygroupid:
        if not isinstance(ex_securitygroupid, list):
            params[spot_prefix + 'SecurityGroupId.1'] = ex_securitygroupid
        else:
            for (counter, sg_) in enumerate(ex_securitygroupid):
                params[
                    spot_prefix + 'SecurityGroupId.{0}'.format(counter)
                ] = sg_

    placementgroup_ = get_placementgroup(vm_)
    if placementgroup_ is not None:
        params[spot_prefix + 'Placement.GroupName'] = placementgroup_

    ex_blockdevicemappings = block_device_mappings(vm_)
    if ex_blockdevicemappings:
        params.update(_param_from_config(spot_prefix + 'BlockDeviceMapping',
                                         ex_blockdevicemappings))

    network_interfaces = config.get_cloud_config_value(
        'network_interfaces',
        vm_,
        __opts__,
        search_global=False
    )

    if network_interfaces:
        # Create each configured ENI up front, then reference them by id
        eni_devices = []
        for interface in network_interfaces:
            log.debug('Create network interface: {0}'.format(interface))
            _new_eni = _create_eni(interface)
            eni_devices.append(_new_eni)
        params.update(_param_from_config(spot_prefix + 'NetworkInterface',
                                         eni_devices))

    set_ebs_optimized = config.get_cloud_config_value(
        'ebs_optimized', vm_, __opts__, search_global=False
    )

    if set_ebs_optimized is not None:
        if not isinstance(set_ebs_optimized, bool):
            raise SaltCloudConfigError(
                '\'ebs_optimized\' should be a boolean value.'
            )
        params[spot_prefix + 'EbsOptimized'] = set_ebs_optimized

    set_del_root_vol_on_destroy = config.get_cloud_config_value(
        'del_root_vol_on_destroy', vm_, __opts__, search_global=False
    )

    if set_del_root_vol_on_destroy is not None:
        if not isinstance(set_del_root_vol_on_destroy, bool):
            raise SaltCloudConfigError(
                '\'del_root_vol_on_destroy\' should be a boolean value.'
            )

    vm_['set_del_root_vol_on_destroy'] = set_del_root_vol_on_destroy

    if set_del_root_vol_on_destroy:
        # first make sure to look up the root device name
        # as Ubuntu and CentOS (and most likely other OSs)
        # use different device identifiers
        log.info('Attempting to look up root device name for image id {0} on '
                 'VM {1}'.format(image_id, vm_['name']))

        rd_params = {
            'Action': 'DescribeImages',
            'ImageId.1': image_id
        }
        try:
            rd_data = aws.query(rd_params,
                                return_root=True,
                                location=get_location(),
                                provider=get_provider(),
                                opts=__opts__,
                                sigver='4')
            if 'error' in rd_data:
                return rd_data['error']
            log.debug('EC2 Response: {0!r}'.format(rd_data))
        except Exception as exc:
            log.error(
                'Error getting root device name for image id {0} for '
                'VM {1}: \n{2}'.format(image_id, vm_['name'], exc),
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG
            )
            raise

        # make sure we have a response
        if not rd_data:
            err_msg = 'There was an error querying EC2 for the root device ' \
                      'of image id {0}. Empty response.'.format(image_id)
            raise SaltCloudSystemExit(err_msg)

        # pull the root device name from the result and use it when
        # launching the new VM
        rd_name = None
        if 'blockDeviceMapping' in rd_data[0]:
            if rd_data[0]['blockDeviceMapping'] is None:
                # Some ami instances do not have a root volume. Ignore such cases
                rd_name = None
            elif isinstance(rd_data[0]['blockDeviceMapping']['item'], list):
                rd_name = rd_data[0]['blockDeviceMapping']['item'][0]['deviceName']
            else:
                rd_name = rd_data[0]['blockDeviceMapping']['item']['deviceName']
            log.info('Found root device name: {0}'.format(rd_name))

        if rd_name is not None:
            if ex_blockdevicemappings:
                dev_list = [
                    dev['DeviceName'] for dev in ex_blockdevicemappings
                ]
            else:
                dev_list = []

            if rd_name in dev_list:
                # Reuse the existing mapping entry for the root device
                dev_index = dev_list.index(rd_name)
                termination_key = '{0}BlockDeviceMapping.{1}.Ebs.DeleteOnTermination'.format(spot_prefix, dev_index)
                params[termination_key] = str(set_del_root_vol_on_destroy).lower()
            else:
                # Append a new mapping entry for the root device
                dev_index = len(dev_list)
                params[
                    '{0}BlockDeviceMapping.{1}.DeviceName'.format(
                        spot_prefix, dev_index
                    )
                ] = rd_name
                params[
                    '{0}BlockDeviceMapping.{1}.Ebs.DeleteOnTermination'.format(
                        spot_prefix, dev_index
                    )
                ] = str(set_del_root_vol_on_destroy).lower()

    set_del_all_vols_on_destroy = config.get_cloud_config_value(
        'del_all_vols_on_destroy', vm_, __opts__, search_global=False, default=False
    )

    if set_del_all_vols_on_destroy is not None:
        if not isinstance(set_del_all_vols_on_destroy, bool):
            raise SaltCloudConfigError(
                '\'del_all_vols_on_destroy\' should be a boolean value.'
            )

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {'kwargs': params, 'location': location},
        transport=__opts__['transport']
    )

    provider = get_provider(vm_)

    try:
        data = aws.query(params,
                         'instancesSet',
                         location=location,
                         provider=provider,
                         opts=__opts__,
                         sigver='4')
        if 'error' in data:
            return data['error']
    except Exception as exc:
        log.error(
            'Error creating {0} on EC2 when trying to run the initial '
            'deployment: \n{1}'.format(
                vm_['name'], exc
            ),
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        raise

    # if we're using spot instances, we need to wait for the spot request
    # to become active before we continue
    if spot_config:
        sir_id = data[0]['spotInstanceRequestId']

        def __query_spot_instance_request(sir_id, location):
            # Poll helper for _wait_for_spot_instance: returns the data on
            # 'active', None while 'open', and False on error/terminal state.
            params = {'Action': 'DescribeSpotInstanceRequests',
                      'SpotInstanceRequestId.1': sir_id}
            data = aws.query(params,
                             location=location,
                             provider=provider,
                             opts=__opts__,
                             sigver='4')
            if not data:
                log.error(
                    'There was an error while querying EC2. Empty response'
                )
                # Trigger a failure in the wait for spot instance method
                return False

            if isinstance(data, dict) and 'error' in data:
                log.warn(
                    'There was an error in the query. {0}'
                    .format(data['error'])
                )
                # Trigger a failure in the wait for spot instance method
                return False

            log.debug('Returned query data: {0}'.format(data))

            if 'state' in data[0]:
                state = data[0]['state']

                if state == 'active':
                    return data

                if state == 'open':
                    # Still waiting for an active state
                    log.info('Spot instance status: {0}'.format(
                        data[0]['status']['message']
                    ))
                    return None

                if state in ['cancelled', 'failed', 'closed']:
                    # Request will never be active, fail
                    # FIX: the '{0}' placeholder was previously logged
                    # literally -- .format(state) was never called.
                    log.error('Spot instance request resulted in state \'{0}\'. '
                              'Nothing else we can do here.'.format(state))
                    return False

        salt.utils.cloud.fire_event(
            'event',
            'waiting for spot instance',
            'salt/cloud/{0}/waiting_for_spot'.format(vm_['name']),
            transport=__opts__['transport']
        )

        try:
            data = _wait_for_spot_instance(
                __query_spot_instance_request,
                update_args=(sir_id, location),
                timeout=config.get_cloud_config_value(
                    'wait_for_spot_timeout', vm_, __opts__, default=10 * 60),
                interval=config.get_cloud_config_value(
                    'wait_for_spot_interval', vm_, __opts__, default=30),
                interval_multiplier=config.get_cloud_config_value(
                    'wait_for_spot_interval_multiplier',
                    vm_,
                    __opts__,
                    default=1),
                max_failures=config.get_cloud_config_value(
                    'wait_for_spot_max_failures',
                    vm_,
                    __opts__,
                    default=10),
            )
            log.debug('wait_for_spot_instance data {0}'.format(data))
        except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
            try:
                # Cancel the existing spot instance request
                params = {'Action': 'CancelSpotInstanceRequests',
                          'SpotInstanceRequestId.1': sir_id}
                data = aws.query(params,
                                 location=location,
                                 provider=provider,
                                 opts=__opts__,
                                 sigver='4')
                log.debug('Canceled spot instance request {0}. Data '
                          'returned: {1}'.format(sir_id, data))
            except SaltCloudSystemExit:
                pass
            finally:
                raise SaltCloudSystemExit(str(exc))

    return data, vm_
def query_instance(vm_=None, call=None):
    '''
    Query an instance upon creation from the EC2 API.

    Polls DescribeInstances (up to 5 attempts) until the new instance
    appears, then waits for an IP address (public, or private when
    ssh_interface is 'private_ips') to be reported. Fires the 'querying
    instance' (and optionally 'query_reactor') cloud events. Destroys the
    VM and exits on wait timeout/failure.
    '''
    if call == 'function':
        # Technically this function may be called other ways too, but it
        # definitely cannot be called with --function.
        raise SaltCloudSystemExit(
            'The query_instance action must be called with -a or --action.'
        )

    instance_id = vm_['instance_id']
    location = vm_.get('location', get_location(vm_))
    salt.utils.cloud.fire_event(
        'event',
        'querying instance',
        'salt/cloud/{0}/querying'.format(vm_['name']),
        {'instance_id': instance_id},
        transport=__opts__['transport']
    )

    log.debug('The new VM instance_id is {0}'.format(instance_id))

    params = {'Action': 'DescribeInstances',
              'InstanceId.1': instance_id}

    provider = get_provider(vm_)

    attempts = 5
    while attempts > 0:
        data, requesturl = aws.query(params,  # pylint: disable=W0632
                                     location=location,
                                     provider=provider,
                                     opts=__opts__,
                                     return_url=True,
                                     sigver='4')
        log.debug('The query returned: {0}'.format(data))

        if isinstance(data, dict) and 'error' in data:
            log.warn(
                'There was an error in the query. {0} attempts '
                'remaining: {1}'.format(
                    attempts, data['error']
                )
            )
            attempts -= 1
            # Just a little delay between attempts...
            time.sleep(1)
            continue

        if isinstance(data, list) and not data:
            log.warn(
                'Query returned an empty list. {0} attempts '
                'remaining.'.format(attempts)
            )
            attempts -= 1
            # Just a little delay between attempts...
            time.sleep(1)
            continue

        break
    else:
        # All attempts exhausted without a break. FIX: the last response
        # may be an empty list rather than an error dict, so guard the
        # lookup -- data['error'] previously raised TypeError on a list.
        if isinstance(data, dict) and 'error' in data:
            err_detail = data['error']
        else:
            err_detail = data
        raise SaltCloudSystemExit(
            'An error occurred while creating VM: {0}'.format(err_detail)
        )

    def __query_ip_address(params, url):
        # Poll helper for wait_for_ip: returns data once an address is
        # visible, False on error, None (implicitly) while still waiting.
        data = aws.query(params,
                         #requesturl=url,
                         location=location,
                         provider=provider,
                         opts=__opts__,
                         sigver='4')
        if not data:
            log.error(
                'There was an error while querying EC2. Empty response'
            )
            # Trigger a failure in the wait for IP function
            return False

        if isinstance(data, dict) and 'error' in data:
            log.warn(
                'There was an error in the query. {0}'.format(data['error'])
            )
            # Trigger a failure in the wait for IP function
            return False

        log.debug('Returned query data: {0}'.format(data))

        if 'ipAddress' in data[0]['instancesSet']['item']:
            return data
        if ssh_interface(vm_) == 'private_ips' and \
           'privateIpAddress' in data[0]['instancesSet']['item']:
            return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_ip_address,
            update_args=(params, requesturl),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
            interval_multiplier=config.get_cloud_config_value(
                'wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    if 'reactor' in vm_ and vm_['reactor'] is True:
        salt.utils.cloud.fire_event(
            'event',
            'instance queried',
            'salt/cloud/{0}/query_reactor'.format(vm_['name']),
            {'data': data},
            transport=__opts__['transport']
        )

    return data
def wait_for_instance(
        vm_=None,
        data=None,
        ip_address=None,
        display_ssh_output=True,
        call=None,
):
    '''
    Wait for an instance upon creation from the EC2 API, to become available.

    For Windows VMs (``win_installer`` configured) this waits on port 445
    and validates the credentials, optionally polling for the
    auto-generated EC2 password first. Otherwise it waits for SSH,
    optionally harvesting host keys from the console output into a
    configured known_hosts_file, then tries each candidate username in
    ``vm_['usernames']`` until one authenticates.

    Returns vm_ (with ssh_username recorded) on success; raises
    SaltCloudSystemExit on connection/authentication failure.
    '''
    if call == 'function':
        # Technically this function may be called other ways too, but it
        # definitely cannot be called with --function.
        raise SaltCloudSystemExit(
            'The wait_for_instance action must be called with -a or --action.'
        )

    if vm_ is None:
        vm_ = {}

    if data is None:
        data = {}

    ssh_gateway_config = vm_.get(
        'ssh_gateway_config', get_ssh_gateway_config(vm_)
    )

    salt.utils.cloud.fire_event(
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(vm_['name']),
        {'ip_address': ip_address},
        transport=__opts__['transport']
    )

    ssh_connect_timeout = config.get_cloud_config_value(
        'ssh_connect_timeout', vm_, __opts__, 900   # 15 minutes
    )

    if config.get_cloud_config_value('win_installer', vm_, __opts__):
        username = config.get_cloud_config_value(
            'win_username', vm_, __opts__, default='Administrator'
        )
        win_passwd = config.get_cloud_config_value(
            'win_password', vm_, __opts__, default=''
        )
        if win_passwd and win_passwd == 'auto':
            log.debug('Waiting for auto-generated Windows EC2 password')
            while True:
                password_data = get_password_data(
                    name=vm_['name'],
                    kwargs={
                        'key_file': vm_['private_key'],
                    },
                    call='action',
                )
                log.debug(password_data)
                win_passwd = password_data.get('password', None)
                if win_passwd is None:
                    # This wait is so high, because the password is unlikely to
                    # be generated for at least 4 minutes
                    time.sleep(60)
                else:
                    break
        # Port 445 = SMB; used to verify the Windows host is reachable
        if not salt.utils.cloud.wait_for_port(ip_address,
                                              port=445,
                                              timeout=ssh_connect_timeout):
            raise SaltCloudSystemExit(
                'Failed to connect to remote windows host'
            )
        if not salt.utils.cloud.validate_windows_cred(ip_address,
                                                      username,
                                                      win_passwd):
            raise SaltCloudSystemExit(
                'Failed to authenticate against remote windows host'
            )
    elif salt.utils.cloud.wait_for_port(ip_address,
                                        timeout=ssh_connect_timeout,
                                        gateway=ssh_gateway_config
                                        ):
        # If a known_hosts_file is configured, this instance will not be
        # accessible until it has a host key. Since this is provided on
        # supported instances by cloud-init, and viewable to us only from the
        # console output (which may take several minutes to become available,
        # we have some more waiting to do here.
        known_hosts_file = config.get_cloud_config_value(
            'known_hosts_file', vm_, __opts__, default=None
        )
        if known_hosts_file:
            console = {}
            while 'output_decoded' not in console:
                console = get_console_output(
                    instance_id=vm_['instance_id'],
                    call='action',
                )
                pprint.pprint(console)
                time.sleep(5)
            output = console['output_decoded']
            comps = output.split('-----BEGIN SSH HOST KEY KEYS-----')
            if len(comps) < 2:
                # Fail; there are no host keys
                return False

            comps = comps[1].split('-----END SSH HOST KEY KEYS-----')
            keys = ''
            for line in comps[0].splitlines():
                if not line:
                    continue
                keys += '\n{0} {1}'.format(ip_address, line)

            with salt.utils.fopen(known_hosts_file, 'a') as fp_:
                fp_.write(keys)
                # NOTE(review): redundant -- the with block closes the
                # file on exit; harmless but could be removed.
                fp_.close()

        # Try each candidate username in turn; the for/else raises only
        # when none of them authenticated (i.e. the loop never hit break).
        for user in vm_['usernames']:
            if salt.utils.cloud.wait_for_passwd(
                host=ip_address,
                username=user,
                ssh_timeout=config.get_cloud_config_value(
                    'wait_for_passwd_timeout', vm_, __opts__, default=1 * 60
                ),
                key_filename=vm_['key_filename'],
                display_ssh_output=display_ssh_output,
                gateway=ssh_gateway_config,
                maxtries=config.get_cloud_config_value(
                    'wait_for_passwd_maxtries', vm_, __opts__, default=15
                ),
                known_hosts_file=config.get_cloud_config_value(
                    'known_hosts_file', vm_, __opts__,
                    default='/dev/null'
                ),
            ):
                __opts__['ssh_username'] = user
                vm_['ssh_username'] = user
                break
        else:
            raise SaltCloudSystemExit(
                'Failed to authenticate against remote ssh'
            )
    else:
        raise SaltCloudSystemExit(
            'Failed to connect to remote ssh'
        )

    if 'reactor' in vm_ and vm_['reactor'] is True:
        salt.utils.cloud.fire_event(
            'event',
            'ssh is available',
            'salt/cloud/{0}/ssh_ready_reactor'.format(vm_['name']),
            {'ip_address': ip_address},
            transport=__opts__['transport']
        )

    return vm_
def create(vm_=None, call=None):
    '''
    Create a single VM from a data dict.

    Orchestrates the full lifecycle: fires cloud events, validates the
    key file and SSH gateway config, requests (or adopts, when
    'instance_id' is already present) the instance, waits for it to
    become queryable, tags it, updates ENIs, waits for SSH/WinRM,
    attaches any configured volumes and finally bootstraps Salt onto it.

    Returns the instance description dict merged with bootstrap results,
    or {} when the instance request returned an error string.
    '''
    if call:
        raise SaltCloudSystemExit(
            'You cannot create an instance with -a or -f.'
        )

    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
        transport=__opts__['transport']
    )
    salt.utils.cloud.cachedir_index_add(
        vm_['name'], vm_['profile'], 'ec2', vm_['provider']
    )

    key_filename = config.get_cloud_config_value(
        'private_key', vm_, __opts__, search_global=False, default=None
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename {0!r} does not exist'.format(
                key_filename
            )
        )
    vm_['key_filename'] = key_filename

    # Get SSH Gateway config early to verify the private_key,
    # if used, exists or not. We don't want to deploy an instance
    # and not be able to access it via the gateway.
    ssh_gateway_config = get_ssh_gateway_config(vm_)
    vm_['ssh_gateway_config'] = ssh_gateway_config

    location = get_location(vm_)
    vm_['location'] = location

    log.info('Creating Cloud VM {0} in {1}'.format(vm_['name'], location))
    vm_['usernames'] = salt.utils.cloud.ssh_usernames(
        vm_,
        __opts__,
        default_users=(
            'ec2-user', 'ubuntu', 'fedora', 'admin', 'bitnami', 'root'
        )
    )

    if 'instance_id' in vm_:
        # This was probably created via another process, and doesn't have
        # things like salt keys created yet, so let's create them now.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for {0[name]!r}'.format(vm_))
            vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize',
                    vm_,
                    __opts__
                )
            )
    else:
        # Put together all of the information required to request the instance,
        # and then fire off the request for it
        # NOTE(review): the second positional argument here binds to
        # request_instance's 'call' parameter, not a location -- confirm
        # this is intentional (it is ignored unless equal to 'function').
        data, vm_ = request_instance(vm_, location)

        # If data is a str, it's an error
        if isinstance(data, str):
            log.error('Error requesting instance: {0}'.format(data))
            return {}

        # Pull the instance ID, valid for both spot and normal instances
        # Multiple instances may have been spun up, get all their IDs
        vm_['instance_id_list'] = []
        for instance in data:
            vm_['instance_id_list'].append(instance['instanceId'])

        vm_['instance_id'] = vm_['instance_id_list'].pop()
        if len(vm_['instance_id_list']) > 0:
            # Multiple instances were spun up, get one now, and queue the rest
            queue_instances(vm_['instance_id_list'])

    # Wait for vital information, such as IP addresses, to be available
    # for the new instance
    data = query_instance(vm_)

    # Now that the instance is available, tag it appropriately. Should
    # mitigate race conditions with tags
    tags = config.get_cloud_config_value('tag',
                                         vm_,
                                         __opts__,
                                         {},
                                         search_global=False)
    if not isinstance(tags, dict):
        raise SaltCloudConfigError(
            '\'tag\' should be a dict.'
        )

    for value in six.itervalues(tags):
        if not isinstance(value, str):
            raise SaltCloudConfigError(
                '\'tag\' values must be strings. Try quoting the values. '
                'e.g. "2013-09-19T20:09:46Z".'
            )

    tags['Name'] = vm_['name']

    salt.utils.cloud.fire_event(
        'event',
        'setting tags',
        'salt/cloud/{0}/tagging'.format(vm_['name']),
        {'tags': tags},
        transport=__opts__['transport']
    )

    set_tags(
        vm_['name'],
        tags,
        instance_id=vm_['instance_id'],
        call='action',
        location=location
    )

    network_interfaces = config.get_cloud_config_value(
        'network_interfaces',
        vm_,
        __opts__,
        search_global=False
    )

    if network_interfaces:
        _update_enis(network_interfaces, data)

    # At this point, the node is created and tagged, and now needs to be
    # bootstrapped, once the necessary port is available.
    log.info('Created node {0}'.format(vm_['name']))

    instance = data[0]['instancesSet']['item']

    # Wait for the necessary port to become available to bootstrap
    if ssh_interface(vm_) == 'private_ips':
        ip_address = instance['privateIpAddress']
        log.info('Salt node data. Private_ip: {0}'.format(ip_address))
    else:
        ip_address = instance['ipAddress']
        log.info('Salt node data. Public_ip: {0}'.format(ip_address))
    vm_['ssh_host'] = ip_address

    if get_salt_interface(vm_) == 'private_ips':
        salt_ip_address = instance['privateIpAddress']
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    else:
        salt_ip_address = instance['ipAddress']
        log.debug('Salt interface set to: {0}'.format(salt_ip_address))
    vm_['salt_host'] = salt_ip_address

    display_ssh_output = config.get_cloud_config_value(
        'display_ssh_output', vm_, __opts__, default=True
    )

    vm_ = wait_for_instance(
        vm_, data, ip_address, display_ssh_output
    )

    # The instance is booted and accessible, let's Salt it!
    ret = instance.copy()

    # Get ANY defined volumes settings, merging data, in the following order
    # 1. VM config
    # 2. Profile config
    # 3. Global configuration
    volumes = config.get_cloud_config_value(
        'volumes', vm_, __opts__, search_global=True
    )
    if volumes:
        salt.utils.cloud.fire_event(
            'event',
            'attaching volumes',
            'salt/cloud/{0}/attaching_volumes'.format(vm_['name']),
            {'volumes': volumes},
            transport=__opts__['transport']
        )

        log.info('Create and attach volumes to node {0}'.format(vm_['name']))
        created = create_attach_volumes(
            vm_['name'],
            {
                'volumes': volumes,
                'zone': ret['placement']['availabilityZone'],
                'instance_id': ret['instanceId'],
                'del_all_vols_on_destroy': vm_.get('set_del_all_vols_on_destroy', False)
            },
            call='action'
        )
        ret['Attached Volumes'] = created

    for key, value in salt.utils.cloud.bootstrap(vm_, __opts__).items():
        ret.setdefault(key, value)

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug(
        '{0[name]!r} VM creation details:\n{1}'.format(
            vm_, pprint.pformat(instance)
        )
    )

    event_data = {
        'name': vm_['name'],
        'profile': vm_['profile'],
        'provider': vm_['provider'],
        'instance_id': vm_['instance_id'],
    }
    if volumes:
        event_data['volumes'] = volumes

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        event_data,
        transport=__opts__['transport']
    )

    return ret
def queue_instances(instances):
    '''
    Queue a set of instances to be provisioned later. Expects a list.

    Currently this only queries node data, and then places it in the cloud
    cache (if configured). If the salt-cloud-reactor is being used, these
    instances will be automatically provisioned using that.

    For more information about the salt-cloud-reactor, see:
    https://github.com/saltstack-formulas/salt-cloud-reactor
    '''
    for inst_id in instances:
        salt.utils.cloud.cache_node(
            _get_node(instance_id=inst_id),
            __active_provider_name__,
            __opts__
        )
def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True):
    '''
    Create and attach volumes to created node

    name
        Name of the node to attach volumes to.
    kwargs
        Must contain ``volumes`` (list of volume dicts, or a YAML string)
        and ``zone``; may contain ``instance_id`` and
        ``del_all_vols_on_destroy``.
    call
        Must be ``'action'``.
    wait_to_finish
        Passed through to ``create_volume`` so each new volume is polled
        until it is available before attaching.

    Returns a list of human-readable "attached" messages, one per volume.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The create_attach_volumes action must be called with '
            '-a or --action.'
        )
    # Fall back to looking the instance up by node name if the caller did
    # not supply an instance_id.
    if 'instance_id' not in kwargs:
        kwargs['instance_id'] = _get_node(name)[name]['instanceId']
    # CLI invocations pass volumes as a YAML string; normalize to a list.
    if isinstance(kwargs['volumes'], str):
        volumes = yaml.safe_load(kwargs['volumes'])
    else:
        volumes = kwargs['volumes']
    ret = []
    for volume in volumes:
        created = False
        volume_name = '{0} on {1}'.format(volume['device'], name)
        volume_dict = {
            'volume_name': volume_name,
            'zone': kwargs['zone']
        }
        # An existing volume_id or snapshot takes precedence over creating
        # a brand-new volume of a given size.
        if 'volume_id' in volume:
            volume_dict['volume_id'] = volume['volume_id']
        elif 'snapshot' in volume:
            volume_dict['snapshot'] = volume['snapshot']
        else:
            volume_dict['size'] = volume['size']
            if 'type' in volume:
                volume_dict['type'] = volume['type']
            if 'iops' in volume:
                volume_dict['iops'] = volume['iops']
            if 'encrypted' in volume:
                volume_dict['encrypted'] = volume['encrypted']
        # Only create a volume when we were not handed one.
        if 'volume_id' not in volume_dict:
            created_volume = create_volume(volume_dict, call='function', wait_to_finish=wait_to_finish)
            created = True
            for item in created_volume:
                if 'volumeId' in item:
                    volume_dict['volume_id'] = item['volumeId']
        attach = attach_volume(
            name,
            {'volume_id': volume_dict['volume_id'],
             'device': volume['device']},
            instance_id=kwargs['instance_id'],
            call='action'
        )
        # Update the delvol parameter for this volume; only volumes we
        # created here are toggled, pre-existing ones are left alone.
        delvols_on_destroy = kwargs.get('del_all_vols_on_destroy', None)
        if attach and created and delvols_on_destroy is not None:
            _toggle_delvol(instance_id=kwargs['instance_id'],
                           device=volume['device'],
                           value=delvols_on_destroy)
        if attach:
            msg = (
                '{0} attached to {1} (aka {2}) as device {3}'.format(
                    volume_dict['volume_id'],
                    kwargs['instance_id'],
                    name,
                    volume['device']
                )
            )
            log.info(msg)
            ret.append(msg)
    return ret
def stop(name, call=None):
    '''
    Stop a running node.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The stop action must be called with -a or --action.'
        )
    log.info('Stopping node {0}'.format(name))
    instance_id = _get_node(name)[name]['instanceId']
    return aws.query({'Action': 'StopInstances',
                      'InstanceId.1': instance_id},
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
def start(name, call=None):
    '''
    Start a stopped node.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )
    log.info('Starting node {0}'.format(name))
    instance_id = _get_node(name)[name]['instanceId']
    return aws.query({'Action': 'StartInstances',
                      'InstanceId.1': instance_id},
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
def set_tags(name=None,
             tags=None,
             call=None,
             location=None,
             instance_id=None,
             resource_id=None,
             kwargs=None):  # pylint: disable=W0613
    '''
    Set tags for a resource. Normally a VM name or instance_id is passed in,
    but a resource_id may be passed instead. If both are passed in, the
    instance_id will be used.

    tags
        Dict of tag key/value pairs to apply. If empty, any extra CLI
        ``kwargs`` are used as the tags instead.
    location
        Only used when re-reading the tags for verification; the CreateTags
        query itself always uses ``get_location()``.

    Returns the tag set read back from EC2, or raises SaltCloudSystemExit
    after 6 failed verification attempts.

    CLI Examples:
    .. code-block:: bash
        salt-cloud -a set_tags mymachine tag1=somestuff tag2='Other stuff'
        salt-cloud -a set_tags resource_id=vol-3267ab32 tag=somestuff
    '''
    if kwargs is None:
        kwargs = {}
    # Allow resource_id/instance_id to be supplied via CLI kwargs; an
    # explicit instance_id argument takes precedence.
    if instance_id is None:
        if 'resource_id' in kwargs:
            resource_id = kwargs['resource_id']
            del kwargs['resource_id']
        if 'instance_id' in kwargs:
            instance_id = kwargs['instance_id']
            del kwargs['instance_id']
        if resource_id is None:
            if instance_id is None:
                instance_id = _get_node(name, location)[name]['instanceId']
        else:
            instance_id = resource_id
    # This second check is a safety, in case the above still failed to produce
    # a usable ID
    if instance_id is None:
        return {
            'Error': 'A valid instance_id or resource_id was not specified.'
        }
    params = {'Action': 'CreateTags',
              'ResourceId.1': instance_id}
    log.debug('Tags to set for {0}: {1}'.format(name, tags))
    if kwargs and not tags:
        tags = kwargs
    # Fan the tags out into CreateTags' numbered Tag.N.Key/Tag.N.Value
    # query parameters.
    for idx, (tag_k, tag_v) in enumerate(six.iteritems(tags)):
        params['Tag.{0}.Key'.format(idx)] = tag_k
        params['Tag.{0}.Value'.format(idx)] = tag_v
    # EC2 tagging is eventually consistent: set the tags, read them back,
    # and retry (up to 6 total attempts) until every requested tag is
    # visible with the requested value.
    attempts = 5
    while attempts >= 0:
        result = aws.query(params,
                           setname='tagSet',
                           location=get_location(),
                           provider=get_provider(),
                           opts=__opts__,
                           sigver='4')
        settags = get_tags(
            instance_id=instance_id, call='action', location=location
        )
        log.debug('Setting the tags returned: {0}'.format(settags))
        failed_to_set_tags = False
        for tag in settags:
            if tag['key'] not in tags:
                # We were not setting this tag
                continue
            # Compare as strings: values round-trip through the query API
            # as text regardless of their original Python type.
            if str(tags.get(tag['key'])) != str(tag['value']):
                # Not set to the proper value!?
                failed_to_set_tags = True
                break
        if failed_to_set_tags:
            log.warn(
                'Failed to set tags. Remaining attempts {0}'.format(
                    attempts
                )
            )
            attempts -= 1
            # Just a little delay between attempts...
            time.sleep(1)
            continue
        return settags
    raise SaltCloudSystemExit(
        'Failed to set tags on {0}!'.format(name)
    )
def get_tags(name=None,
             instance_id=None,
             call=None,
             location=None,
             kwargs=None,
             resource_id=None):  # pylint: disable=W0613
    '''
    Retrieve tags for a resource. Normally a VM name or instance_id is passed
    in, but a resource_id may be passed instead. If both are passed in, the
    instance_id will be used.

    Returns the result of a DescribeTags query filtered on the resolved
    resource id.

    CLI Examples:
    .. code-block:: bash
        salt-cloud -a get_tags mymachine
        salt-cloud -a get_tags resource_id=vol-3267ab32
    '''
    # Bug fix: kwargs defaults to None but is used in membership tests
    # below, which raised TypeError when no kwargs were supplied.
    if kwargs is None:
        kwargs = {}
    if location is None:
        location = get_location()
    if instance_id is None:
        if resource_id is None:
            if name:
                instances = list_nodes_full(location)
                if name in instances:
                    instance_id = instances[name]['instanceId']
            elif 'instance_id' in kwargs:
                instance_id = kwargs['instance_id']
            elif 'resource_id' in kwargs:
                instance_id = kwargs['resource_id']
        else:
            instance_id = resource_id
    params = {'Action': 'DescribeTags',
              'Filter.1.Name': 'resource-id',
              'Filter.1.Value': instance_id}
    return aws.query(params,
                     setname='tagSet',
                     location=location,
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
def del_tags(name=None,
             kwargs=None,
             call=None,
             instance_id=None,
             resource_id=None):  # pylint: disable=W0613
    '''
    Delete tags for a resource. Normally a VM name or instance_id is passed in,
    but a resource_id may be passed instead. If both are passed in, the
    instance_id will be used.
    CLI Examples:
    .. code-block:: bash
        salt-cloud -a del_tags mymachine tags=mytag,
        salt-cloud -a del_tags mymachine tags=tag1,tag2,tag3
        salt-cloud -a del_tags resource_id=vol-3267ab32 tags=tag1,tag2,tag3
    '''
    kwargs = kwargs or {}
    if 'tags' not in kwargs:
        raise SaltCloudSystemExit(
            'A tag or tags must be specified using tags=list,of,tags'
        )
    # A bare resource_id (no node name) may arrive via CLI kwargs.
    if not name and 'resource_id' in kwargs:
        instance_id = kwargs.pop('resource_id')
    if not instance_id:
        instance_id = _get_node(name)[name]['instanceId']
    params = {'Action': 'DeleteTags',
              'ResourceId.1': instance_id}
    # Fan the comma-separated tag names out into numbered Tag.N.Key params.
    for idx, tag in enumerate(kwargs['tags'].split(',')):
        params['Tag.{0}.Key'.format(idx)] = tag
    aws.query(params,
              setname='tagSet',
              location=get_location(),
              provider=get_provider(),
              opts=__opts__,
              sigver='4')
    # Return the remaining tags so the caller can see the effect.
    if resource_id:
        return get_tags(resource_id=resource_id)
    return get_tags(instance_id=instance_id)
def rename(name, kwargs, call=None):
    '''
    Properly rename a node: update its Name tag and rename its key file.
    Pass in the new name as "newname".
    CLI Example:
    .. code-block:: bash
        salt-cloud -a rename mymachine newname=yourmachine
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The rename action must be called with -a or --action.'
        )
    newname = kwargs['newname']
    log.info('Renaming {0} to {1}'.format(name, newname))
    set_tags(name, {'Name': newname}, call='action')
    salt.utils.cloud.rename_key(__opts__['pki_dir'], name, newname)
def destroy(name, call=None):
    '''
    Destroy a node. Will check termination protection and warn if enabled.

    Also cancels the associated spot instance request (if any), optionally
    renames the node first (``rename_on_destroy``), fires destroy events,
    and cleans up the cloud cache entries.

    CLI Example:
    .. code-block:: bash
        salt-cloud --destroy mymachine
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    node_metadata = _get_node(name)
    instance_id = node_metadata[name]['instanceId']
    # Bug fix: the spot instance request id lives on the per-node data,
    # not on the outer {name: data} mapping — the old
    # node_metadata.get('spotInstanceRequestId') always returned None,
    # so spot requests were never cancelled.
    sir_id = node_metadata[name].get('spotInstanceRequestId')
    protected = show_term_protect(
        name=name,
        instance_id=instance_id,
        call='action',
        quiet=True
    )
    salt.utils.cloud.fire_event(
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        {'name': name, 'instance_id': instance_id},
        transport=__opts__['transport']
    )
    if protected == 'true':
        raise SaltCloudSystemExit(
            'This instance has been protected from being destroyed. '
            'Use the following command to disable protection:\n\n'
            'salt-cloud -a disable_term_protect {0}'.format(
                name
            )
        )
    ret = {}
    # Optionally rename before terminating so the dying instance cannot be
    # confused with a fresh one of the same name.
    if config.get_cloud_config_value('rename_on_destroy',
                                     get_configured_provider(),
                                     __opts__,
                                     search_global=False) is True:
        newname = '{0}-DEL{1}'.format(name, uuid.uuid4().hex)
        rename(name, kwargs={'newname': newname}, call='action')
        log.info(
            'Machine will be identified as {0} until it has been '
            'cleaned up.'.format(
                newname
            )
        )
        ret['newname'] = newname
    params = {'Action': 'TerminateInstances',
              'InstanceId.1': instance_id}
    location = get_location()
    provider = get_provider()
    result = aws.query(params,
                       location=location,
                       provider=provider,
                       opts=__opts__,
                       sigver='4')
    log.info(result)
    ret.update(result[0])
    # If this instance is part of a spot instance request, we
    # need to cancel it as well
    if sir_id is not None:
        params = {'Action': 'CancelSpotInstanceRequests',
                  'SpotInstanceRequestId.1': sir_id}
        result = aws.query(params,
                           location=location,
                           provider=provider,
                           opts=__opts__,
                           sigver='4')
        ret['spotInstance'] = result[0]
    salt.utils.cloud.fire_event(
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        {'name': name, 'instance_id': instance_id},
        transport=__opts__['transport']
    )
    salt.utils.cloud.cachedir_index_del(name)
    if __opts__.get('update_cachedir', False) is True:
        salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
    return ret
def reboot(name, call=None):
    '''
    Reboot a node.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a reboot mymachine
    '''
    instance_id = _get_node(name)[name]['instanceId']
    result = aws.query({'Action': 'RebootInstances',
                        'InstanceId.1': instance_id},
                       setname='tagSet',
                       location=get_location(),
                       provider=get_provider(),
                       opts=__opts__,
                       sigver='4')
    if result == []:
        # An empty response means the reboot request was accepted.
        log.info('Complete')
        return {'Reboot': 'Complete'}
    # NOTE(review): a non-empty response falls through and returns None,
    # matching the original behavior.
def show_image(kwargs, call=None):
    '''
    Show the details from EC2 concerning an AMI
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_image action must be called with -f or --function.'
        )
    result = aws.query({'ImageId.1': kwargs['image'],
                        'Action': 'DescribeImages'},
                       setname='tagSet',
                       location=get_location(),
                       provider=get_provider(),
                       opts=__opts__,
                       sigver='4')
    log.info(result)
    return result
def show_instance(name=None, instance_id=None, call=None, kwargs=None):
    '''
    Show the details from EC2 concerning an AMI.
    Can be called as an action (which requires a name):
    .. code-block:: bash
        salt-cloud -a show_instance myinstance
    ...or as a function (which requires either a name or instance_id):
    .. code-block:: bash
        salt-cloud -f show_instance my-ec2 name=myinstance
        salt-cloud -f show_instance my-ec2 instance_id=i-d34db33f
    '''
    if not name and call == 'action':
        raise SaltCloudSystemExit(
            'The show_instance action requires a name.'
        )
    if call == 'function':
        # Bug fix: kwargs defaults to None; calling .get() on it raised
        # AttributeError when no kwargs were supplied.
        if kwargs is None:
            kwargs = {}
        name = kwargs.get('name', None)
        instance_id = kwargs.get('instance_id', None)
    if not name and not instance_id:
        raise SaltCloudSystemExit(
            'The show_instance function requires '
            'either a name or an instance_id'
        )
    node = _get_node(name=name, instance_id=instance_id)
    salt.utils.cloud.cache_node(node, __active_provider_name__, __opts__)
    return node
def _get_node(name=None, instance_id=None, location=None):
    '''
    Return the data for a single node as ``{node_name: instance_data}``.

    Queries DescribeInstances either by instance id or by a ``tag:Name``
    filter. Retries up to 11 times on KeyError (the API can briefly return
    incomplete data right after an instance changes state), sleeping 0.5s
    between attempts, and returns ``{}`` if all attempts fail.
    '''
    if location is None:
        location = get_location()
    params = {'Action': 'DescribeInstances'}
    # If the "name" looks like a short-form instance id ('i-' plus 8
    # characters), treat it as one.  NOTE(review): newer EC2 instance ids
    # are 'i-' plus 17 hex chars and would fall through to the tag filter
    # here — confirm whether that is intended.
    if str(name).startswith('i-') and len(name) == 10:
        instance_id = name
    if instance_id:
        params['InstanceId.1'] = instance_id
    else:
        params['Filter.1.Name'] = 'tag:Name'
        params['Filter.1.Value.1'] = name
    log.trace(params)
    provider = get_provider()
    attempts = 10
    while attempts >= 0:
        try:
            instances = aws.query(params,
                                  location=location,
                                  provider=provider,
                                  opts=__opts__,
                                  sigver='4')
            return _extract_instance_info(instances)
        except KeyError:
            attempts -= 1
            log.debug(
                'Failed to get the data for the node {0!r}. Remaining '
                'attempts {1}'.format(
                    name, attempts
                )
            )
            # Just a little delay between attempts...
            time.sleep(0.5)
    return {}
def list_nodes_full(location=None, call=None):
    '''
    Return a list of the VMs that are on the provider
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f '
            'or --function.'
        )
    if location:
        return _list_nodes_full(location)
    # No location given: gather every location referenced by an EC2
    # profile and merge the node lists.
    locations = set(
        get_location(vm_) for vm_ in six.itervalues(__opts__['profiles'])
        if _vm_provider_driver(vm_)
    )
    ret = {}
    for loc in locations:
        ret.update(_list_nodes_full(loc))
    return ret
def _vm_provider_driver(vm_):
    '''
    Return True/False for whether the profile's driver is EC2, or None
    when its provider alias or driver is not configured at all.
    '''
    alias, driver = vm_['provider'].split(':')
    providers = __opts__['providers']
    if alias in providers and driver in providers[alias]:
        return driver == 'ec2'
    return None
def _extract_name_tag(item):
if 'tagSet' in item:
tagset = item['tagSet']
if isinstance(tagset['item'], list):
for tag in tagset['item']:
if tag['key'] == 'Name':
return tag['value']
return item['instanceId']
return item['tagSet']['item']['value']
return item['instanceId']
def _extract_instance_info(instances):
    '''
    Given an instance query, return a dict of all instance data keyed by
    node name (the Name tag, falling back to the instance id).
    '''
    ret = {}
    for instance in instances:
        # items could be type dict or list (for stopped EC2 instances);
        # normalize to a list so a single code path handles both.
        items = instance['instancesSet']['item']
        if not isinstance(items, list):
            items = [items]
        for item in items:
            name = _extract_name_tag(item)
            ret[name] = item
            ret[name].update(
                dict(
                    id=item['instanceId'],
                    image=item['imageId'],
                    size=item['instanceType'],
                    state=item['instanceState']['name'],
                    private_ips=item.get('privateIpAddress', []),
                    public_ips=item.get('ipAddress', [])
                )
            )
    return ret
def _list_nodes_full(location=None):
    '''
    Return a list of the VMs that in this location
    '''
    # The active provider name may be 'alias:driver'; keep only the alias.
    # (split(':')[0] is a no-op when there is no colon.)
    provider = (__active_provider_name__ or 'ec2').split(':')[0]
    instances = aws.query({'Action': 'DescribeInstances'},
                          location=location,
                          provider=provider,
                          opts=__opts__,
                          sigver='4')
    if 'error' in instances:
        raise SaltCloudSystemExit(
            'An error occurred while listing nodes: {0}'.format(
                instances['error']['Errors']['Error']['Message']
            )
        )
    ret = _extract_instance_info(instances)
    salt.utils.cloud.cache_node_list(ret, provider, __opts__)
    return ret
def list_nodes_min(location=None, call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names,
    and their state, is returned. This is the minimum amount of information
    needed to check for existing VMs.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    ret = {}
    params = {'Action': 'DescribeInstances'}
    instances = aws.query(params,
                          location=get_location(),
                          provider=get_provider(),
                          opts=__opts__,
                          sigver='4')
    if 'error' in instances:
        raise SaltCloudSystemExit(
            'An error occurred while listing nodes: {0}'.format(
                instances['error']['Errors']['Error']['Message']
            )
        )
    for instance in instances:
        # items could be a dict or a list (multi-instance reservations);
        # normalize to a list.
        items = instance['instancesSet']['item']
        if not isinstance(items, list):
            items = [items]
        # Bug fix: record EVERY instance in the reservation. Previously the
        # ret[name] assignment was outside the inner loop, so only the last
        # instance of a multi-instance reservation was returned.
        for item in items:
            name = _extract_name_tag(item)
            ret[name] = {'state': item['instanceState']['name']}
    return ret
def list_nodes(call=None):
    '''
    Return a list of the VMs that are on the provider
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )
    nodes = list_nodes_full(get_location())
    if 'error' in nodes:
        raise SaltCloudSystemExit(
            'An error occurred while listing nodes: {0}'.format(
                nodes['error']['Errors']['Error']['Message']
            )
        )
    # Trim each node's full data down to the standard summary fields.
    fields = ('id', 'image', 'size', 'state', 'private_ips', 'public_ips')
    ret = {}
    for node in nodes:
        ret[node] = dict((field, nodes[node][field]) for field in fields)
    return ret
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields
    '''
    nodes = list_nodes_full(get_location())
    return salt.utils.cloud.list_nodes_select(
        nodes, __opts__['query.selection'], call,
    )
def show_term_protect(name=None, instance_id=None, call=None, quiet=False):
    '''
    Return the current termination-protection setting ('true'/'false')
    for a node, logging it at DEBUG when ``quiet`` else INFO.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_term_protect action must be called with -a or --action.'
        )
    if not instance_id:
        instance_id = list_nodes_full(get_location())[name]['instanceId']
    result = aws.query({'Action': 'DescribeInstanceAttribute',
                        'InstanceId': instance_id,
                        'Attribute': 'disableApiTermination'},
                       location=get_location(),
                       provider=get_provider(),
                       return_root=True,
                       opts=__opts__,
                       sigver='4')
    # Pull the first 'value' entry out of the response; default to False
    # if the attribute is absent.
    disable_protect = next(
        (entry['value'] for entry in result if 'value' in entry),
        False
    )
    log.log(
        logging.DEBUG if quiet is True else logging.INFO,
        'Termination Protection is {0} for {1}'.format(
            disable_protect == 'true' and 'enabled' or 'disabled',
            name
        )
    )
    return disable_protect
def enable_term_protect(name, call=None):
    '''
    Turn termination protection on for a node.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a enable_term_protect mymachine
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The enable_term_protect action must be called with '
            '-a or --action.'
        )
    return _toggle_term_protect(name, 'true')
def disable_term_protect(name, call=None):
    '''
    Turn termination protection off for a node.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a disable_term_protect mymachine
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The disable_term_protect action must be called with '
            '-a or --action.'
        )
    return _toggle_term_protect(name, 'false')
def _toggle_term_protect(name, value):
    '''
    Set the DisableApiTermination attribute on a node to ``value``
    ('true' or 'false') and return the resulting setting.

    (Docstring fixed: it was a copy of disable_term_protect's and
    described only disabling.)
    '''
    instances = list_nodes_full(get_location())
    instance_id = instances[name]['instanceId']
    params = {'Action': 'ModifyInstanceAttribute',
              'InstanceId': instance_id,
              'DisableApiTermination.Value': value}
    # The modify response is not useful; re-query the attribute below.
    # (The previously-assigned, unused `result` local has been removed.)
    aws.query(params,
              location=get_location(),
              provider=get_provider(),
              return_root=True,
              opts=__opts__,
              sigver='4')
    return show_term_protect(name=name, instance_id=instance_id, call='action')
def show_delvol_on_destroy(name, kwargs=None, call=None):
    '''
    Show the delete-on-termination flag for an instance's EBS volumes,
    optionally filtered by ``device`` and/or ``volume_id``.

    (Docstring fixed: it previously read "Do not delete all/specified EBS
    volumes upon instance termination", copied from keepvol_on_destroy.)

    CLI Example:
    .. code-block:: bash
        salt-cloud -a show_delvol_on_destroy mymachine
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_delvol_on_destroy action must be called '
            'with -a or --action.'
        )
    if not kwargs:
        kwargs = {}
    instance_id = kwargs.get('instance_id', None)
    device = kwargs.get('device', None)
    volume_id = kwargs.get('volume_id', None)
    if instance_id is None:
        instances = list_nodes_full()
        instance_id = instances[name]['instanceId']
    params = {'Action': 'DescribeInstances',
              'InstanceId.1': instance_id}
    data = aws.query(params,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    blockmap = data[0]['instancesSet']['item']['blockDeviceMapping']
    # A single mapping comes back as a bare dict; normalize to a list.
    if not isinstance(blockmap['item'], list):
        blockmap['item'] = [blockmap['item']]
    items = []
    # (unused enumerate index removed)
    for item in blockmap['item']:
        device_name = item['deviceName']
        if device is not None and device != device_name:
            continue
        if volume_id is not None and volume_id != item['ebs']['volumeId']:
            continue
        info = {
            'device_name': device_name,
            'volume_id': item['ebs']['volumeId'],
            'deleteOnTermination': item['ebs']['deleteOnTermination']
        }
        items.append(info)
    return items
def keepvol_on_destroy(name, kwargs=None, call=None):
    '''
    Keep all/specified EBS volumes when the instance is terminated
    (sets DeleteOnTermination to 'false').
    CLI Example:
    .. code-block:: bash
        salt-cloud -a keepvol_on_destroy mymachine
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The keepvol_on_destroy action must be called with -a or --action.'
        )
    kwargs = kwargs or {}
    return _toggle_delvol(name=name,
                          device=kwargs.get('device', None),
                          volume_id=kwargs.get('volume_id', None),
                          value='false')
def delvol_on_destroy(name, kwargs=None, call=None):
    '''
    Delete all/specified EBS volumes upon instance termination
    (sets DeleteOnTermination to 'true').
    CLI Example:
    .. code-block:: bash
        salt-cloud -a delvol_on_destroy mymachine
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The delvol_on_destroy action must be called with -a or --action.'
        )
    kwargs = kwargs or {}
    return _toggle_delvol(name=name,
                          device=kwargs.get('device', None),
                          volume_id=kwargs.get('volume_id', None),
                          value='true')
def _toggle_delvol(name=None, instance_id=None, device=None, volume_id=None,
                   value=None, requesturl=None):
    '''
    Set the DeleteOnTermination flag to ``value`` ('true'/'false') for an
    instance's block devices, optionally restricted by ``device`` and/or
    ``volume_id``. Returns the refreshed node data.

    requesturl
        Optional pre-built DescribeInstances request URL to re-query
        instead of constructing the query from ``instance_id``.
    '''
    if not instance_id:
        instances = list_nodes_full(get_location())
        instance_id = instances[name]['instanceId']
    # Fetch the current block device mapping, either by replaying a known
    # request URL or by issuing a fresh DescribeInstances query.
    if requesturl:
        data = aws.query(requesturl=requesturl,
                         location=get_location(),
                         provider=get_provider(),
                         opts=__opts__,
                         sigver='4')
    else:
        params = {'Action': 'DescribeInstances',
                  'InstanceId.1': instance_id}
        data, requesturl = aws.query(params,  # pylint: disable=W0632
                                     return_url=True,
                                     location=get_location(),
                                     provider=get_provider(),
                                     opts=__opts__,
                                     sigver='4')
    blockmap = data[0]['instancesSet']['item']['blockDeviceMapping']
    params = {'Action': 'ModifyInstanceAttribute',
              'InstanceId': instance_id}
    # A single mapping comes back as a bare dict; normalize to a list.
    if not isinstance(blockmap['item'], list):
        blockmap['item'] = [blockmap['item']]
    # Build numbered BlockDeviceMapping.N parameters for each matching
    # device; non-matching devices are skipped (their flag is unchanged).
    for idx, item in enumerate(blockmap['item']):
        device_name = item['deviceName']
        if device is not None and device != device_name:
            continue
        if volume_id is not None and volume_id != item['ebs']['volumeId']:
            continue
        params['BlockDeviceMapping.{0}.DeviceName'.format(idx)] = device_name
        params['BlockDeviceMapping.{0}.Ebs.DeleteOnTermination'.format(idx)] = value
    aws.query(params,
              return_root=True,
              location=get_location(),
              provider=get_provider(),
              opts=__opts__,
              sigver='4')
    return _get_node(instance_id=instance_id)
def create_volume(kwargs=None, call=None, wait_to_finish=False):
    '''
    Create a volume

    zone (required)
        Availability zone to create the volume in.
    size / snapshot
        Size in GiB, or a snapshot id to restore from; defaults to a
        10 GiB blank volume when neither is given.
    type / iops / encrypted / tags
        Optional volume attributes; iops only applies to io1 volumes.

    CLI Examples:
    .. code-block:: bash
        salt-cloud -f create_volume my-ec2-config zone=us-east-1b
        salt-cloud -f create_volume my-ec2-config zone=us-east-1b tags='{"tag1": "val1", "tag2": "val2"}'
    '''
    if call != 'function':
        log.error(
            'The create_volume function must be called with -f or --function.'
        )
        return False
    # Bug fix: kwargs defaults to None but is used in membership tests
    # below, which raised TypeError when no kwargs were supplied.
    if kwargs is None:
        kwargs = {}
    if 'zone' not in kwargs:
        log.error('An availability zone must be specified to create a volume.')
        return False
    if 'size' not in kwargs and 'snapshot' not in kwargs:
        # This number represents GiB
        kwargs['size'] = '10'
    params = {'Action': 'CreateVolume',
              'AvailabilityZone': kwargs['zone']}
    if 'size' in kwargs:
        params['Size'] = kwargs['size']
    if 'snapshot' in kwargs:
        params['SnapshotId'] = kwargs['snapshot']
    if 'type' in kwargs:
        params['VolumeType'] = kwargs['type']
    # IOPS is only valid for provisioned-IOPS (io1) volumes.
    if 'iops' in kwargs and kwargs.get('type', 'standard') == 'io1':
        params['Iops'] = kwargs['iops']
    if 'encrypted' in kwargs:
        params['Encrypted'] = kwargs['encrypted']
    log.debug(params)
    data = aws.query(params,
                     return_url=True,
                     return_root=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    # data is (response_items, request_url); flatten the response items
    # into one dict.
    r_data = {}
    for d in data[0]:
        for k, v in d.items():
            r_data[k] = v
    volume_id = r_data['volumeId']
    # Allow tags to be set upon creation
    if 'tags' in kwargs:
        if isinstance(kwargs['tags'], six.string_types):
            tags = yaml.safe_load(kwargs['tags'])
        else:
            tags = kwargs['tags']
        if isinstance(tags, dict):
            new_tags = set_tags(tags=tags,
                                resource_id=volume_id,
                                call='action',
                                location=get_location())
            r_data['tags'] = new_tags
    # Waits till volume is available
    if wait_to_finish:
        salt.utils.cloud.run_func_until_ret_arg(fun=describe_volumes,
                                                kwargs={'volume_id': volume_id},
                                                fun_call=call,
                                                argument_being_watched='status',
                                                required_argument_response='available')
    return r_data
def attach_volume(name=None, kwargs=None, instance_id=None, call=None):
    '''
    Attach a volume to an instance
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The attach_volume action must be called with -a or --action.'
        )
    kwargs = kwargs or {}
    if 'instance_id' in kwargs:
        instance_id = kwargs['instance_id']
    if name and not instance_id:
        instance_id = list_nodes_full(get_location())[name]['instanceId']
    # Validate the required pieces before issuing the query.
    if not name and not instance_id:
        log.error('Either a name or an instance_id is required.')
        return False
    if 'volume_id' not in kwargs:
        log.error('A volume_id is required.')
        return False
    if 'device' not in kwargs:
        log.error('A device is required (ex. /dev/sdb1).')
        return False
    params = {'Action': 'AttachVolume',
              'VolumeId': kwargs['volume_id'],
              'InstanceId': instance_id,
              'Device': kwargs['device']}
    log.debug(params)
    return aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
def show_volume(kwargs=None, call=None):
    '''
    Wrapper around describe_volumes.
    Here just to keep functionality.
    Might be depreciated later.
    '''
    return describe_volumes(kwargs or {}, call)
def detach_volume(name=None, kwargs=None, instance_id=None, call=None):
    '''
    Detach a volume from an instance
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The detach_volume action must be called with -a or --action.'
        )
    kwargs = kwargs or {}
    if 'volume_id' not in kwargs:
        log.error('A volume_id is required.')
        return False
    return aws.query({'Action': 'DetachVolume',
                      'VolumeId': kwargs['volume_id']},
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
def delete_volume(name=None, kwargs=None, instance_id=None, call=None):
    '''
    Delete a volume
    '''
    kwargs = kwargs or {}
    if 'volume_id' not in kwargs:
        log.error('A volume_id is required.')
        return False
    return aws.query({'Action': 'DeleteVolume',
                      'VolumeId': kwargs['volume_id']},
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
def describe_volumes(kwargs=None, call=None):
    '''
    Describe a volume (or volumes)
    volume_id
        One or more volume IDs. Multiple IDs must be separated by ",".
    TODO: Add all of the filters.
    '''
    if call != 'function':
        log.error(
            'The describe_volumes function must be called with -f '
            'or --function.'
        )
        return False
    kwargs = kwargs or {}
    params = {'Action': 'DescribeVolumes'}
    if 'volume_id' in kwargs:
        # Fan the comma-separated ids out into numbered VolumeId.N params.
        # (loop variable renamed; it previously shadowed the split list)
        for idx, vol_id in enumerate(kwargs['volume_id'].split(',')):
            params['VolumeId.{0}'.format(idx)] = vol_id
    log.debug(params)
    return aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
def create_keypair(kwargs=None, call=None):
    '''
    Create an SSH keypair
    '''
    if call != 'function':
        log.error(
            'The create_keypair function must be called with -f or --function.'
        )
        return False
    kwargs = kwargs or {}
    if 'keyname' not in kwargs:
        log.error('A keyname is required.')
        return False
    return aws.query({'Action': 'CreateKeyPair',
                      'KeyName': kwargs['keyname']},
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
def show_keypair(kwargs=None, call=None):
    '''
    Show the details of an SSH keypair
    '''
    if call != 'function':
        log.error(
            'The show_keypair function must be called with -f or --function.'
        )
        return False
    kwargs = kwargs or {}
    if 'keyname' not in kwargs:
        log.error('A keyname is required.')
        return False
    return aws.query({'Action': 'DescribeKeyPairs',
                      'KeyName.1': kwargs['keyname']},
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
def delete_keypair(kwargs=None, call=None):
    '''
    Delete an SSH keypair
    '''
    if call != 'function':
        log.error(
            'The delete_keypair function must be called with -f or --function.'
        )
        return False
    kwargs = kwargs or {}
    if 'keyname' not in kwargs:
        log.error('A keyname is required.')
        return False
    return aws.query({'Action': 'DeleteKeyPair',
                      'KeyName.1': kwargs['keyname']},
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
def create_snapshot(kwargs=None, call=None, wait_to_finish=False):
    '''
    Create a snapshot of an EBS volume.

    volume_id (required)
        The volume to snapshot.
    description
        Optional description; defaults to an empty string.
    wait_to_finish
        When True, poll describe_snapshots until the snapshot completes.
    '''
    if call != 'function':
        log.error(
            'The create_snapshot function must be called with -f '
            'or --function.'
        )
        return False
    # Bug fix: kwargs defaults to None but was used in a membership test,
    # which raised TypeError when no kwargs were supplied.
    if kwargs is None:
        kwargs = {}
    if 'volume_id' not in kwargs:
        log.error('A volume_id must be specified to create a snapshot.')
        return False
    if 'description' not in kwargs:
        kwargs['description'] = ''
    # Both keys are guaranteed present at this point.
    params = {'Action': 'CreateSnapshot',
              'VolumeId': kwargs['volume_id'],
              'Description': kwargs['description']}
    log.debug(params)
    data = aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    # Flatten the response items into one dict to pull the snapshot id.
    r_data = {}
    for d in data:
        for k, v in d.items():
            r_data[k] = v
    snapshot_id = r_data['snapshotId']
    # Waits till snapshot is completed
    if wait_to_finish:
        salt.utils.cloud.run_func_until_ret_arg(fun=describe_snapshots,
                                                kwargs={'snapshot_id': snapshot_id},
                                                fun_call=call,
                                                argument_being_watched='status',
                                                required_argument_response='completed')
    return data
def delete_snapshot(kwargs=None, call=None):
    '''
    Delete a snapshot

    snapshot_id (required)
        The snapshot to delete.
    '''
    if call != 'function':
        log.error(
            'The delete_snapshot function must be called with -f '
            'or --function.'
        )
        return False
    # Bug fix: kwargs defaults to None but was used in a membership test,
    # which raised TypeError when no kwargs were supplied.
    if kwargs is None:
        kwargs = {}
    if 'snapshot_id' not in kwargs:
        log.error('A snapshot_id must be specified to delete a snapshot.')
        return False
    params = {'Action': 'DeleteSnapshot',
              'SnapshotId': kwargs['snapshot_id']}
    log.debug(params)
    data = aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    return data
def copy_snapshot(kwargs=None, call=None):
    '''
    Copy a snapshot

    source_region (required)
        Region the source snapshot lives in.
    source_snapshot_id (required)
        The snapshot to copy.
    description
        Optional description; defaults to an empty string.
    '''
    if call != 'function':
        log.error(
            'The copy_snapshot function must be called with -f or --function.'
        )
        return False
    # Bug fix: kwargs defaults to None but was used in membership tests,
    # which raised TypeError when no kwargs were supplied.
    if kwargs is None:
        kwargs = {}
    if 'source_region' not in kwargs:
        log.error('A source_region must be specified to copy a snapshot.')
        return False
    if 'source_snapshot_id' not in kwargs:
        log.error('A source_snapshot_id must be specified to copy a snapshot.')
        return False
    if 'description' not in kwargs:
        kwargs['description'] = ''
    # All three keys are guaranteed present at this point.
    params = {'Action': 'CopySnapshot',
              'SourceRegion': kwargs['source_region'],
              'SourceSnapshotId': kwargs['source_snapshot_id'],
              'Description': kwargs['description']}
    log.debug(params)
    data = aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    return data
def describe_snapshots(kwargs=None, call=None):
    '''
    Describe a snapshot (or snapshots)
    snapshot_id
        One or more snapshot IDs. Multiple IDs must be separated by ",".
    owner
        Return the snapshots owned by the specified owner. Valid values
        include: self, amazon, <AWS Account ID>. Multiple values must be
        separated by ",".
    restorable_by
        One or more AWS accounts IDs that can create volumes from the snapshot.
        Multiple aws account IDs must be separated by ",".
    TODO: Add all of the filters.
    '''
    if call != 'function':
        log.error(
            'The describe_snapshot function must be called with -f '
            'or --function.'
        )
        return False
    # Bug fix: kwargs defaults to None but was used in membership tests,
    # which raised TypeError when no kwargs were supplied.
    if kwargs is None:
        kwargs = {}
    params = {'Action': 'DescribeSnapshots'}
    # The AWS correct way is to use non-plurals like snapshot_id INSTEAD of snapshot_ids.
    if 'snapshot_ids' in kwargs:
        kwargs['snapshot_id'] = kwargs['snapshot_ids']
    if 'snapshot_id' in kwargs:
        snapshot_ids = kwargs['snapshot_id'].split(',')
        for snapshot_index, snapshot_id in enumerate(snapshot_ids):
            params['SnapshotId.{0}'.format(snapshot_index)] = snapshot_id
    if 'owner' in kwargs:
        owners = kwargs['owner'].split(',')
        for owner_index, owner in enumerate(owners):
            params['Owner.{0}'.format(owner_index)] = owner
    if 'restorable_by' in kwargs:
        restorable_bys = kwargs['restorable_by'].split(',')
        for restorable_by_index, restorable_by in enumerate(restorable_bys):
            params[
                'RestorableBy.{0}'.format(restorable_by_index)
            ] = restorable_by
    log.debug(params)
    data = aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    return data
def get_console_output(
    name=None,
    instance_id=None,
    call=None,
    kwargs=None,
):
    '''
    Show the console output from the instance.

    By default, returns decoded data, not the Base64-encoded data that is
    actually returned from the EC2 API.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The get_console_output action must be called with '
            '-a or --action.'
        )

    if kwargs is None:
        kwargs = {}

    # Honor an instance_id passed via kwargs *before* resolving the name.
    # Previously this check ran after the _get_node() lookup had already
    # populated instance_id, so the kwarg was effectively dead code.
    if instance_id is None:
        instance_id = kwargs.pop('instance_id', None)

    if not instance_id:
        instance_id = _get_node(name)[name]['instanceId']

    params = {'Action': 'GetConsoleOutput',
              'InstanceId': instance_id}

    ret = {}
    data = aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')

    for item in data:
        # six keeps the iteration working on both Py2 and Py3;
        # dict.iterkeys()/itervalues() do not exist on Py3.
        if next(six.iterkeys(item)) == 'output':
            ret['output_decoded'] = binascii.a2b_base64(next(six.itervalues(item)))
        else:
            ret[next(six.iterkeys(item))] = next(six.itervalues(item))

    return ret
def get_password_data(
    name=None,
    kwargs=None,
    instance_id=None,
    call=None,
):
    '''
    Return password data for a Windows instance.

    By default only the encrypted password data will be returned. However, if a
    key_file is passed in, then a decrypted password will also be returned.

    Note that the key_file references the private key that was used to generate
    the keypair associated with this instance. This private key will _not_ be
    transmitted to Amazon; it is only used internally inside of Salt Cloud to
    decrypt data _after_ it has been received from Amazon.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a get_password_data mymachine
        salt-cloud -a get_password_data mymachine key_file=/root/ec2key.pem

    Note: PKCS1_v1_5 was added in PyCrypto 2.5
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The get_password_data action must be called with '
            '-a or --action.'
        )

    if kwargs is None:
        kwargs = {}

    # Consult kwargs for an explicit instance_id *before* resolving the name;
    # the original post-lookup check could never fire because instance_id was
    # already populated by _get_node() at that point.
    if instance_id is None:
        instance_id = kwargs.pop('instance_id', None)

    if not instance_id:
        instance_id = _get_node(name)[name]['instanceId']

    params = {'Action': 'GetPasswordData',
              'InstanceId': instance_id}

    ret = {}
    data = aws.query(params,
                     return_root=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')

    for item in data:
        # dict.keys()[0] breaks on Py3 (views are not indexable);
        # six.iterkeys/itervalues keep this portable.
        ret[next(six.iterkeys(item))] = next(six.itervalues(item))

    if not HAS_PYCRYPTO:
        # Without PyCrypto we cannot decrypt; return the raw encrypted data.
        return ret

    if 'key' not in kwargs:
        if 'key_file' in kwargs:
            with salt.utils.fopen(kwargs['key_file'], 'r') as kf_:
                kwargs['key'] = kf_.read()

    if 'key' in kwargs:
        pwdata = ret.get('passwordData', None)
        if pwdata is not None:
            rsa_key = kwargs['key']
            pwdata = base64.b64decode(pwdata)
            dsize = Crypto.Hash.SHA.digest_size
            # Random sentinel returned by decrypt() on failure, per the
            # PKCS1_v1_5 API.
            sentinel = Crypto.Random.new().read(15 + dsize)
            key_obj = Crypto.PublicKey.RSA.importKey(rsa_key)
            key_obj = PKCS1_v1_5.new(key_obj)
            ret['password'] = key_obj.decrypt(pwdata, sentinel)

    return ret
| 31.312468 | 139 | 0.548961 |
from __future__ import absolute_import
import os
import sys
import stat
import time
import uuid
import pprint
import logging
import yaml
import requests
import salt.ext.six as six
from salt.ext.six.moves import map, range, zip
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse, urlencode as _urlencode
import hmac
import hashlib
import binascii
import datetime
import base64
import salt.utils
from salt.utils import namespaced_function
from salt.cloud.libcloudfuncs import get_salt_interface
from salt._compat import ElementTree as ET
import salt.utils.aws as aws
import salt.utils.cloud
import salt.config as config
from salt.exceptions import (
SaltCloudException,
SaltCloudSystemExit,
SaltCloudConfigError,
SaltCloudExecutionTimeout,
SaltCloudExecutionFailure
)
try:
import Crypto
from Crypto.Cipher import PKCS1_v1_5
HAS_PYCRYPTO = True
except ImportError:
HAS_PYCRYPTO = False
# Module-level logger for this cloud driver.
log = logging.getLogger(__name__)
# Re-bind the shared libcloud helper so it runs with this module's globals
# (e.g. __opts__).
get_salt_interface = namespaced_function(get_salt_interface, globals())
# Map of long human-readable size descriptions to EC2 instance-type ids.
SIZE_MAP = {
    'Micro Instance': 't1.micro',
    'Small Instance': 'm1.small',
    'Medium Instance': 'm1.medium',
    'Large Instance': 'm1.large',
    'Extra Large Instance': 'm1.xlarge',
    'High-CPU Medium Instance': 'c1.medium',
    'High-CPU Extra Large Instance': 'c1.xlarge',
    'High-Memory Extra Large Instance': 'm2.xlarge',
    'High-Memory Double Extra Large Instance': 'm2.2xlarge',
    'High-Memory Quadruple Extra Large Instance': 'm2.4xlarge',
    'Cluster GPU Quadruple Extra Large Instance': 'cg1.4xlarge',
    'Cluster Compute Quadruple Extra Large Instance': 'cc1.4xlarge',
    'Cluster Compute Eight Extra Large Instance': 'cc2.8xlarge',
}
# Region name -> location identifier (presumably legacy libcloud provider
# names; not referenced by the code visible here — verify before removing).
EC2_LOCATIONS = {
    'ap-northeast-1': 'ec2_ap_northeast',
    'ap-southeast-1': 'ec2_ap_southeast',
    'ap-southeast-2': 'ec2_ap_southeast_2',
    'eu-west-1': 'ec2_eu_west',
    'sa-east-1': 'ec2_sa_east',
    'us-east-1': 'ec2_us_east',
    'us-west-1': 'ec2_us_west',
    'us-west-2': 'ec2_us_west_oregon',
}
# Region used when none is configured (see get_location()).
DEFAULT_LOCATION = 'us-east-1'
# EC2 API version sent with queries unless the provider config pins one.
DEFAULT_EC2_API_VERSION = '2014-10-01'
# Transient EC2 error codes that warrant retrying a query (see query()).
EC2_RETRY_CODES = [
    'RequestLimitExceeded',
    'InsufficientInstanceCapacity',
    'InternalError',
    'Unavailable',
    'InsufficientAddressCapacity',
    'InsufficientReservedInstanceCapacity',
]
def __virtual__():
    '''
    Check for EC2 configurations.

    Returns False when no EC2 provider is configured; otherwise validates
    every configured 'ec2' provider's private key file (existence and
    permissions) and returns True. Raises SaltCloudException on a missing
    or badly-permissioned key file.
    '''
    if get_configured_provider() is False:
        return False

    for provider, details in six.iteritems(__opts__['providers']):
        if 'provider' not in details or details['provider'] != 'ec2':
            continue

        if not os.path.exists(details['private_key']):
            raise SaltCloudException(
                'The EC2 key file {0!r} used in the {1!r} provider '
                'configuration does not exist\n'.format(
                    details['private_key'],
                    provider
                )
            )

        keymode = str(
            oct(stat.S_IMODE(os.stat(details['private_key']).st_mode))
        )
        # Python 2 renders octal as '0400' but Python 3 renders '0o400';
        # accept both spellings so the permission check works on either
        # interpreter instead of always failing on Py3.
        if keymode not in ('0400', '0600', '0o400', '0o600'):
            raise SaltCloudException(
                'The EC2 key file {0!r} used in the {1!r} provider '
                'configuration needs to be set to mode 0400 or 0600\n'.format(
                    details['private_key'],
                    provider
                )
            )

    return True
def get_configured_provider():
    '''
    Return this driver's provider configuration (via
    config.is_provider_configured), requiring 'id', 'key', 'keyname' and
    'private_key' to be present.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or 'ec2',
        ('id', 'key', 'keyname', 'private_key')
    )
def _xml_to_dict(xmltree):
if sys.version_info < (2, 7):
children_len = len(xmltree.getchildren())
else:
children_len = len(xmltree)
if children_len < 1:
name = xmltree.tag
if '}' in name:
comps = name.split('}')
name = comps[1]
return {name: xmltree.text}
xmldict = {}
for item in xmltree:
name = item.tag
if '}' in name:
comps = name.split('}')
name = comps[1]
if name not in xmldict:
if sys.version_info < (2, 7):
children_len = len(item.getchildren())
else:
children_len = len(item)
if children_len > 0:
xmldict[name] = _xml_to_dict(item)
else:
xmldict[name] = item.text
else:
if not isinstance(xmldict[name], list):
tempvar = xmldict[name]
xmldict[name] = []
xmldict[name].append(tempvar)
xmldict[name].append(_xml_to_dict(item))
return xmldict
def optimize_providers(providers):
    '''
    Collapse providers that share both a location and an (id, key)
    credential pair down to a single entry, and return the optimized
    mapping of provider name -> provider data.
    '''
    by_location = {}
    optimized = {}

    # Bucket providers by location, then by credential pair, keeping only
    # the first provider seen for each (location, credentials) combination.
    for name, data in six.iteritems(providers):
        if 'location' not in data:
            data['location'] = DEFAULT_LOCATION
        creds = (data['id'], data['key'])
        bucket = by_location.setdefault(data['location'], {})
        if creds not in bucket:
            bucket[creds] = {'name': name, 'data': data}

    for location, bucket in six.iteritems(by_location):
        for creds, entry in six.iteritems(bucket):
            if entry['name'] not in optimized:
                optimized[entry['name']] = entry['data']

    return optimized
def query(params=None, setname=None, requesturl=None, location=None,
          return_url=False, return_root=False):
    '''
    Perform a SigV2-signed GET query against the EC2 API and return the
    parsed response.

    params       -- dict of EC2 query parameters (e.g. {'Action': ...})
    setname      -- if given, return the child element whose tag matches it
    requesturl   -- full endpoint URL; derived from provider config if empty
    return_url   -- also return the request URL as a second tuple member
    return_root  -- return the whole response root instead of root[1]

    Retries up to 5 times on HTTP errors whose EC2 error code is listed in
    EC2_RETRY_CODES. On failure, returns {'error': ...} (plus the URL when
    return_url is True).
    '''
    provider = get_configured_provider()
    service_url = provider.get('service_url', 'amazonaws.com')
    # Retry budget for transient EC2 errors (see EC2_RETRY_CODES).
    attempts = 5
    while attempts > 0:
        # Copy so the signing headers never leak back into the caller's dict.
        params_with_headers = params.copy()
        timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
        if not location:
            location = get_location()
        if not requesturl:
            endpoint = provider.get(
                'endpoint',
                'ec2.{0}.{1}'.format(location, service_url)
            )
            requesturl = 'https://{0}/'.format(endpoint)
        else:
            endpoint = _urlparse(requesturl).netloc
            if endpoint == '':
                endpoint_err = (
                    'Could not find a valid endpoint in the '
                    'requesturl: {0}. Looking for something '
                    'like https://some.ec2.endpoint/?args').format(requesturl)
                log.error(endpoint_err)
                if return_url is True:
                    return {'error': endpoint_err}, requesturl
                return {'error': endpoint_err}
        log.debug('Using EC2 endpoint: {0}'.format(endpoint))
        method = 'GET'
        ec2_api_version = provider.get(
            'ec2_api_version',
            DEFAULT_EC2_API_VERSION
        )
        # Standard AWS Signature Version 2 request fields.
        params_with_headers['AWSAccessKeyId'] = provider['id']
        params_with_headers['SignatureVersion'] = '2'
        params_with_headers['SignatureMethod'] = 'HmacSHA256'
        params_with_headers['Timestamp'] = '{0}'.format(timestamp)
        params_with_headers['Version'] = ec2_api_version
        # SigV2 requires the query string built from sorted parameter names,
        # with spaces encoded as %20 rather than '+'.
        keys = sorted(params_with_headers)
        values = list(map(params_with_headers.get, keys))
        querystring = _urlencode(list(zip(keys, values)))
        querystring = querystring.replace('+', '%20')
        # Canonical string-to-sign: METHOD \n host \n path \n querystring.
        uri = '{0}\n{1}\n/\n{2}'.format(method.encode('utf-8'),
                                        endpoint.encode('utf-8'),
                                        querystring.encode('utf-8'))
        hashed = hmac.new(provider['key'], uri, hashlib.sha256)
        sig = binascii.b2a_base64(hashed.digest())
        params_with_headers['Signature'] = sig.strip()
        log.debug('EC2 Request: {0}'.format(requesturl))
        log.trace('EC2 Request Parameters: {0}'.format(params_with_headers))
        try:
            result = requests.get(requesturl, params=params_with_headers)
            log.debug(
                'EC2 Response Status Code: {0}'.format(
                    result.status_code
                )
            )
            log.trace(
                'EC2 Response Text: {0}'.format(
                    result.text
                )
            )
            result.raise_for_status()
            # Success: leave the retry loop with `result` populated.
            break
        except requests.exceptions.HTTPError as exc:
            root = ET.fromstring(exc.response.content)
            data = _xml_to_dict(root)
            err_code = data.get('Errors', {}).get('Error', {}).get('Code', '')
            if attempts > 0 and err_code and err_code in EC2_RETRY_CODES:
                # Transient error: burn one attempt and retry after a pause.
                attempts -= 1
                log.error(
                    'EC2 Response Status Code and Error: [{0} {1}] {2}; '
                    'Attempts remaining: {3}'.format(
                        exc.response.status_code, exc, data, attempts
                    )
                )
                time.sleep(2)
                continue
            log.error(
                'EC2 Response Status Code and Error: [{0} {1}] {2}'.format(
                    exc.response.status_code, exc, data
                )
            )
            if return_url is True:
                return {'error': data}, requesturl
            return {'error': data}
        # NOTE(review): this try/else clause is unreachable — the try block
        # always ends in `break` — and it references `exc`, which is only
        # bound inside the except clause; looks like dead code to remove.
        else:
            log.error(
                'EC2 Response Status Code and Error: [{0} {1}] {2}'.format(
                    exc.response.status_code, exc, data
                )
            )
            if return_url is True:
                return {'error': data}, requesturl
            return {'error': data}
    response = result.text
    root = ET.fromstring(response)
    # By default skip root[0] (the requestId element) and use root[1].
    items = root[1]
    if return_root is True:
        items = root
    if setname:
        # Scan the root's children for the tag matching `setname`
        # (namespace-qualified tags are split on '}').
        if sys.version_info < (2, 7):
            children_len = len(root.getchildren())
        else:
            children_len = len(root)
        for item in range(0, children_len):
            comps = root[item].tag.split('}')
            if comps[1] == setname:
                items = root[item]
    ret = []
    for item in items:
        ret.append(_xml_to_dict(item))
    if return_url is True:
        return ret, requesturl
    return ret
def _wait_for_spot_instance(update_callback,
                            update_args=None,
                            update_kwargs=None,
                            timeout=10 * 60,
                            interval=30,
                            interval_multiplier=1,
                            max_failures=10):
    '''
    Poll ``update_callback`` until it reports an active spot instance
    request, and return whatever the callback returned.

    The callback contract: return False to record a failure, None to keep
    waiting, anything else to finish. After ``max_failures`` False results
    SaltCloudExecutionFailure is raised; after ``timeout`` seconds
    SaltCloudExecutionTimeout is raised. ``interval`` seconds elapse
    between polls, multiplied by ``interval_multiplier`` each round when
    the multiplier is greater than 1 (exponential back-off).
    '''
    if update_args is None:
        update_args = ()
    if update_kwargs is None:
        update_kwargs = {}
    # Remember the original budget for the timeout error message;
    # `timeout` itself counts down below.
    duration = timeout
    while True:
        log.debug(
            'Waiting for spot instance reservation. Giving up in '
            '00:{0:02d}:{1:02d}'.format(
                int(timeout // 60),
                int(timeout % 60)
            )
        )
        data = update_callback(*update_args, **update_kwargs)
        if data is False:
            log.debug(
                'update_callback has returned False which is considered a '
                'failure. Remaining Failures: {0}'.format(max_failures)
            )
            max_failures -= 1
            if max_failures <= 0:
                raise SaltCloudExecutionFailure(
                    'Too many failures occurred while waiting for '
                    'the spot instance reservation to become active.'
                )
        elif data is not None:
            # Callback produced a real result: the request is active.
            return data
        if timeout < 0:
            raise SaltCloudExecutionTimeout(
                'Unable to get an active spot instance request for '
                '00:{0:02d}:{1:02d}'.format(
                    int(duration // 60),
                    int(duration % 60)
                )
            )
        time.sleep(interval)
        timeout -= interval
        if interval_multiplier > 1:
            interval *= interval_multiplier
            if interval > timeout:
                # Cap the back-off so one sleep cannot wildly overshoot the
                # remaining time budget.
                interval = timeout + 1
                log.info('Interval multiplier in effect; interval is '
                         'now {0}s'.format(interval))
def avail_sizes(call=None):
    '''
    Return a dict of all available VM sizes on the cloud provider, grouped
    by instance family, with relevant data about each size.

    Note: this table is hard-coded, not queried from the EC2 API.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )
    sizes = {
        'Cluster Compute': {
            'cc2.8xlarge': {
                'id': 'cc2.8xlarge',
                'cores': '16 (2 x Intel Xeon E5-2670, eight-core with '
                         'hyperthread)',
                'disk': '3360 GiB (4 x 840 GiB)',
                'ram': '60.5 GiB'
            },
            'cc1.4xlarge': {
                'id': 'cc1.4xlarge',
                'cores': '8 (2 x Intel Xeon X5570, quad-core with '
                         'hyperthread)',
                'disk': '1690 GiB (2 x 840 GiB)',
                'ram': '22.5 GiB'
            },
        },
        'Cluster CPU': {
            'cg1.4xlarge': {
                'id': 'cg1.4xlarge',
                'cores': '8 (2 x Intel Xeon X5570, quad-core with '
                         'hyperthread), plus 2 NVIDIA Tesla M2050 GPUs',
                'disk': '1680 GiB (2 x 840 GiB)',
                'ram': '22.5 GiB'
            },
        },
        'High CPU': {
            'c1.xlarge': {
                'id': 'c1.xlarge',
                'cores': '8 (with 2.5 ECUs each)',
                'disk': '1680 GiB (4 x 420 GiB)',
                'ram': '8 GiB'
            },
            'c1.medium': {
                'id': 'c1.medium',
                'cores': '2 (with 2.5 ECUs each)',
                'disk': '340 GiB (1 x 340 GiB)',
                'ram': '1.7 GiB'
            },
            'c3.large': {
                'id': 'c3.large',
                'cores': '2 (with 3.5 ECUs each)',
                'disk': '32 GiB (2 x 16 GiB SSD)',
                'ram': '3.75 GiB'
            },
            'c3.xlarge': {
                'id': 'c3.xlarge',
                'cores': '4 (with 3.5 ECUs each)',
                'disk': '80 GiB (2 x 40 GiB SSD)',
                'ram': '7.5 GiB'
            },
            'c3.2xlarge': {
                'id': 'c3.2xlarge',
                'cores': '8 (with 3.5 ECUs each)',
                'disk': '160 GiB (2 x 80 GiB SSD)',
                'ram': '15 GiB'
            },
            'c3.4xlarge': {
                'id': 'c3.4xlarge',
                'cores': '16 (with 3.5 ECUs each)',
                'disk': '320 GiB (2 x 80 GiB SSD)',
                'ram': '30 GiB'
            },
            'c3.8xlarge': {
                'id': 'c3.8xlarge',
                'cores': '32 (with 3.5 ECUs each)',
                'disk': '320 GiB (2 x 160 GiB SSD)',
                'ram': '60 GiB'
            }
        },
        'High I/O': {
            'hi1.4xlarge': {
                'id': 'hi1.4xlarge',
                'cores': '8 (with 4.37 ECUs each)',
                'disk': '2 TiB',
                'ram': '60.5 GiB'
            },
        },
        'High Memory': {
            'm2.2xlarge': {
                'id': 'm2.2xlarge',
                'cores': '4 (with 3.25 ECUs each)',
                'disk': '840 GiB (1 x 840 GiB)',
                'ram': '34.2 GiB'
            },
            'm2.xlarge': {
                'id': 'm2.xlarge',
                'cores': '2 (with 3.25 ECUs each)',
                'disk': '410 GiB (1 x 410 GiB)',
                'ram': '17.1 GiB'
            },
            'm2.4xlarge': {
                'id': 'm2.4xlarge',
                'cores': '8 (with 3.25 ECUs each)',
                'disk': '1680 GiB (2 x 840 GiB)',
                'ram': '68.4 GiB'
            },
            'r3.large': {
                'id': 'r3.large',
                'cores': '2 (with 3.25 ECUs each)',
                'disk': '32 GiB (1 x 32 GiB SSD)',
                'ram': '15 GiB'
            },
            'r3.xlarge': {
                'id': 'r3.xlarge',
                'cores': '4 (with 3.25 ECUs each)',
                'disk': '80 GiB (1 x 80 GiB SSD)',
                'ram': '30.5 GiB'
            },
            'r3.2xlarge': {
                'id': 'r3.2xlarge',
                'cores': '8 (with 3.25 ECUs each)',
                'disk': '160 GiB (1 x 160 GiB SSD)',
                'ram': '61 GiB'
            },
            'r3.4xlarge': {
                'id': 'r3.4xlarge',
                'cores': '16 (with 3.25 ECUs each)',
                'disk': '320 GiB (1 x 320 GiB SSD)',
                'ram': '122 GiB'
            },
            'r3.8xlarge': {
                'id': 'r3.8xlarge',
                'cores': '32 (with 3.25 ECUs each)',
                'disk': '640 GiB (2 x 320 GiB SSD)',
                'ram': '244 GiB'
            }
        },
        'High-Memory Cluster': {
            'cr1.8xlarge': {
                'id': 'cr1.8xlarge',
                'cores': '16 (2 x Intel Xeon E5-2670, eight-core)',
                'disk': '240 GiB (2 x 120 GiB SSD)',
                'ram': '244 GiB'
            },
        },
        'High Storage': {
            'hs1.8xlarge': {
                'id': 'hs1.8xlarge',
                'cores': '16 (8 cores + 8 hyperthreads)',
                'disk': '48 TiB (24 x 2 TiB hard disk drives)',
                'ram': '117 GiB'
            },
        },
        'Micro': {
            't1.micro': {
                'id': 't1.micro',
                'cores': '1',
                'disk': 'EBS',
                'ram': '615 MiB'
            },
        },
        'Standard': {
            'm1.xlarge': {
                'id': 'm1.xlarge',
                'cores': '4 (with 2 ECUs each)',
                'disk': '1680 GB (4 x 420 GiB)',
                'ram': '15 GiB'
            },
            'm1.large': {
                'id': 'm1.large',
                'cores': '2 (with 2 ECUs each)',
                'disk': '840 GiB (2 x 420 GiB)',
                'ram': '7.5 GiB'
            },
            'm1.medium': {
                'id': 'm1.medium',
                'cores': '1',
                'disk': '400 GiB',
                'ram': '3.75 GiB'
            },
            'm1.small': {
                'id': 'm1.small',
                'cores': '1',
                'disk': '150 GiB',
                'ram': '1.7 GiB'
            },
            'm3.2xlarge': {
                'id': 'm3.2xlarge',
                'cores': '8 (with 3.25 ECUs each)',
                'disk': 'EBS',
                'ram': '30 GiB'
            },
            'm3.xlarge': {
                'id': 'm3.xlarge',
                'cores': '4 (with 3.25 ECUs each)',
                'disk': 'EBS',
                'ram': '15 GiB'
            },
        }
    }
    return sizes
def avail_images(kwargs=None, call=None):
    '''
    Return a dict of every AMI visible to the configured owner, keyed by
    image id.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    # An explicit owner in kwargs wins; otherwise fall back to the provider
    # configuration, defaulting to 'amazon'.
    if 'owner' in kwargs:
        owner = kwargs['owner']
    else:
        provider = get_configured_provider()
        owner = config.get_cloud_config_value(
            'owner', provider, __opts__, default='amazon'
        )

    images = aws.query({'Action': 'DescribeImages', 'Owner': owner},
                       location=get_location(),
                       provider=get_provider(),
                       opts=__opts__,
                       sigver='4')

    return dict((image['imageId'], image) for image in images)
def script(vm_):
    '''
    Return the script deployment object for this VM, rendered with the
    minion configuration serialized to YAML.
    '''
    minion_yaml = salt.utils.cloud.salt_config_to_yaml(
        salt.utils.cloud.minion_config(__opts__, vm_)
    )
    script_name = config.get_cloud_config_value('script', vm_, __opts__)
    return salt.utils.cloud.os_script(script_name, vm_, __opts__, minion_yaml)
def keyname(vm_):
    '''
    Return the keyname (EC2 SSH key pair name) configured for this VM.
    '''
    return config.get_cloud_config_value(
        'keyname', vm_, __opts__, search_global=False
    )
def securitygroup(vm_):
    '''
    Return the security group(s) configured for this VM (name or list of
    names).
    '''
    return config.get_cloud_config_value(
        'securitygroup', vm_, __opts__, search_global=False
    )
def iam_profile(vm_):
    '''
    Return the IAM instance profile configured for this VM (either a
    profile name or a full ARN — see request_instance()).
    '''
    return config.get_cloud_config_value(
        'iam_profile', vm_, __opts__, search_global=False
    )
def ssh_interface(vm_):
    '''
    Return the IP-address class to use for SSH ('public_ips' by default).
    '''
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
def get_ssh_gateway_config(vm_):
    '''
    Return the SSH gateway configuration for this VM as a dict, or None
    when no gateway is configured.

    Raises SaltCloudConfigError when the configured private key file does
    not exist, or when neither a key nor a password is provided.
    '''
    ssh_gateway = config.get_cloud_config_value(
        'ssh_gateway', vm_, __opts__, default=None,
        search_global=False
    )

    # Use six.string_types rather than str: on Python 2, YAML-loaded config
    # values are usually unicode, which `isinstance(x, str)` would reject,
    # silently disabling the gateway.
    if not isinstance(ssh_gateway, six.string_types):
        return None

    ssh_gateway_config = {'ssh_gateway': ssh_gateway}

    ssh_gateway_config['ssh_gateway_port'] = config.get_cloud_config_value(
        'ssh_gateway_port', vm_, __opts__, default=None,
        search_global=False
    )
    ssh_gateway_config['ssh_gateway_user'] = config.get_cloud_config_value(
        'ssh_gateway_username', vm_, __opts__, default=None,
        search_global=False
    )
    ssh_gateway_config['ssh_gateway_key'] = config.get_cloud_config_value(
        'ssh_gateway_private_key', vm_, __opts__, default=None,
        search_global=False
    )
    ssh_gateway_config['ssh_gateway_password'] = config.get_cloud_config_value(
        'ssh_gateway_password', vm_, __opts__, default=None,
        search_global=False
    )

    # A key file must exist if given; otherwise a password is mandatory.
    key_filename = ssh_gateway_config['ssh_gateway_key']
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_gateway_private_key {0!r} does not exist'.format(
                key_filename
            )
        )
    elif (
        key_filename is None and
        not ssh_gateway_config['ssh_gateway_password']
    ):
        raise SaltCloudConfigError(
            'No authentication method. Please define: '
            ' ssh_gateway_password or ssh_gateway_private_key'
        )

    return ssh_gateway_config
def get_location(vm_=None):
    '''
    Return the EC2 region to use: the CLI/global 'location' option wins,
    then the VM (or provider) config value, then DEFAULT_LOCATION.
    '''
    return __opts__.get(
        'location',
        config.get_cloud_config_value(
            'location',
            vm_ or get_configured_provider(),
            __opts__,
            default=DEFAULT_LOCATION,
            search_global=False
        )
    )
def avail_locations(call=None):
    '''
    Return a dict of all EC2 regions, keyed by region name, with each
    region's name and endpoint.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    regions = aws.query({'Action': 'DescribeRegions'},
                        location=get_location(),
                        provider=get_provider(),
                        opts=__opts__,
                        sigver='4')

    ret = {}
    for region in regions:
        ret[region['regionName']] = {
            'name': region['regionName'],
            'endpoint': region['regionEndpoint'],
        }
    return ret
def get_availability_zone(vm_):
    '''
    Return the availability zone configured for this VM, validating that it
    exists in the current region and is currently usable. Returns None when
    no zone is configured; raises SaltCloudException on an invalid or
    unavailable zone.
    '''
    avz = config.get_cloud_config_value(
        'availability_zone', vm_, __opts__, search_global=False
    )
    if avz is None:
        return None

    zones = _list_availability_zones()

    if avz not in zones:
        raise SaltCloudException(
            'The specified availability zone isn\'t valid in this region: '
            '{0}\n'.format(avz)
        )

    # The zone exists; make sure it is actually usable right now.
    if zones[avz] != 'available':
        raise SaltCloudException(
            'The specified availability zone isn\'t currently available: '
            '{0}\n'.format(avz)
        )

    return avz
def get_tenancy(vm_):
    '''
    Return the 'tenancy' value configured for this VM (passed through to
    the EC2 Placement.Tenancy parameter; not valid for spot instances).
    '''
    return config.get_cloud_config_value(
        'tenancy', vm_, __opts__, search_global=False
    )
def get_subnetid(vm_):
    '''
    Return the VPC subnet id configured for this VM.
    '''
    return config.get_cloud_config_value(
        'subnetid', vm_, __opts__, search_global=False
    )
def securitygroupid(vm_):
    '''
    Return the security group id(s) configured for this VM (id or list of
    ids).
    '''
    return config.get_cloud_config_value(
        'securitygroupid', vm_, __opts__, search_global=False
    )
def get_placementgroup(vm_):
    '''
    Return the placement group configured for this VM.
    '''
    return config.get_cloud_config_value(
        'placementgroup', vm_, __opts__, search_global=False
    )
def get_spot_config(vm_):
    '''
    Return the spot-instance configuration for this VM (a dict that must
    contain at least 'spot_price' — see request_instance()), or None.
    '''
    return config.get_cloud_config_value(
        'spot_config', vm_, __opts__, search_global=False
    )
def get_provider(vm_=None):
    '''
    Return the provider name for a VM dict (defaulting to 'ec2'), or for
    the active provider when no VM is given. Only the part before the
    first ':' of an 'alias:driver' string is kept.
    '''
    if vm_ is None:
        provider = __active_provider_name__ or 'ec2'
    else:
        provider = vm_.get('provider', 'ec2')

    # 'alias:driver' -> 'alias'
    if ':' in provider:
        return provider.split(':')[0]
    return provider
def _list_availability_zones():
    '''
    Return a dict mapping each availability-zone name in the current region
    to its state (e.g. 'available').
    '''
    params = {'Action': 'DescribeAvailabilityZones',
              'Filter.0.Name': 'region-name',
              'Filter.0.Value.0': get_location()}
    zones = aws.query(params,
                      location=get_location(),
                      provider=get_provider(),
                      opts=__opts__,
                      sigver='4')

    return dict((zone['zoneName'], zone['zoneState']) for zone in zones)
def block_device_mappings(vm_):
    '''
    Return the block device mappings configured for this VM, e.g.:

        [{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'}]

    Note: unlike the other config wrappers here, this one searches the
    global config too (search_global=True).
    '''
    return config.get_cloud_config_value(
        'block_device_mappings', vm_, __opts__, search_global=True
    )
def _request_eip(interface):
    '''
    Allocate a new Elastic IP for the given interface definition and return
    its allocation id, or None when the response contains no allocation.
    The interface's 'domain' defaults to 'vpc' (and is written back).
    '''
    params = {'Action': 'AllocateAddress',
              'Domain': interface.setdefault('domain', 'vpc')}
    response = aws.query(params,
                         return_root=True,
                         location=get_location(),
                         provider=get_provider(),
                         opts=__opts__,
                         sigver='4')
    for entry in response:
        if 'allocationId' in entry:
            return entry['allocationId']
    return None
def _create_eni(interface):
    '''
    Create a network interface (ENI) from a profile interface definition,
    optionally associating Elastic IPs, and return a dict with its
    DeviceIndex and NetworkInterfaceId. Raises SaltCloudConfigError when
    the configured SubnetId does not exist, and SaltCloudException when the
    CreateNetworkInterface call yields no interface id.
    '''
    # Validate the subnet exists before attempting to create the interface.
    params = {'Action': 'DescribeSubnets'}
    subnet_query = aws.query(params,
                             return_root=True,
                             location=get_location(),
                             provider=get_provider(),
                             opts=__opts__,
                             sigver='4')
    found = False
    for subnet_query_result in subnet_query:
        if 'item' in subnet_query_result:
            for subnet in subnet_query_result['item']:
                if subnet['subnetId'] == interface['SubnetId']:
                    found = True
                    break
    if not found:
        raise SaltCloudConfigError(
            'No such subnet <{0}>'.format(interface['SubnetId'])
        )
    params = {'Action': 'CreateNetworkInterface',
              'SubnetId': interface['SubnetId']}
    # Scalar options are passed through as-is...
    for k in ('Description', 'PrivateIpAddress',
              'SecondaryPrivateIpAddressCount'):
        if k in interface:
            params[k] = interface[k]
    # ...while structured options are flattened into dotted query params.
    for k in ('PrivateIpAddresses', 'SecurityGroupId'):
        if k in interface:
            params.update(_param_from_config(k, interface[k]))
    result = aws.query(params,
                       return_root=True,
                       location=get_location(),
                       provider=get_provider(),
                       opts=__opts__,
                       sigver='4')
    # result[0] is the requestId element; result[1] describes the new ENI.
    eni_desc = result[1]
    if not eni_desc or not eni_desc.get('networkInterfaceId'):
        raise SaltCloudException('Failed to create interface: {0}'.format(result))
    eni_id = eni_desc.get('networkInterfaceId')
    log.debug(
        'Created network interface {0} inst {1}'.format(
            eni_id, interface['DeviceIndex']
        )
    )
    # Three mutually-exclusive EIP strategies: attach an existing
    # allocation, allocate one new EIP, or allocate one per private address.
    if interface.get('associate_eip'):
        _associate_eip_with_interface(eni_id, interface.get('associate_eip'))
    elif interface.get('allocate_new_eip'):
        _new_eip = _request_eip(interface)
        _associate_eip_with_interface(eni_id, _new_eip)
    elif interface.get('allocate_new_eips'):
        addr_list = _list_interface_private_addresses(eni_desc)
        eip_list = []
        for idx, addr in enumerate(addr_list):
            eip_list.append(_request_eip(interface))
        for idx, addr in enumerate(addr_list):
            _associate_eip_with_interface(eni_id, eip_list[idx], addr)
    return {'DeviceIndex': interface['DeviceIndex'],
            'NetworkInterfaceId': eni_id}
def _list_interface_private_addresses(eni_desc):
primary = eni_desc.get('privateIpAddress')
if not primary:
return None
addresses = [primary]
lst = eni_desc.get('privateIpAddressesSet', {}).get('item', [])
if not isinstance(lst, list):
return addresses
for entry in lst:
if entry.get('primary') == 'true':
continue
if entry.get('privateIpAddress'):
addresses.append(entry.get('privateIpAddress'))
return addresses
def _associate_eip_with_interface(eni_id, eip_id, private_ip=None):
    '''
    Associate an Elastic IP allocation with a network interface, retrying
    up to five times on transient query errors. Returns the association id
    on success; raises SaltCloudException when retries are exhausted or the
    association response carries no id.
    '''
    retries = 5
    while retries > 0:
        params = {'Action': 'AssociateAddress',
                  'NetworkInterfaceId': eni_id,
                  'AllocationId': eip_id}
        if private_ip:
            params['PrivateIpAddress'] = private_ip
        retries = retries - 1
        result = aws.query(params,
                           return_root=True,
                           location=get_location(),
                           provider=get_provider(),
                           opts=__opts__,
                           sigver='4')
        # Query-level error: back off briefly and retry.
        if isinstance(result, dict) and result.get('error'):
            time.sleep(1)
            continue
        # Response without an associationId: give up immediately and fall
        # through to the raise below (no further retries).
        if not result[2].get('associationId'):
            break
        log.debug(
            'Associated ElasticIP address {0} with interface {1}'.format(
                eip_id, eni_id
            )
        )
        return result[2].get('associationId')
    raise SaltCloudException(
        'Could not associate elastic ip address '
        '<{0}> with network interface <{1}>'.format(
            eip_id, eni_id
        )
    )
def _update_enis(interfaces, instance):
    '''
    Sync the DeleteOnTermination attribute of an instance's attached
    network interfaces with the 'delete_interface_on_terminate' setting in
    the profile's interface definitions (matched by DeviceIndex).
    Always returns None; bails out early on duplicate DeviceIndex values.
    '''
    config_enis = {}
    instance_enis = []
    # Index profile interface definitions by device index (stringified, to
    # match the string deviceIndex values in the instance description).
    for interface in interfaces:
        if 'DeviceIndex' in interface:
            if interface['DeviceIndex'] in config_enis:
                log.error(
                    'Duplicate DeviceIndex in profile. Cannot update ENIs.'
                )
                return None
            config_enis[str(interface['DeviceIndex'])] = interface
    # A single attached ENI deserializes as a dict, multiple as a list.
    query_enis = instance[0]['instancesSet']['item']['networkInterfaceSet']['item']
    if isinstance(query_enis, list):
        for query_eni in query_enis:
            instance_enis.append((query_eni['networkInterfaceId'], query_eni['attachment']))
    else:
        instance_enis.append((query_enis['networkInterfaceId'], query_enis['attachment']))
    for eni_id, eni_data in instance_enis:
        # setdefault also writes the default back into the profile dict.
        params = {'Action': 'ModifyNetworkInterfaceAttribute',
                  'NetworkInterfaceId': eni_id,
                  'Attachment.AttachmentId': eni_data['attachmentId'],
                  'Attachment.DeleteOnTermination': config_enis[eni_data['deviceIndex']].setdefault('delete_interface_on_terminate', True)}
        set_eni_attributes = aws.query(params,
                                       return_root=True,
                                       location=get_location(),
                                       provider=get_provider(),
                                       opts=__opts__,
                                       sigver='4')
    return None
def _param_from_config(key, data):
param = {}
if isinstance(data, dict):
for k, v in data.items():
param.update(_param_from_config('{0}.{1}'.format(key, k), v))
elif isinstance(data, list) or isinstance(data, tuple):
for idx, conf_item in enumerate(data):
prefix = '{0}.{1}'.format(key, idx)
param.update(_param_from_config(prefix, conf_item))
else:
if isinstance(data, bool):
param.update({key: str(data).lower()})
else:
param.update({key: data})
return param
def request_instance(vm_=None, call=None):
if call == 'function':
raise SaltCloudSystemExit(
'The request_instance action must be called with -a or --action.'
)
location = vm_.get('location', get_location(vm_))
spot_config = get_spot_config(vm_)
if spot_config is not None:
if 'spot_price' not in spot_config:
raise SaltCloudSystemExit(
'Spot instance config for {0} requires a spot_price '
'attribute.'.format(vm_['name'])
)
params = {'Action': 'RequestSpotInstances',
'InstanceCount': '1',
'Type': spot_config['type']
if 'type' in spot_config else 'one-time',
'SpotPrice': spot_config['spot_price']}
spot_prefix = 'LaunchSpecification.'
else:
min_instance = config.get_cloud_config_value(
'min_instance', vm_, __opts__, search_global=False, default=1
)
max_instance = config.get_cloud_config_value(
'max_instance', vm_, __opts__, search_global=False, default=1
)
params = {'Action': 'RunInstances',
'MinCount': min_instance,
'MaxCount': max_instance}
spot_prefix = ''
image_id = vm_['image']
params[spot_prefix + 'ImageId'] = image_id
userdata_file = config.get_cloud_config_value(
'userdata_file', vm_, __opts__, search_global=False, default=None
)
if userdata_file is None:
userdata = config.get_cloud_config_value(
'userdata', vm_, __opts__, search_global=False, default=None
)
else:
log.trace('userdata_file: {0}'.format(userdata_file))
if os.path.exists(userdata_file):
with salt.utils.fopen(userdata_file, 'r') as fh_:
userdata = fh_.read()
if userdata is not None:
params['UserData'] = base64.b64encode(userdata)
vm_size = config.get_cloud_config_value(
'size', vm_, __opts__, search_global=False
)
params[spot_prefix + 'InstanceType'] = vm_size
ex_keyname = keyname(vm_)
if ex_keyname:
params[spot_prefix + 'KeyName'] = ex_keyname
ex_securitygroup = securitygroup(vm_)
if ex_securitygroup:
if not isinstance(ex_securitygroup, list):
params[spot_prefix + 'SecurityGroup.1'] = ex_securitygroup
else:
for counter, sg_ in enumerate(ex_securitygroup):
params[spot_prefix + 'SecurityGroup.{0}'.format(counter)] = sg_
ex_iam_profile = iam_profile(vm_)
if ex_iam_profile:
try:
if ex_iam_profile.startswith('arn:aws:iam:'):
params[
spot_prefix + 'IamInstanceProfile.Arn'
] = ex_iam_profile
else:
params[
spot_prefix + 'IamInstanceProfile.Name'
] = ex_iam_profile
except AttributeError:
raise SaltCloudConfigError(
'\'iam_profile\' should be a string value.'
)
az_ = get_availability_zone(vm_)
if az_ is not None:
params[spot_prefix + 'Placement.AvailabilityZone'] = az_
tenancy_ = get_tenancy(vm_)
if tenancy_ is not None:
if spot_config is not None:
raise SaltCloudConfigError(
'Spot instance config for {0} does not support '
'specifying tenancy.'.format(vm_['name'])
)
params['Placement.Tenancy'] = tenancy_
subnetid_ = get_subnetid(vm_)
if subnetid_ is not None:
params[spot_prefix + 'SubnetId'] = subnetid_
ex_securitygroupid = securitygroupid(vm_)
if ex_securitygroupid:
if not isinstance(ex_securitygroupid, list):
params[spot_prefix + 'SecurityGroupId.1'] = ex_securitygroupid
else:
for (counter, sg_) in enumerate(ex_securitygroupid):
params[
spot_prefix + 'SecurityGroupId.{0}'.format(counter)
] = sg_
placementgroup_ = get_placementgroup(vm_)
if placementgroup_ is not None:
params[spot_prefix + 'Placement.GroupName'] = placementgroup_
ex_blockdevicemappings = block_device_mappings(vm_)
if ex_blockdevicemappings:
params.update(_param_from_config(spot_prefix + 'BlockDeviceMapping',
ex_blockdevicemappings))
network_interfaces = config.get_cloud_config_value(
'network_interfaces',
vm_,
__opts__,
search_global=False
)
if network_interfaces:
eni_devices = []
for interface in network_interfaces:
log.debug('Create network interface: {0}'.format(interface))
_new_eni = _create_eni(interface)
eni_devices.append(_new_eni)
params.update(_param_from_config(spot_prefix + 'NetworkInterface',
eni_devices))
set_ebs_optimized = config.get_cloud_config_value(
'ebs_optimized', vm_, __opts__, search_global=False
)
if set_ebs_optimized is not None:
if not isinstance(set_ebs_optimized, bool):
raise SaltCloudConfigError(
'\'ebs_optimized\' should be a boolean value.'
)
params[spot_prefix + 'EbsOptimized'] = set_ebs_optimized
set_del_root_vol_on_destroy = config.get_cloud_config_value(
'del_root_vol_on_destroy', vm_, __opts__, search_global=False
)
if set_del_root_vol_on_destroy is not None:
if not isinstance(set_del_root_vol_on_destroy, bool):
raise SaltCloudConfigError(
'\'del_root_vol_on_destroy\' should be a boolean value.'
)
vm_['set_del_root_vol_on_destroy'] = set_del_root_vol_on_destroy
if set_del_root_vol_on_destroy:
log.info('Attempting to look up root device name for image id {0} on '
'VM {1}'.format(image_id, vm_['name']))
rd_params = {
'Action': 'DescribeImages',
'ImageId.1': image_id
}
try:
rd_data = aws.query(rd_params,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
if 'error' in rd_data:
return rd_data['error']
log.debug('EC2 Response: {0!r}'.format(rd_data))
except Exception as exc:
log.error(
'Error getting root device name for image id {0} for '
'VM {1}: \n{2}'.format(image_id, vm_['name'], exc),
exc_info_on_loglevel=logging.DEBUG
)
raise
if not rd_data:
err_msg = 'There was an error querying EC2 for the root device ' \
'of image id {0}. Empty response.'.format(image_id)
raise SaltCloudSystemExit(err_msg)
rd_name = None
if 'blockDeviceMapping' in rd_data[0]:
if rd_data[0]['blockDeviceMapping'] is None:
rd_name = None
elif isinstance(rd_data[0]['blockDeviceMapping']['item'], list):
rd_name = rd_data[0]['blockDeviceMapping']['item'][0]['deviceName']
else:
rd_name = rd_data[0]['blockDeviceMapping']['item']['deviceName']
log.info('Found root device name: {0}'.format(rd_name))
if rd_name is not None:
if ex_blockdevicemappings:
dev_list = [
dev['DeviceName'] for dev in ex_blockdevicemappings
]
else:
dev_list = []
if rd_name in dev_list:
dev_index = dev_list.index(rd_name)
termination_key = '{0}BlockDeviceMapping.{1}.Ebs.DeleteOnTermination'.format(spot_prefix, dev_index)
params[termination_key] = str(set_del_root_vol_on_destroy).lower()
else:
dev_index = len(dev_list)
params[
'{0}BlockDeviceMapping.{1}.DeviceName'.format(
spot_prefix, dev_index
)
] = rd_name
params[
'{0}BlockDeviceMapping.{1}.Ebs.DeleteOnTermination'.format(
spot_prefix, dev_index
)
] = str(set_del_root_vol_on_destroy).lower()
set_del_all_vols_on_destroy = config.get_cloud_config_value(
'del_all_vols_on_destroy', vm_, __opts__, search_global=False, default=False
)
if set_del_all_vols_on_destroy is not None:
if not isinstance(set_del_all_vols_on_destroy, bool):
raise SaltCloudConfigError(
'\'del_all_vols_on_destroy\' should be a boolean value.'
)
salt.utils.cloud.fire_event(
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
{'kwargs': params, 'location': location},
transport=__opts__['transport']
)
provider = get_provider(vm_)
try:
data = aws.query(params,
'instancesSet',
location=location,
provider=provider,
opts=__opts__,
sigver='4')
if 'error' in data:
return data['error']
except Exception as exc:
log.error(
'Error creating {0} on EC2 when trying to run the initial '
'deployment: \n{1}'.format(
vm_['name'], exc
),
exc_info_on_loglevel=logging.DEBUG
)
raise
# to become active before we continue
if spot_config:
sir_id = data[0]['spotInstanceRequestId']
def __query_spot_instance_request(sir_id, location):
params = {'Action': 'DescribeSpotInstanceRequests',
'SpotInstanceRequestId.1': sir_id}
data = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
if not data:
log.error(
'There was an error while querying EC2. Empty response'
)
# Trigger a failure in the wait for spot instance method
return False
if isinstance(data, dict) and 'error' in data:
log.warn(
'There was an error in the query. {0}'
.format(data['error'])
)
# Trigger a failure in the wait for spot instance method
return False
log.debug('Returned query data: {0}'.format(data))
if 'state' in data[0]:
state = data[0]['state']
if state == 'active':
return data
if state == 'open':
# Still waiting for an active state
log.info('Spot instance status: {0}'.format(
data[0]['status']['message']
))
return None
if state in ['cancelled', 'failed', 'closed']:
# Request will never be active, fail
log.error('Spot instance request resulted in state \'{0}\'. '
'Nothing else we can do here.')
return False
salt.utils.cloud.fire_event(
'event',
'waiting for spot instance',
'salt/cloud/{0}/waiting_for_spot'.format(vm_['name']),
transport=__opts__['transport']
)
try:
data = _wait_for_spot_instance(
__query_spot_instance_request,
update_args=(sir_id, location),
timeout=config.get_cloud_config_value(
'wait_for_spot_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_spot_interval', vm_, __opts__, default=30),
interval_multiplier=config.get_cloud_config_value(
'wait_for_spot_interval_multiplier',
vm_,
__opts__,
default=1),
max_failures=config.get_cloud_config_value(
'wait_for_spot_max_failures',
vm_,
__opts__,
default=10),
)
log.debug('wait_for_spot_instance data {0}'.format(data))
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# Cancel the existing spot instance request
params = {'Action': 'CancelSpotInstanceRequests',
'SpotInstanceRequestId.1': sir_id}
data = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
log.debug('Canceled spot instance request {0}. Data '
'returned: {1}'.format(sir_id, data))
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
return data, vm_
def query_instance(vm_=None, call=None):
    '''
    Query a newly-created instance until it reports an IP address.

    Fires the ``querying`` (and optionally ``query_reactor``) salt-cloud
    events, retries the initial ``DescribeInstances`` call a few times
    (the instance may not be visible immediately after creation), then
    waits for the public IP -- or the private IP when ``ssh_interface``
    is ``private_ips`` -- to appear. On timeout/failure the instance is
    destroyed and a ``SaltCloudSystemExit`` is raised.

    vm_
        The VM dict; must carry ``instance_id`` and ``name``.
    call
        Must not be ``'function'``.
    '''
    if call == 'function':
        # Technically this function may be called other ways too, but it
        # definitely cannot be called with --function.
        raise SaltCloudSystemExit(
            'The query_instance action must be called with -a or --action.'
        )
    instance_id = vm_['instance_id']
    location = vm_.get('location', get_location(vm_))
    salt.utils.cloud.fire_event(
        'event',
        'querying instance',
        'salt/cloud/{0}/querying'.format(vm_['name']),
        {'instance_id': instance_id},
        transport=__opts__['transport']
    )
    log.debug('The new VM instance_id is {0}'.format(instance_id))
    params = {'Action': 'DescribeInstances',
              'InstanceId.1': instance_id}
    provider = get_provider(vm_)
    # Retry the describe call: EC2 can return an error or an empty result
    # for a short window right after the instance is requested.
    attempts = 5
    while attempts > 0:
        data, requesturl = aws.query(params,  # pylint: disable=W0632
                                     location=location,
                                     provider=provider,
                                     opts=__opts__,
                                     return_url=True,
                                     sigver='4')
        log.debug('The query returned: {0}'.format(data))
        if isinstance(data, dict) and 'error' in data:
            log.warn(
                'There was an error in the query. {0} attempts '
                'remaining: {1}'.format(
                    attempts, data['error']
                )
            )
            attempts -= 1
            # Just a little delay between attempts...
            time.sleep(1)
            continue
        if isinstance(data, list) and not data:
            log.warn(
                'Query returned an empty list. {0} attempts '
                'remaining.'.format(attempts)
            )
            attempts -= 1
            # Just a little delay between attempts...
            time.sleep(1)
            continue
        break
    else:
        # while/else: we ran out of attempts without hitting the break
        raise SaltCloudSystemExit(
            'An error occurred while creating VM: {0}'.format(data['error'])
        )

    def __query_ip_address(params, url):
        # Helper for salt.utils.cloud.wait_for_ip: returns the query data
        # once an IP address shows up, False on a query error (to trigger
        # a failure count), or None (fall-through) while still waiting.
        data = aws.query(params,
                         #requesturl=url,
                         location=location,
                         provider=provider,
                         opts=__opts__,
                         sigver='4')
        if not data:
            log.error(
                'There was an error while querying EC2. Empty response'
            )
            # Trigger a failure in the wait for IP function
            return False
        if isinstance(data, dict) and 'error' in data:
            log.warn(
                'There was an error in the query. {0}'.format(data['error'])
            )
            # Trigger a failure in the wait for IP function
            return False
        log.debug('Returned query data: {0}'.format(data))
        if 'ipAddress' in data[0]['instancesSet']['item']:
            return data
        if ssh_interface(vm_) == 'private_ips' and \
           'privateIpAddress' in data[0]['instancesSet']['item']:
            return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_ip_address,
            update_args=(params, requesturl),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
            interval_multiplier=config.get_cloud_config_value(
                'wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))
    if 'reactor' in vm_ and vm_['reactor'] is True:
        salt.utils.cloud.fire_event(
            'event',
            'instance queried',
            'salt/cloud/{0}/query_reactor'.format(vm_['name']),
            {'data': data},
            transport=__opts__['transport']
        )
    return data
def wait_for_instance(
        vm_=None,
        data=None,
        ip_address=None,
        display_ssh_output=True,
        call=None,
):
    '''
    Wait for an instance to become accessible.

    For Windows deployments (``win_installer`` set) this waits for SMB
    (port 445) and validates credentials, fetching the auto-generated
    password when ``win_password`` is ``auto``. Otherwise it waits for
    SSH, optionally seeds ``known_hosts_file`` from the console output,
    and probes the configured usernames until one authenticates.
    Returns the (possibly updated) ``vm_`` dict.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The wait_for_instance action must be called with -a or --action.'
        )
    if vm_ is None:
        vm_ = {}
    if data is None:
        data = {}
    ssh_gateway_config = vm_.get(
        'ssh_gateway_config', get_ssh_gateway_config(vm_)
    )
    salt.utils.cloud.fire_event(
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(vm_['name']),
        {'ip_address': ip_address},
        transport=__opts__['transport']
    )
    # Default connect timeout: 900 seconds (15 minutes)
    ssh_connect_timeout = config.get_cloud_config_value(
        'ssh_connect_timeout', vm_, __opts__, 900
    )
    if config.get_cloud_config_value('win_installer', vm_, __opts__):
        # Windows branch: authenticate over SMB instead of SSH
        username = config.get_cloud_config_value(
            'win_username', vm_, __opts__, default='Administrator'
        )
        win_passwd = config.get_cloud_config_value(
            'win_password', vm_, __opts__, default=''
        )
        if win_passwd and win_passwd == 'auto':
            log.debug('Waiting for auto-generated Windows EC2 password')
            while True:
                password_data = get_password_data(
                    name=vm_['name'],
                    kwargs={
                        'key_file': vm_['private_key'],
                    },
                    call='action',
                )
                log.debug(password_data)
                win_passwd = password_data.get('password', None)
                if win_passwd is None:
                    # Password not generated yet; poll again in a minute
                    time.sleep(60)
                else:
                    break
        if not salt.utils.cloud.wait_for_port(ip_address,
                                              port=445,
                                              timeout=ssh_connect_timeout):
            raise SaltCloudSystemExit(
                'Failed to connect to remote windows host'
            )
        if not salt.utils.cloud.validate_windows_cred(ip_address,
                                                      username,
                                                      win_passwd):
            raise SaltCloudSystemExit(
                'Failed to authenticate against remote windows host'
            )
    elif salt.utils.cloud.wait_for_port(ip_address,
                                        timeout=ssh_connect_timeout,
                                        gateway=ssh_gateway_config
                                        ):
        # SSH port is open; optionally pre-seed known_hosts with the host
        # keys printed on the instance console, so the first connection
        # can verify them.
        known_hosts_file = config.get_cloud_config_value(
            'known_hosts_file', vm_, __opts__, default=None
        )
        if known_hosts_file:
            console = {}
            while 'output_decoded' not in console:
                console = get_console_output(
                    instance_id=vm_['instance_id'],
                    call='action',
                )
                pprint.pprint(console)
                time.sleep(5)
            output = console['output_decoded']
            comps = output.split('-----BEGIN SSH HOST KEY KEYS-----')
            if len(comps) < 2:
                # No host key block found in the console output
                return False
            comps = comps[1].split('-----END SSH HOST KEY KEYS-----')
            keys = ''
            for line in comps[0].splitlines():
                if not line:
                    continue
                keys += '\n{0} {1}'.format(ip_address, line)
            with salt.utils.fopen(known_hosts_file, 'a') as fp_:
                fp_.write(keys)
            fp_.close()
        for user in vm_['usernames']:
            if salt.utils.cloud.wait_for_passwd(
                host=ip_address,
                username=user,
                ssh_timeout=config.get_cloud_config_value(
                    'wait_for_passwd_timeout', vm_, __opts__, default=1 * 60
                ),
                key_filename=vm_['key_filename'],
                display_ssh_output=display_ssh_output,
                gateway=ssh_gateway_config,
                maxtries=config.get_cloud_config_value(
                    'wait_for_passwd_maxtries', vm_, __opts__, default=15
                ),
                known_hosts_file=config.get_cloud_config_value(
                    'known_hosts_file', vm_, __opts__,
                    default='/dev/null'
                ),
            ):
                # First username that authenticates wins
                __opts__['ssh_username'] = user
                vm_['ssh_username'] = user
                break
        else:
            # for/else: every candidate username failed
            raise SaltCloudSystemExit(
                'Failed to authenticate against remote ssh'
            )
    else:
        raise SaltCloudSystemExit(
            'Failed to connect to remote ssh'
        )
    if 'reactor' in vm_ and vm_['reactor'] is True:
        salt.utils.cloud.fire_event(
            'event',
            'ssh is available',
            'salt/cloud/{0}/ssh_ready_reactor'.format(vm_['name']),
            {'ip_address': ip_address},
            transport=__opts__['transport']
        )
    return vm_
def create(vm_=None, call=None):
    '''
    Create a single VM from a data dict.

    Requests the instance (or re-uses an existing ``instance_id`` in
    ``vm_``), waits for it to come up, tags it, optionally updates ENIs
    and attaches volumes, bootstraps Salt onto it, and returns the node
    data dict.
    '''
    if call:
        raise SaltCloudSystemExit(
            'You cannot create an instance with -a or -f.'
        )
    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
        transport=__opts__['transport']
    )
    salt.utils.cloud.cachedir_index_add(
        vm_['name'], vm_['profile'], 'ec2', vm_['provider']
    )
    key_filename = config.get_cloud_config_value(
        'private_key', vm_, __opts__, search_global=False, default=None
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename {0!r} does not exist'.format(
                key_filename
            )
        )
    vm_['key_filename'] = key_filename
    # Fetch the ssh_gateway config early, so we do not create the VM
    # and not be able to access it via the gateway.
    ssh_gateway_config = get_ssh_gateway_config(vm_)
    vm_['ssh_gateway_config'] = ssh_gateway_config
    location = get_location(vm_)
    vm_['location'] = location
    log.info('Creating Cloud VM {0} in {1}'.format(vm_['name'], location))
    vm_['usernames'] = salt.utils.cloud.ssh_usernames(
        vm_,
        __opts__,
        default_users=(
            'ec2-user', 'ubuntu', 'fedora', 'admin', 'bitnami', 'root'
        )
    )
    if 'instance_id' in vm_:
        # This was probably created via another process, and doesn't have
        # minion keys yet, so generate them here if they are missing
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for {0[name]!r}'.format(vm_))
            vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize',
                    vm_,
                    __opts__
                )
            )
    else:
        # Put together all of the information required to request the instance,
        # and then fire off the request for it
        data, vm_ = request_instance(vm_, location)
        # If data is a str, it's an error
        if isinstance(data, str):
            log.error('Error requesting instance: {0}'.format(data))
            return {}
        # One instance becomes the primary; any extras are queued for
        # caching via queue_instances
        vm_['instance_id_list'] = []
        for instance in data:
            vm_['instance_id_list'].append(instance['instanceId'])
        vm_['instance_id'] = vm_['instance_id_list'].pop()
        if len(vm_['instance_id_list']) > 0:
            queue_instances(vm_['instance_id_list'])
    # Wait for vital information, such as the IP addresses, to be available
    data = query_instance(vm_)
    # Validate and apply the configured tags; a Name tag is always set
    tags = config.get_cloud_config_value('tag',
                                         vm_,
                                         __opts__,
                                         {},
                                         search_global=False)
    if not isinstance(tags, dict):
        raise SaltCloudConfigError(
            '\'tag\' should be a dict.'
        )
    for value in six.itervalues(tags):
        if not isinstance(value, str):
            raise SaltCloudConfigError(
                '\'tag\' values must be strings. Try quoting the values. '
                'e.g. "2013-09-19T20:09:46Z".'
            )
    tags['Name'] = vm_['name']
    salt.utils.cloud.fire_event(
        'event',
        'setting tags',
        'salt/cloud/{0}/tagging'.format(vm_['name']),
        {'tags': tags},
        transport=__opts__['transport']
    )
    set_tags(
        vm_['name'],
        tags,
        instance_id=vm_['instance_id'],
        call='action',
        location=location
    )
    network_interfaces = config.get_cloud_config_value(
        'network_interfaces',
        vm_,
        __opts__,
        search_global=False
    )
    if network_interfaces:
        _update_enis(network_interfaces, data)
    log.info('Created node {0}'.format(vm_['name']))
    instance = data[0]['instancesSet']['item']
    # Pick the address used to reach the node, honoring ssh_interface
    if ssh_interface(vm_) == 'private_ips':
        ip_address = instance['privateIpAddress']
        log.info('Salt node data. Private_ip: {0}'.format(ip_address))
    else:
        ip_address = instance['ipAddress']
        log.info('Salt node data. Public_ip: {0}'.format(ip_address))
    vm_['ssh_host'] = ip_address
    # The address the Salt minion uses may differ from the SSH address
    if get_salt_interface(vm_) == 'private_ips':
        salt_ip_address = instance['privateIpAddress']
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    else:
        salt_ip_address = instance['ipAddress']
        log.debug('Salt interface set to: {0}'.format(salt_ip_address))
    vm_['salt_host'] = salt_ip_address
    display_ssh_output = config.get_cloud_config_value(
        'display_ssh_output', vm_, __opts__, default=True
    )
    vm_ = wait_for_instance(
        vm_, data, ip_address, display_ssh_output
    )
    ret = instance.copy()
    # Get ANY defined volumes settings, merging data, in the following order
    # 1. VM config
    # 2. Profile config
    # 3. Global configuration
    volumes = config.get_cloud_config_value(
        'volumes', vm_, __opts__, search_global=True
    )
    if volumes:
        salt.utils.cloud.fire_event(
            'event',
            'attaching volumes',
            'salt/cloud/{0}/attaching_volumes'.format(vm_['name']),
            {'volumes': volumes},
            transport=__opts__['transport']
        )
        log.info('Create and attach volumes to node {0}'.format(vm_['name']))
        created = create_attach_volumes(
            vm_['name'],
            {
                'volumes': volumes,
                'zone': ret['placement']['availabilityZone'],
                'instance_id': ret['instanceId'],
                'del_all_vols_on_destroy': vm_.get('set_del_all_vols_on_destroy', False)
            },
            call='action'
        )
        ret['Attached Volumes'] = created
    # Bootstrap Salt onto the node and merge the result into the return data
    for key, value in salt.utils.cloud.bootstrap(vm_, __opts__).items():
        ret.setdefault(key, value)
    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug(
        '{0[name]!r} VM creation details:\n{1}'.format(
            vm_, pprint.pformat(instance)
        )
    )
    event_data = {
        'name': vm_['name'],
        'profile': vm_['profile'],
        'provider': vm_['provider'],
        'instance_id': vm_['instance_id'],
    }
    if volumes:
        event_data['volumes'] = volumes
    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        event_data,
        transport=__opts__['transport']
    )
    return ret
def queue_instances(instances):
    '''
    Fetch the node data for each given instance ID and store it in the
    salt-cloud node cache for later use.
    '''
    for inst_id in instances:
        node_data = _get_node(instance_id=inst_id)
        salt.utils.cloud.cache_node(
            node_data, __active_provider_name__, __opts__
        )
def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True):
    '''
    Create and attach volumes to an existing node.

    ``kwargs`` must contain ``volumes`` (a list of volume dicts, or a
    YAML string describing one) and ``zone``; ``instance_id`` is looked
    up from the node name when absent. Returns a list of human-readable
    messages, one per successfully attached volume.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The create_attach_volumes action must be called with '
            '-a or --action.'
        )
    if 'instance_id' not in kwargs:
        kwargs['instance_id'] = _get_node(name)[name]['instanceId']
    if isinstance(kwargs['volumes'], str):
        # Volumes may arrive serialized as a YAML string (e.g. from CLI)
        volumes = yaml.safe_load(kwargs['volumes'])
    else:
        volumes = kwargs['volumes']
    ret = []
    for volume in volumes:
        created = False
        volume_name = '{0} on {1}'.format(volume['device'], name)
        # Build the creation/lookup parameters for this volume
        volume_dict = {
            'volume_name': volume_name,
            'zone': kwargs['zone']
        }
        if 'volume_id' in volume:
            # Re-use an existing volume
            volume_dict['volume_id'] = volume['volume_id']
        elif 'snapshot' in volume:
            # Create the volume from a snapshot
            volume_dict['snapshot'] = volume['snapshot']
        else:
            # Create a fresh volume of the requested size/type
            volume_dict['size'] = volume['size']
            if 'type' in volume:
                volume_dict['type'] = volume['type']
            if 'iops' in volume:
                volume_dict['iops'] = volume['iops']
            if 'encrypted' in volume:
                volume_dict['encrypted'] = volume['encrypted']
        if 'volume_id' not in volume_dict:
            created_volume = create_volume(volume_dict, call='function', wait_to_finish=wait_to_finish)
            created = True
            for item in created_volume:
                if 'volumeId' in item:
                    volume_dict['volume_id'] = item['volumeId']
        attach = attach_volume(
            name,
            {'volume_id': volume_dict['volume_id'],
             'device': volume['device']},
            instance_id=kwargs['instance_id'],
            call='action'
        )
        # Update the delvol parameter for this volume, but only for
        # volumes created here (not for re-used pre-existing ones)
        delvols_on_destroy = kwargs.get('del_all_vols_on_destroy', None)
        if attach and created and delvols_on_destroy is not None:
            _toggle_delvol(instance_id=kwargs['instance_id'],
                           device=volume['device'],
                           value=delvols_on_destroy)
        if attach:
            msg = (
                '{0} attached to {1} (aka {2}) as device {3}'.format(
                    volume_dict['volume_id'],
                    kwargs['instance_id'],
                    name,
                    volume['device']
                )
            )
            log.info(msg)
            ret.append(msg)
    return ret
def stop(name, call=None):
    '''
    Stop a node via the EC2 ``StopInstances`` action.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The stop action must be called with -a or --action.'
        )
    log.info('Stopping node {0}'.format(name))
    instance_id = _get_node(name)[name]['instanceId']
    return aws.query(
        {'Action': 'StopInstances', 'InstanceId.1': instance_id},
        location=get_location(),
        provider=get_provider(),
        opts=__opts__,
        sigver='4')
def start(name, call=None):
    '''
    Start a node via the EC2 ``StartInstances`` action.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )
    log.info('Starting node {0}'.format(name))
    instance_id = _get_node(name)[name]['instanceId']
    return aws.query(
        {'Action': 'StartInstances', 'InstanceId.1': instance_id},
        location=get_location(),
        provider=get_provider(),
        opts=__opts__,
        sigver='4')
def set_tags(name=None,
             tags=None,
             call=None,
             location=None,
             instance_id=None,
             resource_id=None,
             kwargs=None):  # pylint: disable=W0613
    '''
    Set tags for a resource, retrying until EC2 reports them back.

    name
        Node name used to look up the instance ID when no explicit
        ``instance_id``/``resource_id`` is given.
    tags
        Dict of tags to apply; falls back to ``kwargs`` when empty.
    location
        Region to operate in; defaults to the configured location.
    instance_id / resource_id
        Explicit target; may also be passed inside ``kwargs``.

    Raises ``SaltCloudSystemExit`` if the tags cannot be verified after
    several attempts.
    '''
    if kwargs is None:
        kwargs = {}
    # Resolve the region once and use it consistently below. Previously
    # the CreateTags query called get_location() while the verification
    # get_tags() call used the passed-in location, so an explicit
    # location could tag a resource in one region and verify in another.
    if location is None:
        location = get_location()
    if instance_id is None:
        if 'resource_id' in kwargs:
            resource_id = kwargs['resource_id']
            del kwargs['resource_id']
        if 'instance_id' in kwargs:
            instance_id = kwargs['instance_id']
            del kwargs['instance_id']
        if resource_id is None:
            if instance_id is None:
                instance_id = _get_node(name, location)[name]['instanceId']
        else:
            instance_id = resource_id
    # This second check is a safety, in case the above still failed to produce
    # a usable ID
    if instance_id is None:
        return {
            'Error': 'A valid instance_id or resource_id was not specified.'
        }
    params = {'Action': 'CreateTags',
              'ResourceId.1': instance_id}
    log.debug('Tags to set for {0}: {1}'.format(name, tags))
    if kwargs and not tags:
        tags = kwargs
    for idx, (tag_k, tag_v) in enumerate(six.iteritems(tags)):
        params['Tag.{0}.Key'.format(idx)] = tag_k
        params['Tag.{0}.Value'.format(idx)] = tag_v
    attempts = 5
    while attempts >= 0:
        aws.query(params,
                  setname='tagSet',
                  location=location,
                  provider=get_provider(),
                  opts=__opts__,
                  sigver='4')
        # Verify that EC2 actually reports the tags we just set;
        # CreateTags can appear to succeed before the tags are visible.
        settags = get_tags(
            instance_id=instance_id, call='action', location=location
        )
        log.debug('Setting the tags returned: {0}'.format(settags))
        failed_to_set_tags = False
        for tag in settags:
            if tag['key'] not in tags:
                # We were not setting this tag
                continue
            if str(tags.get(tag['key'])) != str(tag['value']):
                # Not set to the proper value!?
                failed_to_set_tags = True
                break
        if failed_to_set_tags:
            log.warn(
                'Failed to set tags. Remaining attempts {0}'.format(
                    attempts
                )
            )
            attempts -= 1
            # Just a little delay between attempts...
            time.sleep(1)
            continue
        return settags
    raise SaltCloudSystemExit(
        'Failed to set tags on {0}!'.format(name)
    )
def get_tags(name=None,
             instance_id=None,
             call=None,
             location=None,
             kwargs=None,
             resource_id=None):  # pylint: disable=W0613
    '''
    Retrieve tags for a resource via ``DescribeTags``.

    The target may be given as ``instance_id``, ``resource_id``, a node
    ``name`` (resolved through ``list_nodes_full``), or via
    ``kwargs['instance_id']`` / ``kwargs['resource_id']``.
    '''
    if location is None:
        location = get_location()
    # kwargs defaults to None; normalize so the membership tests below
    # cannot raise TypeError when no kwargs were supplied.
    if kwargs is None:
        kwargs = {}
    if instance_id is None:
        if resource_id is None:
            if name:
                instances = list_nodes_full(location)
                if name in instances:
                    instance_id = instances[name]['instanceId']
            elif 'instance_id' in kwargs:
                instance_id = kwargs['instance_id']
            elif 'resource_id' in kwargs:
                instance_id = kwargs['resource_id']
        else:
            instance_id = resource_id
    params = {'Action': 'DescribeTags',
              'Filter.1.Name': 'resource-id',
              'Filter.1.Value': instance_id}
    return aws.query(params,
                     setname='tagSet',
                     location=location,
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
def del_tags(name=None,
             kwargs=None,
             call=None,
             instance_id=None,
             resource_id=None):  # pylint: disable=W0613
    '''
    Delete tags from a resource. Requires ``kwargs['tags']`` as a
    comma-separated list of tag names; returns the remaining tags.
    '''
    if kwargs is None:
        kwargs = {}
    if 'tags' not in kwargs:
        raise SaltCloudSystemExit(
            'A tag or tags must be specified using tags=list,of,tags'
        )
    if not name and 'resource_id' in kwargs:
        instance_id = kwargs.pop('resource_id')
    if not instance_id:
        instance_id = _get_node(name)[name]['instanceId']
    params = {'Action': 'DeleteTags',
              'ResourceId.1': instance_id}
    for idx, tag_name in enumerate(kwargs['tags'].split(',')):
        params['Tag.{0}.Key'.format(idx)] = tag_name
    aws.query(params,
              setname='tagSet',
              location=get_location(),
              provider=get_provider(),
              opts=__opts__,
              sigver='4')
    if resource_id:
        return get_tags(resource_id=resource_id)
    return get_tags(instance_id=instance_id)
def rename(name, kwargs, call=None):
    '''
    Rename a node by updating its Name tag and renaming its key in the
    master's pki directory. Requires ``kwargs['newname']``.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The rename action must be called with -a or --action.'
        )
    new_name = kwargs['newname']
    log.info('Renaming {0} to {1}'.format(name, new_name))
    set_tags(name, {'Name': new_name}, call='action')
    salt.utils.cloud.rename_key(__opts__['pki_dir'], name, new_name)
def destroy(name, call=None):
    '''
    Destroy a node.

    Refuses to act when API termination protection is enabled. May first
    rename the node (``rename_on_destroy``) so the name is immediately
    free for re-use, cancels any associated spot instance request, fires
    the destroy events, and cleans up cache entries. Returns a dict of
    termination results.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    node_metadata = _get_node(name)
    instance_id = node_metadata[name]['instanceId']
    # NOTE(review): this reads 'spotInstanceRequestId' from the outer,
    # name-keyed mapping rather than from node_metadata[name] -- verify
    # which level actually carries the key.
    sir_id = node_metadata.get('spotInstanceRequestId')
    protected = show_term_protect(
        name=name,
        instance_id=instance_id,
        call='action',
        quiet=True
    )
    salt.utils.cloud.fire_event(
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        {'name': name, 'instance_id': instance_id},
        transport=__opts__['transport']
    )
    if protected == 'true':
        raise SaltCloudSystemExit(
            'This instance has been protected from being destroyed. '
            'Use the following command to disable protection:\n\n'
            'salt-cloud -a disable_term_protect {0}'.format(
                name
            )
        )
    ret = {}
    # Optionally rename first: the terminated instance lingers in EC2 for
    # a while, and renaming frees the name for an immediate replacement
    if config.get_cloud_config_value('rename_on_destroy',
                                     get_configured_provider(),
                                     __opts__,
                                     search_global=False) is True:
        newname = '{0}-DEL{1}'.format(name, uuid.uuid4().hex)
        rename(name, kwargs={'newname': newname}, call='action')
        log.info(
            'Machine will be identified as {0} until it has been '
            'cleaned up.'.format(
                newname
            )
        )
        ret['newname'] = newname
    params = {'Action': 'TerminateInstances',
              'InstanceId.1': instance_id}
    location = get_location()
    provider = get_provider()
    result = aws.query(params,
                       location=location,
                       provider=provider,
                       opts=__opts__,
                       sigver='4')
    log.info(result)
    ret.update(result[0])
    # If this instance is part of a spot instance request, we
    # need to cancel it as well
    if sir_id is not None:
        params = {'Action': 'CancelSpotInstanceRequests',
                  'SpotInstanceRequestId.1': sir_id}
        result = aws.query(params,
                           location=location,
                           provider=provider,
                           opts=__opts__,
                           sigver='4')
        ret['spotInstance'] = result[0]
    salt.utils.cloud.fire_event(
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        {'name': name, 'instance_id': instance_id},
        transport=__opts__['transport']
    )
    # Remove the node from the salt-cloud caches
    salt.utils.cloud.cachedir_index_del(name)
    if __opts__.get('update_cachedir', False) is True:
        salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
    return ret
def reboot(name, call=None):
    '''
    Reboot a node via the EC2 ``RebootInstances`` action. Returns a
    completion marker when the API reports an empty (successful) result.
    '''
    instance_id = _get_node(name)[name]['instanceId']
    result = aws.query(
        {'Action': 'RebootInstances', 'InstanceId.1': instance_id},
        setname='tagSet',
        location=get_location(),
        provider=get_provider(),
        opts=__opts__,
        sigver='4')
    if result == []:
        log.info('Complete')
        return {'Reboot': 'Complete'}
def show_image(kwargs, call=None):
    '''
    Show the details from EC2 regarding an AMI. Requires
    ``kwargs['image']``.
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_image action must be called with -f or --function.'
        )
    result = aws.query(
        {'ImageId.1': kwargs['image'], 'Action': 'DescribeImages'},
        setname='tagSet',
        location=get_location(),
        provider=get_provider(),
        opts=__opts__,
        sigver='4')
    log.info(result)
    return result
def show_instance(name=None, instance_id=None, call=None, kwargs=None):
    '''
    Show the details from EC2 regarding a single named node or instance
    ID, caching the node data as a side effect.
    '''
    if not name and call == 'action':
        raise SaltCloudSystemExit(
            'The show_instance action requires a name.'
        )
    if call == 'function':
        name = kwargs.get('name')
        instance_id = kwargs.get('instance_id')
        if not name and not instance_id:
            raise SaltCloudSystemExit(
                'The show_instance function requires '
                'either a name or an instance_id'
            )
    node = _get_node(name=name, instance_id=instance_id)
    salt.utils.cloud.cache_node(node, __active_provider_name__, __opts__)
    return node
def _get_node(name=None, instance_id=None, location=None):
    '''
    Return the data for a single node, keyed by its name.

    name
        Node name, or a raw instance ID (detected by its ``i-`` prefix).
    instance_id
        Explicit instance ID; takes precedence over the name filter.
    location
        Region to query; defaults to the configured location.

    Retries the query several times on KeyError (transient/partial EC2
    responses); returns an empty dict when all attempts fail.
    '''
    if location is None:
        location = get_location()
    params = {'Action': 'DescribeInstances'}
    # Accept a bare instance ID in place of a name. Classic IDs are
    # 'i-' plus 8 hex chars (10 total); newer long-format IDs are
    # 'i-' plus 17 hex chars (19 total).
    if str(name).startswith('i-') and len(name) in (10, 19):
        instance_id = name
    if instance_id:
        params['InstanceId.1'] = instance_id
    else:
        params['Filter.1.Name'] = 'tag:Name'
        params['Filter.1.Value.1'] = name
    log.trace(params)
    provider = get_provider()
    attempts = 10
    while attempts >= 0:
        try:
            instances = aws.query(params,
                                  location=location,
                                  provider=provider,
                                  opts=__opts__,
                                  sigver='4')
            return _extract_instance_info(instances)
        except KeyError:
            attempts -= 1
            log.debug(
                'Failed to get the data for the node {0!r}. Remaining '
                'attempts {1}'.format(
                    name, attempts
                )
            )
            # Just a little delay between attempts...
            time.sleep(0.5)
    return {}
def list_nodes_full(location=None, call=None):
    '''
    Return all data on nodes. When no location is given, every region
    referenced by an EC2 profile is queried and the results are merged.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f '
            'or --function.'
        )
    if location:
        return _list_nodes_full(location)
    ret = {}
    locations = {
        get_location(profile)
        for profile in six.itervalues(__opts__['profiles'])
        if _vm_provider_driver(profile)
    }
    for loc in locations:
        ret.update(_list_nodes_full(loc))
    return ret
def _vm_provider_driver(vm_):
    '''
    Return 'ec2' == driver truthiness for a profile's provider, or None
    when the provider string is not a configured ``alias:driver`` pair.

    The provider string is expected as ``alias:driver``; previously a
    string without exactly one colon crashed the tuple unpacking with a
    ValueError instead of simply excluding the profile.
    '''
    comps = vm_['provider'].split(':')
    if len(comps) != 2:
        # Not in alias:driver form; nothing to validate against
        return None
    alias, driver = comps
    if alias not in __opts__['providers']:
        return None
    if driver not in __opts__['providers'][alias]:
        return None
    return driver == 'ec2'
def _extract_name_tag(item):
if 'tagSet' in item:
tagset = item['tagSet']
if isinstance(tagset['item'], list):
for tag in tagset['item']:
if tag['key'] == 'Name':
return tag['value']
return item['instanceId']
return item['tagSet']['item']['value']
return item['instanceId']
def _extract_instance_info(instances):
    '''
    Build a dict mapping node name -> instance data from a
    ``DescribeInstances`` response, adding the standard salt-cloud
    fields (id, image, size, state, private_ips, public_ips) to each
    entry.
    '''
    ret = {}
    for instance in instances:
        # items could be type dict or list (for stopped EC2 instances);
        # normalize to a list so one loop handles both shapes instead of
        # the previous duplicated branches
        items = instance['instancesSet']['item']
        if not isinstance(items, list):
            items = [items]
        for item in items:
            name = _extract_name_tag(item)
            ret[name] = item
            ret[name].update(
                dict(
                    id=item['instanceId'],
                    image=item['imageId'],
                    size=item['instanceType'],
                    state=item['instanceState']['name'],
                    private_ips=item.get('privateIpAddress', []),
                    public_ips=item.get('ipAddress', [])
                )
            )
    return ret
def _list_nodes_full(location=None):
    '''
    Query all instances in a region, cache the resulting node list, and
    return it.
    '''
    provider = __active_provider_name__ or 'ec2'
    if ':' in provider:
        provider = provider.split(':')[0]
    instances = aws.query({'Action': 'DescribeInstances'},
                          location=location,
                          provider=provider,
                          opts=__opts__,
                          sigver='4')
    if 'error' in instances:
        raise SaltCloudSystemExit(
            'An error occurred while listing nodes: {0}'.format(
                instances['error']['Errors']['Error']['Message']
            )
        )
    ret = _extract_instance_info(instances)
    salt.utils.cloud.cache_node_list(ret, provider, __opts__)
    return ret
def list_nodes_min(location=None, call=None):
    '''
    Return a minimal list of the VMs: just names mapped to their state.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    ret = {}
    params = {'Action': 'DescribeInstances'}
    instances = aws.query(params,
                          location=get_location(),
                          provider=get_provider(),
                          opts=__opts__,
                          sigver='4')
    if 'error' in instances:
        raise SaltCloudSystemExit(
            'An error occurred while listing nodes: {0}'.format(
                instances['error']['Errors']['Error']['Message']
            )
        )
    for instance in instances:
        # item is a dict for a single instance, a list for several;
        # normalize so every item gets recorded. Previously the
        # assignment sat outside the loop, so only the LAST item of a
        # multi-item set made it into the result.
        items = instance['instancesSet']['item']
        if not isinstance(items, list):
            items = [items]
        for item in items:
            name = _extract_name_tag(item)
            ret[name] = {'state': item['instanceState']['name']}
    return ret
def list_nodes(call=None):
    '''
    Return a list of nodes with the standard salt-cloud fields.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )
    nodes = list_nodes_full(get_location())
    if 'error' in nodes:
        raise SaltCloudSystemExit(
            'An error occurred while listing nodes: {0}'.format(
                nodes['error']['Errors']['Error']['Message']
            )
        )
    fields = ('id', 'image', 'size', 'state', 'private_ips', 'public_ips')
    return dict(
        (node, dict((field, nodes[node][field]) for field in fields))
        for node in nodes
    )
def list_nodes_select(call=None):
    '''
    Return nodes limited to the fields listed in ``query.selection``.
    '''
    selection = __opts__['query.selection']
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full(get_location()), selection, call,
    )
def show_term_protect(name=None, instance_id=None, call=None, quiet=False):
    '''
    Show the current value of the ``disableApiTermination`` attribute
    for a node. Returns the string value reported by EC2 (or False when
    none is found); logs at DEBUG instead of INFO when ``quiet`` is True.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_term_protect action must be called with -a or --action.'
        )
    if not instance_id:
        instance_id = list_nodes_full(get_location())[name]['instanceId']
    result = aws.query(
        {'Action': 'DescribeInstanceAttribute',
         'InstanceId': instance_id,
         'Attribute': 'disableApiTermination'},
        location=get_location(),
        provider=get_provider(),
        return_root=True,
        opts=__opts__,
        sigver='4')
    disable_protect = False
    for entry in result:
        if 'value' in entry:
            disable_protect = entry['value']
            break
    log.log(
        logging.DEBUG if quiet is True else logging.INFO,
        'Termination Protection is {0} for {1}'.format(
            'enabled' if disable_protect == 'true' else 'disabled',
            name
        )
    )
    return disable_protect
def enable_term_protect(name, call=None):
    '''
    Enable termination protection on a node.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The enable_term_protect action must be called with -a or --action.'
        )
    return _toggle_term_protect(name, 'true')
def disable_term_protect(name, call=None):
    '''
    Disable termination protection on a node.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The disable_term_protect action must be called with -a or --action.'
        )
    return _toggle_term_protect(name, 'false')
def _toggle_term_protect(name, value):
    '''
    Set the ``DisableApiTermination`` attribute on a node to ``value``
    ('true' or 'false') and return the resulting protection status.
    '''
    instances = list_nodes_full(get_location())
    instance_id = instances[name]['instanceId']
    params = {'Action': 'ModifyInstanceAttribute',
              'InstanceId': instance_id,
              'DisableApiTermination.Value': value}
    # The query result is not needed here; show_term_protect re-reads the
    # attribute below to report the effective setting. (Previously the
    # result was bound to an unused local.)
    aws.query(params,
              location=get_location(),
              provider=get_provider(),
              return_root=True,
              opts=__opts__,
              sigver='4')
    return show_term_protect(name=name, instance_id=instance_id, call='action')
def show_delvol_on_destroy(name, kwargs=None, call=None):
    '''
    Show the DeleteOnTermination setting for the volumes attached to a
    node, optionally filtered by ``kwargs['device']`` and/or
    ``kwargs['volume_id']``. Returns a list of per-volume info dicts.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_delvol_on_destroy action must be called '
            'with -a or --action.'
        )
    if not kwargs:
        kwargs = {}
    instance_id = kwargs.get('instance_id', None)
    device = kwargs.get('device', None)
    volume_id = kwargs.get('volume_id', None)
    if instance_id is None:
        instances = list_nodes_full()
        instance_id = instances[name]['instanceId']
    params = {'Action': 'DescribeInstances',
              'InstanceId.1': instance_id}
    data = aws.query(params,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    blockmap = data[0]['instancesSet']['item']['blockDeviceMapping']
    # A single mapping comes back as a dict; normalize to a list
    if not isinstance(blockmap['item'], list):
        blockmap['item'] = [blockmap['item']]
    items = []
    # The index was previously obtained via enumerate() but never used
    for item in blockmap['item']:
        device_name = item['deviceName']
        if device is not None and device != device_name:
            continue
        if volume_id is not None and volume_id != item['ebs']['volumeId']:
            continue
        info = {
            'device_name': device_name,
            'volume_id': item['ebs']['volumeId'],
            'deleteOnTermination': item['ebs']['deleteOnTermination']
        }
        items.append(info)
    return items
def keepvol_on_destroy(name, kwargs=None, call=None):
    '''
    Mark volumes (optionally filtered by device/volume_id) to be
    preserved when the instance is destroyed.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The keepvol_on_destroy action must be called with -a or --action.'
        )
    kwargs = kwargs or {}
    return _toggle_delvol(name=name,
                          device=kwargs.get('device'),
                          volume_id=kwargs.get('volume_id'),
                          value='false')
def delvol_on_destroy(name, kwargs=None, call=None):
    '''
    Mark volumes (optionally filtered by device/volume_id) to be deleted
    when the instance is destroyed.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The delvol_on_destroy action must be called with -a or --action.'
        )
    kwargs = kwargs or {}
    return _toggle_delvol(name=name,
                          device=kwargs.get('device'),
                          volume_id=kwargs.get('volume_id'),
                          value='true')
def _toggle_delvol(name=None, instance_id=None, device=None, volume_id=None,
                   value=None, requesturl=None):
    """Set DeleteOnTermination to ``value`` ('true'/'false') on the
    instance's block-device mappings.

    Filters by ``device`` and/or ``volume_id`` when given; otherwise
    every mapping is toggled.  ``requesturl`` re-uses a previously built
    DescribeInstances request instead of constructing a new one.
    Returns the refreshed node information.
    """
    if not instance_id:
        instances = list_nodes_full(get_location())
        instance_id = instances[name]['instanceId']
    if requesturl:
        data = aws.query(requesturl=requesturl,
                         location=get_location(),
                         provider=get_provider(),
                         opts=__opts__,
                         sigver='4')
    else:
        params = {'Action': 'DescribeInstances',
                  'InstanceId.1': instance_id}
        data, requesturl = aws.query(params,  # pylint: disable=W0632
                                     return_url=True,
                                     location=get_location(),
                                     provider=get_provider(),
                                     opts=__opts__,
                                     sigver='4')
    blockmap = data[0]['instancesSet']['item']['blockDeviceMapping']
    params = {'Action': 'ModifyInstanceAttribute',
              'InstanceId': instance_id}
    # A single mapping is returned as a dict; normalise to a list.
    if not isinstance(blockmap['item'], list):
        blockmap['item'] = [blockmap['item']]
    for idx, item in enumerate(blockmap['item']):
        device_name = item['deviceName']
        if device is not None and device != device_name:
            continue
        if volume_id is not None and volume_id != item['ebs']['volumeId']:
            continue
        # NOTE(review): idx is 0-based while EC2 list parameters are
        # conventionally 1-based — confirm the API accepts index 0 here.
        params['BlockDeviceMapping.{0}.DeviceName'.format(idx)] = device_name
        params['BlockDeviceMapping.{0}.Ebs.DeleteOnTermination'.format(idx)] = value
    aws.query(params,
              return_root=True,
              location=get_location(),
              provider=get_provider(),
              opts=__opts__,
              sigver='4')
    return _get_node(instance_id=instance_id)
def create_volume(kwargs=None, call=None, wait_to_finish=False):
    """Create an EBS volume.

    ``kwargs`` must contain ``zone``; ``size`` (GiB), ``snapshot``,
    ``type``, ``iops`` (io1 volumes only), ``encrypted`` and ``tags``
    are optional.  Must be invoked as a function (-f/--function).
    When ``wait_to_finish`` is True, blocks until the volume status is
    'available'.

    Returns a dict of the CreateVolume response (plus applied tags), or
    False on invalid input.
    """
    if call != 'function':
        log.error(
            'The create_volume function must be called with -f or --function.'
        )
        return False
    # Guard against kwargs=None: the membership tests below would
    # otherwise raise TypeError on a NoneType.
    if kwargs is None:
        kwargs = {}
    if 'zone' not in kwargs:
        log.error('An availability zone must be specified to create a volume.')
        return False
    if 'size' not in kwargs and 'snapshot' not in kwargs:
        # This number represents GiB
        kwargs['size'] = '10'
    params = {'Action': 'CreateVolume',
              'AvailabilityZone': kwargs['zone']}
    if 'size' in kwargs:
        params['Size'] = kwargs['size']
    if 'snapshot' in kwargs:
        params['SnapshotId'] = kwargs['snapshot']
    if 'type' in kwargs:
        params['VolumeType'] = kwargs['type']
    if 'iops' in kwargs and kwargs.get('type', 'standard') == 'io1':
        params['Iops'] = kwargs['iops']
    if 'encrypted' in kwargs:
        params['Encrypted'] = kwargs['encrypted']
    log.debug(params)
    data = aws.query(params,
                     return_url=True,
                     return_root=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    # Flatten the list of single-key dicts returned by the API.
    r_data = {}
    for d in data[0]:
        for k, v in d.items():
            r_data[k] = v
    volume_id = r_data['volumeId']
    # Allow tags to be set upon creation
    if 'tags' in kwargs:
        if isinstance(kwargs['tags'], six.string_types):
            tags = yaml.safe_load(kwargs['tags'])
        else:
            tags = kwargs['tags']
        if isinstance(tags, dict):
            new_tags = set_tags(tags=tags,
                                resource_id=volume_id,
                                call='action',
                                location=get_location())
            r_data['tags'] = new_tags
    # Waits till volume is available
    if wait_to_finish:
        salt.utils.cloud.run_func_until_ret_arg(fun=describe_volumes,
                                                kwargs={'volume_id': volume_id},
                                                fun_call=call,
                                                argument_being_watched='status',
                                                required_argument_response='available')
    return r_data
def attach_volume(name=None, kwargs=None, instance_id=None, call=None):
    """Attach an EBS volume to an instance.

    Requires ``volume_id`` and ``device`` in kwargs; the instance is
    identified by ``instance_id`` (kwargs or argument) or resolved from
    ``name``.  Must be invoked as an action (-a/--action).
    Returns the AttachVolume query response, or False on invalid input.
    """
    if call != 'action':
        raise SaltCloudSystemExit(
            'The attach_volume action must be called with -a or --action.'
        )
    if not kwargs:
        kwargs = {}
    if 'instance_id' in kwargs:
        instance_id = kwargs['instance_id']
    # Fall back to resolving the instance id from the node name.
    if name and not instance_id:
        instances = list_nodes_full(get_location())
        instance_id = instances[name]['instanceId']
    if not name and not instance_id:
        log.error('Either a name or an instance_id is required.')
        return False
    if 'volume_id' not in kwargs:
        log.error('A volume_id is required.')
        return False
    if 'device' not in kwargs:
        log.error('A device is required (ex. /dev/sdb1).')
        return False
    params = {'Action': 'AttachVolume',
              'VolumeId': kwargs['volume_id'],
              'InstanceId': instance_id,
              'Device': kwargs['device']}
    log.debug(params)
    data = aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    return data
def show_volume(kwargs=None, call=None):
    """Show details of EBS volumes; thin alias for describe_volumes."""
    return describe_volumes(kwargs or {}, call)
def detach_volume(name=None, kwargs=None, instance_id=None, call=None):
    """Detach an EBS volume from an instance.

    Requires ``volume_id`` in kwargs.  Must be invoked as an action
    (-a/--action).  Returns the DetachVolume query response, or False
    when no volume id was supplied.
    """
    if call != 'action':
        raise SaltCloudSystemExit(
            'The detach_volume action must be called with -a or --action.'
        )
    kwargs = kwargs or {}
    if 'volume_id' not in kwargs:
        log.error('A volume_id is required.')
        return False
    query_args = {
        'Action': 'DetachVolume',
        'VolumeId': kwargs['volume_id'],
    }
    return aws.query(query_args,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
def delete_volume(name=None, kwargs=None, instance_id=None, call=None):
    """Delete an EBS volume.

    Requires ``volume_id`` in kwargs.  Returns the DeleteVolume query
    response, or False when no volume id was supplied.

    NOTE(review): unlike the sibling volume functions this one does not
    validate ``call`` — confirm whether that is intentional.
    """
    if not kwargs:
        kwargs = {}
    if 'volume_id' not in kwargs:
        log.error('A volume_id is required.')
        return False
    params = {'Action': 'DeleteVolume',
              'VolumeId': kwargs['volume_id']}
    data = aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    return data
def describe_volumes(kwargs=None, call=None):
    """Describe EBS volumes.

    ``kwargs['volume_id']`` may be a comma-separated list restricting
    which volumes are described.  Must be invoked as a function
    (-f/--function); returns the DescribeVolumes query response.
    """
    if call != 'function':
        log.error(
            'The describe_volumes function must be called with -f '
            'or --function.'
        )
        return False
    kwargs = kwargs or {}
    params = {'Action': 'DescribeVolumes'}
    if 'volume_id' in kwargs:
        # Expand the comma-separated ids into numbered VolumeId.N params.
        for index, vol_id in enumerate(kwargs['volume_id'].split(',')):
            params['VolumeId.{0}'.format(index)] = vol_id
    log.debug(params)
    return aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
def create_keypair(kwargs=None, call=None):
    """Create an SSH keypair named ``kwargs['keyname']``.

    Must be invoked as a function (-f/--function).  Returns the
    CreateKeyPair query response, or False on invalid input.
    """
    if call != 'function':
        log.error(
            'The create_keypair function must be called with -f or --function.'
        )
        return False
    kwargs = kwargs or {}
    if 'keyname' not in kwargs:
        log.error('A keyname is required.')
        return False
    query_args = {
        'Action': 'CreateKeyPair',
        'KeyName': kwargs['keyname'],
    }
    return aws.query(query_args,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
def show_keypair(kwargs=None, call=None):
    """Show the details of an SSH keypair.

    Requires ``keyname`` in kwargs.  Must be invoked as a function
    (-f/--function).  Returns the DescribeKeyPairs query response, or
    False on invalid input.
    """
    if call != 'function':
        log.error(
            'The show_keypair function must be called with -f or --function.'
        )
        return False
    if not kwargs:
        kwargs = {}
    if 'keyname' not in kwargs:
        log.error('A keyname is required.')
        return False
    params = {'Action': 'DescribeKeyPairs',
              'KeyName.1': kwargs['keyname']}
    data = aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    return data
def delete_keypair(kwargs=None, call=None):
    """Delete an SSH keypair.

    Requires ``keyname`` in kwargs.  Must be invoked as a function
    (-f/--function).  Returns the DeleteKeyPair query response, or
    False on invalid input.
    """
    if call != 'function':
        log.error(
            'The delete_keypair function must be called with -f or --function.'
        )
        return False
    if not kwargs:
        kwargs = {}
    if 'keyname' not in kwargs:
        log.error('A keyname is required.')
        return False
    params = {'Action': 'DeleteKeyPair',
              'KeyName.1': kwargs['keyname']}
    data = aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    return data
def create_snapshot(kwargs=None, call=None, wait_to_finish=False):
    """Create a snapshot of an EBS volume.

    Requires ``volume_id`` in kwargs; ``description`` is optional.
    Must be invoked as a function (-f/--function).  When
    ``wait_to_finish`` is True, blocks until the snapshot status is
    'completed'.  Returns the CreateSnapshot query response, or False
    on invalid input.
    """
    if call != 'function':
        log.error(
            'The create_snapshot function must be called with -f '
            'or --function.'
        )
        return False
    # Guard against kwargs=None: the membership tests below would
    # otherwise raise TypeError on a NoneType.
    if kwargs is None:
        kwargs = {}
    if 'volume_id' not in kwargs:
        log.error('A volume_id must be specified to create a snapshot.')
        return False
    if 'description' not in kwargs:
        kwargs['description'] = ''
    params = {'Action': 'CreateSnapshot'}
    if 'volume_id' in kwargs:
        params['VolumeId'] = kwargs['volume_id']
    if 'description' in kwargs:
        params['Description'] = kwargs['description']
    log.debug(params)
    data = aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    # Flatten the response into a single dict to pull out the new id.
    r_data = {}
    for d in data:
        for k, v in d.items():
            r_data[k] = v
    snapshot_id = r_data['snapshotId']
    # Waits till the snapshot is completed
    if wait_to_finish:
        salt.utils.cloud.run_func_until_ret_arg(fun=describe_snapshots,
                                                kwargs={'snapshot_id': snapshot_id},
                                                fun_call=call,
                                                argument_being_watched='status',
                                                required_argument_response='completed')
    return data
def delete_snapshot(kwargs=None, call=None):
    """Delete a snapshot.

    Requires ``snapshot_id`` in kwargs.  Must be invoked as a function
    (-f/--function).  Returns the DeleteSnapshot query response, or
    False on invalid input.
    """
    if call != 'function':
        log.error(
            'The delete_snapshot function must be called with -f '
            'or --function.'
        )
        return False
    # Guard against kwargs=None: the membership test below would
    # otherwise raise TypeError on a NoneType.
    if kwargs is None:
        kwargs = {}
    if 'snapshot_id' not in kwargs:
        log.error('A snapshot_id must be specified to delete a snapshot.')
        return False
    params = {'Action': 'DeleteSnapshot'}
    if 'snapshot_id' in kwargs:
        params['SnapshotId'] = kwargs['snapshot_id']
    log.debug(params)
    data = aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    return data
def copy_snapshot(kwargs=None, call=None):
    """Copy a snapshot from another region.

    Requires ``source_region`` and ``source_snapshot_id`` in kwargs;
    ``description`` is optional.  Must be invoked as a function
    (-f/--function).  Returns the CopySnapshot query response, or False
    on invalid input.
    """
    if call != 'function':
        log.error(
            'The copy_snapshot function must be called with -f or --function.'
        )
        return False
    # Guard against kwargs=None: the membership tests below would
    # otherwise raise TypeError on a NoneType.
    if kwargs is None:
        kwargs = {}
    if 'source_region' not in kwargs:
        log.error('A source_region must be specified to copy a snapshot.')
        return False
    if 'source_snapshot_id' not in kwargs:
        log.error('A source_snapshot_id must be specified to copy a snapshot.')
        return False
    if 'description' not in kwargs:
        kwargs['description'] = ''
    params = {'Action': 'CopySnapshot'}
    if 'source_region' in kwargs:
        params['SourceRegion'] = kwargs['source_region']
    if 'source_snapshot_id' in kwargs:
        params['SourceSnapshotId'] = kwargs['source_snapshot_id']
    if 'description' in kwargs:
        params['Description'] = kwargs['description']
    log.debug(params)
    data = aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    return data
def describe_snapshots(kwargs=None, call=None):
    """Describe snapshots.

    Optional comma-separated filters in kwargs: ``snapshot_id`` (the
    legacy ``snapshot_ids`` alias is accepted), ``owner`` and
    ``restorable_by``.  Must be invoked as a function (-f/--function);
    returns the DescribeSnapshots query response.
    """
    if call != 'function':
        log.error(
            'The describe_snapshot function must be called with -f '
            'or --function.'
        )
        return False
    # Guard against kwargs=None: the membership tests below would
    # otherwise raise TypeError on a NoneType.
    if kwargs is None:
        kwargs = {}
    params = {'Action': 'DescribeSnapshots'}
    # The AWS correct way is to use non-plurals like snapshot_id INSTEAD of snapshot_ids.
    if 'snapshot_ids' in kwargs:
        kwargs['snapshot_id'] = kwargs['snapshot_ids']
    if 'snapshot_id' in kwargs:
        snapshot_ids = kwargs['snapshot_id'].split(',')
        for snapshot_index, snapshot_id in enumerate(snapshot_ids):
            params['SnapshotId.{0}'.format(snapshot_index)] = snapshot_id
    if 'owner' in kwargs:
        owners = kwargs['owner'].split(',')
        for owner_index, owner in enumerate(owners):
            params['Owner.{0}'.format(owner_index)] = owner
    if 'restorable_by' in kwargs:
        restorable_bys = kwargs['restorable_by'].split(',')
        for restorable_by_index, restorable_by in enumerate(restorable_bys):
            params[
                'RestorableBy.{0}'.format(restorable_by_index)
            ] = restorable_by
    log.debug(params)
    data = aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    return data
def get_console_output(
        name=None,
        instance_id=None,
        call=None,
        kwargs=None,
):
    """Show the console output of an instance.

    The instance is identified by ``instance_id`` (argument or kwargs)
    or resolved from ``name``.  Must be invoked as an action
    (-a/--action).  The base64 ``output`` field from the API is decoded
    into ``output_decoded`` in the returned dict.
    """
    if call != 'action':
        raise SaltCloudSystemExit(
            'The get_console_output action must be called with '
            '-a or --action.'
        )
    if not instance_id:
        instance_id = _get_node(name)[name]['instanceId']
    if kwargs is None:
        kwargs = {}
    if instance_id is None:
        if 'instance_id' in kwargs:
            instance_id = kwargs['instance_id']
            del kwargs['instance_id']
    params = {'Action': 'GetConsoleOutput',
              'InstanceId': instance_id}
    ret = {}
    data = aws.query(params,
                     return_url=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    for item in data:
        # next(iter(...)) works on both Python 2 and 3; dict.iterkeys()
        # and dict.itervalues() were removed in Python 3.
        key = next(iter(item))
        value = next(iter(item.values()))
        if key == 'output':
            ret['output_decoded'] = binascii.a2b_base64(value)
        else:
            ret[key] = value
    return ret
def get_password_data(
        name=None,
        kwargs=None,
        instance_id=None,
        call=None,
):
    """Return the encrypted Windows administrator password data.

    When pycrypto is available and an RSA private key is supplied via
    ``kwargs['key']`` or ``kwargs['key_file']``, the password is also
    decrypted into ``ret['password']``.  Must be invoked as an action
    (-a/--action).
    """
    if call != 'action':
        raise SaltCloudSystemExit(
            'The get_password_data action must be called with '
            '-a or --action.'
        )
    if not instance_id:
        instance_id = _get_node(name)[name]['instanceId']
    if kwargs is None:
        kwargs = {}
    if instance_id is None:
        if 'instance_id' in kwargs:
            instance_id = kwargs['instance_id']
            del kwargs['instance_id']
    params = {'Action': 'GetPasswordData',
              'InstanceId': instance_id}
    ret = {}
    data = aws.query(params,
                     return_root=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    for item in data:
        # dict.keys()/values() are views in Python 3 and cannot be
        # indexed; use next(iter(...)) instead of [0].
        ret[next(iter(item))] = next(iter(item.values()))
    if not HAS_PYCRYPTO:
        return ret
    # Read the private key from a file when not passed inline.
    if 'key' not in kwargs:
        if 'key_file' in kwargs:
            with salt.utils.fopen(kwargs['key_file'], 'r') as kf_:
                kwargs['key'] = kf_.read()
    if 'key' in kwargs:
        pwdata = ret.get('passwordData', None)
        if pwdata is not None:
            rsa_key = kwargs['key']
            pwdata = base64.b64decode(pwdata)
            dsize = Crypto.Hash.SHA.digest_size
            # Random sentinel returned on decryption failure (PKCS1 v1.5).
            sentinel = Crypto.Random.new().read(15 + dsize)
            key_obj = Crypto.PublicKey.RSA.importKey(rsa_key)
            key_obj = PKCS1_v1_5.new(key_obj)
            ret['password'] = key_obj.decrypt(pwdata, sentinel)
    return ret
| true | true |
f733e262f12794cbbfcbebe1b731b2f9cfb8f27d | 3,875 | py | Python | pmedian/views.py | ibadkureshi/tnk-locationallocation | b06abcb7bf8675b13e4c2e4fe419afb5ee11018f | [
"MIT"
] | 1 | 2021-02-07T10:37:52.000Z | 2021-02-07T10:37:52.000Z | pmedian/views.py | panosprotopapas/tnk-locationallocation | b06abcb7bf8675b13e4c2e4fe419afb5ee11018f | [
"MIT"
] | null | null | null | pmedian/views.py | panosprotopapas/tnk-locationallocation | b06abcb7bf8675b13e4c2e4fe419afb5ee11018f | [
"MIT"
] | 2 | 2020-10-23T13:14:53.000Z | 2020-11-13T12:01:44.000Z | from django.shortcuts import render
from pmedian.tasks import *
from pandas import errors
from pmedapp.common.utilities import *
import json
import pandas as pd
from django.views.decorators.csrf import csrf_exempt
from django.utils.datastructures import MultiValueDictKeyError
import glob
import os.path
@csrf_exempt
def extract_csv(request):
    """Parse an uploaded two-column lat/lon CSV (with headers) and
    return its rows as JSON records.

    POST expects the file under ``myfile``; responds 400 when the
    upload is not a valid CSV or has non-numeric/missing values.
    Any other method renders the upload form.
    """
    if request.method == 'POST' and request.FILES['myfile']:
        if not validate_upload(request, '.csv'):
            return HttpResponseBadRequest("Data error: Please provide a valid csv file")
        try:
            # expecting csv with headers
            df = pd.read_csv(request.FILES['myfile'])
            # Both columns must be numeric and fully populated.
            if column_numeric(df[df.columns[0]]) and column_numeric(df[df.columns[1]]) and not df.isnull().values.any():
                df.columns = ['latitude', 'longitude']
                return HttpResponse(df.to_json(orient='records'))
            else:
                return HttpResponseBadRequest("Data input error: Ensure data is numeric and no missing values exist")
        except errors.EmptyDataError:
            return HttpResponse('CSV file is empty')
    else:
        # In case of GET request, just show the form
        return render(request, 'file_upload.html', locals())
@csrf_exempt
def create_task(request):
    """Queue a p-median calculation Celery task.

    POST expects a CSV upload under ``myfile`` and a JSON-encoded
    parameter object under ``data``; returns the task id as JSON.
    Responds 400 on missing/invalid input and 405 for other methods.
    """
    if request.method == 'POST':
        try:
            args = json.loads(request.POST.get('data'))
            input_df = pd.read_csv(request.FILES['myfile'], header=0)
            task = p_median_calculation_task.delay(input_df.to_json(), args)
            response_data = {'task_id': str(task)}
            return HttpResponse(json.dumps(response_data), content_type="application/json")
        except (MultiValueDictKeyError, TypeError, ValueError):
            # MultiValueDictKeyError: missing file upload.
            # TypeError/ValueError: missing or malformed 'data' JSON
            # (json.loads(None) raises TypeError, bad JSON raises
            # JSONDecodeError, a ValueError subclass).
            return HttpResponseBadRequest("Please provide the correct input data")
    else:
        return HttpResponse(status=405, reason="Method not allowed")
@csrf_exempt
def get_task(request):
    """
    Return the status of a Celery task given its id (``task-id`` query
    parameter); includes a download URL once the result file exists.
    """
    try:
        task_id = request.GET['task-id']
        result = AsyncResult(task_id)
        result_dct = {result.task_id: {
            'status': result.status, 'date_done': str(result.date_done)}}
        result_dct[result.task_id]['result'] = result.result
        try:
            # An output file only exists once the calculation finished.
            file = glob.glob("output/*"+str(result)+".json")[0]
            result_dct['result_location'] = "http://localhost:8000/pmedian/get-file?filename=" + file[7:]
        except IndexError:
            result_dct['result_location'] = 'Calculation ongoing'
        return HttpResponse(json.dumps(result_dct))
    except KeyError:
        return HttpResponseBadRequest("Please provide a valid task-id")
@csrf_exempt
def get_all_tasks(request):
    """
    Get all celery tasks and return id, status, completion time and
    result location (json).
    """
    # Celery result backend stores one file per task under this prefix.
    path = "/tmp/results/celery-task-meta-*"
    results = (glob.glob(path))
    result_array = []
    for result in results:
        # The task id is the filename suffix after the glob prefix.
        asyng_result = AsyncResult(result[len(path) - 1:])
        result_dct = {}
        result_dct['id'] = result[len(path) - 1:]
        result_dct['status'] = asyng_result.status
        result_dct['date_done'] = str(asyng_result.date_done)
        try:
            # An output file only exists once the calculation finished.
            file = glob.glob("output/*"+str(asyng_result)+".json")[0]
            result_dct['result'] = "http://localhost:8000/pmedian/get-file?filename=" + file[7:]
            with open(file) as f:
                result_dct['name'] = json.load(f)['name']
        except IndexError:
            result_dct['result'] = 'Calculation ongoing'
        result_array.append(result_dct)
    return HttpResponse(json.dumps(result_array))
@csrf_exempt
def get_file(request):
    """
    Download output file to disk.
    """
    # Delegates entirely to the shared utility helper.
    return download_output_file(request)
| 33.405172 | 120 | 0.640774 | from django.shortcuts import render
from pmedian.tasks import *
from pandas import errors
from pmedapp.common.utilities import *
import json
import pandas as pd
from django.views.decorators.csrf import csrf_exempt
from django.utils.datastructures import MultiValueDictKeyError
import glob
import os.path
@csrf_exempt
def extract_csv(request):
    """Parse an uploaded two-column lat/lon CSV and return its rows as
    JSON records; any non-POST request renders the upload form."""
    if request.method == 'POST' and request.FILES['myfile']:
        if not validate_upload(request, '.csv'):
            return HttpResponseBadRequest("Data error: Please provide a valid csv file")
        try:
            df = pd.read_csv(request.FILES['myfile'])
            # Both columns must be numeric and fully populated.
            if column_numeric(df[df.columns[0]]) and column_numeric(df[df.columns[1]]) and not df.isnull().values.any():
                df.columns = ['latitude', 'longitude']
                return HttpResponse(df.to_json(orient='records'))
            else:
                return HttpResponseBadRequest("Data input error: Ensure data is numeric and no missing values exist")
        except errors.EmptyDataError:
            return HttpResponse('CSV file is empty')
    else:
        return render(request, 'file_upload.html', locals())
@csrf_exempt
def create_task(request):
    """Queue a p-median calculation from an uploaded CSV plus JSON
    parameters and return the Celery task id."""
    if request.method == 'POST':
        try:
            args = json.loads(request.POST.get('data'))
            input_df = pd.read_csv(request.FILES['myfile'], header=0)
            task = p_median_calculation_task.delay(input_df.to_json(), args)
            response_data = {'task_id': str(task)}
            return HttpResponse(json.dumps(response_data), content_type="application/json")
        except MultiValueDictKeyError:
            return HttpResponseBadRequest("Please provide the correct input data")
    else:
        return HttpResponse(status=405, reason="Method not allowed")
@csrf_exempt
def get_task(request):
    """Return status/result info for the Celery task whose id is given
    in the ``task-id`` query parameter."""
    try:
        task_id = request.GET['task-id']
        result = AsyncResult(task_id)
        result_dct = {result.task_id: {
            'status': result.status, 'date_done': str(result.date_done)}}
        result_dct[result.task_id]['result'] = result.result
        try:
            # An output file only exists once the calculation finished.
            file = glob.glob("output/*"+str(result)+".json")[0]
            result_dct['result_location'] = "http://localhost:8000/pmedian/get-file?filename=" + file[7:]
        except IndexError:
            result_dct['result_location'] = 'Calculation ongoing'
        return HttpResponse(json.dumps(result_dct))
    except KeyError:
        return HttpResponseBadRequest("Please provide a valid task-id")
@csrf_exempt
def get_all_tasks(request):
    """List every recorded Celery task with id, status, completion time
    and result location (when available)."""
    # Celery result backend stores one file per task under this prefix.
    path = "/tmp/results/celery-task-meta-*"
    results = (glob.glob(path))
    result_array = []
    for result in results:
        # The task id is the filename suffix after the glob prefix.
        asyng_result = AsyncResult(result[len(path) - 1:])
        result_dct = {}
        result_dct['id'] = result[len(path) - 1:]
        result_dct['status'] = asyng_result.status
        result_dct['date_done'] = str(asyng_result.date_done)
        try:
            file = glob.glob("output/*"+str(asyng_result)+".json")[0]
            result_dct['result'] = "http://localhost:8000/pmedian/get-file?filename=" + file[7:]
            with open(file) as f:
                result_dct['name'] = json.load(f)['name']
        except IndexError:
            result_dct['result'] = 'Calculation ongoing'
        result_array.append(result_dct)
    return HttpResponse(json.dumps(result_array))
@csrf_exempt
def get_file(request):
    """Download an output file to disk via the shared helper."""
    return download_output_file(request)
| true | true |
f733e2f0e15fdb2e869d8fb86904ba81a2ca84d5 | 5,020 | py | Python | sdk/AsposeEmailCloudSdk/models/ai_name_formatted.py | aspose-email-cloud/aspose-email-cloud-python | c5c13839cbbbfa5b6617bd1aedf3cf30cd664227 | [
"MIT"
] | 1 | 2020-02-26T13:19:06.000Z | 2020-02-26T13:19:06.000Z | sdk/AsposeEmailCloudSdk/models/ai_name_formatted.py | aspose-email-cloud/aspose-email-cloud-python | c5c13839cbbbfa5b6617bd1aedf3cf30cd664227 | [
"MIT"
] | null | null | null | sdk/AsposeEmailCloudSdk/models/ai_name_formatted.py | aspose-email-cloud/aspose-email-cloud-python | c5c13839cbbbfa5b6617bd1aedf3cf30cd664227 | [
"MIT"
] | null | null | null | # coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="AiNameFormatted.py">
# Copyright (c) 2018-2020 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
import pprint
import re
import six
from typing import List, Set, Dict, Tuple, Optional
from datetime import datetime
class AiNameFormatted(object):
    """Formatted name returned by the AiName API.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key.
    """

    swagger_types = {
        'name': 'str',
        'comments': 'str'
    }

    attribute_map = {
        'name': 'name',
        'comments': 'comments'
    }

    def __init__(self, name: str = None, comments: str = None):
        """Build a formatted name.

        :param name: Formatted name value
        :param comments: Usually empty; may describe an issue that
            occurred during formatting
        """
        self._name = None
        self._comments = None
        if name is not None:
            self.name = name
        if comments is not None:
            self.comments = comments

    @property
    def name(self) -> str:
        """str: Formatted name value."""
        return self._name

    @name.setter
    def name(self, name: str):
        self._name = name

    @property
    def comments(self) -> str:
        """str: Extra message describing formatting issues; usually empty."""
        return self._comments

    @comments.setter
    def comments(self, comments: str):
        self._comments = comments

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested
        models, lists and dicts."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    entry.to_dict() if hasattr(entry, "to_dict") else entry
                    for entry in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when all their properties match."""
        return (isinstance(other, AiNameFormatted)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 32.387097 | 131 | 0.569124 |
import pprint
import re
import six
from typing import List, Set, Dict, Tuple, Optional
from datetime import datetime
class AiNameFormatted(object):
    """Formatted name returned by the AiName API."""
    # attribute name -> attribute type
    swagger_types = {
        'name': 'str',
        'comments': 'str'
    }
    # attribute name -> JSON key in the API definition
    attribute_map = {
        'name': 'name',
        'comments': 'comments'
    }
    def __init__(self, name: str = None, comments: str = None):
        """Build a formatted name from optional name/comments values."""
        self._name = None
        self._comments = None
        if name is not None:
            self.name = name
        if comments is not None:
            self.comments = comments
    @property
    def name(self) -> str:
        """Formatted name value."""
        return self._name
    @name.setter
    def name(self, name: str):
        """Set the formatted name value."""
        self._name = name
    @property
    def comments(self) -> str:
        """Extra message describing formatting issues; usually empty."""
        return self._comments
    @comments.setter
    def comments(self, comments: str):
        """Set the comments value."""
        self._comments = comments
    def to_dict(self):
        """Return the model properties as a dict, recursing into nested
        models, lists and dicts."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """True when both are AiNameFormatted with equal properties."""
        if not isinstance(other, AiNameFormatted):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| true | true |
f733e3763f40a4b5dfcc0e785f7d49d28810498c | 22,832 | py | Python | mspray/apps/main/tasks.py | onaio/mspray | b3e0f4b5855abbf0298de6b66f2e9f472f2bf838 | [
"Apache-2.0"
] | null | null | null | mspray/apps/main/tasks.py | onaio/mspray | b3e0f4b5855abbf0298de6b66f2e9f472f2bf838 | [
"Apache-2.0"
] | 76 | 2018-03-15T09:37:56.000Z | 2019-05-15T12:45:51.000Z | mspray/apps/main/tasks.py | onaio/mspray | b3e0f4b5855abbf0298de6b66f2e9f472f2bf838 | [
"Apache-2.0"
] | 1 | 2020-10-31T07:15:22.000Z | 2020-10-31T07:15:22.000Z | # -*- coding: utf-8 -*-
"""Mspray task module."""
from __future__ import absolute_import
import gc
import logging
import os
from datetime import timedelta
from django.conf import settings
from django.contrib.gis.geos import Point
from django.contrib.gis.geos.polygon import Polygon
from django.db.models import Q, Sum, Value
from django.db.models.functions import Coalesce
from django.db.utils import IntegrityError
from django.utils import timezone
from mspray.apps.alerts.tasks import no_gps, user_distance
from mspray.apps.main.models import (
DirectlyObservedSprayingForm,
Household,
Location,
Mobilisation,
SensitizationVisit,
SprayDay,
SprayOperatorDailySummary,
WeeklyReport,
)
from mspray.apps.main.models.mobilisation import create_mobilisation_visit
from mspray.apps.main.models.sensitization_visit import (
create_sensitization_visit
)
from mspray.apps.main.models.spray_day import (
NON_STRUCTURE_GPS_FIELD,
STRUCTURE_GPS_FIELD,
get_osmid,
)
from mspray.apps.warehouse.tasks import stream_to_druid
from mspray.celery import app
from mspray.libs.ona import fetch_form_data, fetch_osm_xml
from mspray.libs.osm import parse_osm, parse_osm_nodes, parse_osm_ways
from mspray.libs.utils.geom_buffer import with_metric_buffer
BUFFER_SIZE = getattr(settings, "MSPRAY_NEW_BUFFER_WIDTH", 4) # default to 4m
HAS_UNIQUE_FIELD = getattr(settings, "MSPRAY_UNIQUE_FIELD", None)
STRUCTURE_GPS_FIELD = getattr(
settings, "MSPRAY_STRUCTURE_GPS_FIELD", STRUCTURE_GPS_FIELD
)
FORM_ID = getattr(settings, "ONA_FORM_PK", None)
LOCATION_VISITED_PERCENTAGE = getattr(
settings, "LOCATION_VISITED_PERCENTAGE", 20
)
LOCATION_SPRAYED_PERCENTAGE = getattr(
settings, "LOCATION_SPRAYED_PERCENTAGE", 90
)
UPDATE_VISITED_MINUTES = getattr(settings, "UPDATE_VISITED_MINUTES", 5)
DIRECTLY_OBSERVED_FORM_ID = getattr(
settings, "DIRECTLY_OBSERVED_FORM_ID", None
)
DAILY_SUMMARY_FORM_PK = getattr(settings, "SOP_DAILY_SUMMARY_FORM_PK", None)
FALLBACK_TO_ODK = settings.FALLBACK_TO_SUBMISSION_DATA_LOCATION
logger = logging.getLogger(__name__)
def get_new_structure_location(data, geom, is_node=False):
    """Find the target-area Location for a new-structure submission.

    When ``geom`` is missing and the structure is a node, the geometry
    is derived from the submission's GPS field.  Returns a
    ``(location, geom)`` tuple; location is None when no target area
    contains the point.
    """
    from mspray.apps.main.utils import geojson_from_gps_string
    if is_node and geom is None:
        # Prefer the structure GPS field, falling back to the
        # non-structure one.
        gps_field = data.get(
            STRUCTURE_GPS_FIELD, data.get(NON_STRUCTURE_GPS_FIELD)
        )
        geom = (
            geojson_from_gps_string(gps_field, True)
            if gps_field is not None
            else geom
        )
    location = None
    if geom is not None:
        locations = Location.objects.filter(
            geom__contains=geom, target=True, level=settings.MSPRAY_TA_LEVEL
        )
        if locations:
            location = locations[0]
    return location, geom
def get_location_from_data(data):
    """Look up the target spray-area Location named in a submission.

    Matches on the submission's ``spray_area`` name scoped to the
    district code two levels up; returns None when no such Location
    exists or the lookup values are malformed.
    """
    try:
        return Location.objects.get(
            name=data.get("spray_area"),
            target=True,
            parent__parent__code=data.get("district"),
        )
    except (Location.DoesNotExist, ValueError):
        return None
def get_location_from_osm(data):
    """Resolve a submission's Location from its OSM attachment.

    Fetches the OSM XML attachment, parses ways (preferred) or nodes,
    and finds the target area covering the first geometry.  When the
    XML contains no geometries, falls back to the district/spray_area
    names in the submission data.  Returns ``(location, geom, is_node)``.
    """
    geom = None
    is_node = False
    location = None
    filename = data.get(HAS_UNIQUE_FIELD)
    osm_xml = fetch_osm_xml(data, filename)
    if osm_xml is not None:
        geoms = []
        # Ways are preferred; nodes are only parsed when no way exists.
        geoms = parse_osm_ways(osm_xml) or parse_osm_nodes(osm_xml)
        if len(geoms):
            geom = geoms[0]["geom"]
            is_node = isinstance(geom, Point)
            locations = Location.objects.filter(
                geom__covers=geom, target=True, level=settings.MSPRAY_TA_LEVEL
            )
            if locations:
                location = locations.first()
        else:
            location = get_location_from_data(data)
    return location, geom, is_node
def set_spraypoint_location(sp, location, geom, is_node=False):
    """Store geometry and location on a spray point and save it.

    For a node a BUFFER_SIZE-metre buffer is built around the point;
    for a polygon its centroid becomes the point geometry.  When a
    location is known, a uniqueness record task is queued.
    """
    if geom:
        sp.geom = geom.centroid if not is_node else geom
        sp.bgeom = (
            geom if not is_node else with_metric_buffer(sp.geom, BUFFER_SIZE)
        )
    if location:
        sp.location = location
        sp.save()
        add_unique_record.delay(sp.pk, location.pk)
    elif geom is not None:
        # No location found but we do have geometry: persist it anyway.
        sp.save()
def get_updated_osm_from_ona(sp):
    """Re-fetch the submission from Ona; when it now carries an OSM id,
    store the refreshed data on the spray point and return the id.

    Returns None implicitly when no form is configured, the fetch
    fails, or the refreshed submission still has no OSM id.
    """
    if not FORM_ID:
        return None
    data = fetch_form_data(FORM_ID, dataid=sp.submission_id)
    if not data:
        return None
    osmid = get_osmid(data)
    if not osmid:
        return None
    sp.data = data
    sp.save()
    return osmid
def run_tasks_after_spray_data(sprayday):
    """
    Additional tasks to be run after receiving a new spray data
    submission: optional alerts and optional streaming to druid,
    each gated by a settings flag.
    """
    if getattr(settings, "ENABLE_ALERTS", False):
        # no gps alert
        no_gps.delay(sprayday.id)
        # user distance alert
        user_distance.delay(sprayday.id)
    # stream to druid
    if getattr(settings, "STREAM_TO_DRUID", False):
        stream_to_druid.delay(sprayday.id)
@app.task
def add_unique_record(sprayday_pk, location_pk):
    """Add a spraypoint for the submission."""
    try:
        sprayday = SprayDay.objects.get(pk=sprayday_pk)
        location = Location.objects.get(pk=location_pk)
    except (SprayDay.DoesNotExist, Location.DoesNotExist):
        # Either record has vanished; nothing to link.
        pass
    else:
        from mspray.apps.main.utils import add_unique_data
        # Try every known source of the OSM id, refreshing from Ona if
        # necessary; new structures fall back to their GPS string.
        osmid = (
            get_osmid(sprayday.data)
            or get_updated_osm_from_ona(sprayday)
            or sprayday.data.get("newstructure/gps")
        )
        if osmid:
            try:
                osmid = int(osmid)
            except ValueError:
                # Not numeric (e.g. a GPS string) — use it as-is.
                pass
            else:
                if osmid > 0:
                    # see if we have a matching household structure
                    try:
                        Household.objects.get(hh_id=osmid)
                    except Household.DoesNotExist:
                        # Fall back to matching on the buffered geometry.
                        try:
                            household = Household.objects.get(
                                bgeom=sprayday.bgeom
                            )
                        except Household.DoesNotExist:
                            pass
                        else:
                            osmid = household.hh_id
            sprayday.osmid = osmid
            sprayday.save()
            sprayday.refresh_from_db()
            add_unique_data(sprayday, HAS_UNIQUE_FIELD, location, osmid)
@app.task
def link_spraypoint_with_osm(spray_day_id):
    """Use OSM to link a record to a location."""
    try:
        spray_day = SprayDay.objects.get(pk=spray_day_id)
    except SprayDay.DoesNotExist:
        # Record has vanished; nothing to link.
        pass
    else:
        location, geom, is_node = get_location_from_osm(spray_day.data)
        if location is None:
            # OSM lookup failed: try placing the new structure by GPS,
            # then (optionally) by the district/area names in the data.
            location, geom = get_new_structure_location(
                spray_day.data, geom, is_node
            )
            if location is None and FALLBACK_TO_ODK:
                location = get_location_from_data(spray_day.data)
            else:
                is_node = isinstance(geom, Point)
        if not location and not FALLBACK_TO_ODK and not spray_day.location:
            # Explicitly clear the location when fallback is disabled.
            spray_day.location = None
        set_spraypoint_location(spray_day, location, geom, is_node)
        return spray_day.pk
def _create_household(way, location):
    """Create a Household record from a parsed OSM way.

    A polygon geometry is stored as the building geometry and its
    centroid as the point geometry.  Duplicate hh_ids (IntegrityError)
    are silently ignored.
    """
    bgeom = None
    if isinstance(way.get("geom"), Polygon):
        bgeom = way.get("geom")
    try:
        Household.objects.create(
            hh_id=way.get("osm_id"),
            geom=way.get("geom").centroid,
            bgeom=bgeom,
            data=way.get("tags"),
            location=location,
        )
    except Household.DoesNotExist:
        # NOTE(review): objects.create should not raise DoesNotExist —
        # confirm which related lookup this was meant to guard.
        pass
    except IntegrityError:
        pass
@app.task
def process_osm_file(path):
    """Parse an .osm file and create Household records for its OSM ways.

    Each way is matched to the target-area ("ta") Location containing
    its own geometry; when no containing area is found, falls back to
    the Location whose name matches the file name (``<name>.osm``).
    """
    with open(path) as f:
        name = os.path.basename(path).replace(".osm", "")
        content = f.read()
        nodes = parse_osm(content.strip())
        ways = [way for way in nodes if way.get("osm_type") == "way"]
        for way in ways:
            # Match on this way's own geometry; the original code used
            # ways[0] for every iteration, which assigned the first
            # way's location to all households in the file.
            location = (
                Location.objects.filter(
                    geom__contains=way.get("geom"), level="ta"
                ).first()
                or Location.objects.filter(name=name, level="ta").first()
            )
            if location:
                _create_household(way, location)
    # Parsing large OSM files can leave sizeable garbage behind.
    gc.collect()
@app.task
def refresh_data_with_no_osm():
    """Re-link SprayDay records that are missing OSM data.

    Processes records that have an ``osmstructure`` value but no OSM
    way/node id, then records without a geometry, and returns the total
    number of records considered.
    """

    def _process_no_osm(queryset):
        # Fixed: iterate the ``queryset`` argument instead of closing over
        # the outer ``data`` variable that the parameter shadowed.
        for rec in queryset:
            osmid = get_updated_osm_from_ona(rec)
            if osmid:
                link_spraypoint_with_osm.delay(rec.pk)

    data = (
        SprayDay.objects.exclude(data__has_key="osmstructure:way:id")
        .exclude(data__has_key="osmstructure:node:id")
        .filter(data__has_key="osmstructure")
    )
    found = data.count()
    _process_no_osm(data)
    data = SprayDay.objects.filter(geom__isnull=True)
    found = data.count() + found
    _process_no_osm(data)
    return found
def set_sprayed_visited_week(
    location, week_number, visited, sprayed, structures
):
    """Create or update the ``WeeklyReport`` row for a location/week pair,
    persisting the visited/sprayed flags and the structure count."""
    lookup = {"location": location, "week_number": week_number}
    try:
        report = WeeklyReport.objects.get(**lookup)
    except WeeklyReport.DoesNotExist:
        # First report for this location/week: build an unsaved instance.
        report = WeeklyReport(**lookup)
    for field, value in (
        ("visited", visited),
        ("sprayed", sprayed),
        ("structures", structures),
    ):
        setattr(report, field, value)
    report.save()
@app.task
def task_set_sprayed_visited(location_id, week_number=None):
    """Celery wrapper: recompute sprayed/visited flags for one location.

    Missing locations are ignored silently.
    """
    try:
        location = Location.objects.get(pk=location_id)
    except Location.DoesNotExist:
        return
    set_sprayed_visited(location, week_number=week_number)
def set_sprayed_visited(location, week_number=None):
    """Persists visited and sprayed values for locations.

    20% sprayed is a visit.
    90% sprayed is sprayed.

    For spray areas ("ta" level) the flags are derived from the ratio of
    found/sprayed structures against LOCATION_VISITED_PERCENTAGE and
    LOCATION_SPRAYED_PERCENTAGE.  For higher levels (RHC/district) the
    values are aggregated from child locations, or from ``WeeklyReport``
    rows when ``week_number`` is given.
    """
    # Imported here to avoid a circular import at module load time.
    from mspray.apps.main.serializers.target_area import get_spray_area_stats

    if location.level == "ta":
        sprayed = 0
        visited = 0
        if week_number:
            # Weekly figures come from the serializer's per-week stats.
            context = {"week_number": week_number}
            data, total_structures = get_spray_area_stats(location, context)
            visited_sprayed = data.get("sprayed")
            found = data.get("found")
        else:
            # Season-to-date figures live on the location itself.
            total_structures = location.structures_on_ground
            found = location.visited_found
            visited_sprayed = location.visited_sprayed
        if total_structures and found:
            ratio = round((found * 100) / total_structures)
            if ratio >= LOCATION_VISITED_PERCENTAGE:
                visited = 1
        if total_structures and visited_sprayed:
            ratio = round((visited_sprayed * 100) / total_structures)
            if ratio >= LOCATION_SPRAYED_PERCENTAGE:
                sprayed = 1
        if week_number:
            # print(week_number, location, week_number, visited, sprayed)
            set_sprayed_visited_week(
                location, week_number, visited, sprayed, total_structures
            )
        else:
            location.visited = visited
            location.sprayed = sprayed
            location.save()
    else:
        # RHC / district: roll values up from the children.
        if week_number:
            kwargs = {"week_number": week_number}
            if location.level == "RHC":
                kwargs["location__parent"] = location
            else:
                kwargs["location__parent__parent"] = location
            queryset = WeeklyReport.objects.filter(**kwargs).aggregate(
                structures_sum=Coalesce(
                    Sum("structures", distinct=True), Value(0)
                ),
                visited_sum=Coalesce(Sum("visited", distinct=True), Value(0)),
                sprayed_sum=Coalesce(Sum("sprayed", distinct=True), Value(0)),
            )
            # print(week_number, location, week_number,
            # queryset.get('visited_sum'), queryset.get('sprayed_sum'))
            set_sprayed_visited_week(
                location,
                week_number,
                queryset.get("visited_sum"),
                queryset.get("sprayed_sum"),
                queryset.get("structures_sum"),
            )
        else:
            queryset = location.location_set.values("id").aggregate(
                visited_sum=Coalesce(Sum("visited", distinct=True), Value(0)),
                sprayed_sum=Coalesce(Sum("sprayed", distinct=True), Value(0)),
            )
            location.visited = queryset.get("visited_sum") or 0
            location.sprayed = queryset.get("sprayed_sum") or 0
            location.save()
@app.task
def update_sprayed_visited(time_within=UPDATE_VISITED_MINUTES):
    """
    Sets 'sprayed' and 'visited' values for locations on submissions within
    UPDATE_VISITED_MINUTES which defaults to every 5 minutes.
    """

    def _set_sprayed_visited(key):
        # Recompute each distinct location reachable via ``key`` on the
        # recent submissions.
        for loc_id in submissions.values_list(key, flat=True).distinct():
            location = Location.objects.get(pk=loc_id)
            set_sprayed_visited(location)

    # One extra minute of overlap so boundary records are not missed
    # between consecutive runs.
    time_since = timezone.now() - timedelta(minutes=time_within + 1)
    submissions = SprayDay.objects.filter(created_on__gte=time_since).exclude(
        location__isnull=True
    )
    # spray areas
    _set_sprayed_visited("location")
    # RHC
    _set_sprayed_visited("location__parent")
    # District
    _set_sprayed_visited("location__parent__parent")
@app.task
def update_sprayed_visited_week(
    time_within=UPDATE_VISITED_MINUTES, week_number=None
):
    """
    Sets 'sprayed' and 'visited' values for locations on submissions within
    UPDATE_VISITED_MINUTES which defaults to every 5 minutes.

    NOTE(review): ``time_within`` is currently unused — the time filter
    below is commented out, so *all* located submissions are processed on
    every run.  Confirm whether that is intentional.
    """

    def _set_sprayed_visited(key):
        # Recompute each distinct location reachable via ``key``.
        for loc_id in submissions.values_list(key, flat=True).distinct():
            location = Location.objects.get(pk=loc_id)
            set_sprayed_visited(location, week_number=week_number)

    # time_since = timezone.now() - timedelta(minutes=time_within + 1)
    # submissions = SprayDay.objects.filter(created_on__gte=time_since)\
    submissions = SprayDay.objects.filter().exclude(location__isnull=True)
    if not week_number:
        # Default to the current week number ("%W": Monday-first week).
        week_number = int(timezone.now().strftime("%W"))
    # spray areas
    _set_sprayed_visited("location")
    # RHC
    _set_sprayed_visited("location__parent")
    # District
    _set_sprayed_visited("location__parent__parent")
@app.task
def set_district_sprayed_visited():
    """
    Update sprayed and visited numbers on all objects.

    Spray areas are processed first so the RHC and district roll-ups
    aggregate fresh values.
    """
    for area in Location.objects.filter(level="ta", target=True).iterator():
        set_sprayed_visited(area)
    for level in ("RHC", "district"):
        for parent in Location.objects.filter(level=level):
            set_sprayed_visited(parent)
@app.task
def remove_deleted_records():
    """Remove deleted records.

    Deletes local ``SprayDay`` records whose submission ids are no longer
    present on Ona and returns how many were removed.
    """
    if not FORM_ID:
        return 0
    data = fetch_form_data(FORM_ID, dataids_only=True)
    if not data:
        return 0
    kept_ids = [row["_id"] for row in data]
    stale = SprayDay.objects.exclude(submission_id__in=kept_ids)
    removed = stale.count()
    stale.delete()
    return removed
@app.task
def update_edited_records():
    """
    Update edited records.

    Fetches the ids of edited submissions from Ona and re-imports each
    matching local record via ``add_spray_data``.  Returns the number of
    records updated.
    """
    count = 0
    if FORM_ID:
        data = fetch_form_data(FORM_ID, dataids_only=True, edited_only=True)
        if not data:
            return count
        pks = [i["_id"] for i in data]
        edited_submissions = SprayDay.objects.filter(submission_id__in=pks)
        for rec in edited_submissions:
            data = fetch_form_data(FORM_ID, dataid=rec.submission_id)
            if data:
                # Imported here to avoid a circular import at load time.
                from mspray.apps.main.utils import add_spray_data

                add_spray_data(data)
                count += 1
    return count
@app.task
def remove_deleted_daily_summary_records():
    """
    Deletes deleted Daily Summary records.

    Reads the form id from the latest local summary's payload, lists the
    submissions still present on Ona, and removes local records no longer
    there.  Returns the number of records deleted.
    """
    count = 0
    summary = SprayOperatorDailySummary.objects.last()
    # ``summary`` may be None on an empty table; short-circuit keeps
    # ``formid`` falsy in that case.
    formid = summary and summary.data.get("_xform_id")
    if formid:
        data = fetch_form_data(formid, dataids_only=True)
        if not data:
            return count
        pks = [i["_id"] for i in data]
        records = SprayOperatorDailySummary.objects.exclude(
            submission_id__in=pks
        )
        count = records.count()
        records.delete()
    return count
@app.task
def fetch_directly_observed():
    """
    Fetch new Directly Observed Spraying submissions from Ona.

    Pulls any submission ids not yet stored locally and imports each one
    via ``add_directly_observed_spraying_data``.  Returns the number of
    records added.  (Docstring fixed: it previously read "Update edited
    records", copied from a sibling task.)
    """
    count = 0
    dos = DirectlyObservedSprayingForm.objects.last()
    # Prefer the live form id from the latest submission's payload.
    formid = dos.data.get("_xform_id") if dos else DIRECTLY_OBSERVED_FORM_ID
    if formid:
        data = fetch_form_data(formid, dataids_only=True)
        if not data:
            return count
        pks = [i["_id"] for i in data]
        received = DirectlyObservedSprayingForm.objects.values_list(
            "submission_id", flat=True
        )
        # Plain set difference; the generator-copy wrapper was redundant.
        new_ids = set(pks) - set(received)
        for rec in new_ids:
            data = fetch_form_data(formid, dataid=rec)
            if data:
                from mspray.apps.main.utils import (
                    add_directly_observed_spraying_data
                )  # NOQA

                add_directly_observed_spraying_data(data)
                count += 1
    return count
@app.task
def fetch_updated_directly_observed():
    """
    Update edited records.

    Re-fetches every edited Directly Observed Spraying submission from
    Ona and re-imports it.  Returns the number of records processed.
    """
    count = 0
    dos = DirectlyObservedSprayingForm.objects.last()
    # Prefer the live form id from the latest submission's payload.
    formid = dos.data.get("_xform_id") if dos else DIRECTLY_OBSERVED_FORM_ID
    if formid:
        data = fetch_form_data(formid, dataids_only=True, edited_only=True)
        if not data:
            return count
        pks = [i["_id"] for i in data]
        for rec in pks:
            data = fetch_form_data(formid, dataid=rec)
            if data:
                from mspray.apps.main.utils import (
                    add_directly_observed_spraying_data
                )  # NOQA

                add_directly_observed_spraying_data(data)
                count += 1
    return count
@app.task
def remove_deleted_dos_records():
    """
    Remove Directly Observed Spraying records deleted on Ona.

    Returns the number of local records removed.
    """
    count = 0
    dos = DirectlyObservedSprayingForm.objects.last()
    # Prefer the live form id from the latest submission's payload.
    formid = dos.data.get("_xform_id") if dos else DIRECTLY_OBSERVED_FORM_ID
    if formid:
        data = fetch_form_data(formid, dataids_only=True)
        if not data:
            return count
        pks = [i["_id"] for i in data]
        deleted_submissions = DirectlyObservedSprayingForm.objects.exclude(
            submission_id__in=pks
        )
        count = deleted_submissions.count()
        deleted_submissions.delete()
    return count
@app.task
def check_missing_data():
    """
    Sync missing spray data from Ona.

    Thin Celery wrapper around ``sync_missing_sprays``; progress is
    reported via ``print`` so it shows in the worker log.
    """
    # Imported here to avoid a circular import at module load time.
    from mspray.apps.main.utils import sync_missing_sprays

    sync_missing_sprays(FORM_ID, print)
@app.task
def check_missing_sopdailysummary_data():
    """
    Sync missing SOP daily summary form data from Ona.

    Thin Celery wrapper around ``sync_missing_sopdailysummary``; progress
    is reported via ``print`` so it shows in the worker log.
    """
    # Imported here to avoid a circular import at module load time.
    from mspray.apps.main.utils import sync_missing_sopdailysummary

    sync_missing_sopdailysummary(DAILY_SUMMARY_FORM_PK, print)
@app.task
def check_missing_unique_link():
    """
    Checks and forces a linking of submitted data that has not yet been
    identified as unique.
    """
    from mspray.apps.main.utils import queryset_iterator

    queryset = SprayDay.objects.filter(spraypoint__isnull=True).only(
        "pk", "location_id"
    )
    # Calls add_unique_record synchronously (not ``.delay``) so the whole
    # backlog is handled inside this single task run.
    for record in queryset_iterator(queryset):
        add_unique_record(record.pk, record.location_id)
    gc.collect()
@app.task
def update_performance_reports(update_all=True):
    """
    Update performance records updated in the last UPDATE_VISITED_MINUTES
    minutes.

    NOTE(review): operators are always selected from the recent window,
    but with ``update_all=True`` their reports are rebuilt from *all*
    submissions — confirm that asymmetry is intentional.
    """
    from mspray.apps.main.utils import performance_report

    time_within = UPDATE_VISITED_MINUTES
    # One extra minute of overlap so boundary records are not missed.
    time_since = timezone.now() - timedelta(minutes=time_within + 1)
    if update_all:
        submissions = SprayDay.objects.all()
    else:
        submissions = SprayDay.objects.filter(
            Q(created_on__gte=time_since) | Q(modified_on__gte=time_since)
        )
    # One row per spray operator active within the recent window.
    sop_queryset = (
        SprayDay.objects.filter(
            Q(created_on__gte=time_since) | Q(modified_on__gte=time_since)
        )
        .filter(spray_operator__isnull=False)
        .only("spray_operator")
        .distinct("spray_operator")
    )
    for record in sop_queryset:
        performance_report(
            record.spray_operator,
            submissions.filter(spray_operator=record.spray_operator),
        )
@app.task
def sync_performance_reports():
    """
    Task to find missing performance reports and sync them back in
    """
    # Imported here to avoid circular imports at module load time.
    from mspray.apps.main.utils import find_missing_performance_report_records
    from mspray.apps.main.utils import performance_report
    from mspray.apps.main.utils import queryset_iterator

    missing_sprayformids = find_missing_performance_report_records()
    # One representative record per operator is enough to rebuild that
    # operator's report.
    queryset = SprayDay.objects.filter(
        data__sprayformid__in=missing_sprayformids
    ).distinct("spray_operator")
    for record in queryset_iterator(queryset):
        performance_report(record.spray_operator)
def get_missing_ids(formid, target_class):
    """Return submission ids not yet synchronised.

    Fetches the ids present on Ona for ``formid`` and subtracts those
    already stored as ``target_class`` rows.  Always returns a set (the
    original returned a list when no data came back); callers only
    iterate the result, so the change is compatible.
    """
    data_ids = fetch_form_data(formid, dataids_only=True)
    if not data_ids:
        return set()
    remote = {row["_id"] for row in data_ids}
    existing = set(
        target_class.objects.values_list("submission_id", flat=True)
    )
    return remote - existing
@app.task
def fetch_sensitization_visits():
    """Fetch sensitization visit submissions.

    Imports any sensitization visit submissions present on Ona that are
    not yet stored locally.
    """
    formid = getattr(settings, "SENSITIZATION_VISIT_FORM_ID", None)
    if formid:
        data_ids = get_missing_ids(formid, SensitizationVisit)
        for data_id in data_ids:
            data = fetch_form_data(formid, dataid=data_id)
            if data:
                try:
                    create_sensitization_visit(data)
                except IntegrityError:
                    # Fail silently, likely we did not find the household
                    # matching the osm id.
                    pass
@app.task
def fetch_mobilisation():
    """Fetch mobilisation submissions.

    Imports any mobilisation visit submissions present on Ona that are
    not yet stored locally.
    """
    formid = getattr(settings, "MOBILISATION_FORM_ID", None)
    if formid:
        data_ids = get_missing_ids(formid, Mobilisation)
        for data_id in data_ids:
            data = fetch_form_data(formid, dataid=data_id)
            if data:
                try:
                    create_mobilisation_visit(data)
                except IntegrityError:
                    # Log the failure and move on to the next submission.
                    logger.exception("{} Record not found.".format(formid))
                    continue
| 30.081686 | 78 | 0.63411 |
from __future__ import absolute_import
import gc
import logging
import os
from datetime import timedelta
from django.conf import settings
from django.contrib.gis.geos import Point
from django.contrib.gis.geos.polygon import Polygon
from django.db.models import Q, Sum, Value
from django.db.models.functions import Coalesce
from django.db.utils import IntegrityError
from django.utils import timezone
from mspray.apps.alerts.tasks import no_gps, user_distance
from mspray.apps.main.models import (
DirectlyObservedSprayingForm,
Household,
Location,
Mobilisation,
SensitizationVisit,
SprayDay,
SprayOperatorDailySummary,
WeeklyReport,
)
from mspray.apps.main.models.mobilisation import create_mobilisation_visit
from mspray.apps.main.models.sensitization_visit import (
create_sensitization_visit
)
from mspray.apps.main.models.spray_day import (
NON_STRUCTURE_GPS_FIELD,
STRUCTURE_GPS_FIELD,
get_osmid,
)
from mspray.apps.warehouse.tasks import stream_to_druid
from mspray.celery import app
from mspray.libs.ona import fetch_form_data, fetch_osm_xml
from mspray.libs.osm import parse_osm, parse_osm_nodes, parse_osm_ways
from mspray.libs.utils.geom_buffer import with_metric_buffer
BUFFER_SIZE = getattr(settings, "MSPRAY_NEW_BUFFER_WIDTH", 4)
HAS_UNIQUE_FIELD = getattr(settings, "MSPRAY_UNIQUE_FIELD", None)
STRUCTURE_GPS_FIELD = getattr(
settings, "MSPRAY_STRUCTURE_GPS_FIELD", STRUCTURE_GPS_FIELD
)
FORM_ID = getattr(settings, "ONA_FORM_PK", None)
LOCATION_VISITED_PERCENTAGE = getattr(
settings, "LOCATION_VISITED_PERCENTAGE", 20
)
LOCATION_SPRAYED_PERCENTAGE = getattr(
settings, "LOCATION_SPRAYED_PERCENTAGE", 90
)
UPDATE_VISITED_MINUTES = getattr(settings, "UPDATE_VISITED_MINUTES", 5)
DIRECTLY_OBSERVED_FORM_ID = getattr(
settings, "DIRECTLY_OBSERVED_FORM_ID", None
)
DAILY_SUMMARY_FORM_PK = getattr(settings, "SOP_DAILY_SUMMARY_FORM_PK", None)
FALLBACK_TO_ODK = settings.FALLBACK_TO_SUBMISSION_DATA_LOCATION
logger = logging.getLogger(__name__)
def get_new_structure_location(data, geom, is_node=False):
from mspray.apps.main.utils import geojson_from_gps_string
if is_node and geom is None:
gps_field = data.get(
STRUCTURE_GPS_FIELD, data.get(NON_STRUCTURE_GPS_FIELD)
)
geom = (
geojson_from_gps_string(gps_field, True)
if gps_field is not None
else geom
)
location = None
if geom is not None:
locations = Location.objects.filter(
geom__contains=geom, target=True, level=settings.MSPRAY_TA_LEVEL
)
if locations:
location = locations[0]
return location, geom
def get_location_from_data(data):
district = data.get("district")
target_area = data.get("spray_area")
location = None
try:
location = Location.objects.get(
name=target_area, target=True, parent__parent__code=district
)
except Location.DoesNotExist:
pass
except ValueError:
pass
return location
def get_location_from_osm(data):
geom = None
is_node = False
location = None
filename = data.get(HAS_UNIQUE_FIELD)
osm_xml = fetch_osm_xml(data, filename)
if osm_xml is not None:
geoms = []
geoms = parse_osm_ways(osm_xml) or parse_osm_nodes(osm_xml)
if len(geoms):
geom = geoms[0]["geom"]
is_node = isinstance(geom, Point)
locations = Location.objects.filter(
geom__covers=geom, target=True, level=settings.MSPRAY_TA_LEVEL
)
if locations:
location = locations.first()
else:
location = get_location_from_data(data)
return location, geom, is_node
def set_spraypoint_location(sp, location, geom, is_node=False):
if geom:
sp.geom = geom.centroid if not is_node else geom
sp.bgeom = (
geom if not is_node else with_metric_buffer(sp.geom, BUFFER_SIZE)
)
if location:
sp.location = location
sp.save()
add_unique_record.delay(sp.pk, location.pk)
elif geom is not None:
sp.save()
def get_updated_osm_from_ona(sp):
if FORM_ID:
data = fetch_form_data(FORM_ID, dataid=sp.submission_id)
if data:
osmid = get_osmid(data)
if osmid:
sp.data = data
sp.save()
return osmid
def run_tasks_after_spray_data(sprayday):
if getattr(settings, "ENABLE_ALERTS", False):
no_gps.delay(sprayday.id)
user_distance.delay(sprayday.id)
if getattr(settings, "STREAM_TO_DRUID", False):
stream_to_druid.delay(sprayday.id)
@app.task
def add_unique_record(sprayday_pk, location_pk):
try:
sprayday = SprayDay.objects.get(pk=sprayday_pk)
location = Location.objects.get(pk=location_pk)
except (SprayDay.DoesNotExist, Location.DoesNotExist):
pass
else:
from mspray.apps.main.utils import add_unique_data
osmid = (
get_osmid(sprayday.data)
or get_updated_osm_from_ona(sprayday)
or sprayday.data.get("newstructure/gps")
)
if osmid:
try:
osmid = int(osmid)
except ValueError:
pass
else:
if osmid > 0:
try:
Household.objects.get(hh_id=osmid)
except Household.DoesNotExist:
try:
household = Household.objects.get(
bgeom=sprayday.bgeom
)
except Household.DoesNotExist:
pass
else:
osmid = household.hh_id
sprayday.osmid = osmid
sprayday.save()
sprayday.refresh_from_db()
add_unique_data(sprayday, HAS_UNIQUE_FIELD, location, osmid)
@app.task
def link_spraypoint_with_osm(spray_day_id):
try:
spray_day = SprayDay.objects.get(pk=spray_day_id)
except SprayDay.DoesNotExist:
pass
else:
location, geom, is_node = get_location_from_osm(spray_day.data)
if location is None:
location, geom = get_new_structure_location(
spray_day.data, geom, is_node
)
if location is None and FALLBACK_TO_ODK:
location = get_location_from_data(spray_day.data)
else:
is_node = isinstance(geom, Point)
if not location and not FALLBACK_TO_ODK and not spray_day.location:
spray_day.location = None
set_spraypoint_location(spray_day, location, geom, is_node)
return spray_day.pk
def _create_household(way, location):
bgeom = None
if isinstance(way.get("geom"), Polygon):
bgeom = way.get("geom")
try:
Household.objects.create(
hh_id=way.get("osm_id"),
geom=way.get("geom").centroid,
bgeom=bgeom,
data=way.get("tags"),
location=location,
)
except Household.DoesNotExist:
pass
except IntegrityError:
pass
@app.task
def process_osm_file(path):
with open(path) as f:
name = os.path.basename(path).replace(".osm", "")
content = f.read()
nodes = parse_osm(content.strip())
ways = [way for way in nodes if way.get("osm_type") == "way"]
if ways:
for way in ways:
location = (
Location.objects.filter(
geom__contains=ways[0].get("geom"), level="ta"
).first()
or Location.objects.filter(name=name, level="ta").first()
)
if location:
_create_household(way, location)
gc.collect()
@app.task
def refresh_data_with_no_osm():
def _process_no_osm(queryset):
for rec in data:
osmid = get_updated_osm_from_ona(rec)
if osmid:
link_spraypoint_with_osm.delay(rec.pk)
data = (
SprayDay.objects.exclude(data__has_key="osmstructure:way:id")
.exclude(data__has_key="osmstructure:node:id")
.filter(data__has_key="osmstructure")
)
found = data.count()
_process_no_osm(data)
data = SprayDay.objects.filter(geom__isnull=True)
found = data.count() + found
_process_no_osm(data)
return found
def set_sprayed_visited_week(
location, week_number, visited, sprayed, structures
):
try:
report = WeeklyReport.objects.get(
location=location, week_number=week_number
)
except WeeklyReport.DoesNotExist:
report = WeeklyReport(location=location, week_number=week_number)
report.visited = visited
report.sprayed = sprayed
report.structures = structures
report.save()
@app.task
def task_set_sprayed_visited(location_id, week_number=None):
try:
location = Location.objects.get(pk=location_id)
except Location.DoesNotExist:
pass
else:
set_sprayed_visited(location, week_number=week_number)
def set_sprayed_visited(location, week_number=None):
from mspray.apps.main.serializers.target_area import get_spray_area_stats
if location.level == "ta":
sprayed = 0
visited = 0
if week_number:
context = {"week_number": week_number}
data, total_structures = get_spray_area_stats(location, context)
visited_sprayed = data.get("sprayed")
found = data.get("found")
else:
total_structures = location.structures_on_ground
found = location.visited_found
visited_sprayed = location.visited_sprayed
if total_structures and found:
ratio = round((found * 100) / total_structures)
if ratio >= LOCATION_VISITED_PERCENTAGE:
visited = 1
if total_structures and visited_sprayed:
ratio = round((visited_sprayed * 100) / total_structures)
if ratio >= LOCATION_SPRAYED_PERCENTAGE:
sprayed = 1
if week_number:
set_sprayed_visited_week(
location, week_number, visited, sprayed, total_structures
)
else:
location.visited = visited
location.sprayed = sprayed
location.save()
else:
if week_number:
kwargs = {"week_number": week_number}
if location.level == "RHC":
kwargs["location__parent"] = location
else:
kwargs["location__parent__parent"] = location
queryset = WeeklyReport.objects.filter(**kwargs).aggregate(
structures_sum=Coalesce(
Sum("structures", distinct=True), Value(0)
),
visited_sum=Coalesce(Sum("visited", distinct=True), Value(0)),
sprayed_sum=Coalesce(Sum("sprayed", distinct=True), Value(0)),
)
set_sprayed_visited_week(
location,
week_number,
queryset.get("visited_sum"),
queryset.get("sprayed_sum"),
queryset.get("structures_sum"),
)
else:
queryset = location.location_set.values("id").aggregate(
visited_sum=Coalesce(Sum("visited", distinct=True), Value(0)),
sprayed_sum=Coalesce(Sum("sprayed", distinct=True), Value(0)),
)
location.visited = queryset.get("visited_sum") or 0
location.sprayed = queryset.get("sprayed_sum") or 0
location.save()
@app.task
def update_sprayed_visited(time_within=UPDATE_VISITED_MINUTES):
def _set_sprayed_visited(key):
for loc_id in submissions.values_list(key, flat=True).distinct():
location = Location.objects.get(pk=loc_id)
set_sprayed_visited(location)
time_since = timezone.now() - timedelta(minutes=time_within + 1)
submissions = SprayDay.objects.filter(created_on__gte=time_since).exclude(
location__isnull=True
)
_set_sprayed_visited("location")
_set_sprayed_visited("location__parent")
_set_sprayed_visited("location__parent__parent")
@app.task
def update_sprayed_visited_week(
time_within=UPDATE_VISITED_MINUTES, week_number=None
):
def _set_sprayed_visited(key):
for loc_id in submissions.values_list(key, flat=True).distinct():
location = Location.objects.get(pk=loc_id)
set_sprayed_visited(location, week_number=week_number)
submissions = SprayDay.objects.filter().exclude(location__isnull=True)
if not week_number:
week_number = int(timezone.now().strftime("%W"))
_set_sprayed_visited("location")
_set_sprayed_visited("location__parent")
_set_sprayed_visited("location__parent__parent")
@app.task
def set_district_sprayed_visited():
queryset = Location.objects.filter(level="ta", target=True)
for location in queryset.iterator():
set_sprayed_visited(location)
for location in Location.objects.filter(level="RHC"):
set_sprayed_visited(location)
for location in Location.objects.filter(level="district"):
set_sprayed_visited(location)
@app.task
def remove_deleted_records():
count = 0
if FORM_ID:
data = fetch_form_data(FORM_ID, dataids_only=True)
if not data:
return count
pks = [i["_id"] for i in data]
deleted_submissions = SprayDay.objects.exclude(submission_id__in=pks)
count = deleted_submissions.count()
deleted_submissions.delete()
return count
@app.task
def update_edited_records():
count = 0
if FORM_ID:
data = fetch_form_data(FORM_ID, dataids_only=True, edited_only=True)
if not data:
return count
pks = [i["_id"] for i in data]
edited_submissions = SprayDay.objects.filter(submission_id__in=pks)
for rec in edited_submissions:
data = fetch_form_data(FORM_ID, dataid=rec.submission_id)
if data:
from mspray.apps.main.utils import add_spray_data
add_spray_data(data)
count += 1
return count
@app.task
def remove_deleted_daily_summary_records():
count = 0
summary = SprayOperatorDailySummary.objects.last()
formid = summary and summary.data.get("_xform_id")
if formid:
data = fetch_form_data(formid, dataids_only=True)
if not data:
return count
pks = [i["_id"] for i in data]
records = SprayOperatorDailySummary.objects.exclude(
submission_id__in=pks
)
count = records.count()
records.delete()
return count
@app.task
def fetch_directly_observed():
count = 0
dos = DirectlyObservedSprayingForm.objects.last()
formid = dos.data.get("_xform_id") if dos else DIRECTLY_OBSERVED_FORM_ID
if formid:
data = fetch_form_data(formid, dataids_only=True)
if not data:
return count
pks = [i["_id"] for i in data]
received = DirectlyObservedSprayingForm.objects.values_list(
"submission_id", flat=True
)
new_ids = set(pks).difference(set((i for i in received)))
for rec in new_ids:
data = fetch_form_data(formid, dataid=rec)
if data:
from mspray.apps.main.utils import (
add_directly_observed_spraying_data
)
add_directly_observed_spraying_data(data)
count += 1
return count
@app.task
def fetch_updated_directly_observed():
count = 0
dos = DirectlyObservedSprayingForm.objects.last()
formid = dos.data.get("_xform_id") if dos else DIRECTLY_OBSERVED_FORM_ID
if formid:
data = fetch_form_data(formid, dataids_only=True, edited_only=True)
if not data:
return count
pks = [i["_id"] for i in data]
for rec in pks:
data = fetch_form_data(formid, dataid=rec)
if data:
from mspray.apps.main.utils import (
add_directly_observed_spraying_data
)
add_directly_observed_spraying_data(data)
count += 1
return count
@app.task
def remove_deleted_dos_records():
count = 0
dos = DirectlyObservedSprayingForm.objects.last()
formid = dos.data.get("_xform_id") if dos else DIRECTLY_OBSERVED_FORM_ID
if formid:
data = fetch_form_data(formid, dataids_only=True)
if not data:
return count
pks = [i["_id"] for i in data]
deleted_submissions = DirectlyObservedSprayingForm.objects.exclude(
submission_id__in=pks
)
count = deleted_submissions.count()
deleted_submissions.delete()
return count
@app.task
def check_missing_data():
from mspray.apps.main.utils import sync_missing_sprays
sync_missing_sprays(FORM_ID, print)
@app.task
def check_missing_sopdailysummary_data():
from mspray.apps.main.utils import sync_missing_sopdailysummary
sync_missing_sopdailysummary(DAILY_SUMMARY_FORM_PK, print)
@app.task
def check_missing_unique_link():
from mspray.apps.main.utils import queryset_iterator
queryset = SprayDay.objects.filter(spraypoint__isnull=True).only(
"pk", "location_id"
)
for record in queryset_iterator(queryset):
add_unique_record(record.pk, record.location_id)
gc.collect()
@app.task
def update_performance_reports(update_all=True):
from mspray.apps.main.utils import performance_report
time_within = UPDATE_VISITED_MINUTES
time_since = timezone.now() - timedelta(minutes=time_within + 1)
if update_all:
submissions = SprayDay.objects.all()
else:
submissions = SprayDay.objects.filter(
Q(created_on__gte=time_since) | Q(modified_on__gte=time_since)
)
sop_queryset = (
SprayDay.objects.filter(
Q(created_on__gte=time_since) | Q(modified_on__gte=time_since)
)
.filter(spray_operator__isnull=False)
.only("spray_operator")
.distinct("spray_operator")
)
for record in sop_queryset:
performance_report(
record.spray_operator,
submissions.filter(spray_operator=record.spray_operator),
)
@app.task
def sync_performance_reports():
from mspray.apps.main.utils import find_missing_performance_report_records
from mspray.apps.main.utils import performance_report
from mspray.apps.main.utils import queryset_iterator
missing_sprayformids = find_missing_performance_report_records()
queryset = SprayDay.objects.filter(
data__sprayformid__in=missing_sprayformids
).distinct("spray_operator")
for record in queryset_iterator(queryset):
performance_report(record.spray_operator)
def get_missing_ids(formid, target_class):
data_ids = fetch_form_data(formid, dataids_only=True)
if data_ids:
data_ids = set(i["_id"] for i in data_ids)
existing = set(
i
for i in target_class.objects.values_list(
"submission_id", flat=True
)
)
return data_ids - existing
return []
@app.task
def fetch_sensitization_visits():
formid = getattr(settings, "SENSITIZATION_VISIT_FORM_ID", None)
if formid:
data_ids = get_missing_ids(formid, SensitizationVisit)
for data_id in data_ids:
data = fetch_form_data(formid, dataid=data_id)
if data:
try:
create_sensitization_visit(data)
except IntegrityError:
pass
@app.task
def fetch_mobilisation():
formid = getattr(settings, "MOBILISATION_FORM_ID", None)
if formid:
data_ids = get_missing_ids(formid, Mobilisation)
for data_id in data_ids:
data = fetch_form_data(formid, dataid=data_id)
if data:
try:
create_mobilisation_visit(data)
except IntegrityError:
logger.exception("{} Record not found.".format(formid))
continue
| true | true |
f733e3db1a4d26f03d87a4c1a4615c27ffe99363 | 676 | py | Python | Home/migrations/0010_auto_20200722_1738.py | varunofficial2509/HMS-1 | d1b618575d038cd432e5b8742cdebdae82d34d30 | [
"MIT"
] | 4 | 2021-06-13T13:52:37.000Z | 2021-09-16T16:54:11.000Z | Home/migrations/0010_auto_20200722_1738.py | varunofficial2509/HMS-1 | d1b618575d038cd432e5b8742cdebdae82d34d30 | [
"MIT"
] | 5 | 2021-07-10T10:37:06.000Z | 2022-03-12T00:58:24.000Z | Home/migrations/0010_auto_20200722_1738.py | varunofficial2509/HMS-1 | d1b618575d038cd432e5b8742cdebdae82d34d30 | [
"MIT"
] | 2 | 2021-06-13T08:16:15.000Z | 2021-07-27T13:47:00.000Z | # Generated by Django 3.0.8 on 2020-07-22 12:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widen ``Student.branch`` to five fixed
    choices and make ``rollno`` the model's primary key."""

    dependencies = [
        ('Home', '0009_auto_20200722_1734'),
    ]
    operations = [
        migrations.AlterField(
            model_name='student',
            name='branch',
            field=models.CharField(choices=[('CSE', 'CSE'), ('IT', 'IT'), ('EEE', 'EEE'), ('MECH', 'MECH'), ('ECE', 'ECE')], max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='student',
            name='rollno',
            field=models.CharField(max_length=200, primary_key=True, serialize=False),
        ),
    ]
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Home', '0009_auto_20200722_1734'),
]
operations = [
migrations.AlterField(
model_name='student',
name='branch',
field=models.CharField(choices=[('CSE', 'CSE'), ('IT', 'IT'), ('EEE', 'EEE'), ('MECH', 'MECH'), ('ECE', 'ECE')], max_length=200, null=True),
),
migrations.AlterField(
model_name='student',
name='rollno',
field=models.CharField(max_length=200, primary_key=True, serialize=False),
),
]
| true | true |
f733e44f09e96e0d059abab74c2b3a65b40eaaad | 314 | py | Python | simple_fun_#182_happy_g.py | Kunalpod/codewars | 8dc1af2f3c70e209471045118fd88b3ea1e627e5 | [
"MIT"
] | null | null | null | simple_fun_#182_happy_g.py | Kunalpod/codewars | 8dc1af2f3c70e209471045118fd88b3ea1e627e5 | [
"MIT"
] | null | null | null | simple_fun_#182_happy_g.py | Kunalpod/codewars | 8dc1af2f3c70e209471045118fd88b3ea1e627e5 | [
"MIT"
] | null | null | null | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Simple Fun #182: Happy "g"
#Problem level: 7 kyu
def happy_g(s):
    """Return True when every 'g' in *s* is adjacent to another 'g'.

    Fixes two defects in the original: an IndexError when a 'g' is the
    last character (``s[i + 1]`` ran past the end) and an accidental
    wrap-around via the negative index ``s[i - 1]`` when ``i == 0``.
    """
    for i, ch in enumerate(s):
        if ch != 'g':
            continue
        has_left = i > 0 and s[i - 1] == 'g'
        has_right = i + 1 < len(s) and s[i + 1] == 'g'
        if not (has_left or has_right):
            return False
    return True
| 22.428571 | 41 | 0.506369 |
s):
for i in range(len(s)):
if s[i]=='g':
if i==0 and s[i+1]!='g':
return False
if s[i-1]!='g' and s[i+1]!='g':
return False
return True
| true | true |
f733e51a8d8a7a793aceab829d65c5be4e73c9eb | 2,306 | py | Python | crossmodal/door_models/layers.py | brentyi/multimodalfilter | 210b0e241120e0fbbeaef5e478bab36ffe1e159d | [
"MIT"
] | 21 | 2020-10-25T21:31:41.000Z | 2022-03-12T17:46:20.000Z | crossmodal/door_models/layers.py | brentyi/multimodalfilter | 210b0e241120e0fbbeaef5e478bab36ffe1e159d | [
"MIT"
] | 2 | 2020-11-19T00:46:06.000Z | 2021-02-17T21:57:42.000Z | crossmodal/door_models/layers.py | brentyi/multimodalfilter | 210b0e241120e0fbbeaef5e478bab36ffe1e159d | [
"MIT"
] | 6 | 2020-11-04T22:21:23.000Z | 2021-12-13T04:46:51.000Z | import torch
import torch.nn as nn
from fannypack.nn import resblocks
state_dim = 3
control_dim = 7
obs_pos_dim = 3
obs_sensors_dim = 7
def state_layers(units: int) -> nn.Module:
    """Create a state encoder block.

    Args:
        units (int): # of hidden units in network layers.

    Returns:
        nn.Module: Encoder block.
    """
    modules = [
        nn.Linear(state_dim, units),
        nn.ReLU(inplace=True),
        resblocks.Linear(units),
    ]
    return nn.Sequential(*modules)
def control_layers(units: int) -> nn.Module:
    """Create a control command encoder block.

    Args:
        units (int): # of hidden units in network layers.

    Returns:
        nn.Module: Encoder block.
    """
    modules = [
        nn.Linear(control_dim, units),
        nn.ReLU(inplace=True),
        resblocks.Linear(units),
    ]
    return nn.Sequential(*modules)
def observation_image_layers(units: int) -> nn.Module:
    """Create an image encoder block.

    Args:
        units (int): # of hidden units in network layers.

    Returns:
        nn.Module: Encoder block.
    """
    # All convolutions below are stride 1 with 'same'-style padding, so the
    # spatial size is preserved end to end.  The Linear layer's input size
    # (8 * 32 * 32) therefore assumes a single-channel 32x32 input image --
    # TODO confirm against callers.
    return nn.Sequential(
        nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, padding=2),
        nn.ReLU(inplace=True),
        resblocks.Conv2d(channels=32, kernel_size=3),
        nn.Conv2d(in_channels=32, out_channels=16, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(in_channels=16, out_channels=8, kernel_size=3, padding=1),
        nn.Flatten(),  # 32 * 32 * 8
        nn.Linear(8 * 32 * 32, units),
        nn.ReLU(inplace=True),
        resblocks.Linear(units),
    )
def observation_pos_layers(units: int) -> nn.Module:
    """Build the encoder block for end-effector positions.

    Args:
        units (int): number of hidden units in the dense layers.

    Returns:
        nn.Module: the encoder block.
    """
    encoder = [
        nn.Linear(obs_pos_dim, units),
        nn.ReLU(inplace=True),
        resblocks.Linear(units),
    ]
    return nn.Sequential(*encoder)
def observation_sensors_layers(units: int) -> nn.Module:
    """Build the encoder block for F/T sensor readings.

    Args:
        units (int): number of hidden units in the dense layers.

    Returns:
        nn.Module: the encoder block.
    """
    encoder = [
        nn.Linear(obs_sensors_dim, units),
        nn.ReLU(inplace=True),
        resblocks.Linear(units),
    ]
    return nn.Sequential(*encoder)
| 24.020833 | 77 | 0.61752 | import torch
import torch.nn as nn
from fannypack.nn import resblocks
state_dim = 3
control_dim = 7
obs_pos_dim = 3
obs_sensors_dim = 7
def state_layers(units: int) -> nn.Module:
return nn.Sequential(
nn.Linear(state_dim, units),
nn.ReLU(inplace=True),
resblocks.Linear(units),
)
def control_layers(units: int) -> nn.Module:
return nn.Sequential(
nn.Linear(control_dim, units),
nn.ReLU(inplace=True),
resblocks.Linear(units),
)
def observation_image_layers(units: int) -> nn.Module:
return nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
resblocks.Conv2d(channels=32, kernel_size=3),
nn.Conv2d(in_channels=32, out_channels=16, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=16, out_channels=8, kernel_size=3, padding=1),
nn.Flatten(),
nn.Linear(8 * 32 * 32, units),
nn.ReLU(inplace=True),
resblocks.Linear(units),
)
def observation_pos_layers(units: int) -> nn.Module:
return nn.Sequential(
nn.Linear(obs_pos_dim, units),
nn.ReLU(inplace=True),
resblocks.Linear(units),
)
def observation_sensors_layers(units: int) -> nn.Module:
return nn.Sequential(
nn.Linear(obs_sensors_dim, units),
nn.ReLU(inplace=True),
resblocks.Linear(units),
)
| true | true |
f733e56203d598956174c7eeca876e297e5be2f6 | 13,100 | py | Python | kws_streaming/train/model_train_eval.py | ojInc/google-research | 9929c88b664800a25b8716c22068dd77d80bd5ee | [
"Apache-2.0"
] | 2 | 2020-11-09T08:04:33.000Z | 2020-11-09T08:04:57.000Z | kws_streaming/train/model_train_eval.py | ArturHD/google-research | 650580cbf928aa640bf39897c5758ddb71b68a51 | [
"Apache-2.0"
] | null | null | null | kws_streaming/train/model_train_eval.py | ArturHD/google-research | 650580cbf928aa640bf39897c5758ddb71b68a51 | [
"Apache-2.0"
] | 1 | 2020-12-14T08:24:59.000Z | 2020-12-14T08:24:59.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple speech recognition to spot a limited number of keywords.
It is based on tensorflow/examples/speech_commands
This is a self-contained example script that will train a very basic audio
recognition model in TensorFlow. It downloads the necessary training data and
runs with reasonable defaults to train within a few hours even only using a CPU.
It is intended as an introduction to using neural networks for audio
recognition, and is not a full speech recognition system. This network uses a
keyword detection style to spot discrete words from a small vocabulary,
consisting of
"yes", "no", "up", "down", "left", "right", "on", "off", "stop", and "go".
To run the training process, use:
bazel run model_train_eval.py
This will write out checkpoints to /tmp/speech_commands_train/, and will
download over 1GB of open source training data, so you'll need enough free space
and a good internet connection. The default data is a collection of thousands of
one-second .wav files, each containing one spoken word. This data set is
collected from https://aiyprojects.withgoogle.com/open_speech_recording, please
consider contributing to help improve this and other models!
As training progresses, it will print out its accuracy metrics, which should
rise above 90% by the end. Once it's complete, it will produce
Keras, SavedModel, TFLite and graphdef representations.
If you want to train on your own data, you'll need to create .wavs with your
recordings, all at a consistent length, and then arrange them into subfolders
organized by label. For example, here's a possible file structure:
data >
up >
audio_0.wav
audio_1.wav
down >
audio_2.wav
audio_3.wav
other>
audio_4.wav
audio_5.wav
You'll also need to tell the script what labels to look for, using the
`--wanted_words` argument. In this case, 'up,down' might be what you want, and
the audio in the 'other' folder would be used to train an 'unknown' category.
To pull this all together, you'd run:
bazel run tensorflow/examples/speech_commands:train --
--data_dir /data --wanted_words up,down
Above script will automatically split data into training/validation and testing.
If you prefer to split the data on your own, then you should set flag
"--split_data 0" and prepare folders with structure:
data >
training >
up >
audio_0.wav
audio_1.wav
down >
audio_2.wav
audio_3.wav
validation >
up >
audio_6.wav
audio_7.wav
down >
audio_8.wav
audio_9.wav
testing >
up >
audio_12.wav
audio_13.wav
down >
audio_14.wav
audio_15.wav
_background_noise_ >
audio_18.wav
To pull this all together, you'd run:
bazel run tensorflow/examples/speech_commands:train --
--data_dir /data --wanted_words up,down --split_data 0
"""
import json
import os
import sys
from absl import logging
import tensorflow.compat.v1 as tf
from kws_streaming.layers import modes
import kws_streaming.models.att_mh_rnn as att_mh_rnn
import kws_streaming.models.att_rnn as att_rnn
import kws_streaming.models.cnn as cnn
import kws_streaming.models.crnn as crnn
import kws_streaming.models.dnn as dnn
import kws_streaming.models.dnn_raw as dnn_raw
import kws_streaming.models.ds_cnn as ds_cnn
import kws_streaming.models.ds_tc_resnet as ds_tc_resnet
import kws_streaming.models.gru as gru
import kws_streaming.models.inception as inception
import kws_streaming.models.inception_resnet as inception_resnet
import kws_streaming.models.lstm as lstm
import kws_streaming.models.mobilenet as mobilenet
import kws_streaming.models.mobilenet_v2 as mobilenet_v2
import kws_streaming.models.svdf as svdf
import kws_streaming.models.svdf_resnet as svdf_resnet
import kws_streaming.models.tc_resnet as tc_resnet
from kws_streaming.models.utils import parse
import kws_streaming.models.xception as xception
from kws_streaming.train import base_parser
from kws_streaming.train import model_flags
from kws_streaming.train import train
import kws_streaming.train.test as test
FLAGS = None
def main(_):
  """Run the full train/convert/evaluate pipeline for the selected model.

  Optionally (re)trains the model, then converts it to SavedModel and
  TFLite representations (non-streaming and, where supported, streaming)
  and evaluates the accuracy of each produced representation.
  """
  # Update flags
  flags = model_flags.update_flags(FLAGS)

  if flags.train:
    # Create model folders where logs and model will be stored
    os.makedirs(flags.train_dir)
    os.mkdir(flags.summaries_dir)

    # Model training
    train.train(flags)
  else:
    if not os.path.isdir(flags.train_dir):
      raise ValueError('model is not trained set "--train 1" and retrain it')

  # write all flags settings into json
  with open(os.path.join(flags.train_dir, 'flags.json'), 'wt') as f:
    json.dump(flags.__dict__, f)

  # convert to SavedModel
  test.convert_model_saved(flags, 'non_stream',
                           modes.Modes.NON_STREAM_INFERENCE)
  try:
    test.convert_model_saved(flags, 'stream_state_internal',
                             modes.Modes.STREAM_INTERNAL_STATE_INFERENCE)
  except (ValueError, IndexError) as e:
    # Not every topology supports streaming; log and continue with the rest.
    logging.info('FAILED to run TF streaming: %s', e)

  logging.info('run TF non streaming model accuracy evaluation')
  # with TF
  folder_name = 'tf'
  test.tf_non_stream_model_accuracy(flags, folder_name)

  # with TF.
  # We can apply non stream model on stream data, by running inference
  # every 200ms (for example), so that total latency will be similar with
  # streaming model which is executed every 20ms.
  # To measure the impact of sampling on model accuracy,
  # we introduce time_shift_ms during accuracy evaluation.
  # Convert milliseconds to samples:
  time_shift_samples = int(
      (flags.time_shift_ms * flags.sample_rate) / model_flags.MS_PER_SECOND)
  test.tf_non_stream_model_accuracy(
      flags,
      folder_name,
      time_shift_samples,
      accuracy_name='tf_non_stream_model_sampling_stream_accuracy.txt')

  # Run the TFLite conversion/evaluation twice: once as-is and once with
  # size-oriented post-training quantization (folder names are prefixed
  # with the optimization name).
  name2opt = {
      '': None,
      'quantize_opt_for_size_': [tf.lite.Optimize.OPTIMIZE_FOR_SIZE],
  }

  for opt_name, optimizations in name2opt.items():

    if (opt_name and flags.feature_type == 'mfcc_tf' and
        flags.preprocess == 'raw'):
      logging.info('feature type mfcc_tf needs quantization aware training '
                   'for quantization - it is not implemented')
      continue

    folder_name = opt_name + 'tflite_non_stream'
    file_name = 'non_stream.tflite'
    mode = modes.Modes.NON_STREAM_INFERENCE
    test.convert_model_tflite(flags, folder_name, mode, file_name,
                              optimizations=optimizations)
    test.tflite_non_stream_model_accuracy(flags, folder_name, file_name)

    # these models are using bi-rnn, so they are non streamable by default
    # also models using striding or pooling are not supported for streaming now
    non_streamable_models = {'att_mh_rnn', 'att_rnn', 'tc_resnet'}

    model_is_streamable = True
    if flags.model_name in non_streamable_models:
      model_is_streamable = False
    # below models can use striding in time dimension,
    # but this is currently unsupported
    elif flags.model_name == 'cnn':
      for strides in parse(flags.cnn_strides):
        if strides[0] > 1:
          model_is_streamable = False
          break
    elif flags.model_name == 'ds_cnn':
      if parse(flags.cnn1_strides)[0] > 1:
        model_is_streamable = False
      for strides in parse(flags.dw2_strides):
        if strides[0] > 1:
          model_is_streamable = False
          break

    # if model can be streamed, then run conversion/evaluation in streaming mode
    if model_is_streamable:
      # ---------------- TF streaming model accuracy evaluation ---------------
      # Streaming model with external state evaluation using TF with state
      # reset; only run on the non-quantized pass (opt_name == '').
      if not opt_name:
        logging.info('run TF evalution only without optimization/quantization')
        try:
          folder_name = 'tf'
          test.tf_stream_state_external_model_accuracy(
              flags,
              folder_name,
              accuracy_name='stream_state_external_model_accuracy_sub_set_reset1.txt',
              reset_state=True)  # with state reset between test sequences

          # Streaming (with external state) evaluation using TF no state reset
          test.tf_stream_state_external_model_accuracy(
              flags,
              folder_name,
              accuracy_name='stream_state_external_model_accuracy_sub_set_reset0.txt',
              reset_state=False)  # without state reset

          # Streaming (with internal state) evaluation using TF no state reset
          test.tf_stream_state_internal_model_accuracy(flags, folder_name)
        except (ValueError, IndexError) as e:
          logging.info('FAILED to run TF streaming: %s', e)

      logging.info('run TFlite streaming model accuracy evaluation')
      try:
        # convert model to TFlite
        folder_name = opt_name + 'tflite_stream_state_external'
        file_name = 'stream_state_external.tflite'
        mode = modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE
        test.convert_model_tflite(flags, folder_name, mode, file_name,
                                  optimizations=optimizations)

        # Streaming model accuracy evaluation with TFLite with state reset
        test.tflite_stream_state_external_model_accuracy(
            flags,
            folder_name,
            file_name,
            accuracy_name='tflite_stream_state_external_model_accuracy_reset1.txt',
            reset_state=True)

        # Streaming model accuracy evaluation with TFLite without state reset
        test.tflite_stream_state_external_model_accuracy(
            flags,
            folder_name,
            file_name,
            accuracy_name='tflite_stream_state_external_model_accuracy_reset0.txt',
            reset_state=False)
      except (ValueError, IndexError) as e:
        logging.info('FAILED to run TFLite streaming: %s', e)
if __name__ == '__main__':
  # Parser for training/testing data and speech feature flags.
  parser = base_parser.base_parser()

  # One sub-parser per supported model, each contributing its own
  # model-specific flags through the module's model_parameters() hook.
  # The registration order below matches the order models are documented.
  subparsers = parser.add_subparsers(dest='model_name', help='NN model name')
  model_modules = (
      ('dnn', dnn),
      ('dnn_raw', dnn_raw),
      ('lstm', lstm),
      ('gru', gru),
      ('svdf', svdf),
      ('cnn', cnn),
      ('crnn', crnn),
      ('att_mh_rnn', att_mh_rnn),
      ('att_rnn', att_rnn),
      ('ds_cnn', ds_cnn),
      ('tc_resnet', tc_resnet),
      ('mobilenet', mobilenet),
      ('mobilenet_v2', mobilenet_v2),
      ('xception', xception),
      ('inception', inception),
      ('inception_resnet', inception_resnet),
      ('svdf_resnet', svdf_resnet),
      ('ds_tc_resnet', ds_tc_resnet),
  )
  for model_name, model_module in model_modules:
    model_module.model_parameters(subparsers.add_parser(model_name))

  FLAGS, unparsed = parser.parse_known_args()
  if unparsed and tuple(unparsed) != ('--alsologtostderr',):
    raise ValueError('Unknown argument: {}'.format(unparsed))

  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 36.088154 | 86 | 0.736336 |
import json
import os
import sys
from absl import logging
import tensorflow.compat.v1 as tf
from kws_streaming.layers import modes
import kws_streaming.models.att_mh_rnn as att_mh_rnn
import kws_streaming.models.att_rnn as att_rnn
import kws_streaming.models.cnn as cnn
import kws_streaming.models.crnn as crnn
import kws_streaming.models.dnn as dnn
import kws_streaming.models.dnn_raw as dnn_raw
import kws_streaming.models.ds_cnn as ds_cnn
import kws_streaming.models.ds_tc_resnet as ds_tc_resnet
import kws_streaming.models.gru as gru
import kws_streaming.models.inception as inception
import kws_streaming.models.inception_resnet as inception_resnet
import kws_streaming.models.lstm as lstm
import kws_streaming.models.mobilenet as mobilenet
import kws_streaming.models.mobilenet_v2 as mobilenet_v2
import kws_streaming.models.svdf as svdf
import kws_streaming.models.svdf_resnet as svdf_resnet
import kws_streaming.models.tc_resnet as tc_resnet
from kws_streaming.models.utils import parse
import kws_streaming.models.xception as xception
from kws_streaming.train import base_parser
from kws_streaming.train import model_flags
from kws_streaming.train import train
import kws_streaming.train.test as test
FLAGS = None
def main(_):
flags = model_flags.update_flags(FLAGS)
if flags.train:
os.makedirs(flags.train_dir)
os.mkdir(flags.summaries_dir)
train.train(flags)
else:
if not os.path.isdir(flags.train_dir):
raise ValueError('model is not trained set "--train 1" and retrain it')
with open(os.path.join(flags.train_dir, 'flags.json'), 'wt') as f:
json.dump(flags.__dict__, f)
test.convert_model_saved(flags, 'non_stream',
modes.Modes.NON_STREAM_INFERENCE)
try:
test.convert_model_saved(flags, 'stream_state_internal',
modes.Modes.STREAM_INTERNAL_STATE_INFERENCE)
except (ValueError, IndexError) as e:
logging.info('FAILED to run TF streaming: %s', e)
logging.info('run TF non streaming model accuracy evaluation')
folder_name = 'tf'
test.tf_non_stream_model_accuracy(flags, folder_name)
time_shift_samples = int(
(flags.time_shift_ms * flags.sample_rate) / model_flags.MS_PER_SECOND)
test.tf_non_stream_model_accuracy(
flags,
folder_name,
time_shift_samples,
accuracy_name='tf_non_stream_model_sampling_stream_accuracy.txt')
name2opt = {
'': None,
'quantize_opt_for_size_': [tf.lite.Optimize.OPTIMIZE_FOR_SIZE],
}
for opt_name, optimizations in name2opt.items():
if (opt_name and flags.feature_type == 'mfcc_tf' and
flags.preprocess == 'raw'):
logging.info('feature type mfcc_tf needs quantization aware training '
'for quantization - it is not implemented')
continue
folder_name = opt_name + 'tflite_non_stream'
file_name = 'non_stream.tflite'
mode = modes.Modes.NON_STREAM_INFERENCE
test.convert_model_tflite(flags, folder_name, mode, file_name,
optimizations=optimizations)
test.tflite_non_stream_model_accuracy(flags, folder_name, file_name)
non_streamable_models = {'att_mh_rnn', 'att_rnn', 'tc_resnet'}
model_is_streamable = True
if flags.model_name in non_streamable_models:
model_is_streamable = False
elif flags.model_name == 'cnn':
for strides in parse(flags.cnn_strides):
if strides[0] > 1:
model_is_streamable = False
break
elif flags.model_name == 'ds_cnn':
if parse(flags.cnn1_strides)[0] > 1:
model_is_streamable = False
for strides in parse(flags.dw2_strides):
if strides[0] > 1:
model_is_streamable = False
break
if model_is_streamable:
if not opt_name:
logging.info('run TF evalution only without optimization/quantization')
try:
folder_name = 'tf'
test.tf_stream_state_external_model_accuracy(
flags,
folder_name,
accuracy_name='stream_state_external_model_accuracy_sub_set_reset1.txt',
reset_state=True)
test.tf_stream_state_external_model_accuracy(
flags,
folder_name,
accuracy_name='stream_state_external_model_accuracy_sub_set_reset0.txt',
reset_state=False)
test.tf_stream_state_internal_model_accuracy(flags, folder_name)
except (ValueError, IndexError) as e:
logging.info('FAILED to run TF streaming: %s', e)
logging.info('run TFlite streaming model accuracy evaluation')
try:
folder_name = opt_name + 'tflite_stream_state_external'
file_name = 'stream_state_external.tflite'
mode = modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE
test.convert_model_tflite(flags, folder_name, mode, file_name,
optimizations=optimizations)
test.tflite_stream_state_external_model_accuracy(
flags,
folder_name,
file_name,
accuracy_name='tflite_stream_state_external_model_accuracy_reset1.txt',
reset_state=True)
test.tflite_stream_state_external_model_accuracy(
flags,
folder_name,
file_name,
accuracy_name='tflite_stream_state_external_model_accuracy_reset0.txt',
reset_state=False)
except (ValueError, IndexError) as e:
logging.info('FAILED to run TFLite streaming: %s', e)
if __name__ == '__main__':
parser = base_parser.base_parser()
subparsers = parser.add_subparsers(dest='model_name', help='NN model name')
parser_dnn = subparsers.add_parser('dnn')
dnn.model_parameters(parser_dnn)
parser_dnn_raw = subparsers.add_parser('dnn_raw')
dnn_raw.model_parameters(parser_dnn_raw)
parser_lstm = subparsers.add_parser('lstm')
lstm.model_parameters(parser_lstm)
parser_gru = subparsers.add_parser('gru')
gru.model_parameters(parser_gru)
parser_svdf = subparsers.add_parser('svdf')
svdf.model_parameters(parser_svdf)
parser_cnn = subparsers.add_parser('cnn')
cnn.model_parameters(parser_cnn)
parser_crnn = subparsers.add_parser('crnn')
crnn.model_parameters(parser_crnn)
parser_att_mh_rnn = subparsers.add_parser('att_mh_rnn')
att_mh_rnn.model_parameters(parser_att_mh_rnn)
parser_att_rnn = subparsers.add_parser('att_rnn')
att_rnn.model_parameters(parser_att_rnn)
parser_ds_cnn = subparsers.add_parser('ds_cnn')
ds_cnn.model_parameters(parser_ds_cnn)
parser_tc_resnet = subparsers.add_parser('tc_resnet')
tc_resnet.model_parameters(parser_tc_resnet)
parser_mobilenet = subparsers.add_parser('mobilenet')
mobilenet.model_parameters(parser_mobilenet)
parser_mobilenet_v2 = subparsers.add_parser('mobilenet_v2')
mobilenet_v2.model_parameters(parser_mobilenet_v2)
parser_xception = subparsers.add_parser('xception')
xception.model_parameters(parser_xception)
parser_inception = subparsers.add_parser('inception')
inception.model_parameters(parser_inception)
parser_inception_resnet = subparsers.add_parser('inception_resnet')
inception_resnet.model_parameters(parser_inception_resnet)
parser_svdf_resnet = subparsers.add_parser('svdf_resnet')
svdf_resnet.model_parameters(parser_svdf_resnet)
parser_ds_tc_resnet = subparsers.add_parser('ds_tc_resnet')
ds_tc_resnet.model_parameters(parser_ds_tc_resnet)
FLAGS, unparsed = parser.parse_known_args()
if unparsed and tuple(unparsed) != ('--alsologtostderr',):
raise ValueError('Unknown argument: {}'.format(unparsed))
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| true | true |
f733e70177c69899d221879f527ec02a98e6dc62 | 864 | py | Python | api/accounts/admin.py | DamianKocjan/YoutubeClone | f3e27378f0ab9ad015590e5c4358696843bf332f | [
"MIT"
] | 2 | 2021-05-19T14:29:58.000Z | 2021-05-19T14:30:05.000Z | api/accounts/admin.py | DamianKocjan/YoutubeClone | f3e27378f0ab9ad015590e5c4358696843bf332f | [
"MIT"
] | 10 | 2021-05-04T18:00:30.000Z | 2022-03-12T00:57:37.000Z | api/accounts/admin.py | DamianKocjan/YoutubeClone | f3e27378f0ab9ad015590e5c4358696843bf332f | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import gettext_lazy as _
from .models import User, Subscription
# NOTE(review): this class intentionally reuses -- and thereby shadows -- the
# name of the django.contrib.auth.admin.UserAdmin base class it extends.
# Importing the base as e.g. BaseUserAdmin would be clearer; confirm no
# external code imports this name before renaming.
@admin.register(User)
class UserAdmin(UserAdmin):
    """Admin configuration for the custom ``User`` model.

    Extends the stock auth admin layout with the extra profile fields
    (avatar, background, description, location).
    """

    # Field layout of the change (edit) form, grouped into sections.
    fieldsets = (
        (None, {'fields': ('username', 'avatar',
                           'background', 'password')}),
        (_('Personal info'), {'fields': ('first_name',
                                         'last_name', 'email', 'description', 'location')}),
        (_('Permissions'), {
            'fields': ('is_active', 'is_staff', 'is_superuser', 'groups', 'user_permissions'),
        }),
        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
    )
    # Field layout of the add (create) form.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('username', 'email', 'password1', 'password2'),
        }),
    )
# Subscription needs no custom admin options; register it with the default
# ModelAdmin.
admin.site.register(Subscription)
| 29.793103 | 94 | 0.585648 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import gettext_lazy as _
from .models import User, Subscription
@admin.register(User)
class UserAdmin(UserAdmin):
fieldsets = (
(None, {'fields': ('username', 'avatar',
'background', 'password')}),
(_('Personal info'), {'fields': ('first_name',
'last_name', 'email', 'description', 'location')}),
(_('Permissions'), {
'fields': ('is_active', 'is_staff', 'is_superuser', 'groups', 'user_permissions'),
}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'email', 'password1', 'password2'),
}),
)
admin.site.register(Subscription)
| true | true |
f733e7a2ce115df321a102cf8ddb156c6a27bf83 | 1,606 | py | Python | src/server/traffic-forwarder.py | hxhwing/Nitro-Enclave-Demo | f0683fcf843b6728fee221f31e6600a124ab793b | [
"Apache-2.0"
] | 4 | 2021-07-12T08:24:03.000Z | 2022-03-28T16:02:17.000Z | src/server/traffic-forwarder.py | hxhwing/Nitro-Enclave-Demo | f0683fcf843b6728fee221f31e6600a124ab793b | [
"Apache-2.0"
] | null | null | null | src/server/traffic-forwarder.py | hxhwing/Nitro-Enclave-Demo | f0683fcf843b6728fee221f31e6600a124ab793b | [
"Apache-2.0"
] | 1 | 2021-07-12T09:18:24.000Z | 2021-07-12T09:18:24.000Z | ### Full credit for this file goes to Richard Fan @ https://github.com/richardfan1126/nitro-enclave-python-demo
import socket
import sys
import threading
import time
def server(local_port, remote_cid, remote_port):
    """Proxy TCP connections on local_port to the vsock endpoint
    (remote_cid, remote_port).

    For every accepted TCP client a new vsock connection is opened and a
    pair of forward() threads shuttles the bytes in each direction.  If
    anything raises (bind failure, accept/connect error), the finally
    block restarts the whole listener on a fresh thread.

    NOTE(review): the restart-in-finally pattern means a persistent error
    (e.g. port already in use) respawns the server in a tight loop, and
    sockets are never explicitly closed.  Kept as-is to mirror the
    referenced upstream demo.
    """
    try:
        dock_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        dock_socket.bind(('', local_port))
        dock_socket.listen(5)
        while True:
            # accept() returns (conn, address); only the connection is used.
            client_socket = dock_socket.accept()[0]
            server_socket = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
            server_socket.connect((remote_cid, remote_port))
            # One forwarding thread per direction.
            outgoing_thread = threading.Thread(target = forward, args = (client_socket, server_socket))
            incoming_thread = threading.Thread(target = forward, args = (server_socket, client_socket))
            outgoing_thread.start()
            incoming_thread.start()
    finally:
        # Self-restart: spawn a replacement listener thread before returning.
        new_thread = threading.Thread(target = server, args = (local_port, remote_cid, remote_port))
        new_thread.start()
        return
def forward(source, destination):
    """Copy bytes from source to destination until EOF, then half-close
    both sockets so each peer sees end-of-stream."""
    while True:
        data = source.recv(1024)
        if not data:
            # Peer closed its sending side: propagate the shutdown.
            source.shutdown(socket.SHUT_RD)
            destination.shutdown(socket.SHUT_WR)
            return
        destination.sendall(data)
def main(args):
    """Parse <local_port> <remote_cid> <remote_port> from args, start the
    proxy server on a background thread and park the main thread forever."""
    local_port = int(args[0])
    remote_cid = int(args[1])
    remote_port = int(args[2])

    threading.Thread(
        target=server, args=(local_port, remote_cid, remote_port)).start()

    # Keep the process alive; the real work happens on the server thread.
    while True:
        time.sleep(60)
# Entry point.  Usage: traffic-forwarder.py <local_port> <remote_cid> <remote_port>
if __name__ == '__main__':
    main(sys.argv[1:])
| 29.740741 | 111 | 0.653176 | cal_port))
dock_socket.listen(5)
while True:
client_socket = dock_socket.accept()[0]
server_socket = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
server_socket.connect((remote_cid, remote_port))
outgoing_thread = threading.Thread(target = forward, args = (client_socket, server_socket))
incoming_thread = threading.Thread(target = forward, args = (server_socket, client_socket))
outgoing_thread.start()
incoming_thread.start()
finally:
new_thread = threading.Thread(target = server, args = (local_port, remote_cid, remote_port))
new_thread.start()
return
def forward(source, destination):
string = ' '
while string:
string = source.recv(1024)
if string:
destination.sendall(string)
else:
source.shutdown(socket.SHUT_RD)
destination.shutdown(socket.SHUT_WR)
def main(args):
local_port = int(args[0])
remote_cid = int(args[1])
remote_port = int(args[2])
thread = threading.Thread(target = server, args = (local_port, remote_cid, remote_port))
thread.start()
while True:
time.sleep(60)
if __name__ == '__main__':
main(sys.argv[1:])
| true | true |
f733e8389cf5d5002c75aba6aa101b9e2cd40e86 | 121 | py | Python | src/ukbsearch/conf.py | danielmsk/ukbsearch | 23748d4af7dac7cfc418a6f779ac4e1c90e2523c | [
"MIT"
] | null | null | null | src/ukbsearch/conf.py | danielmsk/ukbsearch | 23748d4af7dac7cfc418a6f779ac4e1c90e2523c | [
"MIT"
] | null | null | null | src/ukbsearch/conf.py | danielmsk/ukbsearch | 23748d4af7dac7cfc418a6f779ac4e1c90e2523c | [
"MIT"
] | null | null | null |
COLNAMES = ["Column", "UDI", "Count", "Type", "Description"]
COL_JUSTIFY = ["right", "left", "right", "center", "left"] | 30.25 | 60 | 0.595041 |
COLNAMES = ["Column", "UDI", "Count", "Type", "Description"]
COL_JUSTIFY = ["right", "left", "right", "center", "left"] | true | true |
f733e877803c1cc436dbe25bda10070ff0ff2fc3 | 18,217 | py | Python | src/cart/test/util/cart_logparse.py | vatelzh/daos | 3aca9ae033946ca24179ba0a180c0b8422cd2738 | [
"Apache-2.0"
] | 1 | 2019-11-28T07:26:38.000Z | 2019-11-28T07:26:38.000Z | src/cart/test/util/cart_logparse.py | vatelzh/daos | 3aca9ae033946ca24179ba0a180c0b8422cd2738 | [
"Apache-2.0"
] | 52 | 2019-12-04T05:47:10.000Z | 2020-06-09T03:26:12.000Z | src/cart/test/util/cart_logparse.py | vatelzh/daos | 3aca9ae033946ca24179ba0a180c0b8422cd2738 | [
"Apache-2.0"
] | 8 | 2019-12-04T08:26:00.000Z | 2020-06-09T07:40:11.000Z | #!/usr/bin/env python3
# Copyright (C) 2018-2019 Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted for any purpose (including commercial purposes)
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or materials provided with the distribution.
#
# 3. In addition, redistributions of modified forms of the source or binary
# code must carry prominent notices stating that the original code was
# changed and the date of the change.
#
# 4. All publications or advertising materials mentioning features or use of
# this software are asked, but not required, to acknowledge that it was
# developed by Intel Corporation and credit the contributors.
#
# 5. Neither the name of Intel Corporation, nor the name of any Contributor
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
LogIter class definition.
LogLine class definition.
This provides a way of querying CaRT logfiles for processing.
"""
import os
import re
class InvalidPid(Exception):
    """Raised when an invalid pid is requested"""
class InvalidLogFile(Exception):
    """Raised when a log file cannot be parsed"""
# Map of the level tag as it appears in a cart log line to a numeric
# severity used for comparisons.  Lower number == more severe
# (FATAL is 1, DBUG is 8).
LOG_LEVELS = {'FATAL' :1,
              'EMRG' :2,
              'CRIT' :3,
              'ERR' :4,
              'WARN' :5,
              'NOTE' :6,
              'INFO' :7,
              'DBUG' :8}
# pylint: disable=too-few-public-methods
class LogRaw():
    """Wrapper for a non-cart line found in a cart log file.

    Covers lines which cannot be parsed as cart log lines, for example
    mercury output interleaved into the same file.
    """
    def __init__(self, line):
        # Raw lines never carry trace information.
        self.trace = False
        self.line = line.rstrip('\n')

    def to_str(self):
        """Return the line as a string, API-compatible with LogLine"""
        return self.line
# pylint: disable=too-many-instance-attributes
class LogLine():
"""Class for parsing CaRT log lines
This class implements a way of inspecting individual lines of a log
file.
It allows for queries such as 'string in line' which will match against
the message only, and != which will match the entire line.
index is the line in the file, starting at 1.
"""
    def __init__(self, line, index):
        """Parse a single cart log line.

        line is the full text of the line; index is its 1-based position
        in the file.  Raises InvalidLogFile if the level tag is unknown.
        """
        fields = line.split()
        # Work out the end of the fixed-width portion, and the beginning of the
        # message. The hostname and pid fields are both variable width
        idx = 29 + len(fields[1]) + len(fields[2])
        # fields[2] looks like '<tag>[pid/tid]'; drop the assumed
        # 5-character prefix and the trailing ']' -- TODO confirm the
        # prefix is always exactly 5 characters.
        pidtid = fields[2][5:-1]
        pid = pidtid.split("/")
        self.pid = int(pid[0])
        self._preamble = line[:idx]
        self.index = index
        self.mask = fields[3]
        try:
            self.level = LOG_LEVELS[fields[4]]
        except KeyError:
            raise InvalidLogFile(fields[4])

        # Everything after the level tag: 'file:line', function/trace token,
        # then the message body.
        self._fields = fields[5:]
        if self._fields[1][-2:] == '()':
            # Plain 'my_func()' -- a non-trace line.
            self.trace = False
            self.function = self._fields[1][:-2]
        elif self._fields[1][-1:] == ')':
            # 'my_func(0x...)' style -- a trace line with a descriptor.
            self.trace = True
        else:
            self.trace = False

        if self.trace:
            # For level 7 (INFO) and 3 (CRIT) trace lines from the rpc/hg
            # masks, drop three fields so the layout matches other lines --
            # presumably those masks emit extra columns; verify against the
            # log writer.
            if self.level == 7 or self.level == 3:
                if self.mask == 'rpc' or self.mask == 'hg':
                    del self._fields[2:5]

        if self.trace:
            # Split 'function(descriptor)' into its parts.  A NULL pointer
            # is printed as '(nil)' (giving 'func((nil))'), which maps to an
            # empty descriptor.
            fn_str = self._fields[1]
            start_idx = fn_str.find('(')
            self.function = fn_str[:start_idx]
            desc = fn_str[start_idx+1:-1]
            if desc == '(nil)':
                self.descriptor = ''
            else:
                self.descriptor = desc
        self._msg = ' '.join(self._fields)
def to_str(self, mark=False):
"""Convert the object to a string"""
# pre = self._preamble.split(' ', maxsplit=3)
pre = self._preamble.split(' ', 3)
preamble = ' '.join([pre[0], pre[3]])
if mark:
return '{} ** {}'.format(preamble, self._msg)
return '{} {}'.format(preamble, self._msg)
def __getattr__(self, attr):
if attr == 'parent':
if self._fields[2] == 'Registered':
# This is a bit of a hack but handle the case where descriptor
# names contain spaces.
if self._fields[6] == 'from':
return self._fields[7]
return self._fields[6]
if self._fields[2] == 'Link':
return self._fields[5]
if attr == 'filename':
try:
(filename, _) = self._fields[0].split(':')
return filename
except ValueError:
pass
elif attr == 'lineno':
try:
(_, lineno) = self._fields[0].split(':')
return int(lineno)
except ValueError:
pass
raise AttributeError
def get_msg(self):
"""Return the message part of a line, stripping up to and
including the filename"""
return ' '.join(self._fields[1:])
def get_anon_msg(self):
"""Return the message part of a line, stripping up to and
including the filename but removing pointers"""
# As get_msg, but try and remove specific information from the message,
# This is so that large volumes of logs can be amalgamated and reduced
# a common set for easier reporting. Specifically the trace pointer,
# fid/revision of GAH values and other pointers are removed.
#
# These can then be fed back as source-level comments to the source-code
# without creating too much output.
fields = []
for entry in self._fields[2:]:
field = None
if entry.startswith('0x') and len(entry) > 5:
if entry.endswith(')'):
field = '0x...)'
else:
field = '0x...'
if not field:
r = re.search("^[0-9,a-f]{8}$", entry)
if r:
field = 'uuid'
if not field:
r = re.search("^[0-9,a-f]{8}\[\d+\]\:$", entry)
if r:
field = 'uuid/rank'
if not field:
r = re.search("^\d+.\d+.\d+\:*$", entry)
if r:
field = 'low/high/shard'
if field:
fields.append(field)
else:
fields.append(entry)
return '{}() {}'.format(self.function, ' '.join(fields))
def endswith(self, item):
"""Mimic the str.endswith() function
This only matches on the actual string part of the message, not the
timestamp/pid/faculty parts.
"""
return self._msg.endswith(item)
def get_field(self, idx):
"""Return a specific field from the line"""
return self._fields[idx]
def _is_type(self, text, trace=True):
"""Checks for text in a log message
Retuns True if the line starts with the text provided
"""
if trace and not self.trace:
return False
# Check that the contents of two arrays are equal, using text as is and
# selecting only the correct entries of the fields array.
return text == self._fields[2:2+len(text)]
def is_new(self):
"""Returns True if line is new descriptor"""
return self._is_type(['Registered', 'new'])
def is_dereg(self):
"""Returns true if line is descriptor deregister"""
return self._is_type(['Deregistered'])
def is_new_rpc(self):
"""Returns True if line is new rpc"""
if not self.trace:
return False
if self._fields[-1] == 'allocated.':
return True
if self._fields[-1] == 'received.' and self._fields[-5] == 'allocated':
return True
return False
def is_dereg_rpc(self):
"""Returns true if line is a rpc deregister"""
if not self.trace:
return False
if self.function != 'crt_hg_req_destroy':
return False
return self._fields[-1] == 'destroying'
def is_callback(self):
"""Returns true if line is RPC callback"""
# TODO: This is broken for now but the RPCtrace has not been ported yet
# so there are no current users of it.
return self._is_type(['Invoking', 'RPC', 'callback'])
def is_link(self):
"""Returns True if line is Link descriptor"""
return self._is_type(['Link'])
def is_fi_site(self):
return self._is_type(['fault_id'], trace=False)
def is_fi_alloc_fail(self):
return self._is_type(['out', 'of', 'memory'], trace=False)
def is_calloc(self):
"""Returns True if line is a allocation point"""
return self.get_field(2).startswith('alloc(')
def is_realloc(self):
"""Returns True if line is a call to"""
return self.get_field(2) == 'realloc'
def calloc_size(self):
"""Returns the size of the allocation"""
if self.get_field(5) == '*':
if self.is_realloc():
field = -5
else:
field = -3
count = int(self.get_field(field).split(':')[-1])
return count * int(self.get_field(4))
return int(self.get_field(4))
def is_free(self):
"""Returns True if line is a call to free"""
return self.get_field(2) == 'free'
# pylint: disable=too-many-branches
class StateIter():
    """Helper class for LogIter to add a statefull iterator.

    Implement a new iterator() for LogIter() that tracks descriptors
    and adds two new attributes, pdesc and pparent which are the local
    descriptor with the reuse-count appended.
    """

    def __init__(self, li):
        # Dict, indexed by pointer, of how many times that pointer has
        # been reused so far.
        self.reuse_table = {}
        # Maps currently-live pointer -> the LogLine that registered it.
        self.active_desc = {}
        self.li = li
        self._l = None

    def __iter__(self):
        # Reset all state so the iteration can be restarted cleanly.
        # Dict, indexed by pointer, containing re-use index for that pointer.
        self.reuse_table = {}
        # Conversion from active pointer to line where it was created.
        self.active_desc = {}
        self._l = iter(self.li)
        return self

    def __next__(self):
        """Return the next line, annotated with pdesc/pparent/rpc."""
        line = next(self._l)
        if not line.trace:
            line.rpc = False
            return line
        if line.is_new() or line.is_new_rpc():
            # Pointers get recycled by the allocator; append a reuse count
            # so each descriptor lifetime has a unique printable name.
            if line.descriptor in self.reuse_table:
                self.reuse_table[line.descriptor] += 1
                line.pdesc = '{}_{}'.format(line.descriptor,
                                            self.reuse_table[line.descriptor])
            else:
                self.reuse_table[line.descriptor] = 0
                line.pdesc = line.descriptor
            self.active_desc[line.descriptor] = line
            if line.is_new():
                # Resolve the parent to its printable name if it is live.
                if line.parent in self.active_desc:
                    line.pparent = self.active_desc[line.parent].pdesc
                else:
                    line.pparent = line.parent
                line.rpc = False
            else:
                line.rpc = True
        elif line.is_link():
            if line.parent in self.active_desc:
                line.pparent = self.active_desc[line.parent].pdesc
            else:
                line.pparent = line.parent
            line.pdesc = line.descriptor
            line.rpc = False
        else:
            if line.descriptor in self.active_desc:
                # Propagate the names assigned at registration time.
                # NOTE(review): nesting reconstructed from a dump with no
                # indentation — rpc_opcode may belong under the rpc branch
                # only; verify against the upstream source.
                line.rpc = self.active_desc[line.descriptor].rpc
                if not line.rpc:
                    line.pparent = self.active_desc[line.descriptor].pparent
                line.pdesc = self.active_desc[line.descriptor].pdesc
                line.rpc_opcode = self.active_desc[line.descriptor].get_field(3)
            else:
                line.pdesc = line.descriptor
                line.rpc = False
        # Deregistration ends the pointer's current lifetime.
        if (line.is_dereg() or line.is_dereg_rpc()) and \
           line.descriptor in self.active_desc:
            del self.active_desc[line.descriptor]
        return line

    def next(self):
        """Python2/3 compat function"""
        return self.__next__()
# pylint: disable=too-many-branches
# pylint: disable=too-few-public-methods
class LogIter():
    """Class for parsing CaRT log files

    This class implements a iterator for lines in a cart log file. The iterator
    is rewindable, and there are options for automatically skipping lines.
    """

    def __init__(self, fname):
        """Load a file, and check how many processes have written to it"""
        # Depending on file size either pre-read entire file into memory,
        # or do a first pass checking the pid list. This allows the same
        # iterator to work fast if the file can be kept in memory, or the
        # same, but slower if it needs to be re-read each time.
        #
        # Try and open the file as utf-8, but if that doesn't work then
        # find and report the error, then continue with the file open as
        # latin-1
        self._fd = None
        try:
            self._fd = open(fname, 'r', encoding='utf-8')
            self._fd.read()
        except UnicodeDecodeError as err:
            print('ERROR: Invalid data in server.log on following line')
            # latin-1 can decode any byte, so this open cannot fail; show
            # roughly 200 bytes of context ending at the offending offset.
            self._fd = open(fname, 'r', encoding='latin-1')
            self._fd.read(err.start - 200)
            data = self._fd.read(199)
            lines = data.splitlines()
            print(lines[-1])
        self._fd.seek(0)
        self.fname = fname
        self._data = []
        index = 0
        pids = set()
        i = os.fstat(self._fd.fileno())
        # Files larger than 20 MiB are streamed from disk on every
        # iteration instead of being held in memory.
        self.__from_file = bool(i.st_size > (1024*1024*20))
        self.__index = 0
        for line in self._fd:
            # str.split(maxsplit=) is Python3-only; the positional form is
            # kept for Python 2 compatibility.
            # fields = line.split(maxsplit=8)
            fields = line.split(' ', 8)
            index += 1
            if self.__from_file:
                # Large file: this pass only collects the set of pids.
                if len(fields) < 6 or len(fields[0]) != 17:
                    continue
                l_obj = LogLine(line, index)
                pids.add(l_obj.pid)
            else:
                # Small file: parse and keep every line in memory.
                if len(fields) < 6 or len(fields[0]) != 17:
                    self._data.append(LogRaw(line))
                else:
                    l_obj = LogLine(line, index)
                    pids.add(l_obj.pid)
                    self._data.append(l_obj)
        # Offset into the file when iterating. This is an array index, and is
        # based from zero, as opposed to line index which is based from 1.
        self._offset = 0
        self._pid = None
        self._trace_only = False
        self._raw = False
        self._pids = sorted(pids)

    def __del__(self):
        if self._fd:
            self._fd.close()

    def new_iter(self,
                 pid=None,
                 stateful=False,
                 trace_only=False,
                 raw=False):
        """Rewind file iterator, and set options

        If pid is set then the iterator will only return lines matching
        the pid.
        If trace_only is True then the iterator will only return trace lines.
        if raw is set then all lines in the file are returned, even non-log
        lines.
        """
        if pid is not None:
            if pid not in self._pids:
                raise InvalidPid
            self._pid = pid
        else:
            self._pid = None
        self._trace_only = trace_only
        self._raw = raw
        if stateful:
            # Descriptor tracking only makes sense within one process.
            if not pid:
                raise InvalidPid
            return StateIter(self)
        return self

    def __iter__(self, pid=None):
        # NOTE(review): the pid parameter is unused here; pid filtering is
        # configured through new_iter().
        if self.__from_file:
            self._fd.seek(0)
            self.__index = 0
        else:
            self._offset = 0
        return self

    def __lnext(self):
        """Helper function for __next__"""
        if self.__from_file:
            line = self._fd.readline()
            if not line:
                raise StopIteration
            self.__index += 1
            # fields = line.split(maxsplit=8)
            fields = line.split(' ', 8)
            if len(fields) < 6 or len(fields[0]) != 17:
                return LogRaw(line)
            return LogLine(line, self.__index)
        try:
            line = self._data[self._offset]
        except IndexError:
            raise StopIteration
        self._offset += 1
        return line

    def __next__(self):
        # Skip lines until one matches the filters set by new_iter().
        while True:
            line = self.__lnext()
            if not self._raw and isinstance(line, LogRaw):
                continue
            if self._trace_only and not line.trace:
                continue
            if isinstance(line, LogRaw) and self._pid:
                continue
            if self._pid and line.pid != self._pid:
                continue
            return line

    def next(self):
        """Python2/3 compat function"""
        return self.__next__()

    def get_pids(self):
        """Return an array of pids appearing in the file"""
        return self._pids
# pylint: enable=too-many-instance-attributes
| 33.182149 | 80 | 0.567108 |
import os
import re
class InvalidPid(Exception):
pass
class InvalidLogFile(Exception):
pass
LOG_LEVELS = {'FATAL' :1,
'EMRG' :2,
'CRIT' :3,
'ERR' :4,
'WARN' :5,
'NOTE' :6,
'INFO' :7,
'DBUG' :8}
class LogRaw():
def __init__(self, line):
self.line = line.rstrip('\n')
self.trace = False
def to_str(self):
return self.line
class LogLine():
def __init__(self, line, index):
fields = line.split()
idx = 29 + len(fields[1]) + len(fields[2])
pidtid = fields[2][5:-1]
pid = pidtid.split("/")
self.pid = int(pid[0])
self._preamble = line[:idx]
self.index = index
self.mask = fields[3]
try:
self.level = LOG_LEVELS[fields[4]]
except KeyError:
raise InvalidLogFile(fields[4])
self._fields = fields[5:]
if self._fields[1][-2:] == '()':
self.trace = False
self.function = self._fields[1][:-2]
elif self._fields[1][-1:] == ')':
self.trace = True
else:
self.trace = False
if self.trace:
if self.level == 7 or self.level == 3:
if self.mask == 'rpc' or self.mask == 'hg':
del self._fields[2:5]
if self.trace:
fn_str = self._fields[1]
start_idx = fn_str.find('(')
self.function = fn_str[:start_idx]
desc = fn_str[start_idx+1:-1]
if desc == '(nil)':
self.descriptor = ''
else:
self.descriptor = desc
self._msg = ' '.join(self._fields)
def to_str(self, mark=False):
pre = self._preamble.split(' ', 3)
preamble = ' '.join([pre[0], pre[3]])
if mark:
return '{} ** {}'.format(preamble, self._msg)
return '{} {}'.format(preamble, self._msg)
def __getattr__(self, attr):
if attr == 'parent':
if self._fields[2] == 'Registered':
if self._fields[6] == 'from':
return self._fields[7]
return self._fields[6]
if self._fields[2] == 'Link':
return self._fields[5]
if attr == 'filename':
try:
(filename, _) = self._fields[0].split(':')
return filename
except ValueError:
pass
elif attr == 'lineno':
try:
(_, lineno) = self._fields[0].split(':')
return int(lineno)
except ValueError:
pass
raise AttributeError
def get_msg(self):
return ' '.join(self._fields[1:])
def get_anon_msg(self):
fields = []
for entry in self._fields[2:]:
field = None
if entry.startswith('0x') and len(entry) > 5:
if entry.endswith(')'):
field = '0x...)'
else:
field = '0x...'
if not field:
r = re.search("^[0-9,a-f]{8}$", entry)
if r:
field = 'uuid'
if not field:
r = re.search("^[0-9,a-f]{8}\[\d+\]\:$", entry)
if r:
field = 'uuid/rank'
if not field:
r = re.search("^\d+.\d+.\d+\:*$", entry)
if r:
field = 'low/high/shard'
if field:
fields.append(field)
else:
fields.append(entry)
return '{}() {}'.format(self.function, ' '.join(fields))
def endswith(self, item):
return self._msg.endswith(item)
def get_field(self, idx):
return self._fields[idx]
def _is_type(self, text, trace=True):
if trace and not self.trace:
return False
return text == self._fields[2:2+len(text)]
def is_new(self):
return self._is_type(['Registered', 'new'])
def is_dereg(self):
return self._is_type(['Deregistered'])
def is_new_rpc(self):
if not self.trace:
return False
if self._fields[-1] == 'allocated.':
return True
if self._fields[-1] == 'received.' and self._fields[-5] == 'allocated':
return True
return False
def is_dereg_rpc(self):
if not self.trace:
return False
if self.function != 'crt_hg_req_destroy':
return False
return self._fields[-1] == 'destroying'
def is_callback(self):
return self._is_type(['Invoking', 'RPC', 'callback'])
def is_link(self):
return self._is_type(['Link'])
def is_fi_site(self):
return self._is_type(['fault_id'], trace=False)
def is_fi_alloc_fail(self):
return self._is_type(['out', 'of', 'memory'], trace=False)
def is_calloc(self):
return self.get_field(2).startswith('alloc(')
def is_realloc(self):
return self.get_field(2) == 'realloc'
def calloc_size(self):
if self.get_field(5) == '*':
if self.is_realloc():
field = -5
else:
field = -3
count = int(self.get_field(field).split(':')[-1])
return count * int(self.get_field(4))
return int(self.get_field(4))
def is_free(self):
return self.get_field(2) == 'free'
class StateIter():
def __init__(self, li):
self.reuse_table = {}
self.active_desc = {}
self.li = li
self._l = None
def __iter__(self):
self.reuse_table = {}
self.active_desc = {}
self._l = iter(self.li)
return self
def __next__(self):
line = next(self._l)
if not line.trace:
line.rpc = False
return line
if line.is_new() or line.is_new_rpc():
if line.descriptor in self.reuse_table:
self.reuse_table[line.descriptor] += 1
line.pdesc = '{}_{}'.format(line.descriptor,
self.reuse_table[line.descriptor])
else:
self.reuse_table[line.descriptor] = 0
line.pdesc = line.descriptor
self.active_desc[line.descriptor] = line
if line.is_new():
if line.parent in self.active_desc:
line.pparent = self.active_desc[line.parent].pdesc
else:
line.pparent = line.parent
line.rpc = False
else:
line.rpc = True
elif line.is_link():
if line.parent in self.active_desc:
line.pparent = self.active_desc[line.parent].pdesc
else:
line.pparent = line.parent
line.pdesc = line.descriptor
line.rpc = False
else:
if line.descriptor in self.active_desc:
line.rpc = self.active_desc[line.descriptor].rpc
if not line.rpc:
line.pparent = self.active_desc[line.descriptor].pparent
line.pdesc = self.active_desc[line.descriptor].pdesc
line.rpc_opcode = self.active_desc[line.descriptor].get_field(3)
else:
line.pdesc = line.descriptor
line.rpc = False
if (line.is_dereg() or line.is_dereg_rpc()) and \
line.descriptor in self.active_desc:
del self.active_desc[line.descriptor]
return line
def next(self):
return self.__next__()
class LogIter():
def __init__(self, fname):
# find and report the error, then continue with the file open as
# latin-1
self._fd = None
try:
self._fd = open(fname, 'r', encoding='utf-8')
self._fd.read()
except UnicodeDecodeError as err:
print('ERROR: Invalid data in server.log on following line')
self._fd = open(fname, 'r', encoding='latin-1')
self._fd.read(err.start - 200)
data = self._fd.read(199)
lines = data.splitlines()
print(lines[-1])
self._fd.seek(0)
self.fname = fname
self._data = []
index = 0
pids = set()
i = os.fstat(self._fd.fileno())
self.__from_file = bool(i.st_size > (1024*1024*20))
self.__index = 0
for line in self._fd:
# fields = line.split(maxsplit=8)
fields = line.split(' ', 8)
index += 1
if self.__from_file:
if len(fields) < 6 or len(fields[0]) != 17:
continue
l_obj = LogLine(line, index)
pids.add(l_obj.pid)
else:
if len(fields) < 6 or len(fields[0]) != 17:
self._data.append(LogRaw(line))
else:
l_obj = LogLine(line, index)
pids.add(l_obj.pid)
self._data.append(l_obj)
# Offset into the file when iterating. This is an array index, and is
# based from zero, as opposed to line index which is based from 1.
self._offset = 0
self._pid = None
self._trace_only = False
self._raw = False
self._pids = sorted(pids)
def __del__(self):
if self._fd:
self._fd.close()
def new_iter(self,
pid=None,
stateful=False,
trace_only=False,
raw=False):
if pid is not None:
if pid not in self._pids:
raise InvalidPid
self._pid = pid
else:
self._pid = None
self._trace_only = trace_only
self._raw = raw
if stateful:
if not pid:
raise InvalidPid
return StateIter(self)
return self
def __iter__(self, pid=None):
if self.__from_file:
self._fd.seek(0)
self.__index = 0
else:
self._offset = 0
return self
def __lnext(self):
if self.__from_file:
line = self._fd.readline()
if not line:
raise StopIteration
self.__index += 1
# fields = line.split(maxsplit=8)
fields = line.split(' ', 8)
if len(fields) < 6 or len(fields[0]) != 17:
return LogRaw(line)
return LogLine(line, self.__index)
try:
line = self._data[self._offset]
except IndexError:
raise StopIteration
self._offset += 1
return line
def __next__(self):
while True:
line = self.__lnext()
if not self._raw and isinstance(line, LogRaw):
continue
if self._trace_only and not line.trace:
continue
if isinstance(line, LogRaw) and self._pid:
continue
if self._pid and line.pid != self._pid:
continue
return line
def next(self):
return self.__next__()
def get_pids(self):
return self._pids
# pylint: enable=too-many-instance-attributes
| true | true |
f733e90ee56443a9aa248c1c44b6e19205ec6c64 | 2,806 | py | Python | production/src/components/wallet.py | baptistedesarnauts81/trading_project | 47627c732f4a00702d5aadb1b088d17cce2ecc3d | [
"MIT"
] | null | null | null | production/src/components/wallet.py | baptistedesarnauts81/trading_project | 47627c732f4a00702d5aadb1b088d17cce2ecc3d | [
"MIT"
] | null | null | null | production/src/components/wallet.py | baptistedesarnauts81/trading_project | 47627c732f4a00702d5aadb1b088d17cce2ecc3d | [
"MIT"
] | null | null | null | class Wallet:
def __init__(self, stocks, initial_account=3000):
self.stocks_amount = 0
for stock in stocks:
self.stocks_amount += stock.getCostPrice()
self.available_cash = initial_account - \
self.stocks_amount # argent disponible en cash
self.virtual_account = initial_account
self.total_commission = 0
self.total_transaction = 0
self.last_account = initial_account
self.stocks = stocks
def initdata(self, wallet):
self.virtual_account = wallet["virtual_account"]
self.available_cash = wallet["available_cash"]
self.stocks_amount = wallet["stocks_amount"]
self.last_account = wallet["last_account"]
self.total_commission = wallet["total_commission"]
self.total_transaction = wallet["total_transaction"]
return
def update(self, date):
self.stocks_amount = 0
for stock in self.stocks:
self.stocks_amount += stock.getQuantity()*stock.getDateValue(date)
self.virtual_account = self.available_cash + self.stocks_amount
def save_last_account(self):
self.last_account = self.virtual_account
def buying_autorisation(self, i, quantity, date):
# enough to cash buy stock's quantity, taking commissions into account
return self.available_cash > quantity * (self.stocks[i].getDateValue(date) * (1 + self.stocks[i].getPropCommission()) + self.stocks[i].getFixedCommission())
def sell(self, i, date):
"""
This method is called when the bot sells i stocks. The commission is updated and
the available cash is diminished (money is moved from available cash acount to stock account)
"""
self.total_commission += self.stocks[i].getQuantity() * (self.stocks[i].getDateValue(
date) * self.stocks[i].getPropCommission() + self.stocks[i].getFixedCommission())
self.available_cash += self.stocks[i].getQuantity() * (self.stocks[i].getDateValue(
date) * (1 - self.stocks[i].getPropCommission()) - self.stocks[i].getFixedCommission())
self.total_transaction += 1
def buy(self, i, date, quantity=1):
"""
This method is called when the bot buys i stocks. The commission is updated and
the available cash is increased (money is moved from stock account to available cash acount)
"""
self.total_commission += self.stocks[i].getQuantity() * (self.stocks[i].getDateValue(
date) * self.stocks[i].getPropCommission() + self.stocks[i].getFixedCommission())
self.available_cash -= quantity * (self.stocks[i].getDateValue(date) * (
1 + self.stocks[i].getPropCommission()) + self.stocks[i].getFixedCommission())
self.total_transaction += 1
| 47.559322 | 164 | 0.664647 | class Wallet:
def __init__(self, stocks, initial_account=3000):
self.stocks_amount = 0
for stock in stocks:
self.stocks_amount += stock.getCostPrice()
self.available_cash = initial_account - \
self.stocks_amount
self.virtual_account = initial_account
self.total_commission = 0
self.total_transaction = 0
self.last_account = initial_account
self.stocks = stocks
def initdata(self, wallet):
self.virtual_account = wallet["virtual_account"]
self.available_cash = wallet["available_cash"]
self.stocks_amount = wallet["stocks_amount"]
self.last_account = wallet["last_account"]
self.total_commission = wallet["total_commission"]
self.total_transaction = wallet["total_transaction"]
return
def update(self, date):
self.stocks_amount = 0
for stock in self.stocks:
self.stocks_amount += stock.getQuantity()*stock.getDateValue(date)
self.virtual_account = self.available_cash + self.stocks_amount
def save_last_account(self):
self.last_account = self.virtual_account
def buying_autorisation(self, i, quantity, date):
return self.available_cash > quantity * (self.stocks[i].getDateValue(date) * (1 + self.stocks[i].getPropCommission()) + self.stocks[i].getFixedCommission())
def sell(self, i, date):
self.total_commission += self.stocks[i].getQuantity() * (self.stocks[i].getDateValue(
date) * self.stocks[i].getPropCommission() + self.stocks[i].getFixedCommission())
self.available_cash += self.stocks[i].getQuantity() * (self.stocks[i].getDateValue(
date) * (1 - self.stocks[i].getPropCommission()) - self.stocks[i].getFixedCommission())
self.total_transaction += 1
def buy(self, i, date, quantity=1):
self.total_commission += self.stocks[i].getQuantity() * (self.stocks[i].getDateValue(
date) * self.stocks[i].getPropCommission() + self.stocks[i].getFixedCommission())
self.available_cash -= quantity * (self.stocks[i].getDateValue(date) * (
1 + self.stocks[i].getPropCommission()) + self.stocks[i].getFixedCommission())
self.total_transaction += 1
| true | true |
f733e9ab3db62124be970edbed846b82f67d10ae | 1,525 | py | Python | RecoMuon/L2MuonProducer/python/L2Muons_cff.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | RecoMuon/L2MuonProducer/python/L2Muons_cff.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 7 | 2016-07-17T02:34:54.000Z | 2019-08-13T07:58:37.000Z | RecoMuon/L2MuonProducer/python/L2Muons_cff.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | import FWCore.ParameterSet.Config as cms
# Magnetic Field
# Geometries
# from Geometry.CommonDetUnit.bareGlobalTrackingGeometry_cfi import *
# from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import *
import TrackingTools.KalmanUpdators.Chi2MeasurementEstimator_cfi
EstimatorForSTA = TrackingTools.KalmanUpdators.Chi2MeasurementEstimator_cfi.Chi2MeasurementEstimator.clone()
import TrackingTools.TrackFitters.KFTrajectoryFitter_cfi
KFTrajectoryFitterForSTA = TrackingTools.TrackFitters.KFTrajectoryFitter_cfi.KFTrajectoryFitter.clone()
import TrackingTools.TrackFitters.KFTrajectorySmoother_cfi
KFTrajectorySmootherForSTA = TrackingTools.TrackFitters.KFTrajectorySmoother_cfi.KFTrajectorySmoother.clone()
import TrackingTools.TrackFitters.KFFittingSmoother_cfi
KFFittingSmootheForSTA = TrackingTools.TrackFitters.KFFittingSmoother_cfi.KFFittingSmoother.clone()
# Stand Alone Muons Producer
from RecoMuon.L2MuonProducer.L2Muons_cfi import *
EstimatorForSTA.ComponentName = 'Chi2STA'
EstimatorForSTA.MaxChi2 = 1000.
KFTrajectoryFitterForSTA.ComponentName = 'KFFitterSTA'
KFTrajectoryFitterForSTA.Propagator = 'SteppingHelixPropagatorAny'
KFTrajectoryFitterForSTA.Estimator = 'Chi2STA'
KFTrajectorySmootherForSTA.ComponentName = 'KFSmootherSTA'
KFTrajectorySmootherForSTA.Propagator = 'SteppingHelixPropagatorOpposite'
KFTrajectorySmootherForSTA.Estimator = 'Chi2STA'
KFFittingSmootheForSTA.ComponentName = 'KFFitterSmootherSTA'
KFFittingSmootheForSTA.Fitter = 'KFFitterSTA'
KFFittingSmootheForSTA.Smoother = 'KFSmootherSTA'
| 50.833333 | 109 | 0.882623 | import FWCore.ParameterSet.Config as cms
import TrackingTools.KalmanUpdators.Chi2MeasurementEstimator_cfi
EstimatorForSTA = TrackingTools.KalmanUpdators.Chi2MeasurementEstimator_cfi.Chi2MeasurementEstimator.clone()
import TrackingTools.TrackFitters.KFTrajectoryFitter_cfi
KFTrajectoryFitterForSTA = TrackingTools.TrackFitters.KFTrajectoryFitter_cfi.KFTrajectoryFitter.clone()
import TrackingTools.TrackFitters.KFTrajectorySmoother_cfi
KFTrajectorySmootherForSTA = TrackingTools.TrackFitters.KFTrajectorySmoother_cfi.KFTrajectorySmoother.clone()
import TrackingTools.TrackFitters.KFFittingSmoother_cfi
KFFittingSmootheForSTA = TrackingTools.TrackFitters.KFFittingSmoother_cfi.KFFittingSmoother.clone()
from RecoMuon.L2MuonProducer.L2Muons_cfi import *
EstimatorForSTA.ComponentName = 'Chi2STA'
EstimatorForSTA.MaxChi2 = 1000.
KFTrajectoryFitterForSTA.ComponentName = 'KFFitterSTA'
KFTrajectoryFitterForSTA.Propagator = 'SteppingHelixPropagatorAny'
KFTrajectoryFitterForSTA.Estimator = 'Chi2STA'
KFTrajectorySmootherForSTA.ComponentName = 'KFSmootherSTA'
KFTrajectorySmootherForSTA.Propagator = 'SteppingHelixPropagatorOpposite'
KFTrajectorySmootherForSTA.Estimator = 'Chi2STA'
KFFittingSmootheForSTA.ComponentName = 'KFFitterSmootherSTA'
KFFittingSmootheForSTA.Fitter = 'KFFitterSTA'
KFFittingSmootheForSTA.Smoother = 'KFSmootherSTA'
| true | true |
f733eb779760bdac56af2ae2248f27730e3608e9 | 2,639 | py | Python | corpusdiagnostics.py | MarissaSkud/Wordsworth | a807603d7f34c908621a807f305c540a287d5b90 | [
"MIT"
] | 7 | 2019-06-18T03:11:54.000Z | 2020-10-22T03:45:46.000Z | corpusdiagnostics.py | MarissaSkud/Anachronism-Finder | a807603d7f34c908621a807f305c540a287d5b90 | [
"MIT"
] | 12 | 2019-06-17T04:29:44.000Z | 2020-09-22T04:53:40.000Z | corpusdiagnostics.py | MarissaSkud/Anachronism-Finder | a807603d7f34c908621a807f305c540a287d5b90 | [
"MIT"
] | null | null | null | from server import app, format_decades
from model import Decade, Country, Book, connect_to_db, db
from textprocessor import unpickle_data
from random import sample
from collections import Counter
def measure_and_sample_corpus(data_type, want_sample):
    """Print per-decade corpus-size statistics.

    data_type -- "word_set" to report unique word counts, "bigram_dict"
        to report unique/total bigram counts.
    want_sample -- if truthy, also print a random sample of 10 items
        from each decade's collection.
    """
    with app.app_context():
        for decade in format_decades():
            books_from_decade = Book.query.filter_by(decade=decade).all()
            if data_type == "word_set":
                decade_set = set()
                for book in books_from_decade:
                    decade_set.update(unpickle_data(book.word_set))
                print(f"The {decade} corpus contains {len(decade_set)} unique words")
                if want_sample:
                    # random.sample() rejects sets from Python 3.11, so
                    # materialise a list before sampling.
                    decade_sample = sample(list(decade_set), k=10)
                    print(f"Ten of those words are: {decade_sample}")
            if data_type == "bigram_dict":
                decade_dict = Counter()
                for book in books_from_decade:
                    decade_dict += Counter(unpickle_data(book.bigram_dict))
                decade_unique_bigrams = "{:,}".format(len(decade_dict))
                decade_total = "{:,}".format(sum(decade_dict.values()))
                print(f"The {decade} corpus contains {decade_unique_bigrams} unique and {decade_total} total bigrams")
                if want_sample:
                    # dict views are not valid sample() populations; use a
                    # list of the keys.
                    decade_sample = sample(list(decade_dict), k=10)
                    print(f"Ten of those bigrams are {decade_sample}")
def print_whole_decade_set(data_type, decade):
    """Dump the complete word set or bigram dictionary for one decade."""
    with app.app_context():
        books = Book.query.filter_by(decade=decade).all()
        if data_type == "word_set":
            words = set()
            for book in books:
                words.update(unpickle_data(book.word_set))
            print(f"The {decade} word set:")
            print(sorted(words))
        if data_type == "bigram_dict":
            bigrams = Counter()
            for book in books:
                bigrams = bigrams + Counter(unpickle_data(book.bigram_dict))
            print(f"The {decade} bigram dictionary:")
            print(bigrams)
if __name__ == "__main__":
    connect_to_db(app)
    # Default report: unique-word counts with samples.  Uncomment the
    # alternative calls for the other reports.
    measure_and_sample_corpus("word_set", True)
    #measure_and_sample_corpus("bigram_dict", False)
#print_whole_decade_set("word_set", "1920s") | 35.186667 | 118 | 0.608185 | from server import app, format_decades
from model import Decade, Country, Book, connect_to_db, db
from textprocessor import unpickle_data
from random import sample
from collections import Counter
def measure_and_sample_corpus(data_type, want_sample):
with app.app_context():
decades = format_decades()
for decade in decades:
books_from_decade = Book.query.filter_by(decade=decade).all()
if data_type == "word_set":
num_books_from_decade = len(books_from_decade)
decade_set = set()
for book in books_from_decade:
decade_set.update(unpickle_data(book.word_set))
words_from_decade = len(decade_set)
print(f"The {decade} corpus contains {words_from_decade} unique words")
if want_sample == True:
decade_sample = sample(decade_set, k=10)
print(f"Ten of those words are: {decade_sample}")
if data_type == "bigram_dict":
decade_dict = Counter({})
for book in books_from_decade:
book_bigrams = Counter(unpickle_data(book.bigram_dict))
decade_dict += book_bigrams
decade_unique_bigrams = "{:,}".format(len(decade_dict))
decade_total = "{:,}".format(sum(decade_dict.values()))
print(f"The {decade} corpus contains {decade_unique_bigrams} unique and {decade_total} total bigrams")
if want_sample == True:
decade_sample = sample(decade_dict.keys(), k=10)
print(f"Ten of those bigrams are {decade_sample}")
def print_whole_decade_set(data_type, decade):
with app.app_context():
books_from_decade = Book.query.filter_by(decade=decade).all()
if data_type == "word_set":
decade_set = set()
for book in books_from_decade:
decade_set.update(unpickle_data(book.word_set))
print(f"The {decade} word set:")
print(sorted(decade_set))
if data_type == "bigram_dict":
decade_dict = Counter({})
for book in books_from_decade:
book_bigrams = Counter(unpickle_data(book.bigram_dict))
decade_dict += book_bigrams
print(f"The {decade} bigram dictionary:")
print(decade_dict)
if __name__ == "__main__":
connect_to_db(app)
measure_and_sample_corpus("word_set", True)
| true | true |
f733ec6d4d3d880d44c82f50d338939e9099b4ca | 7,745 | py | Python | docs/conf.py | YPlan/gargoyle | 005c400ae0a4b29f646507b4d0b01c79d5c62f28 | [
"Apache-2.0"
] | 138 | 2016-01-08T13:37:53.000Z | 2022-03-25T10:41:06.000Z | docs/conf.py | YPlan/gargoyle | 005c400ae0a4b29f646507b4d0b01c79d5c62f28 | [
"Apache-2.0"
] | 39 | 2015-12-28T23:16:17.000Z | 2018-05-26T09:30:29.000Z | docs/conf.py | YPlan/gargoyle | 005c400ae0a4b29f646507b4d0b01c79d5c62f28 | [
"Apache-2.0"
] | 11 | 2015-12-28T20:20:57.000Z | 2019-03-12T23:49:45.000Z | # -*- coding: utf-8 -*-
#
# Gargoyle documentation build configuration file, created by
# sphinx-quickstart on Fri May 6 11:47:36 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import unicode_literals
import os
import sys
# Get the project root dir, which is the parent dir of this docs dir.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

import django
from django.conf import settings  # noqa

# Configure an empty Django settings module and initialise the app
# registry before importing gargoyle — presumably gargoyle touches
# Django settings/models at import time; TODO confirm empty settings
# remain sufficient for autodoc builds.
settings.configure()
django.setup()

import gargoyle  # noqa
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Gargoyle'
copyright = '2011, DISQUS'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = gargoyle.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Read the Docs sets READTHEDOCS=True in its build environment and injects
# its own theme automatically, so we only configure the theme for local builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Gargoyledoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Gargoyle.tex', 'Gargoyle Documentation',
'DISQUS', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gargoyle', 'Gargoyle Documentation',
['DISQUS'], 1)
]
| 32.004132 | 80 | 0.720207 |
from __future__ import unicode_literals
import os
import sys
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
import django
from django.conf import settings
settings.configure()
django.setup()
import gargoyle
extensions = ['sphinx.ext.autodoc']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'Gargoyle'
copyright = '2011, DISQUS'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = gargoyle.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_static_path = ['_static']
htmlhelp_basename = 'Gargoyledoc'
latex_documents = [
('index', 'Gargoyle.tex', 'Gargoyle Documentation',
'DISQUS', 'manual'),
]
man_pages = [
('index', 'gargoyle', 'Gargoyle Documentation',
['DISQUS'], 1)
]
| true | true |
f733eca74173829e6a2e510354c46a221abb2241 | 4,385 | py | Python | maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py | RyanXLi/OneshotDet | 77f629978d9d1739787b08de8cccea81341507bf | [
"BSD-2-Clause"
] | 16 | 2020-09-07T15:28:57.000Z | 2022-03-03T02:52:25.000Z | maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py | RyanXLi/OneshotDet | 77f629978d9d1739787b08de8cccea81341507bf | [
"BSD-2-Clause"
] | 3 | 2021-01-06T12:02:54.000Z | 2021-03-14T14:08:57.000Z | maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py | RyanXLi/OneshotDet | 77f629978d9d1739787b08de8cccea81341507bf | [
"BSD-2-Clause"
] | 4 | 2020-11-13T09:21:36.000Z | 2021-05-27T02:12:19.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from maskrcnn_benchmark.modeling import registry
from torch import nn
@registry.ROI_BOX_PREDICTOR.register("FastRCNNPredictor")
class FastRCNNPredictor(nn.Module):
    """Box predictor for non-FPN heads: global-average-pools the ROI feature
    map, then applies linear classification and box-regression layers."""

    def __init__(self, config, in_channels):
        """Build the heads.

        :param config: global config node; reads NUM_CLASSES, the few-shot
            second-stage method, and the class-agnostic regression switch.
        :param in_channels: channel count of the incoming ROI feature map.
        """
        super(FastRCNNPredictor, self).__init__()
        assert in_channels is not None
        num_inputs = in_channels
        num_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        # Relation-network second stage reduces classification to a binary
        # match / no-match decision regardless of the configured class count.
        if config.FEW_SHOT.SECOND_STAGE_METHOD == 'rn':
            num_classes = 2
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.cls_score = nn.Linear(num_inputs, num_classes)
        # Class-agnostic regression predicts one shared box (plus background).
        num_bbox_reg_classes = 2 if config.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes
        self.bbox_pred = nn.Linear(num_inputs, num_bbox_reg_classes * 4)
        # Standard Fast R-CNN initialization: small gaussians, zero biases.
        nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)
        nn.init.constant_(self.cls_score.bias, 0)
        nn.init.normal_(self.bbox_pred.weight, mean=0, std=0.001)
        nn.init.constant_(self.bbox_pred.bias, 0)

    def forward(self, x):
        """Pool the ROI feature map to 1x1, flatten, and return the pair
        (classification logits, box regression deltas)."""
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        cls_logit = self.cls_score(x)
        bbox_pred = self.bbox_pred(x)
        return cls_logit, bbox_pred
@registry.ROI_BOX_PREDICTOR.register("FPNPredictor")
class FPNPredictor(nn.Module):
    """Linear classification / box-regression heads for FPN ROI features.

    The number of classification outputs is fully determined by the few-shot
    second-stage method ('rn' or 'concat'), the classification loss, and the
    negative-support switch: sigmoid-style losses (focal / mse / l1) use a
    single foreground logit, softmax-style losses (ce / cxe) use two classes.
    """

    def __init__(self, cfg, in_channels):
        """Build the heads.

        :param cfg: global config node; reads the FEW_SHOT.* switches.
        :param in_channels: size of the flattened ROI feature vector.
        :raises Exception: when the method / loss / neg-support combination
            is not supported.
        """
        super(FPNPredictor, self).__init__()
        # Config default; every supported branch below overrides it, and the
        # unsupported combinations raise instead.
        num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        if cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'rn' and cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'focal_loss':
            num_classes = 1
        elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'rn' and cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS != 'focal_loss':
            num_classes = 2
        elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \
                cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'focal_loss' and \
                not cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:
            num_classes = 1
        elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \
                cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'focal_loss' and \
                cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:
            num_classes = 2
        elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \
                cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'ce_loss' and \
                not cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:
            num_classes = 2
        elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \
                cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'ce_loss' and \
                cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:
            num_classes = 2  # originally 3, but 2 in new version neg support
        elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \
                cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'cxe_loss' and cfg.FEW_SHOT.SOFT_LABELING:
            num_classes = 2
        elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \
                cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS in ['mse_loss', 'l1_loss']:
            num_classes = 1
        else:
            raise Exception('setting not compatible {} {} {}'.format(
                cfg.FEW_SHOT.SECOND_STAGE_METHOD,
                cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS,
                cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON
            ))
        # Sigmoid-style losses have no explicit background class, so the
        # regressor needs one extra slot relative to the classifier.
        if cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS in ['focal_loss', 'mse_loss', 'l1_loss']:
            num_bbox_reg_classes = num_classes + 1
        else:
            num_bbox_reg_classes = num_classes
        representation_size = in_channels
        self.cls_score = nn.Linear(representation_size, num_classes)
        self.bbox_pred = nn.Linear(representation_size, num_bbox_reg_classes * 4)
        # Standard Fast R-CNN initialization: small gaussians, zero biases.
        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        for l in [self.cls_score, self.bbox_pred]:
            nn.init.constant_(l.bias, 0)

    def forward(self, x):
        """Return (classification logits, box regression deltas) for flattened
        ROI features; 4-D input must already be pooled to 1x1 spatially."""
        if x.ndimension() == 4:
            assert list(x.shape[2:]) == [1, 1]
            x = x.view(x.size(0), -1)
        scores = self.cls_score(x)
        bbox_deltas = self.bbox_pred(x)
        return scores, bbox_deltas
def make_roi_box_predictor(cfg, in_channels):
    """Instantiate the ROI box predictor class named in the config."""
    predictor_cls = registry.ROI_BOX_PREDICTOR[cfg.MODEL.ROI_BOX_HEAD.PREDICTOR]
    return predictor_cls(cfg, in_channels)
| 41.761905 | 109 | 0.65382 |
from maskrcnn_benchmark.modeling import registry
from torch import nn
@registry.ROI_BOX_PREDICTOR.register("FastRCNNPredictor")
class FastRCNNPredictor(nn.Module):
def __init__(self, config, in_channels):
super(FastRCNNPredictor, self).__init__()
assert in_channels is not None
num_inputs = in_channels
num_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES
if config.FEW_SHOT.SECOND_STAGE_METHOD == 'rn':
num_classes = 2
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.cls_score = nn.Linear(num_inputs, num_classes)
num_bbox_reg_classes = 2 if config.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes
self.bbox_pred = nn.Linear(num_inputs, num_bbox_reg_classes * 4)
nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)
nn.init.constant_(self.cls_score.bias, 0)
nn.init.normal_(self.bbox_pred.weight, mean=0, std=0.001)
nn.init.constant_(self.bbox_pred.bias, 0)
def forward(self, x):
x = self.avgpool(x)
x = x.view(x.size(0), -1)
cls_logit = self.cls_score(x)
bbox_pred = self.bbox_pred(x)
return cls_logit, bbox_pred
@registry.ROI_BOX_PREDICTOR.register("FPNPredictor")
class FPNPredictor(nn.Module):
def __init__(self, cfg, in_channels):
super(FPNPredictor, self).__init__()
num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
num_bbox_reg_classes = 2
if cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'rn' and cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'focal_loss':
num_classes = 1
elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'rn' and cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS != 'focal_loss':
num_classes= 2
elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \
cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'focal_loss' and \
not cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:
num_classes = 1
elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \
cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'focal_loss' and \
cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:
num_classes = 2
elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \
cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'ce_loss' and \
not cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:
num_classes = 2
elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \
cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'ce_loss' and \
cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:
num_classes = 2
elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \
cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS =='cxe_loss' and cfg.FEW_SHOT.SOFT_LABELING:
num_classes = 2
elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \
cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS in ['mse_loss','l1_loss']:
num_classes = 1
else:
raise Exception('setting not compatible {} {} {}'.format(
cfg.FEW_SHOT.SECOND_STAGE_METHOD,
cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS,
cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON
))
if cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS in ['focal_loss', 'mse_loss', 'l1_loss']:
num_bbox_reg_classes = num_classes+1
else:
num_bbox_reg_classes = num_classes
representation_size = in_channels
self.cls_score = nn.Linear(representation_size, num_classes)
ize, num_bbox_reg_classes * 4)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for l in [self.cls_score, self.bbox_pred]:
nn.init.constant_(l.bias, 0)
def forward(self, x):
if x.ndimension() == 4:
assert list(x.shape[2:]) == [1, 1]
x = x.view(x.size(0), -1)
scores = self.cls_score(x)
bbox_deltas = self.bbox_pred(x)
return scores, bbox_deltas
def make_roi_box_predictor(cfg, in_channels):
func = registry.ROI_BOX_PREDICTOR[cfg.MODEL.ROI_BOX_HEAD.PREDICTOR]
return func(cfg, in_channels)
| true | true |
f733ed70d8b010ed8e84d41a51af894639aa1809 | 5,417 | py | Python | sdk/python/pulumi_azure_nextgen/recoveryservices/latest/replication_policy.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/recoveryservices/latest/replication_policy.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/recoveryservices/latest/replication_policy.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ReplicationPolicy']
class ReplicationPolicy(pulumi.CustomResource):
    """Auto-generated Pulumi resource for an Azure Site Recovery replication
    policy (protection profile) inside a Recovery Services vault."""

    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 policy_name: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[pulumi.InputType['CreatePolicyInputPropertiesArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 resource_name_: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Protection profile details.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] policy_name: Replication policy name
        :param pulumi.Input[pulumi.InputType['CreatePolicyInputPropertiesArgs']] properties: Policy creation properties.
        :param pulumi.Input[str] resource_group_name: The name of the resource group where the recovery services vault is present.
        :param pulumi.Input[str] resource_name_: The name of the recovery services vault.
        """
        # Legacy keyword spellings kept for backward compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id unset means we are creating a new resource, so the input
        # properties must be validated and assembled here; otherwise this is
        # a lookup of an existing resource driven entirely by __props__.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            if policy_name is None:
                raise TypeError("Missing required property 'policy_name'")
            __props__['policy_name'] = policy_name
            __props__['properties'] = properties
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            if resource_name_ is None:
                raise TypeError("Missing required property 'resource_name_'")
            __props__['resource_name'] = resource_name_
            # Output-only properties start as None and are filled by the engine.
            __props__['location'] = None
            __props__['name'] = None
            __props__['type'] = None
        # Register aliases for the dated API-version type tokens so existing
        # stacks migrate to the "latest" token without replacement.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:recoveryservices/v20160810:ReplicationPolicy"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20180110:ReplicationPolicy"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20180710:ReplicationPolicy")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(ReplicationPolicy, __self__).__init__(
            'azure-nextgen:recoveryservices/latest:ReplicationPolicy',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'ReplicationPolicy':
        """
        Get an existing ReplicationPolicy resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # No extra state is passed; the engine refreshes it from the provider.
        __props__ = dict()
        return ReplicationPolicy(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Resource Location
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource Name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> pulumi.Output['outputs.PolicyPropertiesResponse']:
        """
        The custom data.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource Type
        """
        return pulumi.get(self, "type")

    def translate_output_property(self, prop):
        # Provider returns camelCase; the Python SDK exposes snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Inverse mapping: snake_case inputs back to the provider's camelCase.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 41.351145 | 299 | 0.65036 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ReplicationPolicy']
class ReplicationPolicy(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
policy_name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['CreatePolicyInputPropertiesArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if policy_name is None:
raise TypeError("Missing required property 'policy_name'")
__props__['policy_name'] = policy_name
__props__['properties'] = properties
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if resource_name_ is None:
raise TypeError("Missing required property 'resource_name_'")
__props__['resource_name'] = resource_name_
__props__['location'] = None
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:recoveryservices/v20160810:ReplicationPolicy"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20180110:ReplicationPolicy"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20180710:ReplicationPolicy")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ReplicationPolicy, __self__).__init__(
'azure-nextgen:recoveryservices/latest:ReplicationPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ReplicationPolicy':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ReplicationPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.PolicyPropertiesResponse']:
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f733ed9e26d85e77b4d34f3a5aeb5fedf27bb359 | 4,515 | py | Python | tests/fixtures/hello/hello.py | akretion/xsdata | 3bedb4ac3e0e6eb6a1cdbad54a3356f46e3ab90e | [
"MIT"
] | null | null | null | tests/fixtures/hello/hello.py | akretion/xsdata | 3bedb4ac3e0e6eb6a1cdbad54a3356f46e3ab90e | [
"MIT"
] | null | null | null | tests/fixtures/hello/hello.py | akretion/xsdata | 3bedb4ac3e0e6eb6a1cdbad54a3356f46e3ab90e | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "http://hello/"
@dataclass
class HelloByeError:
    """Fault payload for the hello service carrying one optional message."""

    class Meta:
        namespace = "http://hello/"

    message: Optional[str] = field(
        default=None,
        metadata={"type": "Element", "namespace": ""},
    )
@dataclass
class HelloError:
    """Primary fault payload for the hello service; one optional message."""

    class Meta:
        namespace = "http://hello/"

    message: Optional[str] = field(
        default=None,
        metadata={"type": "Element", "namespace": ""},
    )
@dataclass
class GetHelloAsString:
    """Request body of the getHelloAsString RPC; ``arg0`` is the name to greet."""

    class Meta:
        name = "getHelloAsString"
        namespace = "http://hello/"

    arg0: Optional[str] = field(
        default=None,
        metadata={"type": "Element", "namespace": ""},
    )
@dataclass
class GetHelloAsStringResponse:
    """Response body of the getHelloAsString RPC.

    The XML element is named ``return`` (a Python keyword), hence the
    ``return_value`` attribute with an explicit ``name`` in its metadata.
    """

    class Meta:
        name = "getHelloAsStringResponse"
        namespace = "http://hello/"

    return_value: Optional[str] = field(
        default=None,
        metadata={"name": "return", "type": "Element", "namespace": ""},
    )
@dataclass
class HelloGetHelloAsStringInput:
    """SOAP request envelope wrapping a getHelloAsString body."""

    class Meta:
        name = "Envelope"
        namespace = "http://schemas.xmlsoap.org/soap/envelope/"

    body: Optional["HelloGetHelloAsStringInput.Body"] = field(
        default=None,
        metadata={"name": "Body", "type": "Element"},
    )

    @dataclass
    class Body:
        """SOAP Body holding the operation payload."""

        get_hello_as_string: Optional[GetHelloAsString] = field(
            default=None,
            metadata={
                "name": "getHelloAsString",
                "type": "Element",
                "namespace": "http://hello/",
            },
        )
@dataclass
class HelloGetHelloAsStringOutput:
    """SOAP response envelope for getHelloAsString: either a response body
    or a SOAP 1.1 Fault with service-specific detail payloads."""

    class Meta:
        name = "Envelope"
        namespace = "http://schemas.xmlsoap.org/soap/envelope/"

    body: Optional["HelloGetHelloAsStringOutput.Body"] = field(
        default=None,
        metadata={
            "name": "Body",
            "type": "Element",
        }
    )

    @dataclass
    class Body:
        # Exactly one of these two is expected to be populated at runtime:
        # the successful response, or a Fault.
        get_hello_as_string_response: Optional[GetHelloAsStringResponse] = field(
            default=None,
            metadata={
                "name": "getHelloAsStringResponse",
                "type": "Element",
                "namespace": "http://hello/",
            }
        )
        fault: Optional["HelloGetHelloAsStringOutput.Body.Fault"] = field(
            default=None,
            metadata={
                "name": "Fault",
                "type": "Element",
            }
        )

        @dataclass
        class Fault:
            # Standard SOAP 1.1 fault fields (unqualified child elements).
            faultcode: Optional[str] = field(
                default=None,
                metadata={
                    "type": "Element",
                    "namespace": "",
                }
            )
            faultstring: Optional[str] = field(
                default=None,
                metadata={
                    "type": "Element",
                    "namespace": "",
                }
            )
            faultactor: Optional[str] = field(
                default=None,
                metadata={
                    "type": "Element",
                    "namespace": "",
                }
            )
            detail: Optional["HelloGetHelloAsStringOutput.Body.Fault.Detail"] = field(
                default=None,
                metadata={
                    "type": "Element",
                    "namespace": "",
                }
            )

            @dataclass
            class Detail:
                # Service-declared fault payloads carried inside <detail>.
                hello_error: Optional[HelloError] = field(
                    default=None,
                    metadata={
                        "name": "HelloError",
                        "type": "Element",
                        "namespace": "http://hello/",
                    }
                )
                hello_bye_error: Optional[HelloByeError] = field(
                    default=None,
                    metadata={
                        "name": "HelloByeError",
                        "type": "Element",
                        "namespace": "http://hello/",
                    }
                )
class HelloGetHelloAsString:
    """Binding metadata for the rpc-style getHelloAsString SOAP operation."""
    # WSDL binding attributes: rpc style over plain HTTP SOAP transport.
    style = "rpc"
    location = "http://localhost:9999/ws/hello"
    transport = "http://schemas.xmlsoap.org/soap/http"
    # Request/response envelope dataclasses for this operation.
    input = HelloGetHelloAsStringInput
    output = HelloGetHelloAsStringOutput
| 24.944751 | 86 | 0.461573 | from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "http://hello/"
@dataclass
class HelloByeError:
class Meta:
namespace = "http://hello/"
message: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
@dataclass
class HelloError:
class Meta:
namespace = "http://hello/"
message: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
@dataclass
class GetHelloAsString:
class Meta:
name = "getHelloAsString"
namespace = "http://hello/"
arg0: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
@dataclass
class GetHelloAsStringResponse:
class Meta:
name = "getHelloAsStringResponse"
namespace = "http://hello/"
return_value: Optional[str] = field(
default=None,
metadata={
"name": "return",
"type": "Element",
"namespace": "",
}
)
@dataclass
class HelloGetHelloAsStringInput:
class Meta:
name = "Envelope"
namespace = "http://schemas.xmlsoap.org/soap/envelope/"
body: Optional["HelloGetHelloAsStringInput.Body"] = field(
default=None,
metadata={
"name": "Body",
"type": "Element",
}
)
@dataclass
class Body:
get_hello_as_string: Optional[GetHelloAsString] = field(
default=None,
metadata={
"name": "getHelloAsString",
"type": "Element",
"namespace": "http://hello/",
}
)
@dataclass
class HelloGetHelloAsStringOutput:
class Meta:
name = "Envelope"
namespace = "http://schemas.xmlsoap.org/soap/envelope/"
body: Optional["HelloGetHelloAsStringOutput.Body"] = field(
default=None,
metadata={
"name": "Body",
"type": "Element",
}
)
@dataclass
class Body:
get_hello_as_string_response: Optional[GetHelloAsStringResponse] = field(
default=None,
metadata={
"name": "getHelloAsStringResponse",
"type": "Element",
"namespace": "http://hello/",
}
)
fault: Optional["HelloGetHelloAsStringOutput.Body.Fault"] = field(
default=None,
metadata={
"name": "Fault",
"type": "Element",
}
)
@dataclass
class Fault:
faultcode: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
faultstring: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
faultactor: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
detail: Optional["HelloGetHelloAsStringOutput.Body.Fault.Detail"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
@dataclass
class Detail:
hello_error: Optional[HelloError] = field(
default=None,
metadata={
"name": "HelloError",
"type": "Element",
"namespace": "http://hello/",
}
)
hello_bye_error: Optional[HelloByeError] = field(
default=None,
metadata={
"name": "HelloByeError",
"type": "Element",
"namespace": "http://hello/",
}
)
class HelloGetHelloAsString:
    # Binding descriptor for the getHelloAsString operation: rpc-style SOAP
    # over HTTP, tying together the request/response envelope classes above.
    style = "rpc"
    location = "http://localhost:9999/ws/hello"
    transport = "http://schemas.xmlsoap.org/soap/http"
    input = HelloGetHelloAsStringInput
    output = HelloGetHelloAsStringOutput
| true | true |
f733ee3a1c748aebb772e1649de2677004587126 | 15,205 | py | Python | watsonv3_connector.py | splunk-soar-connectors/ibmwatsonv3 | 2038974d2db4172c555272d725b81815e416b638 | [
"Apache-2.0"
] | null | null | null | watsonv3_connector.py | splunk-soar-connectors/ibmwatsonv3 | 2038974d2db4172c555272d725b81815e416b638 | [
"Apache-2.0"
] | 1 | 2022-01-26T23:20:53.000Z | 2022-01-26T23:20:53.000Z | watsonv3_connector.py | splunk-soar-connectors/ibmwatsonv3 | 2038974d2db4172c555272d725b81815e416b638 | [
"Apache-2.0"
] | null | null | null | # File: watsonv3_connector.py
#
# Copyright (c) 2021-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
# Phantom App imports
import json
import phantom.app as phantom
import requests
from bs4 import BeautifulSoup
from phantom.action_result import ActionResult
from phantom.base_connector import BaseConnector
# Usage of the consts file is recommended
import watsonv3_consts as consts
class RetVal(tuple):
    """Immutable (status, value) pair returned by the REST helper methods.

    The second element defaults to ``None`` so callers can return a bare
    status without a payload.
    """

    def __new__(cls, val1, val2=None):
        # Delegate to the parent constructor with a fixed 2-tuple shape.
        return super(RetVal, cls).__new__(cls, (val1, val2))
class WatsonLanguageTranslatorV3Connector(BaseConnector):
    """Phantom/SOAR connector for the IBM Watson Language Translator v3 REST API.

    Supported actions: test connectivity, identify language, list identifiable
    languages, list translation models, and translate text.
    """

    def __init__(self):
        # Call the BaseConnector's init first
        super(WatsonLanguageTranslatorV3Connector, self).__init__()

        self._state = None
        # Connection details; populated from the asset config in initialize()
        self._base_url = None
        self._api_key = None
        self._version = None

    def _process_empty_response(self, response, action_result):
        """Treat an empty 200 body as success; any other empty body is an error."""
        if response.status_code == 200:
            return RetVal(phantom.APP_SUCCESS, {})

        return RetVal(action_result.set_status(phantom.APP_ERROR, "Empty response and no information in the header"), None)

    def _process_html_response(self, response, action_result):
        """Extract a readable error from an HTML page (e.g. returned by a proxy)."""
        status_code = response.status_code

        try:
            soup = BeautifulSoup(response.text, "html.parser")
            # Remove the script, style, footer and navigation parts BEFORE
            # capturing the text.  The original captured soup.text first,
            # which made the extract() loop a no-op.
            for element in soup(["script", "style", "footer", "nav"]):
                element.extract()
            error_text = soup.text
            split_lines = error_text.split('\n')
            split_lines = [x.strip() for x in split_lines if x.strip()]
            error_text = '\n'.join(split_lines)
        except Exception:
            # Narrowed from a bare except; any parse failure yields a stub message
            error_text = "Cannot parse error details"

        message = "Status Code: {0}. Data from server:\n{1}\n".format(status_code, error_text)

        # Escape braces so the message survives later str.format calls
        message = message.replace('{', '{{').replace('}', '}}')
        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)

    def _process_json_response(self, r, action_result):
        """Parse a JSON body; 2xx/3xx is success, anything else becomes an error."""
        try:
            resp_json = r.json()
        except Exception as e:
            return RetVal(action_result.set_status(phantom.APP_ERROR, "Unable to parse JSON response. Error: {0}".format(str(e))), None)

        if 200 <= r.status_code < 399:
            return RetVal(phantom.APP_SUCCESS, resp_json)

        # Non-success status: surface the server's message
        message = "Error from server. Status Code: {0} Data from server: {1}".format(
            r.status_code,
            r.text.replace('{', '{{').replace('}', '}}')
        )

        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)

    def _process_response(self, r, action_result):
        """Dispatch response handling based on the Content-Type header."""
        # Store debug data; it gets dumped in the logs if the action fails
        if hasattr(action_result, 'add_debug_data'):
            action_result.add_debug_data({'r_status_code': r.status_code})
            action_result.add_debug_data({'r_text': r.text})
            action_result.add_debug_data({'r_headers': r.headers})

        if 'json' in r.headers.get('Content-Type', ''):
            return self._process_json_response(r, action_result)

        # Proxies between Phantom and the service often answer with HTML on
        # error, regardless of what the API itself speaks.
        if 'html' in r.headers.get('Content-Type', ''):
            return self._process_html_response(r, action_result)

        # No parseable content type: an empty body may still mean success
        if not r.text:
            return self._process_empty_response(r, action_result)

        # Everything else is an error at this point
        message = "Can't process response from server. Status Code: {0} Data from server: {1}".format(
            r.status_code,
            r.text.replace('{', '{{').replace('}', '}}')
        )

        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)

    def _make_rest_call(self, endpoint, action_result, method="get", **kwargs):
        """Issue an authenticated request against the v3 API.

        :param endpoint: path (with query string) appended after '/v3'
        :param method: HTTP verb name understood by the requests module
        :param kwargs: forwarded to requests (headers, json, ...)
        :return: RetVal(status, parsed response or None)
        """
        config = self.get_config()

        resp_json = None

        try:
            request_func = getattr(requests, method)
        except AttributeError:
            return RetVal(action_result.set_status(phantom.APP_ERROR, "Invalid method: {0}".format(method)), resp_json)

        url = self._base_url + '/v3' + endpoint

        try:
            r = request_func(
                url,
                auth=('apikey', self._api_key),
                verify=config.get('verify_server_cert', False),
                # Bound the request so a hung server can't stall the action
                timeout=consts.DEFAULT_TIMEOUT,
                **kwargs
            )
        except Exception as e:
            return RetVal(action_result.set_status(phantom.APP_ERROR, "Error Connecting to server. Details: {0}".format(str(e))), resp_json)

        return self._process_response(r, action_result)

    def _handle_test_connectivity(self, param):
        """Verify the configured credentials by listing identifiable languages."""
        action_result = self.add_action_result(ActionResult(dict(param)))

        self.save_progress("Connecting to watson language translator")
        ret_val, response = self._make_rest_call('/identifiable_languages?version={}'.format(self._version), action_result)

        if phantom.is_fail(ret_val):
            # action_result already carries the error details
            self.save_progress("Test Connectivity Failed")
            return action_result.get_status()

        self.save_progress("Test Connectivity Passed")
        return action_result.set_status(phantom.APP_SUCCESS)

    def _handle_get_language(self, param):
        """Identify the language of the supplied text."""
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))

        action_result = self.add_action_result(ActionResult(dict(param)))

        text = param['text']
        headers = {"content-type": "text/plain", "accept": "application/json"}
        # Named 'payload' so the module-level json import is not shadowed
        payload = {"text": text}

        ret_val, response = self._make_rest_call('/identify?version={}'.format(self._version), action_result, method='post',
                                                 headers=headers, json=payload)

        if phantom.is_fail(ret_val):
            return action_result.get_status()

        languages = response.get('languages')
        if not isinstance(languages, list):
            languages = [languages]

        for curr_item in languages:
            action_result.add_data(curr_item)

        action_result.update_summary({'total_languages': action_result.get_data_size()})
        try:
            # First entry is treated as the best match; tolerate a missing/odd shape
            action_result.update_summary({'high_confidence_match': languages[0]['language']})
        except (IndexError, KeyError, TypeError):
            pass

        return action_result.set_status(phantom.APP_SUCCESS)

    def _handle_list_languages(self, param):
        """List all languages the service can identify."""
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))

        action_result = self.add_action_result(ActionResult(dict(param)))

        ret_val, response = self._make_rest_call('/identifiable_languages?version={}'.format(self._version), action_result)

        if phantom.is_fail(ret_val):
            return action_result.get_status()

        languages = response.get('languages')
        if not isinstance(languages, list):
            languages = [languages]

        for curr_item in languages:
            action_result.add_data(curr_item)

        action_result.update_summary({'total_languages': action_result.get_data_size()})

        return action_result.set_status(phantom.APP_SUCCESS)

    def _handle_list_translations(self, param):
        """List the available translation models."""
        action_result = self.add_action_result(ActionResult(dict(param)))

        ret_val, response = self._make_rest_call('/models?version={}'.format(self._version), action_result)

        if phantom.is_fail(ret_val):
            return action_result.get_status()

        models = response.get('models')
        if not isinstance(models, list):
            models = [models]

        for curr_item in models:
            action_result.add_data(curr_item)

        action_result.update_summary({'total_models': action_result.get_data_size()})

        return action_result.set_status(phantom.APP_SUCCESS)

    def _handle_translate_text(self, param):
        """Translate text, selected either by model_id or by source+target."""
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))

        action_result = self.add_action_result(ActionResult(dict(param)))

        if ('model_id' not in param):
            if ('source' not in param or 'target' not in param):
                return action_result.set_status(phantom.APP_ERROR, "Please specify either model_id or source and target to use")

        headers = {"accept": "application/json"}

        ret_val, response = self._make_rest_call('/translate?version={}'.format(self._version), action_result,
                                                 headers=headers, json=param, method='post')

        if phantom.is_fail(ret_val):
            return action_result.get_status()

        action_result.add_data(response)

        return action_result.set_status(phantom.APP_SUCCESS)

    def handle_action(self, param):
        """Route the current app run to the matching action handler."""
        ret_val = phantom.APP_SUCCESS

        action_id = self.get_action_identifier()

        self.debug_print("action_id", self.get_action_identifier())

        if action_id == 'test_connectivity':
            ret_val = self._handle_test_connectivity(param)
        elif action_id == 'get_language':
            ret_val = self._handle_get_language(param)
        elif action_id == 'list_languages':
            ret_val = self._handle_list_languages(param)
        elif action_id == 'list_translations':
            ret_val = self._handle_list_translations(param)
        elif action_id == 'translate_text':
            ret_val = self._handle_translate_text(param)

        return ret_val

    def initialize(self):
        """Load persisted state and the asset configuration."""
        # State is shared across action runs
        self._state = self.load_state()

        config = self.get_config()

        # Normalize the base URL so endpoint joins don't double the slash
        self._base_url = config['base_url']
        if self._base_url.endswith('/'):
            self._base_url = self._base_url[:-1]
        self._api_key = config['api_key']
        self._version = config.get('version', consts.GET_DEFAULT_VERSION)

        return phantom.APP_SUCCESS

    def finalize(self):
        """Persist state; survives across actions and app upgrades."""
        self.save_state(self._state)
        return phantom.APP_SUCCESS
def main():
    """CLI entry point: run an action JSON file against the connector locally.

    Optionally logs into a Phantom instance (``-u``/``-p``) to obtain a
    session token so the action can interact with the platform.
    """
    import argparse
    import sys

    try:
        # pudb is an optional interactive debugger; previously an unconditional
        # import/set_trace crashed when pudb was not installed.
        import pudb
        pudb.set_trace()
    except ImportError:
        pass

    argparser = argparse.ArgumentParser()

    argparser.add_argument('input_test_json', help='Input Test JSON file')
    argparser.add_argument('-u', '--username', help='username', required=False)
    argparser.add_argument('-p', '--password', help='password', required=False)
    argparser.add_argument('-v', '--verify', action='store_true', help='verify', required=False, default=False)

    args = argparser.parse_args()
    session_id = None

    username = args.username
    password = args.password
    verify = args.verify

    if username is not None and password is None:
        # User specified a username but not a password, so prompt for it
        import getpass
        password = getpass.getpass("Password: ")

    if username and password:
        try:
            login_url = WatsonLanguageTranslatorV3Connector._get_phantom_base_url() + '/login'

            print("Accessing the Login page")
            r = requests.get(login_url, verify=verify, timeout=consts.DEFAULT_TIMEOUT)
            csrftoken = r.cookies['csrftoken']

            data = dict()
            data['username'] = username
            data['password'] = password
            data['csrfmiddlewaretoken'] = csrftoken

            headers = dict()
            headers['Cookie'] = 'csrftoken=' + csrftoken
            headers['Referer'] = login_url

            print("Logging into Platform to get the session id")
            r2 = requests.post(login_url, verify=verify,
                               data=data, headers=headers, timeout=consts.DEFAULT_TIMEOUT)
            session_id = r2.cookies['sessionid']
        except Exception as e:
            print("Unable to get session id from the platform. Error: " + str(e))
            sys.exit(1)

    with open(args.input_test_json) as f:
        in_json = f.read()
        in_json = json.loads(in_json)
        print(json.dumps(in_json, indent=4))

        connector = WatsonLanguageTranslatorV3Connector()
        connector.print_progress_message = True

        if session_id is not None:
            # csrftoken/headers exist whenever session_id was obtained above
            in_json['user_session_token'] = session_id
            connector._set_csrf_info(csrftoken, headers['Referer'])

        ret_val = connector._handle_action(json.dumps(in_json), None)
        print(json.dumps(json.loads(ret_val), indent=4))

    sys.exit(0)


if __name__ == '__main__':
    main()
| 36.202381 | 140 | 0.652549 |
import json
import phantom.app as phantom
import requests
from bs4 import BeautifulSoup
from phantom.action_result import ActionResult
from phantom.base_connector import BaseConnector
import watsonv3_consts as consts
class RetVal(tuple):
    """Immutable (status, value) pair returned by the REST helper methods.

    The second element defaults to ``None`` so callers can return a bare
    status without a payload.
    """

    def __new__(cls, val1, val2=None):
        # Delegate to the parent constructor with a fixed 2-tuple shape.
        return super(RetVal, cls).__new__(cls, (val1, val2))
class WatsonLanguageTranslatorV3Connector(BaseConnector):
    """Phantom/SOAR connector for the IBM Watson Language Translator v3 REST API.

    Supported actions: test connectivity, identify language, list identifiable
    languages, list translation models, and translate text.
    """

    def __init__(self):
        # Call the BaseConnector's init first
        super(WatsonLanguageTranslatorV3Connector, self).__init__()

        self._state = None
        # Connection details; populated from the asset config in initialize()
        self._base_url = None
        self._api_key = None
        self._version = None

    def _process_empty_response(self, response, action_result):
        """Treat an empty 200 body as success; any other empty body is an error."""
        if response.status_code == 200:
            return RetVal(phantom.APP_SUCCESS, {})

        return RetVal(action_result.set_status(phantom.APP_ERROR, "Empty response and no information in the header"), None)

    def _process_html_response(self, response, action_result):
        """Extract a readable error from an HTML page (e.g. returned by a proxy)."""
        status_code = response.status_code

        try:
            soup = BeautifulSoup(response.text, "html.parser")
            # Remove the script, style, footer and navigation parts BEFORE
            # capturing the text.  The original captured soup.text first,
            # which made the extract() loop a no-op.
            for element in soup(["script", "style", "footer", "nav"]):
                element.extract()
            error_text = soup.text
            split_lines = error_text.split('\n')
            split_lines = [x.strip() for x in split_lines if x.strip()]
            error_text = '\n'.join(split_lines)
        except Exception:
            # Narrowed from a bare except; any parse failure yields a stub message
            error_text = "Cannot parse error details"

        message = "Status Code: {0}. Data from server:\n{1}\n".format(status_code, error_text)

        # Escape braces so the message survives later str.format calls
        message = message.replace('{', '{{').replace('}', '}}')
        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)

    def _process_json_response(self, r, action_result):
        """Parse a JSON body; 2xx/3xx is success, anything else becomes an error."""
        try:
            resp_json = r.json()
        except Exception as e:
            return RetVal(action_result.set_status(phantom.APP_ERROR, "Unable to parse JSON response. Error: {0}".format(str(e))), None)

        if 200 <= r.status_code < 399:
            return RetVal(phantom.APP_SUCCESS, resp_json)

        # Non-success status: surface the server's message
        message = "Error from server. Status Code: {0} Data from server: {1}".format(
            r.status_code,
            r.text.replace('{', '{{').replace('}', '}}')
        )

        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)

    def _process_response(self, r, action_result):
        """Dispatch response handling based on the Content-Type header."""
        # Store debug data; it gets dumped in the logs if the action fails
        if hasattr(action_result, 'add_debug_data'):
            action_result.add_debug_data({'r_status_code': r.status_code})
            action_result.add_debug_data({'r_text': r.text})
            action_result.add_debug_data({'r_headers': r.headers})

        if 'json' in r.headers.get('Content-Type', ''):
            return self._process_json_response(r, action_result)

        # Proxies between Phantom and the service often answer with HTML on
        # error, regardless of what the API itself speaks.
        if 'html' in r.headers.get('Content-Type', ''):
            return self._process_html_response(r, action_result)

        # No parseable content type: an empty body may still mean success
        if not r.text:
            return self._process_empty_response(r, action_result)

        # Everything else is an error at this point
        message = "Can't process response from server. Status Code: {0} Data from server: {1}".format(
            r.status_code,
            r.text.replace('{', '{{').replace('}', '}}')
        )

        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)

    def _make_rest_call(self, endpoint, action_result, method="get", **kwargs):
        """Issue an authenticated request against the v3 API.

        :param endpoint: path (with query string) appended after '/v3'
        :param method: HTTP verb name understood by the requests module
        :param kwargs: forwarded to requests (headers, json, ...)
        :return: RetVal(status, parsed response or None)
        """
        config = self.get_config()

        resp_json = None

        try:
            request_func = getattr(requests, method)
        except AttributeError:
            return RetVal(action_result.set_status(phantom.APP_ERROR, "Invalid method: {0}".format(method)), resp_json)

        url = self._base_url + '/v3' + endpoint

        try:
            r = request_func(
                url,
                auth=('apikey', self._api_key),
                verify=config.get('verify_server_cert', False),
                # Bound the request so a hung server can't stall the action
                timeout=consts.DEFAULT_TIMEOUT,
                **kwargs
            )
        except Exception as e:
            return RetVal(action_result.set_status(phantom.APP_ERROR, "Error Connecting to server. Details: {0}".format(str(e))), resp_json)

        return self._process_response(r, action_result)

    def _handle_test_connectivity(self, param):
        """Verify the configured credentials by listing identifiable languages."""
        action_result = self.add_action_result(ActionResult(dict(param)))

        self.save_progress("Connecting to watson language translator")
        ret_val, response = self._make_rest_call('/identifiable_languages?version={}'.format(self._version), action_result)

        if phantom.is_fail(ret_val):
            # action_result already carries the error details
            self.save_progress("Test Connectivity Failed")
            return action_result.get_status()

        self.save_progress("Test Connectivity Passed")
        return action_result.set_status(phantom.APP_SUCCESS)

    def _handle_get_language(self, param):
        """Identify the language of the supplied text."""
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))

        action_result = self.add_action_result(ActionResult(dict(param)))

        text = param['text']
        headers = {"content-type": "text/plain", "accept": "application/json"}
        # Named 'payload' so the module-level json import is not shadowed
        payload = {"text": text}

        ret_val, response = self._make_rest_call('/identify?version={}'.format(self._version), action_result, method='post',
                                                 headers=headers, json=payload)

        if phantom.is_fail(ret_val):
            return action_result.get_status()

        languages = response.get('languages')
        if not isinstance(languages, list):
            languages = [languages]

        for curr_item in languages:
            action_result.add_data(curr_item)

        action_result.update_summary({'total_languages': action_result.get_data_size()})
        try:
            # First entry is treated as the best match; tolerate a missing/odd shape
            action_result.update_summary({'high_confidence_match': languages[0]['language']})
        except (IndexError, KeyError, TypeError):
            pass

        return action_result.set_status(phantom.APP_SUCCESS)

    def _handle_list_languages(self, param):
        """List all languages the service can identify."""
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))

        action_result = self.add_action_result(ActionResult(dict(param)))

        ret_val, response = self._make_rest_call('/identifiable_languages?version={}'.format(self._version), action_result)

        if phantom.is_fail(ret_val):
            return action_result.get_status()

        languages = response.get('languages')
        if not isinstance(languages, list):
            languages = [languages]

        for curr_item in languages:
            action_result.add_data(curr_item)

        action_result.update_summary({'total_languages': action_result.get_data_size()})

        return action_result.set_status(phantom.APP_SUCCESS)

    def _handle_list_translations(self, param):
        """List the available translation models."""
        action_result = self.add_action_result(ActionResult(dict(param)))

        ret_val, response = self._make_rest_call('/models?version={}'.format(self._version), action_result)

        if phantom.is_fail(ret_val):
            return action_result.get_status()

        models = response.get('models')
        if not isinstance(models, list):
            models = [models]

        for curr_item in models:
            action_result.add_data(curr_item)

        action_result.update_summary({'total_models': action_result.get_data_size()})

        return action_result.set_status(phantom.APP_SUCCESS)

    def _handle_translate_text(self, param):
        """Translate text, selected either by model_id or by source+target."""
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))

        action_result = self.add_action_result(ActionResult(dict(param)))

        if ('model_id' not in param):
            if ('source' not in param or 'target' not in param):
                return action_result.set_status(phantom.APP_ERROR, "Please specify either model_id or source and target to use")

        headers = {"accept": "application/json"}

        ret_val, response = self._make_rest_call('/translate?version={}'.format(self._version), action_result,
                                                 headers=headers, json=param, method='post')

        if phantom.is_fail(ret_val):
            return action_result.get_status()

        action_result.add_data(response)

        return action_result.set_status(phantom.APP_SUCCESS)

    def handle_action(self, param):
        """Route the current app run to the matching action handler."""
        ret_val = phantom.APP_SUCCESS

        action_id = self.get_action_identifier()

        self.debug_print("action_id", self.get_action_identifier())

        if action_id == 'test_connectivity':
            ret_val = self._handle_test_connectivity(param)
        elif action_id == 'get_language':
            ret_val = self._handle_get_language(param)
        elif action_id == 'list_languages':
            ret_val = self._handle_list_languages(param)
        elif action_id == 'list_translations':
            ret_val = self._handle_list_translations(param)
        elif action_id == 'translate_text':
            ret_val = self._handle_translate_text(param)

        return ret_val

    def initialize(self):
        """Load persisted state and the asset configuration."""
        # State is shared across action runs
        self._state = self.load_state()

        config = self.get_config()

        # Normalize the base URL so endpoint joins don't double the slash
        self._base_url = config['base_url']
        if self._base_url.endswith('/'):
            self._base_url = self._base_url[:-1]
        self._api_key = config['api_key']
        self._version = config.get('version', consts.GET_DEFAULT_VERSION)

        return phantom.APP_SUCCESS

    def finalize(self):
        """Persist state; survives across actions and app upgrades."""
        self.save_state(self._state)
        return phantom.APP_SUCCESS
def main():
    """CLI entry point: run an action JSON file against the connector locally.

    Optionally logs into a Phantom instance (``-u``/``-p``) to obtain a
    session token so the action can interact with the platform.
    """
    import argparse
    import sys

    try:
        # pudb is an optional interactive debugger; previously an unconditional
        # import/set_trace crashed when pudb was not installed.
        import pudb
        pudb.set_trace()
    except ImportError:
        pass

    argparser = argparse.ArgumentParser()

    argparser.add_argument('input_test_json', help='Input Test JSON file')
    argparser.add_argument('-u', '--username', help='username', required=False)
    argparser.add_argument('-p', '--password', help='password', required=False)
    argparser.add_argument('-v', '--verify', action='store_true', help='verify', required=False, default=False)

    args = argparser.parse_args()
    session_id = None

    username = args.username
    password = args.password
    verify = args.verify

    if username is not None and password is None:
        # User specified a username but not a password, so prompt for it
        import getpass
        password = getpass.getpass("Password: ")

    if username and password:
        try:
            login_url = WatsonLanguageTranslatorV3Connector._get_phantom_base_url() + '/login'

            print("Accessing the Login page")
            r = requests.get(login_url, verify=verify, timeout=consts.DEFAULT_TIMEOUT)
            csrftoken = r.cookies['csrftoken']

            data = dict()
            data['username'] = username
            data['password'] = password
            data['csrfmiddlewaretoken'] = csrftoken

            headers = dict()
            headers['Cookie'] = 'csrftoken=' + csrftoken
            headers['Referer'] = login_url

            print("Logging into Platform to get the session id")
            r2 = requests.post(login_url, verify=verify,
                               data=data, headers=headers, timeout=consts.DEFAULT_TIMEOUT)
            session_id = r2.cookies['sessionid']
        except Exception as e:
            print("Unable to get session id from the platform. Error: " + str(e))
            sys.exit(1)

    with open(args.input_test_json) as f:
        in_json = f.read()
        in_json = json.loads(in_json)
        print(json.dumps(in_json, indent=4))

        connector = WatsonLanguageTranslatorV3Connector()
        connector.print_progress_message = True

        if session_id is not None:
            # csrftoken/headers exist whenever session_id was obtained above
            in_json['user_session_token'] = session_id
            connector._set_csrf_info(csrftoken, headers['Referer'])

        ret_val = connector._handle_action(json.dumps(in_json), None)
        print(json.dumps(json.loads(ret_val), indent=4))

    sys.exit(0)


if __name__ == '__main__':
    main()
| true | true |
f733ee46ee0e2b9ada989eb2579719cfcb6c3e4d | 4,563 | py | Python | advancedmovieselection/src/Source/MovieDB/tmdb3/cache.py | builder08/enigma2-plugins | 45583c69b807ce26d756a51973c81d877afe3694 | [
"OLDAP-2.3"
] | null | null | null | advancedmovieselection/src/Source/MovieDB/tmdb3/cache.py | builder08/enigma2-plugins | 45583c69b807ce26d756a51973c81d877afe3694 | [
"OLDAP-2.3"
] | null | null | null | advancedmovieselection/src/Source/MovieDB/tmdb3/cache.py | builder08/enigma2-plugins | 45583c69b807ce26d756a51973c81d877afe3694 | [
"OLDAP-2.3"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------
# Name: cache.py
# Python Library
# Author: Raymond Wagner
# Purpose: Caching framework to store TMDb API results
#-----------------------
from __future__ import absolute_import
import time
import os
from .tmdb_exceptions import *
from .cache_engine import Engines
from . import cache_null
from . import cache_file
class Cache( object ):
    """
    This class implements a persistent cache, backed in a file specified in
    the object creation. The file is protected for safe, concurrent access
    by multiple instances using flock.

    This cache uses JSON for speed and storage efficiency, so only simple
    data types are supported.

    Data is stored in a simple format {key:(expiretimestamp, data)}
    """
    def __init__(self, engine=None, *args, **kwargs):
        self._engine = None
        self._data = {}
        # Creation timestamp of the newest object seen; lets _import be incremental
        self._age = 0
        self.configure(engine, *args, **kwargs)

    def _import(self, data=None):
        # Merge engine-stored objects (oldest first) into the in-memory map,
        # skipping anything already expired.
        if data is None:
            data = self._engine.get(self._age)
        for obj in sorted(data, key=lambda x: x.creation):
            if not obj.expired:
                self._data[obj.key] = obj
            self._age = max(self._age, obj.creation)

    def _expire(self):
        # Drop expired entries; iterate a snapshot because we mutate the dict
        for k, v in list(self._data.items()):
            if v.expired:
                del self._data[k]

    def configure(self, engine, *args, **kwargs):
        """Select and configure the backing cache engine ('file' by default)."""
        if engine is None:
            engine = 'file'
        elif engine not in Engines:
            raise TMDBCacheError("Invalid cache engine specified: "+engine)
        self._engine = Engines[engine](self)
        self._engine.configure(*args, **kwargs)

    def put(self, key, data, lifetime=60*60*12):
        """Store `data` under `key` for `lifetime` seconds (default 12 hours)."""
        # pull existing data, so cache will be fresh when written back out
        if self._engine is None:
            raise TMDBCacheError("No cache engine configured")
        self._expire()
        self._import(self._engine.put(key, data, lifetime))

    def get(self, key):
        """Return the cached value for `key`, or None on a miss."""
        if self._engine is None:
            raise TMDBCacheError("No cache engine configured")
        self._expire()
        if key not in self._data:
            self._import()
        try:
            return self._data[key].data
        except KeyError:
            # Narrowed from a bare except: only a missing key is a cache miss
            return None

    def cached(self, callback):
        """
        Returns a decorator that uses a callback to specify the key to use
        for caching the responses from the decorated function.
        """
        return self.Cached(self, callback)

    class Cached( object ):
        def __init__(self, cache, callback, func=None, inst=None):
            self.cache = cache
            self.callback = callback
            self.func = func
            self.inst = inst
            if func:
                # Masquerade as the wrapped function for introspection
                self.__module__ = func.__module__
                self.__name__ = func.__name__
                self.__doc__ = func.__doc__

        def __call__(self, *args, **kwargs):
            if self.func is None:  # decorator is waiting to be given a function
                if len(kwargs) or (len(args) != 1):
                    # (error message grammar fixed)
                    raise TMDBCacheError('Cache.Cached decorator must be called '+\
                                         'with a single callable argument before '+\
                                         'it can be used.')
                elif args[0] is None:
                    raise TMDBCacheError('Cache.Cached decorator called before '+\
                                         'being given a function to wrap.')
                elif not callable(args[0]):
                    raise TMDBCacheError('Cache.Cached must be provided a '+\
                                         'callable object.')
                return self.__class__(self.cache, self.callback, args[0])
            elif self.inst.lifetime == 0:
                # Caching disabled for this instance; call straight through
                return self.func(*args, **kwargs)
            else:
                key = self.callback()
                data = self.cache.get(key)
                if data is None:
                    data = self.func(*args, **kwargs)
                    if hasattr(self.inst, 'lifetime'):
                        self.cache.put(key, data, self.inst.lifetime)
                    else:
                        self.cache.put(key, data)
                return data

        def __get__(self, inst, owner):
            if inst is None:
                return self
            # Bind both the wrapped function and the key callback to the instance
            func = self.func.__get__(inst, owner)
            callback = self.callback.__get__(inst, owner)
            return self.__class__(self.cache, callback, func, inst)
| 36.214286 | 83 | 0.55183 |
from __future__ import absolute_import
import time
import os
from .tmdb_exceptions import *
from .cache_engine import Engines
from . import cache_null
from . import cache_file
class Cache( object ):
    """
    This class implements a persistent cache, backed in a file specified in
    the object creation. The file is protected for safe, concurrent access
    by multiple instances using flock.

    This cache uses JSON for speed and storage efficiency, so only simple
    data types are supported.

    Data is stored in a simple format {key:(expiretimestamp, data)}
    """
    def __init__(self, engine=None, *args, **kwargs):
        self._engine = None
        self._data = {}
        # Creation timestamp of the newest object seen; lets _import be incremental
        self._age = 0
        self.configure(engine, *args, **kwargs)

    def _import(self, data=None):
        # Merge engine-stored objects (oldest first) into the in-memory map,
        # skipping anything already expired.
        if data is None:
            data = self._engine.get(self._age)
        for obj in sorted(data, key=lambda x: x.creation):
            if not obj.expired:
                self._data[obj.key] = obj
            self._age = max(self._age, obj.creation)

    def _expire(self):
        # Drop expired entries; iterate a snapshot because we mutate the dict
        for k, v in list(self._data.items()):
            if v.expired:
                del self._data[k]

    def configure(self, engine, *args, **kwargs):
        """Select and configure the backing cache engine ('file' by default)."""
        if engine is None:
            engine = 'file'
        elif engine not in Engines:
            raise TMDBCacheError("Invalid cache engine specified: "+engine)
        self._engine = Engines[engine](self)
        self._engine.configure(*args, **kwargs)

    def put(self, key, data, lifetime=60*60*12):
        """Store `data` under `key` for `lifetime` seconds (default 12 hours)."""
        # pull existing data, so cache will be fresh when written back out
        if self._engine is None:
            raise TMDBCacheError("No cache engine configured")
        self._expire()
        self._import(self._engine.put(key, data, lifetime))

    def get(self, key):
        """Return the cached value for `key`, or None on a miss."""
        if self._engine is None:
            raise TMDBCacheError("No cache engine configured")
        self._expire()
        if key not in self._data:
            self._import()
        try:
            return self._data[key].data
        except KeyError:
            # Narrowed from a bare except: only a missing key is a cache miss
            return None

    def cached(self, callback):
        """
        Returns a decorator that uses a callback to specify the key to use
        for caching the responses from the decorated function.
        """
        return self.Cached(self, callback)

    class Cached( object ):
        def __init__(self, cache, callback, func=None, inst=None):
            self.cache = cache
            self.callback = callback
            self.func = func
            self.inst = inst
            if func:
                # Masquerade as the wrapped function for introspection
                self.__module__ = func.__module__
                self.__name__ = func.__name__
                self.__doc__ = func.__doc__

        def __call__(self, *args, **kwargs):
            if self.func is None:  # decorator is waiting to be given a function
                if len(kwargs) or (len(args) != 1):
                    # (error message grammar fixed)
                    raise TMDBCacheError('Cache.Cached decorator must be called '+\
                                         'with a single callable argument before '+\
                                         'it can be used.')
                elif args[0] is None:
                    raise TMDBCacheError('Cache.Cached decorator called before '+\
                                         'being given a function to wrap.')
                elif not callable(args[0]):
                    raise TMDBCacheError('Cache.Cached must be provided a '+\
                                         'callable object.')
                return self.__class__(self.cache, self.callback, args[0])
            elif self.inst.lifetime == 0:
                # Caching disabled for this instance; call straight through
                return self.func(*args, **kwargs)
            else:
                key = self.callback()
                data = self.cache.get(key)
                if data is None:
                    data = self.func(*args, **kwargs)
                    if hasattr(self.inst, 'lifetime'):
                        self.cache.put(key, data, self.inst.lifetime)
                    else:
                        self.cache.put(key, data)
                return data

        def __get__(self, inst, owner):
            if inst is None:
                return self
            # Bind both the wrapped function and the key callback to the instance
            func = self.func.__get__(inst, owner)
            callback = self.callback.__get__(inst, owner)
            return self.__class__(self.cache, callback, func, inst)
f733ee7fc8179183522c57191edd20618ec63c97 | 4,012 | py | Python | spinup/rewards/cvar_utils.py | jerryzhucs21/spinningup | 2992e6a8163d78c3f82a3d92c5235fda0527c398 | [
"MIT"
] | 2 | 2021-06-21T05:19:01.000Z | 2021-07-02T14:51:16.000Z | spinup/rewards/cvar_utils.py | zaynahjaved/pg-broil | 2992e6a8163d78c3f82a3d92c5235fda0527c398 | [
"MIT"
] | null | null | null | spinup/rewards/cvar_utils.py | zaynahjaved/pg-broil | 2992e6a8163d78c3f82a3d92c5235fda0527c398 | [
"MIT"
] | null | null | null | import numpy as np
def relu(x):
    """Rectifier: pass positive values through unchanged, clamp the rest to 0.0."""
    return x if x > 0 else 0.0
def cvar_fn_val(sigma, exp_ret_rs, prob_rs, alpha):
    """Evaluate the CVaR auxiliary objective at *sigma*:

        f(sigma) = sigma - E[relu(sigma - R)] / (1 - alpha)

    where R ranges over exp_ret_rs with probabilities prob_rs.
    """
    expected_shortfall = 0.0
    for prob, ret in zip(prob_rs, exp_ret_rs):
        expected_shortfall += prob * relu(sigma - ret)
    return sigma - 1.0 / (1.0 - alpha) * expected_shortfall
def cvar_line_search_pg(exp_ret_rs, prob_rs, alpha, num_discretize=1000):
    """Approximate the sigma maximizing the CVaR auxiliary objective by a
    discrete grid search over [min(exp_ret_rs), max(exp_ret_rs)].

    Returns (sigma, value) for the first grid point attaining the maximum.
    """
    assert(len(exp_ret_rs) == len(prob_rs))
    assert(alpha >= 0 and alpha <= 1)
    assert(np.abs(np.sum(prob_rs) - 1.0) < 0.000001)
    best_sigma, best_val = None, -np.inf
    grid = np.linspace(min(exp_ret_rs), max(exp_ret_rs), num_discretize)
    for candidate in grid:
        val = cvar_fn_val(candidate, exp_ret_rs, prob_rs, alpha)
        # Strict '>' keeps the earliest maximizer on ties.
        if val > best_val:
            best_sigma, best_val = candidate, val
    return best_sigma, best_val
def cvar_enumerate_pg(exp_ret_rs, prob_rs, alpha):
    """Exactly maximize the CVaR auxiliary objective by enumeration.

    The objective sigma -> sigma - E[relu(sigma - R)]/(1 - alpha) is piecewise
    linear and concave, so its maximum is attained at one of the expected
    returns.  Scan the candidates in increasing order and stop at the first
    decrease (concavity guarantees the maximum has already been seen).

    Returns (sigma, value); ties keep the largest tied candidate, matching
    the '>=' update rule.
    """
    max_val = -np.inf
    max_sigma = None
    # Only the candidate sigmas need sorting; prob_rs stays paired with
    # exp_ret_rs inside cvar_fn_val.  (The previous version sorted the
    # (ret, prob) pairs and tracked an unused cumulative probability.)
    for ret in sorted(exp_ret_rs):
        cvar_val = cvar_fn_val(ret, exp_ret_rs, prob_rs, alpha)
        if cvar_val >= max_val:
            max_val = cvar_val
            max_sigma = ret
        else:
            # Concave objective started decreasing: done.
            break
    return max_sigma, max_val
# if __name__ == "__main__":
# #run test to make sure both give same answers.
# #Note cvar_enumerate_pg is orders of magnitude faster and gives same answer as far as I can tell
# for i in range(100):
# seed = np.random.randint(1000)
# print(seed)
# np.random.seed(seed)
# num_rewards = 50
# exp_rets = 200*np.random.rand(num_rewards) - 100 #[10,40, 80]
# probs = np.random.rand(num_rewards)#[0.3, 0.3, 0.4]
# probs /= np.sum(probs)
# #print(np.sum(probs))
# alpha = 0.6
# num_discretize = 10000
# #print("exp rets", exp_rets)
# #print("probs", probs)
# sigma, cvar = cvar_line_search_pg(exp_rets, probs, alpha, num_discretize)
# print("sigma = ", sigma)
# print("cvar = ", cvar)
# sigma_enumerate, cvar_enumerate = cvar_enumerate_pg(exp_rets, probs, alpha)
# print("enum sigma", sigma_enumerate)
# print("sort cvar", cvar_enumerate)
# if abs(sigma_enumerate - sigma) > 0.1 or abs(cvar - cvar_enumerate) > 0.001:
# print("wrong")
# print(abs(sigma_enumerate - sigma))
# input()
if __name__ == "__main__":
    # Sanity check: the grid search and the enumeration method should agree.
    # cvar_enumerate_pg is orders of magnitude faster and, empirically, gives
    # the same answer.
    num_rewards = 2
    exp_rets = [10, 90]
    probs = [0.05, 0.95]
    probs /= np.sum(probs)  # list / numpy scalar -> probs becomes an ndarray
    alpha = 0.95
    num_discretize = 10000
    sigma, cvar = cvar_line_search_pg(exp_rets, probs, alpha, num_discretize)
    print("sigma = ", sigma)
    print("cvar = ", cvar)
    sigma_enumerate, cvar_enumerate = cvar_enumerate_pg(exp_rets, probs, alpha)
    print("enum sigma", sigma_enumerate)
    print("sort cvar", cvar_enumerate)
    # Tolerances: the grid search only approximates sigma, so compare loosely.
    if abs(sigma_enumerate - sigma) > 0.1 or abs(cvar - cvar_enumerate) > 0.001:
        print("wrong")
        print(abs(sigma_enumerate - sigma))
        input()  # pause so a mismatch is noticed when run interactively
| 32.885246 | 118 | 0.619641 | import numpy as np
def relu(x):
if x > 0:
return x
else:
return 0.0
def cvar_fn_val(sigma, exp_ret_rs, prob_rs, alpha):
fn_val_relu_part = 0.0
for i,ret in enumerate(exp_ret_rs):
fn_val_relu_part += prob_rs[i] * relu(sigma - ret)
fn_val = sigma - 1.0 / (1.0 - alpha) * fn_val_relu_part
return fn_val
def cvar_line_search_pg(exp_ret_rs, prob_rs, alpha, num_discretize=1000):
assert(len(exp_ret_rs) == len(prob_rs))
assert(alpha >= 0 and alpha <= 1)
assert(np.abs(np.sum(prob_rs) - 1.0) < 0.000001)
max_val = -np.inf
max_sigma = None
for x in np.linspace(min(exp_ret_rs), max(exp_ret_rs), num_discretize):
cvar_val = cvar_fn_val(x, exp_ret_rs, prob_rs, alpha)
if cvar_val > max_val:
max_val = cvar_val
max_sigma = x
return max_sigma, max_val
def cvar_enumerate_pg(exp_ret_rs, prob_rs, alpha):
sorted_exp_ret_rs, sorted_prob_rs = zip(*sorted(zip(exp_ret_rs, prob_rs)))
cum_prob = 0.0
max_val = -np.inf
max_sigma = None
for ret in sorted_exp_ret_rs:
cvar_val = cvar_fn_val(ret, exp_ret_rs, prob_rs, alpha)
if cvar_val >= max_val:
max_val = cvar_val
max_sigma = ret
elif cvar_val < max_val:
break
return max_sigma, max_val
ar_line_search_pg(exp_rets, probs, alpha, num_discretize)
print("sigma = ", sigma)
print("cvar = ", cvar)
sigma_enumerate, cvar_enumerate = cvar_enumerate_pg(exp_rets, probs, alpha)
print("enum sigma", sigma_enumerate)
print("sort cvar", cvar_enumerate)
if abs(sigma_enumerate - sigma) > 0.1 or abs(cvar - cvar_enumerate) > 0.001:
print("wrong")
print(abs(sigma_enumerate - sigma))
input()
| true | true |
f733eef4fc41f2dcbe6fd75f6aa36f1dead325c5 | 1,816 | py | Python | library/migrations/0002_auto_20201017_1322.py | himasnhu1/example | 27db7941c5f7bd16ffb407654818012e43d82f7e | [
"MIT"
] | null | null | null | library/migrations/0002_auto_20201017_1322.py | himasnhu1/example | 27db7941c5f7bd16ffb407654818012e43d82f7e | [
"MIT"
] | null | null | null | library/migrations/0002_auto_20201017_1322.py | himasnhu1/example | 27db7941c5f7bd16ffb407654818012e43d82f7e | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-10-17 07:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: wires up relational fields between models
    # created in the 0001_initial migrations of the listed apps.

    initial = True

    dependencies = [
        ('student', '0001_initial'),
        ('core', '0001_initial'),
        ('library', '0001_initial'),
    ]

    operations = [
        # PROTECT throughout: referenced rows cannot be deleted while a
        # foreign key points at them.
        migrations.AddField(
            model_name='librarylocker',
            name='assigned_student',
            # Nullable: a locker may be unassigned.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='student.Student'),
        ),
        migrations.AddField(
            model_name='librarylocker',
            name='library_branch',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='library.LibraryBranch'),
        ),
        migrations.AddField(
            model_name='librarybranch',
            name='ammenities',
            field=models.ManyToManyField(to='core.Ammenity'),
        ),
        migrations.AddField(
            model_name='librarybranch',
            name='library',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='library.Library'),
        ),
        migrations.AddField(
            model_name='librarybranch',
            name='opening_days',
            field=models.ManyToManyField(to='core.OpeningDays'),
        ),
        migrations.AddField(
            model_name='holidays',
            name='library_branch',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='library.LibraryBranch'),
        ),
        migrations.AddField(
            model_name='attendanceqrcode',
            name='branch',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='library.LibraryBranch'),
        ),
    ]
| 33.62963 | 126 | 0.605727 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('student', '0001_initial'),
('core', '0001_initial'),
('library', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='librarylocker',
name='assigned_student',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='student.Student'),
),
migrations.AddField(
model_name='librarylocker',
name='library_branch',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='library.LibraryBranch'),
),
migrations.AddField(
model_name='librarybranch',
name='ammenities',
field=models.ManyToManyField(to='core.Ammenity'),
),
migrations.AddField(
model_name='librarybranch',
name='library',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='library.Library'),
),
migrations.AddField(
model_name='librarybranch',
name='opening_days',
field=models.ManyToManyField(to='core.OpeningDays'),
),
migrations.AddField(
model_name='holidays',
name='library_branch',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='library.LibraryBranch'),
),
migrations.AddField(
model_name='attendanceqrcode',
name='branch',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='library.LibraryBranch'),
),
]
| true | true |
f733ef14b6391cad0c7d651b0efd1ecac874f994 | 663 | py | Python | manage.py | Abdihakim-Muhumed/phogram | 314e3b9149de6022ca79172c55ce0c55439813ed | [
"Unlicense"
] | null | null | null | manage.py | Abdihakim-Muhumed/phogram | 314e3b9149de6022ca79172c55ce0c55439813ed | [
"Unlicense"
] | null | null | null | manage.py | Abdihakim-Muhumed/phogram | 314e3b9149de6022ca79172c55ce0c55439813ed | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Default settings module; an externally-set DJANGO_SETTINGS_MODULE wins.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'phogram.settings')
    try:
        # Imported lazily so a missing/un-activated Django install produces
        # the explanatory ImportError below instead of failing at import time.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| 28.826087 | 73 | 0.678733 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'phogram.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
f733efe9187ce166c7679c03c87756eb028fd98f | 56 | py | Python | api/v1/urls.py | Osca-M/django-oauth2 | 15bdd5c39e013c9f390eb586e64367e98ff59c4c | [
"MIT"
] | null | null | null | api/v1/urls.py | Osca-M/django-oauth2 | 15bdd5c39e013c9f390eb586e64367e98ff59c4c | [
"MIT"
] | null | null | null | api/v1/urls.py | Osca-M/django-oauth2 | 15bdd5c39e013c9f390eb586e64367e98ff59c4c | [
"MIT"
] | null | null | null | from django.urls import path, include
urlpatterns = []  # No routes registered for the v1 API yet.
| 14 | 37 | 0.75 | from django.urls import path, include
urlpatterns = []
| true | true |
f733efea09cb36caa25e04787c3aa703d3e64396 | 95,690 | py | Python | Lib/typing.py | y0urself/cpython | 39dec1c09c9f5ddf951bed5b875f837735a06733 | [
"0BSD"
] | null | null | null | Lib/typing.py | y0urself/cpython | 39dec1c09c9f5ddf951bed5b875f837735a06733 | [
"0BSD"
] | null | null | null | Lib/typing.py | y0urself/cpython | 39dec1c09c9f5ddf951bed5b875f837735a06733 | [
"0BSD"
] | null | null | null | """
The typing module: Support for gradual typing as defined by PEP 484.
At large scale, the structure of the module is following:
* Imports and exports, all public names should be explicitly added to __all__.
* Internal helper functions: these should never be used in code outside this module.
* _SpecialForm and its instances (special forms):
Any, NoReturn, ClassVar, Union, Optional, Concatenate
* Classes whose instances can be type arguments in addition to types:
ForwardRef, TypeVar and ParamSpec
* The core of internal generics API: _GenericAlias and _VariadicGenericAlias, the latter is
currently only used by Tuple and Callable. All subscripted types like X[int], Union[int, str],
etc., are instances of either of these classes.
* The public counterpart of the generics API consists of two classes: Generic and Protocol.
* Public helper functions: get_type_hints, overload, cast, no_type_check,
no_type_check_decorator.
* Generic aliases for collections.abc ABCs and few additional protocols.
* Special types: NewType, NamedTuple, TypedDict.
* Wrapper submodules for re and io related types.
"""
from abc import abstractmethod, ABCMeta
import collections
import collections.abc
import contextlib
import functools
import operator
import re as stdlib_re # Avoid confusion with the re we export.
import sys
import types
import warnings
from types import WrapperDescriptorType, MethodWrapperType, MethodDescriptorType, GenericAlias
try:
    from _typing import _idfunc
except ImportError:
    # Fall back to a pure-Python identity function when the _typing extension
    # module is unavailable; the first argument is accepted and ignored.
    def _idfunc(_, x):
        return x
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
'Annotated',
'Any',
'Callable',
'ClassVar',
'Concatenate',
'Final',
'ForwardRef',
'Generic',
'Literal',
'Optional',
'ParamSpec',
'Protocol',
'Tuple',
'Type',
'TypeVar',
'Union',
# ABCs (from collections.abc).
'AbstractSet', # collections.abc.Set.
'ByteString',
'Container',
'ContextManager',
'Hashable',
'ItemsView',
'Iterable',
'Iterator',
'KeysView',
'Mapping',
'MappingView',
'MutableMapping',
'MutableSequence',
'MutableSet',
'Sequence',
'Sized',
'ValuesView',
'Awaitable',
'AsyncIterator',
'AsyncIterable',
'Coroutine',
'Collection',
'AsyncGenerator',
'AsyncContextManager',
# Structural checks, a.k.a. protocols.
'Reversible',
'SupportsAbs',
'SupportsBytes',
'SupportsComplex',
'SupportsFloat',
'SupportsIndex',
'SupportsInt',
'SupportsRound',
# Concrete collection types.
'ChainMap',
'Counter',
'Deque',
'Dict',
'DefaultDict',
'List',
'OrderedDict',
'Set',
'FrozenSet',
'NamedTuple', # Not really a type.
'TypedDict', # Not really a type.
'Generator',
# Other concrete types.
'BinaryIO',
'IO',
'Match',
'Pattern',
'TextIO',
# One-off things.
'AnyStr',
'cast',
'final',
'get_args',
'get_origin',
'get_type_hints',
'is_typeddict',
'NewType',
'no_type_check',
'no_type_check_decorator',
'NoReturn',
'overload',
'ParamSpecArgs',
'ParamSpecKwargs',
'reveal_type',
'runtime_checkable',
'Text',
'TYPE_CHECKING',
'TypeAlias',
'TypeGuard',
]
# The pseudo-submodules 're' and 'io' are part of the public
# namespace, but excluded from __all__ because they might stomp on
# legitimate imports of those modules.
def _type_convert(arg, module=None, *, allow_special_forms=False):
    """For converting None to type(None), and strings to ForwardRef."""
    # None in an annotation is shorthand for NoneType.
    if arg is None:
        return type(None)
    # Bare strings become lazily-evaluated forward references; *module*
    # records where the reference was written so it can be resolved there.
    if isinstance(arg, str):
        return ForwardRef(arg, module=module, is_class=allow_special_forms)
    return arg
def _type_check(arg, msg, is_argument=True, module=None, *, allow_special_forms=False):
    """Check that the argument is a type, and return it (internal helper).

    As a special case, accept None and return type(None) instead. Also wrap strings
    into ForwardRef instances. Consider several corner cases, for example plain
    special forms like Union are not valid, while Union[int, str] is OK, etc.
    The msg argument is a human-readable error message, e.g::

        "Union[arg, ...]: arg should be a type."

    We append the repr() of the actual value (truncated to 100 chars).
    """
    # Forms that may not appear (unparameterized) in this position.
    invalid_generic_forms = (Generic, Protocol)
    if not allow_special_forms:
        invalid_generic_forms += (ClassVar,)
        if is_argument:
            invalid_generic_forms += (Final,)

    arg = _type_convert(arg, module=module, allow_special_forms=allow_special_forms)
    if (isinstance(arg, _GenericAlias) and
            arg.__origin__ in invalid_generic_forms):
        raise TypeError(f"{arg} is not valid as type argument")
    # These special forms are acceptable as-is (no subscription required).
    if arg in (Any, NoReturn, ClassVar, Final, TypeAlias):
        return arg
    if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol):
        raise TypeError(f"Plain {arg} is not valid as type argument")
    if isinstance(arg, (type, TypeVar, ForwardRef, types.UnionType, ParamSpec)):
        return arg
    # Anything callable is tolerated (e.g. aliases, callables used as types).
    if not callable(arg):
        raise TypeError(f"{msg} Got {arg!r:.100}.")
    return arg
def _is_param_expr(arg):
    """Return True if *arg* can stand for a Callable parameter list."""
    if arg is ...:
        return True
    return isinstance(arg, (tuple, list, ParamSpec, _ConcatenateGenericAlias))
def _type_repr(obj):
    """Produce a compact repr for annotation display (internal helper).

    Bare classes render as ``module.qualname`` (just the qualified name for
    builtins), Ellipsis as ``'...'``, and plain functions by name; anything
    else falls back to ``repr(obj)``.
    """
    # types.GenericAlias (e.g. list[int]) already reprs the way we want.
    if isinstance(obj, types.GenericAlias):
        return repr(obj)
    if isinstance(obj, type):
        module, qualname = obj.__module__, obj.__qualname__
        return qualname if module == 'builtins' else f'{module}.{qualname}'
    if obj is ...:
        return '...'
    if isinstance(obj, types.FunctionType):
        return obj.__name__
    return repr(obj)
def _collect_type_vars(types_, typevar_types=None):
    """Return the type variables contained in *types_*, in order of first
    appearance. For example::

        _collect_type_vars((T, List[S, T])) == (T, S)
    """
    if typevar_types is None:
        typevar_types = TypeVar
    found = []
    for item in types_:
        if isinstance(item, typevar_types) and item not in found:
            found.append(item)
        # Subscripted generics and unions contribute their own parameters.
        if isinstance(item, (_GenericAlias, GenericAlias, types.UnionType)):
            found.extend([tv for tv in item.__parameters__ if tv not in found])
    return tuple(found)
def _check_generic(cls, parameters, elen):
    """Validate the number of type arguments given to a generic class.

    Raises TypeError with a helpful message on a count mismatch, or when
    *cls* is not generic at all (elen is zero/falsy).
    """
    if not elen:
        raise TypeError(f"{cls} is not a generic class")
    alen = len(parameters)
    if alen == elen:
        return
    which = 'many' if alen > elen else 'few'
    raise TypeError(f"Too {which} arguments for {cls};"
                    f" actual {alen}, expected {elen}")
def _prepare_paramspec_params(cls, params):
    """Prepares the parameters for a Generic containing ParamSpec
    variables (internal helper).
    """
    # Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612.
    if (len(cls.__parameters__) == 1
            and params and not _is_param_expr(params[0])):
        assert isinstance(cls.__parameters__[0], ParamSpec)
        # Bundle the loose arguments into the single parameter list the
        # lone ParamSpec expects.
        return (params,)
    else:
        _check_generic(cls, params, len(cls.__parameters__))
        _params = []
        # Convert lists to tuples to help other libraries cache the results.
        for p, tvar in zip(params, cls.__parameters__):
            if isinstance(tvar, ParamSpec) and isinstance(p, list):
                p = tuple(p)
            _params.append(p)
        return tuple(_params)
def _deduplicate(params):
    """Remove strict duplicates, keeping the first occurrence of each
    (internal helper).  Returns *params* unchanged when there are none.
    """
    remaining = set(params)
    if len(remaining) == len(params):
        return params
    deduped = []
    for item in params:
        # Keep only the first sighting; later duplicates find the set empty.
        if item in remaining:
            deduped.append(item)
            remaining.remove(item)
    assert not remaining, remaining
    return deduped
def _remove_dups_flatten(parameters):
    """An internal helper for Union creation and substitution: flatten Unions
    among parameters, then remove duplicates.
    """
    flattened = []
    for param in parameters:
        # Union[Union[int, str], float] contributes int and str here.
        if isinstance(param, (_UnionGenericAlias, types.UnionType)):
            flattened.extend(param.__args__)
        else:
            flattened.append(param)
    return tuple(_deduplicate(flattened))
def _flatten_literal_params(parameters):
    """An internal helper for Literal creation: flatten nested Literal
    arguments into a single tuple of values.
    """
    flat = []
    for param in parameters:
        if isinstance(param, _LiteralGenericAlias):
            flat.extend(param.__args__)
        else:
            flat.append(param)
    return tuple(flat)
# Cache-clear callables registered by _tp_cache, so tests can reset state.
_cleanups = []


def _tp_cache(func=None, /, *, typed=False):
    """Internal wrapper caching __getitem__ of generic types with a fallback to
    original function for non-hashable arguments.
    """
    def decorator(func):
        # *typed* mirrors functools.lru_cache: cache 1 and 1.0 separately.
        cached = functools.lru_cache(typed=typed)(func)
        _cleanups.append(cached.cache_clear)

        @functools.wraps(func)
        def inner(*args, **kwds):
            try:
                return cached(*args, **kwds)
            except TypeError:
                pass  # All real errors (not unhashable args) are raised below.
            return func(*args, **kwds)
        return inner

    # Usable both as @_tp_cache and as @_tp_cache(typed=...).
    if func is not None:
        return decorator(func)

    return decorator
def _eval_type(t, globalns, localns, recursive_guard=frozenset()):
    """Evaluate all forward references in the given type t.
    For use of globalns and localns see the docstring for get_type_hints().
    recursive_guard is used to prevent infinite recursion
    with recursive ForwardRef.
    """
    if isinstance(t, ForwardRef):
        return t._evaluate(globalns, localns, recursive_guard)
    if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
        ev_args = tuple(_eval_type(a, globalns, localns, recursive_guard) for a in t.__args__)
        # Nothing resolved to something new: reuse t rather than rebuilding.
        if ev_args == t.__args__:
            return t
        # Rebuild with the evaluated arguments, per alias kind.
        if isinstance(t, GenericAlias):
            return GenericAlias(t.__origin__, ev_args)
        if isinstance(t, types.UnionType):
            return functools.reduce(operator.or_, ev_args)
        else:
            return t.copy_with(ev_args)
    return t
class _Final:
    """Mixin to prohibit subclassing"""

    __slots__ = ('__weakref__',)

    def __init_subclass__(cls, /, *args, **kwds):
        # Classes in this module opt out by passing _root=True; the keyword
        # is consumed here (super().__init_subclass__ is not called with it).
        if '_root' not in kwds:
            raise TypeError("Cannot subclass special typing classes")


class _Immutable:
    """Mixin to indicate that object should not be copied."""

    __slots__ = ()

    def __copy__(self):
        return self

    def __deepcopy__(self, memo):
        # Immutable objects may safely be shared by deep copies.
        return self
# Internal indicator of special typing constructs.
# See __doc__ instance attribute for specific docs.
class _SpecialForm(_Final, _root=True):
    __slots__ = ('_name', '__doc__', '_getitem')

    def __init__(self, getitem):
        # *getitem* is the decorated function; it supplies the form's name,
        # docstring, and subscription behavior.
        self._getitem = getitem
        self._name = getitem.__name__
        self.__doc__ = getitem.__doc__

    def __getattr__(self, item):
        if item in {'__name__', '__qualname__'}:
            return self._name

        raise AttributeError(item)

    def __mro_entries__(self, bases):
        raise TypeError(f"Cannot subclass {self!r}")

    def __repr__(self):
        return 'typing.' + self._name

    def __reduce__(self):
        # Pickle by name: unpickling re-imports the module-level singleton.
        return self._name

    def __call__(self, *args, **kwds):
        raise TypeError(f"Cannot instantiate {self!r}")

    def __or__(self, other):
        return Union[self, other]

    def __ror__(self, other):
        return Union[other, self]

    def __instancecheck__(self, obj):
        raise TypeError(f"{self} cannot be used with isinstance()")

    def __subclasscheck__(self, cls):
        raise TypeError(f"{self} cannot be used with issubclass()")

    @_tp_cache
    def __getitem__(self, parameters):
        # Subscription (Form[params]) dispatches to the decorated function.
        return self._getitem(self, parameters)


class _LiteralSpecialForm(_SpecialForm, _root=True):
    def __getitem__(self, parameters):
        # Literal takes *parameters (typed caching happens in its handler).
        if not isinstance(parameters, tuple):
            parameters = (parameters,)
        return self._getitem(self, *parameters)
# The body of each @_SpecialForm function below implements that form's
# subscription: Form[params] reaches it via _SpecialForm.__getitem__.
@_SpecialForm
def Any(self, parameters):
    """Special type indicating an unconstrained type.

    - Any is compatible with every type.
    - Any assumed to have all methods.
    - All values assumed to be instances of Any.

    Note that all the above statements are true from the point of view of
    static type checkers. At runtime, Any should not be used with instance
    or class checks.
    """
    raise TypeError(f"{self} is not subscriptable")

@_SpecialForm
def NoReturn(self, parameters):
    """Special type indicating functions that never return.
    Example::

      from typing import NoReturn

      def stop() -> NoReturn:
          raise Exception('no way')

    This type is invalid in other positions, e.g., ``List[NoReturn]``
    will fail in static type checkers.
    """
    raise TypeError(f"{self} is not subscriptable")

@_SpecialForm
def ClassVar(self, parameters):
    """Special type construct to mark class variables.

    An annotation wrapped in ClassVar indicates that a given
    attribute is intended to be used as a class variable and
    should not be set on instances of that class. Usage::

      class Starship:
          stats: ClassVar[Dict[str, int]] = {} # class variable
          damage: int = 10                     # instance variable

    ClassVar accepts only types and cannot be further subscribed.

    Note that ClassVar is not a class itself, and should not
    be used with isinstance() or issubclass().
    """
    item = _type_check(parameters, f'{self} accepts only single type.')
    return _GenericAlias(self, (item,))

@_SpecialForm
def Final(self, parameters):
    """Special typing construct to indicate final names to type checkers.

    A final name cannot be re-assigned or overridden in a subclass.
    For example:

      MAX_SIZE: Final = 9000
      MAX_SIZE += 1  # Error reported by type checker

      class Connection:
          TIMEOUT: Final[int] = 10

      class FastConnector(Connection):
          TIMEOUT = 1  # Error reported by type checker

    There is no runtime checking of these properties.
    """
    item = _type_check(parameters, f'{self} accepts only single type.')
    return _GenericAlias(self, (item,))

@_SpecialForm
def Union(self, parameters):
    """Union type; Union[X, Y] means either X or Y.

    To define a union, use e.g. Union[int, str].  Details:
    - The arguments must be types and there must be at least one.
    - None as an argument is a special case and is replaced by
      type(None).
    - Unions of unions are flattened, e.g.::

        Union[Union[int, str], float] == Union[int, str, float]

    - Unions of a single argument vanish, e.g.::

        Union[int] == int  # The constructor actually returns int

    - Redundant arguments are skipped, e.g.::

        Union[int, str, int] == Union[int, str]

    - When comparing unions, the argument order is ignored, e.g.::

        Union[int, str] == Union[str, int]

    - You cannot subclass or instantiate a union.
    - You can use Optional[X] as a shorthand for Union[X, None].
    """
    if parameters == ():
        raise TypeError("Cannot take a Union of no types.")
    if not isinstance(parameters, tuple):
        parameters = (parameters,)
    msg = "Union[arg, ...]: each arg must be a type."
    parameters = tuple(_type_check(p, msg) for p in parameters)
    parameters = _remove_dups_flatten(parameters)
    # A single remaining member collapses to that member.
    if len(parameters) == 1:
        return parameters[0]
    # X | None displays as Optional[X].
    if len(parameters) == 2 and type(None) in parameters:
        return _UnionGenericAlias(self, parameters, name="Optional")
    return _UnionGenericAlias(self, parameters)

@_SpecialForm
def Optional(self, parameters):
    """Optional type.

    Optional[X] is equivalent to Union[X, None].
    """
    arg = _type_check(parameters, f"{self} requires a single type.")
    return Union[arg, type(None)]

@_LiteralSpecialForm
@_tp_cache(typed=True)
def Literal(self, *parameters):
    """Special typing form to define literal types (a.k.a. value types).

    This form can be used to indicate to type checkers that the corresponding
    variable or function parameter has a value equivalent to the provided
    literal (or one of several literals):

      def validate_simple(data: Any) -> Literal[True]:  # always returns True
          ...

      MODE = Literal['r', 'rb', 'w', 'wb']
      def open_helper(file: str, mode: MODE) -> str:
          ...

      open_helper('/some/path', 'r')  # Passes type check
      open_helper('/other/path', 'typo')  # Error in type checker

    Literal[...] cannot be subclassed. At runtime, an arbitrary value
    is allowed as type argument to Literal[...], but type checkers may
    impose restrictions.
    """
    # There is no '_type_check' call because arguments to Literal[...] are
    # values, not types.
    parameters = _flatten_literal_params(parameters)

    try:
        parameters = tuple(p for p, _ in _deduplicate(list(_value_and_type_iter(parameters))))
    except TypeError:  # unhashable parameters
        pass

    return _LiteralGenericAlias(self, parameters)

@_SpecialForm
def TypeAlias(self, parameters):
    """Special marker indicating that an assignment should
    be recognized as a proper type alias definition by type
    checkers.

    For example::

        Predicate: TypeAlias = Callable[..., bool]

    It's invalid when used anywhere except as in the example above.
    """
    raise TypeError(f"{self} is not subscriptable")

@_SpecialForm
def Concatenate(self, parameters):
    """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
    higher order function which adds, removes or transforms parameters of a
    callable.

    For example::

       Callable[Concatenate[int, P], int]

    See PEP 612 for detailed information.
    """
    if parameters == ():
        raise TypeError("Cannot take a Concatenate of no types.")
    if not isinstance(parameters, tuple):
        parameters = (parameters,)
    # PEP 612: the tail of Concatenate must always be a ParamSpec.
    if not isinstance(parameters[-1], ParamSpec):
        raise TypeError("The last parameter to Concatenate should be a "
                        "ParamSpec variable.")
    msg = "Concatenate[arg, ...]: each arg must be a type."
    parameters = (*(_type_check(p, msg) for p in parameters[:-1]), parameters[-1])
    return _ConcatenateGenericAlias(self, parameters)

@_SpecialForm
def TypeGuard(self, parameters):
    """Special typing form used to annotate the return type of a user-defined
    type guard function.  ``TypeGuard`` only accepts a single type argument.
    At runtime, functions marked this way should return a boolean.

    ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
    type checkers to determine a more precise type of an expression within a
    program's code flow.  Usually type narrowing is done by analyzing
    conditional code flow and applying the narrowing to a block of code.  The
    conditional expression here is sometimes referred to as a "type guard".

    Sometimes it would be convenient to use a user-defined boolean function
    as a type guard.  Such a function should use ``TypeGuard[...]`` as its
    return type to alert static type checkers to this intention.

    Using  ``-> TypeGuard`` tells the static type checker that for a given
    function:

    1. The return value is a boolean.
    2. If the return value is ``True``, the type of its argument
    is the type inside ``TypeGuard``.

       For example::

          def is_str(val: Union[str, float]):
              # "isinstance" type guard
              if isinstance(val, str):
                  # Type of ``val`` is narrowed to ``str``
                  ...
              else:
                  # Else, type of ``val`` is narrowed to ``float``.
                  ...

    Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
    form of ``TypeA`` (it can even be a wider form) and this may lead to
    type-unsafe results.  The main reason is to allow for things like
    narrowing ``List[object]`` to ``List[str]`` even though the latter is not
    a subtype of the former, since ``List`` is invariant.  The responsibility of
    writing type-safe type guards is left to the user.

    ``TypeGuard`` also works with type variables.  For more information, see
    PEP 647 (User-Defined Type Guards).
    """
    item = _type_check(parameters, f'{self} accepts only single type.')
    return _GenericAlias(self, (item,))
class ForwardRef(_Final, _root=True):
    """Internal wrapper to hold a forward reference."""

    __slots__ = ('__forward_arg__', '__forward_code__',
                 '__forward_evaluated__', '__forward_value__',
                 '__forward_is_argument__', '__forward_is_class__',
                 '__forward_module__')

    def __init__(self, arg, is_argument=True, module=None, *, is_class=False):
        if not isinstance(arg, str):
            raise TypeError(f"Forward reference must be a string -- got {arg!r}")
        # Compiling eagerly both validates the string is a single expression
        # and lets _evaluate simply eval() the code object later.
        try:
            code = compile(arg, '<string>', 'eval')
        except SyntaxError:
            raise SyntaxError(f"Forward reference must be an expression -- got {arg!r}")
        self.__forward_arg__ = arg
        self.__forward_code__ = code
        self.__forward_evaluated__ = False
        self.__forward_value__ = None
        self.__forward_is_argument__ = is_argument
        self.__forward_is_class__ = is_class
        # Module the reference was written in; its globals are used when
        # resolving (see _evaluate).
        self.__forward_module__ = module

    def _evaluate(self, globalns, localns, recursive_guard):
        # A reference already on the guard set is being resolved further up
        # the stack: return self to break the recursion.
        if self.__forward_arg__ in recursive_guard:
            return self
        # Re-evaluate when not yet resolved, or when given a distinct local
        # namespace (a cached value may not be valid for other locals).
        if not self.__forward_evaluated__ or localns is not globalns:
            if globalns is None and localns is None:
                globalns = localns = {}
            elif globalns is None:
                globalns = localns
            elif localns is None:
                localns = globalns
            if self.__forward_module__ is not None:
                globalns = getattr(
                    sys.modules.get(self.__forward_module__, None), '__dict__', globalns
                )
            type_ = _type_check(
                eval(self.__forward_code__, globalns, localns),
                "Forward references must evaluate to types.",
                is_argument=self.__forward_is_argument__,
                allow_special_forms=self.__forward_is_class__,
            )
            # Recurse with this name added to the guard so self-references
            # terminate.
            self.__forward_value__ = _eval_type(
                type_, globalns, localns, recursive_guard | {self.__forward_arg__}
            )
            self.__forward_evaluated__ = True
        return self.__forward_value__

    def __eq__(self, other):
        if not isinstance(other, ForwardRef):
            return NotImplemented
        # Compare resolved values only when both sides have been evaluated.
        if self.__forward_evaluated__ and other.__forward_evaluated__:
            return (self.__forward_arg__ == other.__forward_arg__ and
                    self.__forward_value__ == other.__forward_value__)
        return self.__forward_arg__ == other.__forward_arg__

    def __hash__(self):
        # Hash only on the source string, consistent with the unevaluated
        # branch of __eq__.
        return hash(self.__forward_arg__)

    def __or__(self, other):
        return Union[self, other]

    def __ror__(self, other):
        return Union[other, self]

    def __repr__(self):
        return f'ForwardRef({self.__forward_arg__!r})'
class _TypeVarLike:
    """Mixin for TypeVar-like types (TypeVar and ParamSpec)."""

    def __init__(self, bound, covariant, contravariant):
        """Used to setup TypeVars and ParamSpec's bound, covariant and
        contravariant attributes.
        """
        if covariant and contravariant:
            raise ValueError("Bivariant types are not supported.")
        self.__covariant__ = bool(covariant)
        self.__contravariant__ = bool(contravariant)
        if bound:
            self.__bound__ = _type_check(bound, "Bound must be a type.")
        else:
            self.__bound__ = None

    def __or__(self, right):
        return Union[self, right]

    def __ror__(self, left):
        return Union[left, self]

    def __repr__(self):
        # Variance prefix: '+' covariant, '-' contravariant, '~' invariant.
        if self.__covariant__:
            prefix = '+'
        elif self.__contravariant__:
            prefix = '-'
        else:
            prefix = '~'
        return prefix + self.__name__

    def __reduce__(self):
        # Pickle by name; only globally-defined variables round-trip.
        return self.__name__
class TypeVar( _Final, _Immutable, _TypeVarLike, _root=True):
    """Type variable.
    Usage::
      T = TypeVar('T')  # Can be anything
      A = TypeVar('A', str, bytes)  # Must be str or bytes
    Type variables exist primarily for the benefit of static type
    checkers. They serve as the parameters for generic types as well
    as for generic function definitions. See class Generic for more
    information on generic types. Generic functions work as follows:
      def repeat(x: T, n: int) -> List[T]:
          '''Return a list containing n references to x.'''
          return [x]*n
      def longest(x: A, y: A) -> A:
          '''Return the longest of two strings.'''
          return x if len(x) >= len(y) else y
    The latter example's signature is essentially the overloading
    of (str, str) -> str and (bytes, bytes) -> bytes. Also note
    that if the arguments are instances of some subclass of str,
    the return type is still plain str.
    At runtime, isinstance(x, T) and issubclass(C, T) will raise TypeError.
    Type variables defined with covariant=True or contravariant=True
    can be used to declare covariant or contravariant generic types.
    See PEP 484 for more details. By default generic types are invariant
    in all type variables.
    Type variables can be introspected. e.g.:
      T.__name__ == 'T'
      T.__constraints__ == ()
      T.__covariant__ == False
      T.__contravariant__ == False
      A.__constraints__ == (str, bytes)
    Note that only type variables defined in global scope can be pickled.
    """
    def __init__(self, name, *constraints, bound=None,
                 covariant=False, contravariant=False):
        self.__name__ = name
        # _TypeVarLike handles bound/variance validation.
        super().__init__(bound, covariant, contravariant)
        if constraints and bound is not None:
            raise TypeError("Constraints cannot be combined with bound=...")
        if constraints and len(constraints) == 1:
            raise TypeError("A single constraint is not allowed")
        msg = "TypeVar(name, constraint, ...): constraints must be types."
        self.__constraints__ = tuple(_type_check(t, msg) for t in constraints)
        # Attribute the variable to the module that defined it (needed for
        # pickling by name and for nicer reprs).
        def_mod = _caller()
        if def_mod != 'typing':
            self.__module__ = def_mod
class ParamSpecArgs(_Final, _Immutable, _root=True):
    """The args for a ParamSpec object.
    Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
    ParamSpecArgs objects have a reference back to their ParamSpec:
        P.args.__origin__ is P
    This type is meant for runtime introspection and has no special meaning to
    static type checkers.
    """
    def __init__(self, origin):
        # Back-reference to the owning ParamSpec.
        self.__origin__ = origin
    def __repr__(self):
        return f"{self.__origin__.__name__}.args"
class ParamSpecKwargs(_Final, _Immutable, _root=True):
    """The kwargs for a ParamSpec object.
    Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
    ParamSpecKwargs objects have a reference back to their ParamSpec:
        P.kwargs.__origin__ is P
    This type is meant for runtime introspection and has no special meaning to
    static type checkers.
    """
    def __init__(self, origin):
        # Back-reference to the owning ParamSpec.
        self.__origin__ = origin
    def __repr__(self):
        return f"{self.__origin__.__name__}.kwargs"
class ParamSpec(_Final, _Immutable, _TypeVarLike, _root=True):
    """Parameter specification variable.
    Usage::
      P = ParamSpec('P')
    Parameter specification variables exist primarily for the benefit of static
    type checkers. They are used to forward the parameter types of one
    callable to another callable, a pattern commonly found in higher order
    functions and decorators. They are only valid when used in ``Concatenate``,
    or as the first argument to ``Callable``, or as parameters for user-defined
    Generics. See class Generic for more information on generic types. An
    example for annotating a decorator::
      T = TypeVar('T')
      P = ParamSpec('P')
      def add_logging(f: Callable[P, T]) -> Callable[P, T]:
          '''A type-safe decorator to add logging to a function.'''
          def inner(*args: P.args, **kwargs: P.kwargs) -> T:
              logging.info(f'{f.__name__} was called')
              return f(*args, **kwargs)
          return inner
      @add_logging
      def add_two(x: float, y: float) -> float:
          '''Add two numbers together.'''
          return x + y
    Parameter specification variables defined with covariant=True or
    contravariant=True can be used to declare covariant or contravariant
    generic types. These keyword arguments are valid, but their actual semantics
    are yet to be decided. See PEP 612 for details.
    Parameter specification variables can be introspected. e.g.:
      P.__name__ == 'P'
      P.__bound__ == None
      P.__covariant__ == False
      P.__contravariant__ == False
    Note that only parameter specification variables defined in global scope can
    be pickled.
    """
    # P.args / P.kwargs are introspection handles pointing back at P
    # (see ParamSpecArgs / ParamSpecKwargs above).
    @property
    def args(self):
        return ParamSpecArgs(self)
    @property
    def kwargs(self):
        return ParamSpecKwargs(self)
    def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
        self.__name__ = name
        # _TypeVarLike handles bound/variance validation.
        super().__init__(bound, covariant, contravariant)
        # Attribute the variable to the defining module (for pickling by name).
        def_mod = _caller()
        if def_mod != 'typing':
            self.__module__ = def_mod
def _is_dunder(attr):
return attr.startswith('__') and attr.endswith('__')
class _BaseGenericAlias(_Final, _root=True):
    """The central part of internal API.
    This represents a generic version of type 'origin' with type arguments 'params'.
    There are two kind of these aliases: user defined and special. The special ones
    are wrappers around builtin collections and ABCs in collections.abc. These must
    have 'name' always set. If 'inst' is False, then the alias can't be instantiated,
    this is used by e.g. typing.List and typing.Dict.
    """
    def __init__(self, origin, *, inst=True, name=None):
        self._inst = inst
        self._name = name
        self.__origin__ = origin
        self.__slots__ = None # This is not documented.
    def __call__(self, *args, **kwargs):
        # Calling the alias instantiates the origin class and, when possible,
        # records the parameterized alias on the instance.
        if not self._inst:
            raise TypeError(f"Type {self._name} cannot be instantiated; "
                            f"use {self.__origin__.__name__}() instead")
        result = self.__origin__(*args, **kwargs)
        try:
            result.__orig_class__ = self
        except AttributeError:
            # E.g. instances of classes with __slots__ reject new attributes.
            pass
        return result
    def __mro_entries__(self, bases):
        # When used as a base class, substitute the real origin class and,
        # if no generic base follows this alias, also inject Generic.
        res = []
        if self.__origin__ not in bases:
            res.append(self.__origin__)
        i = bases.index(self)
        for b in bases[i+1:]:
            if isinstance(b, _BaseGenericAlias) or issubclass(b, Generic):
                break
        else:
            res.append(Generic)
        return tuple(res)
    def __getattr__(self, attr):
        if attr in {'__name__', '__qualname__'}:
            return self._name or self.__origin__.__name__
        # We are careful for copy and pickle.
        # Also for simplicity we just don't relay all dunder names
        if '__origin__' in self.__dict__ and not _is_dunder(attr):
            return getattr(self.__origin__, attr)
        raise AttributeError(attr)
    def __setattr__(self, attr, val):
        # Alias-private bookkeeping lives on the alias itself; everything
        # else is forwarded to the origin class.
        if _is_dunder(attr) or attr in {'_name', '_inst', '_nparams',
                                        '_typevar_types', '_paramspec_tvars'}:
            super().__setattr__(attr, val)
        else:
            setattr(self.__origin__, attr, val)
    def __instancecheck__(self, obj):
        return self.__subclasscheck__(type(obj))
    def __subclasscheck__(self, cls):
        # Subscripted generics are not usable with isinstance()/issubclass();
        # _SpecialGenericAlias relaxes this for bare aliases.
        raise TypeError("Subscripted generics cannot be used with"
                        " class and instance checks")
    def __dir__(self):
        return list(set(super().__dir__()
                + [attr for attr in dir(self.__origin__) if not _is_dunder(attr)]))
# Special typing constructs Union, Optional, Generic, Callable and Tuple
# use three special attributes for internal bookkeeping of generic types:
# * __parameters__ is a tuple of unique free type parameters of a generic
# type, for example, Dict[T, T].__parameters__ == (T,);
# * __origin__ keeps a reference to a type that was subscripted,
# e.g., Union[T, int].__origin__ == Union, or the non-generic version of
# the type.
# * __args__ is a tuple of all arguments used in subscripting,
# e.g., Dict[T, int].__args__ == (T, int).
class _GenericAlias(_BaseGenericAlias, _root=True):
    # The type of parameterized generics.
    #
    # That is, for example, `type(List[int])` is `_GenericAlias`.
    #
    # Objects which are instances of this class include:
    # * Parameterized container types, e.g. `Tuple[int]`, `List[int]`.
    #  * Note that native container types, e.g. `tuple`, `list`, use
    #    `types.GenericAlias` instead.
    # * Parameterized classes:
    #     T = TypeVar('T')
    #     class C(Generic[T]): pass
    #     # C[int] is a _GenericAlias
    # * `Callable` aliases, generic `Callable` aliases, and
    #   parameterized `Callable` aliases:
    #     T = TypeVar('T')
    #     # _CallableGenericAlias inherits from _GenericAlias.
    #     A = Callable[[], None]  # _CallableGenericAlias
    #     B = Callable[[T], None]  # _CallableGenericAlias
    #     C = B[int]  # _CallableGenericAlias
    # * Parameterized `Final`, `ClassVar` and `TypeGuard`:
    #     # All _GenericAlias
    #     Final[int]
    #     ClassVar[float]
    #     TypeGuard[bool]
    def __init__(self, origin, args, *, inst=True, name=None,
                 _typevar_types=TypeVar,
                 _paramspec_tvars=False):
        super().__init__(origin, inst=inst, name=name)
        if not isinstance(args, tuple):
            args = (args,)
        # Map the internal placeholders back to their user-visible forms.
        self.__args__ = tuple(... if a is _TypingEllipsis else
                              () if a is _TypingEmpty else
                              a for a in args)
        self.__parameters__ = _collect_type_vars(args, typevar_types=_typevar_types)
        self._typevar_types = _typevar_types
        self._paramspec_tvars = _paramspec_tvars
        if not name:
            self.__module__ = origin.__module__
    def __eq__(self, other):
        if not isinstance(other, _GenericAlias):
            return NotImplemented
        return (self.__origin__ == other.__origin__
                and self.__args__ == other.__args__)
    def __hash__(self):
        return hash((self.__origin__, self.__args__))
    # Support PEP 604 union syntax on parameterized aliases.
    def __or__(self, right):
        return Union[self, right]
    def __ror__(self, left):
        return Union[left, self]
    @_tp_cache
    def __getitem__(self, args):
        # Parameterizes an already-parameterized object.
        #
        # For example, we arrive here doing something like:
        #   T1 = TypeVar('T1')
        #   T2 = TypeVar('T2')
        #   T3 = TypeVar('T3')
        #   class A(Generic[T1]): pass
        #   B = A[T2]  # B is a _GenericAlias
        #   C = B[T3]  # Invokes _GenericAlias.__getitem__
        #
        # We also arrive here when parameterizing a generic `Callable` alias:
        #   T = TypeVar('T')
        #   C = Callable[[T], None]
        #   C[int]  # Invokes _GenericAlias.__getitem__
        if self.__origin__ in (Generic, Protocol):
            # Can't subscript Generic[...] or Protocol[...].
            raise TypeError(f"Cannot subscript already-subscripted {self}")
        # Preprocess `args`.
        if not isinstance(args, tuple):
            args = (args,)
        args = tuple(_type_convert(p) for p in args)
        if (self._paramspec_tvars
                and any(isinstance(t, ParamSpec) for t in self.__parameters__)):
            args = _prepare_paramspec_params(self, args)
        else:
            _check_generic(self, args, len(self.__parameters__))
        new_args = self._determine_new_args(args)
        r = self.copy_with(new_args)
        return r
    def _determine_new_args(self, args):
        # Determines new __args__ for __getitem__.
        #
        # For example, suppose we had:
        #   T1 = TypeVar('T1')
        #   T2 = TypeVar('T2')
        #   class A(Generic[T1, T2]): pass
        #   T3 = TypeVar('T3')
        #   B = A[int, T3]
        #   C = B[str]
        # `B.__args__` is `(int, T3)`, so `C.__args__` should be `(int, str)`.
        # Unfortunately, this is harder than it looks, because if `T3` is
        # anything more exotic than a plain `TypeVar`, we need to consider
        # edge cases.
        # In the example above, this would be {T3: str}
        new_arg_by_param = dict(zip(self.__parameters__, args))
        new_args = []
        for old_arg in self.__args__:
            if isinstance(old_arg, ParamSpec):
                new_arg = new_arg_by_param[old_arg]
                if not _is_param_expr(new_arg):
                    raise TypeError(f"Expected a list of types, an ellipsis, "
                                    f"ParamSpec, or Concatenate. Got {new_arg}")
            elif isinstance(old_arg, self._typevar_types):
                new_arg = new_arg_by_param[old_arg]
            elif isinstance(old_arg, (_GenericAlias, GenericAlias, types.UnionType)):
                # Recursively substitute into nested generics, e.g.
                # List[T][int] inside a larger alias.
                subparams = old_arg.__parameters__
                if not subparams:
                    new_arg = old_arg
                else:
                    subargs = tuple(new_arg_by_param[x] for x in subparams)
                    new_arg = old_arg[subargs]
            else:
                new_arg = old_arg
            if self.__origin__ == collections.abc.Callable and isinstance(new_arg, tuple):
                # Consider the following `Callable`.
                #   C = Callable[[int], str]
                # Here, `C.__args__` should be (int, str) - NOT ([int], str).
                # That means that if we had something like...
                #   P = ParamSpec('P')
                #   T = TypeVar('T')
                #   C = Callable[P, T]
                #   D = C[[int, str], float]
                # ...we need to be careful; `new_args` should end up as
                # `(int, str, float)` rather than `([int, str], float)`.
                new_args.extend(new_arg)
            else:
                new_args.append(new_arg)
        return tuple(new_args)
    def copy_with(self, args):
        # Fresh alias with the same origin/name but new arguments.
        return self.__class__(self.__origin__, args, name=self._name, inst=self._inst)
    def __repr__(self):
        if self._name:
            name = 'typing.' + self._name
        else:
            name = _type_repr(self.__origin__)
        args = ", ".join([_type_repr(a) for a in self.__args__])
        return f'{name}[{args}]'
    def __reduce__(self):
        # Pickle as origin[args] so unpickling re-subscribes the generic.
        if self._name:
            origin = globals()[self._name]
        else:
            origin = self.__origin__
        args = tuple(self.__args__)
        if len(args) == 1 and not isinstance(args[0], tuple):
            args, = args
        return operator.getitem, (origin, args)
    def __mro_entries__(self, bases):
        if isinstance(self.__origin__, _SpecialForm):
            raise TypeError(f"Cannot subclass {self!r}")
        if self._name:  # generic version of an ABC or built-in class
            return super().__mro_entries__(bases)
        if self.__origin__ is Generic:
            if Protocol in bases:
                return ()
            i = bases.index(self)
            for b in bases[i+1:]:
                if isinstance(b, _BaseGenericAlias) and b is not self:
                    return ()
        return (self.__origin__,)
# _nparams is the number of accepted parameters, e.g. 0 for Hashable,
# 1 for List and 2 for Dict. It may be -1 if variable number of
# parameters are accepted (needs custom __getitem__).
class _SpecialGenericAlias(_BaseGenericAlias, _root=True):
    # The un-parameterized aliases for standard classes, e.g. typing.List,
    # typing.Dict.  Subscripting one produces a regular _GenericAlias.
    def __init__(self, origin, nparams, *, inst=True, name=None):
        if name is None:
            name = origin.__name__
        super().__init__(origin, inst=inst, name=name)
        self._nparams = nparams
        if origin.__module__ == 'builtins':
            self.__doc__ = f'A generic version of {origin.__qualname__}.'
        else:
            self.__doc__ = f'A generic version of {origin.__module__}.{origin.__qualname__}.'
    @_tp_cache
    def __getitem__(self, params):
        if not isinstance(params, tuple):
            params = (params,)
        msg = "Parameters to generic types must be types."
        params = tuple(_type_check(p, msg) for p in params)
        _check_generic(self, params, self._nparams)
        return self.copy_with(params)
    def copy_with(self, params):
        return _GenericAlias(self.__origin__, params,
                             name=self._name, inst=self._inst)
    def __repr__(self):
        return 'typing.' + self._name
    def __subclasscheck__(self, cls):
        # Bare aliases (no subscript) may be used with issubclass();
        # defer to the origin classes.  Subscripted generics still raise
        # via the base class.
        if isinstance(cls, _SpecialGenericAlias):
            return issubclass(cls.__origin__, self.__origin__)
        if not isinstance(cls, _GenericAlias):
            return issubclass(cls, self.__origin__)
        return super().__subclasscheck__(cls)
    def __reduce__(self):
        # Pickle by name, e.g. 'List' -> typing.List on unpickle.
        return self._name
    def __or__(self, right):
        return Union[self, right]
    def __ror__(self, left):
        return Union[left, self]
class _CallableGenericAlias(_GenericAlias, _root=True):
    # Alias for parameterized Callable; __args__ stores the flattened form
    # (arg1, ..., argN, result).
    def __repr__(self):
        assert self._name == 'Callable'
        args = self.__args__
        if len(args) == 2 and _is_param_expr(args[0]):
            # Callable[P, T] / Callable[..., T]: generic repr is adequate.
            return super().__repr__()
        # Re-nest flattened args into the Callable[[...], result] form.
        return (f'typing.Callable'
                f'[[{", ".join([_type_repr(a) for a in args[:-1]])}], '
                f'{_type_repr(args[-1])}]')
    def __reduce__(self):
        args = self.__args__
        if not (len(args) == 2 and _is_param_expr(args[0])):
            # Re-nest so Callable[args] reconstructs this alias on unpickle.
            args = list(args[:-1]), args[-1]
        return operator.getitem, (Callable, args)
class _CallableType(_SpecialGenericAlias, _root=True):
    # The alias behind bare `typing.Callable`; subscripting it produces a
    # _CallableGenericAlias.
    def copy_with(self, params):
        return _CallableGenericAlias(self.__origin__, params,
                                     name=self._name, inst=self._inst,
                                     _typevar_types=(TypeVar, ParamSpec),
                                     _paramspec_tvars=True)
    def __getitem__(self, params):
        if not isinstance(params, tuple) or len(params) != 2:
            raise TypeError("Callable must be used as "
                            "Callable[[arg, ...], result].")
        args, result = params
        # This relaxes what args can be on purpose to allow things like
        # PEP 612 ParamSpec. Responsibility for whether a user is using
        # Callable[...] properly is deferred to static type checkers.
        if isinstance(args, list):
            # Normalize the arg list to a hashable tuple so __getitem_inner__
            # can be cached with _tp_cache.
            params = (tuple(args), result)
        else:
            params = (args, result)
        return self.__getitem_inner__(params)
    @_tp_cache
    def __getitem_inner__(self, params):
        args, result = params
        msg = "Callable[args, result]: result must be a type."
        result = _type_check(result, msg)
        if args is Ellipsis:
            # Callable[..., result] - placeholder survives caching.
            return self.copy_with((_TypingEllipsis, result))
        if not isinstance(args, tuple):
            args = (args,)
        args = tuple(_type_convert(arg) for arg in args)
        # Store flattened as (arg1, ..., argN, result).
        params = args + (result,)
        return self.copy_with(params)
class _TupleType(_SpecialGenericAlias, _root=True):
    # The alias behind bare `typing.Tuple`.
    @_tp_cache
    def __getitem__(self, params):
        if params == ():
            # Tuple[()] -- the empty tuple type.
            return self.copy_with((_TypingEmpty,))
        if not isinstance(params, tuple):
            params = (params,)
        if len(params) == 2 and params[1] is ...:
            # Tuple[t, ...] -- homogeneous variable-length tuple.
            msg = "Tuple[t, ...]: t must be a type."
            p = _type_check(params[0], msg)
            return self.copy_with((p, _TypingEllipsis))
        msg = "Tuple[t0, t1, ...]: each t must be a type."
        params = tuple(_type_check(p, msg) for p in params)
        return self.copy_with(params)
class _UnionGenericAlias(_GenericAlias, _root=True):
    def copy_with(self, params):
        # Re-subscript Union so flattening/deduplication is re-applied.
        return Union[params]
    def __eq__(self, other):
        if not isinstance(other, (_UnionGenericAlias, types.UnionType)):
            return NotImplemented
        # Order-insensitive: Union[int, str] == Union[str, int].
        return set(self.__args__) == set(other.__args__)
    def __hash__(self):
        return hash(frozenset(self.__args__))
    def __repr__(self):
        args = self.__args__
        if len(args) == 2:
            # Prefer the Optional[X] spelling for Union[X, None].
            if args[0] is type(None):
                return f'typing.Optional[{_type_repr(args[1])}]'
            elif args[1] is type(None):
                return f'typing.Optional[{_type_repr(args[0])}]'
        return super().__repr__()
    def __instancecheck__(self, obj):
        return self.__subclasscheck__(type(obj))
    def __subclasscheck__(self, cls):
        for arg in self.__args__:
            if issubclass(cls, arg):
                return True
        # NOTE(review): falls through returning None (falsy) when no arm
        # matches; callers rely only on truthiness.
    def __reduce__(self):
        func, (origin, args) = super().__reduce__()
        return func, (Union, args)
def _value_and_type_iter(parameters):
return ((p, type(p)) for p in parameters)
class _LiteralGenericAlias(_GenericAlias, _root=True):
    # Literal equality must distinguish values of different types
    # (e.g. Literal[0] != Literal[False]), hence the (value, type) pairs.
    def __eq__(self, other):
        if not isinstance(other, _LiteralGenericAlias):
            return NotImplemented
        return set(_value_and_type_iter(self.__args__)) == set(_value_and_type_iter(other.__args__))
    def __hash__(self):
        return hash(frozenset(_value_and_type_iter(self.__args__)))
class _ConcatenateGenericAlias(_GenericAlias, _root=True):
    # Runtime form of Concatenate[...] (PEP 612).
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs,
                         _typevar_types=(TypeVar, ParamSpec),
                         _paramspec_tvars=True)
    def copy_with(self, params):
        if isinstance(params[-1], (list, tuple)):
            # Substituting the trailing ParamSpec with a concrete parameter
            # list flattens the whole Concatenate into a plain tuple.
            return (*params[:-1], *params[-1])
        if isinstance(params[-1], _ConcatenateGenericAlias):
            # Nested Concatenate is merged into this one.
            params = (*params[:-1], *params[-1].__args__)
        elif not isinstance(params[-1], ParamSpec):
            raise TypeError("The last parameter to Concatenate should be a "
                            "ParamSpec variable.")
        return super().copy_with(params)
class Generic:
    """Abstract base class for generic types.
    A generic type is typically declared by inheriting from
    this class parameterized with one or more type variables.
    For example, a generic mapping type might be defined as::
      class Mapping(Generic[KT, VT]):
          def __getitem__(self, key: KT) -> VT:
              ...
          # Etc.
    This class can then be used as follows::
      def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:
          try:
              return mapping[key]
          except KeyError:
              return default
    """
    __slots__ = ()
    _is_protocol = False
    @_tp_cache
    def __class_getitem__(cls, params):
        if not isinstance(params, tuple):
            params = (params,)
        if not params and cls is not Tuple:
            raise TypeError(
                f"Parameter list to {cls.__qualname__}[...] cannot be empty")
        params = tuple(_type_convert(p) for p in params)
        if cls in (Generic, Protocol):
            # Generic and Protocol can only be subscripted with unique type variables.
            if not all(isinstance(p, (TypeVar, ParamSpec)) for p in params):
                raise TypeError(
                    f"Parameters to {cls.__name__}[...] must all be type variables "
                    f"or parameter specification variables.")
            if len(set(params)) != len(params):
                raise TypeError(
                    f"Parameters to {cls.__name__}[...] must all be unique")
        else:
            # Subscripting a regular Generic subclass.
            if any(isinstance(t, ParamSpec) for t in cls.__parameters__):
                params = _prepare_paramspec_params(cls, params)
            else:
                _check_generic(cls, params, len(cls.__parameters__))
        return _GenericAlias(cls, params,
                             _typevar_types=(TypeVar, ParamSpec),
                             _paramspec_tvars=True)
    def __init_subclass__(cls, *args, **kwargs):
        super().__init_subclass__(*args, **kwargs)
        tvars = []
        # Inheriting from plain (unsubscripted) Generic is an error,
        # except for Protocol itself.
        if '__orig_bases__' in cls.__dict__:
            error = Generic in cls.__orig_bases__
        else:
            error = Generic in cls.__bases__ and cls.__name__ != 'Protocol'
        if error:
            raise TypeError("Cannot inherit from plain Generic")
        if '__orig_bases__' in cls.__dict__:
            tvars = _collect_type_vars(cls.__orig_bases__, (TypeVar, ParamSpec))
            # Look for Generic[T1, ..., Tn].
            # If found, tvars must be a subset of it.
            # If not found, tvars is it.
            # Also check for and reject plain Generic,
            # and reject multiple Generic[...].
            gvars = None
            for base in cls.__orig_bases__:
                if (isinstance(base, _GenericAlias) and
                        base.__origin__ is Generic):
                    if gvars is not None:
                        raise TypeError(
                            "Cannot inherit from Generic[...] multiple types.")
                    gvars = base.__parameters__
            if gvars is not None:
                tvarset = set(tvars)
                gvarset = set(gvars)
                if not tvarset <= gvarset:
                    s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
                    s_args = ', '.join(str(g) for g in gvars)
                    raise TypeError(f"Some type variables ({s_vars}) are"
                                    f" not listed in Generic[{s_args}]")
                tvars = gvars
        cls.__parameters__ = tuple(tvars)
class _TypingEmpty:
    """Internal placeholder for () or []. Used by TupleMeta and CallableMeta
    to allow empty list/tuple in specific places, without allowing them
    to sneak in where prohibited.
    """
    # NOTE(review): TupleMeta/CallableMeta are historical names; in this file
    # the users are _TupleType (Tuple[()]) and _GenericAlias.__init__, which
    # maps this placeholder back to ().
class _TypingEllipsis:
    """Internal placeholder for ... (ellipsis)."""
    # Used for Callable[..., T] / Tuple[T, ...]; _GenericAlias.__init__
    # maps it back to the bare '...' in __args__.
# Attributes belonging to the typing machinery itself; never protocol members.
_TYPING_INTERNALS = ['__parameters__', '__orig_bases__', '__orig_class__',
                     '_is_protocol', '_is_runtime_protocol']
# Ordinary class-housekeeping dunders, likewise excluded.
_SPECIAL_NAMES = ['__abstractmethods__', '__annotations__', '__dict__', '__doc__',
                  '__init__', '__module__', '__new__', '__slots__',
                  '__subclasshook__', '__weakref__', '__class_getitem__']
# These special attributes will be not collected as protocol members.
EXCLUDED_ATTRIBUTES = _TYPING_INTERNALS + _SPECIAL_NAMES + ['_MutableMapping__marker']
def _get_protocol_attrs(cls):
    """Collect protocol members from a protocol class object.

    Includes names defined in any class dictionary along the MRO as well
    as names appearing only in annotations.  Special names (see
    EXCLUDED_ATTRIBUTES) and ABC bookkeeping ('_abc_*') are skipped.
    """
    attrs = set()
    for base in cls.__mro__[:-1]:  # object itself contributes nothing
        if base.__name__ in ('Protocol', 'Generic'):
            continue
        candidates = list(base.__dict__) + list(getattr(base, '__annotations__', {}))
        attrs.update(
            name for name in candidates
            if not name.startswith('_abc_') and name not in EXCLUDED_ATTRIBUTES
        )
    return attrs
def _is_callable_members_only(cls):
    # PEP 544 prohibits using issubclass() with protocols that have non-method members.
    for attr in _get_protocol_attrs(cls):
        if not callable(getattr(cls, attr, None)):
            return False
    return True
def _no_init_or_replace_init(self, *args, **kwargs):
    # Placeholder __init__ installed on Protocol subclasses: blocks direct
    # instantiation of protocol classes and, for concrete subclasses, finds
    # and installs the real __init__ from the MRO on first instantiation.
    cls = type(self)
    if cls._is_protocol:
        raise TypeError('Protocols cannot be instantiated')
    # Already using a custom `__init__`. No need to calculate correct
    # `__init__` to call. This can lead to RecursionError. See bpo-45121.
    if cls.__init__ is not _no_init_or_replace_init:
        return
    # Initially, `__init__` of a protocol subclass is set to `_no_init_or_replace_init`.
    # The first instantiation of the subclass will call `_no_init_or_replace_init` which
    # searches for a proper new `__init__` in the MRO. The new `__init__`
    # replaces the subclass' old `__init__` (ie `_no_init_or_replace_init`). Subsequent
    # instantiation of the protocol subclass will thus use the new
    # `__init__` and no longer call `_no_init_or_replace_init`.
    for base in cls.__mro__:
        init = base.__dict__.get('__init__', _no_init_or_replace_init)
        if init is not _no_init_or_replace_init:
            cls.__init__ = init
            break
    else:
        # should not happen
        cls.__init__ = object.__init__
    # Delegate this first call to the freshly installed __init__.
    cls.__init__(self, *args, **kwargs)
def _caller(depth=1, default='__main__'):
try:
return sys._getframe(depth + 1).f_globals.get('__name__', default)
except (AttributeError, ValueError): # For platforms without _getframe()
return None
def _allow_reckless_class_checks(depth=3):
    """Allow instance and class checks for special stdlib modules.
    The abc and functools modules indiscriminately call isinstance() and
    issubclass() on the whole MRO of a user class, which may contain protocols.
    """
    # None covers platforms where _caller() cannot inspect the stack at all.
    return _caller(depth) in {'abc', 'functools', None}
# Non-protocol stdlib ABCs that Protocol subclasses are allowed to inherit
# from anyway (checked in Protocol.__init_subclass__).
_PROTO_ALLOWLIST = {
    'collections.abc': [
        'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
        'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
    ],
    'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'],
}
class _ProtocolMeta(ABCMeta):
    # This metaclass is really unfortunate and exists only because of
    # the lack of __instancehook__.
    def __instancecheck__(cls, instance):
        # We need this method for situations where attributes are
        # assigned in __init__.
        if (
            getattr(cls, '_is_protocol', False) and
            not getattr(cls, '_is_runtime_protocol', False) and
            not _allow_reckless_class_checks(depth=2)
        ):
            raise TypeError("Instance and class checks can only be used with"
                            " @runtime_checkable protocols")
        # Nominal fast path: a genuine subclass instance always passes.
        if ((not getattr(cls, '_is_protocol', False) or
                _is_callable_members_only(cls)) and
                issubclass(instance.__class__, cls)):
            return True
        if cls._is_protocol:
            # Structural check: every protocol member must be present on
            # the instance.
            if all(hasattr(instance, attr) and
                    # All *methods* can be blocked by setting them to None.
                    (not callable(getattr(cls, attr, None)) or
                     getattr(instance, attr) is not None)
                    for attr in _get_protocol_attrs(cls)):
                return True
        return super().__instancecheck__(instance)
class Protocol(Generic, metaclass=_ProtocolMeta):
    """Base class for protocol classes.
    Protocol classes are defined as::
        class Proto(Protocol):
            def meth(self) -> int:
                ...
    Such classes are primarily used with static type checkers that recognize
    structural subtyping (static duck-typing), for example::
        class C:
            def meth(self) -> int:
                return 0
        def func(x: Proto) -> int:
            return x.meth()
        func(C())  # Passes static type check
    See PEP 544 for details. Protocol classes decorated with
    @typing.runtime_checkable act as simple-minded runtime protocols that check
    only the presence of given attributes, ignoring their type signatures.
    Protocol classes can be generic, they are defined as::
        class GenProto(Protocol[T]):
            def meth(self) -> T:
                ...
    """
    __slots__ = ()
    _is_protocol = True
    _is_runtime_protocol = False
    def __init_subclass__(cls, *args, **kwargs):
        super().__init_subclass__(*args, **kwargs)
        # Determine if this is a protocol or a concrete subclass.
        if not cls.__dict__.get('_is_protocol', False):
            cls._is_protocol = any(b is Protocol for b in cls.__bases__)
        # Set (or override) the protocol subclass hook.
        def _proto_hook(other):
            # issubclass() hook implementing the structural check.
            if not cls.__dict__.get('_is_protocol', False):
                return NotImplemented
            # First, perform various sanity checks.
            if not getattr(cls, '_is_runtime_protocol', False):
                if _allow_reckless_class_checks():
                    return NotImplemented
                raise TypeError("Instance and class checks can only be used with"
                                " @runtime_checkable protocols")
            if not _is_callable_members_only(cls):
                if _allow_reckless_class_checks():
                    return NotImplemented
                raise TypeError("Protocols with non-method members"
                                " don't support issubclass()")
            if not isinstance(other, type):
                # Same error message as for issubclass(1, int).
                raise TypeError('issubclass() arg 1 must be a class')
            # Second, perform the actual structural compatibility check.
            for attr in _get_protocol_attrs(cls):
                for base in other.__mro__:
                    # Check if the members appears in the class dictionary...
                    if attr in base.__dict__:
                        if base.__dict__[attr] is None:
                            return NotImplemented
                        break
                    # ...or in annotations, if it is a sub-protocol.
                    annotations = getattr(base, '__annotations__', {})
                    if (isinstance(annotations, collections.abc.Mapping) and
                            attr in annotations and
                            issubclass(other, Generic) and other._is_protocol):
                        break
                else:
                    return NotImplemented
            return True
        if '__subclasshook__' not in cls.__dict__:
            cls.__subclasshook__ = _proto_hook
        # We have nothing more to do for non-protocols...
        if not cls._is_protocol:
            return
        # ... otherwise check consistency of bases, and prohibit instantiation.
        for base in cls.__bases__:
            if not (base in (object, Generic) or
                    base.__module__ in _PROTO_ALLOWLIST and
                    base.__name__ in _PROTO_ALLOWLIST[base.__module__] or
                    issubclass(base, Generic) and base._is_protocol):
                raise TypeError('Protocols can only inherit from other'
                                ' protocols, got %r' % base)
        cls.__init__ = _no_init_or_replace_init
class _AnnotatedAlias(_GenericAlias, _root=True):
    """Runtime representation of an annotated type.
    At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
    with extra annotations. The alias behaves like a normal typing alias,
    instantiating is the same as instantiating the underlying type, binding
    it to types is also the same.
    """
    def __init__(self, origin, metadata):
        if isinstance(origin, _AnnotatedAlias):
            # Nested Annotated is flattened; metadata accumulates left-to-right.
            metadata = origin.__metadata__ + metadata
            origin = origin.__origin__
        super().__init__(origin, origin)
        self.__metadata__ = metadata
    def copy_with(self, params):
        # Only the underlying type may be substituted; metadata is kept.
        assert len(params) == 1
        new_type = params[0]
        return _AnnotatedAlias(new_type, self.__metadata__)
    def __repr__(self):
        return "typing.Annotated[{}, {}]".format(
            _type_repr(self.__origin__),
            ", ".join(repr(a) for a in self.__metadata__)
        )
    def __reduce__(self):
        return operator.getitem, (
            Annotated, (self.__origin__,) + self.__metadata__
        )
    def __eq__(self, other):
        if not isinstance(other, _AnnotatedAlias):
            return NotImplemented
        return (self.__origin__ == other.__origin__
                and self.__metadata__ == other.__metadata__)
    def __hash__(self):
        return hash((self.__origin__, self.__metadata__))
    def __getattr__(self, attr):
        if attr in {'__name__', '__qualname__'}:
            return 'Annotated'
        return super().__getattr__(attr)
class Annotated:
    """Add context specific metadata to a type.
    Example: Annotated[int, runtime_check.Unsigned] indicates to the
    hypothetical runtime_check module that this type is an unsigned int.
    Every other consumer of this type can ignore this metadata and treat
    this type as int.
    The first argument to Annotated must be a valid type.
    Details:
    - It's an error to call `Annotated` with less than two arguments.
    - Nested Annotated are flattened::
        Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
    - Instantiating an annotated type is equivalent to instantiating the
    underlying type::
        Annotated[C, Ann1](5) == C(5)
    - Annotated can be used as a generic type alias::
        Optimized = Annotated[T, runtime.Optimize()]
        Optimized[int] == Annotated[int, runtime.Optimize()]
        OptimizedList = Annotated[List[T], runtime.Optimize()]
        OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
    """
    __slots__ = ()
    def __new__(cls, *args, **kwargs):
        raise TypeError("Type Annotated cannot be instantiated.")
    @_tp_cache
    def __class_getitem__(cls, params):
        if not isinstance(params, tuple) or len(params) < 2:
            raise TypeError("Annotated[...] should be used "
                            "with at least two arguments (a type and an "
                            "annotation).")
        msg = "Annotated[t, ...]: t must be a type."
        # allow_special_forms permits e.g. Annotated[Optional[int], ...].
        origin = _type_check(params[0], msg, allow_special_forms=True)
        metadata = tuple(params[1:])
        return _AnnotatedAlias(origin, metadata)
    def __init_subclass__(cls, *args, **kwargs):
        raise TypeError(
            "Cannot subclass {}.Annotated".format(cls.__module__)
        )
def runtime_checkable(cls):
    """Mark a protocol class as a runtime protocol.
    Such protocol can be used with isinstance() and issubclass().
    Raise TypeError if applied to a non-protocol class.
    This allows a simple-minded structural check very similar to
    one trick ponies in collections.abc such as Iterable.
    For example::
        @runtime_checkable
        class Closable(Protocol):
            def close(self): ...
        assert isinstance(open('/some/file'), Closable)
    Warning: this will check only the presence of the required methods,
    not their type signatures!
    """
    # Only Protocol subclasses that are actually protocols may be marked.
    if not issubclass(cls, Generic) or not cls._is_protocol:
        raise TypeError('@runtime_checkable can be only applied to protocol classes,'
                        ' got %r' % cls)
    cls._is_runtime_protocol = True
    return cls
def cast(typ, val):
    """Return *val* unchanged, telling the type checker it has type *typ*.

    No runtime check of any kind is performed; the call is deliberately
    as cheap as possible.
    """
    return val
def _get_defaults(func):
"""Internal helper to extract the default arguments, by name."""
try:
code = func.__code__
except AttributeError:
# Some built-in functions don't have __code__, __defaults__, etc.
return {}
pos_count = code.co_argcount
arg_names = code.co_varnames
arg_names = arg_names[:pos_count]
defaults = func.__defaults__ or ()
kwdefaults = func.__kwdefaults__
res = dict(kwdefaults) if kwdefaults else {}
pos_offset = pos_count - len(defaults)
for name, value in zip(arg_names[pos_offset:], defaults):
assert name not in res
res[name] = value
return res
# Callable/module objects whose annotations may be introspected —
# presumably consumed by get_type_hints() below (definition runs past this
# view; confirm against the full module).
_allowed_types = (types.FunctionType, types.BuiltinFunctionType,
                  types.MethodType, types.ModuleType,
                  WrapperDescriptorType, MethodWrapperType, MethodDescriptorType)
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
    """Return type hints for an object.
    This is often the same as obj.__annotations__, but it handles
    forward references encoded as string literals, adds Optional[t] if a
    default value equal to None is set and recursively replaces all
    'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
    The argument may be a module, class, method, or function. The annotations
    are returned as a dictionary. For classes, annotations include also
    inherited members.
    TypeError is raised if the argument is not of a type that can contain
    annotations, and an empty dictionary is returned if no annotations are
    present.
    BEWARE -- the behavior of globalns and localns is counterintuitive
    (unless you are familiar with how eval() and exec() work). The
    search order is locals first, then globals.
    - If no dict arguments are passed, an attempt is made to use the
    globals from obj (or the respective module's globals for classes),
    and these are also used as the locals. If the object does not appear
    to have globals, an empty dictionary is used. For classes, the search
    order is globals first then locals.
    - If one dict argument is passed, it is used for both globals and
    locals.
    - If two dict arguments are passed, they specify globals and
    locals, respectively.
    """
    # Objects that explicitly opted out via @no_type_check expose no hints.
    if getattr(obj, '__no_type_check__', None):
        return {}
    # Classes require a special treatment.
    if isinstance(obj, type):
        hints = {}
        # Walk the MRO base-first so derived classes override inherited hints.
        for base in reversed(obj.__mro__):
            if globalns is None:
                # Evaluate each base's annotations in its own defining module.
                base_globals = getattr(sys.modules.get(base.__module__, None), '__dict__', {})
            else:
                base_globals = globalns
            ann = base.__dict__.get('__annotations__', {})
            if isinstance(ann, types.GetSetDescriptorType):
                # An unannotated class can pick up type.__annotations__
                # (a descriptor); treat that as "no annotations".
                ann = {}
            base_locals = dict(vars(base)) if localns is None else localns
            if localns is None and globalns is None:
                # This is surprising, but required. Before Python 3.10,
                # get_type_hints only evaluated the globalns of
                # a class. To maintain backwards compatibility, we reverse
                # the globalns and localns order so that eval() looks into
                # *base_globals* first rather than *base_locals*.
                # This only affects ForwardRefs.
                base_globals, base_locals = base_locals, base_globals
            for name, value in ann.items():
                if value is None:
                    # A bare None annotation means NoneType.
                    value = type(None)
                if isinstance(value, str):
                    value = ForwardRef(value, is_argument=False, is_class=True)
                value = _eval_type(value, base_globals, base_locals)
                hints[name] = value
        return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
    if globalns is None:
        if isinstance(obj, types.ModuleType):
            globalns = obj.__dict__
        else:
            nsobj = obj
            # Find globalns for the unwrapped object.
            while hasattr(nsobj, '__wrapped__'):
                nsobj = nsobj.__wrapped__
            globalns = getattr(nsobj, '__globals__', {})
        if localns is None:
            localns = globalns
    elif localns is None:
        localns = globalns
    hints = getattr(obj, '__annotations__', None)
    if hints is None:
        # Return empty annotations for something that _could_ have them.
        if isinstance(obj, _allowed_types):
            return {}
        else:
            raise TypeError('{!r} is not a module, class, method, '
                            'or function.'.format(obj))
    defaults = _get_defaults(obj)
    hints = dict(hints)
    for name, value in hints.items():
        if value is None:
            value = type(None)
        if isinstance(value, str):
            # class-level forward refs were handled above, this must be either
            # a module-level annotation or a function argument annotation
            value = ForwardRef(
                value,
                is_argument=not isinstance(obj, types.ModuleType),
                is_class=False,
            )
        value = _eval_type(value, globalns, localns)
        if name in defaults and defaults[name] is None:
            # Implicit Optional for parameters whose default is None.
            value = Optional[value]
        hints[name] = value
    return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
def _strip_annotations(t):
    """Recursively remove Annotated[...] wrappers from a type."""
    if isinstance(t, _AnnotatedAlias):
        # Unwrap Annotated[X, ...] and keep stripping inside X.
        return _strip_annotations(t.__origin__)
    if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
        stripped = tuple(_strip_annotations(arg) for arg in t.__args__)
        if stripped == t.__args__:
            # Nothing changed: keep the original object (identity matters
            # for caching).
            return t
        if isinstance(t, _GenericAlias):
            return t.copy_with(stripped)
        if isinstance(t, GenericAlias):
            return GenericAlias(t.__origin__, stripped)
        # types.UnionType: rebuild with the | operator.
        return functools.reduce(operator.or_, stripped)
    return t
def get_origin(tp):
"""Get the unsubscripted version of a type.
This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
and Annotated. Return None for unsupported types. Examples::
get_origin(Literal[42]) is Literal
get_origin(int) is None
get_origin(ClassVar[int]) is ClassVar
get_origin(Generic) is Generic
get_origin(Generic[T]) is Generic
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
get_origin(P.args) is P
"""
if isinstance(tp, _AnnotatedAlias):
return Annotated
if isinstance(tp, (_BaseGenericAlias, GenericAlias,
ParamSpecArgs, ParamSpecKwargs)):
return tp.__origin__
if tp is Generic:
return Generic
if isinstance(tp, types.UnionType):
return types.UnionType
return None
def get_args(tp):
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if isinstance(tp, _AnnotatedAlias):
return (tp.__origin__,) + tp.__metadata__
if isinstance(tp, (_GenericAlias, GenericAlias)):
res = tp.__args__
if (tp.__origin__ is collections.abc.Callable
and not (len(res) == 2 and _is_param_expr(res[0]))):
res = (list(res[:-1]), res[-1])
return res
if isinstance(tp, types.UnionType):
return tp.__args__
return ()
def is_typeddict(tp):
    """Return True when the annotation is a TypedDict class.

    For example::

        class Film(TypedDict):
            title: str
            year: int

        is_typeddict(Film)              # => True
        is_typeddict(Union[list, str])  # => False
    """
    # TypedDict classes are exactly the instances of _TypedDictMeta.
    return isinstance(tp, _TypedDictMeta)
def no_type_check(arg):
    """Decorator to indicate that annotations are not type hints.

    The argument must be a class or function; a class is processed
    recursively for all methods and nested classes defined in it (methods
    defined in superclasses or subclasses are untouched).  The function(s)
    or class(es) are mutated in place.
    """
    if isinstance(arg, type):
        members = dict(arg.__dict__)
        # Drop entries that merely alias the class itself or one of its
        # bases, so we don't recurse into them.
        for attr_name, attr_value in arg.__dict__.items():
            if attr_value in arg.__bases__ + (arg,):
                members.pop(attr_name)
        for member in members.values():
            if isinstance(member, types.FunctionType):
                member.__no_type_check__ = True
            if isinstance(member, type):
                no_type_check(member)
    try:
        arg.__no_type_check__ = True
    except TypeError:  # built-in classes
        pass
    return arg
def no_type_check_decorator(decorator):
    """Decorator to give another decorator the @no_type_check effect.

    The returned wrapper applies *decorator*, then marks whatever it
    produced with @no_type_check.
    """
    @functools.wraps(decorator)
    def wrapped_decorator(*args, **kwds):
        return no_type_check(decorator(*args, **kwds))
    return wrapped_decorator
def _overload_dummy(*args, **kwds):
    """Helper for @overload to raise when called."""
    # Every @overload-decorated stub shares this single placeholder
    # implementation; only the final non-@overload definition is callable.
    raise NotImplementedError(
        "You should not call an overloaded function. "
        "A series of @overload-decorated functions "
        "outside a stub module should always be followed "
        "by an implementation that is not @overload-ed.")
def overload(func):
    """Decorator for overloaded functions/methods.

    In a stub file, place two or more stub definitions for the same
    function in a row, each decorated with @overload::

        @overload
        def utf8(value: None) -> None: ...
        @overload
        def utf8(value: bytes) -> bytes: ...
        @overload
        def utf8(value: str) -> bytes: ...

    In a regular .py file, do the same but follow the stubs with an
    implementation that is *not* decorated with @overload::

        @overload
        def utf8(value: None) -> None: ...
        @overload
        def utf8(value: bytes) -> bytes: ...
        @overload
        def utf8(value: str) -> bytes: ...
        def utf8(value):
            ...  # implementation goes here
    """
    # The decorated stub is discarded; calling the placeholder raises.
    return _overload_dummy
def final(f):
    """A decorator to indicate final methods and final classes.

    Tells a type checker that the decorated method cannot be overridden
    and the decorated class cannot be subclassed::

        class Base:
            @final
            def done(self) -> None: ...
        class Sub(Base):
            def done(self) -> None:  # Error reported by type checker
                ...

        @final
        class Leaf: ...
        class Other(Leaf):  # Error reported by type checker
            ...

    There is no runtime enforcement; the decorator merely sets the
    ``__final__`` attribute to ``True`` to allow runtime introspection.
    """
    try:
        f.__final__ = True
    except (AttributeError, TypeError):
        # The marker is best-effort: objects with __slots__ or read-only
        # attributes raise AttributeError, builtin classes raise TypeError.
        # Skip silently in both cases.
        pass
    return f
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = TypeVar('T') # Any type.
KT = TypeVar('KT') # Key type.
VT = TypeVar('VT') # Value type.
T_co = TypeVar('T_co', covariant=True) # Any type covariant containers.
V_co = TypeVar('V_co', covariant=True) # Any type covariant containers.
VT_co = TypeVar('VT_co', covariant=True) # Value type covariant containers.
T_contra = TypeVar('T_contra', contravariant=True) # Ditto contravariant.
# Internal type variable used for Type[].
CT_co = TypeVar('CT_co', covariant=True, bound=type)
# A useful type variable with constraints. This represents string types.
# (This one *is* for export!)
AnyStr = TypeVar('AnyStr', bytes, str)
# Various ABCs mimicking those in collections.abc.
_alias = _SpecialGenericAlias
Hashable = _alias(collections.abc.Hashable, 0) # Not generic.
Awaitable = _alias(collections.abc.Awaitable, 1)
Coroutine = _alias(collections.abc.Coroutine, 3)
AsyncIterable = _alias(collections.abc.AsyncIterable, 1)
AsyncIterator = _alias(collections.abc.AsyncIterator, 1)
Iterable = _alias(collections.abc.Iterable, 1)
Iterator = _alias(collections.abc.Iterator, 1)
Reversible = _alias(collections.abc.Reversible, 1)
Sized = _alias(collections.abc.Sized, 0) # Not generic.
Container = _alias(collections.abc.Container, 1)
Collection = _alias(collections.abc.Collection, 1)
Callable = _CallableType(collections.abc.Callable, 2)
Callable.__doc__ = \
"""Callable type; Callable[[int], str] is a function of (int) -> str.
The subscription syntax must always be used with exactly two
values: the argument list and the return type. The argument list
must be a list of types or ellipsis; the return type must be a single type.
There is no syntax to indicate optional or keyword arguments,
such function types are rarely used as callback types.
"""
AbstractSet = _alias(collections.abc.Set, 1, name='AbstractSet')
MutableSet = _alias(collections.abc.MutableSet, 1)
# NOTE: Mapping is only covariant in the value type.
Mapping = _alias(collections.abc.Mapping, 2)
MutableMapping = _alias(collections.abc.MutableMapping, 2)
Sequence = _alias(collections.abc.Sequence, 1)
MutableSequence = _alias(collections.abc.MutableSequence, 1)
ByteString = _alias(collections.abc.ByteString, 0) # Not generic
# Tuple accepts variable number of parameters.
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
Tuple.__doc__ = \
"""Tuple type; Tuple[X, Y] is the cross-product type of X and Y.
Example: Tuple[T1, T2] is a tuple of two elements corresponding
to type variables T1 and T2. Tuple[int, float, str] is a tuple
of an int, a float and a string.
To specify a variable-length tuple of homogeneous type, use Tuple[T, ...].
"""
List = _alias(list, 1, inst=False, name='List')
Deque = _alias(collections.deque, 1, name='Deque')
Set = _alias(set, 1, inst=False, name='Set')
FrozenSet = _alias(frozenset, 1, inst=False, name='FrozenSet')
MappingView = _alias(collections.abc.MappingView, 1)
KeysView = _alias(collections.abc.KeysView, 1)
ItemsView = _alias(collections.abc.ItemsView, 2)
ValuesView = _alias(collections.abc.ValuesView, 1)
ContextManager = _alias(contextlib.AbstractContextManager, 1, name='ContextManager')
AsyncContextManager = _alias(contextlib.AbstractAsyncContextManager, 1, name='AsyncContextManager')
Dict = _alias(dict, 2, inst=False, name='Dict')
DefaultDict = _alias(collections.defaultdict, 2, name='DefaultDict')
OrderedDict = _alias(collections.OrderedDict, 2)
Counter = _alias(collections.Counter, 1)
ChainMap = _alias(collections.ChainMap, 2)
Generator = _alias(collections.abc.Generator, 3)
AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)
Type = _alias(type, 1, inst=False, name='Type')
Type.__doc__ = \
"""A special construct usable to annotate class objects.
For example, suppose we have the following classes::
class User: ... # Abstract base for User classes
class BasicUser(User): ...
class ProUser(User): ...
class TeamUser(User): ...
And a function that takes a class argument that's a subclass of
User and returns an instance of the corresponding class::
U = TypeVar('U', bound=User)
def new_user(user_class: Type[U]) -> U:
user = user_class()
# (Here we could write the user object to a database)
return user
joe = new_user(BasicUser)
At this point the type checker knows that joe has type BasicUser.
"""
@runtime_checkable
class SupportsInt(Protocol):
    """An ABC with one abstract method __int__."""
    __slots__ = ()
    # Runtime-checkable: isinstance() only verifies __int__ exists.
    @abstractmethod
    def __int__(self) -> int:
        pass
@runtime_checkable
class SupportsFloat(Protocol):
    """An ABC with one abstract method __float__."""
    __slots__ = ()
    # Runtime-checkable: isinstance() only verifies __float__ exists.
    @abstractmethod
    def __float__(self) -> float:
        pass
@runtime_checkable
class SupportsComplex(Protocol):
    """An ABC with one abstract method __complex__."""
    __slots__ = ()
    # Runtime-checkable: isinstance() only verifies __complex__ exists.
    @abstractmethod
    def __complex__(self) -> complex:
        pass
@runtime_checkable
class SupportsBytes(Protocol):
    """An ABC with one abstract method __bytes__."""
    __slots__ = ()
    # Runtime-checkable: isinstance() only verifies __bytes__ exists.
    @abstractmethod
    def __bytes__(self) -> bytes:
        pass
@runtime_checkable
class SupportsIndex(Protocol):
    """An ABC with one abstract method __index__."""
    __slots__ = ()
    # Runtime-checkable: isinstance() only verifies __index__ exists.
    @abstractmethod
    def __index__(self) -> int:
        pass
@runtime_checkable
class SupportsAbs(Protocol[T_co]):
    """An ABC with one abstract method __abs__ that is covariant in its return type."""
    __slots__ = ()
    # Runtime-checkable: isinstance() only verifies __abs__ exists.
    @abstractmethod
    def __abs__(self) -> T_co:
        pass
@runtime_checkable
class SupportsRound(Protocol[T_co]):
    """An ABC with one abstract method __round__ that is covariant in its return type."""
    __slots__ = ()
    # Runtime-checkable: isinstance() only verifies __round__ exists.
    @abstractmethod
    def __round__(self, ndigits: int = 0) -> T_co:
        pass
def _make_nmtuple(name, types, module, defaults = ()):
fields = [n for n, t in types]
types = {n: _type_check(t, f"field {n} annotation must be a type")
for n, t in types}
nm_tpl = collections.namedtuple(name, fields,
defaults=defaults, module=module)
nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = types
return nm_tpl
# attributes prohibited to set in NamedTuple class syntax
_prohibited = frozenset({'__new__', '__init__', '__slots__', '__getnewargs__',
'_fields', '_field_defaults',
'_make', '_replace', '_asdict', '_source'})
_special = frozenset({'__module__', '__name__', '__annotations__'})
class NamedTupleMeta(type):
    """Metaclass backing the class-syntax form of NamedTuple."""
    def __new__(cls, typename, bases, ns):
        # _namedtuple_mro_entries guarantees the only base is _NamedTuple.
        assert bases[0] is _NamedTuple
        types = ns.get('__annotations__', {})
        default_names = []
        # A field has a default iff its name is also bound in the class body;
        # defaulted fields must form a contiguous tail, as in def signatures.
        for field_name in types:
            if field_name in ns:
                default_names.append(field_name)
            elif default_names:
                raise TypeError(f"Non-default namedtuple field {field_name} "
                                f"cannot follow default field"
                                f"{'s' if len(default_names) > 1 else ''} "
                                f"{', '.join(default_names)}")
        nm_tpl = _make_nmtuple(typename, types.items(),
                               defaults=[ns[n] for n in default_names],
                               module=ns['__module__'])
        # update from user namespace without overriding special namedtuple attributes
        for key in ns:
            if key in _prohibited:
                raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
            elif key not in _special and key not in nm_tpl._fields:
                setattr(nm_tpl, key, ns[key])
        return nm_tpl
def NamedTuple(typename, fields=None, /, **kwargs):
"""Typed version of namedtuple.
Usage in Python versions >= 3.6::
class Employee(NamedTuple):
name: str
id: int
This is equivalent to::
Employee = collections.namedtuple('Employee', ['name', 'id'])
The resulting class has an extra __annotations__ attribute, giving a
dict that maps field names to types. (The field names are also in
the _fields attribute, which is part of the namedtuple API.)
Alternative equivalent keyword syntax is also accepted::
Employee = NamedTuple('Employee', name=str, id=int)
In Python versions <= 3.5 use::
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
"""
if fields is None:
fields = kwargs.items()
elif kwargs:
raise TypeError("Either list of fields or keywords"
" can be provided to NamedTuple, not both")
return _make_nmtuple(typename, fields, module=_caller())
_NamedTuple = type.__new__(NamedTupleMeta, 'NamedTuple', (), {})
def _namedtuple_mro_entries(bases):
    # PEP 560 hook: `class NT(NamedTuple)` substitutes the real base
    # _NamedTuple (whose metaclass builds the namedtuple) for NamedTuple.
    if len(bases) > 1:
        raise TypeError("Multiple inheritance with NamedTuple is not supported")
    assert bases[0] is NamedTuple
    return (_NamedTuple,)
NamedTuple.__mro_entries__ = _namedtuple_mro_entries
class _TypedDictMeta(type):
    def __new__(cls, name, bases, ns, total=True):
        """Create new typed dict class object.
        This method is called when TypedDict is subclassed,
        or when TypedDict is instantiated. This way
        TypedDict supports all three syntax forms described in its docstring.
        Subclasses and instances of TypedDict return actual dictionaries.
        """
        for base in bases:
            if type(base) is not _TypedDictMeta:
                raise TypeError('cannot inherit from both a TypedDict type '
                                'and a non-TypedDict base class')
        # The declared bases are dropped: every TypedDict class is a direct
        # dict subclass at runtime.
        tp_dict = type.__new__(_TypedDictMeta, name, (dict,), ns)
        annotations = {}
        own_annotations = ns.get('__annotations__', {})
        own_annotation_keys = set(own_annotations.keys())
        msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
        own_annotations = {
            n: _type_check(tp, msg, module=tp_dict.__module__)
            for n, tp in own_annotations.items()
        }
        required_keys = set()
        optional_keys = set()
        # Merge annotations and the required/optional key split from the
        # TypedDict bases before applying this class's own declarations.
        for base in bases:
            annotations.update(base.__dict__.get('__annotations__', {}))
            required_keys.update(base.__dict__.get('__required_keys__', ()))
            optional_keys.update(base.__dict__.get('__optional_keys__', ()))
        annotations.update(own_annotations)
        # total=True makes this class's own keys required; total=False,
        # optional.  Inherited keys keep their original classification.
        if total:
            required_keys.update(own_annotation_keys)
        else:
            optional_keys.update(own_annotation_keys)
        tp_dict.__annotations__ = annotations
        tp_dict.__required_keys__ = frozenset(required_keys)
        tp_dict.__optional_keys__ = frozenset(optional_keys)
        if not hasattr(tp_dict, '__total__'):
            tp_dict.__total__ = total
        return tp_dict
    # Instantiating a TypedDict class just builds a plain dict.
    __call__ = dict  # static method
    def __subclasscheck__(cls, other):
        # Typed dicts are only for static structural subtyping.
        raise TypeError('TypedDict does not support instance and class checks')
    __instancecheck__ = __subclasscheck__
def TypedDict(typename, fields=None, /, *, total=True, **kwargs):
"""A simple typed namespace. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, where each key is
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
By default, all keys must be present in a TypedDict. It is possible
to override this by specifying totality.
Usage::
class point2D(TypedDict, total=False):
x: int
y: int
This means that a point2D TypedDict can have any of the keys omitted.A type
checker is only expected to support a literal False or True as the value of
the total argument. True is the default, and makes all items defined in the
class body be required.
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
"""
if fields is None:
fields = kwargs
elif kwargs:
raise TypeError("TypedDict takes either a dict or keyword arguments,"
" but not both")
ns = {'__annotations__': dict(fields)}
module = _caller()
if module is not None:
# Setting correct module is necessary to make typed dict classes pickleable.
ns['__module__'] = module
return _TypedDictMeta(typename, (), ns, total=total)
_TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {})
TypedDict.__mro_entries__ = lambda bases: (_TypedDict,)
class NewType:
    """NewType creates simple unique types with almost zero
    runtime overhead. NewType(name, tp) is considered a subtype of tp
    by static type checkers. At runtime, NewType(name, tp) returns
    a dummy callable that simply returns its argument. Usage::
        UserId = NewType('UserId', int)
        def name_by_id(user_id: UserId) -> str:
            ...
        UserId('user')          # Fails type check
        name_by_id(42)          # Fails type check
        name_by_id(UserId(42))  # OK
        num = UserId(5) + 1     # type: int
    """
    # Calling the NewType returns its argument unchanged
    # (C-accelerated _idfunc when available).
    __call__ = _idfunc
    def __init__(self, name, tp):
        self.__qualname__ = name
        if '.' in name:
            # Keep only the last dotted component for __name__.
            name = name.rpartition('.')[-1]
        self.__name__ = name
        self.__supertype__ = tp
        def_mod = _caller()  # module in which the NewType is created
        if def_mod != 'typing':
            self.__module__ = def_mod
    def __repr__(self):
        return f'{self.__module__}.{self.__qualname__}'
    def __reduce__(self):
        # Pickle by qualified name; unpickling looks it up in the module.
        return self.__qualname__
    def __or__(self, other):
        # Support PEP 604 union syntax: UserId | None.
        return Union[self, other]
    def __ror__(self, other):
        return Union[other, self]
# Python-version-specific alias (Python 2: unicode; Python 3: str)
Text = str
# Constant that's True when type checking, but False here.
TYPE_CHECKING = False
class IO(Generic[AnyStr]):
    """Generic base class for TextIO and BinaryIO.
    This is an abstract, generic version of the return of open().
    NOTE: This does not distinguish between the different possible
    classes (text vs. binary, read vs. write vs. read/write,
    append-only, unbuffered). The TextIO and BinaryIO subclasses
    below capture the distinctions between text vs. binary, which is
    pervasive in the interface; however we currently do not offer a
    way to track the other distinctions in the type system.
    """
    __slots__ = ()
    # All members are abstract stubs mirroring the file-object API;
    # concrete behavior comes from the actual stream implementations.
    @property
    @abstractmethod
    def mode(self) -> str:
        pass
    @property
    @abstractmethod
    def name(self) -> str:
        pass
    @abstractmethod
    def close(self) -> None:
        pass
    @property
    @abstractmethod
    def closed(self) -> bool:
        pass
    @abstractmethod
    def fileno(self) -> int:
        pass
    @abstractmethod
    def flush(self) -> None:
        pass
    @abstractmethod
    def isatty(self) -> bool:
        pass
    @abstractmethod
    def read(self, n: int = -1) -> AnyStr:
        pass
    @abstractmethod
    def readable(self) -> bool:
        pass
    @abstractmethod
    def readline(self, limit: int = -1) -> AnyStr:
        pass
    @abstractmethod
    def readlines(self, hint: int = -1) -> List[AnyStr]:
        pass
    @abstractmethod
    def seek(self, offset: int, whence: int = 0) -> int:
        pass
    @abstractmethod
    def seekable(self) -> bool:
        pass
    @abstractmethod
    def tell(self) -> int:
        pass
    @abstractmethod
    def truncate(self, size: Optional[int] = None) -> int:
        pass
    @abstractmethod
    def writable(self) -> bool:
        pass
    @abstractmethod
    def write(self, s: AnyStr) -> int:
        pass
    @abstractmethod
    def writelines(self, lines: List[AnyStr]) -> None:
        pass
    @abstractmethod
    def __enter__(self) -> 'IO[AnyStr]':
        pass
    @abstractmethod
    def __exit__(self, type, value, traceback) -> None:
        pass
class BinaryIO(IO[bytes]):
    """Typed version of the return of open() in binary mode."""
    __slots__ = ()
    # write() is re-declared to also accept bytearray in binary mode.
    @abstractmethod
    def write(self, s: Union[bytes, bytearray]) -> int:
        pass
    @abstractmethod
    def __enter__(self) -> 'BinaryIO':
        pass
class TextIO(IO[str]):
    """Typed version of the return of open() in text mode."""
    __slots__ = ()
    # Text-mode-only attributes, mirroring io.TextIOWrapper's interface.
    @property
    @abstractmethod
    def buffer(self) -> BinaryIO:
        pass
    @property
    @abstractmethod
    def encoding(self) -> str:
        pass
    @property
    @abstractmethod
    def errors(self) -> Optional[str]:
        pass
    @property
    @abstractmethod
    def line_buffering(self) -> bool:
        pass
    @property
    @abstractmethod
    def newlines(self) -> Any:
        pass
    @abstractmethod
    def __enter__(self) -> 'TextIO':
        pass
class _DeprecatedType(type):
    """Metaclass that warns on access to a deprecated wrapper namespace."""
    def __getattribute__(cls, name):
        # Warn only for names actually defined on the deprecated namespace;
        # __dict__/__module__ are touched by machinery and must stay silent.
        if name not in ("__dict__", "__module__") and name in cls.__dict__:
            warnings.warn(
                f"{cls.__name__} is deprecated, import directly "
                f"from typing instead. {cls.__name__} will be removed "
                "in Python 3.12.",
                DeprecationWarning,
                stacklevel=2,
            )
        return super().__getattribute__(name)
class io(metaclass=_DeprecatedType):
    """Wrapper namespace for IO generic classes."""
    # Deprecated alias namespace (typing.io); attribute access emits a
    # DeprecationWarning via _DeprecatedType.
    __all__ = ['IO', 'TextIO', 'BinaryIO']
    IO = IO
    TextIO = TextIO
    BinaryIO = BinaryIO
io.__name__ = __name__ + '.io'
sys.modules[io.__name__] = io
Pattern = _alias(stdlib_re.Pattern, 1)
Match = _alias(stdlib_re.Match, 1)
class re(metaclass=_DeprecatedType):
    """Wrapper namespace for re type aliases."""
    # Deprecated alias namespace (typing.re); attribute access emits a
    # DeprecationWarning via _DeprecatedType.
    __all__ = ['Pattern', 'Match']
    Pattern = Pattern
    Match = Match
re.__name__ = __name__ + '.re'
sys.modules[re.__name__] = re
def reveal_type(obj: T, /) -> T:
    """Reveal the inferred type of a variable.

    A static type checker that encounters ``reveal_type()`` emits the
    inferred type of the argument::

        x: int = 1
        reveal_type(x)

    Running e.g. ``mypy`` on that example prints something like
    'Revealed type is "builtins.int"'.  At runtime this function prints
    the runtime type of the argument to stderr and returns it unchanged.
    """
    runtime_name = type(obj).__name__
    print(f"Runtime type is {runtime_name!r}", file=sys.stderr)
    return obj
| 34.458048 | 100 | 0.630358 |
from abc import abstractmethod, ABCMeta
import collections
import collections.abc
import contextlib
import functools
import operator
import re as stdlib_re
import sys
import types
import warnings
from types import WrapperDescriptorType, MethodWrapperType, MethodDescriptorType, GenericAlias
try:
from _typing import _idfunc
except ImportError:
def _idfunc(_, x):
return x
__all__ = [
'Annotated',
'Any',
'Callable',
'ClassVar',
'Concatenate',
'Final',
'ForwardRef',
'Generic',
'Literal',
'Optional',
'ParamSpec',
'Protocol',
'Tuple',
'Type',
'TypeVar',
'Union',
'AbstractSet',
'ByteString',
'Container',
'ContextManager',
'Hashable',
'ItemsView',
'Iterable',
'Iterator',
'KeysView',
'Mapping',
'MappingView',
'MutableMapping',
'MutableSequence',
'MutableSet',
'Sequence',
'Sized',
'ValuesView',
'Awaitable',
'AsyncIterator',
'AsyncIterable',
'Coroutine',
'Collection',
'AsyncGenerator',
'AsyncContextManager',
'Reversible',
'SupportsAbs',
'SupportsBytes',
'SupportsComplex',
'SupportsFloat',
'SupportsIndex',
'SupportsInt',
'SupportsRound',
'ChainMap',
'Counter',
'Deque',
'Dict',
'DefaultDict',
'List',
'OrderedDict',
'Set',
'FrozenSet',
'NamedTuple',
'TypedDict',
'Generator',
'BinaryIO',
'IO',
'Match',
'Pattern',
'TextIO',
'AnyStr',
'cast',
'final',
'get_args',
'get_origin',
'get_type_hints',
'is_typeddict',
'NewType',
'no_type_check',
'no_type_check_decorator',
'NoReturn',
'overload',
'ParamSpecArgs',
'ParamSpecKwargs',
'reveal_type',
'runtime_checkable',
'Text',
'TYPE_CHECKING',
'TypeAlias',
'TypeGuard',
]
def _type_convert(arg, module=None, *, allow_special_forms=False):
    """Normalize an annotation: None -> NoneType, str -> ForwardRef."""
    if arg is None:
        # A bare None annotation means the NoneType singleton.
        return type(None)
    if isinstance(arg, str):
        # String annotations become lazily-evaluated forward references.
        return ForwardRef(arg, module=module, is_class=allow_special_forms)
    return arg
def _type_check(arg, msg, is_argument=True, module=None, *, allow_special_forms=False):
    """Check that the argument is a type, and return it (internal helper).

    As a special case, accept None and return type(None) instead.  Also
    wrap strings into ForwardRef instances.  *msg* is a human-readable
    error message appended with the repr() of the offending value.
    """
    # Bare special forms like plain ClassVar/Final are rejected when they
    # appear where a concrete type is expected.
    invalid_generic_forms = (Generic, Protocol)
    if not allow_special_forms:
        invalid_generic_forms += (ClassVar,)
        if is_argument:
            invalid_generic_forms += (Final,)
    arg = _type_convert(arg, module=module, allow_special_forms=allow_special_forms)
    if (isinstance(arg, _GenericAlias) and
            arg.__origin__ in invalid_generic_forms):
        raise TypeError(f"{arg} is not valid as type argument")
    if arg in (Any, NoReturn, ClassVar, Final, TypeAlias):
        return arg
    if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol):
        raise TypeError(f"Plain {arg} is not valid as type argument")
    if isinstance(arg, (type, TypeVar, ForwardRef, types.UnionType, ParamSpec)):
        return arg
    # Anything callable is tolerated (e.g. unparameterized generics).
    if not callable(arg):
        raise TypeError(f"{msg} Got {arg!r:.100}.")
    return arg
def _is_param_expr(arg):
    """Return True if *arg* can stand for a Callable parameter list."""
    if arg is ...:
        return True
    return isinstance(arg, (tuple, list, ParamSpec, _ConcatenateGenericAlias))
def _type_repr(obj):
    """Return the repr() of an object, special-casing types (internal helper)."""
    if isinstance(obj, types.GenericAlias):
        # e.g. list[int] already has a nice repr.
        return repr(obj)
    if isinstance(obj, type):
        # Builtins print bare ('int'), everything else module-qualified.
        prefix = '' if obj.__module__ == 'builtins' else f'{obj.__module__}.'
        return prefix + obj.__qualname__
    if obj is ...:
        return '...'
    if isinstance(obj, types.FunctionType):
        return obj.__name__
    return repr(obj)
def _collect_type_vars(types_, typevar_types=None):
    """Collect all type variables contained in types_, in first-seen order."""
    if typevar_types is None:
        typevar_types = TypeVar
    found = []
    for item in types_:
        if isinstance(item, typevar_types) and item not in found:
            found.append(item)
        if isinstance(item, (_GenericAlias, GenericAlias, types.UnionType)):
            # Pull in variables from nested generic aliases / unions.
            found.extend([p for p in item.__parameters__ if p not in found])
    return tuple(found)
def _check_generic(cls, parameters, elen):
    """Check the number of parameters given for a generic cls (internal)."""
    if not elen:
        raise TypeError(f"{cls} is not a generic class")
    count = len(parameters)
    if count != elen:
        size_word = 'many' if count > elen else 'few'
        raise TypeError(f"Too {size_word} arguments for {cls};"
                        f" actual {count}, expected {elen}")
def _prepare_paramspec_params(cls, params):
    """Prepare the given parameters for a ParamSpec-aware substitution."""
    # Special case (PEP 612): for a class with exactly one ParamSpec
    # parameter, Z[int, str] is shorthand for Z[[int, str]].
    if (len(cls.__parameters__) == 1
            and params and not _is_param_expr(params[0])):
        assert isinstance(cls.__parameters__[0], ParamSpec)
        return (params,)
    else:
        _check_generic(cls, params, len(cls.__parameters__))
        _params = []
        # Lists given for ParamSpec slots become tuples so the result
        # stays hashable (needed by the subscription cache).
        for p, tvar in zip(params, cls.__parameters__):
            if isinstance(tvar, ParamSpec) and isinstance(p, list):
                p = tuple(p)
            _params.append(p)
        return tuple(_params)
def _deduplicate(params):
    """Weed out strict duplicates, preserving the first of each occurrence."""
    remaining = set(params)
    if len(remaining) == len(params):
        # Already unique -- return the input untouched.
        return params
    unique = []
    for item in params:
        if item in remaining:
            unique.append(item)
            remaining.remove(item)
    assert not remaining, remaining
    return unique
def _remove_dups_flatten(parameters):
    """Internal helper for Union creation: flatten nested unions, dedupe."""
    flat = []
    for param in parameters:
        if isinstance(param, (_UnionGenericAlias, types.UnionType)):
            # Union[int, Union[str, float]] -> Union[int, str, float]
            flat.extend(param.__args__)
        else:
            flat.append(param)
    return tuple(_deduplicate(flat))
def _flatten_literal_params(parameters):
    """Internal helper for Literal creation: flatten nested Literals."""
    flat = []
    for param in parameters:
        if isinstance(param, _LiteralGenericAlias):
            # Literal[1, Literal[2, 3]] -> Literal[1, 2, 3]
            flat.extend(param.__args__)
        else:
            flat.append(param)
    return tuple(flat)
_cleanups = []
def _tp_cache(func=None, /, *, typed=False):
    """Internal decorator caching __getitem__ of generic types.

    Falls back to the uncached call for unhashable arguments and allows
    usage both bare (@_tp_cache) and with arguments (@_tp_cache(typed=True)).
    """
    def decorator(func):
        cached = functools.lru_cache(typed=typed)(func)
        # Keep a handle so the caches can be cleared (e.g. by tests).
        _cleanups.append(cached.cache_clear)
        @functools.wraps(func)
        def inner(*args, **kwds):
            try:
                return cached(*args, **kwds)
            except TypeError:
                # Unhashable arguments: skip the cache entirely so any
                # real TypeError surfaces from the direct call below.
                pass
            return func(*args, **kwds)
        return inner
    if func is not None:
        # Used as a bare decorator.
        return decorator(func)
    return decorator
def _eval_type(t, globalns, localns, recursive_guard=frozenset()):
    """Evaluate all forward references in the given type t.

    For the meaning of globalns/localns see get_type_hints().  The
    recursive_guard set prevents infinite recursion on self-referential
    ForwardRefs.
    """
    if isinstance(t, ForwardRef):
        return t._evaluate(globalns, localns, recursive_guard)
    if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
        ev_args = tuple(_eval_type(a, globalns, localns, recursive_guard) for a in t.__args__)
        if ev_args == t.__args__:
            # Nothing resolved: preserve the original object's identity.
            return t
        if isinstance(t, GenericAlias):
            return GenericAlias(t.__origin__, ev_args)
        if isinstance(t, types.UnionType):
            # Rebuild X | Y unions with the | operator.
            return functools.reduce(operator.or_, ev_args)
        else:
            return t.copy_with(ev_args)
    return t
class _Final:
    """Mixin to prohibit subclassing."""
    __slots__ = ('__weakref__',)
    def __init_subclass__(cls, /, *args, **kwds):
        # Only typing's own classes pass _root=...; anything else is rejected.
        if '_root' not in kwds:
            raise TypeError("Cannot subclass special typing classes")
class _Immutable:
    """Mixin to indicate that the object should not be copied."""
    __slots__ = ()
    def __copy__(self):
        # Copying an immutable object yields the object itself.
        return self
    def __deepcopy__(self, memo):
        return self
class _SpecialForm(_Final, _root=True):
    """Internal marker for special typing constructs (Any, Union, ...).

    Each instance wraps a subscription function; the instance itself is
    not a real class and can be neither instantiated nor subclassed.
    """
    __slots__ = ('_name', '__doc__', '_getitem')
    def __init__(self, getitem):
        self._getitem = getitem
        # Borrow name and docstring from the decorated function.
        self._name = getitem.__name__
        self.__doc__ = getitem.__doc__
    def __getattr__(self, item):
        if item in {'__name__', '__qualname__'}:
            return self._name
        raise AttributeError(item)
    def __mro_entries__(self, bases):
        raise TypeError(f"Cannot subclass {self!r}")
    def __repr__(self):
        return 'typing.' + self._name
    def __reduce__(self):
        # Pickle as the bare name; unpickling looks it up in typing.
        return self._name
    def __call__(self, *args, **kwds):
        raise TypeError(f"Cannot instantiate {self!r}")
    def __or__(self, other):
        # PEP 604 support: Optional[int] | str, etc.
        return Union[self, other]
    def __ror__(self, other):
        return Union[other, self]
    def __instancecheck__(self, obj):
        raise TypeError(f"{self} cannot be used with isinstance()")
    def __subclasscheck__(self, cls):
        raise TypeError(f"{self} cannot be used with issubclass()")
    @_tp_cache
    def __getitem__(self, parameters):
        # Cached subscription delegating to the wrapped function.
        return self._getitem(self, parameters)
# Variant of _SpecialForm used by Literal: passes the parameters *unpacked*
# to the getitem function so that Literal's typed cache (see the Literal
# definition below) can distinguish e.g. Literal[0] from Literal[False].
# NOTE: comments only — the inherited '__doc__' slot forbids a class docstring.
class _LiteralSpecialForm(_SpecialForm, _root=True):
    def __getitem__(self, parameters):
        if not isinstance(parameters, tuple):
            parameters = (parameters,)
        return self._getitem(self, *parameters)
@_SpecialForm
def Any(self, parameters):
    """Special type indicating an unconstrained type.

    Any is compatible with every type, and vice versa.  It cannot be
    subscripted.
    """
    raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def NoReturn(self, parameters):
    """Special type indicating functions that never return normally.

    It cannot be subscripted.
    """
    raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def ClassVar(self, parameters):
    """Special type construct to mark class variables.

    Accepts only a single type argument.
    """
    item = _type_check(parameters, f'{self} accepts only single type.')
    return _GenericAlias(self, (item,))
@_SpecialForm
def Final(self, parameters):
    """Special typing construct to indicate final names to type checkers.

    Accepts only a single type argument.
    """
    item = _type_check(parameters, f'{self} accepts only single type.')
    return _GenericAlias(self, (item,))
@_SpecialForm
def Union(self, parameters):
    """Union type; Union[X, Y] means either X or Y.

    Arguments are flattened (nested unions are inlined) and
    deduplicated; a union of a single argument vanishes, and
    Union[X, None] is presented as Optional[X].
    """
    if parameters == ():
        raise TypeError("Cannot take a Union of no types.")
    if not isinstance(parameters, tuple):
        parameters = (parameters,)
    msg = "Union[arg, ...]: each arg must be a type."
    parameters = tuple(_type_check(p, msg) for p in parameters)
    parameters = _remove_dups_flatten(parameters)
    if len(parameters) == 1:
        # Union of a single type is just that type.
        return parameters[0]
    if len(parameters) == 2 and type(None) in parameters:
        return _UnionGenericAlias(self, parameters, name="Optional")
    return _UnionGenericAlias(self, parameters)
@_SpecialForm
def Optional(self, parameters):
    """Optional type.

    Optional[X] is equivalent to Union[X, None].
    """
    arg = _type_check(parameters, f"{self} requires a single type.")
    return Union[arg, type(None)]
@_LiteralSpecialForm
@_tp_cache(typed=True)
def Literal(self, *parameters):
    """Special typing form to define literal types (a.k.a. value types)."""
    # There is no '_type_check' call because the parameters to Literal
    # are values, not types.
    parameters = _flatten_literal_params(parameters)
    try:
        # Deduplicate by (value, type) so that e.g. 0 and False both survive.
        parameters = tuple(p for p, _ in _deduplicate(list(_value_and_type_iter(parameters))))
    except TypeError:  # unhashable parameters: keep them as given
        pass
    return _LiteralGenericAlias(self, parameters)
@_SpecialForm
def TypeAlias(self, parameters):
    """Special marker indicating that an assignment should be recognized
    as a proper type alias definition by type checkers.

    It cannot be subscripted.
    """
    raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def Concatenate(self, parameters):
    """Used in conjunction with ParamSpec and Callable to represent a
    higher order function which adds, removes or transforms parameters
    of a callable.

    The last argument must be a ParamSpec.
    """
    if parameters == ():
        raise TypeError("Cannot take a Concatenate of no types.")
    if not isinstance(parameters, tuple):
        parameters = (parameters,)
    if not isinstance(parameters[-1], ParamSpec):
        raise TypeError("The last parameter to Concatenate should be a "
                        "ParamSpec variable.")
    msg = "Concatenate[arg, ...]: each arg must be a type."
    parameters = (*(_type_check(p, msg) for p in parameters[:-1]), parameters[-1])
    return _ConcatenateGenericAlias(self, parameters)
@_SpecialForm
def TypeGuard(self, parameters):
    """Special typing form used to annotate the return type of a
    user-defined type guard function.

    Accepts only a single type argument.
    """
    item = _type_check(parameters, f'{self} accepts only single type.')
    return _GenericAlias(self, (item,))
class ForwardRef(_Final, _root=True):
    """Internal wrapper to hold a forward reference (a string annotation
    not yet evaluated to a real type)."""
    __slots__ = ('__forward_arg__', '__forward_code__',
                 '__forward_evaluated__', '__forward_value__',
                 '__forward_is_argument__', '__forward_is_class__',
                 '__forward_module__')
    def __init__(self, arg, is_argument=True, module=None, *, is_class=False):
        if not isinstance(arg, str):
            raise TypeError(f"Forward reference must be a string -- got {arg!r}")
        try:
            # Pre-compile so that evaluation errors surface here, at
            # annotation-creation time, rather than at use time.
            code = compile(arg, '<string>', 'eval')
        except SyntaxError:
            raise SyntaxError(f"Forward reference must be an expression -- got {arg!r}")
        self.__forward_arg__ = arg
        self.__forward_code__ = code
        self.__forward_evaluated__ = False
        self.__forward_value__ = None
        self.__forward_is_argument__ = is_argument
        self.__forward_is_class__ = is_class
        self.__forward_module__ = module
    def _evaluate(self, globalns, localns, recursive_guard):
        """Evaluate the reference in the given namespaces; caches the result.

        *recursive_guard* holds the argument strings currently being
        evaluated, to break cycles in recursive references.
        """
        if self.__forward_arg__ in recursive_guard:
            return self
        if not self.__forward_evaluated__ or localns is not globalns:
            if globalns is None and localns is None:
                globalns = localns = {}
            elif globalns is None:
                globalns = localns
            elif localns is None:
                localns = globalns
            if self.__forward_module__ is not None:
                # Prefer the globals of the module the reference came from.
                globalns = getattr(
                    sys.modules.get(self.__forward_module__, None), '__dict__', globalns
                )
            type_ = _type_check(
                eval(self.__forward_code__, globalns, localns),
                "Forward references must evaluate to types.",
                is_argument=self.__forward_is_argument__,
                allow_special_forms=self.__forward_is_class__,
            )
            # Recursively evaluate nested forward references, guarding
            # against self-reference.
            self.__forward_value__ = _eval_type(
                type_, globalns, localns, recursive_guard | {self.__forward_arg__}
            )
            self.__forward_evaluated__ = True
        return self.__forward_value__
    def __eq__(self, other):
        if not isinstance(other, ForwardRef):
            return NotImplemented
        if self.__forward_evaluated__ and other.__forward_evaluated__:
            # Both evaluated: compare evaluated values too.
            return (self.__forward_arg__ == other.__forward_arg__ and
                    self.__forward_value__ == other.__forward_value__)
        return self.__forward_arg__ == other.__forward_arg__
    def __hash__(self):
        return hash(self.__forward_arg__)
    def __or__(self, other):
        return Union[self, other]
    def __ror__(self, other):
        return Union[other, self]
    def __repr__(self):
        return f'ForwardRef({self.__forward_arg__!r})'
class _TypeVarLike:
    """Mixin shared by TypeVar and ParamSpec: variance, bound, and
    Union-operator support."""
    def __init__(self, bound, covariant, contravariant):
        # Set up bound, covariant and contravariant attributes.
        if covariant and contravariant:
            raise ValueError("Bivariant types are not supported.")
        self.__covariant__ = bool(covariant)
        self.__contravariant__ = bool(contravariant)
        if bound:
            self.__bound__ = _type_check(bound, "Bound must be a type.")
        else:
            self.__bound__ = None
    def __or__(self, right):
        return Union[self, right]
    def __ror__(self, left):
        return Union[left, self]
    def __repr__(self):
        # Prefix encodes variance: '+' covariant, '-' contravariant,
        # '~' invariant.
        if self.__covariant__:
            prefix = '+'
        elif self.__contravariant__:
            prefix = '-'
        else:
            prefix = '~'
        return prefix + self.__name__
    def __reduce__(self):
        # Pickle by name (resolved in the defining module).
        return self.__name__
class TypeVar( _Final, _Immutable, _TypeVarLike, _root=True):
    """Type variable.

    Usage::

      T = TypeVar('T')              # Can be anything
      A = TypeVar('A', str, bytes)  # Must be str or bytes

    Type variables may be marked covariant or contravariant, and may
    specify either an upper bound or a set of constraint types (but not
    both).
    """
    def __init__(self, name, *constraints, bound=None,
                 covariant=False, contravariant=False):
        self.__name__ = name
        super().__init__(bound, covariant, contravariant)
        if constraints and bound is not None:
            raise TypeError("Constraints cannot be combined with bound=...")
        if constraints and len(constraints) == 1:
            raise TypeError("A single constraint is not allowed")
        msg = "TypeVar(name, constraint, ...): constraints must be types."
        self.__constraints__ = tuple(_type_check(t, msg) for t in constraints)
        # Attribute the TypeVar to the module that created it, so that
        # pickling by name works.
        def_mod = _caller()
        if def_mod != 'typing':
            self.__module__ = def_mod
class ParamSpecArgs(_Final, _Immutable, _root=True):
    """The args for a ParamSpec object.

    Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
    """
    def __init__(self, origin):
        self.__origin__ = origin
    def __repr__(self):
        return f"{self.__origin__.__name__}.args"
class ParamSpecKwargs(_Final, _Immutable, _root=True):
    """The kwargs for a ParamSpec object.

    Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
    """
    def __init__(self, origin):
        self.__origin__ = origin
    def __repr__(self):
        return f"{self.__origin__.__name__}.kwargs"
class ParamSpec(_Final, _Immutable, _TypeVarLike, _root=True):
    """Parameter specification variable.

    Usage::

       P = ParamSpec('P')

    Used to forward the parameter types of one callable to another,
    typically via Callable[P, ...] together with P.args and P.kwargs.
    """
    @property
    def args(self):
        return ParamSpecArgs(self)
    @property
    def kwargs(self):
        return ParamSpecKwargs(self)
    def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
        self.__name__ = name
        super().__init__(bound, covariant, contravariant)
        # Attribute the ParamSpec to its defining module so pickling by
        # name works.
        def_mod = _caller()
        if def_mod != 'typing':
            self.__module__ = def_mod
def _is_dunder(attr):
return attr.startswith('__') and attr.endswith('__')
class _BaseGenericAlias(_Final, _root=True):
    """The central part of the internal generic-alias API.

    Represents a generic version of type ``__origin__`` produced by
    subscription.  If *inst* is False the alias can't be instantiated;
    *name*, when given, is the public typing-module name of the alias.
    """
    def __init__(self, origin, *, inst=True, name=None):
        self._inst = inst
        self._name = name
        self.__origin__ = origin
        self.__slots__ = None  # This is not documented.
    def __call__(self, *args, **kwargs):
        if not self._inst:
            raise TypeError(f"Type {self._name} cannot be instantiated; "
                            f"use {self.__origin__.__name__}() instead")
        # Instantiate the origin type and remember the alias used.
        result = self.__origin__(*args, **kwargs)
        try:
            result.__orig_class__ = self
        except AttributeError:
            # e.g. instances with __slots__ can't take the attribute.
            pass
        return result
    def __mro_entries__(self, bases):
        # Resolve this alias to real base classes when used as a base.
        res = []
        if self.__origin__ not in bases:
            res.append(self.__origin__)
        # Add Generic unless some later base already brings it in.
        i = bases.index(self)
        for b in bases[i+1:]:
            if isinstance(b, _BaseGenericAlias) or issubclass(b, Generic):
                break
        else:
            res.append(Generic)
        return tuple(res)
    def __getattr__(self, attr):
        if attr in {'__name__', '__qualname__'}:
            return self._name or self.__origin__.__name__
        # We are careful not to relay dunder names, for copy/pickle safety.
        if '__origin__' in self.__dict__ and not _is_dunder(attr):
            return getattr(self.__origin__, attr)
        raise AttributeError(attr)
    def __setattr__(self, attr, val):
        # Alias-internal bookkeeping attributes are set on the alias;
        # everything else is forwarded to the origin type.
        if _is_dunder(attr) or attr in {'_name', '_inst', '_nparams',
                                        '_typevar_types', '_paramspec_tvars'}:
            super().__setattr__(attr, val)
        else:
            setattr(self.__origin__, attr, val)
    def __instancecheck__(self, obj):
        return self.__subclasscheck__(type(obj))
    def __subclasscheck__(self, cls):
        raise TypeError("Subscripted generics cannot be used with"
                        " class and instance checks")
    def __dir__(self):
        return list(set(super().__dir__()
                + [attr for attr in dir(self.__origin__) if not _is_dunder(attr)]))
# Special typing constructs Union, Optional, Generic, Callable and Tuple
# use three special attributes for internal bookkeeping of generic types:
# * __parameters__ is a tuple of unique free type parameters of a generic
# type, for example, Dict[T, T].__parameters__ == (T,);
# * __origin__ keeps a reference to a type that was subscripted,
# e.g., Union[T, int].__origin__ == Union, or the non-generic version of
# the type.
# * __args__ is a tuple of all arguments used in subscripting,
# e.g., Dict[T, int].__args__ == (T, int).
class _GenericAlias(_BaseGenericAlias, _root=True):
    # The type of parameterized generics.
    #
    # That is, for example, `type(List[int])` is `_GenericAlias`.
    #
    # Objects which are instances of this class include:
    # * Parameterized container types, e.g. `Tuple[int]`, `List[int]`.
    #  * Note that native container types, e.g. `tuple`, `list`, use
    #    `types.GenericAlias` instead.
    # * Parameterized classes:
    #     T = TypeVar('T')
    #     class C(Generic[T]): pass
    #     # C[int] is a _GenericAlias
    # * `Callable` aliases, generic `Callable` aliases, and
    #   parameterized `Callable` aliases:
    #     T = TypeVar('T')
    #     # _CallableGenericAlias inherits from _GenericAlias.
    #     A = Callable[[], None]  # _CallableGenericAlias
    #     B = Callable[[T], None]  # _CallableGenericAlias
    #     C = B[int]  # _CallableGenericAlias
    # * Parameterized `Final`, `ClassVar` and `TypeGuard`:
    #     # All _GenericAlias
    #     Final[int]
    #     ClassVar[float]
    #     TypeVar[bool]
    def __init__(self, origin, args, *, inst=True, name=None,
                 _typevar_types=TypeVar,
                 _paramspec_tvars=False):
        super().__init__(origin, inst=inst, name=name)
        if not isinstance(args, tuple):
            args = (args,)
        # Internal sentinels are translated back to their public spelling.
        self.__args__ = tuple(... if a is _TypingEllipsis else
                              () if a is _TypingEmpty else
                              a for a in args)
        # Free type variables of this alias, e.g. Dict[T, T] -> (T,).
        self.__parameters__ = _collect_type_vars(args, typevar_types=_typevar_types)
        self._typevar_types = _typevar_types
        self._paramspec_tvars = _paramspec_tvars
        if not name:
            self.__module__ = origin.__module__
    def __eq__(self, other):
        if not isinstance(other, _GenericAlias):
            return NotImplemented
        return (self.__origin__ == other.__origin__
                and self.__args__ == other.__args__)
    def __hash__(self):
        return hash((self.__origin__, self.__args__))
    def __or__(self, right):
        return Union[self, right]
    def __ror__(self, left):
        return Union[left, self]
    @_tp_cache
    def __getitem__(self, args):
        # Parameterizes an already-parameterized object.
        #
        # For example, we arrive here doing something like:
        #   T1 = TypeVar('T1')
        #   T2 = TypeVar('T2')
        #   T3 = TypeVar('T3')
        #   class A(Generic[T1]): pass
        #   B = A[T2]  # B is a _GenericAlias
        #   C = B[T3]  # Invokes _GenericAlias.__getitem__
        #
        # We also arrive here when parameterizing a generic `Callable` alias:
        #   T = TypeVar('T')
        #   C = Callable[[T], None]
        #   C[int]  # Invokes _GenericAlias.__getitem__
        if self.__origin__ in (Generic, Protocol):
            # Can't subscript Generic[...] or Protocol[...].
            raise TypeError(f"Cannot subscript already-subscripted {self}")
        if not isinstance(args, tuple):
            args = (args,)
        args = tuple(_type_convert(p) for p in args)
        if (self._paramspec_tvars
                and any(isinstance(t, ParamSpec) for t in self.__parameters__)):
            # ParamSpec substitution allows argument lists of varying arity.
            args = _prepare_paramspec_params(self, args)
        else:
            _check_generic(self, args, len(self.__parameters__))
        new_args = self._determine_new_args(args)
        r = self.copy_with(new_args)
        return r
    def _determine_new_args(self, args):
        # Substitute *args* for our free parameters throughout __args__.
        new_arg_by_param = dict(zip(self.__parameters__, args))
        new_args = []
        for old_arg in self.__args__:
            if isinstance(old_arg, ParamSpec):
                new_arg = new_arg_by_param[old_arg]
                if not _is_param_expr(new_arg):
                    raise TypeError(f"Expected a list of types, an ellipsis, "
                                    f"ParamSpec, or Concatenate. Got {new_arg}")
            elif isinstance(old_arg, self._typevar_types):
                new_arg = new_arg_by_param[old_arg]
            elif isinstance(old_arg, (_GenericAlias, GenericAlias, types.UnionType)):
                # Recurse into nested subscripted types.
                subparams = old_arg.__parameters__
                if not subparams:
                    new_arg = old_arg
                else:
                    subargs = tuple(new_arg_by_param[x] for x in subparams)
                    new_arg = old_arg[subargs]
            else:
                new_arg = old_arg
            if self.__origin__ == collections.abc.Callable and isinstance(new_arg, tuple):
                # Consider the following `Callable`.
                #   C = Callable[[int], str]
                # Substituting a tuple for a ParamSpec flattens the
                # argument list in place.
                new_args.extend(new_arg)
            else:
                new_args.append(new_arg)
        return tuple(new_args)
    def copy_with(self, args):
        # Create a fresh alias of the same kind with new arguments.
        return self.__class__(self.__origin__, args, name=self._name, inst=self._inst)
    def __repr__(self):
        if self._name:
            name = 'typing.' + self._name
        else:
            name = _type_repr(self.__origin__)
        args = ", ".join([_type_repr(a) for a in self.__args__])
        return f'{name}[{args}]'
    def __reduce__(self):
        # Pickle as origin[args]; named aliases resolve via the module dict.
        if self._name:
            origin = globals()[self._name]
        else:
            origin = self.__origin__
        args = tuple(self.__args__)
        if len(args) == 1 and not isinstance(args[0], tuple):
            args, = args
        return operator.getitem, (origin, args)
    def __mro_entries__(self, bases):
        if isinstance(self.__origin__, _SpecialForm):
            raise TypeError(f"Cannot subclass {self!r}")
        if self._name:  # generic version of an ABC or built-in class
            return super().__mro_entries__(bases)
        if self.__origin__ is Generic:
            if Protocol in bases:
                # When both Generic[...] and Protocol appear as bases,
                # Protocol already provides Generic.
                return ()
            i = bases.index(self)
            for b in bases[i+1:]:
                if isinstance(b, _BaseGenericAlias) and b is not self:
                    return ()
        return (self.__origin__,)
class _SpecialGenericAlias(_BaseGenericAlias, _root=True):
    """The generic alias for bare typing names like List, Dict, Iterable.

    *nparams* is the number of accepted type arguments.
    """
    def __init__(self, origin, nparams, *, inst=True, name=None):
        if name is None:
            name = origin.__name__
        super().__init__(origin, inst=inst, name=name)
        self._nparams = nparams
        if origin.__module__ == 'builtins':
            self.__doc__ = f'A generic version of {origin.__qualname__}.'
        else:
            self.__doc__ = f'A generic version of {origin.__module__}.{origin.__qualname__}.'
    @_tp_cache
    def __getitem__(self, params):
        if not isinstance(params, tuple):
            params = (params,)
        msg = "Parameters to generic types must be types."
        params = tuple(_type_check(p, msg) for p in params)
        _check_generic(self, params, self._nparams)
        return self.copy_with(params)
    def copy_with(self, params):
        # Subscription produces an ordinary _GenericAlias.
        return _GenericAlias(self.__origin__, params,
                             name=self._name, inst=self._inst)
    def __repr__(self):
        return 'typing.' + self._name
    def __subclasscheck__(self, cls):
        # Unsubscripted aliases delegate issubclass() to the origin.
        if isinstance(cls, _SpecialGenericAlias):
            return issubclass(cls.__origin__, self.__origin__)
        if not isinstance(cls, _GenericAlias):
            return issubclass(cls, self.__origin__)
        # Subscripted generics raise (see _BaseGenericAlias).
        return super().__subclasscheck__(cls)
    def __reduce__(self):
        return self._name
    def __or__(self, right):
        return Union[self, right]
    def __ror__(self, left):
        return Union[left, self]
class _CallableGenericAlias(_GenericAlias, _root=True):
    """Internal alias for parameterized Callable; flattens the argument
    list into __args__ as (*argtypes, returntype)."""
    def __repr__(self):
        assert self._name == 'Callable'
        args = self.__args__
        if len(args) == 2 and _is_param_expr(args[0]):
            # Callable[..., X] / Callable[P, X] / Callable[Concatenate[...], X]
            return super().__repr__()
        return (f'typing.Callable'
                f'[[{", ".join([_type_repr(a) for a in args[:-1]])}], '
                f'{_type_repr(args[-1])}]')
    def __reduce__(self):
        args = self.__args__
        if not (len(args) == 2 and _is_param_expr(args[0])):
            # Re-nest the flattened argument list for pickling.
            args = list(args[:-1]), args[-1]
        return operator.getitem, (Callable, args)
class _CallableType(_SpecialGenericAlias, _root=True):
    """The special alias behind the bare ``Callable`` name; handles the
    Callable[[args...], result] subscription syntax."""
    def copy_with(self, params):
        return _CallableGenericAlias(self.__origin__, params,
                                     name=self._name, inst=self._inst,
                                     _typevar_types=(TypeVar, ParamSpec),
                                     _paramspec_tvars=True)
    def __getitem__(self, params):
        if not isinstance(params, tuple) or len(params) != 2:
            raise TypeError("Callable must be used as "
                            "Callable[[arg, ...], result].")
        args, result = params
        # This relaxes what args can be on purpose to allow things like
        # PEP 612 ParamSpec.  Responsibility for whether a user is using
        # Callable[...] properly is deferred to static type checkers.
        if isinstance(args, list):
            params = (tuple(args), result)
        else:
            params = (args, result)
        return self.__getitem_inner__(params)
    @_tp_cache
    def __getitem_inner__(self, params):
        # Separate (cacheable) inner step: the list->tuple conversion
        # above makes params hashable.
        args, result = params
        msg = "Callable[args, result]: result must be a type."
        result = _type_check(result, msg)
        if args is Ellipsis:
            return self.copy_with((_TypingEllipsis, result))
        if not isinstance(args, tuple):
            args = (args,)
        args = tuple(_type_convert(arg) for arg in args)
        params = args + (result,)
        return self.copy_with(params)
class _TupleType(_SpecialGenericAlias, _root=True):
    """The special alias behind the bare ``Tuple`` name; handles
    Tuple[()], Tuple[t, ...] and Tuple[t0, t1, ...]."""
    @_tp_cache
    def __getitem__(self, params):
        if params == ():
            # Tuple[()] — the empty tuple type.
            return self.copy_with((_TypingEmpty,))
        if not isinstance(params, tuple):
            params = (params,)
        if len(params) == 2 and params[1] is ...:
            # Tuple[t, ...] — homogeneous, arbitrary length.
            msg = "Tuple[t, ...]: t must be a type."
            p = _type_check(params[0], msg)
            return self.copy_with((p, _TypingEllipsis))
        msg = "Tuple[t0, t1, ...]: each t must be a type."
        params = tuple(_type_check(p, msg) for p in params)
        return self.copy_with(params)
class _UnionGenericAlias(_GenericAlias, _root=True):
    """Internal alias for Union[...]; order-insensitive equality and
    runtime isinstance()/issubclass() support."""
    def copy_with(self, params):
        return Union[params]
    def __eq__(self, other):
        if not isinstance(other, (_UnionGenericAlias, types.UnionType)):
            return NotImplemented
        # Unions compare as sets: Union[int, str] == Union[str, int].
        return set(self.__args__) == set(other.__args__)
    def __hash__(self):
        return hash(frozenset(self.__args__))
    def __repr__(self):
        args = self.__args__
        if len(args) == 2:
            # Show the Optional[X] spelling for Union[X, None].
            if args[0] is type(None):
                return f'typing.Optional[{_type_repr(args[1])}]'
            elif args[1] is type(None):
                return f'typing.Optional[{_type_repr(args[0])}]'
        return super().__repr__()
    def __instancecheck__(self, obj):
        return self.__subclasscheck__(type(obj))
    def __subclasscheck__(self, cls):
        for arg in self.__args__:
            if issubclass(cls, arg):
                return True
        # Falls through to an implicit (falsy) None when no arm matches.
    def __reduce__(self):
        func, (origin, args) = super().__reduce__()
        return func, (Union, args)
def _value_and_type_iter(parameters):
return ((p, type(p)) for p in parameters)
class _LiteralGenericAlias(_GenericAlias, _root=True):
    """Internal alias for Literal[...]; equality and hashing key on
    (value, type) pairs so 0 != False and 1 != True."""
    def __eq__(self, other):
        if not isinstance(other, _LiteralGenericAlias):
            return NotImplemented
        return set(_value_and_type_iter(self.__args__)) == set(_value_and_type_iter(other.__args__))
    def __hash__(self):
        return hash(frozenset(_value_and_type_iter(self.__args__)))
class _ConcatenateGenericAlias(_GenericAlias, _root=True):
    """Internal alias for Concatenate[...] (PEP 612)."""
    def __init__(self, *args, **kwargs):
        # Concatenate always participates in ParamSpec substitution.
        super().__init__(*args, **kwargs,
                         _typevar_types=(TypeVar, ParamSpec),
                         _paramspec_tvars=True)
    def copy_with(self, params):
        if isinstance(params[-1], (list, tuple)):
            # The trailing ParamSpec was substituted with an argument
            # list: flatten into a plain parameter tuple.
            return (*params[:-1], *params[-1])
        if isinstance(params[-1], _ConcatenateGenericAlias):
            # Merge a nested Concatenate's arguments in place.
            params = (*params[:-1], *params[-1].__args__)
        elif not isinstance(params[-1], ParamSpec):
            raise TypeError("The last parameter to Concatenate should be a "
                            "ParamSpec variable.")
        return super().copy_with(params)
class Generic:
    """Abstract base class for generic types.

    A generic type is typically declared by inheriting from this class
    parameterized with one or more type variables, e.g.::

        class Mapping(Generic[KT, VT]):
            def __getitem__(self, key: KT) -> VT: ...
    """
    __slots__ = ()
    _is_protocol = False
    @_tp_cache
    def __class_getitem__(cls, params):
        if not isinstance(params, tuple):
            params = (params,)
        if not params and cls is not Tuple:
            raise TypeError(
                f"Parameter list to {cls.__qualname__}[...] cannot be empty")
        params = tuple(_type_convert(p) for p in params)
        if cls in (Generic, Protocol):
            # Generic and Protocol can only be subscripted with unique
            # type variables.
            if not all(isinstance(p, (TypeVar, ParamSpec)) for p in params):
                raise TypeError(
                    f"Parameters to {cls.__name__}[...] must all be type variables "
                    f"or parameter specification variables.")
            if len(set(params)) != len(params):
                raise TypeError(
                    f"Parameters to {cls.__name__}[...] must all be unique")
        else:
            # Subscripting a regular Generic subclass.
            if any(isinstance(t, ParamSpec) for t in cls.__parameters__):
                params = _prepare_paramspec_params(cls, params)
            else:
                _check_generic(cls, params, len(cls.__parameters__))
        return _GenericAlias(cls, params,
                             _typevar_types=(TypeVar, ParamSpec),
                             _paramspec_tvars=True)
    def __init_subclass__(cls, *args, **kwargs):
        super().__init_subclass__(*args, **kwargs)
        tvars = []
        if '__orig_bases__' in cls.__dict__:
            error = Generic in cls.__orig_bases__
        else:
            error = Generic in cls.__bases__ and cls.__name__ != 'Protocol'
        if error:
            raise TypeError("Cannot inherit from plain Generic")
        if '__orig_bases__' in cls.__dict__:
            tvars = _collect_type_vars(cls.__orig_bases__, (TypeVar, ParamSpec))
            # Look for Generic[T1, ..., Tn].
            # If found, tvars must be a subset of it.
            # If not found, tvars is it.
            gvars = None
            for base in cls.__orig_bases__:
                if (isinstance(base, _GenericAlias) and
                        base.__origin__ is Generic):
                    if gvars is not None:
                        raise TypeError(
                            "Cannot inherit from Generic[...] multiple types.")
                    gvars = base.__parameters__
            if gvars is not None:
                tvarset = set(tvars)
                gvarset = set(gvars)
                if not tvarset <= gvarset:
                    s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
                    s_args = ', '.join(str(g) for g in gvars)
                    raise TypeError(f"Some type variables ({s_vars}) are"
                                    f" not listed in Generic[{s_args}]")
                tvars = gvars
        cls.__parameters__ = tuple(tvars)
class _TypingEmpty:
    """Internal placeholder for () (empty tuple type).

    Used by _TupleType.__getitem__ for Tuple[()], so that () as an
    argument can be distinguished from no arguments at all; converted
    back to () in _GenericAlias.__init__.
    """
class _TypingEllipsis:
    """Internal placeholder for ... (ellipsis), e.g. in Callable[..., int];
    converted back to ... in _GenericAlias.__init__."""
# Bookkeeping attributes added by the typing machinery itself.
_TYPING_INTERNALS = ['__parameters__', '__orig_bases__', '__orig_class__',
                     '_is_protocol', '_is_runtime_protocol']
# Special names that are never considered protocol members.
_SPECIAL_NAMES = ['__abstractmethods__', '__annotations__', '__dict__', '__doc__',
                  '__init__', '__module__', '__new__', '__slots__',
                  '__subclasshook__', '__weakref__', '__class_getitem__']
# Attributes excluded from protocol-member detection (see _get_protocol_attrs).
EXCLUDED_ATTRIBUTES = _TYPING_INTERNALS + _SPECIAL_NAMES + ['_MutableMapping__marker']
def _get_protocol_attrs(cls):
    """Collect protocol members from a class and its bases.

    Members come from class dicts and from __annotations__; special
    names, typing-internal bookkeeping attributes and abc machinery
    (``_abc_*``) are excluded.
    """
    members = set()
    # cls.__mro__[:-1] drops `object`; Protocol/Generic themselves are
    # skipped explicitly.
    for base in cls.__mro__[:-1]:
        if base.__name__ in ('Protocol', 'Generic'):
            continue
        annotations = getattr(base, '__annotations__', {})
        for name in (*base.__dict__, *annotations):
            if not name.startswith('_abc_') and name not in EXCLUDED_ATTRIBUTES:
                members.add(name)
    return members
def _is_callable_members_only(cls):
    """Return True if every protocol member of *cls* is callable.

    PEP 544 prohibits issubclass() against protocols with non-method
    members.
    """
    for attr in _get_protocol_attrs(cls):
        if not callable(getattr(cls, attr, None)):
            return False
    return True
def _no_init_or_replace_init(self, *args, **kwargs):
    """__init__ stand-in installed on Protocol classes.

    Forbids instantiating a protocol itself; for a concrete subclass it
    finds the real __init__ in the MRO, installs it on the class so
    subsequent instantiations bypass this stand-in, and calls it.
    """
    cls = type(self)
    if cls._is_protocol:
        raise TypeError('Protocols cannot be instantiated')
    # Already using a custom `__init__`: nothing to replace.  Skipping
    # this check can lead to RecursionError.
    if cls.__init__ is not _no_init_or_replace_init:
        return
    # The first instantiation of the subclass lands here and searches
    # for a proper `__init__` in the MRO.  The found `__init__` replaces
    # this stand-in on the class, so subsequent
    # instantiation of the protocol subclass will thus use the new
    # `__init__` and no longer call `_no_init_or_replace_init`.
    for base in cls.__mro__:
        init = base.__dict__.get('__init__', _no_init_or_replace_init)
        if init is not _no_init_or_replace_init:
            cls.__init__ = init
            break
    else:
        # should not happen
        cls.__init__ = object.__init__
    # Finally run the (possibly replaced) initializer for this instance.
    cls.__init__(self, *args, **kwargs)
def _caller(depth=1, default='__main__'):
try:
return sys._getframe(depth + 1).f_globals.get('__name__', default)
except (AttributeError, ValueError): # For platforms without _getframe()
return None
def _allow_reckless_class_checks(depth=3):
    """Allow instance and class checks for special stdlib modules.

    The abc and functools modules indiscriminately call isinstance()
    and issubclass() on the whole MRO of a user class, which may
    contain protocols.
    """
    return _caller(depth) in {'abc', 'functools', None}
# Non-protocol base classes that protocol classes are allowed to inherit
# from (checked in Protocol.__init_subclass__).
_PROTO_ALLOWLIST = {
    'collections.abc': [
        'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
        'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
    ],
    'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'],
}
class _ProtocolMeta(ABCMeta):
    # This metaclass is really unfortunate and exists only because of
    # the lack of __instancehook__.
    def __instancecheck__(cls, instance):
        # We need this method for situations where attributes are
        # assigned in __init__.
        if (
            getattr(cls, '_is_protocol', False) and
            not getattr(cls, '_is_runtime_protocol', False) and
            not _allow_reckless_class_checks(depth=2)
        ):
            raise TypeError("Instance and class checks can only be used with"
                            " @runtime_checkable protocols")
        # Fast path: a genuine subclass instance (or a non-protocol /
        # methods-only protocol) passes via regular subclassing.
        if ((not getattr(cls, '_is_protocol', False) or
                _is_callable_members_only(cls)) and
                issubclass(instance.__class__, cls)):
            return True
        if cls._is_protocol:
            # Structural check: the instance must expose every protocol
            # member.
            if all(hasattr(instance, attr) and
                    # All *methods* can be blocked by setting them to None.
                    (not callable(getattr(cls, attr, None)) or
                     getattr(instance, attr) is not None)
                    for attr in _get_protocol_attrs(cls)):
                return True
        return super().__instancecheck__(instance)
class Protocol(Generic, metaclass=_ProtocolMeta):
    """Base class for protocol classes (structural subtyping, PEP 544).

    Protocol classes are defined as::

        class Proto(Protocol):
            def meth(self) -> int: ...

    Decorate with @runtime_checkable to allow isinstance()/issubclass()
    checks against the protocol.
    """
    __slots__ = ()
    _is_protocol = True
    _is_runtime_protocol = False
    def __init_subclass__(cls, *args, **kwargs):
        super().__init_subclass__(*args, **kwargs)
        # Determine if this is a protocol or a concrete subclass.
        if not cls.__dict__.get('_is_protocol', False):
            cls._is_protocol = any(b is Protocol for b in cls.__bases__)
        # Set (or override) the protocol subclass hook.
        def _proto_hook(other):
            if not cls.__dict__.get('_is_protocol', False):
                return NotImplemented
            # First, perform various sanity checks.
            if not getattr(cls, '_is_runtime_protocol', False):
                if _allow_reckless_class_checks():
                    return NotImplemented
                raise TypeError("Instance and class checks can only be used with"
                                " @runtime_checkable protocols")
            if not _is_callable_members_only(cls):
                if _allow_reckless_class_checks():
                    return NotImplemented
                raise TypeError("Protocols with non-method members"
                                " don't support issubclass()")
            if not isinstance(other, type):
                # Same error message as for issubclass(1, int).
                raise TypeError('issubclass() arg 1 must be a class')
            # Second, perform the actual structural compatibility check.
            for attr in _get_protocol_attrs(cls):
                for base in other.__mro__:
                    # Check if the members appears in the class dictionary...
                    if attr in base.__dict__:
                        if base.__dict__[attr] is None:
                            return NotImplemented
                        break
                    # ...or in annotations, if it is a sub-protocol.
                    annotations = getattr(base, '__annotations__', {})
                    if (isinstance(annotations, collections.abc.Mapping) and
                            attr in annotations and
                            issubclass(other, Generic) and other._is_protocol):
                        break
                else:
                    return NotImplemented
            return True
        if '__subclasshook__' not in cls.__dict__:
            cls.__subclasshook__ = _proto_hook
        # We have nothing more to do for non-protocol subclasses.
        if not cls._is_protocol:
            return
        # Check consistency of bases: only protocols and a small
        # allowlist of ABCs may appear.
        for base in cls.__bases__:
            if not (base in (object, Generic) or
                    base.__module__ in _PROTO_ALLOWLIST and
                    base.__name__ in _PROTO_ALLOWLIST[base.__module__] or
                    issubclass(base, Generic) and base._is_protocol):
                raise TypeError('Protocols can only inherit from other'
                                ' protocols, got %r' % base)
        cls.__init__ = _no_init_or_replace_init
class _AnnotatedAlias(_GenericAlias, _root=True):
    """Runtime representation of an annotated type.

    At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type
    't' with extra metadata.  The alias behaves like a normal typing
    alias, instantiating is the same as instantiating the underlying
    type; binding it to types is also the same.
    """
    def __init__(self, origin, metadata):
        if isinstance(origin, _AnnotatedAlias):
            # Merge nested Annotated: inner metadata comes first.
            metadata = origin.__metadata__ + metadata
            origin = origin.__origin__
        super().__init__(origin, origin)
        self.__metadata__ = metadata
    def copy_with(self, params):
        # Only the underlying type is substitutable; metadata is kept.
        assert len(params) == 1
        new_type = params[0]
        return _AnnotatedAlias(new_type, self.__metadata__)
    def __repr__(self):
        return "typing.Annotated[{}, {}]".format(
            _type_repr(self.__origin__),
            ", ".join(repr(a) for a in self.__metadata__)
        )
    def __reduce__(self):
        return operator.getitem, (
            Annotated, (self.__origin__,) + self.__metadata__
        )
    def __eq__(self, other):
        if not isinstance(other, _AnnotatedAlias):
            return NotImplemented
        return (self.__origin__ == other.__origin__
                and self.__metadata__ == other.__metadata__)
    def __hash__(self):
        return hash((self.__origin__, self.__metadata__))
    def __getattr__(self, attr):
        if attr in {'__name__', '__qualname__'}:
            return 'Annotated'
        return super().__getattr__(attr)
class Annotated:
    """Add context specific metadata to a type.

    Example: Annotated[int, runtime_check.Unsigned] indicates to the
    hypothetical runtime_check module that this type is an unsigned int.
    The first argument must be a valid type; at least one metadata
    argument is required.
    """
    __slots__ = ()
    def __new__(cls, *args, **kwargs):
        raise TypeError("Type Annotated cannot be instantiated.")
    @_tp_cache
    def __class_getitem__(cls, params):
        if not isinstance(params, tuple) or len(params) < 2:
            raise TypeError("Annotated[...] should be used "
                            "with at least two arguments (a type and an "
                            "annotation).")
        msg = "Annotated[t, ...]: t must be a type."
        origin = _type_check(params[0], msg, allow_special_forms=True)
        metadata = tuple(params[1:])
        return _AnnotatedAlias(origin, metadata)
    def __init_subclass__(cls, *args, **kwargs):
        raise TypeError(
            "Cannot subclass {}.Annotated".format(cls.__module__)
        )
def runtime_checkable(cls):
    """Mark a protocol class as a runtime protocol.

    Such a protocol can be used with isinstance() and issubclass()
    checks against the protocol members (their presence, not their
    type signatures).
    """
    if not issubclass(cls, Generic) or not cls._is_protocol:
        raise TypeError('@runtime_checkable can be only applied to protocol classes,'
                        ' got %r' % cls)
    cls._is_runtime_protocol = True
    return cls
def cast(typ, val):
    """Cast a value to a type.

    This returns the value unchanged.  To the type checker this signals
    that the return value has the designated type, but at runtime we
    intentionally don't check anything (we want this to be as fast as
    possible).
    """
    return val
def _get_defaults(func):
try:
code = func.__code__
except AttributeError:
return {}
pos_count = code.co_argcount
arg_names = code.co_varnames
arg_names = arg_names[:pos_count]
defaults = func.__defaults__ or ()
kwdefaults = func.__kwdefaults__
res = dict(kwdefaults) if kwdefaults else {}
pos_offset = pos_count - len(defaults)
for name, value in zip(arg_names[pos_offset:], defaults):
assert name not in res
res[name] = value
return res
# Object types for which get_type_hints() returns empty annotations
# instead of raising TypeError.
_allowed_types = (types.FunctionType, types.BuiltinFunctionType,
                  types.MethodType, types.ModuleType,
                  WrapperDescriptorType, MethodWrapperType, MethodDescriptorType)
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
    """Return type hints for an object (module, class, method or function).

    String literals and ForwardRef annotations are evaluated in the
    appropriate namespaces, 'None' is replaced by type(None), and
    Optional[t] is added for parameters with a default of None.  Unless
    *include_extras* is True, Annotated metadata is stripped.

    Raises TypeError for objects that cannot carry annotations.
    """
    if getattr(obj, '__no_type_check__', None):
        return {}
    # Classes require a special treatment.
    if isinstance(obj, type):
        hints = {}
        # Walk the MRO base-first so subclasses override base hints.
        for base in reversed(obj.__mro__):
            if globalns is None:
                base_globals = getattr(sys.modules.get(base.__module__, None), '__dict__', {})
            else:
                base_globals = globalns
            ann = base.__dict__.get('__annotations__', {})
            if isinstance(ann, types.GetSetDescriptorType):
                # Annotation descriptor on classes without own
                # annotations: treat as empty.
                ann = {}
            base_locals = dict(vars(base)) if localns is None else localns
            if localns is None and globalns is None:
                # This is surprising, but required.  Before Python 3.10,
                # get_type_hints only evaluated the globalns of
                # a class. To maintain backwards compatibility, we reverse
                # the globalns and localns order so that eval() looks into
                # *base_globals* first rather than *base_locals*.
                # This only affects ForwardRefs.
                base_globals, base_locals = base_locals, base_globals
            for name, value in ann.items():
                if value is None:
                    value = type(None)
                if isinstance(value, str):
                    value = ForwardRef(value, is_argument=False, is_class=True)
                value = _eval_type(value, base_globals, base_locals)
                hints[name] = value
        return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
    if globalns is None:
        if isinstance(obj, types.ModuleType):
            globalns = obj.__dict__
        else:
            nsobj = obj
            # Find globalns for the unwrapped object.
            while hasattr(nsobj, '__wrapped__'):
                nsobj = nsobj.__wrapped__
            globalns = getattr(nsobj, '__globals__', {})
        if localns is None:
            localns = globalns
    elif localns is None:
        localns = globalns
    hints = getattr(obj, '__annotations__', None)
    if hints is None:
        # Return empty annotations for something that _could_ have them.
        if isinstance(obj, _allowed_types):
            return {}
        else:
            raise TypeError('{!r} is not a module, class, method, '
                            'or function.'.format(obj))
    defaults = _get_defaults(obj)
    hints = dict(hints)
    for name, value in hints.items():
        if value is None:
            value = type(None)
        if isinstance(value, str):
            # class-level forward refs were handled above, this must be either
            # a module-level annotation or a function argument annotation
            value = ForwardRef(
                value,
                is_argument=not isinstance(obj, types.ModuleType),
                is_class=False,
            )
        value = _eval_type(value, globalns, localns)
        if name in defaults and defaults[name] is None:
            # Parameters with a None default get Optional[...] implied.
            value = Optional[value]
        hints[name] = value
    return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
def _strip_annotations(t):
    """Strip the metadata from a type (recursively removes Annotated)."""
    if isinstance(t, _AnnotatedAlias):
        return _strip_annotations(t.__origin__)
    if isinstance(t, _GenericAlias):
        stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
        if stripped_args == t.__args__:
            # Nothing changed: keep the original object.
            return t
        return t.copy_with(stripped_args)
    if isinstance(t, GenericAlias):
        stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
        if stripped_args == t.__args__:
            return t
        return GenericAlias(t.__origin__, stripped_args)
    if isinstance(t, types.UnionType):
        stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
        if stripped_args == t.__args__:
            return t
        # Rebuild an ``X | Y`` union from the stripped arguments.
        return functools.reduce(operator.or_, stripped_args)
    return t
def get_origin(tp):
    """Get the unsubscripted version of a type.

    This supports generic types, Callable, Tuple, Union, Literal, Final,
    ClassVar and Annotated.  Returns None for unsupported types, e.g.::

        get_origin(List[int]) is list
        get_origin(Union[int, str]) is Union
    """
    if isinstance(tp, _AnnotatedAlias):
        return Annotated
    if isinstance(tp, (_BaseGenericAlias, GenericAlias,
                       ParamSpecArgs, ParamSpecKwargs)):
        return tp.__origin__
    if tp is Generic:
        # Bare Generic is its own origin.
        return Generic
    if isinstance(tp, types.UnionType):
        return types.UnionType
    return None
def get_args(tp):
    """Get type arguments with all substitutions performed.

    For unions, basic simplifications used by Union constructor are
    performed, e.g.::

        get_args(Dict[str, int]) == (str, int)
        get_args(Callable[[], T][int]) == ([], int)
    """
    if isinstance(tp, _AnnotatedAlias):
        return (tp.__origin__,) + tp.__metadata__
    if isinstance(tp, (_GenericAlias, GenericAlias)):
        res = tp.__args__
        if (tp.__origin__ is collections.abc.Callable
                and not (len(res) == 2 and _is_param_expr(res[0]))):
            # Re-nest Callable's flattened argument list.
            res = (list(res[:-1]), res[-1])
        return res
    if isinstance(tp, types.UnionType):
        return tp.__args__
    return ()
def is_typeddict(tp):
    """Check if an annotation is a TypedDict class.

    For example::
        class Film(TypedDict): ...
        is_typeddict(Film)  # => True
        is_typeddict(Union[list, str])  # => False
    """
    return isinstance(tp, _TypedDictMeta)
def no_type_check(arg):
    """Decorator to indicate that annotations are not type hints.

    May be applied to a class or a function.  With a class, applies
    recursively to all methods and classes defined in that class (but
    not to methods defined in its superclasses or subclasses).

    Mutates the object(s) in place by setting __no_type_check__ = True.
    """
    if isinstance(arg, type):
        arg_attrs = arg.__dict__.copy()
        # Drop attributes that refer back to the class itself or its
        # bases (avoid marking/recursing into them).
        for attr, val in arg.__dict__.items():
            if val in arg.__bases__ + (arg,):
                arg_attrs.pop(attr)
        for obj in arg_attrs.values():
            if isinstance(obj, types.FunctionType):
                obj.__no_type_check__ = True
            if isinstance(obj, type):
                no_type_check(obj)
    try:
        arg.__no_type_check__ = True
    except TypeError:  # built-in classes
        pass
    return arg
def no_type_check_decorator(decorator):
    """Decorator to give another decorator the @no_type_check effect.

    This wraps the decorator with something that wraps the decorated
    function in @no_type_check.
    """
    @functools.wraps(decorator)
    def wrapped_decorator(*args, **kwds):
        func = decorator(*args, **kwds)
        func = no_type_check(func)
        return func
    return wrapped_decorator
def _overload_dummy(*args, **kwds):
    """Helper for @overload: calling an overload stub is always an error."""
    raise NotImplementedError(
        "You should not call an overloaded function. "
        "A series of @overload-decorated functions "
        "outside a stub module should always be followed "
        "by an implementation that is not @overload-ed.")


def overload(func):
    """Decorator for overloaded stubs; the decorated body is never executed.

    The stub's body is discarded entirely and replaced by a dummy that
    raises if anyone actually calls it at runtime.
    """
    return _overload_dummy
def final(f):
    """Decorator marking methods/classes as final for static type checkers."""
    try:
        # Best effort: leave a runtime-introspectable marker behind.
        setattr(f, '__final__', True)
    except (AttributeError, TypeError):
        # Not writable: __slots__ / read-only property gives AttributeError,
        # a builtin class gives TypeError.  The decorator is then a no-op.
        pass
    return f
T = TypeVar('T')
KT = TypeVar('KT')
VT = TypeVar('VT')
T_co = TypeVar('T_co', covariant=True)
V_co = TypeVar('V_co', covariant=True)
VT_co = TypeVar('VT_co', covariant=True)
T_contra = TypeVar('T_contra', contravariant=True)
CT_co = TypeVar('CT_co', covariant=True, bound=type)
AnyStr = TypeVar('AnyStr', bytes, str)
_alias = _SpecialGenericAlias
Hashable = _alias(collections.abc.Hashable, 0)
Awaitable = _alias(collections.abc.Awaitable, 1)
Coroutine = _alias(collections.abc.Coroutine, 3)
AsyncIterable = _alias(collections.abc.AsyncIterable, 1)
AsyncIterator = _alias(collections.abc.AsyncIterator, 1)
Iterable = _alias(collections.abc.Iterable, 1)
Iterator = _alias(collections.abc.Iterator, 1)
Reversible = _alias(collections.abc.Reversible, 1)
Sized = _alias(collections.abc.Sized, 0)
Container = _alias(collections.abc.Container, 1)
Collection = _alias(collections.abc.Collection, 1)
Callable = _CallableType(collections.abc.Callable, 2)
Callable.__doc__ = \
"""Callable type; Callable[[int], str] is a function of (int) -> str.
The subscription syntax must always be used with exactly two
values: the argument list and the return type. The argument list
must be a list of types or ellipsis; the return type must be a single type.
There is no syntax to indicate optional or keyword arguments,
such function types are rarely used as callback types.
"""
AbstractSet = _alias(collections.abc.Set, 1, name='AbstractSet')
MutableSet = _alias(collections.abc.MutableSet, 1)
Mapping = _alias(collections.abc.Mapping, 2)
MutableMapping = _alias(collections.abc.MutableMapping, 2)
Sequence = _alias(collections.abc.Sequence, 1)
MutableSequence = _alias(collections.abc.MutableSequence, 1)
ByteString = _alias(collections.abc.ByteString, 0)
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
Tuple.__doc__ = \
"""Tuple type; Tuple[X, Y] is the cross-product type of X and Y.
Example: Tuple[T1, T2] is a tuple of two elements corresponding
to type variables T1 and T2. Tuple[int, float, str] is a tuple
of an int, a float and a string.
To specify a variable-length tuple of homogeneous type, use Tuple[T, ...].
"""
List = _alias(list, 1, inst=False, name='List')
Deque = _alias(collections.deque, 1, name='Deque')
Set = _alias(set, 1, inst=False, name='Set')
FrozenSet = _alias(frozenset, 1, inst=False, name='FrozenSet')
MappingView = _alias(collections.abc.MappingView, 1)
KeysView = _alias(collections.abc.KeysView, 1)
ItemsView = _alias(collections.abc.ItemsView, 2)
ValuesView = _alias(collections.abc.ValuesView, 1)
ContextManager = _alias(contextlib.AbstractContextManager, 1, name='ContextManager')
AsyncContextManager = _alias(contextlib.AbstractAsyncContextManager, 1, name='AsyncContextManager')
Dict = _alias(dict, 2, inst=False, name='Dict')
DefaultDict = _alias(collections.defaultdict, 2, name='DefaultDict')
OrderedDict = _alias(collections.OrderedDict, 2)
Counter = _alias(collections.Counter, 1)
ChainMap = _alias(collections.ChainMap, 2)
Generator = _alias(collections.abc.Generator, 3)
AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)
Type = _alias(type, 1, inst=False, name='Type')
Type.__doc__ = \
"""A special construct usable to annotate class objects.
For example, suppose we have the following classes::
class User: ... # Abstract base for User classes
class BasicUser(User): ...
class ProUser(User): ...
class TeamUser(User): ...
And a function that takes a class argument that's a subclass of
User and returns an instance of the corresponding class::
U = TypeVar('U', bound=User)
def new_user(user_class: Type[U]) -> U:
user = user_class()
# (Here we could write the user object to a database)
return user
joe = new_user(BasicUser)
At this point the type checker knows that joe has type BasicUser.
"""
@runtime_checkable
class SupportsInt(Protocol):
    """An ABC with one abstract method __int__."""
    __slots__ = ()
    @abstractmethod
    def __int__(self) -> int:
        pass
@runtime_checkable
class SupportsFloat(Protocol):
    """An ABC with one abstract method __float__."""
    __slots__ = ()
    @abstractmethod
    def __float__(self) -> float:
        pass
@runtime_checkable
class SupportsComplex(Protocol):
    """An ABC with one abstract method __complex__."""
    __slots__ = ()
    @abstractmethod
    def __complex__(self) -> complex:
        pass
@runtime_checkable
class SupportsBytes(Protocol):
    """An ABC with one abstract method __bytes__."""
    __slots__ = ()
    @abstractmethod
    def __bytes__(self) -> bytes:
        pass
@runtime_checkable
class SupportsIndex(Protocol):
    """An ABC with one abstract method __index__."""
    __slots__ = ()
    @abstractmethod
    def __index__(self) -> int:
        pass
@runtime_checkable
class SupportsAbs(Protocol[T_co]):
    """An ABC with one abstract method __abs__ that is covariant in its return type."""
    __slots__ = ()
    @abstractmethod
    def __abs__(self) -> T_co:
        pass
@runtime_checkable
class SupportsRound(Protocol[T_co]):
    """An ABC with one abstract method __round__ that is covariant in its return type."""
    __slots__ = ()
    @abstractmethod
    def __round__(self, ndigits: int = 0) -> T_co:
        pass
def _make_nmtuple(name, types, module, defaults=()):
    """Build a collections.namedtuple class carrying checked field annotations.

    *types* is an iterable of (field, annotation) pairs; each annotation is
    validated via _type_check before being attached.
    """
    field_names = [field for field, _ in types]
    annotations = {field: _type_check(tp, f"field {field} annotation must be a type")
                   for field, tp in types}
    nm_tpl = collections.namedtuple(name, field_names,
                                    defaults=defaults, module=module)
    # Mirror the annotations onto __new__ so get_type_hints() sees them.
    nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations
    return nm_tpl
# attributes prohibited to set in NamedTuple class syntax
_prohibited = frozenset({'__new__', '__init__', '__slots__', '__getnewargs__',
                         '_fields', '_field_defaults',
                         '_make', '_replace', '_asdict', '_source'})
# attributes that are allowed in the class namespace but never copied onto
# the generated namedtuple class
_special = frozenset({'__module__', '__name__', '__annotations__'})
class NamedTupleMeta(type):
    """Metaclass for the class form of NamedTuple.

    Collects the annotated fields from the class namespace, builds the
    underlying collections.namedtuple class, and copies user-defined
    attributes onto it.
    """
    def __new__(cls, typename, bases, ns):
        # NamedTuple.__mro_entries__ guarantees the single base is _NamedTuple.
        assert bases[0] is _NamedTuple
        types = ns.get('__annotations__', {})
        default_names = []
        for field_name in types:
            if field_name in ns:
                # A field with a value in the namespace has a default.
                default_names.append(field_name)
            elif default_names:
                # A field without a default may not follow defaulted fields.
                raise TypeError(f"Non-default namedtuple field {field_name} "
                                f"cannot follow default field"
                                f"{'s' if len(default_names) > 1 else ''} "
                                f"{', '.join(default_names)}")
        nm_tpl = _make_nmtuple(typename, types.items(),
                               defaults=[ns[n] for n in default_names],
                               module=ns['__module__'])
        # update from user namespace without overriding special namedtuple attributes
        for key in ns:
            if key in _prohibited:
                raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
            elif key not in _special and key not in nm_tpl._fields:
                setattr(nm_tpl, key, ns[key])
        return nm_tpl
def NamedTuple(typename, fields=None, /, **kwargs):
    """Functional form of NamedTuple: build a typed namedtuple class.

    Fields are given either as a list of (name, type) pairs or as keyword
    arguments, but not both.
    """
    if fields is not None and kwargs:
        raise TypeError("Either list of fields or keywords"
                        " can be provided to NamedTuple, not both")
    if fields is None:
        fields = kwargs.items()
    return _make_nmtuple(typename, fields, module=_caller())
# Instance of NamedTupleMeta that serves as the actual base class injected
# by __mro_entries__ when a class inherits from NamedTuple.
_NamedTuple = type.__new__(NamedTupleMeta, 'NamedTuple', (), {})
def _namedtuple_mro_entries(bases):
    # NamedTuple may only be used as the sole base class.
    if len(bases) > 1:
        raise TypeError("Multiple inheritance with NamedTuple is not supported")
    assert bases[0] is NamedTuple
    return (_NamedTuple,)
NamedTuple.__mro_entries__ = _namedtuple_mro_entries
class _TypedDictMeta(type):
    """Metaclass backing TypedDict.

    Creates plain ``dict`` subclasses that carry ``__annotations__``,
    ``__required_keys__`` and ``__optional_keys__`` for introspection.
    """
    def __new__(cls, name, bases, ns, total=True):
        for base in bases:
            # Only other TypedDict classes may appear among the bases.
            if type(base) is not _TypedDictMeta:
                raise TypeError('cannot inherit from both a TypedDict type '
                                'and a non-TypedDict base class')
        # The generated class subclasses dict directly, not the bases.
        tp_dict = type.__new__(_TypedDictMeta, name, (dict,), ns)
        annotations = {}
        own_annotations = ns.get('__annotations__', {})
        own_annotation_keys = set(own_annotations.keys())
        msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
        own_annotations = {
            n: _type_check(tp, msg, module=tp_dict.__module__)
            for n, tp in own_annotations.items()
        }
        required_keys = set()
        optional_keys = set()
        # Inherit annotations and required/optional key sets from the bases.
        for base in bases:
            annotations.update(base.__dict__.get('__annotations__', {}))
            required_keys.update(base.__dict__.get('__required_keys__', ()))
            optional_keys.update(base.__dict__.get('__optional_keys__', ()))
        annotations.update(own_annotations)
        # total=True marks this class's own keys as required, else optional.
        if total:
            required_keys.update(own_annotation_keys)
        else:
            optional_keys.update(own_annotation_keys)
        tp_dict.__annotations__ = annotations
        tp_dict.__required_keys__ = frozenset(required_keys)
        tp_dict.__optional_keys__ = frozenset(optional_keys)
        if not hasattr(tp_dict, '__total__'):
            tp_dict.__total__ = total
        return tp_dict
    __call__ = dict  # static method
    def __subclasscheck__(cls, other):
        # Typed dicts are only for static structural subtyping.
        raise TypeError('TypedDict does not support instance and class checks')
    __instancecheck__ = __subclasscheck__
def TypedDict(typename, fields=None, /, *, total=True, **kwargs):
    """Functional form of TypedDict: create a typed-dict class at runtime.

    Fields are given either as a dict of {name: type} or as keyword
    arguments, but not both.  ``total=False`` makes all keys optional.
    """
    if fields is not None and kwargs:
        raise TypeError("TypedDict takes either a dict or keyword arguments,"
                        " but not both")
    if fields is None:
        fields = kwargs
    ns = {'__annotations__': dict(fields)}
    module = _caller()
    if module is not None:
        # Setting correct module is necessary to make typed dict classes pickleable.
        ns['__module__'] = module
    return _TypedDictMeta(typename, (), ns, total=total)
# Base injected by __mro_entries__ so `class X(TypedDict):` works.
_TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {})
TypedDict.__mro_entries__ = lambda bases: (_TypedDict,)
class NewType:
    """NewType creates simple unique types with almost zero runtime overhead.

    Calling an instance returns its argument unchanged (identity function),
    while static checkers treat the new type as a distinct subtype of *tp*.
    """
    # Calling the NewType instance is the identity function.
    __call__ = _idfunc
    def __init__(self, name, tp):
        self.__qualname__ = name
        if '.' in name:
            # Keep only the last component for __name__ (e.g. 'Mod.Id' -> 'Id').
            name = name.rpartition('.')[-1]
        self.__name__ = name
        self.__supertype__ = tp
        def_mod = _caller()
        if def_mod != 'typing':
            self.__module__ = def_mod
    def __repr__(self):
        return f'{self.__module__}.{self.__qualname__}'
    def __reduce__(self):
        # Pickle by qualified-name lookup in the defining module.
        return self.__qualname__
    def __or__(self, other):
        return Union[self, other]
    def __ror__(self, other):
        return Union[other, self]
# Python-version-specific alias (Python 2: unicode; Python 3: str)
Text = str
# Constant that's True when type checking, but False here (at runtime).
TYPE_CHECKING = False
class IO(Generic[AnyStr]):
    """Generic, abstract version of the return of open().

    All methods are abstract stubs mirroring the io-stream API; AnyStr is
    bytes for binary streams and str for text streams.
    """
    __slots__ = ()
    @property
    @abstractmethod
    def mode(self) -> str:
        pass
    @property
    @abstractmethod
    def name(self) -> str:
        pass
    @abstractmethod
    def close(self) -> None:
        pass
    @property
    @abstractmethod
    def closed(self) -> bool:
        pass
    @abstractmethod
    def fileno(self) -> int:
        pass
    @abstractmethod
    def flush(self) -> None:
        pass
    @abstractmethod
    def isatty(self) -> bool:
        pass
    @abstractmethod
    def read(self, n: int = -1) -> AnyStr:
        pass
    @abstractmethod
    def readable(self) -> bool:
        pass
    @abstractmethod
    def readline(self, limit: int = -1) -> AnyStr:
        pass
    @abstractmethod
    def readlines(self, hint: int = -1) -> List[AnyStr]:
        pass
    @abstractmethod
    def seek(self, offset: int, whence: int = 0) -> int:
        pass
    @abstractmethod
    def seekable(self) -> bool:
        pass
    @abstractmethod
    def tell(self) -> int:
        pass
    @abstractmethod
    def truncate(self, size: int = None) -> int:
        pass
    @abstractmethod
    def writable(self) -> bool:
        pass
    @abstractmethod
    def write(self, s: AnyStr) -> int:
        pass
    @abstractmethod
    def writelines(self, lines: List[AnyStr]) -> None:
        pass
    @abstractmethod
    def __enter__(self) -> 'IO[AnyStr]':
        pass
    @abstractmethod
    def __exit__(self, type, value, traceback) -> None:
        pass
class BinaryIO(IO[bytes]):
    """Typed version of the return of open() in binary mode."""
    __slots__ = ()
    @abstractmethod
    def write(self, s: Union[bytes, bytearray]) -> int:
        pass
    @abstractmethod
    def __enter__(self) -> 'BinaryIO':
        pass
class TextIO(IO[str]):
    """Typed version of the return of open() in text mode."""
    __slots__ = ()
    @property
    @abstractmethod
    def buffer(self) -> BinaryIO:
        pass
    @property
    @abstractmethod
    def encoding(self) -> str:
        pass
    @property
    @abstractmethod
    def errors(self) -> Optional[str]:
        pass
    @property
    @abstractmethod
    def line_buffering(self) -> bool:
        pass
    @property
    @abstractmethod
    def newlines(self) -> Any:
        pass
    @abstractmethod
    def __enter__(self) -> 'TextIO':
        pass
class _DeprecatedType(type):
    """Metaclass emitting a DeprecationWarning when class-body names are read."""

    def __getattribute__(cls, name):
        # Warn only for names defined directly in the class body;
        # __dict__ and __module__ are exempt to keep introspection quiet.
        is_exempt = name in ("__dict__", "__module__")
        if not is_exempt and name in cls.__dict__:
            warnings.warn(
                f"{cls.__name__} is deprecated, import directly "
                f"from typing instead. {cls.__name__} will be removed "
                "in Python 3.12.",
                DeprecationWarning,
                stacklevel=2,
            )
        return super().__getattribute__(name)
class io(metaclass=_DeprecatedType):
    """Wrapper namespace for IO generic classes (deprecated access path)."""
    __all__ = ['IO', 'TextIO', 'BinaryIO']
    IO = IO
    TextIO = TextIO
    BinaryIO = BinaryIO
# Register the pseudo-module under '<module>.io' in sys.modules so the old
# submodule-style access keeps resolving.
io.__name__ = __name__ + '.io'
sys.modules[io.__name__] = io
# Generic aliases over the stdlib re types.
Pattern = _alias(stdlib_re.Pattern, 1)
Match = _alias(stdlib_re.Match, 1)
class re(metaclass=_DeprecatedType):
    """Wrapper namespace for re type aliases (deprecated access path)."""
    __all__ = ['Pattern', 'Match']
    Pattern = Pattern
    Match = Match
# Register the pseudo-module under '<module>.re' in sys.modules.
re.__name__ = __name__ + '.re'
sys.modules[re.__name__] = re
def reveal_type(obj: T, /) -> T:
    """Print the runtime type of *obj* to stderr and return it unchanged.

    Runtime counterpart of the static checker's reveal_type().
    """
    runtime_name = type(obj).__name__
    print(f"Runtime type is {runtime_name!r}", file=sys.stderr)
    return obj
| true | true |
f733f0215f9234ea817c6a0b1d3d49541dae3c9f | 30,863 | py | Python | resolwe_bio/tests/processes/test_expression.py | HudoGriz/resolwe-bio | 4f7363cfa7d9d5a43f1a70ef36c69be3faed7fea | [
"Apache-2.0"
] | null | null | null | resolwe_bio/tests/processes/test_expression.py | HudoGriz/resolwe-bio | 4f7363cfa7d9d5a43f1a70ef36c69be3faed7fea | [
"Apache-2.0"
] | null | null | null | resolwe_bio/tests/processes/test_expression.py | HudoGriz/resolwe-bio | 4f7363cfa7d9d5a43f1a70ef36c69be3faed7fea | [
"Apache-2.0"
] | null | null | null | # pylint: disable=missing-docstring
import os
from resolwe.flow.models import Data, Collection, Relation
from resolwe.flow.models.entity import RelationPartition, RelationType
from resolwe.test import tag_process, with_resolwe_host
from resolwe_bio.expression_filters.relation import replicate_groups
from resolwe_bio.utils.test import KBBioProcessTestCase
class ExpressionProcessorTestCase(KBBioProcessTestCase):
fixtures = ['relationtypes.yaml']
    @tag_process('cufflinks', 'cuffmerge')
    def test_cufflinks(self):
        """Assemble transcripts with cufflinks and merge them with cuffmerge."""
        with self.preparation_stage():
            genome = self.prepare_genome()
            reads = self.prepare_reads()
            annotation_gtf = self.prepare_annotation('annotation dicty.gff.gz')
            annotation_gff3 = self.prepare_annotation_gff()
            aligned_reads = self.run_process('alignment-hisat2', {
                'genome': genome.pk,
                'reads': reads.pk,
                'spliced_alignments': {
                    'cufflinks': True
                }
            })
        inputs = {
            'alignment': aligned_reads.pk,
            'annotation': annotation_gtf.pk,
            'genome': genome.pk}
        cuff_exp = self.run_process('cufflinks', inputs)
        self.assertFile(cuff_exp, 'transcripts', 'cufflinks_transcripts.gtf', sort=True)
        self.assertFields(cuff_exp, 'species', 'Dictyostelium discoideum')
        self.assertFields(cuff_exp, 'build', 'dd-05-2009')
        self.assertFields(cuff_exp, 'source', 'DICTYBASE')
        # Second cufflinks run on the same inputs, needed as a second input
        # for the cuffmerge runs below.
        inputs = {
            'alignment': aligned_reads.pk,
            'annotation': annotation_gtf.pk,
            'genome': genome.pk}
        cuff_exp2 = self.run_process('cufflinks', inputs)
        # Merge using the GFF3 annotation.
        inputs = {
            'expressions': [cuff_exp.pk, cuff_exp2.pk],
            'gff': annotation_gff3.pk,
            'genome': genome.pk}
        cuff_merge_gff3 = self.run_process('cuffmerge', inputs)
        self.assertFile(cuff_merge_gff3, 'annot', 'cuffmerge_transcripts.gtf')
        self.assertFields(cuff_merge_gff3, 'species', 'Dictyostelium discoideum')
        self.assertFields(cuff_merge_gff3, 'build', 'dd-05-2009')
        # NOTE(review): this re-checks cuff_exp, not cuff_merge_gff3 —
        # looks like a copy-paste slip; confirm the intended object.
        self.assertFields(cuff_exp, 'source', 'DICTYBASE')
        # Merge again, this time using the GTF annotation.
        inputs['gff'] = annotation_gtf.pk
        cuff_merge_gtf = self.run_process('cuffmerge', inputs)
        self.assertFile(cuff_merge_gtf, 'annot', 'cuffmerge_transcripts.gtf')
        self.assertFields(cuff_merge_gtf, 'species', 'Dictyostelium discoideum')
        self.assertFields(cuff_merge_gtf, 'build', 'dd-05-2009')
        # NOTE(review): same as above — checks cuff_exp rather than cuff_merge_gtf.
        self.assertFields(cuff_exp, 'source', 'DICTYBASE')
@tag_process('cuffquant')
def test_cuffquant(self):
with self.preparation_stage():
inputs = {
'src': 'cuffquant_mapping.bam',
'species': 'Homo sapiens',
'build': 'hg19'
}
bam = self.run_process('upload-bam', inputs)
annotation = self.prepare_annotation(
fn='hg19_chr20_small.gtf.gz',
source='UCSC',
species='Homo sapiens',
build='hg19'
)
inputs = {
'alignment': bam.id,
'annotation': annotation.id}
cuffquant = self.run_process('cuffquant', inputs)
self.assertFields(cuffquant, 'species', 'Homo sapiens')
self.assertFields(cuffquant, 'build', 'hg19')
self.assertFields(cuffquant, 'source', 'UCSC')
    @with_resolwe_host
    @tag_process('cuffnorm')
    def test_cuffnorm(self):
        """Normalize six cuffquant samples grouped into replicate relations."""
        with self.preparation_stage():
            collection = Collection.objects.create(
                name='Test collection',
                contributor=self.contributor
            )
            rel_type_group = RelationType.objects.get(name='group')
            replicate_group = Relation.objects.create(
                contributor=self.contributor,
                collection=collection,
                type=rel_type_group,
                category='Replicate'
            )
            inputs = {
                'src': 'cuffquant 1.cxb',
                'source': 'UCSC',
                'species': 'Homo sapiens',
                'build': 'hg19'
            }
            sample_1 = self.run_process("upload-cxb", inputs)
            inputs = {
                'src': 'cuffquant_2.cxb',
                'source': 'UCSC',
                'species': 'Homo sapiens',
                'build': 'hg19'
            }
            sample_2 = self.run_process("upload-cxb", inputs)
            inputs = {
                'src': '3-cuffquant.cxb',
                'source': 'UCSC',
                'species': 'Homo sapiens',
                'build': 'hg19'
            }
            sample_3 = self.run_process("upload-cxb", inputs)
            inputs = {
                'src': '4-cuffquant.cxb',
                'source': 'UCSC',
                'species': 'Homo sapiens',
                'build': 'hg19'
            }
            sample_4 = self.run_process("upload-cxb", inputs)
            inputs = {
                'src': '5-cuffquant.cxb',
                'source': 'UCSC',
                'species': 'Homo sapiens',
                'build': 'hg19'
            }
            sample_5 = self.run_process("upload-cxb", inputs)
            inputs = {
                'src': '6-cuffquant.cxb',
                'source': 'UCSC',
                'species': 'Homo sapiens',
                'build': 'hg19'
            }
            sample_6 = self.run_process("upload-cxb", inputs)
            # Partition the samples into replicate groups 1, 2 and 3.
            RelationPartition.objects.create(relation=replicate_group, entity=sample_1.entity, label='1')
            RelationPartition.objects.create(relation=replicate_group, entity=sample_2.entity, label='1')
            RelationPartition.objects.create(relation=replicate_group, entity=sample_3.entity, label='2')
            RelationPartition.objects.create(relation=replicate_group, entity=sample_4.entity, label='2')
            RelationPartition.objects.create(relation=replicate_group, entity=sample_5.entity, label='2')
            RelationPartition.objects.create(relation=replicate_group, entity=sample_6.entity, label='3')
            annotation = self.prepare_annotation(fn='hg19_chr20_small.gtf.gz', source='UCSC',
                                                 species='Homo sapiens', build='hg19')
        # The replicate_groups expression filter must reflect the partitions above.
        self.assertEqual(
            replicate_groups([
                {'__id': sample_1.id},
                {'__id': sample_2.id},
                {'__id': sample_3.id},
                {'__id': sample_4.id},
                {'__id': sample_5.id},
                {'__id': sample_6.id}
            ]),
            [1, 1, 2, 2, 2, 3]
        )
        inputs = {
            'cuffquant': [sample_1.pk, sample_2.pk, sample_3.pk, sample_4.pk, sample_5.pk, sample_6.pk],
            'annotation': annotation.id,
        }
        cuffnorm = self.run_process('cuffnorm', inputs)
        self.assertFile(cuffnorm, 'fpkm_means', 'cuffnorm_all_fpkm_means.txt')
        self.assertFile(cuffnorm, 'genes_fpkm', 'cuffnorm_genes.fpkm_table')
        self.assertFileExists(cuffnorm, 'raw_scatter')
        self.assertFields(cuffnorm, 'source', 'UCSC')
        self.assertFields(cuffnorm, 'species', 'Homo sapiens')
        self.assertFields(cuffnorm, 'build', 'hg19')
        # Presumably the most recently created Data object is an expression
        # spawned by cuffnorm — this relies on creation order; verify if flaky.
        exp = Data.objects.last()
        self.assertFile(exp, 'exp', 'cuffnorm_expression.tab.gz', compression='gzip')
        self.assertFile(exp, 'exp_set', 'cuffnorm_out_exp_set.txt.gz', compression='gzip')
        self.assertJSON(exp, exp.output['exp_set_json'], '', 'cuffnorm_exp_set.json.gz')
@tag_process('mappability-bcm')
def test_mappability(self):
with self.preparation_stage():
genome = self.prepare_genome()
annotation = self.prepare_annotation_gff()
mappability = self.run_process('mappability-bcm', {
'genome': genome.id,
'gff': annotation.id,
'length': 50,
})
self.assertFileExists(mappability, 'mappability')
@tag_process('expression-dicty', 'etc-bcm')
def test_expression_dicty(self):
with self.preparation_stage():
genome = self.prepare_genome()
reads = self.prepare_reads()
annotation = self.prepare_annotation_gff()
aligned_reads = self.run_process('alignment-hisat2', {
'genome': genome.pk,
'reads': reads.pk
})
mappa = self.run_process("upload-mappability", {"src": "purpureum_mappability_50.tab.gz"})
inputs = {
'alignment': aligned_reads.pk,
'gff': annotation.pk,
'mappable': mappa.pk}
expression = self.run_process('expression-dicty', inputs)
self.assertFile(expression, 'rpkm', 'expression_bcm_rpkm.tab.gz', compression='gzip')
self.assertFields(expression, "source", "DICTYBASE")
self.assertFields(expression, 'species', 'Dictyostelium discoideum')
self.assertFields(expression, 'build', 'dd-05-2009')
self.assertFields(expression, 'feature_type', 'gene')
inputs = {'expressions': [expression.pk, expression.pk]}
etc = self.run_process('etc-bcm', inputs)
self.assertJSON(etc, etc.output['etc'], '', 'etc.json.gz')
    @with_resolwe_host
    @tag_process('htseq-count')
    def test_expression_htseq(self):
        """Quantify with htseq-count; mismatched annotation metadata must fail."""
        with self.preparation_stage():
            genome = self.prepare_genome()
            reads = self.prepare_reads()
            inputs = {
                'src': 'annotation dicty.gtf.gz',
                'source': 'DICTYBASE',
                'species': 'Dictyostelium discoideum',
                'build': 'dd-05-2009',
            }
            annotation_correct = self.run_process('upload-gtf', inputs)
            # Same annotation file, but species does not match the alignment.
            inputs = {
                'src': 'annotation dicty.gtf.gz',
                'source': 'DICTYBASE',
                'species': 'Homo sapiens',
                'build': 'dd-05-2009',
            }
            annotation_wrong_species = self.run_process('upload-gtf', inputs)
            # Same annotation file, but with a mismatched genome build.
            inputs = {
                'src': 'annotation dicty.gtf.gz',
                'source': 'DICTYBASE',
                'species': 'Dictyostelium discoideum',
                'build': 'wrong build',
            }
            annotation_wrong_build = self.run_process('upload-gtf', inputs)
            aligned_reads = self.run_process('alignment-hisat2', {
                'genome': genome.pk,
                'reads': reads.pk,
            })
        inputs = {
            'alignments': aligned_reads.pk,
            'gff': annotation_correct.pk,
            'stranded': 'no',
            'id_attribute': 'transcript_id',
        }
        expression = self.run_process('htseq-count', inputs)
        self.assertFile(expression, 'rc', 'reads_rc.tab.gz', compression='gzip')
        self.assertFile(expression, 'fpkm', 'reads_fpkm.tab.gz', compression='gzip')
        self.assertFile(expression, 'exp', 'reads_tpm.tab.gz', compression='gzip')
        self.assertJSON(expression, expression.output['exp_json'], '', 'expression_htseq.json.gz')
        self.assertFile(expression, 'exp_set', 'htseq_count_out_exp_set.txt.gz', compression='gzip')
        self.assertJSON(expression, expression.output['exp_set_json'], '', 'htseq_count_exp_set.json.gz')
        self.assertFields(expression, 'species', 'Dictyostelium discoideum')
        self.assertFields(expression, 'build', 'dd-05-2009')
        self.assertFields(expression, 'feature_type', 'gene')
        # Species/build mismatches between alignment and annotation must error.
        inputs['gff'] = annotation_wrong_species.pk
        expression = self.run_process('htseq-count', inputs, Data.STATUS_ERROR)
        inputs['gff'] = annotation_wrong_build.pk
        expression = self.run_process('htseq-count', inputs, Data.STATUS_ERROR)
    @with_resolwe_host
    @tag_process('htseq-count-raw')
    def test_expression_htseq_cpm(self):
        """Quantify raw counts/CPM with htseq-count-raw for two BAM inputs."""
        with self.preparation_stage():
            inputs = {
                'src': 'annotation dicty.gtf.gz',
                'source': 'DICTYBASE',
                'species': 'Dictyostelium discoideum',
                'build': 'dd-05-2009',
            }
            annotation = self.run_process('upload-gtf', inputs)
            inputs = {
                'src': 'feature_counts hs.gtf.gz',
                'source': 'ENSEMBL',
                'species': 'Homo sapiens',
                'build': 'GRCh38_ens90',
            }
            annotation_hs = self.run_process('upload-gtf', inputs)
            bam = {
                'src': 'reads.bam',
                'species': 'Dictyostelium discoideum',
                'build': 'dd-05-2009',
            }
            # 'bam' is first the inputs dict, then rebound to the uploaded data object.
            bam = self.run_process('upload-bam', bam)
            inputs = {
                'src': 'feature_counts hs_paired.bam',
                'species': 'Homo sapiens',
                'build': 'GRCh38_ens90',
            }
            bam_paired = self.run_process('upload-bam', inputs)
        # Single-end dicty BAM, counted on transcript IDs.
        inputs = {
            'alignments': bam.pk,
            'gtf': annotation.pk,
            'stranded': 'no',
            'id_attribute': 'transcript_id',
        }
        expression = self.run_process('htseq-count-raw', inputs)
        self.assertFile(expression, 'rc', 'reads_rc.tab.gz', compression='gzip')
        self.assertFile(expression, 'exp', 'reads_cpm.tab.gz', compression='gzip')
        self.assertFields(expression, 'species', 'Dictyostelium discoideum')
        self.assertFields(expression, 'build', 'dd-05-2009')
        # Paired-end human BAM with default options.
        inputs = {
            'alignments': bam_paired.pk,
            'gtf': annotation_hs.pk,
        }
        expression = self.run_process('htseq-count-raw', inputs)
        self.assertFile(expression, 'rc', 'htseq_raw_rc.tab.gz', compression='gzip')
        self.assertFile(expression, 'exp', 'htseq_raw_cpm.tab.gz', compression='gzip')
        self.assertFile(expression, 'exp_set', 'htseq_cpm_exp_set.txt.gz', compression='gzip')
        self.assertJSON(expression, expression.output['exp_set_json'], '', 'htseq_cpm_exp_set.json.gz')
        self.assertFields(expression, 'species', 'Homo sapiens')
        self.assertFields(expression, 'build', 'GRCh38_ens90')
        self.assertFields(expression, 'feature_type', 'gene')
@tag_process('index-fasta-nucl')
def test_index_fasta_nucl(self):
with self.preparation_stage():
inputs = {'src': 'HS chr21_ensembl.fa.gz'}
genome = self.run_process('upload-fasta-nucl', inputs)
inputs = {
'src': 'HS chr21_short.gtf.gz',
'source': 'ENSEMBL',
'species': 'Homo sapiens',
'build': 'ens_90'
}
annotation = self.run_process('upload-gtf', inputs)
inputs = {'nucl': genome.pk, 'annotation': annotation.pk}
index_fasta_nucl = self.run_process('index-fasta-nucl', inputs)
del index_fasta_nucl.output['rsem_index']['total_size'] # Non-deterministic output.
self.assertFields(index_fasta_nucl, 'rsem_index', {'dir': 'rsem'})
self.assertFields(index_fasta_nucl, 'source', 'ENSEMBL')
self.assertFields(index_fasta_nucl, 'species', 'Homo sapiens')
self.assertFields(index_fasta_nucl, 'build', 'ens_90')
@with_resolwe_host
@tag_process('mergeexpressions')
def test_mergeexpression(self):
with self.preparation_stage():
expression_1 = self.prepare_expression(f_rc='exp_1_rc.tab.gz', f_exp='exp_1_tpm.tab.gz', f_type="TPM")
expression_2 = self.prepare_expression(f_rc='exp_2_rc.tab.gz', f_exp='exp_2_tpm.tab.gz', f_type="TPM")
expression_3 = self.prepare_expression(f_rc='exp_2_rc.tab.gz', f_exp='exp_2_tpm.tab.gz', f_type="RC")
inputs = {
'exps': [expression_1.pk, expression_2.pk],
'genes': ['DPU_G0067096', 'DPU_G0067098', 'DPU_G0067102']
}
mergeexpression_1 = self.run_process('mergeexpressions', inputs)
self.assertFile(mergeexpression_1, "expset", "merged_expset_subset.tab")
inputs = {
'exps': [expression_1.pk, expression_2.pk],
'genes': []
}
mergeexpression_2 = self.run_process('mergeexpressions', inputs)
self.assertFile(mergeexpression_2, "expset", "merged_expset_all.tab")
inputs = {
'exps': [expression_1.pk, expression_2.pk, expression_3.pk],
'genes': ['DPU_G0067096', 'DPU_G0067098', 'DPU_G0067102']
}
self.run_process('mergeexpressions', inputs, Data.STATUS_ERROR)
    @tag_process('mergeetc')
    def test_etcmerge(self):
        """Build an ETC from dicty expressions, then merge it for selected genes."""
        with self.preparation_stage():
            genome = self.prepare_genome()
            reads = self.prepare_reads()
            annotation = self.prepare_annotation_gff()
            aligned_reads = self.run_process('alignment-hisat2', {
                'genome': genome.pk,
                'reads': reads.pk,
            })
            mappa = self.run_process("upload-mappability", {"src": "purpureum_mappability_50.tab.gz"})
            inputs = {
                'alignment': aligned_reads.pk,
                'gff': annotation.pk,
                'mappable': mappa.pk}
            expression = self.run_process('expression-dicty', inputs)
            inputs = {'expressions': [expression.pk, expression.pk]}
            etc = self.run_process('etc-bcm', inputs)
        inputs = {
            'exps': [etc.pk],
            'genes': ['DPU_G0067110', 'DPU_G0067098', 'DPU_G0067102']
        }
        etcmerge = self.run_process('mergeetc', inputs)
        self.assertFile(etcmerge, "expset", "merged_etc.tab.gz", compression='gzip')
    @with_resolwe_host
    @tag_process('feature_counts')
    def test_feature_counts(self):
        """Run feature_counts over paired-end, single-end and UCSC-annotated BAMs."""
        with self.preparation_stage():
            inputs = {
                'src': 'feature_counts hs.gtf.gz',
                'source': 'ENSEMBL',
                'species': 'Homo sapiens',
                'build': 'GRCh38_ens90',
            }
            annotation_gtf = self.run_process('upload-gtf', inputs)
            annotation_gff3 = self.prepare_annotation_gff()
            bam_single_inputs = {
                'src': 'reads.bam',
                'species': 'Dictyostelium discoideum',
                'build': 'dd-05-2009'
            }
            bam_single = self.run_process('upload-bam', bam_single_inputs)
            inputs = {
                'src': 'feature_counts hs_paired.bam',
                'species': 'Homo sapiens',
                'build': 'GRCh38_ens90',
            }
            bam_paired = self.run_process('upload-bam', inputs)
            inputs = {
                'src': 'cuffquant_mapping.bam',
                'species': 'Homo sapiens',
                'build': 'hg19',
            }
            bam_ucsc = self.run_process('upload-bam', inputs)
            annotation_ucsc = self.prepare_annotation(
                fn='hg19_chr20_small_modified.gtf.gz',
                source='UCSC',
                species='Homo sapiens',
                build='hg19',
            )
        # Case 1: paired-end BAM with ENSEMBL GTF annotation.
        inputs = {
            'alignment': {
                'aligned_reads': bam_paired.id,
            },
            'annotation': {
                'annotation': annotation_gtf.id,
            },
        }
        expression = self.run_process('feature_counts', inputs)
        self.assertFile(expression, 'rc', 'feature_counts_out_rc.tab.gz', compression='gzip')
        self.assertFile(expression, 'fpkm', 'feature_counts_out_fpkm.tab.gz', compression='gzip')
        self.assertFile(expression, 'cpm', 'feature_counts_out_cpm.tab.gz', compression='gzip')
        self.assertFile(expression, 'exp', 'feature_counts_out_tpm.tab.gz', compression='gzip')
        self.assertFile(expression, 'exp_set', 'feature_counts_out_exp_set.txt.gz', compression='gzip')
        self.assertJSON(expression, expression.output['exp_set_json'], '', 'feature_counts_exp_set.json.gz')
        self.assertFields(expression, 'species', 'Homo sapiens')
        self.assertFields(expression, 'build', 'GRCh38_ens90')
        self.assertFields(expression, 'feature_type', 'gene')
        # Case 2: single-end BAM with GFF3 annotation, grouping by 'Parent'.
        inputs = {
            'alignment': {
                'aligned_reads': bam_single.id,
            },
            'annotation': {
                'annotation': annotation_gff3.id,
                'id_attribute': 'Parent',
            },
        }
        expression = self.run_process('feature_counts', inputs)
        self.assertFile(expression, 'rc', 'reads_rc.tab.gz', compression='gzip')
        self.assertFile(expression, 'fpkm', 'reads_fpkm.tab.gz', compression='gzip')
        self.assertFile(expression, 'exp', 'reads_tpm.tab.gz', compression='gzip')
        self.assertFields(expression, 'feature_type', 'gene')
        # Case 3: UCSC annotation; only checks that the process completes.
        inputs = {
            'alignment': {
                'aligned_reads': bam_ucsc.id,
            },
            'annotation': {
                'annotation': annotation_ucsc.id,
            },
        }
        self.run_process('feature_counts', inputs)
    @with_resolwe_host
    @tag_process('feature_counts')
    def test_feature_counts_rpkum(self):
        """Run feature_counts with RPKUM normalization driven by mappability."""
        with self.preparation_stage():
            genome = self.prepare_genome()
            reads = self.prepare_reads()
            annotation = self.prepare_annotation(fn='annotation dicty.gtf.gz')
            annotation_gff = self.prepare_annotation_gff()
            aligned_reads = self.run_process('alignment-hisat2', {
                'genome': genome.pk,
                'reads': reads.pk
            })
            # RPKUM normalization requires a mappability track.
            mappability = self.run_process("mappability-bcm", {
                "genome": genome.id,
                "gff": annotation_gff.id,
                "length": 50,
            })
        feature_counts = self.run_process('feature_counts', {
            'alignment': {
                'aligned_reads': aligned_reads.id,
            },
            'annotation': {
                'annotation': annotation.id,
                'id_attribute': 'transcript_id',
            },
            'normalization_type': 'RPKUM',
            'mappability': mappability.id,
        })
        self.assertFile(feature_counts, 'exp', 'expression_fc_rpkum.tab.gz', compression='gzip')
        self.assertFields(feature_counts, "source", "DICTYBASE")
        self.assertFields(feature_counts, 'species', 'Dictyostelium discoideum')
        self.assertFields(feature_counts, 'build', 'dd-05-2009')
        self.assertFields(feature_counts, 'feature_type', 'gene')
@tag_process('salmon-index')
def test_salmon_index(self):
with self.preparation_stage():
cds = self.run_process('upload-fasta-nucl', {'src': 'salmon_cds.fa.gz'})
inputs = {
'nucl': cds.id,
'perfect_hash': True,
'gencode': False,
'keep_duplicates': True,
'source': 'ENSEMBL',
'species': 'Homo sapiens',
'build': 'ens_90',
}
salmon_index = self.run_process('salmon-index', inputs)
del salmon_index.output['index']['total_size'] # Non-deterministic output.
self.assertFields(salmon_index, 'index', {'dir': 'salmon_index'})
self.assertFields(salmon_index, 'source', 'ENSEMBL')
self.assertFields(salmon_index, 'species', 'Homo sapiens')
self.assertFields(salmon_index, 'build', 'ens_90')
    @with_resolwe_host
    @tag_process('salmon-quant')
    def test_salmon_quant(self):
        """Quantify single-end reads with salmon against a cDNA index."""
        with self.preparation_stage():
            reads = self.prepare_reads([os.path.join('salmon_quant', 'input', 'hs sim_reads_single.fastq.gz')])
            annotation = self.prepare_annotation(
                os.path.join('salmon_quant', 'input', 'hs annotation.gtf.gz'),
                source='ENSEMBL',
                species='Homo sapiens',
                build='ens_92',
            )
            transcripts = self.run_process('upload-fasta-nucl', {
                'src': os.path.join('salmon_quant', 'input', 'hs cdna.fasta.gz'),
                'source': 'ENSEMBL',
                'species': 'Homo sapiens',
                'build': 'ens_92',
            })
            salmon_index = self.run_process('salmon-index', {
                'nucl': transcripts.id,
                'source': 'ENSEMBL',
                'species': 'Homo sapiens',
                'build': 'ens_92',
            })
        # Exercise a broad selection of salmon's optional parameters.
        inputs = {
            'reads': reads.id,
            'salmon_index': salmon_index.id,
            'annotation': annotation.id,
            'options': {
                'min_assigned_frag': 5,
                'gc_bias': True,
                'seq_bias': True,
                'validate_mappings': True,
                'range_factorization_bins': 4,
                'incompat_prior': 0.05,
                'min_score_fraction': 0.7,
                'consensus_slack': 0.25,
                'no_length_correction': False,
                'discard_orphans_quasi': True,
            }
        }
        salmon_quant = self.run_process('salmon-quant', inputs)
        self.assertFile(
            salmon_quant,
            'exp_set',
            os.path.join('salmon_quant', 'output', 'salmon_quant_tpm.tab.gz'),
            compression='gzip',
        )
        self.assertFile(
            salmon_quant,
            'transcripts',
            os.path.join('salmon_quant', 'output', 'salmon_transcripts_tpm.tab.gz'),
            compression='gzip',
        )
@with_resolwe_host
@tag_process('feature_counts')
def test_featurecounts_strandedness(self):
with self.preparation_stage():
cds = self.run_process('upload-fasta-nucl', {'src': 'salmon_cds.fa.gz'})
salmon_index = self.run_process('salmon-index', {
'nucl': cds.id,
'source': 'ENSEMBL',
'species': 'Homo sapiens',
'build': 'ens_90',
})
annotation = self.run_process('upload-gtf', {
'src': 'annotation_rsem.gtf.gz',
'source': 'ENSEMBL',
'species': 'Homo sapiens',
'build': 'ens_90',
})
aligned_reads = self.run_process('upload-bam', {
'src': 'feature counts_detect_strandedness.bam',
'species': 'Homo sapiens',
'build': 'ens_90',
})
inputs = {
'alignment': {
'aligned_reads': aligned_reads.id,
'assay_type': 'auto',
'cdna_index': salmon_index.id,
},
'annotation': {
'annotation': annotation.id,
},
}
expression = self.run_process('feature_counts', inputs)
self.assertFile(expression, 'exp', 'auto_detect_strand_tpm.tab.gz', compression='gzip')
@tag_process('shrna-quant')
def test_shrna_quant(self):
with self.preparation_stage():
pf_in = './shrna_diffexp/input/'
pf_out = './shrna_diffexp/output/'
species = 'Homo sapiens'
build = 'custom-from-file'
bam_single_inputs = {
'src': pf_in + 'SM18_ss.bam',
'species': species,
'build': build
}
bam = self.run_process('upload-bam', bam_single_inputs)
inputs = {
'alignment': bam.id,
'readlengths': 26,
'alignscores': -6
}
quant = self.run_process('shrna-quant', inputs)
self.assertFile(quant, 'rc', pf_out + 'SM18_ss_count_matrix.txt.gz', compression='gzip')
self.assertFile(quant, 'exp', pf_out + 'SM18_ss_count_matrix.txt.gz', compression='gzip')
self.assertFields(quant, 'exp_type', 'RC')
self.assertJSON(quant, quant.output['exp_json'], '', pf_out + 'SM18_ss_json.txt.gz')
self.assertFields(quant, 'source', 'shRNA-gene-sequences')
self.assertFields(quant, 'species', species)
self.assertFields(quant, 'build', build)
self.assertFields(quant, 'feature_type', 'shRNA')
self.assertFile(quant, 'mapped_species', pf_out + 'SM18_ss_mapped_species.txt.gz', compression='gzip')
@with_resolwe_host
@tag_process('stringtie')
def test_stringtie(self):
with self.preparation_stage():
alignment = self.run_process('upload-bam', {
'src': './corall/input/corall_paired.bam',
'species': 'Homo sapiens',
'build': 'ens_90',
})
annotation = self.run_process('upload-gtf', {
'src': './corall/input/hs_annotation_chr2_1_45000.gtf.gz',
'source': 'ENSEMBL',
'species': 'Homo sapiens',
'build': 'ens_90'
})
stringtie = self.run_process('stringtie', {
'alignment': alignment.id,
'annotation': annotation.id,
'options': {
'stranded': 'forward',
}
})
self.assertFile(stringtie, 'exp', './corall/output/stringtie_tpm.txt.gz', compression='gzip')
self.assertFile(stringtie, 'exp_set', './corall/output/stringtie_exp_set.txt.gz', compression='gzip')
self.assertFile(stringtie, 'ctab', './corall/output/stringtie_transcripts.ctab')
self.assertFields(stringtie, 'exp_type', 'TPM')
self.assertFields(stringtie, 'source', 'ENSEMBL')
self.assertFields(stringtie, 'species', 'Homo sapiens')
self.assertFields(stringtie, 'build', 'ens_90')
self.assertFields(stringtie, 'feature_type', 'gene')
    @with_resolwe_host
    @tag_process('slamdunk-all-paired')
    def test_slamdunk_paired(self):
        """Run the full paired-end SLAMdunk pipeline and check its count table."""
        with self.preparation_stage():
            paired_reads = self.prepare_paired_reads(['hs_slamseq_R1_complemented.fastq.gz'],
                                                     ['hs_slamseq_R2.fastq.gz'])
            # Transcriptome sequences to align the reads against.
            transcripts = self.run_process('upload-fasta-nucl', {
                'src': os.path.join('slamseq', 'input', 'hs_transcript.fasta'),
                'species': 'Homo sapiens',
                'build': 'Gencode 32'
            })
            # Regions of interest used for quantification.
            bedfile = self.run_process('upload-bed', {
                'src': os.path.join('slamseq', 'input', 'hs_transcript.bed'),
                'species': 'Homo sapiens',
                'build': 'Gencode 32'
            })
        inputs = {
            'reads': paired_reads.id,
            'transcriptome': transcripts.id,
            'regions': bedfile.id,
            'filter_multimappers': True,
            'max_alignments': 1,
            'read_length': 75
        }
        slamdunk = self.run_process('slamdunk-all-paired', inputs)
        # Verify the 'tcount' output table against the reference file.
        self.assertFile(slamdunk, 'tcount', os.path.join('slamseq', 'output', 'hs_slamseq_tcount.tsv'))
| 39.926261 | 114 | 0.559764 |
import os
from resolwe.flow.models import Data, Collection, Relation
from resolwe.flow.models.entity import RelationPartition, RelationType
from resolwe.test import tag_process, with_resolwe_host
from resolwe_bio.expression_filters.relation import replicate_groups
from resolwe_bio.utils.test import KBBioProcessTestCase
class ExpressionProcessorTestCase(KBBioProcessTestCase):
fixtures = ['relationtypes.yaml']
@tag_process('cufflinks', 'cuffmerge')
def test_cufflinks(self):
with self.preparation_stage():
genome = self.prepare_genome()
reads = self.prepare_reads()
annotation_gtf = self.prepare_annotation('annotation dicty.gff.gz')
annotation_gff3 = self.prepare_annotation_gff()
aligned_reads = self.run_process('alignment-hisat2', {
'genome': genome.pk,
'reads': reads.pk,
'spliced_alignments': {
'cufflinks': True
}
})
inputs = {
'alignment': aligned_reads.pk,
'annotation': annotation_gtf.pk,
'genome': genome.pk}
cuff_exp = self.run_process('cufflinks', inputs)
self.assertFile(cuff_exp, 'transcripts', 'cufflinks_transcripts.gtf', sort=True)
self.assertFields(cuff_exp, 'species', 'Dictyostelium discoideum')
self.assertFields(cuff_exp, 'build', 'dd-05-2009')
self.assertFields(cuff_exp, 'source', 'DICTYBASE')
inputs = {
'alignment': aligned_reads.pk,
'annotation': annotation_gtf.pk,
'genome': genome.pk}
cuff_exp2 = self.run_process('cufflinks', inputs)
inputs = {
'expressions': [cuff_exp.pk, cuff_exp2.pk],
'gff': annotation_gff3.pk,
'genome': genome.pk}
cuff_merge_gff3 = self.run_process('cuffmerge', inputs)
self.assertFile(cuff_merge_gff3, 'annot', 'cuffmerge_transcripts.gtf')
self.assertFields(cuff_merge_gff3, 'species', 'Dictyostelium discoideum')
self.assertFields(cuff_merge_gff3, 'build', 'dd-05-2009')
self.assertFields(cuff_exp, 'source', 'DICTYBASE')
inputs['gff'] = annotation_gtf.pk
cuff_merge_gtf = self.run_process('cuffmerge', inputs)
self.assertFile(cuff_merge_gtf, 'annot', 'cuffmerge_transcripts.gtf')
self.assertFields(cuff_merge_gtf, 'species', 'Dictyostelium discoideum')
self.assertFields(cuff_merge_gtf, 'build', 'dd-05-2009')
self.assertFields(cuff_exp, 'source', 'DICTYBASE')
@tag_process('cuffquant')
def test_cuffquant(self):
with self.preparation_stage():
inputs = {
'src': 'cuffquant_mapping.bam',
'species': 'Homo sapiens',
'build': 'hg19'
}
bam = self.run_process('upload-bam', inputs)
annotation = self.prepare_annotation(
fn='hg19_chr20_small.gtf.gz',
source='UCSC',
species='Homo sapiens',
build='hg19'
)
inputs = {
'alignment': bam.id,
'annotation': annotation.id}
cuffquant = self.run_process('cuffquant', inputs)
self.assertFields(cuffquant, 'species', 'Homo sapiens')
self.assertFields(cuffquant, 'build', 'hg19')
self.assertFields(cuffquant, 'source', 'UCSC')
@with_resolwe_host
@tag_process('cuffnorm')
def test_cuffnorm(self):
with self.preparation_stage():
collection = Collection.objects.create(
name='Test collection',
contributor=self.contributor
)
rel_type_group = RelationType.objects.get(name='group')
replicate_group = Relation.objects.create(
contributor=self.contributor,
collection=collection,
type=rel_type_group,
category='Replicate'
)
inputs = {
'src': 'cuffquant 1.cxb',
'source': 'UCSC',
'species': 'Homo sapiens',
'build': 'hg19'
}
sample_1 = self.run_process("upload-cxb", inputs)
inputs = {
'src': 'cuffquant_2.cxb',
'source': 'UCSC',
'species': 'Homo sapiens',
'build': 'hg19'
}
sample_2 = self.run_process("upload-cxb", inputs)
inputs = {
'src': '3-cuffquant.cxb',
'source': 'UCSC',
'species': 'Homo sapiens',
'build': 'hg19'
}
sample_3 = self.run_process("upload-cxb", inputs)
inputs = {
'src': '4-cuffquant.cxb',
'source': 'UCSC',
'species': 'Homo sapiens',
'build': 'hg19'
}
sample_4 = self.run_process("upload-cxb", inputs)
inputs = {
'src': '5-cuffquant.cxb',
'source': 'UCSC',
'species': 'Homo sapiens',
'build': 'hg19'
}
sample_5 = self.run_process("upload-cxb", inputs)
inputs = {
'src': '6-cuffquant.cxb',
'source': 'UCSC',
'species': 'Homo sapiens',
'build': 'hg19'
}
sample_6 = self.run_process("upload-cxb", inputs)
RelationPartition.objects.create(relation=replicate_group, entity=sample_1.entity, label='1')
RelationPartition.objects.create(relation=replicate_group, entity=sample_2.entity, label='1')
RelationPartition.objects.create(relation=replicate_group, entity=sample_3.entity, label='2')
RelationPartition.objects.create(relation=replicate_group, entity=sample_4.entity, label='2')
RelationPartition.objects.create(relation=replicate_group, entity=sample_5.entity, label='2')
RelationPartition.objects.create(relation=replicate_group, entity=sample_6.entity, label='3')
annotation = self.prepare_annotation(fn='hg19_chr20_small.gtf.gz', source='UCSC',
species='Homo sapiens', build='hg19')
self.assertEqual(
replicate_groups([
{'__id': sample_1.id},
{'__id': sample_2.id},
{'__id': sample_3.id},
{'__id': sample_4.id},
{'__id': sample_5.id},
{'__id': sample_6.id}
]),
[1, 1, 2, 2, 2, 3]
)
inputs = {
'cuffquant': [sample_1.pk, sample_2.pk, sample_3.pk, sample_4.pk, sample_5.pk, sample_6.pk],
'annotation': annotation.id,
}
cuffnorm = self.run_process('cuffnorm', inputs)
self.assertFile(cuffnorm, 'fpkm_means', 'cuffnorm_all_fpkm_means.txt')
self.assertFile(cuffnorm, 'genes_fpkm', 'cuffnorm_genes.fpkm_table')
self.assertFileExists(cuffnorm, 'raw_scatter')
self.assertFields(cuffnorm, 'source', 'UCSC')
self.assertFields(cuffnorm, 'species', 'Homo sapiens')
self.assertFields(cuffnorm, 'build', 'hg19')
exp = Data.objects.last()
self.assertFile(exp, 'exp', 'cuffnorm_expression.tab.gz', compression='gzip')
self.assertFile(exp, 'exp_set', 'cuffnorm_out_exp_set.txt.gz', compression='gzip')
self.assertJSON(exp, exp.output['exp_set_json'], '', 'cuffnorm_exp_set.json.gz')
@tag_process('mappability-bcm')
def test_mappability(self):
with self.preparation_stage():
genome = self.prepare_genome()
annotation = self.prepare_annotation_gff()
mappability = self.run_process('mappability-bcm', {
'genome': genome.id,
'gff': annotation.id,
'length': 50,
})
self.assertFileExists(mappability, 'mappability')
@tag_process('expression-dicty', 'etc-bcm')
def test_expression_dicty(self):
with self.preparation_stage():
genome = self.prepare_genome()
reads = self.prepare_reads()
annotation = self.prepare_annotation_gff()
aligned_reads = self.run_process('alignment-hisat2', {
'genome': genome.pk,
'reads': reads.pk
})
mappa = self.run_process("upload-mappability", {"src": "purpureum_mappability_50.tab.gz"})
inputs = {
'alignment': aligned_reads.pk,
'gff': annotation.pk,
'mappable': mappa.pk}
expression = self.run_process('expression-dicty', inputs)
self.assertFile(expression, 'rpkm', 'expression_bcm_rpkm.tab.gz', compression='gzip')
self.assertFields(expression, "source", "DICTYBASE")
self.assertFields(expression, 'species', 'Dictyostelium discoideum')
self.assertFields(expression, 'build', 'dd-05-2009')
self.assertFields(expression, 'feature_type', 'gene')
inputs = {'expressions': [expression.pk, expression.pk]}
etc = self.run_process('etc-bcm', inputs)
self.assertJSON(etc, etc.output['etc'], '', 'etc.json.gz')
@with_resolwe_host
@tag_process('htseq-count')
def test_expression_htseq(self):
with self.preparation_stage():
genome = self.prepare_genome()
reads = self.prepare_reads()
inputs = {
'src': 'annotation dicty.gtf.gz',
'source': 'DICTYBASE',
'species': 'Dictyostelium discoideum',
'build': 'dd-05-2009',
}
annotation_correct = self.run_process('upload-gtf', inputs)
inputs = {
'src': 'annotation dicty.gtf.gz',
'source': 'DICTYBASE',
'species': 'Homo sapiens',
'build': 'dd-05-2009',
}
annotation_wrong_species = self.run_process('upload-gtf', inputs)
inputs = {
'src': 'annotation dicty.gtf.gz',
'source': 'DICTYBASE',
'species': 'Dictyostelium discoideum',
'build': 'wrong build',
}
annotation_wrong_build = self.run_process('upload-gtf', inputs)
aligned_reads = self.run_process('alignment-hisat2', {
'genome': genome.pk,
'reads': reads.pk,
})
inputs = {
'alignments': aligned_reads.pk,
'gff': annotation_correct.pk,
'stranded': 'no',
'id_attribute': 'transcript_id',
}
expression = self.run_process('htseq-count', inputs)
self.assertFile(expression, 'rc', 'reads_rc.tab.gz', compression='gzip')
self.assertFile(expression, 'fpkm', 'reads_fpkm.tab.gz', compression='gzip')
self.assertFile(expression, 'exp', 'reads_tpm.tab.gz', compression='gzip')
self.assertJSON(expression, expression.output['exp_json'], '', 'expression_htseq.json.gz')
self.assertFile(expression, 'exp_set', 'htseq_count_out_exp_set.txt.gz', compression='gzip')
self.assertJSON(expression, expression.output['exp_set_json'], '', 'htseq_count_exp_set.json.gz')
self.assertFields(expression, 'species', 'Dictyostelium discoideum')
self.assertFields(expression, 'build', 'dd-05-2009')
self.assertFields(expression, 'feature_type', 'gene')
inputs['gff'] = annotation_wrong_species.pk
expression = self.run_process('htseq-count', inputs, Data.STATUS_ERROR)
inputs['gff'] = annotation_wrong_build.pk
expression = self.run_process('htseq-count', inputs, Data.STATUS_ERROR)
@with_resolwe_host
@tag_process('htseq-count-raw')
def test_expression_htseq_cpm(self):
with self.preparation_stage():
inputs = {
'src': 'annotation dicty.gtf.gz',
'source': 'DICTYBASE',
'species': 'Dictyostelium discoideum',
'build': 'dd-05-2009',
}
annotation = self.run_process('upload-gtf', inputs)
inputs = {
'src': 'feature_counts hs.gtf.gz',
'source': 'ENSEMBL',
'species': 'Homo sapiens',
'build': 'GRCh38_ens90',
}
annotation_hs = self.run_process('upload-gtf', inputs)
bam = {
'src': 'reads.bam',
'species': 'Dictyostelium discoideum',
'build': 'dd-05-2009',
}
bam = self.run_process('upload-bam', bam)
inputs = {
'src': 'feature_counts hs_paired.bam',
'species': 'Homo sapiens',
'build': 'GRCh38_ens90',
}
bam_paired = self.run_process('upload-bam', inputs)
inputs = {
'alignments': bam.pk,
'gtf': annotation.pk,
'stranded': 'no',
'id_attribute': 'transcript_id',
}
expression = self.run_process('htseq-count-raw', inputs)
self.assertFile(expression, 'rc', 'reads_rc.tab.gz', compression='gzip')
self.assertFile(expression, 'exp', 'reads_cpm.tab.gz', compression='gzip')
self.assertFields(expression, 'species', 'Dictyostelium discoideum')
self.assertFields(expression, 'build', 'dd-05-2009')
inputs = {
'alignments': bam_paired.pk,
'gtf': annotation_hs.pk,
}
expression = self.run_process('htseq-count-raw', inputs)
self.assertFile(expression, 'rc', 'htseq_raw_rc.tab.gz', compression='gzip')
self.assertFile(expression, 'exp', 'htseq_raw_cpm.tab.gz', compression='gzip')
self.assertFile(expression, 'exp_set', 'htseq_cpm_exp_set.txt.gz', compression='gzip')
self.assertJSON(expression, expression.output['exp_set_json'], '', 'htseq_cpm_exp_set.json.gz')
self.assertFields(expression, 'species', 'Homo sapiens')
self.assertFields(expression, 'build', 'GRCh38_ens90')
self.assertFields(expression, 'feature_type', 'gene')
@tag_process('index-fasta-nucl')
def test_index_fasta_nucl(self):
with self.preparation_stage():
inputs = {'src': 'HS chr21_ensembl.fa.gz'}
genome = self.run_process('upload-fasta-nucl', inputs)
inputs = {
'src': 'HS chr21_short.gtf.gz',
'source': 'ENSEMBL',
'species': 'Homo sapiens',
'build': 'ens_90'
}
annotation = self.run_process('upload-gtf', inputs)
inputs = {'nucl': genome.pk, 'annotation': annotation.pk}
index_fasta_nucl = self.run_process('index-fasta-nucl', inputs)
del index_fasta_nucl.output['rsem_index']['total_size']
self.assertFields(index_fasta_nucl, 'rsem_index', {'dir': 'rsem'})
self.assertFields(index_fasta_nucl, 'source', 'ENSEMBL')
self.assertFields(index_fasta_nucl, 'species', 'Homo sapiens')
self.assertFields(index_fasta_nucl, 'build', 'ens_90')
@with_resolwe_host
@tag_process('mergeexpressions')
def test_mergeexpression(self):
with self.preparation_stage():
expression_1 = self.prepare_expression(f_rc='exp_1_rc.tab.gz', f_exp='exp_1_tpm.tab.gz', f_type="TPM")
expression_2 = self.prepare_expression(f_rc='exp_2_rc.tab.gz', f_exp='exp_2_tpm.tab.gz', f_type="TPM")
expression_3 = self.prepare_expression(f_rc='exp_2_rc.tab.gz', f_exp='exp_2_tpm.tab.gz', f_type="RC")
inputs = {
'exps': [expression_1.pk, expression_2.pk],
'genes': ['DPU_G0067096', 'DPU_G0067098', 'DPU_G0067102']
}
mergeexpression_1 = self.run_process('mergeexpressions', inputs)
self.assertFile(mergeexpression_1, "expset", "merged_expset_subset.tab")
inputs = {
'exps': [expression_1.pk, expression_2.pk],
'genes': []
}
mergeexpression_2 = self.run_process('mergeexpressions', inputs)
self.assertFile(mergeexpression_2, "expset", "merged_expset_all.tab")
inputs = {
'exps': [expression_1.pk, expression_2.pk, expression_3.pk],
'genes': ['DPU_G0067096', 'DPU_G0067098', 'DPU_G0067102']
}
self.run_process('mergeexpressions', inputs, Data.STATUS_ERROR)
@tag_process('mergeetc')
def test_etcmerge(self):
with self.preparation_stage():
genome = self.prepare_genome()
reads = self.prepare_reads()
annotation = self.prepare_annotation_gff()
aligned_reads = self.run_process('alignment-hisat2', {
'genome': genome.pk,
'reads': reads.pk,
})
mappa = self.run_process("upload-mappability", {"src": "purpureum_mappability_50.tab.gz"})
inputs = {
'alignment': aligned_reads.pk,
'gff': annotation.pk,
'mappable': mappa.pk}
expression = self.run_process('expression-dicty', inputs)
inputs = {'expressions': [expression.pk, expression.pk]}
etc = self.run_process('etc-bcm', inputs)
inputs = {
'exps': [etc.pk],
'genes': ['DPU_G0067110', 'DPU_G0067098', 'DPU_G0067102']
}
etcmerge = self.run_process('mergeetc', inputs)
self.assertFile(etcmerge, "expset", "merged_etc.tab.gz", compression='gzip')
@with_resolwe_host
@tag_process('feature_counts')
def test_feature_counts(self):
with self.preparation_stage():
inputs = {
'src': 'feature_counts hs.gtf.gz',
'source': 'ENSEMBL',
'species': 'Homo sapiens',
'build': 'GRCh38_ens90',
}
annotation_gtf = self.run_process('upload-gtf', inputs)
annotation_gff3 = self.prepare_annotation_gff()
bam_single_inputs = {
'src': 'reads.bam',
'species': 'Dictyostelium discoideum',
'build': 'dd-05-2009'
}
bam_single = self.run_process('upload-bam', bam_single_inputs)
inputs = {
'src': 'feature_counts hs_paired.bam',
'species': 'Homo sapiens',
'build': 'GRCh38_ens90',
}
bam_paired = self.run_process('upload-bam', inputs)
inputs = {
'src': 'cuffquant_mapping.bam',
'species': 'Homo sapiens',
'build': 'hg19',
}
bam_ucsc = self.run_process('upload-bam', inputs)
annotation_ucsc = self.prepare_annotation(
fn='hg19_chr20_small_modified.gtf.gz',
source='UCSC',
species='Homo sapiens',
build='hg19',
)
inputs = {
'alignment': {
'aligned_reads': bam_paired.id,
},
'annotation': {
'annotation': annotation_gtf.id,
},
}
expression = self.run_process('feature_counts', inputs)
self.assertFile(expression, 'rc', 'feature_counts_out_rc.tab.gz', compression='gzip')
self.assertFile(expression, 'fpkm', 'feature_counts_out_fpkm.tab.gz', compression='gzip')
self.assertFile(expression, 'cpm', 'feature_counts_out_cpm.tab.gz', compression='gzip')
self.assertFile(expression, 'exp', 'feature_counts_out_tpm.tab.gz', compression='gzip')
self.assertFile(expression, 'exp_set', 'feature_counts_out_exp_set.txt.gz', compression='gzip')
self.assertJSON(expression, expression.output['exp_set_json'], '', 'feature_counts_exp_set.json.gz')
self.assertFields(expression, 'species', 'Homo sapiens')
self.assertFields(expression, 'build', 'GRCh38_ens90')
self.assertFields(expression, 'feature_type', 'gene')
inputs = {
'alignment': {
'aligned_reads': bam_single.id,
},
'annotation': {
'annotation': annotation_gff3.id,
'id_attribute': 'Parent',
},
}
expression = self.run_process('feature_counts', inputs)
self.assertFile(expression, 'rc', 'reads_rc.tab.gz', compression='gzip')
self.assertFile(expression, 'fpkm', 'reads_fpkm.tab.gz', compression='gzip')
self.assertFile(expression, 'exp', 'reads_tpm.tab.gz', compression='gzip')
self.assertFields(expression, 'feature_type', 'gene')
inputs = {
'alignment': {
'aligned_reads': bam_ucsc.id,
},
'annotation': {
'annotation': annotation_ucsc.id,
},
}
self.run_process('feature_counts', inputs)
@with_resolwe_host
@tag_process('feature_counts')
def test_feature_counts_rpkum(self):
with self.preparation_stage():
genome = self.prepare_genome()
reads = self.prepare_reads()
annotation = self.prepare_annotation(fn='annotation dicty.gtf.gz')
annotation_gff = self.prepare_annotation_gff()
aligned_reads = self.run_process('alignment-hisat2', {
'genome': genome.pk,
'reads': reads.pk
})
mappability = self.run_process("mappability-bcm", {
"genome": genome.id,
"gff": annotation_gff.id,
"length": 50,
})
feature_counts = self.run_process('feature_counts', {
'alignment': {
'aligned_reads': aligned_reads.id,
},
'annotation': {
'annotation': annotation.id,
'id_attribute': 'transcript_id',
},
'normalization_type': 'RPKUM',
'mappability': mappability.id,
})
self.assertFile(feature_counts, 'exp', 'expression_fc_rpkum.tab.gz', compression='gzip')
self.assertFields(feature_counts, "source", "DICTYBASE")
self.assertFields(feature_counts, 'species', 'Dictyostelium discoideum')
self.assertFields(feature_counts, 'build', 'dd-05-2009')
self.assertFields(feature_counts, 'feature_type', 'gene')
@tag_process('salmon-index')
def test_salmon_index(self):
with self.preparation_stage():
cds = self.run_process('upload-fasta-nucl', {'src': 'salmon_cds.fa.gz'})
inputs = {
'nucl': cds.id,
'perfect_hash': True,
'gencode': False,
'keep_duplicates': True,
'source': 'ENSEMBL',
'species': 'Homo sapiens',
'build': 'ens_90',
}
salmon_index = self.run_process('salmon-index', inputs)
del salmon_index.output['index']['total_size']
self.assertFields(salmon_index, 'index', {'dir': 'salmon_index'})
self.assertFields(salmon_index, 'source', 'ENSEMBL')
self.assertFields(salmon_index, 'species', 'Homo sapiens')
self.assertFields(salmon_index, 'build', 'ens_90')
@with_resolwe_host
@tag_process('salmon-quant')
def test_salmon_quant(self):
with self.preparation_stage():
reads = self.prepare_reads([os.path.join('salmon_quant', 'input', 'hs sim_reads_single.fastq.gz')])
annotation = self.prepare_annotation(
os.path.join('salmon_quant', 'input', 'hs annotation.gtf.gz'),
source='ENSEMBL',
species='Homo sapiens',
build='ens_92',
)
transcripts = self.run_process('upload-fasta-nucl', {
'src': os.path.join('salmon_quant', 'input', 'hs cdna.fasta.gz'),
'source': 'ENSEMBL',
'species': 'Homo sapiens',
'build': 'ens_92',
})
salmon_index = self.run_process('salmon-index', {
'nucl': transcripts.id,
'source': 'ENSEMBL',
'species': 'Homo sapiens',
'build': 'ens_92',
})
inputs = {
'reads': reads.id,
'salmon_index': salmon_index.id,
'annotation': annotation.id,
'options': {
'min_assigned_frag': 5,
'gc_bias': True,
'seq_bias': True,
'validate_mappings': True,
'range_factorization_bins': 4,
'incompat_prior': 0.05,
'min_score_fraction': 0.7,
'consensus_slack': 0.25,
'no_length_correction': False,
'discard_orphans_quasi': True,
}
}
salmon_quant = self.run_process('salmon-quant', inputs)
self.assertFile(
salmon_quant,
'exp_set',
os.path.join('salmon_quant', 'output', 'salmon_quant_tpm.tab.gz'),
compression='gzip',
)
self.assertFile(
salmon_quant,
'transcripts',
os.path.join('salmon_quant', 'output', 'salmon_transcripts_tpm.tab.gz'),
compression='gzip',
)
@with_resolwe_host
@tag_process('feature_counts')
def test_featurecounts_strandedness(self):
with self.preparation_stage():
cds = self.run_process('upload-fasta-nucl', {'src': 'salmon_cds.fa.gz'})
salmon_index = self.run_process('salmon-index', {
'nucl': cds.id,
'source': 'ENSEMBL',
'species': 'Homo sapiens',
'build': 'ens_90',
})
annotation = self.run_process('upload-gtf', {
'src': 'annotation_rsem.gtf.gz',
'source': 'ENSEMBL',
'species': 'Homo sapiens',
'build': 'ens_90',
})
aligned_reads = self.run_process('upload-bam', {
'src': 'feature counts_detect_strandedness.bam',
'species': 'Homo sapiens',
'build': 'ens_90',
})
inputs = {
'alignment': {
'aligned_reads': aligned_reads.id,
'assay_type': 'auto',
'cdna_index': salmon_index.id,
},
'annotation': {
'annotation': annotation.id,
},
}
expression = self.run_process('feature_counts', inputs)
self.assertFile(expression, 'exp', 'auto_detect_strand_tpm.tab.gz', compression='gzip')
@tag_process('shrna-quant')
def test_shrna_quant(self):
with self.preparation_stage():
pf_in = './shrna_diffexp/input/'
pf_out = './shrna_diffexp/output/'
species = 'Homo sapiens'
build = 'custom-from-file'
bam_single_inputs = {
'src': pf_in + 'SM18_ss.bam',
'species': species,
'build': build
}
bam = self.run_process('upload-bam', bam_single_inputs)
inputs = {
'alignment': bam.id,
'readlengths': 26,
'alignscores': -6
}
quant = self.run_process('shrna-quant', inputs)
self.assertFile(quant, 'rc', pf_out + 'SM18_ss_count_matrix.txt.gz', compression='gzip')
self.assertFile(quant, 'exp', pf_out + 'SM18_ss_count_matrix.txt.gz', compression='gzip')
self.assertFields(quant, 'exp_type', 'RC')
self.assertJSON(quant, quant.output['exp_json'], '', pf_out + 'SM18_ss_json.txt.gz')
self.assertFields(quant, 'source', 'shRNA-gene-sequences')
self.assertFields(quant, 'species', species)
self.assertFields(quant, 'build', build)
self.assertFields(quant, 'feature_type', 'shRNA')
self.assertFile(quant, 'mapped_species', pf_out + 'SM18_ss_mapped_species.txt.gz', compression='gzip')
@with_resolwe_host
@tag_process('stringtie')
def test_stringtie(self):
with self.preparation_stage():
alignment = self.run_process('upload-bam', {
'src': './corall/input/corall_paired.bam',
'species': 'Homo sapiens',
'build': 'ens_90',
})
annotation = self.run_process('upload-gtf', {
'src': './corall/input/hs_annotation_chr2_1_45000.gtf.gz',
'source': 'ENSEMBL',
'species': 'Homo sapiens',
'build': 'ens_90'
})
stringtie = self.run_process('stringtie', {
'alignment': alignment.id,
'annotation': annotation.id,
'options': {
'stranded': 'forward',
}
})
self.assertFile(stringtie, 'exp', './corall/output/stringtie_tpm.txt.gz', compression='gzip')
self.assertFile(stringtie, 'exp_set', './corall/output/stringtie_exp_set.txt.gz', compression='gzip')
self.assertFile(stringtie, 'ctab', './corall/output/stringtie_transcripts.ctab')
self.assertFields(stringtie, 'exp_type', 'TPM')
self.assertFields(stringtie, 'source', 'ENSEMBL')
self.assertFields(stringtie, 'species', 'Homo sapiens')
self.assertFields(stringtie, 'build', 'ens_90')
self.assertFields(stringtie, 'feature_type', 'gene')
@with_resolwe_host
@tag_process('slamdunk-all-paired')
def test_slamdunk_paired(self):
with self.preparation_stage():
paired_reads = self.prepare_paired_reads(['hs_slamseq_R1_complemented.fastq.gz'],
['hs_slamseq_R2.fastq.gz'])
transcripts = self.run_process('upload-fasta-nucl', {
'src': os.path.join('slamseq', 'input', 'hs_transcript.fasta'),
'species': 'Homo sapiens',
'build': 'Gencode 32'
})
bedfile = self.run_process('upload-bed', {
'src': os.path.join('slamseq', 'input', 'hs_transcript.bed'),
'species': 'Homo sapiens',
'build': 'Gencode 32'
})
inputs = {
'reads': paired_reads.id,
'transcriptome': transcripts.id,
'regions': bedfile.id,
'filter_multimappers': True,
'max_alignments': 1,
'read_length': 75
}
slamdunk = self.run_process('slamdunk-all-paired', inputs)
self.assertFile(slamdunk, 'tcount', os.path.join('slamseq', 'output', 'hs_slamseq_tcount.tsv'))
| true | true |
f733f220cc226a31cd237cf27b93840a2d886f4f | 2,213 | py | Python | pyscreenshot/plugins/mac_quartz.py | robocorp/pyscreenshot | 7cf03b23f4bdf1e4a2e3df1893de598e852dd346 | [
"BSD-2-Clause"
] | 1 | 2021-03-17T17:02:28.000Z | 2021-03-17T17:02:28.000Z | pyscreenshot/plugins/mac_quartz.py | robocorp/rpaframework-screenshot | 7cf03b23f4bdf1e4a2e3df1893de598e852dd346 | [
"BSD-2-Clause"
] | null | null | null | pyscreenshot/plugins/mac_quartz.py | robocorp/rpaframework-screenshot | 7cf03b23f4bdf1e4a2e3df1893de598e852dd346 | [
"BSD-2-Clause"
] | null | null | null | # Javier Escalada Gomez
#
# from:
# https://stackoverflow.com/questions/4524723/take-screenshot-in-python-on-mac-os-x
from pyscreenshot.plugins.backend import CBackend
from pyscreenshot.tempexport import read_func_img
class MacQuartzWrapper(CBackend):
    """Screenshot backend for macOS based on the Quartz/CoreGraphics API.

    Runs in-process (``childprocess = False``): the screen region is written
    to a temporary PNG file, which ``read_func_img`` reads back as an image.
    All macOS-specific modules are imported lazily so this module can be
    imported on other platforms.
    """

    name = "mac_quartz"
    childprocess = False

    def __init__(self):
        pass

    def grab(self, bbox=None):
        """Capture the screen and return an image.

        :param bbox: optional (x1, y1, x2, y2) region; full screen if None.
        """
        im = read_func_img(self._grab_to_file, bbox)
        return im

    def _grab_to_file(self, filename, bbox=None, dpi=72):
        """Save a screenshot of ``bbox`` (or the whole screen) to ``filename`` as PNG."""
        # FIXME: Should query dpi from somewhere, e.g for retina displays
        import Quartz
        import LaunchServices
        from Cocoa import NSURL
        import Quartz.CoreGraphics as CG
        # (removed an unused "import objc" that was previously here)

        if bbox:
            width = bbox[2] - bbox[0]
            height = bbox[3] - bbox[1]
            region = CG.CGRectMake(bbox[0], bbox[1], width, height)
        else:
            region = CG.CGRectInfinite

        # Create screenshot as CGImage
        image = CG.CGWindowListCreateImage(
            region,
            CG.kCGWindowListOptionOnScreenOnly,
            CG.kCGNullWindowID,
            CG.kCGWindowImageDefault,
        )

        # XXX: Can add more types:
        # https://developer.apple.com/library/mac/documentation/MobileCoreServices/Reference/UTTypeRef/Reference/reference.html#//apple_ref/doc/uid/TP40008771
        file_type = LaunchServices.kUTTypePNG
        url = NSURL.fileURLWithPath_(filename)
        dest = Quartz.CGImageDestinationCreateWithURL(
            url,
            file_type,
            # 1 image in file
            1,
            None,
        )
        # Embed the requested DPI in the PNG metadata.
        properties = {
            Quartz.kCGImagePropertyDPIWidth: dpi,
            Quartz.kCGImagePropertyDPIHeight: dpi,
        }

        # Add the image to the destination, characterizing the image with
        # the properties dictionary.
        Quartz.CGImageDestinationAddImage(dest, image, properties)

        # When all the images (only 1 in this example) are added to the destination,
        # finalize the CGImageDestination object.
        Quartz.CGImageDestinationFinalize(dest)

    def backend_version(self):
        """Return the version of the ``objc`` bridge used by this backend."""
        import objc

        return objc.__version__
| 29.118421 | 158 | 0.635337 |
from pyscreenshot.plugins.backend import CBackend
from pyscreenshot.tempexport import read_func_img
class MacQuartzWrapper(CBackend):
name = "mac_quartz"
childprocess = False
def __init__(self):
pass
def grab(self, bbox=None):
im = read_func_img(self._grab_to_file, bbox)
return im
def _grab_to_file(self, filename, bbox=None, dpi=72):
import Quartz
import LaunchServices
from Cocoa import NSURL
import Quartz.CoreGraphics as CG
import objc
if bbox:
width = bbox[2] - bbox[0]
height = bbox[3] - bbox[1]
region = CG.CGRectMake(bbox[0], bbox[1], width, height)
else:
region = CG.CGRectInfinite
image = CG.CGWindowListCreateImage(
region,
CG.kCGWindowListOptionOnScreenOnly,
CG.kCGNullWindowID,
CG.kCGWindowImageDefault,
)
ices.kUTTypePNG
url = NSURL.fileURLWithPath_(filename)
dest = Quartz.CGImageDestinationCreateWithURL(
url,
file_type,
1,
None,
)
properties = {
Quartz.kCGImagePropertyDPIWidth: dpi,
Quartz.kCGImagePropertyDPIHeight: dpi,
}
Quartz.CGImageDestinationAddImage(dest, image, properties)
Quartz.CGImageDestinationFinalize(dest)
def backend_version(self):
import objc
return objc.__version__
| true | true |
f733f2a1784395f7f9f4f6cf9fedb6cc76d32a56 | 1,568 | py | Python | src/third_party/skia/tools/infra/go.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | [
"BSD-3-Clause"
] | 6,304 | 2015-01-05T23:45:12.000Z | 2022-03-31T09:48:13.000Z | src/third_party/skia/tools/infra/go.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | [
"BSD-3-Clause"
] | 67 | 2016-04-18T13:30:02.000Z | 2022-03-31T23:06:55.000Z | src/third_party/skia/tools/infra/go.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | [
"BSD-3-Clause"
] | 1,231 | 2015-01-05T03:17:39.000Z | 2022-03-31T22:54:58.000Z | #!/usr/bin/env python
#
# Copyright 2019 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
# Import path of the Skia infrastructure Go codebase.
INFRA_GO = 'go.skia.org/infra'
# Platform-appropriate command for locating an executable on PATH.
WHICH = 'where' if sys.platform == 'win32' else 'which'
def check():
  '''Verify that golang is properly installed. If not, exit with an error.'''
  def _fail(msg):
    # Write directly to stderr: works under both Python 2 and 3, whereas
    # the previous "print >> sys.stderr, msg" is a SyntaxError on Python 3
    # and made this module unimportable there.
    sys.stderr.write(msg + '\n')
    sys.exit(1)

  # 1) The "go" executable must be findable on PATH.
  try:
    go_exe = subprocess.check_output([WHICH, 'go'])
  except (subprocess.CalledProcessError, OSError):
    go_exe = None
  if not go_exe:
    _fail('Unable to find Golang installation; see '
          'https://golang.org/doc/install')

  # 2) GOPATH must be set so packages can be fetched and installed.
  if not os.environ.get('GOPATH'):
    _fail('GOPATH environment variable is not set; is Golang properly '
          'installed?')

  # 3) $GOPATH/bin must be on PATH so installed tools are runnable.
  go_bin = os.path.join(os.environ['GOPATH'], 'bin')
  for entry in os.environ.get('PATH', '').split(os.pathsep):
    if entry == go_bin:
      break
  else:
    _fail('%s not in PATH; is Golang properly installed?' % go_bin)
def get(pkg):
  """Obtain/update the given package/module via "go get"."""
  check()
  cmd = ['go', 'get', '-u', pkg]
  subprocess.check_call(cmd)
def update_infra():
  """Update the local checkout of the Skia infra codebase."""
  # Fetch every package under the infra repo root.
  get(INFRA_GO + '/...')
def mod_download(*pkgs):
  """Run "go mod download" to obtain the given package(s)."""
  check()
  subprocess.check_call(['go', 'mod', 'download', *pkgs])
def install(pkg):
  """"go install" the given package."""
  check()
  cmd = ['go', 'install', pkg]
  subprocess.check_call(cmd)
| 24.888889 | 77 | 0.655612 |
import os
import subprocess
import sys
INFRA_GO = 'go.skia.org/infra'
WHICH = 'where' if sys.platform == 'win32' else 'which'
def check():
def _fail(msg):
print >> sys.stderr, msg
sys.exit(1)
try:
go_exe = subprocess.check_output([WHICH, 'go'])
except (subprocess.CalledProcessError, OSError):
go_exe = None
if not go_exe:
_fail('Unable to find Golang installation; see '
'https://golang.org/doc/install')
if not os.environ.get('GOPATH'):
_fail('GOPATH environment variable is not set; is Golang properly '
'installed?')
go_bin = os.path.join(os.environ['GOPATH'], 'bin')
for entry in os.environ.get('PATH', '').split(os.pathsep):
if entry == go_bin:
break
else:
_fail('%s not in PATH; is Golang properly installed?' % go_bin)
def get(pkg):
check()
subprocess.check_call(['go', 'get', '-u', pkg])
def update_infra():
get(INFRA_GO + '/...')
def mod_download(*pkgs):
check()
subprocess.check_call(['go', 'mod', 'download']+list(pkgs))
def install(pkg):
check()
subprocess.check_call(['go', 'install', pkg])
| true | true |
f733f2bf14d68ca46e6e18dcb2d86e9062767d53 | 86 | py | Python | config.py | klazich/FSND-01-movie-website | 6a0dd8c74ed96c955b417794f1a4d84bfae9d190 | [
"Unlicense"
] | null | null | null | config.py | klazich/FSND-01-movie-website | 6a0dd8c74ed96c955b417794f1a4d84bfae9d190 | [
"Unlicense"
] | null | null | null | config.py | klazich/FSND-01-movie-website | 6a0dd8c74ed96c955b417794f1a4d84bfae9d190 | [
"Unlicense"
] | null | null | null | OMDB_API_KEY = 'df397f1b'
YOUTUBE_API_KEY = 'AIzaSyBlr3kG98VwGz5D3QufXG2dqXgj6HDnwpQ'
| 28.666667 | 59 | 0.860465 | OMDB_API_KEY = 'df397f1b'
YOUTUBE_API_KEY = 'AIzaSyBlr3kG98VwGz5D3QufXG2dqXgj6HDnwpQ'
| true | true |
f733f301e8dc0dbd2b145c5d08b9b4cc8f6f5df8 | 22,533 | py | Python | utils/tests_fetcher.py | marshmellow77/transformers | 215e0681e4c3f6ade6e219d022a5e640b42fcb76 | [
"Apache-2.0"
] | 8,028 | 2018-11-05T15:19:44.000Z | 2019-07-16T09:14:59.000Z | utils/tests_fetcher.py | marshmellow77/transformers | 215e0681e4c3f6ade6e219d022a5e640b42fcb76 | [
"Apache-2.0"
] | 731 | 2018-11-05T21:35:52.000Z | 2019-07-16T09:51:26.000Z | utils/tests_fetcher.py | marshmellow77/transformers | 215e0681e4c3f6ade6e219d022a5e640b42fcb76 | [
"Apache-2.0"
] | 2,106 | 2018-11-05T15:29:15.000Z | 2019-07-16T08:51:57.000Z | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import os
import re
from contextlib import contextmanager
from pathlib import Path
from git import Repo
# This script is intended to be run from the root of the repo but you can adapt this constant if you need to.
PATH_TO_TRANFORMERS = "."
@contextmanager
def checkout_commit(repo, commit_id):
    """
    Context manager temporarily checking out `commit_id` in `repo`, restoring the previous
    HEAD (branch ref, or commit when detached) on exit.
    """
    if repo.head.is_detached:
        previous = repo.head.commit
    else:
        previous = repo.head.ref
    try:
        repo.git.checkout(commit_id)
        yield
    finally:
        repo.git.checkout(previous)
def clean_code(content):
    """
    Strip triple-quoted docstrings, `#` comments and blank lines from `content`, returning the
    remaining lines joined by newlines.
    """
    # Keeping only the even-indexed splits drops everything between matching triple quotes.
    for quote in ('"""', "'''"):
        content = "".join(content.split(quote)[::2])
    kept = []
    for raw_line in content.split("\n"):
        # Drop anything that follows a # sign (naive: also triggers inside strings).
        stripped = re.sub("#.*$", "", raw_line)
        if stripped and not stripped.isspace():
            kept.append(stripped)
    return "\n".join(kept)
def diff_is_docstring_only(repo, branching_point, filename):
    """
    Return `True` when `filename` differs between `branching_point` and the current checkout
    only in docstrings/comments/blank lines (as judged by `clean_code`).
    """
    # Read the old version of the file at the branching point...
    with checkout_commit(repo, branching_point):
        with open(filename, "r", encoding="utf-8") as f:
            old_content = f.read()
    # ...and the current version from the working tree.
    with open(filename, "r", encoding="utf-8") as f:
        new_content = f.read()
    return clean_code(old_content) == clean_code(new_content)
def get_modified_python_files(diff_with_last_commit=False):
    """
    Return a list of python files that have been modified between:

    - the current head and the main branch if `diff_with_last_commit=False` (default)
    - the current head and its parent commit otherwise.
    """
    repo = Repo(PATH_TO_TRANFORMERS)
    if diff_with_last_commit:
        print(f"main is at {repo.head.commit}")
        base_commits = repo.head.commit.parents
        for commit in base_commits:
            print(f"Parent commit: {commit}")
    else:
        print(f"main is at {repo.refs.main.commit}")
        print(f"Current head is at {repo.head.commit}")
        base_commits = repo.merge_base(repo.refs.main, repo.head)
        for commit in base_commits:
            print(f"Branching commit: {commit}")
    return get_diff(repo, repo.head.commit, base_commits)
def get_diff(repo, base_commit, commits):
    """
    Compute the list of python files whose code changed between the given `commits` and
    `base_commit`; pure docstring/comment changes are ignored.
    """
    print("\n### DIFF ###\n")
    code_diff = []
    for commit in commits:
        for diff_obj in commit.diff(base_commit):
            change_type = diff_obj.change_type
            # Newly added python files are always relevant.
            if change_type == "A" and diff_obj.b_path.endswith(".py"):
                code_diff.append(diff_obj.b_path)
            # Deleted python files may break the tests that used them.
            elif change_type == "D" and diff_obj.a_path.endswith(".py"):
                code_diff.append(diff_obj.a_path)
            # Modified or renamed python files.
            elif change_type in ["M", "R"] and diff_obj.b_path.endswith(".py"):
                if diff_obj.a_path != diff_obj.b_path:
                    # Rename: look at the tests attached to both the old and the new name.
                    code_diff.extend([diff_obj.a_path, diff_obj.b_path])
                elif diff_is_docstring_only(repo, commit, diff_obj.b_path):
                    print(f"Ignoring diff in {diff_obj.b_path} as it only concerns docstrings or comments.")
                else:
                    code_diff.append(diff_obj.a_path)
    return code_diff
def get_module_dependencies(module_fname):
    """
    Get the intra-repo dependencies of a module.

    Args:
        module_fname (`str`): Path of the module, relative to the repo root and using the
            OS path separator, e.g. `src/transformers/models/bert/modeling_bert.py`.

    Returns:
        `List[str]`: The paths (relative to the repo root) of the files imported by
        `module_fname`, each resolved to either a `.py` file or a package `__init__.py`.
        Imports carrying a `# tests_ignore` comment are skipped.
    """
    with open(os.path.join(PATH_TO_TRANFORMERS, module_fname), "r", encoding="utf-8") as f:
        content = f.read()
    module_parts = module_fname.split(os.path.sep)
    imported_modules = []
    # Let's start with relative imports
    relative_imports = re.findall(r"from\s+(\.+\S+)\s+import\s+([^\n]+)\n", content)
    relative_imports = [mod for mod, imp in relative_imports if "# tests_ignore" not in imp]
    for imp in relative_imports:
        # Count the leading dots to know how many levels up the import reaches.
        level = 0
        while imp.startswith("."):
            imp = imp[1:]
            level += 1
        if len(imp) > 0:
            dep_parts = module_parts[: len(module_parts) - level] + imp.split(".")
        else:
            # Nothing left after the dots: the dependency is the package init itself.
            dep_parts = module_parts[: len(module_parts) - level] + ["__init__.py"]
        imported_module = os.path.sep.join(dep_parts)
        # We ignore the main init import as it's only for the __version__ that it's done
        # and it would add everything as a dependency.
        if not imported_module.endswith("transformers/__init__.py"):
            imported_modules.append(imported_module)
    # Let's continue with direct imports
    # The import from the transformers module are ignored for the same reason we ignored the
    # main init before.
    direct_imports = re.findall(r"from\s+transformers\.(\S+)\s+import\s+([^\n]+)\n", content)
    direct_imports = [mod for mod, imp in direct_imports if "# tests_ignore" not in imp]
    for imp in direct_imports:
        import_parts = imp.split(".")
        dep_parts = ["src", "transformers"] + import_parts
        imported_modules.append(os.path.sep.join(dep_parts))
    # Now let's just check that we have proper module files, or append an init for submodules
    dependencies = []
    for imported_module in imported_modules:
        if os.path.isfile(os.path.join(PATH_TO_TRANFORMERS, f"{imported_module}.py")):
            dependencies.append(f"{imported_module}.py")
        elif os.path.isdir(os.path.join(PATH_TO_TRANFORMERS, imported_module)) and os.path.isfile(
            os.path.sep.join([PATH_TO_TRANFORMERS, imported_module, "__init__.py"])
        ):
            dependencies.append(os.path.sep.join([imported_module, "__init__.py"]))
    return dependencies
def get_test_dependencies(test_fname):
    """
    Get the dependencies of a test file.

    Args:
        test_fname (`str`): Path of the test file, relative to the repo root (e.g. starting
            with `tests/`).

    Returns:
        `List[str]`: The paths (relative to the repo root) of the other test files imported by
        `test_fname`, restricted to files that actually exist on disk. Imports carrying a
        `# tests_ignore` comment are skipped.
    """
    with open(os.path.join(PATH_TO_TRANFORMERS, test_fname), "r", encoding="utf-8") as f:
        content = f.read()
    # Tests only have relative imports for other test files
    # TODO Sylvain: handle relative imports cleanly
    relative_imports = re.findall(r"from\s+(\.\S+)\s+import\s+([^\n]+)\n", content)
    relative_imports = [test for test, imp in relative_imports if "# tests_ignore" not in imp]
    # Removes the double trailing '..' for parent imports, and creates an absolute path from the root dir with
    # `tests` as a prefix.
    parent_imports = [imp.strip(".") for imp in relative_imports if ".." in imp]
    parent_imports = [os.path.join("tests", f"{test.replace('.', os.path.sep)}.py") for test in parent_imports]
    # Removes the single trailing '.' for current dir imports, and creates an absolute path from the root dir with
    # tests/{module_name} as a prefix.
    current_dir_imports = [imp.strip(".") for imp in relative_imports if ".." not in imp]
    directory = os.path.sep.join(test_fname.split(os.path.sep)[:-1])
    current_dir_imports = [
        os.path.join(directory, f"{test.replace('.', os.path.sep)}.py") for test in current_dir_imports
    ]
    # Only keep dependencies that resolve to real files.
    return [f for f in [*parent_imports, *current_dir_imports] if os.path.isfile(f)]
def create_reverse_dependency_map():
    """
    Create the dependency map from module/test filename to the list of modules/tests that depend on it (even
    recursively).

    Returns:
        `Dict[str, List[str]]`: Keys are file paths (relative to the repo root) and values are the
        files depending on them. `__init__.py` files additionally carry their own (recursive)
        dependencies in their entry.
    """
    modules = [
        str(f.relative_to(PATH_TO_TRANFORMERS))
        for f in (Path(PATH_TO_TRANFORMERS) / "src/transformers").glob("**/*.py")
    ]
    # We grab all the dependencies of each module.
    direct_deps = {m: get_module_dependencies(m) for m in modules}
    # We add all the dependencies of each test file
    tests = [str(f.relative_to(PATH_TO_TRANFORMERS)) for f in (Path(PATH_TO_TRANFORMERS) / "tests").glob("**/*.py")]
    direct_deps.update({t: get_test_dependencies(t) for t in tests})
    all_files = modules + tests
    # This recurses the dependencies
    # Fixed-point iteration: keep folding transitive dependencies into each entry until
    # a full pass makes no change.
    something_changed = True
    while something_changed:
        something_changed = False
        for m in all_files:
            for d in direct_deps[m]:
                if d not in direct_deps:
                    raise ValueError(f"KeyError:{d}. From {m}")
                for dep in direct_deps[d]:
                    if dep not in direct_deps[m]:
                        direct_deps[m].append(dep)
                        something_changed = True
    # Finally we can build the reverse map.
    reverse_map = collections.defaultdict(list)
    for m in all_files:
        if m.endswith("__init__.py"):
            reverse_map[m].extend(direct_deps[m])
        for d in direct_deps[m]:
            reverse_map[d].append(m)
    return reverse_map
# Any module file that has a test name which can't be inferred automatically from its name should go here. A better
# approach is to (re-)name the test file accordingly, and second best to add the correspondence map here.
SPECIAL_MODULE_TO_TEST_MAP = {
"commands/add_new_model_like.py": "utils/test_add_new_model_like.py",
"configuration_utils.py": "test_configuration_common.py",
"convert_graph_to_onnx.py": "onnx/test_onnx.py",
"data/data_collator.py": "trainer/test_data_collator.py",
"deepspeed.py": "deepspeed/",
"feature_extraction_sequence_utils.py": "test_sequence_feature_extraction_common.py",
"feature_extraction_utils.py": "test_feature_extraction_common.py",
"file_utils.py": ["utils/test_file_utils.py", "utils/test_model_output.py"],
"utils/generic.py": ["utils/test_file_utils.py", "utils/test_model_output.py"],
"utils/hub.py": "utils/test_file_utils.py",
"modelcard.py": "utils/test_model_card.py",
"modeling_flax_utils.py": "test_modeling_flax_common.py",
"modeling_tf_utils.py": ["test_modeling_tf_common.py", "utils/test_modeling_tf_core.py"],
"modeling_utils.py": ["test_modeling_common.py", "utils/test_offline.py"],
"models/auto/modeling_auto.py": [
"models/auto/test_modeling_auto.py",
"models/auto/test_modeling_tf_pytorch.py",
"models/bort/test_modeling_bort.py",
"models/dit/test_modeling_dit.py",
],
"models/auto/modeling_flax_auto.py": "models/auto/test_modeling_flax_auto.py",
"models/auto/modeling_tf_auto.py": [
"models/auto/test_modeling_tf_auto.py",
"models/auto/test_modeling_tf_pytorch.py",
"models/bort/test_modeling_tf_bort.py",
],
"models/gpt2/modeling_gpt2.py": [
"models/gpt2/test_modeling_gpt2.py",
"models/megatron_gpt2/test_modeling_megatron_gpt2.py",
],
"optimization.py": "optimization/test_optimization.py",
"optimization_tf.py": "optimization/test_optimization_tf.py",
"pipelines/base.py": "pipelines/test_pipelines_*.py",
"pipelines/text2text_generation.py": [
"pipelines/test_pipelines_text2text_generation.py",
"pipelines/test_pipelines_summarization.py",
"pipelines/test_pipelines_translation.py",
],
"pipelines/zero_shot_classification.py": "pipelines/test_pipelines_zero_shot.py",
"testing_utils.py": "utils/test_skip_decorators.py",
"tokenization_utils.py": ["test_tokenization_common.py", "tokenization/test_tokenization_utils.py"],
"tokenization_utils_base.py": ["test_tokenization_common.py", "tokenization/test_tokenization_utils.py"],
"tokenization_utils_fast.py": [
"test_tokenization_common.py",
"tokenization/test_tokenization_utils.py",
"tokenization/test_tokenization_fast.py",
],
"trainer.py": [
"trainer/test_trainer.py",
"extended/test_trainer_ext.py",
"trainer/test_trainer_distributed.py",
"trainer/test_trainer_tpu.py",
],
"train_pt_utils.py": "trainer/test_trainer_utils.py",
"utils/versions.py": "utils/test_versions_utils.py",
}
def module_to_test_file(module_fname):
    """
    Returns the name of the file(s) where `module_fname` is tested.

    Args:
        module_fname (`str`): Path of the module, relative to the repo root.

    Returns:
        `str`, `List[str]` or `None`: The test file(s) mapped to the module. `None` is returned
        (by falling through without a `return`) when no candidate test file exists on disk.
    """
    splits = module_fname.split(os.path.sep)
    # Special map has priority
    # (paths in the map are expressed relative to `src/transformers`, hence `splits[2:]`)
    short_name = os.path.sep.join(splits[2:])
    if short_name in SPECIAL_MODULE_TO_TEST_MAP:
        test_file = SPECIAL_MODULE_TO_TEST_MAP[short_name]
        if isinstance(test_file, str):
            return f"tests/{test_file}"
        return [f"tests/{f}" for f in test_file]
    module_name = splits[-1]
    # Fast tokenizers are tested in the same file as the slow ones.
    if module_name.endswith("_fast.py"):
        module_name = module_name.replace("_fast.py", ".py")
    # Special case for pipelines submodules
    if len(splits) >= 2 and splits[-2] == "pipelines":
        default_test_file = f"tests/pipelines/test_pipelines_{module_name}"
    # Special case for benchmarks submodules
    elif len(splits) >= 2 and splits[-2] == "benchmark":
        return ["tests/benchmark/test_benchmark.py", "tests/benchmark/test_benchmark_tf.py"]
    # Special case for commands submodules
    elif len(splits) >= 2 and splits[-2] == "commands":
        return "tests/utils/test_cli.py"
    # Special case for onnx submodules
    elif len(splits) >= 2 and splits[-2] == "onnx":
        return ["tests/onnx/test_onnx.py", "tests/onnx/test_onnx_v2.py"]
    # Special case for utils (not the one in src/transformers, the ones at the root of the repo).
    elif len(splits) > 0 and splits[0] == "utils":
        default_test_file = f"tests/utils/test_utils_{module_name}"
    elif len(splits) > 4 and splits[2] == "models":
        default_test_file = f"tests/models/{splits[3]}/test_{module_name}"
    elif len(splits) > 2 and splits[2].startswith("generation"):
        default_test_file = f"tests/generation/test_{module_name}"
    elif len(splits) > 2 and splits[2].startswith("trainer"):
        default_test_file = f"tests/trainer/test_{module_name}"
    else:
        default_test_file = f"tests/utils/test_{module_name}"
    if os.path.isfile(default_test_file):
        return default_test_file
    # Processing -> processor
    if "processing" in default_test_file:
        test_file = default_test_file.replace("processing", "processor")
        if os.path.isfile(test_file):
            return test_file
    # Implicitly returns None here when neither candidate file exists.
# This list contains the list of test files we expect never to be launched from a change in a module/util. Those are
# launched separately.
EXPECTED_TEST_FILES_NEVER_TOUCHED = [
"tests/utils/test_doc_samples.py", # Doc tests
"tests/pipelines/test_pipelines_common.py", # Actually checked by the pipeline based file
"tests/sagemaker/test_single_node_gpu.py", # SageMaker test
"tests/sagemaker/test_multi_node_model_parallel.py", # SageMaker test
"tests/sagemaker/test_multi_node_data_parallel.py", # SageMaker test
]
def _print_list(l):
return "\n".join([f"- {f}" for f in l])
def sanity_check():
    """
    Checks that all test files can be touched by a modification in at least one module/utils. This test ensures that
    newly-added test files are properly mapped to some module or utils, so they can be run by the CI.

    Raises:
        ValueError: If some existing test files are not reachable from any module/utils file and
            are not listed in `EXPECTED_TEST_FILES_NEVER_TOUCHED`.
    """
    # Grab all module and utils
    all_files = [
        str(p.relative_to(PATH_TO_TRANFORMERS))
        for p in (Path(PATH_TO_TRANFORMERS) / "src/transformers").glob("**/*.py")
    ]
    all_files += [
        str(p.relative_to(PATH_TO_TRANFORMERS)) for p in (Path(PATH_TO_TRANFORMERS) / "utils").glob("**/*.py")
    ]
    # Compute all the test files we get from those.
    test_files_found = []
    for f in all_files:
        test_f = module_to_test_file(f)
        if test_f is not None:
            if isinstance(test_f, str):
                test_files_found.append(test_f)
            else:
                test_files_found.extend(test_f)
    # Some of the test files might actually be subfolders so we grab the tests inside.
    test_files = []
    for test_f in test_files_found:
        if os.path.isdir(os.path.join(PATH_TO_TRANFORMERS, test_f)):
            test_files.extend(
                [
                    str(p.relative_to(PATH_TO_TRANFORMERS))
                    for p in (Path(PATH_TO_TRANFORMERS) / test_f).glob("**/test*.py")
                ]
            )
        else:
            test_files.append(test_f)
    # Compare to existing test files
    existing_test_files = [
        str(p.relative_to(PATH_TO_TRANFORMERS)) for p in (Path(PATH_TO_TRANFORMERS) / "tests").glob("**/test*.py")
    ]
    not_touched_test_files = [f for f in existing_test_files if f not in test_files]
    should_be_tested = set(not_touched_test_files) - set(EXPECTED_TEST_FILES_NEVER_TOUCHED)
    if len(should_be_tested) > 0:
        raise ValueError(
            "The following test files are not currently associated with any module or utils files, which means they "
            f"will never get run by the CI:\n{_print_list(should_be_tested)}\n. Make sure the names of these test "
            "files match the name of the module or utils they are testing, or adapt the constant "
            "`SPECIAL_MODULE_TO_TEST_MAP` in `utils/tests_fetcher.py` to add them. If your test file is triggered "
            "separately and is not supposed to be run by the regular CI, add it to the "
            "`EXPECTED_TEST_FILES_NEVER_TOUCHED` constant instead."
        )
def infer_tests_to_run(output_file, diff_with_last_commit=False, filters=None):
    """
    Determine the test files impacted by the current diff and write them to `output_file`.

    Args:
        output_file (`str`): Where to write the space-separated list of tests to run (only
            written when at least one test is selected).
        diff_with_last_commit (`bool`, *optional*, defaults to `False`):
            Whether to diff against the last commit instead of the branching point from main.
        filters (`List[str]`, *optional*):
            If provided, only keep the test files whose path starts with one of these prefixes.
    """
    modified_files = get_modified_python_files(diff_with_last_commit=diff_with_last_commit)
    print(f"\n### MODIFIED FILES ###\n{_print_list(modified_files)}")
    # Create the map that will give us all impacted modules.
    impacted_modules_map = create_reverse_dependency_map()
    impacted_files = modified_files.copy()
    for f in modified_files:
        if f in impacted_modules_map:
            impacted_files.extend(impacted_modules_map[f])
    # Remove duplicates
    impacted_files = sorted(set(impacted_files))
    print(f"\n### IMPACTED FILES ###\n{_print_list(impacted_files)}")
    # Grab the corresponding test files:
    if "setup.py" in impacted_files:
        # A change to setup.py can affect anything: run the whole test suite.
        test_files_to_run = ["tests"]
    else:
        test_files_to_run = []
        for f in impacted_files:
            # Modified test files are always added
            if f.startswith("tests/"):
                test_files_to_run.append(f)
            # Example files are tested separately
            elif f.startswith("examples/pytorch"):
                test_files_to_run.append("examples/pytorch/test_pytorch_examples.py")
                test_files_to_run.append("examples/pytorch/test_accelerate_examples.py")
            elif f.startswith("examples/flax"):
                test_files_to_run.append("examples/flax/test_flax_examples.py")
            else:
                new_tests = module_to_test_file(f)
                if new_tests is not None:
                    if isinstance(new_tests, str):
                        test_files_to_run.append(new_tests)
                    else:
                        test_files_to_run.extend(new_tests)
        # Remove duplicates
        test_files_to_run = sorted(set(test_files_to_run))
        # Make sure we did not end up with a test file that was removed
        test_files_to_run = [f for f in test_files_to_run if os.path.isfile(f) or os.path.isdir(f)]
    if filters is not None:
        filtered_files = []
        # `path_filter` instead of `filter`, which would shadow the builtin.
        for path_filter in filters:
            filtered_files.extend([f for f in test_files_to_run if f.startswith(path_filter)])
        test_files_to_run = filtered_files
    print(f"\n### TEST TO RUN ###\n{_print_list(test_files_to_run)}")
    if len(test_files_to_run) > 0:
        with open(output_file, "w", encoding="utf-8") as f:
            f.write(" ".join(test_files_to_run))
# CLI entry point: either run the sanity check, or infer and write the list of tests to run.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--sanity_check", action="store_true", help="Only test that all tests and modules are accounted for."
    )
    parser.add_argument(
        "--output_file", type=str, default="test_list.txt", help="Where to store the list of tests to run"
    )
    parser.add_argument(
        "--diff_with_last_commit",
        action="store_true",
        help="To fetch the tests between the current commit and the last commit",
    )
    parser.add_argument(
        "--filters",
        type=str,
        nargs="*",
        default=["tests"],
        help="Only keep the test files matching one of those filters.",
    )
    args = parser.parse_args()
    if args.sanity_check:
        sanity_check()
    else:
        repo = Repo(PATH_TO_TRANFORMERS)
        diff_with_last_commit = args.diff_with_last_commit
        # On the main branch there is no branching point with itself, so diff against the last commit.
        if not diff_with_last_commit and not repo.head.is_detached and repo.head.ref == repo.refs.main:
            print("main branch detected, fetching tests against last commit.")
            diff_with_last_commit = True
        try:
            infer_tests_to_run(args.output_file, diff_with_last_commit=diff_with_last_commit, filters=args.filters)
        except Exception as e:
            # If test inference fails for any reason, fall back to running everything (within the filters).
            print(f"\nError when trying to grab the relevant tests: {e}\n\nRunning all tests.")
            with open(args.output_file, "w", encoding="utf-8") as f:
                if args.filters is None:
                    f.write("./tests/")
                else:
                    f.write(" ".join(args.filters))
| 41.805195 | 117 | 0.66143 |
import argparse
import collections
import os
import re
from contextlib import contextmanager
from pathlib import Path
from git import Repo
PATH_TO_TRANFORMERS = "."
@contextmanager
def checkout_commit(repo, commit_id):
current_head = repo.head.commit if repo.head.is_detached else repo.head.ref
try:
repo.git.checkout(commit_id)
yield
finally:
repo.git.checkout(current_head)
def clean_code(content):
splits = content.split('\"\"\"')
content = "".join(splits[::2])
splits = content.split("\'\'\'")
content = "".join(splits[::2])
lines_to_keep = []
for line in content.split("\n"):
line = re.sub("#.*$", "", line)
if len(line) == 0 or line.isspace():
continue
lines_to_keep.append(line)
return "\n".join(lines_to_keep)
def diff_is_docstring_only(repo, branching_point, filename):
with checkout_commit(repo, branching_point):
with open(filename, "r", encoding="utf-8") as f:
old_content = f.read()
with open(filename, "r", encoding="utf-8") as f:
new_content = f.read()
old_content_clean = clean_code(old_content)
new_content_clean = clean_code(new_content)
return old_content_clean == new_content_clean
def get_modified_python_files(diff_with_last_commit=False):
repo = Repo(PATH_TO_TRANFORMERS)
if not diff_with_last_commit:
print(f"main is at {repo.refs.main.commit}")
print(f"Current head is at {repo.head.commit}")
branching_commits = repo.merge_base(repo.refs.main, repo.head)
for commit in branching_commits:
print(f"Branching commit: {commit}")
return get_diff(repo, repo.head.commit, branching_commits)
else:
print(f"main is at {repo.head.commit}")
parent_commits = repo.head.commit.parents
for commit in parent_commits:
print(f"Parent commit: {commit}")
return get_diff(repo, repo.head.commit, parent_commits)
def get_diff(repo, base_commit, commits):
print("\n### DIFF ###\n")
code_diff = []
for commit in commits:
for diff_obj in commit.diff(base_commit):
if diff_obj.change_type == "A" and diff_obj.b_path.endswith(".py"):
code_diff.append(diff_obj.b_path)
elif diff_obj.change_type == "D" and diff_obj.a_path.endswith(".py"):
code_diff.append(diff_obj.a_path)
# Now for modified files
elif diff_obj.change_type in ["M", "R"] and diff_obj.b_path.endswith(".py"):
# In case of renames, we'll look at the tests using both the old and new name.
if diff_obj.a_path != diff_obj.b_path:
code_diff.extend([diff_obj.a_path, diff_obj.b_path])
else:
if diff_is_docstring_only(repo, commit, diff_obj.b_path):
print(f"Ignoring diff in {diff_obj.b_path} as it only concerns docstrings or comments.")
else:
code_diff.append(diff_obj.a_path)
return code_diff
def get_module_dependencies(module_fname):
with open(os.path.join(PATH_TO_TRANFORMERS, module_fname), "r", encoding="utf-8") as f:
content = f.read()
module_parts = module_fname.split(os.path.sep)
imported_modules = []
relative_imports = re.findall(r"from\s+(\.+\S+)\s+import\s+([^\n]+)\n", content)
relative_imports = [mod for mod, imp in relative_imports if "# tests_ignore" not in imp]
for imp in relative_imports:
level = 0
while imp.startswith("."):
imp = imp[1:]
level += 1
if len(imp) > 0:
dep_parts = module_parts[: len(module_parts) - level] + imp.split(".")
else:
dep_parts = module_parts[: len(module_parts) - level] + ["__init__.py"]
imported_module = os.path.sep.join(dep_parts)
# We ignore the main init import as it's only for the __version__ that it's done
# and it would add everything as a dependency.
if not imported_module.endswith("transformers/__init__.py"):
imported_modules.append(imported_module)
# Let's continue with direct imports
direct_imports = re.findall(r"from\s+transformers\.(\S+)\s+import\s+([^\n]+)\n", content)
direct_imports = [mod for mod, imp in direct_imports if "# tests_ignore" not in imp]
for imp in direct_imports:
import_parts = imp.split(".")
dep_parts = ["src", "transformers"] + import_parts
imported_modules.append(os.path.sep.join(dep_parts))
dependencies = []
for imported_module in imported_modules:
if os.path.isfile(os.path.join(PATH_TO_TRANFORMERS, f"{imported_module}.py")):
dependencies.append(f"{imported_module}.py")
elif os.path.isdir(os.path.join(PATH_TO_TRANFORMERS, imported_module)) and os.path.isfile(
os.path.sep.join([PATH_TO_TRANFORMERS, imported_module, "__init__.py"])
):
dependencies.append(os.path.sep.join([imported_module, "__init__.py"]))
return dependencies
def get_test_dependencies(test_fname):
with open(os.path.join(PATH_TO_TRANFORMERS, test_fname), "r", encoding="utf-8") as f:
content = f.read()
# Tests only have relative imports for other test files
# TODO Sylvain: handle relative imports cleanly
relative_imports = re.findall(r"from\s+(\.\S+)\s+import\s+([^\n]+)\n", content)
relative_imports = [test for test, imp in relative_imports if "# tests_ignore" not in imp]
# Removes the double trailing '..' for parent imports, and creates an absolute path from the root dir with
# `tests` as a prefix.
parent_imports = [imp.strip(".") for imp in relative_imports if ".." in imp]
parent_imports = [os.path.join("tests", f"{test.replace('.', os.path.sep)}.py") for test in parent_imports]
# Removes the single trailing '.' for current dir imports, and creates an absolute path from the root dir with
# tests/{module_name} as a prefix.
current_dir_imports = [imp.strip(".") for imp in relative_imports if ".." not in imp]
directory = os.path.sep.join(test_fname.split(os.path.sep)[:-1])
current_dir_imports = [
os.path.join(directory, f"{test.replace('.', os.path.sep)}.py") for test in current_dir_imports
]
return [f for f in [*parent_imports, *current_dir_imports] if os.path.isfile(f)]
def create_reverse_dependency_map():
modules = [
str(f.relative_to(PATH_TO_TRANFORMERS))
for f in (Path(PATH_TO_TRANFORMERS) / "src/transformers").glob("**/*.py")
]
# We grab all the dependencies of each module.
direct_deps = {m: get_module_dependencies(m) for m in modules}
# We add all the dependencies of each test file
tests = [str(f.relative_to(PATH_TO_TRANFORMERS)) for f in (Path(PATH_TO_TRANFORMERS) / "tests").glob("**/*.py")]
direct_deps.update({t: get_test_dependencies(t) for t in tests})
all_files = modules + tests
# This recurses the dependencies
something_changed = True
while something_changed:
something_changed = False
for m in all_files:
for d in direct_deps[m]:
if d not in direct_deps:
raise ValueError(f"KeyError:{d}. From {m}")
for dep in direct_deps[d]:
if dep not in direct_deps[m]:
direct_deps[m].append(dep)
something_changed = True
# Finally we can build the reverse map.
reverse_map = collections.defaultdict(list)
for m in all_files:
if m.endswith("__init__.py"):
reverse_map[m].extend(direct_deps[m])
for d in direct_deps[m]:
reverse_map[d].append(m)
return reverse_map
# Any module file that has a test name which can't be inferred automatically from its name should go here. A better
SPECIAL_MODULE_TO_TEST_MAP = {
"commands/add_new_model_like.py": "utils/test_add_new_model_like.py",
"configuration_utils.py": "test_configuration_common.py",
"convert_graph_to_onnx.py": "onnx/test_onnx.py",
"data/data_collator.py": "trainer/test_data_collator.py",
"deepspeed.py": "deepspeed/",
"feature_extraction_sequence_utils.py": "test_sequence_feature_extraction_common.py",
"feature_extraction_utils.py": "test_feature_extraction_common.py",
"file_utils.py": ["utils/test_file_utils.py", "utils/test_model_output.py"],
"utils/generic.py": ["utils/test_file_utils.py", "utils/test_model_output.py"],
"utils/hub.py": "utils/test_file_utils.py",
"modelcard.py": "utils/test_model_card.py",
"modeling_flax_utils.py": "test_modeling_flax_common.py",
"modeling_tf_utils.py": ["test_modeling_tf_common.py", "utils/test_modeling_tf_core.py"],
"modeling_utils.py": ["test_modeling_common.py", "utils/test_offline.py"],
"models/auto/modeling_auto.py": [
"models/auto/test_modeling_auto.py",
"models/auto/test_modeling_tf_pytorch.py",
"models/bort/test_modeling_bort.py",
"models/dit/test_modeling_dit.py",
],
"models/auto/modeling_flax_auto.py": "models/auto/test_modeling_flax_auto.py",
"models/auto/modeling_tf_auto.py": [
"models/auto/test_modeling_tf_auto.py",
"models/auto/test_modeling_tf_pytorch.py",
"models/bort/test_modeling_tf_bort.py",
],
"models/gpt2/modeling_gpt2.py": [
"models/gpt2/test_modeling_gpt2.py",
"models/megatron_gpt2/test_modeling_megatron_gpt2.py",
],
"optimization.py": "optimization/test_optimization.py",
"optimization_tf.py": "optimization/test_optimization_tf.py",
"pipelines/base.py": "pipelines/test_pipelines_*.py",
"pipelines/text2text_generation.py": [
"pipelines/test_pipelines_text2text_generation.py",
"pipelines/test_pipelines_summarization.py",
"pipelines/test_pipelines_translation.py",
],
"pipelines/zero_shot_classification.py": "pipelines/test_pipelines_zero_shot.py",
"testing_utils.py": "utils/test_skip_decorators.py",
"tokenization_utils.py": ["test_tokenization_common.py", "tokenization/test_tokenization_utils.py"],
"tokenization_utils_base.py": ["test_tokenization_common.py", "tokenization/test_tokenization_utils.py"],
"tokenization_utils_fast.py": [
"test_tokenization_common.py",
"tokenization/test_tokenization_utils.py",
"tokenization/test_tokenization_fast.py",
],
"trainer.py": [
"trainer/test_trainer.py",
"extended/test_trainer_ext.py",
"trainer/test_trainer_distributed.py",
"trainer/test_trainer_tpu.py",
],
"train_pt_utils.py": "trainer/test_trainer_utils.py",
"utils/versions.py": "utils/test_versions_utils.py",
}
def module_to_test_file(module_fname):
    """Resolve a source module path to its corresponding test file(s).

    Returns a single path (str), several paths (list), or None when no
    matching test file exists on disk.
    """
    parts = module_fname.split(os.path.sep)
    # Special-cased modules are looked up by their path relative to the repo sub-folder.
    short_name = os.path.sep.join(parts[2:])
    if short_name in SPECIAL_MODULE_TO_TEST_MAP:
        mapped = SPECIAL_MODULE_TO_TEST_MAP[short_name]
        if isinstance(mapped, str):
            return f"tests/{mapped}"
        return [f"tests/{name}" for name in mapped]

    module_name = parts[-1]
    # Fast tokenizers share the tests of their slow counterpart.
    if module_name.endswith("_fast.py"):
        module_name = module_name.replace("_fast.py", ".py")
    parent = parts[-2] if len(parts) >= 2 else None

    # Folders whose tests do not follow the default naming scheme.
    if parent == "benchmark":
        return ["tests/benchmark/test_benchmark.py", "tests/benchmark/test_benchmark_tf.py"]
    if parent == "commands":
        return "tests/utils/test_cli.py"
    if parent == "onnx":
        return ["tests/onnx/test_onnx.py", "tests/onnx/test_onnx_v2.py"]

    if parent == "pipelines":
        candidate = f"tests/pipelines/test_pipelines_{module_name}"
    elif parts[0] == "utils":
        candidate = f"tests/utils/test_utils_{module_name}"
    elif len(parts) > 4 and parts[2] == "models":
        candidate = f"tests/models/{parts[3]}/test_{module_name}"
    elif len(parts) > 2 and parts[2].startswith("generation"):
        candidate = f"tests/generation/test_{module_name}"
    elif len(parts) > 2 and parts[2].startswith("trainer"):
        candidate = f"tests/trainer/test_{module_name}"
    else:
        candidate = f"tests/utils/test_{module_name}"

    if os.path.isfile(candidate):
        return candidate
    # "processing" modules sometimes have their tests named "processor".
    if "processing" in candidate:
        alternative = candidate.replace("processing", "processor")
        if os.path.isfile(alternative):
            return alternative
# Test files that no source module maps to: they are run in dedicated CI jobs
# (or deliberately excluded from the automatic module -> test mapping), so
# `sanity_check` must not flag them as unreachable.
EXPECTED_TEST_FILES_NEVER_TOUCHED = [
"tests/utils/test_doc_samples.py",
"tests/pipelines/test_pipelines_common.py",
"tests/sagemaker/test_single_node_gpu.py",
"tests/sagemaker/test_multi_node_model_parallel.py",
"tests/sagemaker/test_multi_node_data_parallel.py",
]
def _print_list(l):
    """Format the entries of `l` as a Markdown-style bulleted block, one per line."""
    return "\n".join(f"- {entry}" for entry in l)
def sanity_check():
    """Verify every existing test file is reachable from some module or util file.

    Raises ValueError listing any test file that would never be run by the CI.
    """
    root = Path(PATH_TO_TRANFORMERS)

    def _py_files(folder):
        # All python files under `folder`, as paths relative to the repo root.
        return [str(p.relative_to(root)) for p in (root / folder).glob("**/*.py")]

    all_files = _py_files("src/transformers") + _py_files("utils")

    # Collect every test file (or test folder) some source file maps to.
    test_files_found = []
    for fname in all_files:
        mapped = module_to_test_file(fname)
        if mapped is None:
            continue
        if isinstance(mapped, str):
            test_files_found.append(mapped)
        else:
            test_files_found.extend(mapped)

    # Expand any mapped folder into the test files it contains.
    test_files = []
    for mapped in test_files_found:
        if os.path.isdir(os.path.join(PATH_TO_TRANFORMERS, mapped)):
            test_files.extend(
                [str(p.relative_to(root)) for p in (root / mapped).glob("**/test*.py")]
            )
        else:
            test_files.append(mapped)

    existing_test_files = [
        str(p.relative_to(root)) for p in (root / "tests").glob("**/test*.py")
    ]
    not_touched_test_files = [f for f in existing_test_files if f not in test_files]

    should_be_tested = set(not_touched_test_files) - set(EXPECTED_TEST_FILES_NEVER_TOUCHED)
    if len(should_be_tested) > 0:
        raise ValueError(
            "The following test files are not currently associated with any module or utils files, which means they "
            f"will never get run by the CI:\n{_print_list(should_be_tested)}\n. Make sure the names of these test "
            "files match the name of the module or utils they are testing, or adapt the constant "
            "`SPECIAL_MODULE_TO_TEST_MAP` in `utils/tests_fetcher.py` to add them. If your test file is triggered "
            "separately and is not supposed to be run by the regular CI, add it to the "
            "`EXPECTED_TEST_FILES_NEVER_TOUCHED` constant instead."
        )
def infer_tests_to_run(output_file, diff_with_last_commit=False, filters=None):
    """Determine the test files impacted by the current diff and write them out.

    Args:
        output_file: File the resulting space-separated test list is written to.
        diff_with_last_commit: Diff against the last commit instead of the branching point.
        filters: Optional list of path prefixes; only tests starting with one are kept.
    """
    modified_files = get_modified_python_files(diff_with_last_commit=diff_with_last_commit)
    print(f"\n### MODIFIED FILES ###\n{_print_list(modified_files)}")

    # Expand modified files with every module that (transitively) imports them.
    impacted_modules_map = create_reverse_dependency_map()
    impacted_files = modified_files.copy()
    for f in modified_files:
        if f in impacted_modules_map:
            impacted_files.extend(impacted_modules_map[f])

    # Remove duplicates.
    impacted_files = sorted(set(impacted_files))
    print(f"\n### IMPACTED FILES ###\n{_print_list(impacted_files)}")

    if "setup.py" in impacted_files:
        # A setup.py change can affect anything: run the whole suite.
        test_files_to_run = ["tests"]
    else:
        test_files_to_run = []
        for f in impacted_files:
            # Modified test files are always added.
            if f.startswith("tests/"):
                test_files_to_run.append(f)
            elif f.startswith("examples/pytorch"):
                test_files_to_run.append("examples/pytorch/test_pytorch_examples.py")
                test_files_to_run.append("examples/pytorch/test_accelerate_examples.py")
            elif f.startswith("examples/flax"):
                test_files_to_run.append("examples/flax/test_flax_examples.py")
            else:
                new_tests = module_to_test_file(f)
                if new_tests is not None:
                    if isinstance(new_tests, str):
                        test_files_to_run.append(new_tests)
                    else:
                        test_files_to_run.extend(new_tests)

        # Remove duplicates and files/folders that no longer exist.
        test_files_to_run = sorted(set(test_files_to_run))
        test_files_to_run = [f for f in test_files_to_run if os.path.isfile(f) or os.path.isdir(f)]
        if filters is not None:
            filtered_files = []
            # Renamed from `filter` to avoid shadowing the builtin.
            for path_filter in filters:
                filtered_files.extend([f for f in test_files_to_run if f.startswith(path_filter)])
            test_files_to_run = filtered_files

    print(f"\n### TEST TO RUN ###\n{_print_list(test_files_to_run)}")
    if len(test_files_to_run) > 0:
        with open(output_file, "w", encoding="utf-8") as f:
            f.write(" ".join(test_files_to_run))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--sanity_check", action="store_true", help="Only test that all tests and modules are accounted for."
    )
    parser.add_argument(
        "--output_file", type=str, default="test_list.txt", help="Where to store the list of tests to run"
    )
    parser.add_argument(
        "--diff_with_last_commit",
        action="store_true",
        help="To fetch the tests between the current commit and the last commit",
    )
    parser.add_argument(
        "--filters",
        type=str,
        nargs="*",
        default=["tests"],
        help="Only keep the test files matching one of those filters.",
    )
    args = parser.parse_args()

    if args.sanity_check:
        sanity_check()
    else:
        repo = Repo(PATH_TO_TRANFORMERS)

        # On the main branch there is no branching point, so diff with the
        # last commit instead.
        diff_with_last_commit = args.diff_with_last_commit
        if not diff_with_last_commit and not repo.head.is_detached and repo.head.ref == repo.refs.main:
            print("main branch detected, fetching tests against last commit.")
            diff_with_last_commit = True

        try:
            infer_tests_to_run(args.output_file, diff_with_last_commit=diff_with_last_commit, filters=args.filters)
        except Exception as e:
            # Any failure falls back to running everything (restricted by filters).
            print(f"\nError when trying to grab the relevant tests: {e}\n\nRunning all tests.")
            with open(args.output_file, "w", encoding="utf-8") as f:
                if args.filters is None:
                    f.write("./tests/")
                else:
                    f.write(" ".join(args.filters))
| true | true |
f733f33c99191ec0f9f1a9d033ef9fe1a55561f0 | 4,472 | py | Python | test/test.py | leovandriel/ramachandran | 1be7fc5268621fc45a7c5c95cedbf483f383b6d7 | [
"MIT"
] | 2 | 2020-03-24T00:03:31.000Z | 2020-07-28T17:34:47.000Z | test/test.py | leonardvandriel/ramachandran | 1be7fc5268621fc45a7c5c95cedbf483f383b6d7 | [
"MIT"
] | null | null | null | test/test.py | leonardvandriel/ramachandran | 1be7fc5268621fc45a7c5c95cedbf483f383b6d7 | [
"MIT"
] | null | null | null | import unittest
import math
import random
from src.util.point import Point
from src.core.translate import Translator
from src.core.parse import Parser
from src.util.amino import Amino
class TestPoint(unittest.TestCase):
    """Exercises the arithmetic, vector and rotation operators of `Point`."""

    def test_add(self):
        """`+` and `+=` support both point-wise and scalar addition."""
        self.assertEqual(Point(1, 2, 3) + Point(2, 4, 6), Point(3, 6, 9))
        self.assertEqual(Point(1, 2, 3) + 2, Point(3, 4, 5))
        pt = Point(1, 2, 3)
        pt += Point(2, 4, 6)
        self.assertEqual(pt, Point(3, 6, 9))
        pt = Point(1, 2, 3)
        pt += 2
        self.assertEqual(pt, Point(3, 4, 5))

    def test_sub(self):
        """`-` and `-=` support both point-wise and scalar subtraction."""
        self.assertEqual(Point(1, 2, 3) - Point(2, 4, 6), Point(-1, -2, -3))
        self.assertEqual(Point(1, 2, 3) - 2, Point(-1, 0, 1))
        pt = Point(1, 2, 3)
        pt -= Point(2, 4, 6)
        self.assertEqual(pt, Point(-1, -2, -3))
        pt = Point(1, 2, 3)
        pt -= 2
        self.assertEqual(pt, Point(-1, 0, 1))

    def test_mul(self):
        """`*` and `*=` support both point-wise and scalar multiplication."""
        self.assertEqual(Point(1, 2, 3) * Point(2, 4, 6), Point(2, 8, 18))
        self.assertEqual(Point(1, 2, 3) * 2, Point(2, 4, 6))
        pt = Point(1, 2, 3)
        pt *= Point(2, 4, 6)
        self.assertEqual(pt, Point(2, 8, 18))
        pt = Point(1, 2, 3)
        pt *= 2
        self.assertEqual(pt, Point(2, 4, 6))

    def test_div(self):
        """`/` and `/=` support both point-wise and scalar division."""
        self.assertEqual(Point(1, 2, 3) / Point(2, 4, 6), Point(0.5, 0.5, 0.5))
        self.assertEqual(Point(1, 2, 3) / 2, Point(0.5, 1, 1.5))
        pt = Point(1, 2, 3)
        pt /= Point(2, 4, 6)
        self.assertEqual(pt, Point(0.5, 0.5, 0.5))
        pt = Point(1, 2, 3)
        pt /= 2
        self.assertEqual(pt, Point(0.5, 1, 1.5))

    def test_dot(self):
        """`|` computes the dot product."""
        self.assertEqual(Point(1, 2, 3) | Point(4, 6, 2), 22)

    def test_cross(self):
        """`^` and `^=` compute the cross product."""
        self.assertEqual(Point(1, 2, 3) ^ Point(4, 6, 2), Point(-14, 10, -2))
        pt = Point(1, 2, 3)
        pt ^= Point(4, 6, 2)
        self.assertEqual(pt, Point(-14, 10, -2))

    def test_neg(self):
        """Unary minus negates every component."""
        self.assertEqual(-Point(1, 2, 3), Point(-1, -2, -3))

    def test_len(self):
        """`lensq` is the squared norm; `length` its square root."""
        pt = Point(1, 2, 3)
        self.assertEqual(pt.lensq(), 14)
        self.assertAlmostEqual(pt.length(), 3.74165738)

    def test_norm(self):
        """`~` returns a unit vector; `norm` normalises in place."""
        pt = Point(3, 4, 0)
        self.assertEqual(~pt, Point(0.6, 0.8, 0))
        pt.norm()
        self.assertEqual(pt, Point(0.6, 0.8, 0))

    def test_rotate(self):
        """Rotation about the x-axis by 0 and +/- 90 degrees."""
        pt = Point(1, 2, 3)
        self.assertEqual(pt.rotated(Point(1, 0, 0), 0), Point(1, 2, 3))
        self.assertEqual(pt.rotated(Point(1, 0, 0), math.pi / 2).rounded(5), Point(1, -3, 2))
        self.assertEqual(pt.rotated(Point(1, 0, 0), -math.pi / 2).rounded(5), Point(1, 3, -2))
class TestTranslator(unittest.TestCase):
    """Round-trip test for the point <-> polar translator."""

    def test_forward_backward(self):
        """backward(forward(points)) reproduces the input to 5 decimal places."""
        points = [
            Point(random.random() * 20 - 10,
                  random.random() * 20 - 10,
                  random.random() * 20 - 10)
            for _ in range(10)
        ]
        polars = Translator().forward(points)
        # A fresh translator must be able to invert the transformation.
        original = Translator().backward(polars)
        for before, after in zip(points, original):
            self.assertEqual(before.rounded(5), after.rounded(5))
class TestParser(unittest.TestCase):
    """Tests for reading PDB files with `Parser`."""

    def test_read_file(self):
        """The sample PDB file yields three aminos with known coordinates."""
        aminos = Parser().read_file('test/test.pdb')
        self.assertEqual(len(aminos), 3)
        first = aminos[0]
        self.assertEqual(first.type, 'PRO')
        self.assertEqual(first.N, Point(-69.116000, 7.943000, -16.525000))
        self.assertEqual(first.CA, Point(-70.302000, 8.654000, -17.017000))
        self.assertEqual(first.C, Point(-71.449000, 7.711000, -17.377000))
class TestAmino(unittest.TestCase):
    """Tests for `Amino.lookup_code` normalisation."""

    def test_lookup_code(self):
        """Codes resolve from letters, names and 3-letter codes, case-insensitively."""
        for query in (None, '', 'B'):
            self.assertEqual(Amino.lookup_code(query), None)
        for query in ('A', 'a', 'Alanine', 'aLanine', 'ala', 'ALA'):
            self.assertEqual(Amino.lookup_code(query), 'ALA')
if __name__ == '__main__':
    # Allow running this file directly: `python test/test.py`.
    unittest.main()
| 32.882353 | 79 | 0.538462 | import unittest
import math
import random
from src.util.point import Point
from src.core.translate import Translator
from src.core.parse import Parser
from src.util.amino import Amino
class TestPoint(unittest.TestCase):
    """Exercises the arithmetic, vector and rotation operators of `Point`."""

    def test_add(self):
        """`+` and `+=` support both point-wise and scalar addition."""
        self.assertEqual(Point(1, 2, 3) + Point(2, 4, 6), Point(3, 6, 9))
        self.assertEqual(Point(1, 2, 3) + 2, Point(3, 4, 5))
        pt = Point(1, 2, 3)
        pt += Point(2, 4, 6)
        self.assertEqual(pt, Point(3, 6, 9))
        pt = Point(1, 2, 3)
        pt += 2
        self.assertEqual(pt, Point(3, 4, 5))

    def test_sub(self):
        """`-` and `-=` support both point-wise and scalar subtraction."""
        self.assertEqual(Point(1, 2, 3) - Point(2, 4, 6), Point(-1, -2, -3))
        self.assertEqual(Point(1, 2, 3) - 2, Point(-1, 0, 1))
        pt = Point(1, 2, 3)
        pt -= Point(2, 4, 6)
        self.assertEqual(pt, Point(-1, -2, -3))
        pt = Point(1, 2, 3)
        pt -= 2
        self.assertEqual(pt, Point(-1, 0, 1))

    def test_mul(self):
        """`*` and `*=` support both point-wise and scalar multiplication."""
        self.assertEqual(Point(1, 2, 3) * Point(2, 4, 6), Point(2, 8, 18))
        self.assertEqual(Point(1, 2, 3) * 2, Point(2, 4, 6))
        pt = Point(1, 2, 3)
        pt *= Point(2, 4, 6)
        self.assertEqual(pt, Point(2, 8, 18))
        pt = Point(1, 2, 3)
        pt *= 2
        self.assertEqual(pt, Point(2, 4, 6))

    def test_div(self):
        """`/` and `/=` support both point-wise and scalar division."""
        self.assertEqual(Point(1, 2, 3) / Point(2, 4, 6), Point(0.5, 0.5, 0.5))
        self.assertEqual(Point(1, 2, 3) / 2, Point(0.5, 1, 1.5))
        pt = Point(1, 2, 3)
        pt /= Point(2, 4, 6)
        self.assertEqual(pt, Point(0.5, 0.5, 0.5))
        pt = Point(1, 2, 3)
        pt /= 2
        self.assertEqual(pt, Point(0.5, 1, 1.5))

    def test_dot(self):
        """`|` computes the dot product."""
        self.assertEqual(Point(1, 2, 3) | Point(4, 6, 2), 22)

    def test_cross(self):
        """`^` and `^=` compute the cross product."""
        self.assertEqual(Point(1, 2, 3) ^ Point(4, 6, 2), Point(-14, 10, -2))
        pt = Point(1, 2, 3)
        pt ^= Point(4, 6, 2)
        self.assertEqual(pt, Point(-14, 10, -2))

    def test_neg(self):
        """Unary minus negates every component."""
        self.assertEqual(-Point(1, 2, 3), Point(-1, -2, -3))

    def test_len(self):
        """`lensq` is the squared norm; `length` its square root."""
        pt = Point(1, 2, 3)
        self.assertEqual(pt.lensq(), 14)
        self.assertAlmostEqual(pt.length(), 3.74165738)

    def test_norm(self):
        """`~` returns a unit vector; `norm` normalises in place."""
        pt = Point(3, 4, 0)
        self.assertEqual(~pt, Point(0.6, 0.8, 0))
        pt.norm()
        self.assertEqual(pt, Point(0.6, 0.8, 0))

    def test_rotate(self):
        """Rotation about the x-axis by 0 and +/- 90 degrees."""
        pt = Point(1, 2, 3)
        self.assertEqual(pt.rotated(Point(1, 0, 0), 0), Point(1, 2, 3))
        self.assertEqual(pt.rotated(Point(1, 0, 0), math.pi / 2).rounded(5), Point(1, -3, 2))
        self.assertEqual(pt.rotated(Point(1, 0, 0), -math.pi / 2).rounded(5), Point(1, 3, -2))
class TestTranslator(unittest.TestCase):
    """Round-trip test for the point <-> polar translator."""

    def test_forward_backward(self):
        """backward(forward(points)) reproduces the input to 5 decimal places."""
        points = [
            Point(random.random() * 20 - 10,
                  random.random() * 20 - 10,
                  random.random() * 20 - 10)
            for _ in range(10)
        ]
        polars = Translator().forward(points)
        # A fresh translator must be able to invert the transformation.
        original = Translator().backward(polars)
        for before, after in zip(points, original):
            self.assertEqual(before.rounded(5), after.rounded(5))
class TestParser(unittest.TestCase):
    """Tests for reading PDB files with `Parser`."""

    def test_read_file(self):
        """The sample PDB file yields three aminos with known coordinates."""
        aminos = Parser().read_file('test/test.pdb')
        self.assertEqual(len(aminos), 3)
        first = aminos[0]
        self.assertEqual(first.type, 'PRO')
        self.assertEqual(first.N, Point(-69.116000, 7.943000, -16.525000))
        self.assertEqual(first.CA, Point(-70.302000, 8.654000, -17.017000))
        self.assertEqual(first.C, Point(-71.449000, 7.711000, -17.377000))
class TestAmino(unittest.TestCase):
    """Tests for `Amino.lookup_code` normalisation."""

    def test_lookup_code(self):
        """Codes resolve from letters, names and 3-letter codes, case-insensitively."""
        for query in (None, '', 'B'):
            self.assertEqual(Amino.lookup_code(query), None)
        for query in ('A', 'a', 'Alanine', 'aLanine', 'ala', 'ALA'):
            self.assertEqual(Amino.lookup_code(query), 'ALA')
if __name__ == '__main__':
    # Allow running this file directly: `python test/test.py`.
    unittest.main()
| true | true |
f733f3ffc26b3265e56173b7c18db7ac2cc062c7 | 17,926 | py | Python | brawlcord/utils/club.py | LenCoding/brawlcord | f17b01a49546321917cb5e2eb87ce6ff5a469c66 | [
"MIT"
] | 6 | 2020-11-27T15:47:31.000Z | 2022-01-22T11:28:54.000Z | brawlcord/utils/club.py | LenCoding/brawlcord | f17b01a49546321917cb5e2eb87ce6ff5a469c66 | [
"MIT"
] | 1 | 2020-11-30T10:10:01.000Z | 2020-11-30T10:10:01.000Z | brawlcord/utils/club.py | LenCoding/brawlcord | f17b01a49546321917cb5e2eb87ce6ff5a469c66 | [
"MIT"
] | 3 | 2020-11-27T15:00:38.000Z | 2021-12-16T14:26:21.000Z | import random
import string
from typing import Callable, List, Optional
import discord
from redbot.core import Config
from redbot.core.commands import Context
from redbot.core.bot import Red
from redbot.core.utils.menus import start_adding_reactions
from redbot.core.utils.predicates import MessagePredicate, ReactionPredicate
# from redbot.core.utils.chat_formatting import text_to_file
from .constants import EMBED_COLOR
from .emojis import emojis
from .errors import CancellationError
# Club badge art hosted by Star List; `Club.show_club` formats `icon_num - 1`
# into the URL (their icon indexing starts at 0, ours at 1).
# Credits to Star List
club_thumb = "https://www.starlist.pro/assets/club/{}.png"
class Club:
    """Represents a Brawlcord club.

    Role lists (`vice_presidents`, `seniors`, `members`) are kept separate;
    the president is stored on its own. `all_members` is a flattened snapshot
    built at construction time.
    """

    def __init__(self, data: dict):
        self.id: str = data["id"]
        self.name: str = data["name"]
        self.description: str = data["description"]
        self.required_trophies: int = data["required_trophies"]
        self.location: str = data["location"]
        self.icon_num: int = data["icon_num"]
        self.ctype: str = data["ctype"]
        self.president: discord.User = data["president"]
        self.vice_presidents: List[discord.User] = data.get("vice_presidents", [])
        self.seniors: List[discord.User] = data.get("seniors", [])
        self.members: List[discord.User] = data.get("members", [])

        # NOTE(review): snapshot only -- later role changes don't refresh it.
        self.all_members = [self.president] + self.vice_presidents + self.seniors + self.members

    @classmethod
    async def create_club(cls, config: Config, ctx: Context):
        """Interactive club creation process.

        Creates the club, saves it to both the user and the global database,
        and returns the `Club` object. Also adjusts the global
        `club_id_length` if a longer ID had to be generated. All errors
        (timeouts, `CancellationError`, `ValueError`, `NameError`) must be
        handled by the caller.
        """

        async def get_input(timeout=30):
            # Wait for the author's next message; "cancel" aborts creation.
            pred = await ctx.bot.wait_for(
                "message", check=MessagePredicate.same_context(ctx), timeout=timeout
            )
            if pred.content.strip().lower() == "cancel":
                raise CancellationError
            return pred.content.strip()

        data = {}

        await ctx.send(
            ":tada: Let's create your club! First, what name do you want the club to have?"
            " Note that it cannot be changed later!"
        )
        data["name"] = await get_input()

        await ctx.send(
            "Set the name! Now, what do you want to set as the server description?"
        )
        data["description"] = await get_input(60)

        await ctx.send(
            "Set the description! What should be the required trophies?"
            " Enter a number. (without commas)"
        )
        data["required_trophies"] = int(await get_input())

        await ctx.send(
            "Set required trophies! Select a icon for the club!"
            " Enter the number corresponding to icon of choice."
        )
        data["icon_num"] = int(await get_input(60))

        await ctx.send(
            "Set club icon! Now, enter a location for your club!"
        )
        data["location"] = await get_input()

        await ctx.send(
            "Set the location. Lastly, what kind of club do you want to create?"
            " Enter one of `open`, `closed`, or `invite`."
        )
        club_type = await get_input()
        if club_type.strip().lower() not in ["open", "closed", "invite"]:
            # We raise `NameError` instead of `ValueError` to keep
            # it separate from the above `int` conversions.
            raise NameError
        # BUG FIX: store the type lowercased -- `add_user` compares against
        # lowercase "closed"/"invite", so e.g. "Closed" used to be treated
        # as an open club.
        data["ctype"] = club_type.strip().lower()

        data["president"] = ctx.author

        await ctx.send("All set! Club created! :tada:")

        default_length = await config.club_id_length()
        async with config.clubs() as clubs:
            # Collect all IDs used so far to pick a free one for the new club.
            ids = [c["id"] for c in clubs]
            data["id"], new_length = cls.get_club_id(ids, default_length)

            club = cls(data)
            clubs.append(club.to_json())

        await config.user(ctx.author).club.set(club.id)
        if default_length != new_length:
            await config.club_id_length.set(new_length)

        return club

    def to_json(self) -> dict:
        """Return a dictionary representation of the club (users stored as IDs)."""
        return {
            "id": self.id,
            "name": self.name,
            "description": self.description,
            "required_trophies": self.required_trophies,
            "location": self.location,
            "icon_num": self.icon_num,
            "ctype": self.ctype,
            "president_id": self.president.id,
            "vice_president_ids": [vp.id for vp in self.vice_presidents],
            "senior_ids": [s.id for s in self.seniors],
            "member_ids": [m.id for m in self.members]
        }

    @classmethod
    async def from_json(cls, data: dict, bot: Red):
        """Return a `Club` object from its dictionary representation.

        Users that can no longer be fetched are silently dropped from the
        role lists (the president is kept even if None -- see `get_user`).
        """
        data["president"] = await cls.get_user(data["president_id"], bot)

        async def resolve_all(user_ids):
            # Resolve each stored ID, skipping users that can't be found.
            users = []
            for uid in user_ids:
                user = await cls.get_user(uid, bot)
                if user is not None:
                    users.append(user)
            return users

        data["vice_presidents"] = await resolve_all(data.pop("vice_president_ids"))
        data["seniors"] = await resolve_all(data.pop("senior_ids"))
        data["members"] = await resolve_all(data.pop("member_ids"))
        data.pop("president_id")

        return cls(data)

    @staticmethod
    async def get_user(user_id: int, bot: Red) -> Optional[discord.User]:
        """Return the `discord.User` with the given ID, or None if not found."""
        user = bot.get_user(user_id)
        if user is None:
            try:
                # Fall back to an API fetch for users not in the cache.
                user = await bot.fetch_user(user_id)
            except Exception:
                pass
        return user

    @staticmethod
    async def show_club(
        data: dict, bot: Red, config: Config, get_league: Callable
    ) -> (discord.Embed, discord.File):
        """Build the embeds used to display a club.

        Returns a list of formatted `discord.Embed` pages (the annotation is
        kept for backward compatibility with the old tuple return).
        `data` may be a dict representation or an already-built `Club`.
        """
        if isinstance(data, Club):
            club = data
        else:
            club: Club = await Club.from_json(data, bot)

        embeds = []
        pages = await club.members_list(config, get_league)
        total_pages = len(pages)
        total_trophies = await club.total_trophies(config)

        # Star List's club icon indexing starts at 0, ours at 1 (up to 30);
        # anything out of range falls back to the generic club icon.
        if club.icon_num not in range(1, 31):
            icon_url = "https://www.starlist.pro/assets/icon/Club.png"
        else:
            icon_url = club_thumb.format(club.icon_num - 1)

        for idx, page in enumerate(pages):
            embed = discord.Embed(color=EMBED_COLOR, description=club.description)
            embed.set_author(name=club.name, icon_url=icon_url)
            embed.set_footer(text=f"Club ID: {club.id} | Page {idx+1}/{total_pages}")
            embed.add_field(
                name="Total Trophies",
                value=f"{emojis['trophies']} {total_trophies:,}"
            )
            embed.add_field(name="President", value=club.president.name)
            embed.add_field(
                name="Required Trophies", value=f"{emojis['trophies']} {club.required_trophies:,}"
            )
            embed.add_field(name="Total Members", value=f"{len(club.all_members)}/100")
            embed.add_field(name="Type", value=club.ctype.title())
            embed.add_field(name="Location", value=club.location)
            embed.add_field(name="\u200b\n", value=page.strip(), inline=False)
            embeds.append(embed)

        return embeds

    async def total_trophies(self, config: Config) -> int:
        """Return the combined trophies of all club members."""
        total = 0
        for member in self.all_members:
            try:
                brawlers = await config.user(member).brawlers()
                total += self.get_user_trophies(brawlers)
            except Exception:
                # Skip members whose data can't be read.
                continue
        return total

    @staticmethod
    def get_user_trophies(brawlers: dict) -> int:
        """Return the total trophies of one user from their brawlers data."""
        return sum(brawler["trophies"] for brawler in brawlers.values())

    async def members_list(self, config: Config, get_league: Callable) -> (str, str):
        """Return the roster as pages of ten lines, sorted by trophies.

        The return annotation is historical; this actually returns a list of
        non-empty page strings (at most five, i.e. 50 displayed members).
        """
        trophies_by_user = {}
        for member in self.all_members:
            try:
                brawlers = await config.user(member).brawlers()
                trophies_by_user[member] = self.get_user_trophies(brawlers)
            except Exception:
                pass

        # Users with most trophies first.
        trophies_by_user = {
            k: v for k, v in sorted(trophies_by_user.items(), key=lambda x: x[1], reverse=True)
        }

        # Hoisted out of the loop: role lookups by ID.
        vp_ids = [vp.id for vp in self.vice_presidents]
        senior_ids = [s.id for s in self.seniors]

        pages = ["", "", "", "", ""]
        for idx, user in enumerate(trophies_by_user):
            pos = "Member"
            if user.id == self.president.id:
                pos = "**President**"
            elif user.id in vp_ids:
                pos = "**Vice President**"
            elif user.id in senior_ids:
                pos = "**Senior**"

            _, emoji = await get_league(trophies_by_user[user])
            txt = f"\n`{(idx+1):02d}.` {user} {emoji}{trophies_by_user[user]} ({pos})"

            # Ten members per page; anyone beyond rank 50 isn't displayed.
            if idx < 50:
                pages[idx // 10] += txt

        return [page for page in pages if page.strip()]

    @staticmethod
    def get_club_id(used_ids: list, default_length: int) -> (str, int):
        """Return a unique club ID and the ID length to store as the default.

        Tries `default_length` first; after repeated collisions the length is
        increased by one (and returned so the caller can persist it).
        """

        def gen_id(length=default_length):
            # One attempt; False signals a collision with an existing ID.
            candidate = "".join(
                random.choice(string.ascii_uppercase + string.digits) for _ in range(length)
            )
            return candidate if candidate not in used_ids else False

        club_id = gen_id()
        if club_id is False:
            # Retry the default length a few times before growing the ID.
            for _ in range(3):
                club_id = gen_id()
                if club_id is not False:
                    # BUG FIX: this used to `return club_id` alone (no length),
                    # which broke the tuple unpacking in `create_club`.
                    return club_id, default_length
            default_length += 1
            club_id = gen_id(default_length)
            # BUG FIX: keep trying instead of ever returning False as the ID.
            while club_id is False:
                club_id = gen_id(default_length)
        return club_id, default_length

    @classmethod
    async def club_from_id(cls, id: str, config: Config, bot: Red):
        """Return the `Club` with the given id, or None if it doesn't exist."""
        clubs = await config.clubs()
        for club in clubs:
            if club["id"] == id:
                return await cls.from_json(club, bot)

    async def remove_user(self, user: discord.User, config: Config):
        """Remove `user` from the club, promoting a replacement president if needed.

        Returns True (and deletes the club from the database) when the last
        member leaves; otherwise persists the updated club.
        """

        def choose_new_pres(pool: list):
            # Promote a random user from `pool`; False if the pool is empty.
            try:
                new_pres = random.choice(pool)
                # Remove them from their old role list.
                pool.remove(new_pres)
                self.president = new_pres
                return True
            except IndexError:
                return False

        if user in self.all_members:
            self.all_members.remove(user)

        if user.id == self.president.id:
            # Pick a replacement from the highest non-empty role list.
            if not choose_new_pres(self.vice_presidents):
                if not choose_new_pres(self.seniors):
                    if not choose_new_pres(self.members):
                        # Empty club, remove it from database.
                        async with config.clubs() as clubs:
                            where = next(i for i, d in enumerate(clubs) if d.get('id') == self.id)
                            del clubs[where]
                        return True
        else:
            if user in self.vice_presidents:
                self.vice_presidents.remove(user)
            elif user in self.seniors:
                self.seniors.remove(user)
            elif user in self.members:
                self.members.remove(user)

        await self.update_club(config)

    async def add_user(self, user: discord.User, config: Config):
        """Add `user` to the club as a Member.

        Raises ValueError when the club is not open to direct joins.
        """
        if self.ctype in ["closed", "invite"]:
            raise ValueError("Club type is `closed` or `invite-only`.")
        self.members.append(user)
        await self.update_club(config)

    async def promote_user(self, user: discord.User, ctx: Context, config: Config):
        """Promote `user` one role up.

        Raises ValueError when the promotion is not allowed. Promoting a
        Vice President as President swaps the two roles (after confirmation).
        """
        if user.id == self.president.id:
            raise ValueError(f"{user.name} is the club President!")

        if ctx.author.id == self.president.id:
            if user in self.vice_presidents:
                msg = await ctx.send(
                    f"Promoting {user.name} will demote you and make them the President."
                    " Are you sure you want to continue?"
                )
                start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
                pred = ReactionPredicate.yes_or_no(msg, ctx.author)
                await ctx.bot.wait_for("reaction_add", check=pred)
                if pred.result is True:
                    # Swap roles: the old president becomes a Vice President.
                    self.president = user
                    self.vice_presidents.remove(user)
                    self.vice_presidents.append(ctx.author)
                    await ctx.send(f"Promoted {user.name} to President!")
                else:
                    return await ctx.send("Cancelled promotion.")
            elif user in self.seniors:
                self.seniors.remove(user)
                self.vice_presidents.append(user)
                await ctx.send(f"Promoted {user.name} to Vice President!")
            elif user in self.members:
                self.members.remove(user)
                self.seniors.append(user)
                await ctx.send(f"Promoted {user.name} to Senior!")

        if ctx.author in self.vice_presidents:
            if user in self.vice_presidents:
                raise ValueError(f"{user.name} is equal to you in hierarchy!")
            elif user in self.seniors:
                raise ValueError(f"Only club President can promote a Senior to Vice President")
            elif user in self.members:
                self.members.remove(user)
                self.seniors.append(user)
                await ctx.send(f"Promoted {user.name} to Senior!")

        await self.update_club(config)

    async def demote_user(self, user: discord.User, ctx: Context, config: Config):
        """Demote `user` one role down.

        Raises ValueError when the demotion is not allowed.
        """
        if user.id == self.president.id:
            raise ValueError(f"{user.name} is the club President!")

        if ctx.author.id == self.president.id:
            if user in self.vice_presidents:
                self.vice_presidents.remove(user)
                # BUG FIX: the user previously wasn't added to any role list,
                # silently dropping them from the club on the next save.
                self.seniors.append(user)
                await ctx.send(f"Demoted {user.name} to Senior!")
            elif user in self.seniors:
                self.seniors.remove(user)
                # BUG FIX: same as above -- keep the user as a Member.
                self.members.append(user)
                await ctx.send(f"Demoted {user.name} to Member!")
            elif user in self.members:
                raise ValueError(
                    f"{user.name} is already a Member."
                    " Use `club kick` command to kick member out of the club."
                )

        if ctx.author in self.vice_presidents:
            if user in self.vice_presidents:
                raise ValueError(f"{user.name} is equal to you in hierarchy!")
            elif user in self.seniors:
                self.seniors.remove(user)
                # BUG FIX: keep the demoted Senior as a Member.
                self.members.append(user)
                await ctx.send(f"Demoted {user.name} to Member!")
            elif user in self.members:
                raise ValueError(
                    f"{user.name} is already a Member."
                    " Use `club kick` command to kick member out of the club."
                )

        await self.update_club(config)

    async def update_club(self, config: Config):
        """Persist this club's current state to the bot database."""
        async with config.clubs() as clubs:
            where = next(i for i, d in enumerate(clubs) if d.get('id') == self.id)
            clubs[where] = self.to_json()
        return True
| 35.852 | 98 | 0.563093 | import random
import string
from typing import Callable, List, Optional
import discord
from redbot.core import Config
from redbot.core.commands import Context
from redbot.core.bot import Red
from redbot.core.utils.menus import start_adding_reactions
from redbot.core.utils.predicates import MessagePredicate, ReactionPredicate
from .constants import EMBED_COLOR
from .emojis import emojis
from .errors import CancellationError
# Club badge art hosted by Star List (https://www.starlist.pro); the club's
# `icon_num` minus one is formatted into the URL.
club_thumb = "https://www.starlist.pro/assets/club/{}.png"
class Club:
def __init__(self, data: dict):
self.id: str = data["id"]
self.name: str = data["name"]
self.description: str = data["description"]
self.required_trophies: int = data["required_trophies"]
self.location: str = data["location"]
self.icon_num: int = data["icon_num"]
self.ctype: str = data["ctype"]
self.president: discord.User = data["president"]
self.vice_presidents: List[discord.User] = data.get("vice_presidents", [])
self.seniors: List[discord.User] = data.get("seniors", [])
self.members: List[discord.User] = data.get("members", [])
self.all_members = [self.president] + self.vice_presidents + self.seniors + self.members
@classmethod
async def create_club(cls, config: Config, ctx: Context):
async def get_input(timeout=30):
pred = await ctx.bot.wait_for(
"message", check=MessagePredicate.same_context(ctx), timeout=timeout
)
if pred.content.strip().lower() == "cancel":
raise CancellationError
return pred.content.strip()
data = {}
await ctx.send(
":tada: Let's create your club! First, what name do you want the club to have?"
" Note that it cannot be changed later!"
)
data["name"] = await get_input()
await ctx.send(
"Set the name! Now, what do you want to set as the server description?"
)
data["description"] = await get_input(60)
await ctx.send(
"Set the description! What should be the required trophies?"
" Enter a number. (without commas)"
)
data["required_trophies"] = int(await get_input())
await ctx.send(
"Set required trophies! Select a icon for the club!"
" Enter the number corresponding to icon of choice."
)
data["icon_num"] = int(await get_input(60))
await ctx.send(
"Set club icon! Now, enter a location for your club!"
)
data["location"] = await get_input()
await ctx.send(
"Set the location. Lastly, what kind of club do you want to create?"
" Enter one of `open`, `closed`, or `invite`."
)
club_type = await get_input()
if club_type.strip().lower() not in ["open", "closed", "invite"]:
# We raise `NameError` instead of `ValueError` to keep
# it separate from the above `int` conversions.
raise NameError
else:
data["ctype"] = club_type
data["president"] = ctx.author
await ctx.send(
f"All set! Club created! :tada:")
default_length = await config.club_id_length()
async with config.clubs() as clubs:
# First we get all club IDs we've used so far to get an ID for our new club.
ids = [c["id"] for c in clubs]
data["id"], new_length = cls.get_club_id(ids, default_length)
club = cls(data)
clubs.append(club.to_json())
await config.user(ctx.author).club.set(club.id)
if default_length != new_length:
await config.club_id_length.set(new_length)
return club
def to_json(self) -> dict:
return {
"id": self.id,
"name": self.name,
"description": self.description,
"required_trophies": self.required_trophies,
"location": self.location,
"icon_num": self.icon_num,
"ctype": self.ctype,
"president_id": self.president.id,
"vice_president_ids": [vp.id for vp in self.vice_presidents],
"senior_ids": [s.id for s in self.seniors],
"member_ids": [m.id for m in self.members]
}
@classmethod
async def from_json(cls, data: dict, bot: Red):
data["president"] = await cls.get_user(data["president_id"], bot)
vice_presidents = []
for vp_id in data["vice_president_ids"]:
vp = await cls.get_user(vp_id, bot)
if vp is not None:
vice_presidents.append(vp)
data["vice_presidents"] = vice_presidents
seniors = []
for s_id in data["senior_ids"]:
sen = await cls.get_user(s_id, bot)
if sen is not None:
seniors.append(sen)
data["seniors"] = seniors
members = []
for m_id in data["member_ids"]:
mem = await cls.get_user(m_id, bot)
if mem is not None:
members.append(mem)
data["members"] = members
data.pop("president_id")
data.pop("vice_president_ids")
data.pop("senior_ids")
data.pop("member_ids")
return cls(data)
@staticmethod
async def get_user(user_id: int, bot: Red) -> Optional[discord.User]:
user = bot.get_user(user_id)
if user is None:
try:
user = await bot.fetch_user(user_id)
except Exception:
pass
return user
    @staticmethod
    async def show_club(
        data: dict, bot: Red, config: Config, get_league: Callable
    ) -> (discord.Embed, discord.File):
        """Render a club as a list of Discord embeds, one per member page.

        *data* may be either an already-constructed ``Club`` or its stored
        JSON dict.  NOTE(review): the return annotation is stale — this
        actually returns a list of embeds (see the commented-out file code
        at the bottom); confirm before relying on the annotation.
        """
        if isinstance(data, Club):
            club = data
        else:
            club: Club = await Club.from_json(data, bot)
        embeds = []
        pages = await club.members_list(config, get_league)
        total_pages = len(pages)
        total_trophies = await club.total_trophies(config)
        # Fall back to the generic club icon for out-of-range icon numbers;
        # the thumbnail URL template is zero-based, hence icon_num - 1.
        if club.icon_num not in range(1, 31):
            icon_url = "https://www.starlist.pro/assets/icon/Club.png"
        else:
            icon_url = club_thumb.format(club.icon_num - 1)
        # One embed per members page; every embed repeats the club summary
        # fields so each page is self-contained.
        for idx, page in enumerate(pages):
            embed = discord.Embed(color=EMBED_COLOR, description=club.description)
            # It goes all the way up till 29.
            embed.set_author(name=club.name, icon_url=icon_url)
            embed.set_footer(text=f"Club ID: {club.id} | Page {idx+1}/{total_pages}")
            embed.add_field(
                name="Total Trophies",
                value=f"{emojis['trophies']} {total_trophies:,}"
            )
            embed.add_field(name="President", value=club.president.name)
            embed.add_field(
                name="Required Trophies", value=f"{emojis['trophies']} {club.required_trophies:,}"
            )
            embed.add_field(name="Total Members", value=f"{len(club.all_members)}/100")
            embed.add_field(name="Type", value=club.ctype.title())
            embed.add_field(name="Location", value=club.location)
            embed.add_field(name="\u200b\n", value=page.strip(), inline=False)
            embeds.append(embed)
        # if whole:
        #     club_file = text_to_file(whole, "club_data.txt")
        # else:
        #     club_file = None
        return embeds
async def total_trophies(self, config: Config) -> int:
total = 0
for member in self.all_members:
try:
brawlers = await config.user(member).brawlers()
total += self.get_user_trophies(brawlers)
except Exception:
continue
return total
@staticmethod
def get_user_trophies(brawlers: dict) -> int:
return sum([brawlers[brawler]["trophies"] for brawler in brawlers])
async def members_list(self, config: Config, get_league: Callable) -> (str, str):
mapping = {}
for member in self.all_members:
try:
brawlers = await config.user(member).brawlers()
mapping[member] = self.get_user_trophies(brawlers)
except Exception:
pass
# Sort mapping to get users with most trophies at the top.
mapping = {k: v for k, v in sorted(mapping.items(), key=lambda x: x[1], reverse=True)}
# total_num = len(mapping)
first_ten_txt = ""
second_ten_txt = ""
third_ten_txt = ""
fourth_ten_txt = ""
fifth_ten_txt = ""
# whole_txt = ""
for idx, user in enumerate(mapping):
pos = "Member"
if user.id == self.president.id:
pos = "**President**"
elif user.id in [vp.id for vp in self.vice_presidents]:
pos = "**Vice President**"
elif user.id in [s.id for s in self.seniors]:
pos = "**Senior**"
_, emoji = await get_league(mapping[user])
txt = f"\n`{(idx+1):02d}.` {user} {emoji}{mapping[user]} ({pos})"
if idx in range(0, 10):
first_ten_txt += txt
if idx in range(10, 20):
second_ten_txt += txt
if idx in range(20, 30):
third_ten_txt += txt
if idx in range(30, 40):
fourth_ten_txt += txt
if idx in range(40, 50):
fifth_ten_txt += txt
pages = [
page for page in
[first_ten_txt, second_ten_txt, third_ten_txt, fourth_ten_txt, fifth_ten_txt]
if page.strip()
]
return pages
@staticmethod
def get_club_id(used_ids: list, default_length: int) -> (str, int):
def gen_id(length=default_length):
id = "".join(
[random.choice(string.ascii_uppercase + string.digits) for _ in range(length)]
)
if id not in used_ids:
return id
else:
return False
id = gen_id()
if id is False:
# If id is not unique, try generating id of default length 3 more times.
# Increase length by one if still not unique.
for _ in range(3):
id = gen_id()
if id is False:
continue
else:
return id
default_length += 1
id = gen_id(default_length)
return id, default_length
@classmethod
async def club_from_id(cls, id: str, config: Config, bot: Red):
clubs = await config.clubs()
for club in clubs:
if club["id"] == id:
return await cls.from_json(club, bot)
    async def remove_user(self, user: discord.User, config: Config):
        """Remove *user* from the club, reassigning the presidency if needed.

        If the departing user is the president, a random successor is picked
        from vice presidents, then seniors, then members.  When no successor
        exists, the club record is deleted from the database entirely and
        ``True`` is returned; otherwise the stored club is rewritten via
        :meth:`update_club`.
        """
        def choose_new_pres(pool: list):
            # Pick a random successor from *pool*; random.choice raises
            # IndexError on an empty pool, which we translate to False so
            # the caller can fall through to the next rank.
            try:
                new_pres = random.choice(pool)
                # Remove it from pool.
                pool.remove(new_pres)
                # Set it as new president.
                self.president = new_pres
                return True
            except IndexError:
                return False

        if user in self.all_members:
            # NOTE(review): assumes ``all_members`` is a mutable aggregate
            # of every rank list — confirm removal here propagates as
            # intended.
            self.all_members.remove(user)
        if user.id == self.president.id:
            if not choose_new_pres(self.vice_presidents):
                if not choose_new_pres(self.seniors):
                    if not choose_new_pres(self.members):
                        # Empty club, remove it from database.
                        async with config.clubs() as clubs:
                            where = next(i for i, d in enumerate(clubs) if d.get('id') == self.id)
                            del clubs[where]
                        return True
        else:
            # Non-president: drop the user from whichever rank list holds them.
            if user in self.vice_presidents:
                self.vice_presidents.remove(user)
            elif user in self.seniors:
                self.seniors.remove(user)
            elif user in self.members:
                self.members.remove(user)
        await self.update_club(config)
async def add_user(self, user: discord.User, config: Config):
if self.ctype in ["closed", "invite"]:
raise ValueError("Club type is `closed` or `invite-only`.")
self.members.append(user)
await self.update_club(config)
    async def promote_user(self, user: discord.User, ctx: Context, config: Config):
        """Promote *user* one rank, gated by the caller's own rank.

        Presidents can promote anyone: promoting a vice president swaps the
        presidency after a yes/no reaction confirmation; seniors become vice
        presidents; members become seniors.  Vice presidents may only
        promote plain members to senior.  Raises ``ValueError`` on invalid
        promotions.  NOTE(review): a caller who is neither president nor
        vice president falls through silently but still triggers a config
        rewrite via ``update_club`` — confirm that is intended.
        """
        if user.id == self.president.id:
            raise ValueError(f"{user.name} is the club President!")
        if ctx.author.id == self.president.id:
            if user in self.vice_presidents:
                # President -> VP swap demotes the caller, so require an
                # explicit reaction-based confirmation first.
                msg = await ctx.send(
                    f"Promoting {user.name} will demote you and make them the President."
                    " Are you sure you want to continue?"
                )
                start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
                pred = ReactionPredicate.yes_or_no(msg, ctx.author)
                await ctx.bot.wait_for("reaction_add", check=pred)
                if pred.result is True:
                    self.president = user
                    self.vice_presidents.remove(user)
                    self.vice_presidents.append(ctx.author)
                    await ctx.send(f"Promoted {user.name} to President!")
                else:
                    # Early return: nothing changed, so skip update_club below.
                    return await ctx.send("Cancelled promotion.")
            elif user in self.seniors:
                self.seniors.remove(user)
                self.vice_presidents.append(user)
                await ctx.send(f"Promoted {user.name} to Vice President!")
            elif user in self.members:
                self.members.remove(user)
                self.seniors.append(user)
                await ctx.send(f"Promoted {user.name} to Senior!")
        if ctx.author in self.vice_presidents:
            # NOTE(review): plain `if`, not `elif` — after a presidency swap
            # above the (former) president is appended to vice_presidents and
            # re-enters this branch, but the promoted user is no longer in
            # any of the lists checked below, so no sub-branch fires.
            if user in self.vice_presidents:
                raise ValueError(f"{user.name} is equal to you in hierarchy!")
            elif user in self.seniors:
                raise ValueError(f"Only club President can promote a Senior to Vice President")
            elif user in self.members:
                self.members.remove(user)
                self.seniors.append(user)
                await ctx.send(f"Promoted {user.name} to Senior!")
        await self.update_club(config)
    async def demote_user(self, user: discord.User, ctx: Context, config: Config):
        """Demote *user* one rank, gated by the caller's own rank.

        Presidents may demote vice presidents and seniors; vice presidents
        may demote seniors only.  Demoting a plain member raises
        ``ValueError`` directing the caller to `club kick` instead.
        """
        if user.id == self.president.id:
            raise ValueError(f"{user.name} is the club President!")
        if ctx.author.id == self.president.id:
            if user in self.vice_presidents:
                # NOTE(review): the demoted VP is removed but not appended to
                # ``seniors`` even though the message says "to Senior" —
                # confirm rank membership is derived elsewhere.
                self.vice_presidents.remove(user)
                await ctx.send(f"Demoted {user.name} to Senior!")
            elif user in self.seniors:
                self.seniors.remove(user)
                await ctx.send(f"Demoted {user.name} to Member!")
            elif user in self.members:
                raise ValueError(
                    f"{user.name} is already a Member."
                    " Use `club kick` command to kick member out of the club."
                )
        if ctx.author in self.vice_presidents:
            # Plain `if`, not `elif`: the president is never in
            # vice_presidents, so this branch only applies to VP callers.
            if user in self.vice_presidents:
                raise ValueError(f"{user.name} is equal to you in hierarchy!")
            elif user in self.seniors:
                self.seniors.remove(user)
                await ctx.send(f"Demoted {user.name} to Member!")
            elif user in self.members:
                raise ValueError(
                    f"{user.name} is already a Member."
                    " Use `club kick` command to kick member out of the club."
                )
        await self.update_club(config)
async def update_club(self, config: Config):
async with config.clubs() as clubs:
where = next(i for i, d in enumerate(clubs) if d.get('id') == self.id)
clubs[where] = self.to_json()
return True
| true | true |
f733f4092a476f843a89ad62d76a322b2386d5f2 | 3,586 | py | Python | gui/qt/qrcodewidget.py | namuyan/electrum-fjc | 1a5c4a582f0fcdbaeca2b721ee729f43cd7915a2 | [
"MIT"
] | 1 | 2017-07-06T03:03:25.000Z | 2017-07-06T03:03:25.000Z | gui/qt/qrcodewidget.py | namuyan/electrum-fjc | 1a5c4a582f0fcdbaeca2b721ee729f43cd7915a2 | [
"MIT"
] | null | null | null | gui/qt/qrcodewidget.py | namuyan/electrum-fjc | 1a5c4a582f0fcdbaeca2b721ee729f43cd7915a2 | [
"MIT"
] | null | null | null | from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtGui as QtGui
import os
import qrcode
import electrum_ltc
from electrum_ltc.i18n import _
from util import WindowModalDialog
class QRCodeWidget(QWidget):
    """Widget that renders its *data* payload as a QR code.

    If *fixedSize* is a pixel size, the widget is locked to that square;
    otherwise its minimum size tracks the QR matrix dimensions.
    """

    def __init__(self, data = None, fixedSize=False):
        QWidget.__init__(self)
        self.data = None
        self.qr = None
        self.fixedSize=fixedSize
        if fixedSize:
            self.setFixedSize(fixedSize, fixedSize)
        self.setData(data)

    def setData(self, data):
        # Rebuild the QR matrix only when the payload actually changes.
        if self.data != data:
            self.data = data
            if self.data:
                self.qr = qrcode.QRCode()
                self.qr.add_data(self.data)
                if not self.fixedSize:
                    # 5 px per module keeps small codes legible.
                    k = len(self.qr.get_matrix())
                    self.setMinimumSize(k*5,k*5)
            else:
                self.qr = None
            self.update()

    def paintEvent(self, e):
        """Paint the QR matrix; paint a plain white fill while there is data
        but no QR object yet."""
        if not self.data:
            return
        black = QColor(0, 0, 0, 255)
        white = QColor(255, 255, 255, 255)
        if not self.qr:
            qp = QtGui.QPainter()
            qp.begin(self)
            qp.setBrush(white)
            qp.setPen(white)
            r = qp.viewport()
            qp.drawRect(0, 0, r.width(), r.height())
            qp.end()
            return
        matrix = self.qr.get_matrix()
        k = len(matrix)
        qp = QtGui.QPainter()
        qp.begin(self)
        r = qp.viewport()
        margin = 10
        framesize = min(r.width(), r.height())
        boxsize = int( (framesize - 2*margin)/k )
        size = k*boxsize
        # NOTE(review): `/2` relies on integer division (Python 2 / PyQt4
        # era); under Python 3 these become floats — confirm before porting.
        left = (r.width() - size)/2
        top = (r.height() - size)/2
        # Make a white margin around the QR in case of dark theme use
        qp.setBrush(white)
        qp.setPen(white)
        qp.drawRect(left-margin, top-margin, size+(margin*2), size+(margin*2))
        qp.setBrush(black)
        qp.setPen(black)
        # NOTE: `r` is reused as the row index below, shadowing the viewport
        # rect above (which is no longer needed at this point).
        for r in range(k):
            for c in range(k):
                if matrix[r][c]:
                    qp.drawRect(left+c*boxsize, top+r*boxsize, boxsize - 1, boxsize - 1)
        qp.end()
class QRDialog(WindowModalDialog):
    """Modal dialog showing a QR code with Copy / Save / Close buttons.

    With *show_text* set, the raw payload is also shown read-only beneath
    the code.  Copy and Save are only offered when a config is available,
    because both work by saving a PNG grab into the config directory.
    """

    def __init__(self, data, parent=None, title = "", show_text=False):
        WindowModalDialog.__init__(self, parent, title)
        vbox = QVBoxLayout()
        qrw = QRCodeWidget(data)
        vbox.addWidget(qrw, 1)
        if show_text:
            text = QTextEdit()
            text.setText(data)
            text.setReadOnly(True)
            vbox.addWidget(text)
        hbox = QHBoxLayout()
        hbox.addStretch(1)
        config = electrum_ltc.get_config()
        if config:
            # The same temp file backs both the Save and Copy actions.
            filename = os.path.join(config.path, "qrcode.png")

            def print_qr():
                # Grab the rendered widget as a pixmap and save it to disk.
                p = QPixmap.grabWindow(qrw.winId())
                p.save(filename, 'png')
                self.show_message(_("QR code saved to file") + " " + filename)

            def copy_to_clipboard():
                # Round-trip through the PNG file to put an image (not text)
                # on the clipboard.
                p = QPixmap.grabWindow(qrw.winId())
                p.save(filename, 'png')
                QApplication.clipboard().setImage(QImage(filename))
                self.show_message(_("QR code copied to clipboard"))

            b = QPushButton(_("Copy"))
            hbox.addWidget(b)
            b.clicked.connect(copy_to_clipboard)

            b = QPushButton(_("Save"))
            hbox.addWidget(b)
            b.clicked.connect(print_qr)

        b = QPushButton(_("Close"))
        hbox.addWidget(b)
        b.clicked.connect(self.accept)
        b.setDefault(True)
        vbox.addLayout(hbox)
        self.setLayout(vbox)
| 27.374046 | 88 | 0.53932 | from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtGui as QtGui
import os
import qrcode
import electrum_ltc
from electrum_ltc.i18n import _
from util import WindowModalDialog
class QRCodeWidget(QWidget):
def __init__(self, data = None, fixedSize=False):
QWidget.__init__(self)
self.data = None
self.qr = None
self.fixedSize=fixedSize
if fixedSize:
self.setFixedSize(fixedSize, fixedSize)
self.setData(data)
def setData(self, data):
if self.data != data:
self.data = data
if self.data:
self.qr = qrcode.QRCode()
self.qr.add_data(self.data)
if not self.fixedSize:
k = len(self.qr.get_matrix())
self.setMinimumSize(k*5,k*5)
else:
self.qr = None
self.update()
def paintEvent(self, e):
if not self.data:
return
black = QColor(0, 0, 0, 255)
white = QColor(255, 255, 255, 255)
if not self.qr:
qp = QtGui.QPainter()
qp.begin(self)
qp.setBrush(white)
qp.setPen(white)
r = qp.viewport()
qp.drawRect(0, 0, r.width(), r.height())
qp.end()
return
matrix = self.qr.get_matrix()
k = len(matrix)
qp = QtGui.QPainter()
qp.begin(self)
r = qp.viewport()
margin = 10
framesize = min(r.width(), r.height())
boxsize = int( (framesize - 2*margin)/k )
size = k*boxsize
left = (r.width() - size)/2
top = (r.height() - size)/2
qp.setBrush(white)
qp.setPen(white)
qp.drawRect(left-margin, top-margin, size+(margin*2), size+(margin*2))
qp.setBrush(black)
qp.setPen(black)
for r in range(k):
for c in range(k):
if matrix[r][c]:
qp.drawRect(left+c*boxsize, top+r*boxsize, boxsize - 1, boxsize - 1)
qp.end()
class QRDialog(WindowModalDialog):
def __init__(self, data, parent=None, title = "", show_text=False):
WindowModalDialog.__init__(self, parent, title)
vbox = QVBoxLayout()
qrw = QRCodeWidget(data)
vbox.addWidget(qrw, 1)
if show_text:
text = QTextEdit()
text.setText(data)
text.setReadOnly(True)
vbox.addWidget(text)
hbox = QHBoxLayout()
hbox.addStretch(1)
config = electrum_ltc.get_config()
if config:
filename = os.path.join(config.path, "qrcode.png")
def print_qr():
p = QPixmap.grabWindow(qrw.winId())
p.save(filename, 'png')
self.show_message(_("QR code saved to file") + " " + filename)
def copy_to_clipboard():
p = QPixmap.grabWindow(qrw.winId())
p.save(filename, 'png')
QApplication.clipboard().setImage(QImage(filename))
self.show_message(_("QR code copied to clipboard"))
b = QPushButton(_("Copy"))
hbox.addWidget(b)
b.clicked.connect(copy_to_clipboard)
b = QPushButton(_("Save"))
hbox.addWidget(b)
b.clicked.connect(print_qr)
b = QPushButton(_("Close"))
hbox.addWidget(b)
b.clicked.connect(self.accept)
b.setDefault(True)
vbox.addLayout(hbox)
self.setLayout(vbox)
| true | true |
f733f4bccad2803286b6d1cfa79643e6b4b2f217 | 505 | py | Python | squeezenet/squeezenet.py | sbmalik/pytorchx | 938ba5855cfb72b0dbce91af8c0a6d0e3943f122 | [
"MIT"
] | 133 | 2020-03-09T03:13:27.000Z | 2022-03-28T12:24:31.000Z | squeezenet/squeezenet.py | sbmalik/pytorchx | 938ba5855cfb72b0dbce91af8c0a6d0e3943f122 | [
"MIT"
] | 6 | 2020-06-17T08:17:17.000Z | 2022-01-06T05:40:06.000Z | squeezenet/squeezenet.py | sbmalik/pytorchx | 938ba5855cfb72b0dbce91af8c0a6d0e3943f122 | [
"MIT"
] | 54 | 2020-02-27T09:04:01.000Z | 2022-03-23T08:18:24.000Z | import torch
from torch import nn
from torch.nn import functional as F
import torchvision
def main():
    """Export a pretrained SqueezeNet 1.1 as a serialized .pth module.

    Loads torchvision's pretrained weights, moves the model to the first
    CUDA device, runs a dummy forward pass as a sanity check, then saves
    the entire module object (not just the state_dict) to ``squeezenet.pth``.
    """
    print('cuda device count: ', torch.cuda.device_count())
    model = torchvision.models.squeezenet1_1(pretrained=True)
    model = model.eval().to('cuda:0')
    print(model)
    dummy_input = torch.ones(2, 3, 227, 227).to('cuda:0')
    output = model(dummy_input)
    print('squeezenet out:', output.shape)
    torch.save(model, "squeezenet.pth")


if __name__ == '__main__':
    main()
| 24.047619 | 59 | 0.647525 | import torch
from torch import nn
from torch.nn import functional as F
import torchvision
def main():
print('cuda device count: ', torch.cuda.device_count())
net = torchvision.models.squeezenet1_1(pretrained=True)
net = net.eval()
net = net.to('cuda:0')
print(net)
tmp = torch.ones(2, 3, 227, 227).to('cuda:0')
out = net(tmp)
print('squeezenet out:', out.shape)
torch.save(net, "squeezenet.pth")
if __name__ == '__main__':
main()
| true | true |
f733f543c9317810d0f46d56dde0668916ac6e64 | 3,643 | py | Python | tests/util/test_replace_phi.py | bbieniek/deidentify | 7021bf0540e0a7f931e65544d12a2909c79a14eb | [
"MIT"
] | 64 | 2020-01-16T16:20:47.000Z | 2022-03-31T12:59:19.000Z | tests/util/test_replace_phi.py | HabibMrad/deidentify | d8960a74c852a71b29a6ee0fd6a3cf7f946a5f60 | [
"MIT"
] | 14 | 2020-01-28T08:47:06.000Z | 2022-02-12T08:32:12.000Z | tests/util/test_replace_phi.py | HabibMrad/deidentify | d8960a74c852a71b29a6ee0fd6a3cf7f946a5f60 | [
"MIT"
] | 12 | 2020-01-21T07:54:04.000Z | 2022-02-19T06:42:53.000Z | import re
import pytest
from deidentify.base import Annotation, Document
from deidentify.util import mask_annotations, surrogate_annotations
def test_mask_annotations():
    """Masking replaces each annotated span with its [TAG] placeholder."""
    text = "De patient J. Jansen (e: j.jnsen@email.com, t: 06-12345678)"
    spans = [
        ('J. Jansen', 11, 20, 'Name', 'T0'),
        ('j.jnsen@email.com', 25, 42, 'Email', 'T1'),
        ('06-12345678', 47, 58, 'Phone_fax', 'T2'),
    ]
    annotations = [
        Annotation(text=t, start=s, end=e, tag=tag, doc_id='', ann_id=ann_id)
        for t, s, e, tag, ann_id in spans
    ]

    doc = mask_annotations(Document(name='test_doc', text=text, annotations=annotations))

    assert doc.text == "De patient [NAME] (e: [EMAIL], t: [PHONE_FAX])"
    # Offsets must be recomputed to point at the placeholders.
    assert doc.annotations == [
        Annotation(text='[NAME]', start=11, end=17, tag='Name', doc_id='', ann_id='T0'),
        Annotation(text='[EMAIL]', start=22, end=29, tag='Email', doc_id='', ann_id='T1'),
        Annotation(text='[PHONE_FAX]', start=34, end=45, tag='Phone_fax', doc_id='', ann_id='T2'),
    ]
def test_surrogate_annotations():
    """Surrogates replace PHI in place and stay aligned with the new text."""
    text = "De patient J. Jansen (e: j.jnsen@email.com, t: 06-12345678)"
    annotations = [
        Annotation(text='J. Jansen', start=11, end=20, tag='Name', doc_id='', ann_id='T0'),
        Annotation(text='j.jnsen@email.com', start=25, end=42, tag='Email', doc_id='', ann_id='T1'),
        Annotation(text='06-12345678', start=47, end=58, tag='Phone_fax', doc_id='', ann_id='T2')
    ]
    doc = Document(name='test_doc', text=text, annotations=annotations)

    surrogate_doc = list(surrogate_annotations([doc]))[0]

    assert len(surrogate_doc.annotations) == len(doc.annotations)
    # Surrogate values are random, so only the unchanged scaffolding around
    # them can be asserted exactly.
    assert re.match(r'De patient .* \(e: .*, t: .*\)', doc.text)
    assert not surrogate_doc.annotations_without_surrogates

    # Each annotation's offsets must point exactly at its surrogate text.
    for ann in surrogate_doc.annotations:
        assert surrogate_doc.text[ann.start:ann.end] == ann.text
def test_surrogate_annotations_errors_raise():
    """With the default errors='raise', an unparseable annotation fails loudly."""
    doc = Document(
        name='test_doc',
        text='This document was written on INVALID_DATE.',
        annotations=[
            Annotation(text='INVALID_DATE', start=29, end=41, tag='Date', doc_id='', ann_id='T0')
        ]
    )

    with pytest.raises(ValueError, match=r'No valid surrogate for Annotation\(.*INVALID_DATE.*\)'):
        _ = list(surrogate_annotations([doc]))[0]
def test_surrogate_annotations_errors_ignore():
    """errors='ignore' leaves text and annotations untouched, but reports the
    failed annotations via ``annotations_without_surrogates``."""
    original_doc = Document(
        name='test_doc',
        text='This document was written on INVALID_DATE.',
        annotations=[
            Annotation(text='INVALID_DATE', start=29, end=41, tag='Date', doc_id='', ann_id='T0')
        ]
    )

    gen = surrogate_annotations([original_doc], errors='ignore')
    surrogate_doc = list(gen)[0]

    assert surrogate_doc.text == original_doc.text
    assert surrogate_doc.annotations == original_doc.annotations
    assert surrogate_doc.annotations_without_surrogates == original_doc.annotations
def test_surrogate_annotations_errors_coerce():
    """errors='coerce' masks failed annotations with a [Tag] placeholder and
    recomputes their offsets, while still reporting them as failures."""
    original_doc = Document(
        name='test_doc',
        text='This document was written on INVALID_DATE.',
        annotations=[
            Annotation(text='INVALID_DATE', start=29, end=41, tag='Date', doc_id='', ann_id='T0')
        ]
    )

    gen = surrogate_annotations([original_doc], errors='coerce')
    surrogate_doc = list(gen)[0]

    assert surrogate_doc.text == 'This document was written on [Date].'
    assert surrogate_doc.annotations == [
        Annotation(text='[Date]', start=29, end=35, tag='Date', doc_id='', ann_id='T0')
    ]
    assert surrogate_doc.annotations_without_surrogates == original_doc.annotations
| 39.597826 | 100 | 0.660719 | import re
import pytest
from deidentify.base import Annotation, Document
from deidentify.util import mask_annotations, surrogate_annotations
def test_mask_annotations():
text = "De patient J. Jansen (e: j.jnsen@email.com, t: 06-12345678)"
annotations = [
Annotation(text='J. Jansen', start=11, end=20, tag='Name', doc_id='', ann_id='T0'),
Annotation(text='j.jnsen@email.com', start=25, end=42, tag='Email', doc_id='', ann_id='T1'),
Annotation(text='06-12345678', start=47, end=58, tag='Phone_fax', doc_id='', ann_id='T2')
]
doc = Document(name='test_doc', text=text, annotations=annotations)
doc = mask_annotations(doc)
assert doc.text == "De patient [NAME] (e: [EMAIL], t: [PHONE_FAX])"
assert doc.annotations == [
Annotation(text='[NAME]', start=11, end=17, tag='Name', doc_id='', ann_id='T0'),
Annotation(text='[EMAIL]', start=22, end=29, tag='Email', doc_id='', ann_id='T1'),
Annotation(text='[PHONE_FAX]', start=34, end=45, tag='Phone_fax', doc_id='', ann_id='T2')
]
def test_surrogate_annotations():
text = "De patient J. Jansen (e: j.jnsen@email.com, t: 06-12345678)"
annotations = [
Annotation(text='J. Jansen', start=11, end=20, tag='Name', doc_id='', ann_id='T0'),
Annotation(text='j.jnsen@email.com', start=25, end=42, tag='Email', doc_id='', ann_id='T1'),
Annotation(text='06-12345678', start=47, end=58, tag='Phone_fax', doc_id='', ann_id='T2')
]
doc = Document(name='test_doc', text=text, annotations=annotations)
surrogate_doc = list(surrogate_annotations([doc]))[0]
assert len(surrogate_doc.annotations) == len(doc.annotations)
assert re.match(r'De patient .* \(e: .*, t: .*\)', doc.text)
assert not surrogate_doc.annotations_without_surrogates
for ann in surrogate_doc.annotations:
assert surrogate_doc.text[ann.start:ann.end] == ann.text
def test_surrogate_annotations_errors_raise():
doc = Document(
name='test_doc',
text='This document was written on INVALID_DATE.',
annotations=[
Annotation(text='INVALID_DATE', start=29, end=41, tag='Date', doc_id='', ann_id='T0')
]
)
with pytest.raises(ValueError, match=r'No valid surrogate for Annotation\(.*INVALID_DATE.*\)'):
_ = list(surrogate_annotations([doc]))[0]
def test_surrogate_annotations_errors_ignore():
original_doc = Document(
name='test_doc',
text='This document was written on INVALID_DATE.',
annotations=[
Annotation(text='INVALID_DATE', start=29, end=41, tag='Date', doc_id='', ann_id='T0')
]
)
gen = surrogate_annotations([original_doc], errors='ignore')
surrogate_doc = list(gen)[0]
assert surrogate_doc.text == original_doc.text
assert surrogate_doc.annotations == original_doc.annotations
assert surrogate_doc.annotations_without_surrogates == original_doc.annotations
def test_surrogate_annotations_errors_coerce():
original_doc = Document(
name='test_doc',
text='This document was written on INVALID_DATE.',
annotations=[
Annotation(text='INVALID_DATE', start=29, end=41, tag='Date', doc_id='', ann_id='T0')
]
)
gen = surrogate_annotations([original_doc], errors='coerce')
surrogate_doc = list(gen)[0]
assert surrogate_doc.text == 'This document was written on [Date].'
assert surrogate_doc.annotations == [
Annotation(text='[Date]', start=29, end=35, tag='Date', doc_id='', ann_id='T0')
]
assert surrogate_doc.annotations_without_surrogates == original_doc.annotations
| true | true |
f733f59c0942b1f69c38aa28dc8969d66b50fa36 | 2,324 | py | Python | money_transfer/money_transfer/doctype/transfer_from_vault/transfer_from_vault.py | staumoepeau/money_transfer | c66a2852b4851c25b41ae214f6cfb39823487add | [
"MIT"
] | null | null | null | money_transfer/money_transfer/doctype/transfer_from_vault/transfer_from_vault.py | staumoepeau/money_transfer | c66a2852b4851c25b41ae214f6cfb39823487add | [
"MIT"
] | 1 | 2016-11-03T01:06:46.000Z | 2016-11-03T01:06:46.000Z | money_transfer/money_transfer/doctype/transfer_from_vault/transfer_from_vault.py | staumoepeau/money_transfer | c66a2852b4851c25b41ae214f6cfb39823487add | [
"MIT"
] | 3 | 2017-05-11T06:51:56.000Z | 2020-12-14T21:38:33.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Caitlah Technology and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from erpnext.controllers.accounts_controller import AccountsController
class TransferfromVault(Document):
	"""Frappe DocType controller for vault-to-teller cash transfers.

	On submit the cash movement is recorded twice: as agent transaction
	details (an outflow for the vault agent and an inflow for the
	receiving agent) and as a balanced pair of GL entries.
	NOTE(review): the GL account names carry a hard-coded 'T&T' company
	suffix, and no reversing entries are posted on cancel — confirm both
	are intended.
	"""

	def validate(self):
		# Default the title so list views have something to display.
		if not self.title:
			self.title = self.doctype

	def on_submit(self):
		self.make_trxn_entries_out()
		self.make_trxn_entries_in()
		self.make_gl_entries()

	def make_trxn_entries_out(self):
		"""Record the outflow in the vault agent's transaction history."""
		userid = frappe.get_doc("Agents", self.transfer_from_vault)
		doc = frappe.new_doc("Transactions Details")
		doc.update({
			"user_id": userid.agent_user,
			"posting_date": self.transfer_date,
			"currency": userid.agents_currency,
			"description": self.doctype,
			"outflow": self.transfer_amount,
			"mctn": self.name
		})
		doc.insert()
		doc.submit()

	def make_trxn_entries_in(self):
		"""Record the inflow in the receiving agent's transaction history."""
		userid = frappe.get_doc("Agents", self.transfer_to_agent)
		doc = frappe.new_doc("Transactions Details")
		doc.update({
			"user_id": userid.agent_user,
			"posting_date": self.transfer_date,
			"currency": userid.agents_currency,
			"description": self.doctype,
			"inflow": self.transfer_amount,
			"mctn": self.name
		})
		doc.insert()
		doc.submit()

	def make_gl_entries(self, cancel=0, adv_adj=0):
		"""Post a balanced GL pair for the transfer.

		``cancel``/``adv_adj`` are passed straight through to ERPNext's
		``make_gl_entries``.  NOTE(review): the entries credit
		'Cash in Till - T&T' and debit 'Cash in Vault - T&T', which looks
		inverted for a vault->teller movement — verify against the chart
		of accounts.
		"""
		from erpnext.accounts.general_ledger import make_gl_entries
		gl_map = []
		gl_map.append(
			frappe._dict({
				"posting_date": self.transfer_date,
				"transaction_date": self.transfer_date,
				"account": "Cash in Till - T&T",
				"credit": self.transfer_amount,
				"remarks": "Transfer from Vault to Teller",
				"voucher_type": self.doctype,
				"voucher_no": self.name,
				"against": "Cash in Vault - T&T"
			}))
		gl_map.append(
			frappe._dict({
				"posting_date": self.transfer_date,
				"transaction_date": self.transfer_date,
				"account": "Cash in Vault - T&T",
				"debit": self.transfer_amount,
				"remarks": "Transfer from Vault to Teller",
				"voucher_type": self.doctype,
				"voucher_no": self.name,
				"against": "Cash in Till - T&T"
			}))
		if gl_map:
			make_gl_entries(gl_map, cancel=cancel, adv_adj=adv_adj)
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from erpnext.controllers.accounts_controller import AccountsController
class TransferfromVault(Document):
def validate(self):
if not self.title:
self.title = self.doctype
def on_submit(self):
self.make_trxn_entries_out()
self.make_trxn_entries_in()
self.make_gl_entries()
def make_trxn_entries_out(self):
userid = frappe.get_doc("Agents", self.transfer_from_vault)
doc = frappe.new_doc("Transactions Details")
doc.update({
"user_id": userid.agent_user,
"posting_date": self.transfer_date,
"currency": userid.agents_currency,
"description": self.doctype,
"outflow": self.transfer_amount,
"mctn": self.name
})
doc.insert()
doc.submit()
def make_trxn_entries_in(self):
userid = frappe.get_doc("Agents", self.transfer_to_agent)
doc = frappe.new_doc("Transactions Details")
doc.update({
"user_id": userid.agent_user,
"posting_date": self.transfer_date,
"currency": userid.agents_currency,
"description": self.doctype,
"inflow": self.transfer_amount,
"mctn": self.name
})
doc.insert()
doc.submit()
def make_gl_entries(self, cancel=0, adv_adj=0):
from erpnext.accounts.general_ledger import make_gl_entries
gl_map = []
gl_map.append(
frappe._dict({
"posting_date": self.transfer_date,
"transaction_date": self.transfer_date,
"account": "Cash in Till - T&T",
"credit": self.transfer_amount,
"remarks": "Transfer from Vault to Teller",
"voucher_type": self.doctype,
"voucher_no": self.name,
"against": "Cash in Vault - T&T"
}))
gl_map.append(
frappe._dict({
"posting_date": self.transfer_date,
"transaction_date": self.transfer_date,
"account": "Cash in Vault - T&T",
"debit": self.transfer_amount,
"remarks": "Transfer from Vault to Teller",
"voucher_type": self.doctype,
"voucher_no": self.name,
"against": "Cash in Till - T&T"
}))
if gl_map:
make_gl_entries(gl_map, cancel=cancel, adv_adj=adv_adj) | true | true |
f733f59f80c320d1a8315825e6c848f0cc1b0218 | 30,408 | py | Python | geniesp/sp_config.py | Sage-Bionetworks/GENIE-Sponsored-Projects | e34be3ece96144aa525c7281738736d3c5ef93cb | [
"MIT"
] | 1 | 2022-03-10T21:45:53.000Z | 2022-03-10T21:45:53.000Z | geniesp/sp_config.py | Sage-Bionetworks/GENIE-Sponsored-Projects | e34be3ece96144aa525c7281738736d3c5ef93cb | [
"MIT"
] | 36 | 2020-09-23T18:16:29.000Z | 2022-03-30T20:56:45.000Z | geniesp/sp_config.py | Sage-Bionetworks/GENIE-Sponsored-Projects | e34be3ece96144aa525c7281738736d3c5ef93cb | [
"MIT"
] | null | null | null | """
Sponsored project configuration classes
USAGE:
git clone https://github.com/cBioPortal/cbioportal.git
python runSP.py AKT1 ../cbioportal/ --staging
"""
import os
import random
import string
import pandas as pd
import synapseclient
from . import new_redcap_export_mapping
from . import sp_redcap_export_mapping
class Akt1(sp_redcap_export_mapping.SponsoredProjectRunner):
"""
AKT1 PROCESSES
- ONE TIMELINE FILE
- CLINICAL FILE
OS_MONTHS = death_date_int - mets_disease_date_int
OS_MONTHS_PRIMARY = death_date_int - primary_dx_date_int
All dates are converted from days to months (days/30.4)
Add headers
REMOVE PATIENTS/SAMPLES THAT DON'T HAVE GENIE SAMPLE IDS
"""
_SPONSORED_PROJECT = "AKT1"
_DATES = ["death_date_int","follow_up_date_int","primary_dx_date_int","lrr_date_int","mets_disease_date_int","sample_date_int_1",
"sequence_report_date_int_1","sequence_report_date_int_1_static","sample_date_int_2","sample_date_int_2_static",
"sequence_report_date_int_2","sequence_report_date_int_2_static","sequence_report_date_int_3_static",
"OS_MONTHS","OS_MONTHS_PRIMARY"]
_CASE_LIST_MAF_SAMPLES_TEMPLATE = "cancer_study_identifier: genie_akt1\nstable_id: genie_akt1_sequenced\ncase_list_category: all_cases_with_mutation_data\ncase_list_name: Sequenced Tumors\ncase_list_description: All sequenced samples (%s samples)\ncase_list_ids: %s"
_CASE_LIST_PATH = os.path.join(_SPONSORED_PROJECT,'case_lists')
_UNMAPPED_SYN_ID = "syn11066652"
_MAPPED_SYN_ID = "syn8404878"
_CASE_LIST_SYN_ID = "syn10145838"
_SP_SYN_ID = "syn8363325"
_REDCAP_TO_CBIOMAPPING_SYNID = "syn8220815"
_SP_REDCAP_EXPORTS_SYNID = "syn8404875" #Storage of not found samples
_NUM_SAMPLE_COLS = 3
def addOSMonths(self, sponsoredProject_mapped_df):
#Must add new date fields to the DATE variable along with add to the mapping table: syn8220815
sponsoredProject_mapped_df['OS_MONTHS'] = sponsoredProject_mapped_df['death_date_int'] - sponsoredProject_mapped_df['mets_disease_date_int']
sponsoredProject_mapped_df['OS_MONTHS_PRIMARY'] = sponsoredProject_mapped_df['death_date_int'] - sponsoredProject_mapped_df['primary_dx_date_int']
return(sponsoredProject_mapped_df)
def createTemporaryGenieId(self, x, tempIdMapping):
uniqId = x['record_id'] + x['redcap_data_access_group']
tempIdMap = tempIdMapping['patientId'][tempIdMapping['uniqueId'] == uniqId]
tempId = 'GENIE-%s-%s' % (x['redcap_data_access_group'],''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)))
if len(tempIdMap) == 0:
return(tempId)
else:
return(tempIdMap.values[0])
# if sum(tempIdMapping['uniqueId'] == uniqId) == 0:
# #syn.store(synapseclient.Table(syn.get("syn10164044"),[[uniqId, tempId, SPONSORED_PROJECT]]))
# return(tempId)
# elif pd.np.isnan(temp['tempPatientId'][tempIdMapping['uniqueId'] == uniqId].values[0]):
# else:
# return(tempIdMapping[tempIdMapping['uniqueId'] == uniqId]['tempPatientId'].values[0])
def createNullPatients(self, sponsoredProject_mapped_df, tempIdMappingDf):
print("RENAMING %s NULL PATIENTS" % sum(sponsoredProject_mapped_df['genie_patient_id'].isnull()))
#Create temp patient Id
allNullPatients = sponsoredProject_mapped_df[['record_id','redcap_data_access_group','genie_patient_id']][sponsoredProject_mapped_df['genie_patient_id'].isnull()]
temporaryIds = allNullPatients.apply(lambda x: self.createTemporaryGenieId(x, tempIdMappingDf), axis =1)
if sponsoredProject_mapped_df['genie_patient_id'].isnull().any():
sponsoredProject_mapped_df['genie_patient_id'][sponsoredProject_mapped_df['genie_patient_id'].isnull()] = temporaryIds
assert sum(sponsoredProject_mapped_df['genie_patient_id'].isnull()) ==0, "Make sure there are no null genie patient Ids"
sponsoredProject_mapped_df['genie_patient_id'] = sponsoredProject_mapped_df.apply(lambda x: self.checkGenieId(x, 'redcap_data_access_group','genie_patient_id'), axis=1)
sponsoredProject_mapped_df.reset_index(inplace=True,drop=True)
return(sponsoredProject_mapped_df, temporaryIds)
    def makeTimeLineDf(self, redCapExportDf, therapyRange = 18):
        """Build the cBioPortal treatment timeline for AKT1 patients.

        Collects, per therapy number, the REDCap column names for each
        timeline attribute into parallel lists (kept aligned one entry
        per therapy start-date column), then emits one timeline row per
        collected column for every patient.  Dates are re-anchored so
        that the metastatic disease date is day 0, and a 'Metastatic
        Diagnosis' STATUS row is appended per patient.

        Args:
            redCapExportDf: mapped REDCap export, one row per patient.
            therapyRange: exclusive upper bound for the therapy-number
                suffix scanned in column names (therapy1_ .. therapy17_).

        Returns:
            tuple: (timeline DataFrame, list of source column names that
            were folded into the timeline and can be dropped elsewhere).
        """
        # Parallel lists of REDCap column names; all must end up the same
        # length (asserted below) because they become columns of the
        # per-patient timeline frame.
        START_DATE = []
        STOP_DATE = []
        TREATMENT_TYPE = []
        SUBTYPE = []
        AGENT = []
        THERAPY_DRUG_CLINTRIAL = []
        THERAPY_DRUG_AZD5363 = []
        THERAPY_DRUG_OTHER = []
        THERAPY_DRUG_DISCONTINUE = []
        THERAPY_DRUG_REASON = []
        THERAPY_COMBO_YN = []
        THERAPY_COMBO_NUM = []
        # THERAPY NUMBER: gather therapy<N>_* columns for each N
        for therapyNumber in range(1,therapyRange):
            therapyCols = [i for i in redCapExportDf if "therapy%d_" % therapyNumber in i]
            START_DATE.extend([i for i in therapyCols if "start_int" in i])
            STOP_DATE.extend([i for i in therapyCols if "end_int" in i])
            # Bare drug columns look like "therapyN_<drug>": exactly two
            # underscore-separated tokens
            AGENT.extend([i for i in therapyCols if len(i.split("_")) == 2])
            THERAPY_DRUG_CLINTRIAL.extend([i for i in therapyCols if "clintrial" in i])
            THERAPY_DRUG_AZD5363.extend([i for i in therapyCols if "azd" in i])
            THERAPY_DRUG_OTHER.extend([i for i in therapyCols if "other" in i])
            THERAPY_DRUG_DISCONTINUE.extend([i for i in therapyCols if "discontinue" in i])
            THERAPY_DRUG_REASON.extend([i for i in therapyCols if "reason" in i])
            # Per-therapy single columns are repeated len(start columns)
            # times to keep the parallel lists aligned
            THERAPY_COMBO_YN.extend([i for i in therapyCols if "combo_yn" in i] * len([i for i in therapyCols if "start_int" in i]))
            THERAPY_COMBO_NUM.extend([i for i in therapyCols if "combo_num" in i]* len([i for i in therapyCols if "start_int" in i]))
            TREATMENT_TYPE.extend(["Medical Therapy %d" % therapyNumber]* len([i for i in therapyCols if "start_int" in i]))
            SUBTYPE.extend(["Chemo/Target/Immuno etc."] * len([i for i in therapyCols if "start_int" in i]))
        # OVARIAN suppression: no drug attributes, so pad those lists with ''
        ovarian = [i for i in redCapExportDf if "ovariansup" in i]
        ovarian_len = len([i for i in ovarian if "start_int" in i])
        START_DATE.extend([i for i in ovarian if "start_int" in i])
        STOP_DATE.extend([i for i in ovarian if "end_int" in i])
        TREATMENT_TYPE.extend(["Ovarian Suppression At Primary"] * ovarian_len)
        SUBTYPE.extend(["Ovarian Suppression"] * ovarian_len)
        AGENT.extend(['']*ovarian_len)
        THERAPY_DRUG_CLINTRIAL.extend(['']*ovarian_len)
        THERAPY_DRUG_AZD5363.extend(['']*ovarian_len)
        THERAPY_DRUG_OTHER.extend(['']*ovarian_len)
        THERAPY_DRUG_DISCONTINUE.extend(['']*ovarian_len)
        THERAPY_DRUG_REASON.extend(['']*ovarian_len)
        THERAPY_COMBO_YN.extend(['']*ovarian_len)
        THERAPY_COMBO_NUM.extend(['']*ovarian_len)
        # HORMONE therapy columns
        hormo = [i for i in redCapExportDf if "hormo" in i]
        hormo_len = len([i for i in hormo if "start_int" in i])
        START_DATE.extend([i for i in hormo if "start_int" in i])
        STOP_DATE.extend([i for i in hormo if "end_int" in i])
        THERAPY_DRUG_CLINTRIAL.extend([i for i in hormo if "clintrial" in i])
        THERAPY_DRUG_AZD5363.extend(['']*hormo_len)
        THERAPY_DRUG_OTHER.extend([i for i in hormo if "other" in i])
        THERAPY_DRUG_DISCONTINUE.extend([i for i in hormo if "discon" in i])
        THERAPY_DRUG_REASON.extend([i for i in hormo if "reason" in i])
        # Agent columns are whatever hormone columns remain after
        # excluding every known attribute suffix
        AGENT.extend([i for i in hormo if "reason" not in i and "discon" not in i and "other" not in i and "clintrial" not in i and "start_int" not in i and "end_int" not in i and "therapy" not in i])
        THERAPY_COMBO_YN.extend(['']*hormo_len)
        THERAPY_COMBO_NUM.extend(['']*hormo_len)
        SUBTYPE.extend(["Hormone Therapy"] * hormo_len)
        TREATMENT_TYPE.extend(["Medical Therapy 1"] * hormo_len)
        EVENT_TYPE = ["TREATMENT"]*len(AGENT)
        # METASTATIC DIAGNOSIS: one STATUS row per patient
        metaDiagnosis = pd.DataFrame()
        metaDiagnosis['PATIENT_ID'] = redCapExportDf['genie_patient_id']
        # MET DISEASE IS TIMEPOINT 0 (timeline is anchored on it below)
        metaDiagnosis['START_DATE'] = 0
        metaDiagnosis['EVENT_TYPE'] = 'STATUS'
        metaDiagnosis['STATUS'] = 'Metastatic Diagnosis'
        # NOTE(review): START_DATE is the constant 0, so this filter can
        # never drop rows — presumably a leftover from when the real
        # mets_disease_date_int was used; confirm before removing.
        metaDiagnosis = metaDiagnosis[~metaDiagnosis['START_DATE'].isnull()]
        # Source columns consumed by the timeline (callers may drop them)
        removeCols = START_DATE+STOP_DATE+AGENT+THERAPY_DRUG_CLINTRIAL+THERAPY_DRUG_AZD5363+THERAPY_DRUG_OTHER+THERAPY_DRUG_DISCONTINUE+THERAPY_DRUG_REASON+THERAPY_COMBO_YN+THERAPY_COMBO_NUM
        # All parallel lists must be equally long or the column-wise
        # assignment below would misalign attributes across rows
        lengths = set([
            len(START_DATE),
            len(STOP_DATE),
            len(TREATMENT_TYPE),
            len(SUBTYPE),
            len(AGENT),
            len(THERAPY_DRUG_CLINTRIAL),
            len(THERAPY_DRUG_AZD5363),
            len(THERAPY_DRUG_OTHER),
            len(THERAPY_DRUG_DISCONTINUE),
            len(THERAPY_DRUG_REASON),
            len(THERAPY_COMBO_YN),
            len(THERAPY_COMBO_NUM),
            len(EVENT_TYPE)])
        assert len(lengths) == 1,"Lengths must all be the same"
        total = pd.DataFrame()
        # One timeline frame per patient; dates re-anchored on the
        # metastatic disease date (day 0)
        for i in range(len(redCapExportDf)):
            timelineDF = pd.DataFrame()
            timelineDF['PATIENT_ID'] = [redCapExportDf['genie_patient_id'][i]]*len(START_DATE)
            # MET DISEASE IS TIMEPOINT 0
            timelineDF['START_DATE'] = redCapExportDf.iloc[i][START_DATE].reset_index(drop=True) - redCapExportDf.iloc[i]['mets_disease_date_int']
            timelineDF['STOP_DATE'] = redCapExportDf.iloc[i][STOP_DATE].reset_index(drop=True) - redCapExportDf.iloc[i]['mets_disease_date_int']
            timelineDF['EVENT_TYPE'] = EVENT_TYPE
            timelineDF['TREATMENT_TYPE'] = TREATMENT_TYPE
            timelineDF['SUBTYPE'] = SUBTYPE
            timelineDF['AGENT'] = redCapExportDf.iloc[i][AGENT].reset_index(drop=True)
            timelineDF['THERAPY_DRUG_CLINTRIAL'] = redCapExportDf.iloc[i][THERAPY_DRUG_CLINTRIAL].reset_index(drop=True)
            timelineDF['THERAPY_DRUG_AZD5363'] = redCapExportDf.iloc[i][THERAPY_DRUG_AZD5363].reset_index(drop=True)
            timelineDF['THERAPY_DRUG_OTHER'] = redCapExportDf.iloc[i][THERAPY_DRUG_OTHER].reset_index(drop=True)
            timelineDF['THERAPY_DRUG_DISCONTINUE'] = redCapExportDf.iloc[i][THERAPY_DRUG_DISCONTINUE].reset_index(drop=True)
            timelineDF['THERAPY_DRUG_REASON'] = redCapExportDf.iloc[i][THERAPY_DRUG_REASON].reset_index(drop=True)
            timelineDF['THERAPY_COMBO_YN'] = redCapExportDf.iloc[i][THERAPY_COMBO_YN].reset_index(drop=True)
            timelineDF['THERAPY_COMBO_NUM'] = redCapExportDf.iloc[i][THERAPY_COMBO_NUM].reset_index(drop=True)
            total = total.append(timelineDF)
        total['STATUS'] = ''
        # Append STATUS rows, then restore the treatment-frame column order
        ordering = total.columns
        total = total.append(metaDiagnosis)
        total = total[ordering]
        return(total,removeCols)
def getSpecimen(self, getTimelineSpecimen):
specimen = pd.DataFrame()
specimen['PATIENT_ID'] = getTimelineSpecimen['PATIENT_ID']
specimen['START_DATE'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC - getTimelineSpecimen.METS_DISEASE_DATE_INT
specimen['EVENT_TYPE'] = 'SPECIMEN'
specimen['SAMPLE_ID'] = getTimelineSpecimen['SAMPLE_ID']
specimen['SAMPLE_NOTES'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC
specimen = specimen[~specimen['START_DATE'].isnull()]
return(specimen)
class Erbb2(sp_redcap_export_mapping.SponsoredProjectRunner):
    """ERBB2 sponsored project: maps REDCap exports to cBioPortal files.

    Holds the ERBB2-specific Synapse ids and column conventions and
    implements the project-specific clinical/timeline transforms used by
    the shared SponsoredProjectRunner pipeline.
    """

    _SPONSORED_PROJECT = "ERBB2"
    # Day-integer date columns (register new ones here AND in the
    # mapping table syn8220815)
    _DATES = ['follow_up_date_int','date_death_int','primary_dx_date_int','lrr_date_int','date_first_met_int',
              'sample_date_int_1','seq_report_date_int_1','sample_date_int_2','seq_report_date_int_2','sample_date_int_3',
              'sequence_report_date_int_3','sample_date_int_4','sequence_report_date_int_4','sample_date_int_5','sequence_report_date_int_5',
              'sample_date_int_6','seq_report_date_int_6','sample_date_int_7','seq_report_date_int_7','sample_date_int_8',
              'sequence_report_date_int_8','sample_date_int_9','sequence_report_date_int_9','sample_date_int_10',
              'sequence_report_date_int_10','date_bso_int','OS_MONTHS','OS_MONTHS_PRIMARY']
    _CASE_LIST_MAF_SAMPLES_TEMPLATE = "cancer_study_identifier: genie_erbb2\nstable_id: genie_erbb2_sequenced\ncase_list_category: all_cases_with_mutation_data\ncase_list_name: Sequenced Tumors\ncase_list_description: All sequenced samples (%s samples)\ncase_list_ids: %s"
    _CASE_LIST_PATH = os.path.join(_SPONSORED_PROJECT,'case_lists')
    _UNMAPPED_SYN_ID = "syn8356977"
    _MAPPED_SYN_ID = "syn8367692"
    _CASE_LIST_SYN_ID = "syn10145925"
    _SP_SYN_ID = "syn8363326"
    _REDCAP_TO_CBIOMAPPING_SYNID = "syn8363731"
    _SP_REDCAP_EXPORTS_SYNID = "syn8322425"  # Storage of not found samples
    _NUM_SAMPLE_COLS = 10

    def addOSMonths(self, sponsoredProject_mapped_df):
        """Add overall-survival columns (day deltas, despite the names).

        OS_MONTHS = death date - first metastatic date;
        OS_MONTHS_PRIMARY = death date - primary diagnosis date.
        New date fields must also be added to _DATES and to the mapping
        table syn8220815.
        """
        sponsoredProject_mapped_df['OS_MONTHS'] = sponsoredProject_mapped_df['date_death_int'] - sponsoredProject_mapped_df['date_first_met_int']
        sponsoredProject_mapped_df['OS_MONTHS_PRIMARY'] = sponsoredProject_mapped_df['date_death_int'] - sponsoredProject_mapped_df['primary_dx_date_int']
        return(sponsoredProject_mapped_df)

    def createTemporaryGenieId(self, x, tempIdMapping, patientIdCol):
        """Create a temporary GENIE id for records that don't have one.

        Unlike the AKT1 variant, a newly minted id is persisted to the
        Synapse mapping table syn10164044.

        NOTE(review): reads x['record_id_patient_id'] directly rather
        than x[patientIdCol]; assumes a "null" id is an empty string —
        a real NaN would raise TypeError on concatenation. Confirm.
        """
        uniqId = x['record_id_patient_id'] + x['redcap_data_access_group']
        if sum(tempIdMapping['uniqueId'] == uniqId) == 0:
            tempId = 'GENIE-%s-%s' % (x['redcap_data_access_group'],''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)))
            self.syn.store(synapseclient.Table(self.syn.get("syn10164044"),[[uniqId, tempId]]))
            return(tempId)
        else:
            return(tempIdMapping[tempIdMapping['uniqueId'] == uniqId]['temporaryId'].values[0])

    def createNullPatients(self, sponsoredProject_mapped_df, tempIdMappingDf):
        """Upper-case centers and fill null patient ids with temp ids.

        Returns:
            tuple: (updated DataFrame, Series of newly assigned ids).
        """
        #### TIMELINE FILE
        sponsoredProject_mapped_df['redcap_data_access_group'] = [i.upper() for i in sponsoredProject_mapped_df['redcap_data_access_group']]
        nullIdx = sponsoredProject_mapped_df['record_id_patient_id'].isnull()
        allNullPatients = sponsoredProject_mapped_df[['record_id_patient_id','redcap_data_access_group']][nullIdx]
        temporaryIds = allNullPatients.apply(lambda x: self.createTemporaryGenieId(x, tempIdMappingDf, 'record_id_patient_id'), axis =1)
        if not temporaryIds.empty:
            # .loc replaces chained assignment (df['col'][mask] = ...),
            # which pandas may apply to a copy and silently drop
            sponsoredProject_mapped_df.loc[nullIdx, 'record_id_patient_id'] = temporaryIds
        assert sum(sponsoredProject_mapped_df['record_id_patient_id'].isnull()) == 0, "Make sure there are no null genie patient Ids"
        sponsoredProject_mapped_df['record_id_patient_id'] = sponsoredProject_mapped_df.apply(lambda x: self.checkGenieId(x, 'redcap_data_access_group','record_id_patient_id'), axis=1)
        return(sponsoredProject_mapped_df, temporaryIds)

    def makeTimeLineDf(self, redCapExportDf, therapyRange = 16):
        """Build the cBioPortal treatment timeline for ERBB2 patients.

        Collects the REDCap column names for each timeline attribute
        into parallel lists (aligned one entry per therapy start-date
        column), then emits one row per collected column per patient,
        anchored so the first metastatic date is day 0.

        Returns:
            tuple: (timeline DataFrame, list of consumed source columns).
        """
        # Parallel lists; all must end up the same length (asserted below)
        START_DATE = []
        STOP_DATE = []
        TREATMENT_TYPE = []
        SUBTYPE = []
        AGENT = []
        THERAPY_RESPONSE = []
        THERAPY_DRUG_OTHER = []
        THERAPY_DRUG_DISCONTINUE = []
        THERAPY_DRUG_REASON = []
        THERAPY_COMBO_YN = []
        THERAPY_COMBO_NUM = []
        ADD_TREATMENT = []
        TREATMENT_SETTING = []
        for therapyNumber in range(1,therapyRange):
            therapyCols = [i for i in redCapExportDf if ("therapy%d_" % therapyNumber in i or "combo_therapy_yn_%d" %therapyNumber == i or "add_treatment_%d" % therapyNumber == i or "treatment_setting_%d" % therapyNumber == i)]
            START_DATE.extend([i for i in therapyCols if "start_int" in i])
            STOP_DATE.extend([i for i in therapyCols if "end_int" in i])
            # Bare drug columns: "therapyN_<drug>", excluding attributes
            AGENT.extend([i for i in therapyCols if len(i.split("_")) == 2 and "response" not in i and "ctdrug" not in i])
            THERAPY_DRUG_OTHER.extend([i for i in therapyCols if "other" in i])
            THERAPY_DRUG_DISCONTINUE.extend([i for i in therapyCols if "discon" in i])
            THERAPY_DRUG_REASON.extend([i for i in therapyCols if "reason" in i])
            # Per-therapy single columns repeated len(start columns) times
            # to keep the parallel lists aligned
            THERAPY_COMBO_YN.extend([i for i in therapyCols if "combo_therapy_yn" in i] * len([i for i in therapyCols if "start_int" in i]))
            THERAPY_COMBO_NUM.extend([i for i in therapyCols if "combo_num" in i]* len([i for i in therapyCols if "start_int" in i]))
            TREATMENT_TYPE.extend(["Medical Therapy %d" % therapyNumber]* len([i for i in therapyCols if "start_int" in i]))
            SUBTYPE.extend(["Chemo/Target/Immuno etc."] * len([i for i in therapyCols if "start_int" in i]))
            THERAPY_RESPONSE.extend([i for i in therapyCols if "response" in i] *len([i for i in therapyCols if "start_int" in i]))
            ADD_TREATMENT.extend([i for i in therapyCols if "add_treatment" in i] * len([i for i in therapyCols if "start_int" in i]))
            TREATMENT_SETTING.extend([i for i in therapyCols if "treatment_setting" in i] * len([i for i in therapyCols if "start_int" in i]))
        EVENT_TYPE = ["TREATMENT"]*len(AGENT)
        # NOTE(review): pads ADD_TREATMENT by four entries so the length
        # assertion passes — presumably four therapies lack an
        # add_treatment column; confirm against the REDCap dictionary.
        ADD_TREATMENT.extend(['']*4)
        # METASTATIC DIAGNOSIS: one STATUS row per patient
        metaDiagnosis = pd.DataFrame()
        # MET DISEASE IS TIMEPOINT 0
        metaDiagnosis['PATIENT_ID'] = redCapExportDf['record_id_patient_id']
        metaDiagnosis['START_DATE'] = 0
        metaDiagnosis['EVENT_TYPE'] = 'STATUS'
        metaDiagnosis['STATUS'] = 'Metastatic Diagnosis'
        metaDiagnosis = metaDiagnosis[~metaDiagnosis['START_DATE'].isnull()]
        # Source columns consumed by the timeline (callers may drop them)
        removeCols = START_DATE+STOP_DATE+AGENT+THERAPY_DRUG_OTHER+THERAPY_RESPONSE+THERAPY_DRUG_DISCONTINUE+THERAPY_DRUG_REASON+THERAPY_COMBO_YN+THERAPY_COMBO_NUM+ADD_TREATMENT + TREATMENT_SETTING
        lengths = set([
            len(START_DATE),
            len(STOP_DATE),
            len(TREATMENT_TYPE),
            len(SUBTYPE),
            len(AGENT),
            len(THERAPY_RESPONSE),
            len(THERAPY_DRUG_OTHER),
            len(TREATMENT_SETTING),
            len(ADD_TREATMENT),
            len(THERAPY_DRUG_DISCONTINUE),
            len(THERAPY_DRUG_REASON),
            len(THERAPY_COMBO_YN),
            len(THERAPY_COMBO_NUM),
            len(EVENT_TYPE)])
        assert len(lengths) == 1,"Lengths must all be the same"
        total = pd.DataFrame()
        for i in range(len(redCapExportDf)):
            timelineDF = pd.DataFrame()
            timelineDF['PATIENT_ID'] = [redCapExportDf['record_id_patient_id'][i]]*len(START_DATE)
            if not pd.isnull(redCapExportDf.iloc[i]['date_first_met_int']):
                timelineDF['START_DATE'] = [start if pd.isnull(start) else int(start) - int(redCapExportDf.iloc[i]['date_first_met_int']) for start in redCapExportDf.iloc[i][START_DATE].reset_index(drop=True)]
                timelineDF['STOP_DATE'] = [end if pd.isnull(end) else int(end) - int(redCapExportDf.iloc[i]['date_first_met_int']) for end in redCapExportDf.iloc[i][STOP_DATE].reset_index(drop=True)]
            else:
                # float('nan') replaces pd.np.nan (pandas.np was removed
                # in pandas >= 2.0)
                timelineDF['START_DATE'] = float('nan')
                timelineDF['STOP_DATE'] = float('nan')
            timelineDF['EVENT_TYPE'] = EVENT_TYPE
            timelineDF['TREATMENT_TYPE'] = TREATMENT_TYPE
            timelineDF['SUBTYPE'] = SUBTYPE
            timelineDF['AGENT'] = redCapExportDf.iloc[i][AGENT].reset_index(drop=True)
            timelineDF['THERAPY_DRUG_OTHER'] = redCapExportDf.iloc[i][THERAPY_DRUG_OTHER].reset_index(drop=True)
            timelineDF['THERAPY_DRUG_DISCONTINUE'] = redCapExportDf.iloc[i][THERAPY_DRUG_DISCONTINUE].reset_index(drop=True)
            timelineDF['THERAPY_DRUG_REASON'] = redCapExportDf.iloc[i][THERAPY_DRUG_REASON].reset_index(drop=True)
            timelineDF['THERAPY_COMBO_YN'] = redCapExportDf.iloc[i][THERAPY_COMBO_YN].reset_index(drop=True)
            timelineDF['THERAPY_COMBO_NUM'] = redCapExportDf.iloc[i][THERAPY_COMBO_NUM].reset_index(drop=True)
            # pd.concat replaces DataFrame.append (removed in pandas 2.0)
            total = pd.concat([total, timelineDF], sort=False)
        total['STATUS'] = ''
        # Append STATUS rows, then restore the treatment column order
        ordering = total.columns
        total = pd.concat([total, metaDiagnosis], sort=False)
        total = total[ordering]
        return(total, removeCols)

    def getSpecimen(self, getTimelineSpecimen):
        """Build SPECIMEN timeline rows anchored on the metastatic date.

        NOTE(review): PATIENT_ID is assigned before the date filters, so
        it is taken from the unfiltered frame; pandas index alignment
        makes the later columns line up, and the final START_DATE filter
        drops the rows removed by the date filters.
        """
        specimen = pd.DataFrame()
        specimen['PATIENT_ID'] = getTimelineSpecimen['PATIENT_ID']
        getTimelineSpecimen = getTimelineSpecimen[~getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC.isnull()]
        getTimelineSpecimen = getTimelineSpecimen[~getTimelineSpecimen.METS_DISEASE_DATE_INT.isnull()]
        specimen['START_DATE'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC.astype(int) - getTimelineSpecimen.METS_DISEASE_DATE_INT.astype(int)
        specimen['EVENT_TYPE'] = 'SPECIMEN'
        specimen['SAMPLE_ID'] = getTimelineSpecimen['SAMPLE_ID']
        specimen['SAMPLE_NOTES'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC
        specimen = specimen[~specimen['START_DATE'].isnull()]
        return(specimen)
class Fgfr4(new_redcap_export_mapping.SponsoredProjectRunner):
    """FGFR4 sponsored project: maps REDCap exports to cBioPortal files.

    Uses the newer redcap export mapping runner; holds FGFR4-specific
    Synapse ids and implements the treatment-timeline and specimen
    transforms.
    """

    _DATA_ELEMENT_SYN_ID = "syn12032922"
    _SPONSORED_PROJECT = 'FGFR4'
    # No need to define in class
    _CASE_LIST_PATH = os.path.join(_SPONSORED_PROJECT, 'case_lists')
    _NUM_COUNTS = 4
    _REDCAP_TO_CBIOMAPPING_SYNID = "syn15572052"
    _UNLABELLED_SYN_ID = "syn15341849"
    _LABELLED_SYN_ID = "syn15341838"
    # Storage of not found samples
    _SP_REDCAP_EXPORTS_SYNID = "syn11812526"
    _SP_SYN_ID = "syn14721789"
    _CASE_LIST_MAF_SAMPLES_TEMPLATE = (
        "cancer_study_identifier: genie_fgfr4\n"
        "stable_id: genie_fgfr4_sequenced\n"
        "case_list_category: all_cases_with_mutation_data\n"
        "case_list_name: Sequenced Tumors\n"
        "case_list_description: All sequenced samples "
        "(%s samples)\ncase_list_ids: %s")
    _CASE_LIST_SYN_ID = "syn14721794"

    # def addOSMonths(self, sponsoredProject_mapped_df):
    #     '''
    #     Must add new date fields to the DATE variable along with add
    #     to the mapping table: syn8220815
    #     '''
    #     sponsoredProject_mapped_df['OS_MONTHS'] = \
    #         sponsoredProject_mapped_df['death_date_int'] - \
    #         sponsoredProject_mapped_df['date_first_met_int']
    #     sponsoredProject_mapped_df['OS_MONTHS_PRIMARY'] = \
    #         sponsoredProject_mapped_df['death_date_int'] - \
    #         sponsoredProject_mapped_df['primary_dx_date_int']
    #     return(sponsoredProject_mapped_df)

    def makeTimeLineDf(
            self, treatmentDf, finalPatientDf, therapyRange=5):
        """Build the cBioPortal treatment timeline for FGFR4 patients.

        Collects therapy_drug<N>* column names into parallel lists (one
        entry per drug start-date column), emits one row per collected
        column per patient, then re-anchors all dates so that the
        patient's MET_DX_DATE_INT (from finalPatientDf) is day 0.

        Args:
            treatmentDf: treatment REDCap export, one row per patient,
                with a 'patient_id' column.
            finalPatientDf: patient table providing PATIENT_ID and
                MET_DX_DATE_INT anchor dates.
            therapyRange: exclusive upper bound for the therapy-drug
                number suffix (therapy_drug1 .. therapy_drug4).

        Returns:
            pd.DataFrame: timeline rows with START/STOP dates anchored
            on the metastatic diagnosis date.
        """
        # These variables are capitalized to match with the column headers
        START_DATE = []
        STOP_DATE = []
        TREATMENT_TYPE = []
        SUBTYPE = []
        AGENT = []
        THERAPY_RESPONSE = []
        # Name of Chemotherapeutic Agent or Hormone Therapy - Experimental or
        # OTHER (NCIT ID)
        THERAPY_DRUG_OTHER = []
        THERAPY_DRUG_DISCONTINUE = []
        THERAPY_DRUG_REASON = []
        TREATMENT_SETTING = []
        RXNORM_ID = []
        # Name of Chemotherapeutic Agent or Hormone Therapy - Experimental or
        # OTHER
        THERAPY_DRUG_START_ESTIMATED = []
        THERAPY_DRUG_OTHER_NAME = []
        THERAPY_DRUG_END_ESTIMATED = []
        # Gather the therapy_drug<N>* columns for each drug number
        for therapyNumber in range(1, therapyRange):
            therapyCols = [
                i for i in treatmentDf
                if "therapy_drug%d" % therapyNumber in i]
            startCols = [i for i in therapyCols if "start_int" in i]
            START_DATE.extend(startCols)
            STOP_DATE.extend([i for i in therapyCols if "end_int" in i])
            AGENT.extend([
                i for i in therapyCols if "name" in i and "other" not in i])
            # The bare "therapy_drug<N>" column holds the RxNorm id
            RXNORM_ID.extend([
                i for i in therapyCols
                if i == "therapy_drug%d" % therapyNumber])
            THERAPY_DRUG_OTHER.extend([
                i for i in therapyCols if "other" in i and 'name' not in i])
            THERAPY_DRUG_DISCONTINUE.extend([
                i for i in therapyCols if "discon" in i])
            THERAPY_DRUG_REASON.extend([
                i for i in therapyCols if "reason" in i])
            THERAPY_DRUG_OTHER_NAME.extend([
                i for i in therapyCols if "other_name" in i])
            THERAPY_DRUG_START_ESTIMATED.extend([
                i for i in therapyCols if "start_estimated" in i])
            THERAPY_DRUG_END_ESTIMATED.extend([
                i for i in therapyCols if "end_estimated" in i])
            # Value
            TREATMENT_TYPE.extend([
                "Medical Therapy %d" % therapyNumber] * len(startCols))
        # Value
        SUBTYPE = ["Chemo/Target/Immuno etc."] * len(AGENT)
        # These three reuse one shared source column per timeline row
        TREATMENT_SETTING = ['treatment_setting'] * len(AGENT)
        THERAPY_RESPONSE = ['therapy_response'] * len(AGENT)
        # Value
        EVENT_TYPE = ["TREATMENT"]*len(AGENT)
        LINE_START = ['line_start_int'] * len(AGENT)
        REGIMEN_NAME = ['regimen_name'] * len(AGENT)
        CLINICAL_TRIAL = ['clinical_trial'] * len(AGENT)
        CENTER = ['redcap_data_access_group'] * len(AGENT)
        # All parallel lists must be equally long or the column-wise
        # assignment below would misalign attributes across rows
        lengths = [
            len(START_DATE),
            len(STOP_DATE),
            len(TREATMENT_TYPE),
            len(AGENT),
            len(THERAPY_DRUG_OTHER),
            len(THERAPY_DRUG_DISCONTINUE),
            len(THERAPY_DRUG_REASON),
            len(RXNORM_ID),
            len(THERAPY_DRUG_OTHER_NAME),
            len(THERAPY_DRUG_START_ESTIMATED),
            len(THERAPY_DRUG_END_ESTIMATED),
            len(TREATMENT_TYPE)]
        assert len(set(lengths)) == 1, "Lengths must all be the same"
        total = pd.DataFrame()
        # One timeline frame per patient
        for i in range(len(treatmentDf)):
            timelineDF = pd.DataFrame()
            timelineDF['PATIENT_ID'] = \
                [treatmentDf['patient_id'].iloc[i]]*len(START_DATE)
            timelineDF['START_DATE'] = \
                treatmentDf.iloc[i][START_DATE].reset_index(drop=True)
            timelineDF['STOP_DATE'] = \
                treatmentDf.iloc[i][STOP_DATE].reset_index(drop=True)
            timelineDF['EVENT_TYPE'] = EVENT_TYPE
            # has to be in this order of PATIENT_ID, START, STOP and EVENT_TYPE
            timelineDF['TREATMENT_TYPE'] = TREATMENT_TYPE
            timelineDF['SUBTYPE'] = SUBTYPE
            timelineDF['AGENT'] = \
                treatmentDf.iloc[i][AGENT].reset_index(drop=True)
            timelineDF['RXNORM_ID'] = \
                treatmentDf.iloc[i][RXNORM_ID].reset_index(drop=True)
            timelineDF['THERAPY_DRUG_OTHER'] = \
                treatmentDf.iloc[i][THERAPY_DRUG_OTHER].reset_index(drop=True)
            timelineDF['THERAPY_DRUG_DISCONTINUE'] = treatmentDf.iloc[i][
                THERAPY_DRUG_DISCONTINUE].reset_index(drop=True)
            timelineDF['THERAPY_DRUG_REASON'] = \
                treatmentDf.iloc[i][THERAPY_DRUG_REASON].reset_index(drop=True)
            timelineDF['THERAPY_DRUG_OTHER_NAME'] = treatmentDf.iloc[i][
                THERAPY_DRUG_OTHER_NAME].reset_index(drop=True)
            timelineDF['THERAPY_DRUG_START_ESTIMATED'] = treatmentDf.iloc[i][
                THERAPY_DRUG_START_ESTIMATED].reset_index(drop=True)
            timelineDF['THERAPY_DRUG_END_ESTIMATED'] = treatmentDf.iloc[i][
                THERAPY_DRUG_END_ESTIMATED].reset_index(drop=True)
            timelineDF['TREATMENT_SETTING'] = \
                treatmentDf.iloc[i][TREATMENT_SETTING].reset_index(drop=True)
            timelineDF['THERAPY_RESPONSE'] = \
                treatmentDf.iloc[i][THERAPY_RESPONSE].reset_index(drop=True)
            timelineDF['LINE_START'] = \
                treatmentDf.iloc[i][LINE_START].reset_index(drop=True)
            timelineDF['REGIMEN_NAME'] = \
                treatmentDf.iloc[i][REGIMEN_NAME].reset_index(drop=True)
            timelineDF['CLINICAL_TRIAL'] = \
                treatmentDf.iloc[i][CLINICAL_TRIAL].reset_index(drop=True)
            timelineDF['CENTER'] = \
                treatmentDf.iloc[i][CENTER].reset_index(drop=True)
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # this module targets an older pandas.
            total = total.append(timelineDF, sort=False)
        # remove all without START dates
        total = total[~total['START_DATE'].isnull()]
        total['SP'] = self._SPONSORED_PROJECT
        total['STATUS'] = ''
        total['START_DATE'] = total['START_DATE'].astype('float')
        total['STOP_DATE'] = total['STOP_DATE'].astype('float')
        total['RXNORM_ID'] = total['RXNORM_ID'].astype('float')
        total['LINE_START'] = total['LINE_START'].astype('float')
        total.drop_duplicates(inplace=True)
        # Anchor point is MET_DX_DATE_INT: look up each row's patient in
        # finalPatientDf (assumes every patient exists there with a
        # non-null MET_DX_DATE_INT — a miss raises IndexError/ValueError)
        date_met_int = [
            float(finalPatientDf['MET_DX_DATE_INT'][
                finalPatientDf['PATIENT_ID'] == patient].values[0])
            for patient in total['PATIENT_ID']]
        total['START_DATE'] = total['START_DATE'] - date_met_int
        total['STOP_DATE'] = total['STOP_DATE'] - date_met_int
        total['LINE_START'] = total['LINE_START'] - date_met_int
        return(total)

    def createSpecimenDf(self, sampleDf, patientDf):
        """Build SPECIMEN timeline rows from sample + patient tables.

        START_DATE is the sequencing age re-anchored on the first
        distant metastasis date; rows lacking either value are dropped
        before the integer subtraction.
        """
        clinicalDf = sampleDf.merge(patientDf, on="PATIENT_ID", how="outer")
        clinicalDf = clinicalDf[~clinicalDf.AGE_AT_SEQ_REPORT.isnull()]
        clinicalDf = \
            clinicalDf[~clinicalDf.DATE_FIRST_DISTANT_MET_INT.isnull()]
        specimen = pd.DataFrame()
        specimen['PATIENT_ID'] = clinicalDf['PATIENT_ID']
        specimen['SAMPLE_ID'] = clinicalDf['SAMPLE_ID']
        specimen['START_DATE'] = \
            clinicalDf.AGE_AT_SEQ_REPORT.astype(int) - \
            clinicalDf.DATE_FIRST_DISTANT_MET_INT.astype(int)
        specimen['EVENT_TYPE'] = 'SPECIMEN'
        specimen['SAMPLE_NOTES'] = clinicalDf.AGE_AT_SEQ_REPORT
        specimen = specimen[~specimen['START_DATE'].isnull()]
        return(specimen)
| 56.103321 | 272 | 0.67331 | import os
import random
import string
import pandas as pd
import synapseclient
from . import new_redcap_export_mapping
from . import sp_redcap_export_mapping
class Akt1(sp_redcap_export_mapping.SponsoredProjectRunner):
_SPONSORED_PROJECT = "AKT1"
_DATES = ["death_date_int","follow_up_date_int","primary_dx_date_int","lrr_date_int","mets_disease_date_int","sample_date_int_1",
"sequence_report_date_int_1","sequence_report_date_int_1_static","sample_date_int_2","sample_date_int_2_static",
"sequence_report_date_int_2","sequence_report_date_int_2_static","sequence_report_date_int_3_static",
"OS_MONTHS","OS_MONTHS_PRIMARY"]
_CASE_LIST_MAF_SAMPLES_TEMPLATE = "cancer_study_identifier: genie_akt1\nstable_id: genie_akt1_sequenced\ncase_list_category: all_cases_with_mutation_data\ncase_list_name: Sequenced Tumors\ncase_list_description: All sequenced samples (%s samples)\ncase_list_ids: %s"
_CASE_LIST_PATH = os.path.join(_SPONSORED_PROJECT,'case_lists')
_UNMAPPED_SYN_ID = "syn11066652"
_MAPPED_SYN_ID = "syn8404878"
_CASE_LIST_SYN_ID = "syn10145838"
_SP_SYN_ID = "syn8363325"
_REDCAP_TO_CBIOMAPPING_SYNID = "syn8220815"
_SP_REDCAP_EXPORTS_SYNID = "syn8404875"
_NUM_SAMPLE_COLS = 3
def addOSMonths(self, sponsoredProject_mapped_df):
sponsoredProject_mapped_df['OS_MONTHS'] = sponsoredProject_mapped_df['death_date_int'] - sponsoredProject_mapped_df['mets_disease_date_int']
sponsoredProject_mapped_df['OS_MONTHS_PRIMARY'] = sponsoredProject_mapped_df['death_date_int'] - sponsoredProject_mapped_df['primary_dx_date_int']
return(sponsoredProject_mapped_df)
def createTemporaryGenieId(self, x, tempIdMapping):
uniqId = x['record_id'] + x['redcap_data_access_group']
tempIdMap = tempIdMapping['patientId'][tempIdMapping['uniqueId'] == uniqId]
tempId = 'GENIE-%s-%s' % (x['redcap_data_access_group'],''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)))
if len(tempIdMap) == 0:
return(tempId)
else:
return(tempIdMap.values[0])
ed_df, tempIdMappingDf):
print("RENAMING %s NULL PATIENTS" % sum(sponsoredProject_mapped_df['genie_patient_id'].isnull()))
allNullPatients = sponsoredProject_mapped_df[['record_id','redcap_data_access_group','genie_patient_id']][sponsoredProject_mapped_df['genie_patient_id'].isnull()]
temporaryIds = allNullPatients.apply(lambda x: self.createTemporaryGenieId(x, tempIdMappingDf), axis =1)
if sponsoredProject_mapped_df['genie_patient_id'].isnull().any():
sponsoredProject_mapped_df['genie_patient_id'][sponsoredProject_mapped_df['genie_patient_id'].isnull()] = temporaryIds
assert sum(sponsoredProject_mapped_df['genie_patient_id'].isnull()) ==0, "Make sure there are no null genie patient Ids"
sponsoredProject_mapped_df['genie_patient_id'] = sponsoredProject_mapped_df.apply(lambda x: self.checkGenieId(x, 'redcap_data_access_group','genie_patient_id'), axis=1)
sponsoredProject_mapped_df.reset_index(inplace=True,drop=True)
return(sponsoredProject_mapped_df, temporaryIds)
def makeTimeLineDf(self, redCapExportDf, therapyRange = 18):
START_DATE = []
STOP_DATE = []
TREATMENT_TYPE = []
SUBTYPE = []
AGENT = []
THERAPY_DRUG_CLINTRIAL = []
THERAPY_DRUG_AZD5363 = []
THERAPY_DRUG_OTHER = []
THERAPY_DRUG_DISCONTINUE = []
THERAPY_DRUG_REASON = []
THERAPY_COMBO_YN = []
THERAPY_COMBO_NUM = []
for therapyNumber in range(1,therapyRange):
therapyCols = [i for i in redCapExportDf if "therapy%d_" % therapyNumber in i]
START_DATE.extend([i for i in therapyCols if "start_int" in i])
STOP_DATE.extend([i for i in therapyCols if "end_int" in i])
AGENT.extend([i for i in therapyCols if len(i.split("_")) == 2])
THERAPY_DRUG_CLINTRIAL.extend([i for i in therapyCols if "clintrial" in i])
THERAPY_DRUG_AZD5363.extend([i for i in therapyCols if "azd" in i])
THERAPY_DRUG_OTHER.extend([i for i in therapyCols if "other" in i])
THERAPY_DRUG_DISCONTINUE.extend([i for i in therapyCols if "discontinue" in i])
THERAPY_DRUG_REASON.extend([i for i in therapyCols if "reason" in i])
THERAPY_COMBO_YN.extend([i for i in therapyCols if "combo_yn" in i] * len([i for i in therapyCols if "start_int" in i]))
THERAPY_COMBO_NUM.extend([i for i in therapyCols if "combo_num" in i]* len([i for i in therapyCols if "start_int" in i]))
TREATMENT_TYPE.extend(["Medical Therapy %d" % therapyNumber]* len([i for i in therapyCols if "start_int" in i]))
SUBTYPE.extend(["Chemo/Target/Immuno etc."] * len([i for i in therapyCols if "start_int" in i]))
ovarian = [i for i in redCapExportDf if "ovariansup" in i]
ovarian_len = len([i for i in ovarian if "start_int" in i])
START_DATE.extend([i for i in ovarian if "start_int" in i])
STOP_DATE.extend([i for i in ovarian if "end_int" in i])
TREATMENT_TYPE.extend(["Ovarian Suppression At Primary"] * ovarian_len)
SUBTYPE.extend(["Ovarian Suppression"] * ovarian_len)
AGENT.extend(['']*ovarian_len)
THERAPY_DRUG_CLINTRIAL.extend(['']*ovarian_len)
THERAPY_DRUG_AZD5363.extend(['']*ovarian_len)
THERAPY_DRUG_OTHER.extend(['']*ovarian_len)
THERAPY_DRUG_DISCONTINUE.extend(['']*ovarian_len)
THERAPY_DRUG_REASON.extend(['']*ovarian_len)
THERAPY_COMBO_YN.extend(['']*ovarian_len)
THERAPY_COMBO_NUM.extend(['']*ovarian_len)
hormo = [i for i in redCapExportDf if "hormo" in i]
hormo_len = len([i for i in hormo if "start_int" in i])
START_DATE.extend([i for i in hormo if "start_int" in i])
STOP_DATE.extend([i for i in hormo if "end_int" in i])
THERAPY_DRUG_CLINTRIAL.extend([i for i in hormo if "clintrial" in i])
THERAPY_DRUG_AZD5363.extend(['']*hormo_len)
THERAPY_DRUG_OTHER.extend([i for i in hormo if "other" in i])
THERAPY_DRUG_DISCONTINUE.extend([i for i in hormo if "discon" in i])
THERAPY_DRUG_REASON.extend([i for i in hormo if "reason" in i])
AGENT.extend([i for i in hormo if "reason" not in i and "discon" not in i and "other" not in i and "clintrial" not in i and "start_int" not in i and "end_int" not in i and "therapy" not in i])
THERAPY_COMBO_YN.extend(['']*hormo_len)
THERAPY_COMBO_NUM.extend(['']*hormo_len)
SUBTYPE.extend(["Hormone Therapy"] * hormo_len)
TREATMENT_TYPE.extend(["Medical Therapy 1"] * hormo_len)
EVENT_TYPE = ["TREATMENT"]*len(AGENT)
metaDiagnosis = pd.DataFrame()
metaDiagnosis['PATIENT_ID'] = redCapExportDf['genie_patient_id']
metaDiagnosis['START_DATE'] = 0
metaDiagnosis['EVENT_TYPE'] = 'STATUS'
metaDiagnosis['STATUS'] = 'Metastatic Diagnosis'
metaDiagnosis = metaDiagnosis[~metaDiagnosis['START_DATE'].isnull()]
removeCols = START_DATE+STOP_DATE+AGENT+THERAPY_DRUG_CLINTRIAL+THERAPY_DRUG_AZD5363+THERAPY_DRUG_OTHER+THERAPY_DRUG_DISCONTINUE+THERAPY_DRUG_REASON+THERAPY_COMBO_YN+THERAPY_COMBO_NUM
lengths = set([
len(START_DATE),
len(STOP_DATE),
len(TREATMENT_TYPE),
len(SUBTYPE),
len(AGENT),
len(THERAPY_DRUG_CLINTRIAL),
len(THERAPY_DRUG_AZD5363),
len(THERAPY_DRUG_OTHER),
len(THERAPY_DRUG_DISCONTINUE),
len(THERAPY_DRUG_REASON),
len(THERAPY_COMBO_YN),
len(THERAPY_COMBO_NUM),
len(EVENT_TYPE)])
assert len(lengths) == 1,"Lengths must all be the same"
total = pd.DataFrame()
for i in range(len(redCapExportDf)):
timelineDF = pd.DataFrame()
timelineDF['PATIENT_ID'] = [redCapExportDf['genie_patient_id'][i]]*len(START_DATE)
timelineDF['START_DATE'] = redCapExportDf.iloc[i][START_DATE].reset_index(drop=True) - redCapExportDf.iloc[i]['mets_disease_date_int']
timelineDF['STOP_DATE'] = redCapExportDf.iloc[i][STOP_DATE].reset_index(drop=True) - redCapExportDf.iloc[i]['mets_disease_date_int']
timelineDF['EVENT_TYPE'] = EVENT_TYPE
timelineDF['TREATMENT_TYPE'] = TREATMENT_TYPE
timelineDF['SUBTYPE'] = SUBTYPE
timelineDF['AGENT'] = redCapExportDf.iloc[i][AGENT].reset_index(drop=True)
timelineDF['THERAPY_DRUG_CLINTRIAL'] = redCapExportDf.iloc[i][THERAPY_DRUG_CLINTRIAL].reset_index(drop=True)
timelineDF['THERAPY_DRUG_AZD5363'] = redCapExportDf.iloc[i][THERAPY_DRUG_AZD5363].reset_index(drop=True)
timelineDF['THERAPY_DRUG_OTHER'] = redCapExportDf.iloc[i][THERAPY_DRUG_OTHER].reset_index(drop=True)
timelineDF['THERAPY_DRUG_DISCONTINUE'] = redCapExportDf.iloc[i][THERAPY_DRUG_DISCONTINUE].reset_index(drop=True)
timelineDF['THERAPY_DRUG_REASON'] = redCapExportDf.iloc[i][THERAPY_DRUG_REASON].reset_index(drop=True)
timelineDF['THERAPY_COMBO_YN'] = redCapExportDf.iloc[i][THERAPY_COMBO_YN].reset_index(drop=True)
timelineDF['THERAPY_COMBO_NUM'] = redCapExportDf.iloc[i][THERAPY_COMBO_NUM].reset_index(drop=True)
total = total.append(timelineDF)
total['STATUS'] = ''
ordering = total.columns
total = total.append(metaDiagnosis)
total = total[ordering]
return(total,removeCols)
def getSpecimen(self, getTimelineSpecimen):
specimen = pd.DataFrame()
specimen['PATIENT_ID'] = getTimelineSpecimen['PATIENT_ID']
specimen['START_DATE'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC - getTimelineSpecimen.METS_DISEASE_DATE_INT
specimen['EVENT_TYPE'] = 'SPECIMEN'
specimen['SAMPLE_ID'] = getTimelineSpecimen['SAMPLE_ID']
specimen['SAMPLE_NOTES'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC
specimen = specimen[~specimen['START_DATE'].isnull()]
return(specimen)
class Erbb2(sp_redcap_export_mapping.SponsoredProjectRunner):
_SPONSORED_PROJECT = "ERBB2"
_DATES = ['follow_up_date_int','date_death_int','primary_dx_date_int','lrr_date_int','date_first_met_int',
'sample_date_int_1','seq_report_date_int_1','sample_date_int_2','seq_report_date_int_2','sample_date_int_3',
'sequence_report_date_int_3','sample_date_int_4','sequence_report_date_int_4','sample_date_int_5','sequence_report_date_int_5',
'sample_date_int_6','seq_report_date_int_6','sample_date_int_7','seq_report_date_int_7','sample_date_int_8',
'sequence_report_date_int_8','sample_date_int_9','sequence_report_date_int_9','sample_date_int_10',
'sequence_report_date_int_10','date_bso_int','OS_MONTHS','OS_MONTHS_PRIMARY']
_CASE_LIST_MAF_SAMPLES_TEMPLATE = "cancer_study_identifier: genie_erbb2\nstable_id: genie_erbb2_sequenced\ncase_list_category: all_cases_with_mutation_data\ncase_list_name: Sequenced Tumors\ncase_list_description: All sequenced samples (%s samples)\ncase_list_ids: %s"
_CASE_LIST_PATH = os.path.join(_SPONSORED_PROJECT,'case_lists')
_UNMAPPED_SYN_ID = "syn8356977"
_MAPPED_SYN_ID = "syn8367692"
_CASE_LIST_SYN_ID = "syn10145925"
_SP_SYN_ID = "syn8363326"
_REDCAP_TO_CBIOMAPPING_SYNID = "syn8363731"
_SP_REDCAP_EXPORTS_SYNID = "syn8322425"
_NUM_SAMPLE_COLS = 10
def addOSMonths(self, sponsoredProject_mapped_df):
sponsoredProject_mapped_df['OS_MONTHS'] = sponsoredProject_mapped_df['date_death_int'] - sponsoredProject_mapped_df['date_first_met_int']
sponsoredProject_mapped_df['OS_MONTHS_PRIMARY'] = sponsoredProject_mapped_df['date_death_int'] - sponsoredProject_mapped_df['primary_dx_date_int']
return(sponsoredProject_mapped_df)
def createTemporaryGenieId(self, x, tempIdMapping, patientIdCol):
uniqId = x['record_id_patient_id'] + x['redcap_data_access_group']
if sum(tempIdMapping['uniqueId'] == uniqId) == 0:
tempId = 'GENIE-%s-%s' % (x['redcap_data_access_group'],''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)))
self.syn.store(synapseclient.Table(self.syn.get("syn10164044"),[[uniqId, tempId]]))
return(tempId)
else:
return(tempIdMapping[tempIdMapping['uniqueId'] == uniqId]['temporaryId'].values[0])
def createNullPatients(self, sponsoredProject_mapped_df, tempIdMappingDf):
_access_group'] = [i.upper() for i in sponsoredProject_mapped_df['redcap_data_access_group']]
allNullPatients = sponsoredProject_mapped_df[['record_id_patient_id','redcap_data_access_group']][sponsoredProject_mapped_df['record_id_patient_id'].isnull()]
temporaryIds = allNullPatients.apply(lambda x: self.createTemporaryGenieId(x, tempIdMappingDf, 'record_id_patient_id'), axis =1)
if not temporaryIds.empty:
sponsoredProject_mapped_df['record_id_patient_id'][sponsoredProject_mapped_df['record_id_patient_id'].isnull()] = temporaryIds
assert sum(sponsoredProject_mapped_df['record_id_patient_id'].isnull()) == 0, "Make sure there are no null genie patient Ids"
sponsoredProject_mapped_df['record_id_patient_id'] = sponsoredProject_mapped_df.apply(lambda x: self.checkGenieId(x, 'redcap_data_access_group','record_id_patient_id'), axis=1)
return(sponsoredProject_mapped_df, temporaryIds)
def makeTimeLineDf(self, redCapExportDf, therapyRange = 16):
START_DATE = []
STOP_DATE = []
TREATMENT_TYPE = []
SUBTYPE = []
AGENT = []
THERAPY_RESPONSE = []
THERAPY_DRUG_OTHER = []
THERAPY_DRUG_DISCONTINUE = []
THERAPY_DRUG_REASON = []
THERAPY_COMBO_YN = []
THERAPY_COMBO_NUM = []
ADD_TREATMENT = []
TREATMENT_SETTING = []
for therapyNumber in range(1,therapyRange):
therapyCols = [i for i in redCapExportDf if ("therapy%d_" % therapyNumber in i or "combo_therapy_yn_%d" %therapyNumber == i or "add_treatment_%d" % therapyNumber == i or "treatment_setting_%d" % therapyNumber == i)]
START_DATE.extend([i for i in therapyCols if "start_int" in i])
STOP_DATE.extend([i for i in therapyCols if "end_int" in i])
AGENT.extend([i for i in therapyCols if len(i.split("_")) == 2 and "response" not in i and "ctdrug" not in i])
THERAPY_DRUG_OTHER.extend([i for i in therapyCols if "other" in i])
THERAPY_DRUG_DISCONTINUE.extend([i for i in therapyCols if "discon" in i])
THERAPY_DRUG_REASON.extend([i for i in therapyCols if "reason" in i])
THERAPY_COMBO_YN.extend([i for i in therapyCols if "combo_therapy_yn" in i] * len([i for i in therapyCols if "start_int" in i]))
THERAPY_COMBO_NUM.extend([i for i in therapyCols if "combo_num" in i]* len([i for i in therapyCols if "start_int" in i]))
TREATMENT_TYPE.extend(["Medical Therapy %d" % therapyNumber]* len([i for i in therapyCols if "start_int" in i]))
SUBTYPE.extend(["Chemo/Target/Immuno etc."] * len([i for i in therapyCols if "start_int" in i]))
THERAPY_RESPONSE.extend([i for i in therapyCols if "response" in i] *len([i for i in therapyCols if "start_int" in i]))
ADD_TREATMENT.extend([i for i in therapyCols if "add_treatment" in i] * len([i for i in therapyCols if "start_int" in i]))
TREATMENT_SETTING.extend([i for i in therapyCols if "treatment_setting" in i] * len([i for i in therapyCols if "start_int" in i]))
EVENT_TYPE = ["TREATMENT"]*len(AGENT)
ADD_TREATMENT.extend(['']*4)
metaDiagnosis = pd.DataFrame()
metaDiagnosis['PATIENT_ID'] = redCapExportDf['record_id_patient_id']
metaDiagnosis['START_DATE'] = 0
metaDiagnosis['EVENT_TYPE'] = 'STATUS'
metaDiagnosis['STATUS'] = 'Metastatic Diagnosis'
metaDiagnosis = metaDiagnosis[~metaDiagnosis['START_DATE'].isnull()]
removeCols = START_DATE+STOP_DATE+AGENT+THERAPY_DRUG_OTHER+THERAPY_RESPONSE+THERAPY_DRUG_DISCONTINUE+THERAPY_DRUG_REASON+THERAPY_COMBO_YN+THERAPY_COMBO_NUM+ADD_TREATMENT + TREATMENT_SETTING
lengths = set([
len(START_DATE),
len(STOP_DATE),
len(TREATMENT_TYPE),
len(SUBTYPE),
len(AGENT),
len(THERAPY_RESPONSE),
len(THERAPY_DRUG_OTHER),
len(TREATMENT_SETTING),
len(ADD_TREATMENT),
len(THERAPY_DRUG_DISCONTINUE),
len(THERAPY_DRUG_REASON),
len(THERAPY_COMBO_YN),
len(THERAPY_COMBO_NUM),
len(EVENT_TYPE)])
assert len(lengths) == 1,"Lengths must all be the same"
total = pd.DataFrame()
for i in range(len(redCapExportDf)):
timelineDF = pd.DataFrame()
timelineDF['PATIENT_ID'] = [redCapExportDf['record_id_patient_id'][i]]*len(START_DATE)
if not pd.isnull(redCapExportDf.iloc[i]['date_first_met_int']):
timelineDF['START_DATE'] = [start if pd.isnull(start) else int(start) - int(redCapExportDf.iloc[i]['date_first_met_int']) for start in redCapExportDf.iloc[i][START_DATE].reset_index(drop=True)]
timelineDF['STOP_DATE'] = [end if pd.isnull(end) else int(end) - int(redCapExportDf.iloc[i]['date_first_met_int']) for end in redCapExportDf.iloc[i][STOP_DATE].reset_index(drop=True)]
else:
timelineDF['START_DATE'] = pd.np.nan
timelineDF['STOP_DATE'] = pd.np.nan
timelineDF['EVENT_TYPE'] = EVENT_TYPE
timelineDF['TREATMENT_TYPE'] = TREATMENT_TYPE
timelineDF['SUBTYPE'] = SUBTYPE
timelineDF['AGENT'] = redCapExportDf.iloc[i][AGENT].reset_index(drop=True)
timelineDF['THERAPY_DRUG_OTHER'] = redCapExportDf.iloc[i][THERAPY_DRUG_OTHER].reset_index(drop=True)
timelineDF['THERAPY_DRUG_DISCONTINUE'] = redCapExportDf.iloc[i][THERAPY_DRUG_DISCONTINUE].reset_index(drop=True)
timelineDF['THERAPY_DRUG_REASON'] = redCapExportDf.iloc[i][THERAPY_DRUG_REASON].reset_index(drop=True)
timelineDF['THERAPY_COMBO_YN'] = redCapExportDf.iloc[i][THERAPY_COMBO_YN].reset_index(drop=True)
timelineDF['THERAPY_COMBO_NUM'] = redCapExportDf.iloc[i][THERAPY_COMBO_NUM].reset_index(drop=True)
total = total.append(timelineDF)
total['STATUS'] = ''
ordering = total.columns
total = total.append(metaDiagnosis)
total = total[ordering]
return(total, removeCols)
def getSpecimen(self, getTimelineSpecimen):
specimen = pd.DataFrame()
specimen['PATIENT_ID'] = getTimelineSpecimen['PATIENT_ID']
getTimelineSpecimen = getTimelineSpecimen[~getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC.isnull()]
getTimelineSpecimen = getTimelineSpecimen[~getTimelineSpecimen.METS_DISEASE_DATE_INT.isnull()]
specimen['START_DATE'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC.astype(int) - getTimelineSpecimen.METS_DISEASE_DATE_INT.astype(int)
specimen['EVENT_TYPE'] = 'SPECIMEN'
specimen['SAMPLE_ID'] = getTimelineSpecimen['SAMPLE_ID']
specimen['SAMPLE_NOTES'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC
specimen = specimen[~specimen['START_DATE'].isnull()]
return(specimen)
class Fgfr4(new_redcap_export_mapping.SponsoredProjectRunner):
_DATA_ELEMENT_SYN_ID = "syn12032922"
_SPONSORED_PROJECT = 'FGFR4'
_CASE_LIST_PATH = os.path.join(_SPONSORED_PROJECT, 'case_lists')
_NUM_COUNTS = 4
_REDCAP_TO_CBIOMAPPING_SYNID = "syn15572052"
_UNLABELLED_SYN_ID = "syn15341849"
_LABELLED_SYN_ID = "syn15341838"
_SP_REDCAP_EXPORTS_SYNID = "syn11812526"
_SP_SYN_ID = "syn14721789"
_CASE_LIST_MAF_SAMPLES_TEMPLATE = (
"cancer_study_identifier: genie_fgfr4\n"
"stable_id: genie_fgfr4_sequenced\n"
"case_list_category: all_cases_with_mutation_data\n"
"case_list_name: Sequenced Tumors\n"
"case_list_description: All sequenced samples "
"(%s samples)\ncase_list_ids: %s")
_CASE_LIST_SYN_ID = "syn14721794"
# Must add new date fields to the DATE variable along with add
# to the mapping table: syn8220815
# '''
def makeTimeLineDf(
self, treatmentDf, finalPatientDf, therapyRange=5):
START_DATE = []
STOP_DATE = []
TREATMENT_TYPE = []
SUBTYPE = []
AGENT = []
THERAPY_RESPONSE = []
THERAPY_DRUG_OTHER = []
THERAPY_DRUG_DISCONTINUE = []
THERAPY_DRUG_REASON = []
TREATMENT_SETTING = []
RXNORM_ID = []
THERAPY_DRUG_START_ESTIMATED = []
THERAPY_DRUG_OTHER_NAME = []
THERAPY_DRUG_END_ESTIMATED = []
for therapyNumber in range(1, therapyRange):
therapyCols = [
i for i in treatmentDf
if "therapy_drug%d" % therapyNumber in i]
startCols = [i for i in therapyCols if "start_int" in i]
START_DATE.extend(startCols)
STOP_DATE.extend([i for i in therapyCols if "end_int" in i])
AGENT.extend([
i for i in therapyCols if "name" in i and "other" not in i])
RXNORM_ID.extend([
i for i in therapyCols
if i == "therapy_drug%d" % therapyNumber])
THERAPY_DRUG_OTHER.extend([
i for i in therapyCols if "other" in i and 'name' not in i])
THERAPY_DRUG_DISCONTINUE.extend([
i for i in therapyCols if "discon" in i])
THERAPY_DRUG_REASON.extend([
i for i in therapyCols if "reason" in i])
THERAPY_DRUG_OTHER_NAME.extend([
i for i in therapyCols if "other_name" in i])
THERAPY_DRUG_START_ESTIMATED.extend([
i for i in therapyCols if "start_estimated" in i])
THERAPY_DRUG_END_ESTIMATED.extend([
i for i in therapyCols if "end_estimated" in i])
TREATMENT_TYPE.extend([
"Medical Therapy %d" % therapyNumber] * len(startCols))
SUBTYPE = ["Chemo/Target/Immuno etc."] * len(AGENT)
TREATMENT_SETTING = ['treatment_setting'] * len(AGENT)
THERAPY_RESPONSE = ['therapy_response'] * len(AGENT)
EVENT_TYPE = ["TREATMENT"]*len(AGENT)
LINE_START = ['line_start_int'] * len(AGENT)
REGIMEN_NAME = ['regimen_name'] * len(AGENT)
CLINICAL_TRIAL = ['clinical_trial'] * len(AGENT)
CENTER = ['redcap_data_access_group'] * len(AGENT)
lengths = [
len(START_DATE),
len(STOP_DATE),
len(TREATMENT_TYPE),
len(AGENT),
len(THERAPY_DRUG_OTHER),
len(THERAPY_DRUG_DISCONTINUE),
len(THERAPY_DRUG_REASON),
len(RXNORM_ID),
len(THERAPY_DRUG_OTHER_NAME),
len(THERAPY_DRUG_START_ESTIMATED),
len(THERAPY_DRUG_END_ESTIMATED),
len(TREATMENT_TYPE)]
assert len(set(lengths)) == 1, "Lengths must all be the same"
total = pd.DataFrame()
for i in range(len(treatmentDf)):
timelineDF = pd.DataFrame()
timelineDF['PATIENT_ID'] = \
[treatmentDf['patient_id'].iloc[i]]*len(START_DATE)
timelineDF['START_DATE'] = \
treatmentDf.iloc[i][START_DATE].reset_index(drop=True)
timelineDF['STOP_DATE'] = \
treatmentDf.iloc[i][STOP_DATE].reset_index(drop=True)
timelineDF['EVENT_TYPE'] = EVENT_TYPE
timelineDF['TREATMENT_TYPE'] = TREATMENT_TYPE
timelineDF['SUBTYPE'] = SUBTYPE
timelineDF['AGENT'] = \
treatmentDf.iloc[i][AGENT].reset_index(drop=True)
timelineDF['RXNORM_ID'] = \
treatmentDf.iloc[i][RXNORM_ID].reset_index(drop=True)
timelineDF['THERAPY_DRUG_OTHER'] = \
treatmentDf.iloc[i][THERAPY_DRUG_OTHER].reset_index(drop=True)
timelineDF['THERAPY_DRUG_DISCONTINUE'] = treatmentDf.iloc[i][
THERAPY_DRUG_DISCONTINUE].reset_index(drop=True)
timelineDF['THERAPY_DRUG_REASON'] = \
treatmentDf.iloc[i][THERAPY_DRUG_REASON].reset_index(drop=True)
timelineDF['THERAPY_DRUG_OTHER_NAME'] = treatmentDf.iloc[i][
THERAPY_DRUG_OTHER_NAME].reset_index(drop=True)
timelineDF['THERAPY_DRUG_START_ESTIMATED'] = treatmentDf.iloc[i][
THERAPY_DRUG_START_ESTIMATED].reset_index(drop=True)
timelineDF['THERAPY_DRUG_END_ESTIMATED'] = treatmentDf.iloc[i][
THERAPY_DRUG_END_ESTIMATED].reset_index(drop=True)
timelineDF['TREATMENT_SETTING'] = \
treatmentDf.iloc[i][TREATMENT_SETTING].reset_index(drop=True)
timelineDF['THERAPY_RESPONSE'] = \
treatmentDf.iloc[i][THERAPY_RESPONSE].reset_index(drop=True)
timelineDF['LINE_START'] = \
treatmentDf.iloc[i][LINE_START].reset_index(drop=True)
timelineDF['REGIMEN_NAME'] = \
treatmentDf.iloc[i][REGIMEN_NAME].reset_index(drop=True)
timelineDF['CLINICAL_TRIAL'] = \
treatmentDf.iloc[i][CLINICAL_TRIAL].reset_index(drop=True)
timelineDF['CENTER'] = \
treatmentDf.iloc[i][CENTER].reset_index(drop=True)
total = total.append(timelineDF, sort=False)
total = total[~total['START_DATE'].isnull()]
total['SP'] = self._SPONSORED_PROJECT
total['STATUS'] = ''
total['START_DATE'] = total['START_DATE'].astype('float')
total['STOP_DATE'] = total['STOP_DATE'].astype('float')
total['RXNORM_ID'] = total['RXNORM_ID'].astype('float')
total['LINE_START'] = total['LINE_START'].astype('float')
total.drop_duplicates(inplace=True)
date_met_int = [
float(finalPatientDf['MET_DX_DATE_INT'][
finalPatientDf['PATIENT_ID'] == patient].values[0])
for patient in total['PATIENT_ID']]
total['START_DATE'] = total['START_DATE'] - date_met_int
total['STOP_DATE'] = total['STOP_DATE'] - date_met_int
total['LINE_START'] = total['LINE_START'] - date_met_int
return(total)
def createSpecimenDf(self, sampleDf, patientDf):
clinicalDf = sampleDf.merge(patientDf, on="PATIENT_ID", how="outer")
clinicalDf = clinicalDf[~clinicalDf.AGE_AT_SEQ_REPORT.isnull()]
clinicalDf = \
clinicalDf[~clinicalDf.DATE_FIRST_DISTANT_MET_INT.isnull()]
specimen = pd.DataFrame()
specimen['PATIENT_ID'] = clinicalDf['PATIENT_ID']
specimen['SAMPLE_ID'] = clinicalDf['SAMPLE_ID']
specimen['START_DATE'] = \
clinicalDf.AGE_AT_SEQ_REPORT.astype(int) - \
clinicalDf.DATE_FIRST_DISTANT_MET_INT.astype(int)
specimen['EVENT_TYPE'] = 'SPECIMEN'
specimen['SAMPLE_NOTES'] = clinicalDf.AGE_AT_SEQ_REPORT
specimen = specimen[~specimen['START_DATE'].isnull()]
return(specimen)
| true | true |
f733f64503456306ed934e0680bde4bebc9a1936 | 6,334 | py | Python | Steg_Tool/steg.py | Affanmir/Awesome-Python-Scripts | bba0512e1c580d605205744ece878da13f2c7661 | [
"MIT"
] | 1,026 | 2018-10-02T18:51:12.000Z | 2022-03-31T13:45:14.000Z | Steg_Tool/steg.py | Affanmir/Awesome-Python-Scripts | bba0512e1c580d605205744ece878da13f2c7661 | [
"MIT"
] | 164 | 2018-10-02T18:37:40.000Z | 2021-11-18T13:29:54.000Z | Steg_Tool/steg.py | Affanmir/Awesome-Python-Scripts | bba0512e1c580d605205744ece878da13f2c7661 | [
"MIT"
] | 521 | 2018-10-02T18:15:40.000Z | 2022-03-26T12:10:15.000Z | #Image Stego using LSB
import cv2
def encode(input_image_name, output_image_name, file_name):
input_image = cv2.imread(input_image_name)
height, width, nbchannels = input_image.shape
size = width*height
current_width = 0
current_height = 0
current_channel = 0
maskonevalues = [1, 2, 4, 8, 16, 32, 64, 128]
maskone = maskonevalues.pop(0)
maskzerovalues = [254, 253, 251, 247, 239, 223, 191, 127]
maskzero = maskzerovalues.pop(0)
data = open(file_name, "rb").read()
length = len(data)
if(width*height*nbchannels < length + 64):
raise Exception("Not enough space to hold all steganographic data")
binary_value = bin(length)[2:]
if(len(binary_value) > 64):
raise Exception("Binary Value larger than expected")
else:
while(len(binary_value) < 64):
binary_value = "0" + binary_value
for c in binary_value:
value = list(input_image[current_height, current_width])
if(int(c) == 1):
value[current_channel] = int(value[current_channel]) | maskone
else:
value[current_channel] = int(value[current_channel]) & maskzero
input_image[current_height, current_width] = tuple(value)
if(current_channel == nbchannels-1):
current_channel = 0
if(current_width == width-1):
current_width = 0
if(current_height == height-1):
current_height = 0
if maskone == 128:
raise Exception("No more space available in image")
else:
maskone = maskonevalues.pop(0)
maskzero = maskzerovalues.pop(0)
else:
current_height += 1
else:
current_width += 1
else:
current_channel += 1
for byte in data:
if(isinstance(byte, int)):
pass
else:
byte = ord(byte)
binv = bin(byte)[2:]
if(len(binv) > 8):
raise Exception("Binary Value larger than expected")
else:
while(len(binv) < 8):
binv = "0" + binv
for c in binv:
val = list(input_image[current_height, current_width])
if(int(c) == 1):
val[current_channel] = int(val[current_channel]) | maskone
else:
val[current_channel] = int(val[current_channel]) & maskzero
input_image[current_height, current_width] = tuple(val)
if(current_channel == nbchannels-1):
current_channel = 0
if(current_width == width-1):
current_width = 0
if(current_height == height-1):
current_height = 0
if maskone == 128:
raise Exception("No more space available in image")
else:
maskone = maskonevalues.pop(0)
maskzero = maskzerovalues.pop(0)
else:
current_height += 1
else:
current_width += 1
else:
current_channel += 1
cv2.imwrite(output_image_name, input_image)
def decode(encoded_image_name, extracted_file_name):
encoded_image = cv2.imread(encoded_image_name)
height, width, nbchannels = encoded_image.shape
size = width*height
current_width = 0
current_height = 0
current_channel = 0
maskonevalues = [1, 2, 4, 8, 16, 32, 64, 128]
maskone = maskonevalues.pop(0)
maskzerovalues = [254, 253, 251, 247, 239, 223, 191, 127]
maskzero = maskzerovalues.pop(0)
bits = ""
for i in range(64):
value = encoded_image[current_height, current_width][current_channel]
value = int(value) & maskone
if(current_channel == nbchannels-1):
current_channel = 0
if(current_width == width-1):
current_width = 0
if(current_height == height-1):
current_height = 0
if(maskone == 128):
raise Exception("No more space available in image")
else:
maskone = maskonevalues.pop(0)
maskzero = maskzerovalues.pop(0)
else:
current_height += 1
else:
current_width += 1
else:
current_channel += 1
if(value > 0):
bits += "1"
else:
bits += "0"
length = int(bits, 2)
output = b""
for i in range(length):
bits = ""
for i in range(8):
value = encoded_image[current_height, current_width][current_channel]
value = int(value) & maskone
if(current_channel == nbchannels-1):
current_channel = 0
if(current_width == width-1):
current_width = 0
if(current_height == height-1):
current_height = 0
if(maskone == 128):
raise Exception("No more space available in image")
else:
maskone = maskonevalues.pop(0)
maskzero = maskzerovalues.pop(0)
else:
current_height += 1
else:
current_width += 1
else:
current_channel += 1
if(value > 0):
bits += "1"
else:
bits += "0"
output += bytearray([int(bits, 2)])
f = open(extracted_file_name, "wb")
f.write(output)
f.close()
if __name__ == "__main__":
input_string = input()
#encode input_image_name output_image_name file_name
#decode encoded_image_name extracted_file_name
input_list = input_string.split()
if input_list[0] == "encode":
encode(input_list[1], input_list[2], input_list[3])
print(f"{input_list[2]}")
elif input_list[0] == "decode":
decode(input_list[1], input_list[2])
print(f"{input_list[2]}")
else:
print("Invalid Entry")
| 36.194286 | 81 | 0.516577 |
import cv2
def encode(input_image_name, output_image_name, file_name):
input_image = cv2.imread(input_image_name)
height, width, nbchannels = input_image.shape
size = width*height
current_width = 0
current_height = 0
current_channel = 0
maskonevalues = [1, 2, 4, 8, 16, 32, 64, 128]
maskone = maskonevalues.pop(0)
maskzerovalues = [254, 253, 251, 247, 239, 223, 191, 127]
maskzero = maskzerovalues.pop(0)
data = open(file_name, "rb").read()
length = len(data)
if(width*height*nbchannels < length + 64):
raise Exception("Not enough space to hold all steganographic data")
binary_value = bin(length)[2:]
if(len(binary_value) > 64):
raise Exception("Binary Value larger than expected")
else:
while(len(binary_value) < 64):
binary_value = "0" + binary_value
for c in binary_value:
value = list(input_image[current_height, current_width])
if(int(c) == 1):
value[current_channel] = int(value[current_channel]) | maskone
else:
value[current_channel] = int(value[current_channel]) & maskzero
input_image[current_height, current_width] = tuple(value)
if(current_channel == nbchannels-1):
current_channel = 0
if(current_width == width-1):
current_width = 0
if(current_height == height-1):
current_height = 0
if maskone == 128:
raise Exception("No more space available in image")
else:
maskone = maskonevalues.pop(0)
maskzero = maskzerovalues.pop(0)
else:
current_height += 1
else:
current_width += 1
else:
current_channel += 1
for byte in data:
if(isinstance(byte, int)):
pass
else:
byte = ord(byte)
binv = bin(byte)[2:]
if(len(binv) > 8):
raise Exception("Binary Value larger than expected")
else:
while(len(binv) < 8):
binv = "0" + binv
for c in binv:
val = list(input_image[current_height, current_width])
if(int(c) == 1):
val[current_channel] = int(val[current_channel]) | maskone
else:
val[current_channel] = int(val[current_channel]) & maskzero
input_image[current_height, current_width] = tuple(val)
if(current_channel == nbchannels-1):
current_channel = 0
if(current_width == width-1):
current_width = 0
if(current_height == height-1):
current_height = 0
if maskone == 128:
raise Exception("No more space available in image")
else:
maskone = maskonevalues.pop(0)
maskzero = maskzerovalues.pop(0)
else:
current_height += 1
else:
current_width += 1
else:
current_channel += 1
cv2.imwrite(output_image_name, input_image)
def decode(encoded_image_name, extracted_file_name):
encoded_image = cv2.imread(encoded_image_name)
height, width, nbchannels = encoded_image.shape
size = width*height
current_width = 0
current_height = 0
current_channel = 0
maskonevalues = [1, 2, 4, 8, 16, 32, 64, 128]
maskone = maskonevalues.pop(0)
maskzerovalues = [254, 253, 251, 247, 239, 223, 191, 127]
maskzero = maskzerovalues.pop(0)
bits = ""
for i in range(64):
value = encoded_image[current_height, current_width][current_channel]
value = int(value) & maskone
if(current_channel == nbchannels-1):
current_channel = 0
if(current_width == width-1):
current_width = 0
if(current_height == height-1):
current_height = 0
if(maskone == 128):
raise Exception("No more space available in image")
else:
maskone = maskonevalues.pop(0)
maskzero = maskzerovalues.pop(0)
else:
current_height += 1
else:
current_width += 1
else:
current_channel += 1
if(value > 0):
bits += "1"
else:
bits += "0"
length = int(bits, 2)
output = b""
for i in range(length):
bits = ""
for i in range(8):
value = encoded_image[current_height, current_width][current_channel]
value = int(value) & maskone
if(current_channel == nbchannels-1):
current_channel = 0
if(current_width == width-1):
current_width = 0
if(current_height == height-1):
current_height = 0
if(maskone == 128):
raise Exception("No more space available in image")
else:
maskone = maskonevalues.pop(0)
maskzero = maskzerovalues.pop(0)
else:
current_height += 1
else:
current_width += 1
else:
current_channel += 1
if(value > 0):
bits += "1"
else:
bits += "0"
output += bytearray([int(bits, 2)])
f = open(extracted_file_name, "wb")
f.write(output)
f.close()
if __name__ == "__main__":
input_string = input()
input_list = input_string.split()
if input_list[0] == "encode":
encode(input_list[1], input_list[2], input_list[3])
print(f"{input_list[2]}")
elif input_list[0] == "decode":
decode(input_list[1], input_list[2])
print(f"{input_list[2]}")
else:
print("Invalid Entry")
| true | true |
f733f7bdc6ccb83dcd5f5ed040f02a8015dd3bc6 | 3,400 | py | Python | src/main/python/foil/unification.py | stefano-bragaglia/PyFoil | eb558659f2d0e1298ebd7b854c3ecdeb1a007bda | [
"BSD-2-Clause"
] | null | null | null | src/main/python/foil/unification.py | stefano-bragaglia/PyFoil | eb558659f2d0e1298ebd7b854c3ecdeb1a007bda | [
"BSD-2-Clause"
] | null | null | null | src/main/python/foil/unification.py | stefano-bragaglia/PyFoil | eb558659f2d0e1298ebd7b854c3ecdeb1a007bda | [
"BSD-2-Clause"
] | null | null | null | import re
from collections import namedtuple
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
Value = Union[bool, float, int, str]
Variable = str
Term = Union[Value, Variable]
Substitution = Dict[Variable, Term]
Step = namedtuple('Step', ['index', 'literal', 'substitution'])
Derivation = List[Step]
def is_ground(term: Term) -> bool:
return term is not None and not is_variable(term)
def is_variable(term: Term) -> bool:
return isinstance(term, str) and bool(re.match(r'[_A-Z][_a-zA-Z0-9]*', term))
def normalize(term: Term) -> str:
if isinstance(term, bool) or isinstance(term, float) or isinstance(term, int):
return str(term)
if isinstance(term, str) and re.match(r'[_a-zA-Z][_a-zA-Z0-9]*', term):
return str(term)
if isinstance(term, str) and any(term.startswith(ch) and term.endswith(ch) for ch in ['"', "'"]):
return str(term)
return repr(term)
def unify(var: Variable, term: Term, subst: Substitution) -> Optional[Substitution]:
if var == term:
return subst
if is_variable(term):
var, term = term, var
if not is_variable(var):
return None
if is_variable(term):
return equate(var, term, subst)
return assign(var, term, subst)
def assign(var: Variable, value: Value, subst: Substitution) -> Optional[Substitution]:
if var not in subst:
return {var: value, **subst}
term = subst[var]
if is_variable(term):
return {k: value if v == term else term for k, v in subst.items()}
return subst if term == value else None
def equate(var1: Variable, var2: Variable, subst: Substitution) -> Optional[Substitution]:
term1, term2 = subst.get(var1), subst.get(var2)
if is_ground(term1) and is_ground(term2):
return subst if term1 == term2 else None
mentions = set([var1, var2] + [k for k, v in subst.items() for t in (term1, term2)
if t and is_variable(t) and v == t])
if is_ground(term1) and not is_ground(term2):
label = term1
elif is_ground(term2) and not is_ground(term1):
label = term2
else:
label = ''.join(sorted(mentions))
return {var1: label, var2: label, **{k: label if k in mentions else v for k, v in subst.items()}}
def simplify(subst: Substitution) -> Optional[Substitution]:
result = {}
for var, term in subst.items():
if var not in result:
if is_variable(term):
vv = sorted({k for k, v in subst.items() if v == term})
for v in vv:
if v != vv[0]:
result[v] = vv[0]
else:
result[var] = term
return result
# @Tabling
def resolve(program: 'Program', query: 'Literal') -> Optional[Derivation]:
for i, clause in enumerate(program.clauses):
substitution = clause.head.unify(query)
if substitution is None:
continue
derivation = [Step(i, query, substitution)]
if not clause.body:
return derivation
for query in clause.body:
substituted = query.substitute(substitution)
sub_goal = resolve(program, substituted)
if not sub_goal:
return None
derivation = [*derivation, *sub_goal]
return derivation
return None
| 28.571429 | 101 | 0.612941 | import re
from collections import namedtuple
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
Value = Union[bool, float, int, str]
Variable = str
Term = Union[Value, Variable]
Substitution = Dict[Variable, Term]
Step = namedtuple('Step', ['index', 'literal', 'substitution'])
Derivation = List[Step]
def is_ground(term: Term) -> bool:
return term is not None and not is_variable(term)
def is_variable(term: Term) -> bool:
return isinstance(term, str) and bool(re.match(r'[_A-Z][_a-zA-Z0-9]*', term))
def normalize(term: Term) -> str:
if isinstance(term, bool) or isinstance(term, float) or isinstance(term, int):
return str(term)
if isinstance(term, str) and re.match(r'[_a-zA-Z][_a-zA-Z0-9]*', term):
return str(term)
if isinstance(term, str) and any(term.startswith(ch) and term.endswith(ch) for ch in ['"', "'"]):
return str(term)
return repr(term)
def unify(var: Variable, term: Term, subst: Substitution) -> Optional[Substitution]:
if var == term:
return subst
if is_variable(term):
var, term = term, var
if not is_variable(var):
return None
if is_variable(term):
return equate(var, term, subst)
return assign(var, term, subst)
def assign(var: Variable, value: Value, subst: Substitution) -> Optional[Substitution]:
if var not in subst:
return {var: value, **subst}
term = subst[var]
if is_variable(term):
return {k: value if v == term else term for k, v in subst.items()}
return subst if term == value else None
def equate(var1: Variable, var2: Variable, subst: Substitution) -> Optional[Substitution]:
term1, term2 = subst.get(var1), subst.get(var2)
if is_ground(term1) and is_ground(term2):
return subst if term1 == term2 else None
mentions = set([var1, var2] + [k for k, v in subst.items() for t in (term1, term2)
if t and is_variable(t) and v == t])
if is_ground(term1) and not is_ground(term2):
label = term1
elif is_ground(term2) and not is_ground(term1):
label = term2
else:
label = ''.join(sorted(mentions))
return {var1: label, var2: label, **{k: label if k in mentions else v for k, v in subst.items()}}
def simplify(subst: Substitution) -> Optional[Substitution]:
result = {}
for var, term in subst.items():
if var not in result:
if is_variable(term):
vv = sorted({k for k, v in subst.items() if v == term})
for v in vv:
if v != vv[0]:
result[v] = vv[0]
else:
result[var] = term
return result
# @Tabling
def resolve(program: 'Program', query: 'Literal') -> Optional[Derivation]:
for i, clause in enumerate(program.clauses):
substitution = clause.head.unify(query)
if substitution is None:
continue
derivation = [Step(i, query, substitution)]
if not clause.body:
return derivation
for query in clause.body:
substituted = query.substitute(substitution)
sub_goal = resolve(program, substituted)
if not sub_goal:
return None
derivation = [*derivation, *sub_goal]
return derivation
return None
| true | true |
f733fa1ac2c27ec458218e283208cab0e78418f3 | 133 | py | Python | discord_karaoke/src/decorators/__init__.py | artemetr/discord-karaoke-bot | cd77739c31dde7a39399f946d954896075371aee | [
"MIT"
] | null | null | null | discord_karaoke/src/decorators/__init__.py | artemetr/discord-karaoke-bot | cd77739c31dde7a39399f946d954896075371aee | [
"MIT"
] | 3 | 2022-03-23T22:14:43.000Z | 2022-03-25T19:06:53.000Z | discord_karaoke/src/decorators/__init__.py | artemetr/discord-karaoke-bot | cd77739c31dde7a39399f946d954896075371aee | [
"MIT"
] | null | null | null | from .allowed_channels import allowed_channels
from .allowed_guilds import allowed_guilds
from .direct_message import direct_message
| 33.25 | 46 | 0.887218 | from .allowed_channels import allowed_channels
from .allowed_guilds import allowed_guilds
from .direct_message import direct_message
| true | true |
f733fd5a8e7d851d3b7e442b98201f652fb344be | 85,612 | py | Python | openshift/installer/vendored/openshift-ansible-git-2016-04-27/roles/openshift_facts/library/openshift_facts.py | fahlmant/openshift-tools | dbb4f16ccde3404c36c23108c45ca7b67138ee12 | [
"Apache-2.0"
] | null | null | null | openshift/installer/vendored/openshift-ansible-git-2016-04-27/roles/openshift_facts/library/openshift_facts.py | fahlmant/openshift-tools | dbb4f16ccde3404c36c23108c45ca7b67138ee12 | [
"Apache-2.0"
] | 3 | 2016-12-01T23:01:36.000Z | 2016-12-02T00:16:48.000Z | openshift/installer/vendored/openshift-ansible-git-2016-04-27/roles/openshift_facts/library/openshift_facts.py | fahlmant/openshift-tools | dbb4f16ccde3404c36c23108c45ca7b67138ee12 | [
"Apache-2.0"
] | 2 | 2018-10-16T05:11:13.000Z | 2018-11-07T01:46:29.000Z | #!/usr/bin/python
# pylint: disable=too-many-lines
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
# Reason: Disable pylint too-many-lines because we don't want to split up this file.
# Status: Permanently disabled to keep this module as self-contained as possible.
"""Ansible module for retrieving and setting openshift related facts"""
DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''
import ConfigParser
import copy
import os
import StringIO
import yaml
from distutils.util import strtobool
from distutils.version import LooseVersion
import struct
import socket
from dbus import SystemBus, Interface
from dbus.exceptions import DBusException
def migrate_docker_facts(facts):
    """ Apply migrations for docker facts """
    moved_params = {
        'common': (
            'additional_registries',
            'insecure_registries',
            'blocked_registries',
            'options'
        ),
        'node': (
            'log_driver',
            'log_options'
        )
    }
    facts.setdefault('docker', {})
    # Move legacy 'docker_<param>' keys from the per-role facts into the
    # dedicated 'docker' facts section.
    for role, param_names in moved_params.items():
        if role not in facts:
            continue
        for param in param_names:
            legacy_key = 'docker_' + param
            if legacy_key in facts[role]:
                facts['docker'][param] = facts[role].pop(legacy_key)
    # A node-level portal_net implies the hosted registry lives on that
    # (insecure) network; record it under docker and drop the node copy.
    if 'node' in facts and 'portal_net' in facts['node']:
        facts['docker']['hosted_registry_insecure'] = True
        facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net')
    # log_options was originally meant to be a comma separated string, but
    # we now prefer an actual list, with backward compatibility:
    if 'log_options' in facts['docker'] and \
            isinstance(facts['docker']['log_options'], basestring):
        facts['docker']['log_options'] = facts['docker']['log_options'].split(",")
    return facts
# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
    """ Migrate facts from various roles into common

        Args:
            facts (dict): existing facts
        Returns:
            dict: facts with role-specific values moved into 'common'
    """
    # NOTE: the trailing commas are required: without them ('portal_net') is
    # just a parenthesized string and the loop below would iterate over its
    # characters, so the migration silently never happened.
    params = {
        'node': ('portal_net',),
        'master': ('portal_net',)
    }
    if 'common' not in facts:
        facts['common'] = {}
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['common'][param] = facts[role].pop(param)
    return facts
def migrate_node_facts(facts):
    """ Migrate facts from various roles into node

        Args:
            facts (dict): existing facts
        Returns:
            dict: facts with node-specific values moved into 'node'
    """
    # NOTE: the trailing comma is required: without it ('dns_ip') is just a
    # parenthesized string and iteration would walk its characters, so the
    # migration silently never happened.
    params = {
        'common': ('dns_ip',),
    }
    if 'node' not in facts:
        facts['node'] = {}
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['node'][param] = facts[role].pop(param)
    return facts
def migrate_local_facts(facts):
    """ Run every local-fact migration over a copy of the supplied facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: a deep copy of facts with all migrations applied
    """
    migrated = copy.deepcopy(facts)
    # Order matters: docker/common/node migrations run before hosted.
    for migration in (migrate_docker_facts, migrate_common_facts,
                      migrate_node_facts, migrate_hosted_facts):
        migrated = migration(migrated)
    return migrated
def migrate_hosted_facts(facts):
    """ Move the legacy master.router_selector fact to hosted.router.selector.

        Args:
            facts (dict): existing facts
        Returns:
            dict: facts with the router selector relocated
    """
    if 'master' in facts and 'router_selector' in facts['master']:
        hosted = facts.setdefault('hosted', {})
        router = hosted.setdefault('router', {})
        router['selector'] = facts['master'].pop('router_selector')
    return facts
def first_ip(network):
    """ Return the first usable IPv4 address in network

        Args:
            network (str): network in CIDR format (e.g. '10.0.0.0/16')
        Returns:
            str: first IPv4 address after the network address
    """
    atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]
    itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))
    (address, prefix_len) = network.split('/')
    # Parse the prefix length as a plain integer rather than abusing
    # inet_aton (inet_aton('24') happens to yield 24, but that behavior is
    # obscure and fragile).
    netmask_i = (0xffffffff << (32 - int(prefix_len))) & 0xffffffff
    return itoa((atoi(address) & netmask_i) + 1)
def hostname_valid(hostname):
    """ Test if specified hostname should be considered valid

        Args:
            hostname (str): hostname to test
        Returns:
            bool: True if valid, otherwise False
    """
    # Reject empty values, localhost-style names, and bare (dotless) names.
    if not hostname:
        return False
    if hostname.startswith('localhost') or hostname.endswith('localdomain'):
        return False
    return len(hostname.split('.')) >= 2
def choose_hostname(hostnames=None, fallback=''):
    """ Choose a hostname from the provided hostnames

        Given a list of hostnames and a fallback value, choose a hostname to
        use. This function will prefer fqdns if they exist (excluding any that
        begin with localhost or end with localdomain) over ip addresses.

        Args:
            hostnames (list): list of hostnames
            fallback (str): default value to set if hostnames does not contain
                            a valid hostname
        Returns:
            str: chosen hostname
    """
    if hostnames is None:
        return fallback
    ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
    ips = []
    names = []
    # Split candidates into dotted-quad IPs and everything else.
    for candidate in hostnames:
        if candidate is None or candidate == '':
            continue
        if isinstance(candidate, basestring) and re.match(ip_regex, candidate):
            ips.append(candidate)
        else:
            names.append(candidate)
    # Real hostnames win over raw IPs.
    for candidate in names + ips:
        if hostname_valid(candidate):
            return candidate
    return fallback
def query_metadata(metadata_url, headers=None, expect_json=False):
    """ Return metadata from the provided metadata_url

        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict or list: parsed json, or a list of stripped response lines
        Raises:
            OpenShiftFactsMetadataUnavailableError: on any non-200 response
    """
    result, info = fetch_url(module, metadata_url, headers=headers)
    if info['status'] != 200:
        raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
    if expect_json:
        return module.from_json(result.read())
    # Plain-text endpoints return one value per line.
    return [line.strip() for line in result.readlines()]
def walk_metadata(metadata_url, headers=None, expect_json=False):
    """ Walk the metadata tree and return a dictionary of the entire tree

        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the result of walking the metadata tree
    """
    metadata = dict()
    for entry in query_metadata(metadata_url, headers, expect_json):
        # A trailing slash marks a subtree; recurse into it. 'public-keys/'
        # is excluded because the metadata service exposes it differently.
        if entry.endswith('/') and not entry == 'public-keys/':
            metadata[entry[:-1]] = walk_metadata(metadata_url + entry,
                                                 headers, expect_json)
            continue
        values = query_metadata(metadata_url + entry, headers, expect_json)
        # Collapse single-element results to a scalar.
        # pylint: disable=maybe-no-member
        metadata[entry] = values.pop() if len(values) == 1 else values
    return metadata
def get_provider_metadata(metadata_url, supports_recursive=False,
                          headers=None, expect_json=False):
    """ Retrieve the provider metadata

        Args:
            metadata_url (str): metadata url
            supports_recursive (bool): does the provider metadata api support
                                       recursion
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the provider metadata, or None when unavailable
    """
    # A recursive API yields the whole tree in one query; otherwise walk it.
    fetch = query_metadata if supports_recursive else walk_metadata
    try:
        return fetch(metadata_url, headers, expect_json)
    except OpenShiftFactsMetadataUnavailableError:
        return None
def normalize_gce_facts(metadata, facts):
    """ Normalize gce facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the
              provided facts dict
    """
    for interface in metadata['instance']['networkInterfaces']:
        int_info = dict(ips=[interface['ip']], network_type='gce')
        int_info['public_ips'] = [cfg['externalIp']
                                  for cfg in interface['accessConfigs']]
        int_info['public_ips'].extend(interface['forwardedIps'])
        # The network id is the last path component of the network URL.
        int_info['network_id'] = interface['network'].rpartition('/')[-1]
        facts['network']['interfaces'].append(int_info)
    facts['zone'] = metadata['instance']['zone'].rpartition('/')[-1]
    # GCE currently only supports a single interface
    primary = facts['network']['interfaces'][0]
    facts['network']['ip'] = primary['ips'][0]
    facts['network']['public_ip'] = primary['public_ips'][0]
    facts['network']['hostname'] = metadata['instance']['hostname']
    # TODO: attempt to resolve public_hostname
    facts['network']['public_hostname'] = facts['network']['public_ip']
    return facts
def normalize_aws_facts(metadata, facts):
    """ Normalize aws facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the
              provided facts dict
    """
    macs = metadata['network']['interfaces']['macs'].values()
    # Order interfaces by device number so eth0 comes first.
    for interface in sorted(macs, key=lambda iface: iface['device-number']):
        int_info = dict()
        var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
        for ips_var, int_var in var_map.iteritems():
            ips = interface.get(int_var)
            # A single address comes back as a bare string; normalize to list.
            int_info[ips_var] = [ips] if isinstance(ips, basestring) else ips
        if 'vpc-id' in interface:
            int_info['network_type'] = 'vpc'
            int_info['network_id'] = interface['subnet-id']
        else:
            int_info['network_type'] = 'classic'
            int_info['network_id'] = None
        facts['network']['interfaces'].append(int_info)
    facts['zone'] = metadata['placement']['availability-zone']
    # TODO: actually attempt to determine default local and public ips
    # by using the ansible default ip fact and the ipv4-associations
    # from the ec2 metadata
    facts['network']['ip'] = metadata.get('local-ipv4')
    facts['network']['public_ip'] = metadata.get('public-ipv4')
    # TODO: verify that local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata.get('local-hostname')
    # TODO: verify that public hostname makes sense and is resolvable
    facts['network']['public_hostname'] = metadata.get('public-hostname')
    return facts
def normalize_openstack_facts(metadata, facts):
    """ Normalize openstack facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the
              provided facts dict
    """
    # openstack ec2 compat api does not support network interfaces and
    # the version tested on did not include the info in the openstack
    # metadata api, should be updated if neutron exposes this.
    ec2_compat = metadata['ec2_compat']
    facts['zone'] = metadata['availability_zone']
    # local-ipv4 may be a comma separated list; the first entry wins.
    facts['network']['ip'] = ec2_compat['local-ipv4'].split(',')[0]
    facts['network']['public_ip'] = ec2_compat['public-ipv4']
    # TODO: verify local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata['hostname']
    # TODO: verify that public hostname makes sense and is resolvable
    facts['network']['public_hostname'] = ec2_compat['public-hostname']
    return facts
def normalize_provider_facts(provider, metadata):
    """ Normalize provider facts

        Args:
            provider (str): host provider
            metadata (dict): provider metadata
        Returns:
            dict: the normalized provider facts
    """
    # Without both a provider and its metadata there is nothing to normalize.
    if provider is None or metadata is None:
        return {}
    # TODO: test for ipv6_enabled where possible (gce, aws do not support)
    # and configure ipv6 facts if available
    # TODO: add support for setting user_data if available
    facts = dict(name=provider, metadata=metadata,
                 network=dict(interfaces=[], ipv6_enabled=False))
    if provider == 'gce':
        return normalize_gce_facts(metadata, facts)
    if provider == 'aws':
        return normalize_aws_facts(metadata, facts)
    if provider == 'openstack':
        return normalize_openstack_facts(metadata, facts)
    return facts
def set_flannel_facts_if_unset(facts):
    """ Set flannel facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the flannel facts if they
              were not already present
    """
    # Flannel is opt-in; default to disabled when nothing set it.
    if 'common' in facts:
        facts['common'].setdefault('use_flannel', False)
    return facts
def set_nuage_facts_if_unset(facts):
    """ Set nuage facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the nuage facts if they
              were not already present
    """
    # Nuage is opt-in; default to disabled when nothing set it.
    if 'common' in facts:
        facts['common'].setdefault('use_nuage', False)
    return facts
def set_node_schedulability(facts):
    """ Set schedulable facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated schedulable
              facts if they were not already present
    """
    # Nodes that are also masters default to unschedulable.
    if 'node' in facts and 'schedulable' not in facts['node']:
        facts['node']['schedulable'] = 'master' not in facts
    return facts
def set_selectors(facts):
    """ Set selectors facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated selectors
              facts if they were not already present
    """
    # Online deployments label infra nodes differently.
    if facts['common']['deployment_type'] == 'online':
        selector = "type=infra"
    else:
        selector = "region=infra"
    hosted = facts.setdefault('hosted', {})
    router = hosted.setdefault('router', {})
    # Treat the literal string 'None' the same as an unset value.
    if router.get('selector') in [None, 'None']:
        router['selector'] = selector
    if 'master' in facts and 'infra_nodes' in facts['master']:
        facts['master'].setdefault('registry_selector', selector)
    return facts
def set_metrics_facts_if_unset(facts):
    """ Set cluster metrics facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated cluster metrics
              facts if they were not already present
    """
    # Cluster metrics are opt-in; default to disabled when nothing set it.
    if 'common' in facts:
        facts['common'].setdefault('use_cluster_metrics', False)
    return facts
def set_dnsmasq_facts_if_unset(facts):
    """ Set dnsmasq facts if not already present in facts

        Args:
            facts (dict) existing facts
        Returns:
            facts (dict) updated facts with values set if not previously set
    """
    if 'common' in facts:
        # Only default use_dnsmasq when the caller has not set it. The old
        # if/else clobbered an explicitly-set value with False whenever the
        # key was already present (or the version check failed).
        if 'use_dnsmasq' not in facts['common']:
            facts['common']['use_dnsmasq'] = \
                bool(facts['common']['version_gte_3_2_or_1_2'])
        if 'master' in facts and 'dns_port' not in facts['master']:
            # dnsmasq owns port 53, so the master's SkyDNS moves to 8053.
            if facts['common']['use_dnsmasq']:
                facts['master']['dns_port'] = 8053
            else:
                facts['master']['dns_port'] = 53
    return facts
def set_project_cfg_facts_if_unset(facts):
    """ Set Project Configuration facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated Project
              Configuration facts if they were not already present
    """
    # Defaults mirror upstream openshift master project configuration.
    defaults = {
        'default_node_selector': '',
        'project_request_message': '',
        'project_request_template': '',
        'mcs_allocator_range': 's0:/2',
        'mcs_labels_per_project': 5,
        'uid_allocator_range': '1000000000-1999999999/10000',
    }
    if 'master' in facts:
        for key, value in defaults.items():
            facts['master'].setdefault(key, value)
    return facts
def set_identity_providers_if_unset(facts):
    """ Set identity_providers fact if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated identity providers
              facts if they were not already present
    """
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'identity_providers' not in facts['master']:
            enterprise_types = ['enterprise', 'atomic-enterprise',
                                'openshift-enterprise']
            # Enterprise deployments default to denying all logins;
            # everything else defaults to allowing them.
            if deployment_type in enterprise_types:
                provider = dict(name='deny_all', challenge=True, login=True,
                                kind='DenyAllPasswordIdentityProvider')
            else:
                provider = dict(name='allow_all', challenge=True, login=True,
                                kind='AllowAllPasswordIdentityProvider')
            facts['master']['identity_providers'] = [provider]
    return facts
def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict

        Derives etcd/api/console URLs, plus the loopback kubeconfig
        identifiers the master uses to talk to itself. Existing values are
        preserved (everything is set via setdefault).

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated url facts if they
            were not already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        # Prefer the load-balanced cluster names when they are configured.
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        etcd_hosts = facts['master']['etcd_hosts']
        # Per-endpoint ssl flags and ports consumed by format_url() below.
        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
            etcd=facts['master']['etcd_use_ssl']
        )
        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
            etcd=facts['master']['etcd_port'],
        )
        etcd_urls = []
        # Non-empty etcd_hosts means external etcd: one URL per host and
        # embedded etcd is forced off.
        if etcd_hosts != '':
            facts['master']['etcd_port'] = ports['etcd']
            facts['master']['embedded_etcd'] = False
            for host in etcd_hosts:
                etcd_urls.append(format_url(use_ssl['etcd'], host,
                                            ports['etcd']))
        else:
            # Embedded etcd lives on the master itself.
            etcd_urls = [format_url(use_ssl['etcd'], hostname,
                                    ports['etcd'])]
        facts['master'].setdefault('etcd_urls', etcd_urls)
        # API urls: internal, public, and the loopback url the master uses
        # to reach its own API.
        prefix_hosts = [('api', api_hostname),
                        ('public_api', api_public_hostname),
                        ('loopback_api', hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix]))
        # kubeconfig cluster/context/user names for the loopback connection;
        # dots are replaced since they are not valid in these identifiers.
        r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
        r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
        facts['master'].setdefault('loopback_cluster_name', r_lhn)
        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
        facts['master'].setdefault('loopback_user', r_lhu)
        # Console urls additionally carry the console path component.
        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix],
                                                                   console_path))
    return facts
def set_aggregate_facts(facts):
    """ Set aggregate facts

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with aggregated facts
    """
    all_hostnames = set()
    internal_hostnames = set()
    # The kubernetes service IP is the first address of the portal network.
    kube_svc_ip = first_ip(facts['common']['portal_net'])
    if 'common' in facts:
        common = facts['common']
        all_hostnames.update([common['hostname'], common['public_hostname'],
                              common['ip'], common['public_ip']])
        common['kube_svc_ip'] = kube_svc_ip
        internal_hostnames.update([common['hostname'], common['ip']])
        cluster_domain = common['dns_domain']
        if 'master' in facts:
            for key in ('cluster_hostname', 'cluster_public_hostname'):
                if key in facts['master']:
                    all_hostnames.add(facts['master'][key])
            # Well-known in-cluster service names for the API server.
            svc_names = ['openshift', 'openshift.default',
                         'openshift.default.svc',
                         'openshift.default.svc.' + cluster_domain,
                         'kubernetes', 'kubernetes.default',
                         'kubernetes.default.svc',
                         'kubernetes.default.svc.' + cluster_domain]
            all_hostnames.update(svc_names)
            internal_hostnames.update(svc_names)
            all_hostnames.add(kube_svc_ip)
            internal_hostnames.add(kube_svc_ip)
        common['all_hostnames'] = list(all_hostnames)
        common['internal_hostnames'] = list(internal_hostnames)
    return facts
def set_etcd_facts_if_unset(facts):
    """
    If using embedded etcd, loads the data directory from master-config.yaml.

    If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.

    If anything goes wrong parsing these, the fact will not be set.
    """
    if 'master' in facts and facts['master']['embedded_etcd']:
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
        if 'etcd_data_dir' not in etcd_facts:
            try:
                # Parse master config to find actual etcd data dir:
                master_cfg_path = os.path.join(facts['common']['config_base'],
                                               'master/master-config.yaml')
                master_cfg_f = open(master_cfg_path, 'r')
                config = yaml.safe_load(master_cfg_f.read())
                master_cfg_f.close()
                etcd_facts['etcd_data_dir'] = \
                    config['etcdConfig']['storageDirectory']
                facts['etcd'] = etcd_facts
            # We don't want exceptions bubbling up here:
            # pylint: disable=broad-except
            except Exception:
                pass
    else:
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
        # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
        try:
            # Add a fake section for parsing (etcd.conf is section-less
            # KEY=VALUE, which ConfigParser cannot read on its own):
            ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read()
            ini_fp = StringIO.StringIO(ini_str)
            config = ConfigParser.RawConfigParser()
            config.readfp(ini_fp)
            etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
            # Strip surrounding double quotes, if any:
            if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
                etcd_data_dir = etcd_data_dir[1:-1]
            etcd_facts['etcd_data_dir'] = etcd_data_dir
            facts['etcd'] = etcd_facts
        # We don't want exceptions bubbling up here:
        # pylint: disable=broad-except
        except Exception:
            pass
    return facts
def set_deployment_facts_if_unset(facts):
    """ Set Facts that vary based on deployment_type. This currently
        includes common.service_type, common.config_base, master.registry_url,
        node.registry_url, node.storage_plugin_deps

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated deployment_type
            facts
    """
    # disabled to avoid breaking up facts related to deployment type into
    # multiple methods for now.
    # pylint: disable=too-many-statements, too-many-branches
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        # systemd unit prefix: origin vs openshift vs atomic-openshift.
        if 'service_type' not in facts['common']:
            service_type = 'atomic-openshift'
            if deployment_type == 'origin':
                service_type = 'origin'
            elif deployment_type in ['enterprise']:
                service_type = 'openshift'
            facts['common']['service_type'] = service_type
        if 'config_base' not in facts['common']:
            config_base = '/etc/origin'
            if deployment_type in ['enterprise']:
                config_base = '/etc/openshift'
            # Handle upgrade scenarios when symlinks don't yet exist:
            if not os.path.exists(config_base) and os.path.exists('/etc/openshift'):
                config_base = '/etc/openshift'
            facts['common']['config_base'] = config_base
        if 'data_dir' not in facts['common']:
            data_dir = '/var/lib/origin'
            if deployment_type in ['enterprise']:
                data_dir = '/var/lib/openshift'
            # Handle upgrade scenarios when symlinks don't yet exist:
            if not os.path.exists(data_dir) and os.path.exists('/var/lib/openshift'):
                data_dir = '/var/lib/openshift'
            facts['common']['data_dir'] = data_dir
    if 'docker' in facts:
        deployment_type = facts['common']['deployment_type']
        if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
            # Enterprise installs pull images from the Red Hat registry;
            # make sure it is in the additional registry list.
            addtl_regs = facts['docker'].get('additional_registries', [])
            ent_reg = 'registry.access.redhat.com'
            if ent_reg not in addtl_regs:
                facts['docker']['additional_registries'] = addtl_regs + [ent_reg]
    for role in ('master', 'node'):
        if role in facts:
            deployment_type = facts['common']['deployment_type']
            if 'registry_url' not in facts[role]:
                # ${component}/${version} are substituted later by the
                # master/node configuration, not here.
                registry_url = 'openshift/origin-${component}:${version}'
                if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
                    registry_url = 'openshift3/ose-${component}:${version}'
                elif deployment_type == 'atomic-enterprise':
                    registry_url = 'aep3_beta/aep-${component}:${version}'
                facts[role]['registry_url'] = registry_url
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
        # atomic-enterprise does not ship build/web console support; always
        # list those features as disabled, merging with any user-set value.
        if 'disabled_features' in facts['master']:
            if deployment_type == 'atomic-enterprise':
                curr_disabled_features = set(facts['master']['disabled_features'])
                facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
        else:
            if deployment_type == 'atomic-enterprise':
                facts['master']['disabled_features'] = openshift_features
    if 'node' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'storage_plugin_deps' not in facts['node']:
            if deployment_type in ['openshift-enterprise', 'atomic-enterprise', 'origin']:
                facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
            else:
                facts['node']['storage_plugin_deps'] = []
    return facts
def set_version_facts_if_unset(facts):
    """ Set version facts. This currently includes common.version and
        common.version_gte_3_1_or_1_1.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with version facts.
    """
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        version = get_openshift_version(facts)
        if version is not None:
            facts['common']['version'] = version
            # Origin and enterprise use different version number streams, so
            # each feature gate has two thresholds.
            if deployment_type == 'origin':
                version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0')
                version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1')
                version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0')
            else:
                version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905')
                version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1')
                version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('3.1.1.901')
        else:
            # Unknown version: optimistically assume the newest behavior.
            version_gte_3_1_or_1_1 = True
            version_gte_3_1_1_or_1_1_1 = True
            version_gte_3_2_or_1_2 = True
        facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
        facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
        facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
        # Pick the examples directory matching the installed version.
        if version_gte_3_2_or_1_2:
            examples_content_version = 'v1.2'
        elif version_gte_3_1_or_1_1:
            examples_content_version = 'v1.1'
        else:
            examples_content_version = 'v1.0'
        facts['common']['examples_content_version'] = examples_content_version
    return facts
def set_manageiq_facts_if_unset(facts):
    """ Set manageiq facts. This currently includes common.use_manageiq.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with version facts.
        Raises:
            OpenShiftFactsInternalError: when the common version facts that
                use_manageiq is derived from have not been set yet
    """
    # The old guard nested the version-fact check inside "'common' not in
    # facts", so a missing 'common' raised KeyError instead of the intended
    # error and a missing version fact was never validated at all.
    if 'common' not in facts:
        raise OpenShiftFactsInternalError(
            "Invalid invocation: The required facts are not set"
        )
    if 'version_gte_3_1_or_1_1' not in facts['common']:
        raise OpenShiftFactsInternalError(
            "Invalid invocation: The required facts are not set"
        )
    if 'use_manageiq' not in facts['common']:
        facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1']
    return facts
def set_sdn_facts_if_unset(facts, system_facts):
    """ Set sdn facts if not already present in facts dict

        Args:
            facts (dict): existing facts
            system_facts (dict): ansible_facts
        Returns:
            dict: the facts dict updated with the generated sdn facts if they
              were not already present
    """
    if 'common' in facts:
        use_sdn = facts['common']['use_openshift_sdn']
        # Normalize string values ('true'/'false') to a real boolean.
        if not (use_sdn == '' or isinstance(use_sdn, bool)):
            use_sdn = safe_get_bool(use_sdn)
            facts['common']['use_openshift_sdn'] = use_sdn
        if 'sdn_network_plugin_name' not in facts['common']:
            facts['common']['sdn_network_plugin_name'] = \
                'redhat/openshift-ovs-subnet' if use_sdn else ''
    if 'master' in facts:
        facts['master'].setdefault('sdn_cluster_network_cidr', '10.1.0.0/16')
        facts['master'].setdefault('sdn_host_subnet_length', '8')
    if 'node' in facts and 'sdn_mtu' not in facts['node']:
        node_ip = facts['common']['ip']
        # default MTU if interface MTU cannot be detected
        facts['node']['sdn_mtu'] = '1450'
        for val in system_facts.itervalues():
            if isinstance(val, dict) and 'mtu' in val:
                # Use the MTU of the interface carrying the node IP, minus
                # 50 bytes of SDN encapsulation overhead.
                if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
                    facts['node']['sdn_mtu'] = str(val['mtu'] - 50)
    return facts
def migrate_oauth_template_facts(facts):
    """
    Migrate an old oauth template fact to a newer format if it's present.

    The legacy 'oauth_template' fact was just a filename, and assumed you were
    setting the 'login' template.

    The new pluralized 'oauth_templates' fact is a dict mapping the template
    name to a filename.

    Simplify the code after this by merging the old fact into the new.
    """
    if 'master' in facts and 'oauth_template' in facts['master']:
        templates = facts['master'].setdefault('oauth_templates', {})
        # Never overwrite an explicitly-configured login template.
        if 'login' not in templates:
            templates['login'] = facts['master']['oauth_template']
    return facts
def format_url(use_ssl, hostname, port, path=''):
    """ Format url based on ssl flag, hostname, port and path

        Args:
            use_ssl (bool): is ssl enabled
            hostname (str): hostname
            port (str): port
            path (str): url path
        Returns:
            str: The generated url string
    """
    scheme = 'https' if use_ssl else 'http'
    default_port = '443' if use_ssl else '80'
    netloc = hostname
    # Only include the port when it differs from the scheme's default.
    if port != default_port:
        netloc = "%s:%s" % (hostname, port)
    return urlparse.urlunparse((scheme, netloc, path, '', '', ''))
def get_current_config(facts):
    """ Get current openshift config

        Collects the configured roles and, when the openshift binary and a
        kubeconfig are present, a masked copy of the kubeconfig contents.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the current openshift config
    """
    current_config = dict()
    roles = [role for role in facts if role not in ['common', 'provider']]
    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]
        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )
        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        if (os.path.isfile('/usr/bin/openshift')
                and os.path.isfile(kubeconfig_path)):
            try:
                _, output, _ = module.run_command(
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)
                # Mask certificate/key material so it never lands in facts.
                cad = 'certificate-authority-data'
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass
                current_config['kubeconfig'] = config
            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass
    return current_config
def build_kubelet_args(facts):
    """ Build node kubelet_args from the configured cloud provider.

        Args:
            facts (dict): existing facts
        Returns:
            dict: facts, with node.kubelet_args merged in when a supported
              cloud provider (aws or openstack) is configured
    """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'node' in facts:
        kubelet_args = {}
        kind = facts.get('cloudprovider', {}).get('kind')
        if kind == 'aws':
            kubelet_args = {'cloud-provider': ['aws'],
                            'cloud-config': [cloud_cfg_path + '/aws.conf']}
        elif kind == 'openstack':
            kubelet_args = {'cloud-provider': ['openstack'],
                            'cloud-config': [cloud_cfg_path + '/openstack.conf']}
        if kubelet_args != {}:
            facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
    return facts
def build_controller_args(facts):
    """ Build master controller_args from the configured cloud provider.

        Args:
            facts (dict): existing facts
        Returns:
            dict: facts, with master.controller_args merged in when a
              supported cloud provider (aws or openstack) is configured
    """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        controller_args = {}
        kind = facts.get('cloudprovider', {}).get('kind')
        if kind == 'aws':
            controller_args = {'cloud-provider': ['aws'],
                               'cloud-config': [cloud_cfg_path + '/aws.conf']}
        elif kind == 'openstack':
            controller_args = {'cloud-provider': ['openstack'],
                               'cloud-config': [cloud_cfg_path + '/openstack.conf']}
        if controller_args != {}:
            facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
    return facts
def build_api_server_args(facts):
    """ Build master api_server_args from the configured cloud provider.

        Args:
            facts (dict): existing facts
        Returns:
            dict: facts, with master.api_server_args merged in when a
              supported cloud provider (aws or openstack) is configured
    """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        api_server_args = {}
        kind = facts.get('cloudprovider', {}).get('kind')
        if kind == 'aws':
            api_server_args = {'cloud-provider': ['aws'],
                               'cloud-config': [cloud_cfg_path + '/aws.conf']}
        elif kind == 'openstack':
            api_server_args = {'cloud-provider': ['openstack'],
                               'cloud-config': [cloud_cfg_path + '/openstack.conf']}
        if api_server_args != {}:
            facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
    return facts
def is_service_running(service):
    """ Queries systemd through dbus to see if the service is running

        Args:
            service (str): systemd unit name, with or without the
                '.service' suffix
        Returns:
            bool: True when the unit is loaded and active, False otherwise
                (including when the unit is unknown to systemd)
    """
    service_running = False
    bus = SystemBus()
    systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
    manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
    try:
        # GetUnit resolves the unit name to its D-Bus object path; skip the
        # lookup when the caller already passed a full '.service' name.
        # NOTE(review): in that case the name itself is used as the object
        # path below — confirm that is intended.
        service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
        service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
        service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
        service_load_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'LoadState')
        service_active_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
        if service_load_state == 'loaded' and service_active_state == 'active':
            service_running = True
    except DBusException:
        # An unknown or unreachable unit is treated as not running.
        pass
    return service_running
def get_version_output(binary, version_cmd):
    """ Run the given version command and return its output.

        Args:
            binary (str or list): path to the executable (or argv prefix)
            version_cmd (str or list): version subcommand/arguments
        Returns:
            str or None: the command's stdout, or None when the binary does
              not exist (previously this raised UnboundLocalError because
              'output' was only bound inside the isfile branch)
    """
    cmd = []
    # Either piece may be a bare string or an argv fragment; flatten both.
    for item in (binary, version_cmd):
        if isinstance(item, list):
            cmd.extend(item)
        else:
            cmd.append(item)
    output = None
    if os.path.isfile(cmd[0]):
        _, output, _ = module.run_command(cmd)
    return output
def get_docker_version_info():
    """ Return docker server version info, or None when unavailable.

        Returns:
            dict or None: {'api_version': ..., 'version': ...} parsed from
            'docker version' output when the docker service is running
    """
    if not is_service_running('docker'):
        return None
    version_info = yaml.safe_load(get_version_output('/usr/bin/docker',
                                                     'version'))
    if 'Server' not in version_info:
        return None
    return {
        'api_version': version_info['Server']['API version'],
        'version': version_info['Server']['Version']
    }
def get_openshift_version(facts):
    """ Get current version of openshift on the host.

        Args:
            facts (dict): existing facts

        Returns:
            str or None: the current openshift version
    """
    # Reuse a previously determined version rather than re-probing.
    if 'common' in facts:
        cached = facts['common'].get('version')
        if cached is not None:
            return cached

    version = None
    if os.path.isfile('/usr/bin/openshift'):
        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
        version = parse_openshift_version(output)

    # openshift_facts runs before openshift_docker_facts. However, it will be
    # called again and set properly throughout the playbook run. This could be
    # refactored to simply set the openshift.common.version in the
    # openshift_docker_facts role but it would take reworking some assumptions
    # on how get_openshift_version is called.
    if 'is_containerized' in facts['common'] and safe_get_bool(facts['common']['is_containerized']):
        if 'docker' in facts and 'openshift_version' in facts['docker']:
            version = facts['docker']['openshift_version']

    return version
def parse_openshift_version(output):
    """ Parse the openshift version number out of 'openshift version' output.

        Each relevant line looks like 'openshift v1.2.3'.

        Args:
            output (string): output of 'openshift version'
        Returns:
            string: the openshift version number, or '' if not found
    """
    # Split on the first ' v' only; without maxsplit a stray ' v' later in
    # a line would make split() return 3+ items and blow up dict().
    versions = dict(e.split(' v', 1) for e in output.splitlines() if ' v' in e)
    return versions.get('openshift', '')
def apply_provider_facts(facts, provider_facts):
    """ Overlay cloud-provider-discovered network facts onto the facts dict.

        Args:
            facts (dict): facts dict to update
            provider_facts (dict): provider facts to apply
        Returns:
            dict: the merged facts
    """
    if not provider_facts:
        return facts

    for hostname_var, ip_var in (('hostname', 'ip'),
                                 ('public_hostname', 'public_ip')):
        provider_ip = provider_facts['network'].get(ip_var)
        if provider_ip:
            facts['common'][ip_var] = provider_ip

        # Prefer a valid provider hostname, falling back to the IP.
        facts['common'][hostname_var] = choose_hostname(
            [provider_facts['network'].get(hostname_var)],
            facts['common'][ip_var]
        )

    facts['provider'] = provider_facts
    return facts
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
    """ Recursively merge facts dicts

        Precedence: values from `new` win, except for additive facts (which
        are unioned) and protected facts (which may only change in limited
        ways) unless explicitly listed in the corresponding overwrite list.

        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']
        Returns:
            dict: the merged facts
    """
    additive_facts = ['named_certificates']
    protected_facts = ['ha', 'master_count']

    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge
    # here, just completely overwrite with the new if they are present there.
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config']

    facts = dict()
    for key, value in orig.iteritems():
        # Key exists in both old and new facts.
        if key in new:
            if key in inventory_json_facts:
                # Watchout for JSON facts that sometimes load as strings.
                # (can happen if the JSON contains a boolean)
                if isinstance(new[key], basestring):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            # Continue to recurse if old and new fact is a dictionary.
            elif isinstance(value, dict) and isinstance(new[key], dict):
                # Collect the subset of additive facts to overwrite if
                # key matches. These will be passed to the subsequent
                # merge_facts call.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)

                # Collect the subset of protected facts to overwrite
                # if key matches. These will be passed to the
                # subsequent merge_facts call.
                relevant_protected_facts = []
                for item in protected_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_protected_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
            # Key matches an additive fact and we are not overwriting
            # it so we will append the new value to the existing value.
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                if isinstance(value, list) and isinstance(new[key], list):
                    # Union of both lists, first occurrence wins, order kept.
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
            # Key matches a protected fact and we are not overwriting
            # it so we will determine if it is okay to change this
            # fact.
            elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
                # The master count (int) can only increase unless it
                # has been passed as a protected fact to overwrite.
                if key == 'master_count':
                    if int(value) <= int(new[key]):
                        facts[key] = copy.deepcopy(new[key])
                    else:
                        # fail_json exits the module; no fall-through here.
                        module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count')
                # ha (bool) can not change unless it has been passed
                # as a protected fact to overwrite.
                if key == 'ha':
                    if safe_get_bool(value) != safe_get_bool(new[key]):
                        module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')
                    else:
                        facts[key] = value
            # No other condition has been met. Overwrite the old fact
            # with the new value.
            else:
                facts[key] = copy.deepcopy(new[key])
        # Key isn't in new so add it to facts to keep it.
        else:
            facts[key] = copy.deepcopy(value)
    # Finally, carry over keys that only exist in the new facts.
    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        facts[key] = copy.deepcopy(new[key])
    return facts
def save_local_facts(filename, facts):
    """ Save local facts to *filename* as JSON.

        The file is created with mode 0600 from the start (instead of
        chmod'ing after the write), so fact contents -- which can include
        session secrets -- are never briefly readable by other users.

        Args:
            filename (str): local facts file
            facts (dict): facts to set
        Raises:
            OpenShiftFactsFileWriteError: if the file cannot be written
    """
    try:
        fact_dir = os.path.dirname(filename)
        if not os.path.exists(fact_dir):
            os.makedirs(fact_dir)
        # O_CREAT with an explicit 0600 mode closes the window where a
        # freshly created fact file would be world-readable.
        fact_fd = os.open(filename,
                          os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
        with os.fdopen(fact_fd, 'w') as fact_file:
            fact_file.write(module.jsonify(facts))
        # Tighten permissions on fact files that pre-dated this write.
        os.chmod(filename, 0o600)
    except (IOError, OSError) as ex:
        raise OpenShiftFactsFileWriteError(
            "Could not create fact file: %s, error: %s" % (filename, ex)
        )
def get_local_facts_from_file(filename):
    """ Retrieve local facts from a fact file.

        Supports both the legacy INI-style format and the current JSON
        format; an unreadable/unparseable file yields an empty dict.

        Args:
            filename (str): local facts file
        Returns:
            dict: the retrieved facts
    """
    local_facts = dict()
    try:
        # Handle conversion of INI style facts file to json style
        ini_facts = ConfigParser.SafeConfigParser()
        ini_facts.read(filename)
        for section in ini_facts.sections():
            local_facts[section] = dict(ini_facts.items(section))
    except (ConfigParser.MissingSectionHeaderError,
            ConfigParser.ParsingError):
        # Not INI; fall back to JSON, ignoring missing/invalid files.
        try:
            with open(filename, 'r') as facts_file:
                local_facts = json.load(facts_file)
        except (ValueError, IOError):
            pass
    return local_facts
def sort_unique(alist):
    """ Sort and de-dupe a list.

        The input list is sorted in place (preserving the original
        side effect); the returned list holds each value once, in order.

        Args:
            alist (list): a list of hashable values (hostnames/IPs here)
        Returns:
            list: a sorted, de-duped list
    """
    alist.sort()
    seen = set()
    out = list()
    for item in alist:
        # Set membership keeps de-duping O(n) instead of the previous
        # O(n^2) 'item not in out' list scan.
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out
def safe_get_bool(fact):
    """ Convert a fact to a bool safely.

        Accepts the same spellings as distutils.util.strtobool
        ('y'/'yes'/'t'/'true'/'on'/'1' and 'n'/'no'/'f'/'false'/'off'/'0',
        case-insensitive) plus native bools/ints via str(), without
        depending on the deprecated distutils module.

        Args:
            fact: fact to convert
        Returns:
            bool: given fact as a bool
        Raises:
            ValueError: if the fact does not look like a boolean
    """
    value = str(fact).lower()
    if value in ('y', 'yes', 't', 'true', 'on', '1'):
        return True
    if value in ('n', 'no', 'f', 'false', 'off', '0'):
        return False
    # Same message shape as distutils.util.strtobool for compatibility.
    raise ValueError("invalid truth value %r" % (value,))
def set_proxy_facts(facts):
    """ Set global proxy facts and promote defaults from http_proxy, https_proxy,
        no_proxy to the more specific builddefaults and builddefaults_git vars.
           1. http_proxy, https_proxy, no_proxy
           2. builddefaults_*
           3. builddefaults_git_*

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'common' in facts:
        common = facts['common']
        if 'http_proxy' in common or 'https_proxy' in common:
            if 'generate_no_proxy_hosts' in common and \
                common['generate_no_proxy_hosts']:
                # Normalize no_proxy to a list before extending it.
                # NOTE(review): if common['no_proxy'] already is a list, the
                # else branch resets it to [] and its entries are lost --
                # confirm this is intended.
                if 'no_proxy' in common and \
                    isinstance(common['no_proxy'], basestring):
                    common['no_proxy'] = common['no_proxy'].split(",")
                else:
                    common['no_proxy'] = []
                if 'no_proxy_internal_hostnames' in common:
                    common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
                common['no_proxy'].append('.' + common['dns_domain'])
                common['no_proxy'].append(common['hostname'])
                common['no_proxy'] = sort_unique(common['no_proxy'])
        facts['common'] = common

    if 'builddefaults' in facts:
        # NOTE(review): this assumes 'master' and 'common' keys exist in
        # facts; a builddefaults-only dict would raise KeyError here --
        # confirm callers always merge defaults first.
        facts['master']['admission_plugin_config'] = dict()
        builddefaults = facts['builddefaults']
        common = facts['common']
        # Promote the generic proxy settings into builddefaults when unset.
        if 'http_proxy' not in builddefaults and 'http_proxy' in common:
            builddefaults['http_proxy'] = common['http_proxy']
        if 'https_proxy' not in builddefaults and 'https_proxy' in common:
            builddefaults['https_proxy'] = common['https_proxy']
        if 'no_proxy' not in builddefaults and 'no_proxy' in common:
            builddefaults['no_proxy'] = common['no_proxy']
        # git_* proxies default to the generic build proxies.
        if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
            builddefaults['git_http_proxy'] = builddefaults['http_proxy']
        if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
            builddefaults['git_https_proxy'] = builddefaults['https_proxy']
        if 'admission_plugin_config' not in builddefaults:
            builddefaults['admission_plugin_config'] = dict()
        if 'config' in builddefaults and ('http_proxy' in builddefaults or \
            'https_proxy' in builddefaults):
            facts['master']['admission_plugin_config'].update(builddefaults['config'])
        facts['builddefaults'] = builddefaults

    return facts
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
    """ Set containerized facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated containerization
            facts
    """
    deployment_type = facts['common']['deployment_type']
    # Per-deployment-type default image names.
    if deployment_type in ['enterprise', 'openshift-enterprise']:
        images = dict(master='openshift3/ose',
                      node='openshift3/node',
                      ovs='openshift3/openvswitch',
                      etcd='registry.access.redhat.com/rhel7/etcd',
                      pod='openshift3/ose-pod',
                      router='openshift3/ose-haproxy-router',
                      registry='openshift3/ose-docker-registry',
                      deployer='openshift3/ose-deployer')
    elif deployment_type == 'atomic-enterprise':
        images = dict(master='aep3_beta/aep',
                      node='aep3_beta/node',
                      ovs='aep3_beta/openvswitch',
                      etcd='registry.access.redhat.com/rhel7/etcd',
                      pod='aep3_beta/aep-pod',
                      router='aep3_beta/aep-haproxy-router',
                      registry='aep3_beta/aep-docker-registry',
                      deployer='aep3_beta/aep-deployer')
    else:
        images = dict(master='openshift/origin',
                      node='openshift/node',
                      ovs='openshift/openvswitch',
                      etcd='registry.access.redhat.com/rhel7/etcd',
                      pod='openshift/origin-pod',
                      router='openshift/origin-haproxy-router',
                      registry='openshift/origin-docker-registry',
                      deployer='openshift/origin-deployer')
    # The cli image is always the same as the master image.
    images['cli'] = images['master']

    common = facts['common']
    common['is_atomic'] = os.path.isfile('/run/ostree-booted')
    common.setdefault('is_containerized', common['is_atomic'])
    common.setdefault('cli_image', images['cli'])
    common.setdefault('pod_image', images['pod'])
    common.setdefault('router_image', images['router'])
    common.setdefault('registry_image', images['registry'])
    common.setdefault('deployer_image', images['deployer'])
    if 'etcd' in facts:
        facts['etcd'].setdefault('etcd_image', images['etcd'])
    if 'master' in facts:
        facts['master'].setdefault('master_image', images['master'])
    if 'node' in facts:
        facts['node'].setdefault('node_image', images['node'])
        facts['node'].setdefault('ovs_image', images['ovs'])

    if safe_get_bool(common['is_containerized']):
        common['admin_binary'] = '/usr/local/bin/oadm'
        common['client_binary'] = '/usr/local/bin/oc'

    return facts
def set_installed_variant_rpm_facts(facts):
    """ Set RPM facts of installed variant.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with installed_variant_rpms
    """
    installed_rpms = []
    for base_rpm in ('openshift', 'atomic-openshift', 'origin'):
        # Candidate package names for this variant.
        candidates = [base_rpm]
        candidates.extend('{0}-{1}'.format(base_rpm, suffix)
                          for suffix in ('master', 'node', 'clients', 'sdn-ovs'))
        candidates.append('tuned-profiles-%s-node' % base_rpm)
        for rpm in candidates:
            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])
            if exit_code == 0:
                installed_rpms.append(rpm)

    facts['common']['installed_variant_rpms'] = installed_rpms
    return facts
class OpenShiftFactsInternalError(Exception):
    """Raised when openshift_facts hits an unexpected internal error."""
    pass
class OpenShiftFactsUnsupportedRoleError(Exception):
    """Raised when a role outside OpenShiftFacts.known_roles is requested."""
    pass
class OpenShiftFactsFileWriteError(Exception):
    """Raised when the local facts file cannot be created or written."""
    pass
class OpenShiftFactsMetadataUnavailableError(Exception):
    """Raised when cloud-provider metadata cannot be retrieved."""
    pass
class OpenShiftFacts(object):
    """ Origin Facts

        Attributes:
            facts (dict): facts for the host

        Args:
            module (AnsibleModule): an AnsibleModule object
            role (str): role for setting local facts
            filename (str): local facts file to use
            local_facts (dict): local facts to set
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']

        Raises:
            OpenShiftFactsUnsupportedRoleError:
    """
    known_roles = ['builddefaults',
                   'cloudprovider',
                   'common',
                   'docker',
                   'etcd',
                   'hosted',
                   'master',
                   'node']

    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
    # pylint: disable=too-many-arguments
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None,
                 openshift_env=None,
                 openshift_env_structures=None,
                 protected_facts_to_overwrite=None):
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role
        # 'module' is the global AnsibleModule set up in main().
        self.system_facts = ansible_facts(module)
        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite,
                                         openshift_env,
                                         openshift_env_structures,
                                         protected_facts_to_overwrite)

    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite,
                       openshift_env,
                       openshift_env_structures,
                       protected_facts_to_overwrite):
        """ Generate facts

            Args:
                local_facts (dict): local_facts for overriding generated defaults
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift_env facts for overriding generated defaults
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']
            Returns:
                dict: The generated facts
        """
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite,
                                            openshift_env,
                                            openshift_env_structures,
                                            protected_facts_to_overwrite)
        roles = local_facts.keys()

        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'

        # Layer defaults -> provider facts -> local facts, then run the
        # fact-derivation pipeline over the merged result.
        defaults = self.get_defaults(roles, deployment_type)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite,
                            protected_facts_to_overwrite)
        facts = migrate_oauth_template_facts(facts)
        facts['current_config'] = get_current_config(facts)
        facts = set_url_facts_if_unset(facts)
        facts = set_project_cfg_facts_if_unset(facts)
        facts = set_flannel_facts_if_unset(facts)
        facts = set_nuage_facts_if_unset(facts)
        facts = set_node_schedulability(facts)
        facts = set_selectors(facts)
        facts = set_metrics_facts_if_unset(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_kubelet_args(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_dnsmasq_facts_if_unset(facts)
        facts = set_manageiq_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_etcd_facts_if_unset(facts)
        facts = set_proxy_facts(facts)
        if not safe_get_bool(facts['common']['is_containerized']):
            facts = set_installed_variant_rpm_facts(facts)
        return dict(openshift=facts)

    def get_defaults(self, roles, deployment_type):
        """ Get default fact values

            Args:
                roles (list): list of roles for this host

            Returns:
                dict: The generated default facts
        """
        defaults = {}

        ip_addr = self.system_facts['default_ipv4']['address']
        exit_code, output, _ = module.run_command(['hostname', '-f'])
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['nodename'],
                           self.system_facts['fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr)

        defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  client_binary='oc', admin_binary='oadm',
                                  dns_domain='cluster.local',
                                  install_examples=True,
                                  debug_level=2)

        if 'master' in roles:
            scheduler_predicates = [
                {"name": "MatchNodeSelector"},
                {"name": "PodFitsResources"},
                {"name": "PodFitsPorts"},
                {"name": "NoDiskConflict"},
                {"name": "Region", "argument": {"serviceAffinity" : {"labels" : ["region"]}}}
            ]
            scheduler_priorities = [
                {"name": "LeastRequestedPriority", "weight": 1},
                {"name": "SelectorSpreadPriority", "weight": 1},
                {"name": "Zone", "weight" : 2, "argument": {"serviceAntiAffinity" : {"label": "zone"}}}
            ]

            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443', etcd_use_ssl=True,
                                      etcd_hosts='', etcd_port='4001',
                                      portal_net='172.30.0.0/16',
                                      embedded_etcd=True, embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      scheduler_predicates=scheduler_predicates,
                                      scheduler_priorities=scheduler_priorities)

        if 'node' in roles:
            defaults['node'] = dict(labels={}, annotations={},
                                    iptables_sync_period='5s',
                                    local_quota_per_fsgroup="",
                                    set_node_ip=False)

        if 'docker' in roles:
            docker = dict(disable_push_dockerhub=False)
            version_info = get_docker_version_info()
            if version_info is not None:
                docker['api_version'] = version_info['api_version']
                docker['version'] = version_info['version']
            defaults['docker'] = docker

        if 'cloudprovider' in roles:
            defaults['cloudprovider'] = dict(kind=None)

        if 'hosted' in roles or self.role == 'hosted':
            defaults['hosted'] = dict(
                metrics=dict(
                    deploy=False,
                    duration=7,
                    resolution=10,
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='metrics',
                            size='10Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'),
                        host=None,
                        access_modes=['ReadWriteMany'],
                        create_pv=True
                    )
                ),
                registry=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='registry',
                            size='5Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'),
                        host=None,
                        access_modes=['ReadWriteMany'],
                        create_pv=True
                    )
                ),
                router=dict()
            )

        return defaults

    def guess_host_provider(self):
        """ Guess the host provider

            Returns:
                dict: The generated default facts for the detected provider
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['product_name']
        product_version = self.system_facts['product_version']
        virt_type = self.system_facts['virtualization_type']
        virt_role = self.system_facts['virtualization_role']
        provider = None
        metadata = None

        # TODO: this is not exposed through module_utils/facts.py in ansible,
        # need to create PR for ansible to expose it
        bios_vendor = get_file_content(
            '/sys/devices/virtual/dmi/id/bios_vendor'
        )
        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)

            # Filter sshKeys and serviceAccounts from gce metadata
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif (virt_type == 'xen' and virt_role == 'guest'
              and re.match(r'.*\.amazon$', product_version)):
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)
            if metadata:
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )

                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)

                if not metadata['ec2_compat']:
                    metadata = None

        return dict(name=provider, metadata=metadata)

    def init_provider_facts(self):
        """ Initialize the provider facts

            Returns:
                dict: The normalized provider facts
        """
        provider_info = self.guess_host_provider()
        provider_facts = normalize_provider_facts(
            provider_info.get('name'),
            provider_info.get('metadata')
        )
        return provider_facts

    @staticmethod
    def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
        """ Split openshift_env facts based on openshift_env structures.

            Args:
                openshift_env_fact (string): the openshift_env fact to split
                                             ex: 'openshift_cloudprovider_openstack_auth_url'
                openshift_env_structures (list): a list of structures to determine fact keys
                                                 ex: ['openshift.cloudprovider.openstack.*']
            Returns:
                list: a list of keys that represent the fact
                      ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url']
        """
        # By default, we'll split an openshift_env fact by underscores.
        fact_keys = openshift_env_fact.split('_')

        # Determine if any of the provided variable structures match the fact.
        matching_structure = None
        if openshift_env_structures != None:
            for structure in openshift_env_structures:
                if re.match(structure, openshift_env_fact):
                    matching_structure = structure
        # Fact didn't match any variable structures so return the default fact keys.
        if matching_structure is None:
            return fact_keys

        final_keys = []
        structure_keys = matching_structure.split('.')
        for structure_key in structure_keys:
            # Matched current key. Add to final keys.
            if structure_key == fact_keys[structure_keys.index(structure_key)]:
                final_keys.append(structure_key)
            # Wildcard means we will be taking everything from here to the end of the fact.
            elif structure_key == '*':
                final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
            # Shouldn't have gotten here, return the fact keys.
            else:
                return fact_keys
        return final_keys

    # Disabling too-many-branches and too-many-locals.
    # This should be cleaned up as a TODO item.
    #pylint: disable=too-many-branches, too-many-locals
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None,
                         openshift_env=None,
                         openshift_env_structures=None,
                         protected_facts_to_overwrite=None):
        """ Initialize the local facts

            Args:
                facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift env facts to set
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']

            Returns:
                dict: The result of merging the provided facts with existing
                      local facts
        """
        changed = False
        facts_to_set = dict()

        if facts is not None:
            facts_to_set[self.role] = facts

        # Expand flat openshift_env variables (openshift_<role>_<key>...)
        # into nested dicts and merge them into facts_to_set.
        if openshift_env != {} and openshift_env != None:
            for fact, value in openshift_env.iteritems():
                oo_env_facts = dict()
                current_level = oo_env_facts
                keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]
                if len(keys) > 0 and keys[0] != self.role:
                    continue
                for key in keys:
                    if key == keys[-1]:
                        current_level[key] = value
                    elif key not in current_level:
                        current_level[key] = dict()
                    current_level = current_level[key]

                facts_to_set = merge_facts(orig=facts_to_set,
                                           new=oo_env_facts,
                                           additive_facts_to_overwrite=[],
                                           protected_facts_to_overwrite=[])

        local_facts = get_local_facts_from_file(self.filename)

        migrated_facts = migrate_local_facts(local_facts)

        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite,
                                      protected_facts_to_overwrite)

        if 'docker' in new_local_facts:
            # remove duplicate and empty strings from registry lists
            for cat in ['additional', 'blocked', 'insecure']:
                key = '{0}_registries'.format(cat)
                if key in new_local_facts['docker']:
                    val = new_local_facts['docker'][key]
                    if isinstance(val, basestring):
                        val = [x.strip() for x in val.split(',')]
                    new_local_facts['docker'][key] = list(set(val) - set(['']))
            # Convert legacy log_options comma sep string to a list if present:
            if 'log_options' in new_local_facts['docker'] and \
                isinstance(new_local_facts['docker']['log_options'], basestring):
                new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')

        new_local_facts = self.remove_empty_facts(new_local_facts)

        # Only validate and persist when something actually changed.
        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            if not module.check_mode:
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts

    def remove_empty_facts(self, facts=None):
        """ Remove empty facts

            Args:
                facts (dict): facts to clean
        """
        facts_to_remove = []
        for fact, value in facts.iteritems():
            if isinstance(facts[fact], dict):
                facts[fact] = self.remove_empty_facts(facts[fact])
            else:
                if value == "" or value == [""] or value is None:
                    facts_to_remove.append(fact)
        # Deletion happens after iteration to avoid mutating during it.
        for fact in facts_to_remove:
            del facts[fact]
        return facts

    def validate_local_facts(self, facts=None):
        """ Validate local facts

            Args:
                facts (dict): local facts to validate
        """
        invalid_facts = dict()
        invalid_facts = self.validate_master_facts(facts, invalid_facts)
        if invalid_facts:
            msg = 'Invalid facts detected:\n'
            for key in invalid_facts.keys():
                msg += '{0}: {1}\n'.format(key, invalid_facts[key])
            module.fail_json(msg=msg,
                             changed=self.changed)

    # disabling pylint errors for line-too-long since we're dealing
    # with best effort reduction of error messages here.
    # disabling errors for too-many-branches since we require checking
    # many conditions.
    # pylint: disable=line-too-long, too-many-branches
    @staticmethod
    def validate_master_facts(facts, invalid_facts):
        """ Validate master facts

            Args:
                facts (dict): local facts to validate
                invalid_facts (dict): collected invalid_facts

            Returns:
                dict: Invalid facts
        """
        if 'master' in facts:
            # openshift.master.session_auth_secrets
            if 'session_auth_secrets' in facts['master']:
                session_auth_secrets = facts['master']['session_auth_secrets']
                if not issubclass(type(session_auth_secrets), list):
                    invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.'
                elif 'session_encryption_secrets' not in facts['master']:
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption secrets must be set '
                                                             'if openshift_master_session_auth_secrets is provided.')
                elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
                                                             'openshift_master_session_encryption_secrets must be '
                                                             'equal length.')
                else:
                    for secret in session_auth_secrets:
                        if len(secret) < 32:
                            invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
                                                                     'Secrets must be at least 32 characters in length.')
            # openshift.master.session_encryption_secrets
            if 'session_encryption_secrets' in facts['master']:
                session_encryption_secrets = facts['master']['session_encryption_secrets']
                if not issubclass(type(session_encryption_secrets), list):
                    invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.'
                elif 'session_auth_secrets' not in facts['master']:
                    invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
                                                                   'set if openshift_master_session_encryption_secrets '
                                                                   'is provided.')
                else:
                    # AES requires 16, 24 or 32 byte keys.
                    for secret in session_encryption_secrets:
                        if len(secret) not in [16, 24, 32]:
                            invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
                                                                           'Secrets must be 16, 24, or 32 characters in length.')
        return invalid_facts
def main():
    """ main """
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name
    global module
    module = AnsibleModule(
        argument_spec=dict(
            role=dict(default='common', required=False,
                      choices=OpenShiftFacts.known_roles),
            local_facts=dict(default=None, type='dict', required=False),
            additive_facts_to_overwrite=dict(default=[], type='list', required=False),
            openshift_env=dict(default={}, type='dict', required=False),
            openshift_env_structures=dict(default=[], type='list', required=False),
            protected_facts_to_overwrite=dict(default=[], type='list', required=False),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )

    params = module.params
    fact_file = '/etc/ansible/facts.d/openshift.fact'

    openshift_facts = OpenShiftFacts(params['role'],
                                     fact_file,
                                     params['local_facts'],
                                     params['additive_facts_to_overwrite'],
                                     params['openshift_env'],
                                     params['openshift_env_structures'],
                                     params['protected_facts_to_overwrite'])

    # Apply any requested file attribute changes to the fact file itself.
    file_params = module.params.copy()
    file_params['path'] = fact_file
    file_args = module.load_file_common_arguments(file_params)
    changed = module.set_fs_attributes_if_different(file_args,
                                                    openshift_facts.changed)

    return module.exit_json(changed=changed,
                            ansible_facts=openshift_facts.facts)
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| 41.001916 | 129 | 0.579335 |
# Status: Permanently disabled to keep this module as self-contained as possible.
DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''
import ConfigParser
import copy
import os
import StringIO
import yaml
from distutils.util import strtobool
from distutils.version import LooseVersion
import struct
import socket
from dbus import SystemBus, Interface
from dbus.exceptions import DBusException
def migrate_docker_facts(facts):
    """ Migrate legacy 'docker_*' facts from the common/node roles into the
        dedicated 'docker' fact namespace.

        Args:
            facts (dict): existing facts
        Returns:
            dict: facts with docker settings relocated under 'docker'
    """
    renamed_params = {
        'common': (
            'additional_registries',
            'insecure_registries',
            'blocked_registries',
            'options'
        ),
        'node': (
            'log_driver',
            'log_options'
        )
    }
    if 'docker' not in facts:
        facts['docker'] = {}
    docker_facts = facts['docker']
    for role, param_names in renamed_params.items():
        if role not in facts:
            continue
        for param in param_names:
            legacy_key = 'docker_' + param
            if legacy_key in facts[role]:
                docker_facts[param] = facts[role].pop(legacy_key)

    # A node-level portal_net implies the hosted registry network.
    if 'node' in facts and 'portal_net' in facts['node']:
        docker_facts['hosted_registry_insecure'] = True
        docker_facts['hosted_registry_network'] = facts['node'].pop('portal_net')

    # log_options was originally meant to be a comma separated string, but
    # we now prefer an actual list, with backward compatability:
    if 'log_options' in docker_facts and \
            isinstance(docker_facts['log_options'], basestring):
        docker_facts['log_options'] = docker_facts['log_options'].split(",")
    return facts
# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
    """ Migrate facts from various roles into the 'common' fact namespace.

        Args:
            facts (dict): facts to migrate
        Returns:
            dict: facts with role-level params moved under facts['common']
    """
    # Bug fix: these values must be tuples. A bare ('portal_net') is just a
    # parenthesized string, so `for param in params[role]` iterated its
    # characters and the migration silently never happened.
    params = {
        'node': ('portal_net',),
        'master': ('portal_net',)
    }
    if 'common' not in facts:
        facts['common'] = {}
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['common'][param] = facts[role].pop(param)
    return facts
def migrate_node_facts(facts):
    """ Migrate facts from various roles into the 'node' fact namespace.

        Args:
            facts (dict): facts to migrate
        Returns:
            dict: facts with role-level params moved under facts['node']
    """
    # Bug fix: the value must be a tuple. A bare ('dns_ip') is just a
    # parenthesized string, so `for param in params[role]` iterated its
    # characters and the migration silently never happened.
    params = {
        'common': ('dns_ip',),
    }
    if 'node' not in facts:
        facts['node'] = {}
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['node'][param] = facts[role].pop(param)
    return facts
def migrate_local_facts(facts):
    """ Run all fact migrations against a deep copy of the given local facts.

        The input dict is never mutated; a migrated copy is returned.
    """
    migrated_facts = copy.deepcopy(facts)
    migrated_facts = migrate_docker_facts(migrated_facts)
    migrated_facts = migrate_common_facts(migrated_facts)
    migrated_facts = migrate_node_facts(migrated_facts)
    migrated_facts = migrate_hosted_facts(migrated_facts)
    return migrated_facts
def migrate_hosted_facts(facts):
    """ Move the legacy master 'router_selector' fact to hosted.router.selector. """
    if 'master' in facts and 'router_selector' in facts['master']:
        hosted = facts.setdefault('hosted', {})
        router = hosted.setdefault('router', {})
        router['selector'] = facts['master'].pop('router_selector')
    return facts
def first_ip(network):
    """ Return the first usable IP (network address + 1) of a CIDR network.

        Args:
            network (str): CIDR notation, e.g. '172.30.0.0/16'
        Returns:
            str: dotted-quad of the first host address
    """
    def to_int(addr):
        return struct.unpack("!I", socket.inet_aton(addr))[0]

    def to_str(addr):
        return socket.inet_ntoa(struct.pack("!I", addr))

    address, prefix = network.split('/')
    # inet_aton parses a bare number like '16' as the integer address 16,
    # so the prefix length round-trips through to_int unchanged.
    mask = (0xffffffff << (32 - to_int(prefix))) & 0xffffffff
    return to_str((to_int(address) & mask) + 1)
def hostname_valid(hostname):
    """ Test if the specified hostname should be considered valid.

        Rejects empty/None names, names starting with 'localhost', names
        ending with 'localdomain', and bare single-label hostnames.

        Args:
            hostname (str): hostname to test
        Returns:
            bool: True if the hostname is valid
    """
    if not hostname:
        return False
    if hostname.startswith('localhost'):
        return False
    if hostname.endswith('localdomain'):
        return False
    return len(hostname.split('.')) >= 2
def choose_hostname(hostnames=None, fallback=''):
    """ Choose a hostname from the provided candidates.

        Prefers a valid non-IP hostname, then a valid IPv4 address, and
        finally returns the fallback unchanged.

        Args:
            hostnames (list): candidate hostnames/IPs (entries may be None)
            fallback (str): value returned when no candidate is valid
        Returns:
            str: chosen hostname
    """
    hostname = fallback
    if hostnames is None:
        return hostname
    ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
    # Partition candidates into IPv4-looking strings and everything else.
    ips = [i for i in hostnames
           if (i is not None and isinstance(i, basestring)
               and re.match(ip_regex, i))]
    hosts = [i for i in hostnames
             if i is not None and i != '' and i not in ips]
    # Real hostnames win over IPs; the first valid entry of each list wins.
    for host_list in (hosts, ips):
        for host in host_list:
            if hostname_valid(host):
                return host
    return hostname
def query_metadata(metadata_url, headers=None, expect_json=False):
    """ Fetch one metadata URL via the Ansible module's fetch_url helper.

        Args:
            metadata_url (str): URL to query
            headers (dict): optional request headers
            expect_json (bool): parse the response body as JSON
        Returns:
            parsed JSON object, or a list of stripped response lines
        Raises:
            OpenShiftFactsMetadataUnavailableError: on any non-200 response
    """
    result, info = fetch_url(module, metadata_url, headers=headers)
    if info['status'] != 200:
        raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
    if expect_json:
        return module.from_json(result.read())
    else:
        return [line.strip() for line in result.readlines()]
def walk_metadata(metadata_url, headers=None, expect_json=False):
    """ Recursively walk a directory-style metadata service into a dict.

        Keys ending in '/' (except 'public-keys/') are treated as
        directories and recursed into; everything else is fetched as a
        leaf value.

        Returns:
            dict: nested metadata tree
    """
    metadata = dict()
    for line in query_metadata(metadata_url, headers, expect_json):
        if line.endswith('/') and not line == 'public-keys/':
            key = line[:-1]
            metadata[key] = walk_metadata(metadata_url + line,
                                          headers, expect_json)
        else:
            results = query_metadata(metadata_url + line, headers,
                                     expect_json)
            if len(results) == 1:
                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Single-value leaves are unwrapped from their list.
                metadata[line] = results.pop()
            else:
                metadata[line] = results
    return metadata
def get_provider_metadata(metadata_url, supports_recursive=False,
                          headers=None, expect_json=False):
    """ Fetch cloud provider metadata, returning None when unavailable.

        Providers supporting recursive queries are fetched in a single
        request; otherwise the metadata tree is walked endpoint by endpoint.

        Args:
            metadata_url (str): base metadata URL
            supports_recursive (bool): provider supports recursive queries
            headers (dict): optional request headers
            expect_json (bool): parse responses as JSON
        Returns:
            dict or None: provider metadata, None when unreachable
    """
    try:
        if supports_recursive:
            metadata = query_metadata(metadata_url, headers,
                                      expect_json)
        else:
            metadata = walk_metadata(metadata_url, headers,
                                     expect_json)
    except OpenShiftFactsMetadataUnavailableError:
        # Not running on this provider (or its metadata service is down).
        metadata = None
    return metadata
def normalize_gce_facts(metadata, facts):
    """ Normalize GCE metadata into the provider facts layout.

        Args:
            metadata (dict): GCE instance metadata
            facts (dict): facts dict containing facts['network']['interfaces']
        Returns:
            dict: the updated facts
    """
    for nic in metadata['instance']['networkInterfaces']:
        public_ips = [cfg['externalIp'] for cfg in nic['accessConfigs']]
        public_ips.extend(nic['forwardedIps'])
        facts['network']['interfaces'].append({
            'ips': [nic['ip']],
            'network_type': 'gce',
            'public_ips': public_ips,
            'network_id': nic['network'].rpartition('/')[2],
        })
    facts['zone'] = metadata['instance']['zone'].rpartition('/')[2]
    # GCE currently only supports a single interface
    primary = facts['network']['interfaces'][0]
    facts['network']['ip'] = primary['ips'][0]
    facts['network']['public_ip'] = primary['public_ips'][0]
    facts['network']['hostname'] = metadata['instance']['hostname']
    # TODO: attempt to resolve public_hostname
    facts['network']['public_hostname'] = facts['network']['public_ip']
    return facts
def normalize_aws_facts(metadata, facts):
    """ Normalize AWS EC2 metadata into the provider facts layout.

        Args:
            metadata (dict): EC2 instance metadata
            facts (dict): facts dict containing facts['network']['interfaces']
        Returns:
            dict: the updated facts
    """
    # Interfaces are sorted by device-number so index 0 corresponds to eth0.
    for interface in sorted(
        metadata['network']['interfaces']['macs'].values(),
        key=lambda x: x['device-number']
    ):
        int_info = dict()
        var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
        for ips_var, int_var in var_map.iteritems():
            ips = interface.get(int_var)
            # The metadata service returns a bare string for a single IP;
            # normalize that to a one-element list.
            if isinstance(ips, basestring):
                int_info[ips_var] = [ips]
            else:
                int_info[ips_var] = ips
        if 'vpc-id' in interface:
            int_info['network_type'] = 'vpc'
        else:
            int_info['network_type'] = 'classic'
        if int_info['network_type'] == 'vpc':
            int_info['network_id'] = interface['subnet-id']
        else:
            int_info['network_id'] = None
        facts['network']['interfaces'].append(int_info)
    facts['zone'] = metadata['placement']['availability-zone']
    # TODO: actually attempt to determine default local and public ips
    # by using the ansible default ip fact and the ipv4-associations
    # from the ec2 metadata
    facts['network']['ip'] = metadata.get('local-ipv4')
    facts['network']['public_ip'] = metadata.get('public-ipv4')
    # TODO: verify that local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata.get('local-hostname')
    # TODO: verify that public hostname makes sense and is resolvable
    facts['network']['public_hostname'] = metadata.get('public-hostname')
    return facts
def normalize_openstack_facts(metadata, facts):
    """ Normalize OpenStack metadata into the provider facts layout.

        Args:
            metadata (dict): OpenStack metadata (with an 'ec2_compat' dict)
            facts (dict): facts dict with a 'network' dict
        Returns:
            dict: the updated facts
    """
    # openstack ec2 compat api does not support network interfaces and
    # the version tested on did not include the info in the openstack
    # metadata api, should be updated if neutron exposes this.
    ec2_compat = metadata['ec2_compat']
    facts['zone'] = metadata['availability_zone']
    facts['network']['ip'] = ec2_compat['local-ipv4'].split(',')[0]
    facts['network']['public_ip'] = ec2_compat['public-ipv4']
    # TODO: verify local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata['hostname']
    # TODO: verify that public hostname makes sense and is resolvable
    facts['network']['public_hostname'] = ec2_compat['public-hostname']
    return facts
def normalize_provider_facts(provider, metadata):
    """ Dispatch provider metadata normalization by provider name.

        Args:
            provider (str): provider name ('gce', 'aws', 'openstack', ...)
            metadata (dict): raw provider metadata
        Returns:
            dict: normalized provider facts ({} when either input is None)
    """
    if provider is None or metadata is None:
        return {}
    # TODO: test for ipv6_enabled where possible (gce, aws do not support)
    # and configure ipv6 facts if available
    # TODO: add support for setting user_data if available
    facts = dict(name=provider, metadata=metadata,
                 network=dict(interfaces=[], ipv6_enabled=False))
    if provider == 'gce':
        return normalize_gce_facts(metadata, facts)
    if provider == 'aws':
        return normalize_aws_facts(metadata, facts)
    if provider == 'openstack':
        return normalize_openstack_facts(metadata, facts)
    # Unknown providers keep the bare skeleton.
    return facts
def set_flannel_facts_if_unset(facts):
    """ Default common.use_flannel to False when not already set.

        Args:
            facts (dict): existing facts
        Returns:
            dict: updated facts
    """
    if 'common' in facts and 'use_flannel' not in facts['common']:
        facts['common']['use_flannel'] = False
    return facts
def set_nuage_facts_if_unset(facts):
    """ Default common.use_nuage to False when not already set.

        Args:
            facts (dict): existing facts
        Returns:
            dict: updated facts
    """
    if 'common' in facts and 'use_nuage' not in facts['common']:
        facts['common']['use_nuage'] = False
    return facts
def set_node_schedulability(facts):
    """ Default node.schedulable: False on hosts that are also masters,
        True otherwise. An explicitly set value is left untouched.

        Args:
            facts (dict): existing facts
        Returns:
            dict: updated facts
    """
    if 'node' in facts and 'schedulable' not in facts['node']:
        facts['node']['schedulable'] = 'master' not in facts
    return facts
def set_selectors(facts):
    """ Set the infra-node selectors for the hosted router and registry.

        'online' deployments use type=infra; all others use region=infra.
        Existing non-empty selectors are preserved.

        Args:
            facts (dict): existing facts
        Returns:
            dict: updated facts
    """
    if facts['common']['deployment_type'] == 'online':
        selector = "type=infra"
    else:
        selector = "region=infra"
    hosted = facts.setdefault('hosted', {})
    router = hosted.setdefault('router', {})
    # A missing selector and the literal string 'None' both count as unset.
    if router.get('selector') in [None, 'None']:
        router['selector'] = selector
    if 'master' in facts and 'infra_nodes' in facts['master'] \
            and 'registry_selector' not in facts['master']:
        facts['master']['registry_selector'] = selector
    return facts
def set_metrics_facts_if_unset(facts):
    """ Default common.use_cluster_metrics to False when not already set.

        Args:
            facts (dict): existing facts
        Returns:
            dict: updated facts
    """
    if 'common' in facts and 'use_cluster_metrics' not in facts['common']:
        facts['common']['use_cluster_metrics'] = False
    return facts
def set_dnsmasq_facts_if_unset(facts):
    """ Set dnsmasq facts if not already present in facts.

        use_dnsmasq defaults to True on versions >= 3.2/1.2; the master
        DNS port moves to 8053 when dnsmasq owns port 53.

        Args:
            facts (dict): existing facts
        Returns:
            dict: updated facts
    """
    if 'common' in facts:
        # Bug fix: only default use_dnsmasq when it was not supplied. The
        # previous if/else also overwrote an explicitly configured value
        # with False, contradicting the "_if_unset" contract.
        if 'use_dnsmasq' not in facts['common']:
            facts['common']['use_dnsmasq'] = bool(facts['common']['version_gte_3_2_or_1_2'])
        if 'master' in facts and 'dns_port' not in facts['master']:
            if facts['common']['use_dnsmasq']:
                # dnsmasq listens on 53, so the master DNS moves to 8053.
                facts['master']['dns_port'] = 8053
            else:
                facts['master']['dns_port'] = 53
    return facts
def set_project_cfg_facts_if_unset(facts):
    """ Fill in default master project-configuration facts when unset.

        Args:
            facts (dict): existing facts
        Returns:
            dict: updated facts
    """
    defaults = {
        'default_node_selector': '',
        'project_request_message': '',
        'project_request_template': '',
        'mcs_allocator_range': 's0:/2',
        'mcs_labels_per_project': 5,
        'uid_allocator_range': '1000000000-1999999999/10000'
    }
    if 'master' in facts:
        for name, default in defaults.items():
            if name not in facts['master']:
                facts['master'][name] = default
    return facts
def set_identity_providers_if_unset(facts):
    """ Default master.identity_providers: deny-all for enterprise
        deployment types, allow-all for everything else.

        Args:
            facts (dict): existing facts
        Returns:
            dict: updated facts
    """
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'identity_providers' not in facts['master']:
            if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
                provider = dict(
                    name='deny_all', challenge=True, login=True,
                    kind='DenyAllPasswordIdentityProvider'
                )
            else:
                provider = dict(
                    name='allow_all', challenge=True, login=True,
                    kind='AllowAllPasswordIdentityProvider'
                )
            facts['master']['identity_providers'] = [provider]
    return facts
def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict.

        Computes master API/console/etcd URLs and loopback kubeconfig
        identifiers from hostnames, ports and ssl flags, without overriding
        any values already present.

        Args:
            facts (dict): existing facts
        Returns:
            dict: updated facts with generated url facts if they were not
                already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        # Cluster (HA) hostnames take precedence over host-level ones.
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        etcd_hosts = facts['master']['etcd_hosts']
        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
            etcd=facts['master']['etcd_use_ssl']
        )
        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
            etcd=facts['master']['etcd_port'],
        )
        etcd_urls = []
        # Non-empty etcd_hosts implies an external (non-embedded) etcd.
        if etcd_hosts != '':
            facts['master']['etcd_port'] = ports['etcd']
            facts['master']['embedded_etcd'] = False
            for host in etcd_hosts:
                etcd_urls.append(format_url(use_ssl['etcd'], host,
                                            ports['etcd']))
        else:
            etcd_urls = [format_url(use_ssl['etcd'], hostname,
                                    ports['etcd'])]
        facts['master'].setdefault('etcd_urls', etcd_urls)
        prefix_hosts = [('api', api_hostname),
                        ('public_api', api_public_hostname),
                        ('loopback_api', hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix]))
        # Loopback kubeconfig identifiers ('.' is replaced since it is not
        # usable in context names).
        r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
        r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
        facts['master'].setdefault('loopback_cluster_name', r_lhn)
        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
        facts['master'].setdefault('loopback_user', r_lhu)
        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix],
                                                                   console_path))
    return facts
def set_aggregate_facts(facts):
    """ Set aggregate facts: the complete lists of hostnames/IPs by which
        the host (and, for masters, the kubernetes service) may be reached.

        Args:
            facts (dict): existing facts
        Returns:
            dict: updated facts with common.all_hostnames,
                common.internal_hostnames and common.kube_svc_ip set
    """
    all_hostnames = set()
    internal_hostnames = set()
    # First usable address of the portal (services) network.
    kube_svc_ip = first_ip(facts['common']['portal_net'])
    if 'common' in facts:
        all_hostnames.add(facts['common']['hostname'])
        all_hostnames.add(facts['common']['public_hostname'])
        all_hostnames.add(facts['common']['ip'])
        all_hostnames.add(facts['common']['public_ip'])
        facts['common']['kube_svc_ip'] = kube_svc_ip
        internal_hostnames.add(facts['common']['hostname'])
        internal_hostnames.add(facts['common']['ip'])
        cluster_domain = facts['common']['dns_domain']
        if 'master' in facts:
            if 'cluster_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_hostname'])
            if 'cluster_public_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_public_hostname'])
            # Well-known in-cluster service names for the API service.
            svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
                         'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
                         'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
            all_hostnames.update(svc_names)
            internal_hostnames.update(svc_names)
            all_hostnames.add(kube_svc_ip)
            internal_hostnames.add(kube_svc_ip)
    facts['common']['all_hostnames'] = list(all_hostnames)
    facts['common']['internal_hostnames'] = list(internal_hostnames)
    return facts
def set_etcd_facts_if_unset(facts):
    """ Set etcd facts (currently etcd.etcd_data_dir) if not already set.

        If using embedded etcd, the data directory is parsed out of
        master-config.yaml; for standalone etcd, ETCD_DATA_DIR is read from
        /etc/etcd/etcd.conf. Parse failures leave the fact unset.
    """
    if 'master' in facts and facts['master']['embedded_etcd']:
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
        if 'etcd_data_dir' not in etcd_facts:
            try:
                # Parse master config to find actual etcd data dir:
                master_cfg_path = os.path.join(facts['common']['config_base'],
                                               'master/master-config.yaml')
                master_cfg_f = open(master_cfg_path, 'r')
                config = yaml.safe_load(master_cfg_f.read())
                master_cfg_f.close()
                etcd_facts['etcd_data_dir'] = \
                    config['etcdConfig']['storageDirectory']
                facts['etcd'] = etcd_facts
            # We don't want exceptions bubbling up here:
            # pylint: disable=broad-except
            except Exception:
                pass
    else:
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
        try:
            # etcd.conf is shell-style key=value; prepend a fake section
            # header so ConfigParser can read it.
            ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read()
            ini_fp = StringIO.StringIO(ini_str)
            config = ConfigParser.RawConfigParser()
            config.readfp(ini_fp)
            etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
            # Strip surrounding double quotes if present.
            if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
                etcd_data_dir = etcd_data_dir[1:-1]
            etcd_facts['etcd_data_dir'] = etcd_data_dir
            facts['etcd'] = etcd_facts
        # pylint: disable=broad-except
        except Exception:
            pass
    return facts
def set_deployment_facts_if_unset(facts):
    """ Set facts derived from the deployment_type: service names, config
        and data directories, registry defaults and disabled features.

        Args:
            facts (dict): existing facts
        Returns:
            dict: updated facts with deployment-type defaults filled in
    """
    # disabled to avoid breaking up facts related to deployment type into
    # multiple methods for now.
    # pylint: disable=too-many-statements, too-many-branches
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'service_type' not in facts['common']:
            service_type = 'atomic-openshift'
            if deployment_type == 'origin':
                service_type = 'origin'
            elif deployment_type in ['enterprise']:
                service_type = 'openshift'
            facts['common']['service_type'] = service_type
        if 'config_base' not in facts['common']:
            config_base = '/etc/origin'
            if deployment_type in ['enterprise']:
                config_base = '/etc/openshift'
            # Handle upgrade scenarios when symlinks don't yet exist:
            if not os.path.exists(config_base) and os.path.exists('/etc/openshift'):
                config_base = '/etc/openshift'
            facts['common']['config_base'] = config_base
        if 'data_dir' not in facts['common']:
            data_dir = '/var/lib/origin'
            if deployment_type in ['enterprise']:
                data_dir = '/var/lib/openshift'
            # Handle upgrade scenarios when symlinks don't yet exist:
            if not os.path.exists(data_dir) and os.path.exists('/var/lib/openshift'):
                data_dir = '/var/lib/openshift'
            facts['common']['data_dir'] = data_dir
    if 'docker' in facts:
        deployment_type = facts['common']['deployment_type']
        if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
            # Ensure the Red Hat registry is always searched on enterprise.
            addtl_regs = facts['docker'].get('additional_registries', [])
            ent_reg = 'registry.access.redhat.com'
            if ent_reg not in addtl_regs:
                facts['docker']['additional_registries'] = addtl_regs + [ent_reg]
    for role in ('master', 'node'):
        if role in facts:
            deployment_type = facts['common']['deployment_type']
            if 'registry_url' not in facts[role]:
                registry_url = 'openshift/origin-${component}:${version}'
                if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
                    registry_url = 'openshift3/ose-${component}:${version}'
                elif deployment_type == 'atomic-enterprise':
                    registry_url = 'aep3_beta/aep-${component}:${version}'
                facts[role]['registry_url'] = registry_url
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
        # atomic-enterprise disables the OpenShift-only features.
        if 'disabled_features' in facts['master']:
            if deployment_type == 'atomic-enterprise':
                curr_disabled_features = set(facts['master']['disabled_features'])
                facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
        else:
            if deployment_type == 'atomic-enterprise':
                facts['master']['disabled_features'] = openshift_features
    if 'node' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'storage_plugin_deps' not in facts['node']:
            if deployment_type in ['openshift-enterprise', 'atomic-enterprise', 'origin']:
                facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
            else:
                facts['node']['storage_plugin_deps'] = []
    return facts
def set_version_facts_if_unset(facts):
    """ Set version facts (common.version and the version_gte_* comparison
        flags) derived from the installed openshift version.

        Args:
            facts (dict): existing facts
        Returns:
            dict: updated facts
    """
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        version = get_openshift_version(facts)
        if version is not None:
            facts['common']['version'] = version
            # Origin and enterprise follow different version number streams.
            if deployment_type == 'origin':
                version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0')
                version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1')
                version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0')
            else:
                version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905')
                version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1')
                version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('3.1.1.901')
        else:
            # Unknown version: assume the newest capabilities.
            version_gte_3_1_or_1_1 = True
            version_gte_3_1_1_or_1_1_1 = True
            version_gte_3_2_or_1_2 = True
        facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
        facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
        facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
        if version_gte_3_2_or_1_2:
            examples_content_version = 'v1.2'
        elif version_gte_3_1_or_1_1:
            examples_content_version = 'v1.1'
        else:
            examples_content_version = 'v1.0'
        facts['common']['examples_content_version'] = examples_content_version
    return facts
def set_manageiq_facts_if_unset(facts):
    """ Set manageiq facts. This currently includes common.use_manageiq.

        Args:
            facts (dict): existing facts
        Returns:
            dict: updated facts
        Raises:
            OpenShiftFactsInternalError: if the required version facts
                are not in place before this is called
    """
    # Bug fix: the original guard was `if 'common' not in facts:` and then
    # read facts['common'] inside that branch, so the intended internal
    # error could never be raised (a KeyError escaped instead), and a
    # present-but-incomplete 'common' dict skipped the check entirely.
    if 'common' not in facts or 'version_gte_3_1_or_1_1' not in facts['common']:
        raise OpenShiftFactsInternalError(
            "Invalid invocation: The required facts are not set"
        )
    if 'use_manageiq' not in facts['common']:
        facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1']
    return facts
def set_sdn_facts_if_unset(facts, system_facts):
    """ Set sdn facts if not already present in facts dict.

        Args:
            facts (dict): existing facts
            system_facts (dict): ansible system facts (used for MTU detection)
        Returns:
            dict: updated facts with sdn defaults set
    """
    if 'common' in facts:
        use_sdn = facts['common']['use_openshift_sdn']
        # Inventory values may arrive as strings; coerce them to bool.
        if not (use_sdn == '' or isinstance(use_sdn, bool)):
            use_sdn = safe_get_bool(use_sdn)
            facts['common']['use_openshift_sdn'] = use_sdn
        if 'sdn_network_plugin_name' not in facts['common']:
            plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
            facts['common']['sdn_network_plugin_name'] = plugin
    if 'master' in facts:
        if 'sdn_cluster_network_cidr' not in facts['master']:
            facts['master']['sdn_cluster_network_cidr'] = '10.1.0.0/16'
        if 'sdn_host_subnet_length' not in facts['master']:
            facts['master']['sdn_host_subnet_length'] = '8'
    if 'node' in facts and 'sdn_mtu' not in facts['node']:
        node_ip = facts['common']['ip']
        # default MTU if interface MTU cannot be detected
        facts['node']['sdn_mtu'] = '1450'
        # Find the interface carrying the node IP and derive the SDN MTU
        # from it, leaving 50 bytes for encapsulation overhead.
        for val in system_facts.itervalues():
            if isinstance(val, dict) and 'mtu' in val:
                mtu = val['mtu']
                if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
                    facts['node']['sdn_mtu'] = str(mtu - 50)
    return facts
def migrate_oauth_template_facts(facts):
    """ Migrate legacy master.oauth_template into master.oauth_templates.

        oauth_template is the deprecated single-template fact; it becomes
        the 'login' entry of oauth_templates unless one is already defined.

        Args:
            facts (dict): existing facts
        Returns:
            dict: updated facts
    """
    if 'master' in facts and 'oauth_template' in facts['master']:
        legacy_template = facts['master']['oauth_template']
        templates = facts['master'].get('oauth_templates')
        if templates is None:
            facts['master']['oauth_templates'] = {"login": legacy_template}
        elif 'login' not in templates:
            templates['login'] = legacy_template
    return facts
def format_url(use_ssl, hostname, port, path=''):
    """ Format a URL from its components.

        Args:
            use_ssl (bool): use the https scheme
            hostname (str): host
            port (str): port; omitted from the URL when it equals the
                scheme default ('443'/'80' — note the string comparison)
            path (str): optional path component
        Returns:
            str: the assembled URL
    """
    scheme = 'https' if use_ssl else 'http'
    netloc = hostname
    # Only append the port when it differs from the scheme default.
    if (use_ssl and port != '443') or (not use_ssl and port != '80'):
        netloc += ":%s" % port
    # NOTE(review): urlparse here is the Python 2 module brought in
    # elsewhere (via the ansible module_utils wildcard imports) — confirm.
    return urlparse.urlunparse((scheme, netloc, path, '', '', ''))
def get_current_config(facts):
    """ Get current openshift config.

        Collects the configured roles and, when possible, the existing
        kubeconfig (with certificate/key material masked).

        Args:
            facts (dict): existing facts
        Returns:
            dict: the current openshift config
    """
    current_config = dict()
    # Every fact namespace except 'common'/'provider' is treated as a role.
    roles = [role for role in facts if role not in ['common', 'provider']]
    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]
        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )
        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        if (os.path.isfile('/usr/bin/openshift')
                and os.path.isfile(kubeconfig_path)):
            try:
                _, output, _ = module.run_command(
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)
                cad = 'certificate-authority-data'
                # Mask certificate/key material before storing the config.
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass
                current_config['kubeconfig'] = config
            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass
    return current_config
def build_kubelet_args(facts):
    """ Build node kubelet_args from the configured cloudprovider, if any.

        Args:
            facts (dict): existing facts
        Returns:
            dict: facts, with node.kubelet_args merged in when a supported
                cloudprovider kind is configured
    """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'node' not in facts:
        return facts
    kubelet_args = {}
    if 'cloudprovider' in facts and 'kind' in facts['cloudprovider']:
        kind = facts['cloudprovider']['kind']
        if kind == 'aws':
            kubelet_args['cloud-provider'] = ['aws']
            kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
        if kind == 'openstack':
            kubelet_args['cloud-provider'] = ['openstack']
            kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
    if kubelet_args != {}:
        facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
    return facts
def build_controller_args(facts):
    """ Build master controller_args from the configured cloudprovider.

        Args:
            facts (dict): existing facts
        Returns:
            dict: facts, with master.controller_args merged in when a
                supported cloudprovider kind is configured
    """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' not in facts:
        return facts
    controller_args = {}
    if 'cloudprovider' in facts and 'kind' in facts['cloudprovider']:
        kind = facts['cloudprovider']['kind']
        if kind == 'aws':
            controller_args['cloud-provider'] = ['aws']
            controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
        if kind == 'openstack':
            controller_args['cloud-provider'] = ['openstack']
            controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
    if controller_args != {}:
        facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
    return facts
def build_api_server_args(facts):
    """ Build master api_server_args from the configured cloudprovider.

        Args:
            facts (dict): existing facts
        Returns:
            dict: facts, with master.api_server_args merged in when a
                supported cloudprovider kind is configured
    """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' not in facts:
        return facts
    api_server_args = {}
    if 'cloudprovider' in facts and 'kind' in facts['cloudprovider']:
        kind = facts['cloudprovider']['kind']
        if kind == 'aws':
            api_server_args['cloud-provider'] = ['aws']
            api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
        if kind == 'openstack':
            api_server_args['cloud-provider'] = ['openstack']
            api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
    if api_server_args != {}:
        facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
    return facts
def is_service_running(service):
    """ Query systemd through dbus to see if the service is running.

        Args:
            service (str): systemd unit name (with or without '.service')
        Returns:
            bool: True when the unit is loaded and active
    """
    service_running = False
    bus = SystemBus()
    systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
    manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
    try:
        # Resolve a bare service name to its unit object path.
        service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
        service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
        service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
        service_load_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'LoadState')
        service_active_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
        if service_load_state == 'loaded' and service_active_state == 'active':
            service_running = True
    except DBusException:
        # Unknown unit or dbus failure: treat as not running.
        pass
    return service_running
def get_version_output(binary, version_cmd):
    """ Run the given binary's version command and return its stdout.

        Args:
            binary: executable path (string or list of arguments)
            version_cmd: version subcommand (string or list of arguments)
        Returns:
            str or None: command stdout, or None when the binary is missing
    """
    cmd = []
    for part in (binary, version_cmd):
        if isinstance(part, list):
            cmd.extend(part)
        else:
            cmd.append(part)
    if not os.path.isfile(cmd[0]):
        return None
    _, output, _ = module.run_command(cmd)
    return output
def get_docker_version_info():
    """ Parse `docker version` output into api_version/version.

        Returns:
            dict or None: {'api_version': ..., 'version': ...}, or None when
                the docker service is not running or the output has no
                'Server' section.
    """
    result = None
    if is_service_running('docker'):
        # `docker version` output is YAML-ish key/value text.
        version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
        if 'Server' in version_info:
            result = {
                'api_version': version_info['Server']['API version'],
                'version': version_info['Server']['Version']
            }
    return result
def get_openshift_version(facts):
    """ Get current version of openshift on the host.

        Args:
            facts (dict): existing facts
        Returns:
            str or None: the current openshift version, None if undetectable
    """
    version = None
    # No need to run this method repeatedly on a system if we already know the
    # version
    if 'common' in facts:
        if 'version' in facts['common'] and facts['common']['version'] is not None:
            return facts['common']['version']
    if os.path.isfile('/usr/bin/openshift'):
        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
        version = parse_openshift_version(output)
    # openshift_facts runs before openshift_docker_facts. However, it will be
    # called again and set properly throughout the playbook run. This could be
    # refactored to simply set the openshift.common.version in the
    # openshift_docker_facts role but it would take reworking some assumptions
    # on how get_openshift_version is called.
    # NOTE(review): assumes facts['common'] exists at this point; a missing
    # 'common' key would raise KeyError here -- confirm with callers.
    if 'is_containerized' in facts['common'] and safe_get_bool(facts['common']['is_containerized']):
        if 'docker' in facts and 'openshift_version' in facts['docker']:
            version = facts['docker']['openshift_version']
    return version
def parse_openshift_version(output):
    """ Extract the openshift version from `openshift version` output.

        Each relevant line looks like 'openshift v1.1.0'; returns '' when
        no openshift line is present.

        Args:
            output (str): raw `openshift version` output
        Returns:
            str: the openshift version number
    """
    pairs = [line.split(' v') for line in output.splitlines() if ' v' in line]
    return dict(pairs).get('openshift', '')
def apply_provider_facts(facts, provider_facts):
    """ Apply provider facts, overriding common hostname/IP facts.

        For each (hostname, ip) pair the provider IP wins outright, while
        the provider hostname is validated via choose_hostname with the IP
        as fallback. No-op when provider_facts is empty or None.

        Args:
            facts (dict): existing facts
            provider_facts (dict): provider facts to apply
        Returns:
            dict: the updated facts
    """
    if not provider_facts:
        return facts
    for host_fact, ip_fact in (('hostname', 'ip'),
                               ('public_hostname', 'public_ip')):
        provider_ip = provider_facts['network'].get(ip_fact)
        if provider_ip:
            facts['common'][ip_fact] = provider_ip
        facts['common'][host_fact] = choose_hostname(
            [provider_facts['network'].get(host_fact)],
            facts['common'][ip_fact]
        )
    facts['provider'] = provider_facts
    return facts
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
    """ Recursively merge facts dicts.

        Args:
            orig (dict): existing facts
            new (dict): facts to merge in
            additive_facts_to_overwrite (list): additive facts, as dotted
                'key.subkey' paths, to overwrite instead of union-merge
            protected_facts_to_overwrite (list): protected facts (dotted
                paths) that are allowed to be overwritten
        Returns:
            dict: the merged facts
    """
    additive_facts = ['named_certificates']
    protected_facts = ['ha', 'master_count']
    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config']
    facts = dict()
    for key, value in orig.iteritems():
        if key in new:
            if key in inventory_json_facts:
                # Inventory JSON facts replace wholesale; string values are
                # parsed first.
                if isinstance(new[key], basestring):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            elif isinstance(value, dict) and isinstance(new[key], dict):
                # Collect the subset of additive/protected dotted paths
                # scoped under this key before recursing.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)
                relevant_protected_facts = []
                for item in protected_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_protected_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                # Additive facts merge as a de-duped union of both lists.
                if isinstance(value, list) and isinstance(new[key], list):
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
            elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
                # master_count may only increase unless explicitly allowed
                # to be overwritten.
                if key == 'master_count':
                    if int(value) <= int(new[key]):
                        facts[key] = copy.deepcopy(new[key])
                    else:
                        module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count')
                # ha may not flip unless explicitly allowed to be overwritten.
                if key == 'ha':
                    if safe_get_bool(value) != safe_get_bool(new[key]):
                        module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')
                    else:
                        facts[key] = value
            else:
                facts[key] = copy.deepcopy(new[key])
        else:
            facts[key] = copy.deepcopy(value)
    # Keys present only in new are copied over as-is.
    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        facts[key] = copy.deepcopy(new[key])
    return facts
def save_local_facts(filename, facts):
    """ Save local facts as JSON.

        Args:
            filename (str): local facts file path
            facts (dict): facts to save
        Raises:
            OpenShiftFactsFileWriteError: when the file cannot be written
    """
    try:
        fact_dir = os.path.dirname(filename)
        if not os.path.exists(fact_dir):
            os.makedirs(fact_dir)
        with open(filename, 'w') as fact_file:
            fact_file.write(module.jsonify(facts))
        # Facts may contain sensitive data; restrict to owner read/write.
        os.chmod(filename, 0o600)
    except (IOError, OSError) as ex:
        raise OpenShiftFactsFileWriteError(
            "Could not create fact file: %s, error: %s" % (filename, ex)
        )
def get_local_facts_from_file(filename):
    """ Retrieve local facts from a fact file, supporting both the legacy
        INI format and the current JSON format.

        Args:
            filename (str): local facts file path
        Returns:
            dict: the retrieved facts (empty when nothing parseable)
    """
    local_facts = dict()
    try:
        # Handle conversion of INI style facts file to json style
        ini_facts = ConfigParser.SafeConfigParser()
        ini_facts.read(filename)
        for section in ini_facts.sections():
            local_facts[section] = dict()
            for key, value in ini_facts.items(section):
                local_facts[section][key] = value
    except (ConfigParser.MissingSectionHeaderError,
            ConfigParser.ParsingError):
        # Not INI: fall back to JSON; give up silently on failure.
        try:
            with open(filename, 'r') as facts_file:
                local_facts = json.load(facts_file)
        except (ValueError, IOError):
            pass
    return local_facts
def sort_unique(alist):
    """ Sorts and de-dupes a list.

        Args:
            alist (list): list to process (sorted in place as a side effect)
        Returns:
            list: new sorted, de-duped list
    """
    alist.sort()
    out = list()
    # After sorting, duplicates are adjacent; comparing against the last
    # emitted item de-dupes in O(n) instead of the previous O(n^2)
    # `i not in out` membership scan.
    for i in alist:
        if not out or i != out[-1]:
            out.append(i)
    return out
def safe_get_bool(fact):
    """ Convert a fact to a boolean.

        Accepts bools, ints and the usual truthy/falsey strings ('yes',
        'no', 'true', 'false', '1', '0', ...) via distutils strtobool.

        Args:
            fact: value to convert
        Returns:
            bool: the converted value
    """
    return strtobool(str(fact)) == 1
def set_proxy_facts(facts):
    """ Set global proxy facts and plumb them into build defaults.

        Normalizes common.no_proxy into a sorted, de-duped list (optionally
        generated from internal hostnames) and copies the proxy settings
        into the builddefaults facts / master admission plugin config.

        Args:
            facts (dict): existing facts
        Returns:
            dict: updated facts
    """
    if 'common' in facts:
        common = facts['common']
        if 'http_proxy' in common or 'https_proxy' in common:
            if 'generate_no_proxy_hosts' in common and \
               common['generate_no_proxy_hosts']:
                # no_proxy may arrive as a comma separated string.
                if 'no_proxy' in common and \
                   isinstance(common['no_proxy'], basestring):
                    common['no_proxy'] = common['no_proxy'].split(",")
                else:
                    common['no_proxy'] = []
                if 'no_proxy_internal_hostnames' in common:
                    common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
                # The cluster DNS domain and this host never go through
                # the proxy.
                common['no_proxy'].append('.' + common['dns_domain'])
                common['no_proxy'].append(common['hostname'])
                common['no_proxy'] = sort_unique(common['no_proxy'])
        facts['common'] = common
    if 'builddefaults' in facts:
        # NOTE(review): assumes facts['master'] exists whenever
        # builddefaults facts are present -- confirm with callers.
        facts['master']['admission_plugin_config'] = dict()
        builddefaults = facts['builddefaults']
        common = facts['common']
        # Inherit proxy settings from common when not set explicitly.
        if 'http_proxy' not in builddefaults and 'http_proxy' in common:
            builddefaults['http_proxy'] = common['http_proxy']
        if 'https_proxy' not in builddefaults and 'https_proxy' in common:
            builddefaults['https_proxy'] = common['https_proxy']
        if 'no_proxy' not in builddefaults and 'no_proxy' in common:
            builddefaults['no_proxy'] = common['no_proxy']
        # Git proxies default to the generic build proxies.
        if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
            builddefaults['git_http_proxy'] = builddefaults['http_proxy']
        if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
            builddefaults['git_https_proxy'] = builddefaults['https_proxy']
        if 'admission_plugin_config' not in builddefaults:
            builddefaults['admission_plugin_config'] = dict()
        if 'config' in builddefaults and ('http_proxy' in builddefaults or \
           'https_proxy' in builddefaults):
            facts['master']['admission_plugin_config'].update(builddefaults['config'])
        facts['builddefaults'] = builddefaults
    return facts
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
    """ Set containerized deployment facts (container image names and
        client binary paths) that the user has not already provided.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the updated facts
    """
    deployment_type = facts['common']['deployment_type']
    # Pick the image set matching the deployment variant.
    if deployment_type in ['enterprise', 'openshift-enterprise']:
        master_image = 'openshift3/ose'
        cli_image = master_image
        node_image = 'openshift3/node'
        ovs_image = 'openshift3/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'openshift3/ose-pod'
        router_image = 'openshift3/ose-haproxy-router'
        registry_image = 'openshift3/ose-docker-registry'
        deployer_image = 'openshift3/ose-deployer'
    elif deployment_type == 'atomic-enterprise':
        master_image = 'aep3_beta/aep'
        cli_image = master_image
        node_image = 'aep3_beta/node'
        ovs_image = 'aep3_beta/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'aep3_beta/aep-pod'
        router_image = 'aep3_beta/aep-haproxy-router'
        registry_image = 'aep3_beta/aep-docker-registry'
        deployer_image = 'aep3_beta/aep-deployer'
    else:
        # Default: origin images.
        master_image = 'openshift/origin'
        cli_image = master_image
        node_image = 'openshift/node'
        ovs_image = 'openshift/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'openshift/origin-pod'
        router_image = 'openshift/origin-haproxy-router'
        registry_image = 'openshift/origin-docker-registry'
        deployer_image = 'openshift/origin-deployer'
    # /run/ostree-booted only exists on Atomic Host.
    facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
    if 'is_containerized' not in facts['common']:
        facts['common']['is_containerized'] = facts['common']['is_atomic']
    # Only fill in image facts the user has not explicitly set.
    if 'cli_image' not in facts['common']:
        facts['common']['cli_image'] = cli_image
    if 'pod_image' not in facts['common']:
        facts['common']['pod_image'] = pod_image
    if 'router_image' not in facts['common']:
        facts['common']['router_image'] = router_image
    if 'registry_image' not in facts['common']:
        facts['common']['registry_image'] = registry_image
    if 'deployer_image' not in facts['common']:
        facts['common']['deployer_image'] = deployer_image
    if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
        facts['etcd']['etcd_image'] = etcd_image
    if 'master' in facts and 'master_image' not in facts['master']:
        facts['master']['master_image'] = master_image
    if 'node' in facts:
        if 'node_image' not in facts['node']:
            facts['node']['node_image'] = node_image
        if 'ovs_image' not in facts['node']:
            facts['node']['ovs_image'] = ovs_image
    # Containerized installs place the client binaries under /usr/local/bin.
    if safe_get_bool(facts['common']['is_containerized']):
        facts['common']['admin_binary'] = '/usr/local/bin/oadm'
        facts['common']['client_binary'] = '/usr/local/bin/oc'
    return facts
def set_installed_variant_rpm_facts(facts):
    """ Record which OpenShift variant RPMs are installed on this host.

        Queries `rpm -q` for every known variant package and stores the
        installed ones under facts['common']['installed_variant_rpms'].

        Args:
            facts (dict): existing facts
        Returns:
            dict: the updated facts
    """
    found = []
    for base in ['openshift', 'atomic-openshift', 'origin']:
        # Candidate packages for this variant: the base package, its
        # component sub-packages, and the tuned profile package.
        candidates = [base]
        for suffix in ['master', 'node', 'clients', 'sdn-ovs']:
            candidates.append('{0}-{1}'.format(base, suffix))
        candidates.append('tuned-profiles-%s-node' % base)
        for candidate in candidates:
            rc, _, _ = module.run_command(['rpm', '-q', candidate])
            if rc == 0:
                found.append(candidate)
    facts['common']['installed_variant_rpms'] = found
    return facts
class OpenShiftFactsInternalError(Exception):
    """Raised when fact generation hits an unexpected internal state."""
    pass
class OpenShiftFactsUnsupportedRoleError(Exception):
    """Raised when the module is invoked with a role not in known_roles."""
    pass
class OpenShiftFactsFileWriteError(Exception):
    """Raised when the local facts file cannot be written."""
    pass
class OpenShiftFactsMetadataUnavailableError(Exception):
    """Raised when cloud provider metadata cannot be retrieved."""
    pass
class OpenShiftFacts(object):
    """ Generate, merge, validate, and persist the local OpenShift facts
        for a single role, layering defaults, detected cloud-provider
        facts, and user-supplied local facts.

        Attributes:
            changed (bool): whether the local facts file was modified
            facts (dict): the fully generated facts (under key 'openshift')
            role (str): role for which facts are being generated
            system_facts (dict): baseline facts gathered by Ansible
    """
    # Roles this module knows how to generate facts for.
    known_roles = ['builddefaults',
                   'cloudprovider',
                   'common',
                   'docker',
                   'etcd',
                   'hosted',
                   'master',
                   'node']
    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
    # pylint: disable=too-many-arguments
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None,
                 openshift_env=None,
                 openshift_env_structures=None,
                 protected_facts_to_overwrite=None):
        """ Args:
                role (str): role for which to set facts
                filename (str): path of the local facts file on disk
                local_facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite
                openshift_env (dict): openshift_* facts supplied via env vars
                openshift_env_structures (list): structured openshift_env keys
                protected_facts_to_overwrite (list): protected facts to overwrite
            Raises:
                OpenShiftFactsUnsupportedRoleError: if role not in known_roles
        """
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role
        # Baseline system facts from Ansible (hostname, ip addresses, ...).
        self.system_facts = ansible_facts(module)
        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite,
                                         openshift_env,
                                         openshift_env_structures,
                                         protected_facts_to_overwrite)
    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite,
                       openshift_env,
                       openshift_env_structures,
                       protected_facts_to_overwrite):
        """ Generate the full facts dict.

            Layering order: role defaults < detected provider facts <
            merged local facts, followed by a pipeline of derivation
            helpers that only fill in facts that are still unset.

            Args:
                local_facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite
                openshift_env (dict): openshift_* facts supplied via env vars
                openshift_env_structures (list): structured openshift_env keys
                protected_facts_to_overwrite (list): protected facts to overwrite
            Returns:
                dict: the generated facts, keyed under 'openshift'
        """
        # Merge/persist the user-supplied local facts first.
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite,
                                            openshift_env,
                                            openshift_env_structures,
                                            protected_facts_to_overwrite)
        roles = local_facts.keys()
        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'
        defaults = self.get_defaults(roles, deployment_type)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite,
                            protected_facts_to_overwrite)
        facts = migrate_oauth_template_facts(facts)
        facts['current_config'] = get_current_config(facts)
        # Derivation pipeline: each helper fills in facts that are unset.
        facts = set_url_facts_if_unset(facts)
        facts = set_project_cfg_facts_if_unset(facts)
        facts = set_flannel_facts_if_unset(facts)
        facts = set_nuage_facts_if_unset(facts)
        facts = set_node_schedulability(facts)
        facts = set_selectors(facts)
        facts = set_metrics_facts_if_unset(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_kubelet_args(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_dnsmasq_facts_if_unset(facts)
        facts = set_manageiq_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_etcd_facts_if_unset(facts)
        facts = set_proxy_facts(facts)
        # RPM variant facts only apply to non-containerized installs.
        if not safe_get_bool(facts['common']['is_containerized']):
            facts = set_installed_variant_rpm_facts(facts)
        return dict(openshift=facts)
    def get_defaults(self, roles, deployment_type):
        """ Get default fact values for the requested roles.

            Args:
                roles (list): roles for which to provide defaults
                deployment_type (str): deployment type of the cluster
            Returns:
                dict: default facts, keyed by role
        """
        defaults = {}
        ip_addr = self.system_facts['default_ipv4']['address']
        # Prefer `hostname -f`, then Ansible's nodename/fqdn, else the IP.
        exit_code, output, _ = module.run_command(['hostname', '-f'])
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['nodename'],
                           self.system_facts['fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr)
        defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  client_binary='oc', admin_binary='oadm',
                                  dns_domain='cluster.local',
                                  install_examples=True,
                                  debug_level=2)
        if 'master' in roles:
            # Default scheduler configuration (predicates filter nodes,
            # priorities rank the remaining candidates).
            scheduler_predicates = [
                {"name": "MatchNodeSelector"},
                {"name": "PodFitsResources"},
                {"name": "PodFitsPorts"},
                {"name": "NoDiskConflict"},
                {"name": "Region", "argument": {"serviceAffinity" : {"labels" : ["region"]}}}
            ]
            scheduler_priorities = [
                {"name": "LeastRequestedPriority", "weight": 1},
                {"name": "SelectorSpreadPriority", "weight": 1},
                {"name": "Zone", "weight" : 2, "argument": {"serviceAntiAffinity" : {"label": "zone"}}}
            ]
            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443', etcd_use_ssl=True,
                                      etcd_hosts='', etcd_port='4001',
                                      portal_net='172.30.0.0/16',
                                      embedded_etcd=True, embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      scheduler_predicates=scheduler_predicates,
                                      scheduler_priorities=scheduler_priorities)
        if 'node' in roles:
            defaults['node'] = dict(labels={}, annotations={},
                                    iptables_sync_period='5s',
                                    local_quota_per_fsgroup="",
                                    set_node_ip=False)
        if 'docker' in roles:
            docker = dict(disable_push_dockerhub=False)
            # Only record docker version facts when docker is detectable.
            version_info = get_docker_version_info()
            if version_info is not None:
                docker['api_version'] = version_info['api_version']
                docker['version'] = version_info['version']
            defaults['docker'] = docker
        if 'cloudprovider' in roles:
            defaults['cloudprovider'] = dict(kind=None)
        if 'hosted' in roles or self.role == 'hosted':
            # Defaults for hosted components (metrics, registry, router).
            defaults['hosted'] = dict(
                metrics=dict(
                    deploy=False,
                    duration=7,
                    resolution=10,
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='metrics',
                            size='10Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'),
                        host=None,
                        access_modes=['ReadWriteMany'],
                        create_pv=True
                    )
                ),
                registry=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='registry',
                            size='5Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'),
                        host=None,
                        access_modes=['ReadWriteMany'],
                        create_pv=True
                    )
                ),
                router=dict()
            )
        return defaults
    def guess_host_provider(self):
        """ Guess the host's cloud provider (gce/aws/openstack) from
            BIOS vendor and virtualization/product facts, and fetch the
            provider metadata when one is detected.

            Returns:
                dict: {'name': provider or None, 'metadata': dict or None}
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['product_name']
        product_version = self.system_facts['product_version']
        virt_type = self.system_facts['virtualization_type']
        virt_role = self.system_facts['virtualization_role']
        provider = None
        metadata = None
        # TODO: this is not exposed through module_utils/facts.py in ansible,
        # need to create PR for ansible to expose it
        bios_vendor = get_file_content(
            '/sys/devices/virtual/dmi/id/bios_vendor'
        )
        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)
            # Filter sshKeys and serviceAccounts from gce metadata
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif (virt_type == 'xen' and virt_role == 'guest'
              and re.match(r'.*\.amazon$', product_version)):
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)
            if metadata:
                # OpenStack also exposes an EC2-compatible endpoint; require
                # it so downstream normalization has both views available.
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )
                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)
                if not metadata['ec2_compat']:
                    metadata = None
        return dict(name=provider, metadata=metadata)
def init_provider_facts(self):
provider_info = self.guess_host_provider()
provider_facts = normalize_provider_facts(
provider_info.get('name'),
provider_info.get('metadata')
)
return provider_facts
@staticmethod
def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
# By default, we'll split an openshift_env fact by underscores.
fact_keys = openshift_env_fact.split('_')
matching_structure = None
if openshift_env_structures != None:
for structure in openshift_env_structures:
if re.match(structure, openshift_env_fact):
matching_structure = structure
if matching_structure is None:
return fact_keys
final_keys = []
structure_keys = matching_structure.split('.')
for structure_key in structure_keys:
# Matched current key. Add to final keys.
if structure_key == fact_keys[structure_keys.index(structure_key)]:
final_keys.append(structure_key)
# Wildcard means we will be taking everything from here to the end of the fact.
elif structure_key == '*':
final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
# Shouldn't have gotten here, return the fact keys.
else:
return fact_keys
return final_keys
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None,
                         openshift_env=None,
                         openshift_env_structures=None,
                         protected_facts_to_overwrite=None):
        """ Initialize the local facts: merge facts from the on-disk file,
            the supplied facts, and any openshift_env overrides, normalize
            docker registry/log facts, validate, and persist when changed.

            Args:
                facts (dict): local facts to set for self.role
                additive_facts_to_overwrite (list): additive facts to overwrite
                openshift_env (dict): openshift_* facts supplied via env vars
                openshift_env_structures (list): structured openshift_env keys
                protected_facts_to_overwrite (list): protected facts to overwrite
            Returns:
                dict: the new local facts
        """
        changed = False
        facts_to_set = dict()
        if facts is not None:
            facts_to_set[self.role] = facts
        if openshift_env != {} and openshift_env != None:
            for fact, value in openshift_env.iteritems():
                # Build a nested dict from the underscore-delimited fact
                # name, e.g. common_hostname -> {'hostname': value}.
                oo_env_facts = dict()
                current_level = oo_env_facts
                keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]
                # Only apply env facts addressed at this role.
                if len(keys) > 0 and keys[0] != self.role:
                    continue
                for key in keys:
                    if key == keys[-1]:
                        current_level[key] = value
                    elif key not in current_level:
                        current_level[key] = dict()
                        current_level = current_level[key]
                facts_to_set = merge_facts(orig=facts_to_set,
                                           new=oo_env_facts,
                                           additive_facts_to_overwrite=[],
                                           protected_facts_to_overwrite=[])
        local_facts = get_local_facts_from_file(self.filename)
        migrated_facts = migrate_local_facts(local_facts)
        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite,
                                      protected_facts_to_overwrite)
        if 'docker' in new_local_facts:
            # Normalize registry facts to de-duplicated lists (they may be
            # supplied as comma-delimited strings) and drop empty entries.
            for cat in ['additional', 'blocked', 'insecure']:
                key = '{0}_registries'.format(cat)
                if key in new_local_facts['docker']:
                    val = new_local_facts['docker'][key]
                    if isinstance(val, basestring):
                        val = [x.strip() for x in val.split(',')]
                    new_local_facts['docker'][key] = list(set(val) - set(['']))
            if 'log_options' in new_local_facts['docker'] and \
                isinstance(new_local_facts['docker']['log_options'], basestring):
                new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')
        new_local_facts = self.remove_empty_facts(new_local_facts)
        # Only validate/persist when something actually changed.
        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            if not module.check_mode:
                save_local_facts(self.filename, new_local_facts)
        self.changed = changed
        return new_local_facts
    def remove_empty_facts(self, facts=None):
        """ Recursively remove empty facts (values of '', [''] or None).

            Args:
                facts (dict): facts to clean (mutated in place)
            Returns:
                dict: the same dict with empty leaves removed
        """
        facts_to_remove = []
        for fact, value in facts.iteritems():
            if isinstance(facts[fact], dict):
                facts[fact] = self.remove_empty_facts(facts[fact])
            else:
                if value == "" or value == [""] or value is None:
                    facts_to_remove.append(fact)
        # Deferred removal: deleting while iterating iteritems() would
        # raise a RuntimeError.
        for fact in facts_to_remove:
            del facts[fact]
        return facts
    def validate_local_facts(self, facts=None):
        """ Validate local facts; fail the module run if any are invalid.

            Args:
                facts (dict): local facts to validate
        """
        invalid_facts = dict()
        invalid_facts = self.validate_master_facts(facts, invalid_facts)
        if invalid_facts:
            # Aggregate all validation errors into a single failure message.
            msg = 'Invalid facts detected:\n'
            for key in invalid_facts.keys():
                msg += '{0}: {1}\n'.format(key, invalid_facts[key])
            module.fail_json(msg=msg,
                             changed=self.changed)
    # with best effort reduction of error messages here.
    # disabling errors for too-many-branches since we require checking
    # many conditions.
    # pylint: disable=line-too-long, too-many-branches
    @staticmethod
    def validate_master_facts(facts, invalid_facts):
        """ Validate master facts (session auth/encryption secrets).

            Args:
                facts (dict): facts to validate
                invalid_facts (dict): accumulates an error message per
                    invalid fact
            Returns:
                dict: invalid_facts with any new validation errors added
        """
        if 'master' in facts:
            # openshift.master.session_auth_secrets
            if 'session_auth_secrets' in facts['master']:
                session_auth_secrets = facts['master']['session_auth_secrets']
                if not issubclass(type(session_auth_secrets), list):
                    invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.'
                elif 'session_encryption_secrets' not in facts['master']:
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption secrets must be set '
                                                             'if openshift_master_session_auth_secrets is provided.')
                elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
                                                             'openshift_master_session_encryption_secrets must be '
                                                             'equal length.')
                else:
                    for secret in session_auth_secrets:
                        if len(secret) < 32:
                            invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
                                                                     'Secrets must be at least 32 characters in length.')
            # openshift.master.session_encryption_secrets
            if 'session_encryption_secrets' in facts['master']:
                session_encryption_secrets = facts['master']['session_encryption_secrets']
                if not issubclass(type(session_encryption_secrets), list):
                    invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.'
                elif 'session_auth_secrets' not in facts['master']:
                    invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
                                                                   'set if openshift_master_session_encryption_secrets '
                                                                   'is provided.')
                else:
                    # 16/24/32 presumably correspond to AES key lengths --
                    # confirm against the master config consumer.
                    for secret in session_encryption_secrets:
                        if len(secret) not in [16, 24, 32]:
                            invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
                                                                           'Secrets must be 16, 24, or 32 characters in length.')
        return invalid_facts
def main():
    """ Ansible module entry point: parse parameters, generate and persist
        the openshift facts, apply file attributes to the facts file, and
        return the facts via exit_json.
    """
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name
    global module
    module = AnsibleModule(
        argument_spec=dict(
            role=dict(default='common', required=False,
                      choices=OpenShiftFacts.known_roles),
            local_facts=dict(default=None, type='dict', required=False),
            additive_facts_to_overwrite=dict(default=[], type='list', required=False),
            openshift_env=dict(default={}, type='dict', required=False),
            openshift_env_structures=dict(default=[], type='list', required=False),
            protected_facts_to_overwrite=dict(default=[], type='list', required=False),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )
    role = module.params['role']
    local_facts = module.params['local_facts']
    additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']
    openshift_env = module.params['openshift_env']
    openshift_env_structures = module.params['openshift_env_structures']
    protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']
    fact_file = '/etc/ansible/facts.d/openshift.fact'
    openshift_facts = OpenShiftFacts(role,
                                     fact_file,
                                     local_facts,
                                     additive_facts_to_overwrite,
                                     openshift_env,
                                     openshift_env_structures,
                                     protected_facts_to_overwrite)
    # Apply the standard file args (owner/group/mode/...) to the facts file.
    file_params = module.params.copy()
    file_params['path'] = fact_file
    file_args = module.load_file_common_arguments(file_params)
    changed = module.set_fs_attributes_if_different(file_args,
                                                    openshift_facts.changed)
    return module.exit_json(changed=changed,
                            ansible_facts=openshift_facts.facts)
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| true | true |
f733fd6fdb137cc32d29187bb72ab56c79af93fe | 8,011 | py | Python | pytests/epengine/documentkeys.py | cgghali/TAF | 1de8dec77ad781c373e18d9c285befd534ac203a | [
"Apache-2.0"
] | null | null | null | pytests/epengine/documentkeys.py | cgghali/TAF | 1de8dec77ad781c373e18d9c285befd534ac203a | [
"Apache-2.0"
] | null | null | null | pytests/epengine/documentkeys.py | cgghali/TAF | 1de8dec77ad781c373e18d9c285befd534ac203a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from basetestcase import BaseTestCase
from couchbase_helper.documentgenerator import doc_generator
from membase.api.rest_client import RestConnection
from couchbase_helper.document import View
class DocumentKeysTests(BaseTestCase):
    """Exercise document keys containing whitespace, binary control
    characters, and unicode across CRUD data ops, view queries, and a
    hard-failover (DCP) scenario.

    Fix: the method documentation previously lived in class-level bare
    string literals placed *before* the methods they described; those were
    discarded no-op statements invisible to help()/tooling.  They are now
    proper docstrings attached to the methods.  No behavior change.
    """

    def setUp(self):
        super(DocumentKeysTests, self).setUp()
        # Rebalance any extra nodes into the cluster before bucket creation.
        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master]+nodes_init)
        self.bucket_util.create_default_bucket(
            bucket_type=self.bucket_type,
            replica=self.num_replicas,
            storage=self.bucket_storage,
            eviction_policy=self.bucket_eviction_policy)
        self.bucket_util.add_rbac_user()
        self.cluster_util.print_cluster_stats()
        self.bucket_util.print_bucket_stats()
        self.log.info("====== DocumentKeysTests setUp complete ======")

    def tearDown(self):
        super(DocumentKeysTests, self).tearDown()

    def _persist_and_verify(self):
        """Wait for persistence, then verify data/stats on all buckets."""
        self.bucket_util._wait_for_stats_all_buckets()
        self.bucket_util.verify_stats_all_buckets(self.num_items)

    def _verify_with_views(self, expected_rows):
        """Verify the loaded data using a map view query on each bucket."""
        for bucket in self.bucket_util.buckets:
            default_map_func = 'function (doc, meta) { emit(meta.id, null);}'
            default_view = View("View", default_map_func, None, False)
            ddoc_name = "key_ddoc"
            self.bucket_util.create_views(
                self.cluster.master, ddoc_name, [default_view], bucket.name)
            query = {"stale": "false", "connection_timeout": 60000}
            self.bucket_util.query_view(self.cluster.master, ddoc_name,
                                        default_view.name, query,
                                        expected_rows, bucket=bucket.name)

    def _dockey_data_ops(self, dockey="dockey"):
        """Perform create/update/delete data ops on the given document key
        and verify persistence/stats after each operation."""
        target_vb = None
        if self.target_vbucket is not None:
            target_vb = [self.target_vbucket]
        gen_load = doc_generator(dockey, 0, self.num_items,
                                 key_size=self.key_size,
                                 doc_size=self.doc_size,
                                 doc_type=self.doc_type,
                                 vbuckets=self.cluster_util.vbuckets,
                                 target_vbucket=target_vb)
        bucket = self.bucket_util.get_all_buckets()[0]
        for op_type in ["create", "update", "delete"]:
            task = self.task.async_load_gen_docs(
                self.cluster, bucket, gen_load, op_type, 0, batch_size=20,
                persist_to=self.persist_to, replicate_to=self.replicate_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout,
                sdk_client_pool=self.sdk_client_pool)
            self.task.jython_task_manager.get_task_result(task)
            if op_type == "delete":
                # After the delete pass the bucket should be empty again.
                self.num_items = 0
            self._persist_and_verify()

    def _dockey_views(self, dockey="dockey"):
        """Load docs with the given key prefix, wait for persistence, and
        verify the data with a view query."""
        gen_load = doc_generator(dockey, 0, self.num_items,
                                 key_size=self.key_size,
                                 doc_size=self.doc_size,
                                 doc_type=self.doc_type,
                                 vbuckets=self.cluster_util.vbuckets)
        bucket = self.bucket_util.get_all_buckets()[0]
        task = self.task.async_load_gen_docs(self.cluster, bucket,
                                             gen_load, "create", 0,
                                             batch_size=20,
                                             persist_to=self.persist_to,
                                             replicate_to=self.replicate_to,
                                             durability=self.durability_level,
                                             timeout_secs=self.sdk_timeout,
                                             sdk_client_pool=self.sdk_client_pool)
        self.task.jython_task_manager.get_task_result(task)
        self._persist_and_verify()
        self._verify_with_views(self.num_items)

    def _dockey_dcp(self, dockey="dockey"):
        """Load docs with the given key and wait for persistence, then
        hard-fail one node (one replica is configured) and verify the data
        can still be retrieved."""
        gen_load = doc_generator(dockey, 0, self.num_items,
                                 key_size=self.key_size,
                                 doc_size=self.doc_size,
                                 doc_type=self.doc_type,
                                 vbuckets=self.cluster_util.vbuckets)
        bucket = self.bucket_util.get_all_buckets()[0]
        task = self.task.async_load_gen_docs(self.cluster, bucket,
                                             gen_load, "create", 0,
                                             batch_size=20,
                                             persist_to=self.persist_to,
                                             replicate_to=self.replicate_to,
                                             durability=self.durability_level,
                                             timeout_secs=self.sdk_timeout,
                                             sdk_client_pool=self.sdk_client_pool)
        self.task.jython_task_manager.get_task_result(task)
        self._persist_and_verify()
        # assert if there are not enough nodes to failover
        rest = RestConnection(self.cluster.master)
        num_nodes = len(rest.node_statuses())
        self.assertTrue(num_nodes > 1,
                        "ERROR: Not enough nodes to do failover")
        # failover 1 node(we have 1 replica) and verify the keys
        rest = RestConnection(self.cluster.master)
        node_status = rest.node_statuses()
        for node_to_failover in self.servers[(num_nodes - 1):num_nodes]:
            for node in node_status:
                if node_to_failover.ip == node.ip \
                        and int(node_to_failover.port) == int(node.port):
                    rest.fail_over(node.id, graceful=False)
        self.cluster.nodes_in_cluster = \
            list(set(self.cluster.nodes_in_cluster)
                 - set(self.servers[(num_nodes - 1):num_nodes]))
        self._persist_and_verify()

    def test_dockey_whitespace_data_ops(self):
        """CRUD ops with a whitespace-laden key, padded to key_size."""
        generic_key = "d o c k e y"
        if self.key_size:
            self.key_size = self.key_size-len(generic_key)
            generic_key = generic_key + "_" * self.key_size
        self._dockey_data_ops(generic_key)

    def test_dockey_binary_data_ops(self):
        """CRUD ops with a key containing CR/LF control characters."""
        generic_key = "d\ro\nckey"
        if self.key_size:
            self.key_size = self.key_size-len(generic_key)
            generic_key = generic_key + "\n" * self.key_size
        self._dockey_data_ops(generic_key)

    def test_dockey_unicode_data_ops(self):
        """CRUD ops with a key containing non-ASCII unicode characters."""
        generic_key = "\u00CA"
        if self.key_size:
            self.key_size = self.key_size-len(generic_key)
            generic_key = generic_key + "é" * self.key_size
        self._dockey_data_ops(generic_key)

    def test_dockey_whitespace_views(self):
        """View query verification with a whitespace key."""
        self._dockey_views("doc key ")

    def test_dockey_binary_views(self):
        """View query verification with a key containing NUL/LF."""
        self._dockey_views("docke\0y\n")

    def test_dockey_unicode_views(self):
        """View query verification with a unicode key."""
        self._dockey_views("México")

    def test_dockey_whitespace_dcp(self):
        """Failover/DCP verification with a whitespace key."""
        self._dockey_dcp("d o c k e y")

    def test_dockey_binary_dcp(self):
        """Failover/DCP verification with a key containing CR/NUL."""
        self._dockey_dcp("d\rocke\0y")

    def test_dockey_unicode_dcp(self):
        """Failover/DCP verification with a cyrillic unicode key."""
        self._dockey_dcp("привет")
| 44.505556 | 82 | 0.580202 |
from basetestcase import BaseTestCase
from couchbase_helper.documentgenerator import doc_generator
from membase.api.rest_client import RestConnection
from couchbase_helper.document import View
class DocumentKeysTests(BaseTestCase):
def setUp(self):
super(DocumentKeysTests, self).setUp()
nodes_init = self.cluster.servers[1:self.nodes_init] \
if self.nodes_init != 1 else []
self.task.rebalance([self.cluster.master], nodes_init, [])
self.cluster.nodes_in_cluster.extend([self.cluster.master]+nodes_init)
self.bucket_util.create_default_bucket(
bucket_type=self.bucket_type,
replica=self.num_replicas,
storage=self.bucket_storage,
eviction_policy=self.bucket_eviction_policy)
self.bucket_util.add_rbac_user()
self.cluster_util.print_cluster_stats()
self.bucket_util.print_bucket_stats()
self.log.info("====== DocumentKeysTests setUp complete ======")
def tearDown(self):
super(DocumentKeysTests, self).tearDown()
def _persist_and_verify(self):
self.bucket_util._wait_for_stats_all_buckets()
self.bucket_util.verify_stats_all_buckets(self.num_items)
def _verify_with_views(self, expected_rows):
for bucket in self.bucket_util.buckets:
default_map_func = 'function (doc, meta) { emit(meta.id, null);}'
default_view = View("View", default_map_func, None, False)
ddoc_name = "key_ddoc"
self.bucket_util.create_views(
self.cluster.master, ddoc_name, [default_view], bucket.name)
query = {"stale": "false", "connection_timeout": 60000}
self.bucket_util.query_view(self.cluster.master, ddoc_name,
default_view.name, query,
expected_rows, bucket=bucket.name)
def _dockey_data_ops(self, dockey="dockey"):
target_vb = None
if self.target_vbucket is not None:
target_vb = [self.target_vbucket]
gen_load = doc_generator(dockey, 0, self.num_items,
key_size=self.key_size,
doc_size=self.doc_size,
doc_type=self.doc_type,
vbuckets=self.cluster_util.vbuckets,
target_vbucket=target_vb)
bucket = self.bucket_util.get_all_buckets()[0]
for op_type in ["create", "update", "delete"]:
task = self.task.async_load_gen_docs(
self.cluster, bucket, gen_load, op_type, 0, batch_size=20,
persist_to=self.persist_to, replicate_to=self.replicate_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout,
sdk_client_pool=self.sdk_client_pool)
self.task.jython_task_manager.get_task_result(task)
if op_type == "delete":
self.num_items = 0
self._persist_and_verify()
def _dockey_views(self, dockey="dockey"):
gen_load = doc_generator(dockey, 0, self.num_items,
key_size=self.key_size,
doc_size=self.doc_size,
doc_type=self.doc_type,
vbuckets=self.cluster_util.vbuckets)
bucket = self.bucket_util.get_all_buckets()[0]
task = self.task.async_load_gen_docs(self.cluster, bucket,
gen_load, "create", 0,
batch_size=20,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout,
sdk_client_pool=self.sdk_client_pool)
self.task.jython_task_manager.get_task_result(task)
self._persist_and_verify()
self._verify_with_views(self.num_items)
def _dockey_dcp(self, dockey="dockey"):
gen_load = doc_generator(dockey, 0, self.num_items,
key_size=self.key_size,
doc_size=self.doc_size,
doc_type=self.doc_type,
vbuckets=self.cluster_util.vbuckets)
bucket = self.bucket_util.get_all_buckets()[0]
task = self.task.async_load_gen_docs(self.cluster, bucket,
gen_load, "create", 0,
batch_size=20,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout,
sdk_client_pool=self.sdk_client_pool)
self.task.jython_task_manager.get_task_result(task)
self._persist_and_verify()
rest = RestConnection(self.cluster.master)
num_nodes = len(rest.node_statuses())
self.assertTrue(num_nodes > 1,
"ERROR: Not enough nodes to do failover")
rest = RestConnection(self.cluster.master)
node_status = rest.node_statuses()
for node_to_failover in self.servers[(num_nodes - 1):num_nodes]:
for node in node_status:
if node_to_failover.ip == node.ip \
and int(node_to_failover.port) == int(node.port):
rest.fail_over(node.id, graceful=False)
self.cluster.nodes_in_cluster = \
list(set(self.cluster.nodes_in_cluster)
- set(self.servers[(num_nodes - 1):num_nodes]))
self._persist_and_verify()
def test_dockey_whitespace_data_ops(self):
generic_key = "d o c k e y"
if self.key_size:
self.key_size = self.key_size-len(generic_key)
generic_key = generic_key + "_" * self.key_size
self._dockey_data_ops(generic_key)
def test_dockey_binary_data_ops(self):
generic_key = "d\ro\nckey"
if self.key_size:
self.key_size = self.key_size-len(generic_key)
generic_key = generic_key + "\n" * self.key_size
self._dockey_data_ops(generic_key)
def test_dockey_unicode_data_ops(self):
generic_key = "\u00CA"
if self.key_size:
self.key_size = self.key_size-len(generic_key)
generic_key = generic_key + "é" * self.key_size
self._dockey_data_ops(generic_key)
def test_dockey_whitespace_views(self):
self._dockey_views("doc key ")
def test_dockey_binary_views(self):
self._dockey_views("docke\0y\n")
def test_dockey_unicode_views(self):
self._dockey_views("México")
def test_dockey_whitespace_dcp(self):
self._dockey_dcp("d o c k e y")
def test_dockey_binary_dcp(self):
self._dockey_dcp("d\rocke\0y")
def test_dockey_unicode_dcp(self):
self._dockey_dcp("привет")
| true | true |
f733fdf221450419c3d7c93286b3d35fe0b70e32 | 12,168 | py | Python | models/mli/model_skopes_rules.py | pragnesh-ai/driverlessai-recipes | 97371a2d2cd853cdeeb15037f462af96d81a7c0b | [
"Apache-2.0"
] | 194 | 2019-04-23T10:25:13.000Z | 2022-03-29T04:19:28.000Z | models/mli/model_skopes_rules.py | pragnesh-ai/driverlessai-recipes | 97371a2d2cd853cdeeb15037f462af96d81a7c0b | [
"Apache-2.0"
] | 50 | 2019-06-24T20:17:51.000Z | 2022-03-16T20:05:37.000Z | models/mli/model_skopes_rules.py | pragnesh-ai/driverlessai-recipes | 97371a2d2cd853cdeeb15037f462af96d81a7c0b | [
"Apache-2.0"
] | 85 | 2019-03-27T12:26:43.000Z | 2022-01-27T13:15:37.000Z | """Skopes rules """
import uuid
import os
import datatable as dt
import numpy as np
from h2oaicore.models import CustomModel
from sklearn.preprocessing import LabelEncoder
from h2oaicore.systemutils import physical_cores_count
from h2oaicore.systemutils import user_dir, remove, config
from h2oaicore.systemutils import make_experiment_logger, loggerinfo, loggerwarning, loggerdebug
class SKOPE_RULES(CustomModel):
    """Driverless AI custom model wrapping skope-rules (an interpretable
    rule-extraction classifier built on scikit-learn)."""
    # Supported problem types: binary classification only.
    _regression = False
    _binary = True
    _multiclass = False
    _display_name = "SKOPE RULES"
    _description = "SKOPE RULES"
    # using git master because pypi is very out of date (Jan 2020) but need Sept 1-ish master with fix for updated scikit-learn
    _modules_needed_by_name = ['git+https://github.com/scikit-learn-contrib/skope-rules.git']
    @staticmethod
    def do_acceptance_test():
        """Opt in to Driverless AI's recipe acceptance test."""
        return True
def set_default_params(self, accuracy=None, time_tolerance=None,
interpretability=None, **kwargs):
# Fill up parameters we care about
self.params = dict(random_state=kwargs.get("random_state", 1234),
max_depth_duplication=None, n_estimators=10,
precision_min=0.5, recall_min=0.01, max_samples=0.8,
max_samples_features=1.0, max_depth=3,
max_features="auto", min_samples_split=2,
bootstrap=False, bootstrap_features=False)
def mutate_params(self, accuracy=10, **kwargs):
if accuracy > 8:
max_depth_duplication = [None, 2, 3]
n_estimators = [10, 20, 40]
precision_min = [0.1, 0.2, 0.3]
recall_min = [0.01, 0.05]
max_samples = [0.5, 0.8, 1.0]
max_samples_features = [0.5, 0.8, 1.0]
max_depth = [3, 4, 5]
max_features = ["sqrt", "log2", "auto"]
min_samples_split = [2, 11, 21]
bootstrap = [True, False]
bootstrap_features = [True, False]
elif accuracy >= 5:
max_depth_duplication = [None]
n_estimators = [10, 20]
precision_min = [0.1, 0.2, 0.3]
recall_min = [0.01]
max_samples = [0.8, 1.0]
max_samples_features = [1.0]
max_depth = [3, 4]
max_features = ["sqrt", "log2", "auto"]
min_samples_split = [2, 5, 11]
bootstrap = [True, False]
bootstrap_features = [True, False]
else:
max_depth_duplication = [None]
n_estimators = [10]
precision_min = [0.1, 0.2]
recall_min = [0.01]
max_samples = [0.8, 1.0]
max_samples_features = [0.8, 1.0]
max_depth = [3, 4]
max_features = ["auto"]
min_samples_split = [2]
bootstrap = [True, False]
bootstrap_features = [True, False]
self.params["max_depth_duplication"] = np.random.choice(max_depth_duplication)
self.params["n_estimators"] = np.random.choice(n_estimators)
self.params["precision_min"] = np.random.choice(precision_min)
self.params["recall_min"] = np.random.choice(recall_min)
self.params["max_samples"] = np.random.choice(max_samples)
self.params["max_samples_features"] = np.random.choice(max_samples_features)
self.params["max_depth"] = np.random.choice(max_depth)
self.params["max_features"] = np.random.choice(max_features)
self.params["min_samples_split"] = np.random.choice(min_samples_split)
self.params["bootstrap"] = np.random.choice(bootstrap)
self.params["bootstrap_features"] = np.random.choice(bootstrap_features)
def _create_tmp_folder(self, logger):
# Create a temp folder to store files
# Set the default value without context available (required to pass acceptance test)
tmp_folder = os.path.join(user_dir(), "%s_SKOPE_model_folder" % uuid.uuid4())
# Make a real tmp folder when experiment is available
if self.context and self.context.experiment_id:
tmp_folder = os.path.join(self.context.experiment_tmp_dir, "%s_SKOPE_model_folder" % uuid.uuid4())
# Now let's try to create that folder
try:
os.mkdir(tmp_folder)
except PermissionError:
# This not occur so log a warning
loggerwarning(logger, "SKOPE was denied temp folder creation rights")
tmp_folder = os.path.join(user_dir(), "%s_SKOPE_model_folder" % uuid.uuid4())
os.mkdir(tmp_folder)
except FileExistsError:
# We should never be here since temp dir name is expected to be unique
loggerwarning(logger, "SKOPE temp folder already exists")
tmp_folder = os.path.join(self.context.experiment_tmp_dir, "%s_SKOPE_model_folder" % uuid.uuid4())
os.mkdir(tmp_folder)
except:
# Revert to temporary file path
tmp_folder = os.path.join(user_dir(), "%s_SKOPE_model_folder" % uuid.uuid4())
os.mkdir(tmp_folder)
loggerinfo(logger, "SKOPE temp folder {}".format(tmp_folder))
return tmp_folder
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):
orig_cols = list(X.names)
import pandas as pd
import numpy as np
from skrules import SkopeRules
from sklearn.preprocessing import OneHotEncoder
from collections import Counter
# Get the logger if it exists
logger = None
if self.context and self.context.experiment_id:
logger = make_experiment_logger(experiment_id=self.context.experiment_id,
tmp_dir=self.context.tmp_dir,
experiment_tmp_dir=self.context.experiment_tmp_dir)
# Set up temp folder
tmp_folder = self._create_tmp_folder(logger)
# Set up model
if self.num_classes >= 2:
lb = LabelEncoder()
lb.fit(self.labels)
y = lb.transform(y)
model = SkopeRules(max_depth_duplication=self.params["max_depth_duplication"],
n_estimators=self.params["n_estimators"],
precision_min=self.params["precision_min"],
recall_min=self.params["recall_min"],
max_samples=self.params["max_samples"],
max_samples_features=self.params["max_samples_features"],
max_depth=self.params["max_depth"],
max_features=self.params["max_features"],
min_samples_split=self.params["min_samples_split"],
bootstrap=self.params["bootstrap"],
bootstrap_features=self.params["bootstrap_features"],
random_state=self.params["random_state"],
feature_names=orig_cols)
else:
# Skopes doesn't work for regression
loggerinfo(logger, "PASS, no skopes model")
pass
# Find the datatypes
X = X.to_pandas()
X.columns = orig_cols
# Change continuous features to categorical
X_datatypes = [str(item) for item in list(X.dtypes)]
# Change all float32 values to float64
for ii in range(len(X_datatypes)):
if X_datatypes[ii] == 'float32':
X = X.astype({orig_cols[ii]: np.float64})
X_datatypes = [str(item) for item in list(X.dtypes)]
# List the categorical and numerical features
self.X_categorical = [orig_cols[col_count] for col_count in range(len(orig_cols)) if
(X_datatypes[col_count] == 'category') or (X_datatypes[col_count] == 'object')]
self.X_numeric = [item for item in orig_cols if item not in self.X_categorical]
# Find the levels and mode for each categorical feature
# for use in the test set
self.train_levels = {}
for item in self.X_categorical:
self.train_levels[item] = list(set(X[item]))
self.train_mode[item] = Counter(X[item]).most_common(1)[0][0]
# One hot encode the categorical features
# And replace missing values with a Missing category
if len(self.X_categorical) > 0:
loggerinfo(logger, "PCategorical encode")
for colname in self.X_categorical:
X[colname] = list(X[colname].fillna("Missing"))
self.enc = OneHotEncoder(handle_unknown='ignore')
self.enc.fit(X[self.X_categorical])
self.encoded_categories = list(self.enc.get_feature_names(input_features=self.X_categorical))
X_enc = self.enc.transform(X[self.X_categorical]).toarray()
X = pd.concat([X[self.X_numeric], pd.DataFrame(X_enc, columns=self.encoded_categories)], axis=1)
# Replace missing values with a missing value code
if len(self.X_numeric) > 0:
for colname in self.X_numeric:
X[colname] = list(X[colname].fillna(-999))
model.fit(np.array(X), np.array(y))
# Find the rule list
self.rule_list = model.rules_
# Calculate feature importances
var_imp = []
for var in orig_cols:
var_imp.append(sum(int(var in item[0]) for item in self.rule_list))
if max(var_imp) != 0:
importances = list(np.array(var_imp) / max(var_imp))
else:
importances = [1] * len(var_imp)
pd.DataFrame(model.rules_, columns=['Rule', '(Precision, Recall, nb)']).to_csv(
os.path.join(tmp_folder, 'Skope_rules.csv'), index=False)
self.mean_target = np.array(sum(y) / len(y))
# Set model properties
self.set_model_properties(model=model,
features=list(X.columns),
importances=importances,
iterations=self.params['n_estimators'])
def predict(self, X, **kwargs):
orig_cols = list(X.names)
import pandas as pd
X = dt.Frame(X)
# Find datatypes
X = X.to_pandas()
X_datatypes = [str(item) for item in list(X.dtypes)]
# Change float 32 values to float 64
for ii in range(len(X_datatypes)):
if X_datatypes[ii] == 'float32':
X = X.astype({orig_cols[ii]: np.float64})
# Replace missing values with a missing category
# Replace categories that weren't in the training set with the mode
if len(self.X_categorical) > 0:
for colname in self.X_categorical:
X[colname] = list(X[colname].fillna("Missing"))
for label in self.X_categorical:
# Replace anything not in the test set
train_categories = self.train_levels[label]
X_label = np.array(X[label])
mmode = self.train_mode[label]
X_label[~np.isin(X_label, train_categories)] = mmode
X[label] = X_label
# Replace missing values with a missing value code
if len(self.X_numeric) > 0:
for colname in self.X_numeric:
X[colname] = list(X[colname].fillna(-999))
# Get model
model, _, _, _ = self.get_model_properties()
# One hot encode categorical features
if len(self.X_categorical) > 0:
X_enc = self.enc.transform(X[self.X_categorical]).toarray()
X = pd.concat([X[self.X_numeric], pd.DataFrame(X_enc, columns=self.encoded_categories)], axis=1)
# Make predictions on the test set
preds = model.score_top_rules(X) / len(self.rule_list)
preds = np.array(preds)
epsilon = 10 ** (-3)
preds[np.isnan(preds)] = self.mean_target
preds[preds > 1 - epsilon] = 1.0 - epsilon
preds[preds < 0 + epsilon] = 0.0 + epsilon
return preds
| 42.397213 | 127 | 0.591387 |
import uuid
import os
import datatable as dt
import numpy as np
from h2oaicore.models import CustomModel
from sklearn.preprocessing import LabelEncoder
from h2oaicore.systemutils import physical_cores_count
from h2oaicore.systemutils import user_dir, remove, config
from h2oaicore.systemutils import make_experiment_logger, loggerinfo, loggerwarning, loggerdebug
class SKOPE_RULES(CustomModel):
_regression = False
_binary = True
_multiclass = False
_display_name = "SKOPE RULES"
_description = "SKOPE RULES"
_modules_needed_by_name = ['git+https://github.com/scikit-learn-contrib/skope-rules.git']
@staticmethod
def do_acceptance_test():
return True
def set_default_params(self, accuracy=None, time_tolerance=None,
interpretability=None, **kwargs):
self.params = dict(random_state=kwargs.get("random_state", 1234),
max_depth_duplication=None, n_estimators=10,
precision_min=0.5, recall_min=0.01, max_samples=0.8,
max_samples_features=1.0, max_depth=3,
max_features="auto", min_samples_split=2,
bootstrap=False, bootstrap_features=False)
def mutate_params(self, accuracy=10, **kwargs):
if accuracy > 8:
max_depth_duplication = [None, 2, 3]
n_estimators = [10, 20, 40]
precision_min = [0.1, 0.2, 0.3]
recall_min = [0.01, 0.05]
max_samples = [0.5, 0.8, 1.0]
max_samples_features = [0.5, 0.8, 1.0]
max_depth = [3, 4, 5]
max_features = ["sqrt", "log2", "auto"]
min_samples_split = [2, 11, 21]
bootstrap = [True, False]
bootstrap_features = [True, False]
elif accuracy >= 5:
max_depth_duplication = [None]
n_estimators = [10, 20]
precision_min = [0.1, 0.2, 0.3]
recall_min = [0.01]
max_samples = [0.8, 1.0]
max_samples_features = [1.0]
max_depth = [3, 4]
max_features = ["sqrt", "log2", "auto"]
min_samples_split = [2, 5, 11]
bootstrap = [True, False]
bootstrap_features = [True, False]
else:
max_depth_duplication = [None]
n_estimators = [10]
precision_min = [0.1, 0.2]
recall_min = [0.01]
max_samples = [0.8, 1.0]
max_samples_features = [0.8, 1.0]
max_depth = [3, 4]
max_features = ["auto"]
min_samples_split = [2]
bootstrap = [True, False]
bootstrap_features = [True, False]
self.params["max_depth_duplication"] = np.random.choice(max_depth_duplication)
self.params["n_estimators"] = np.random.choice(n_estimators)
self.params["precision_min"] = np.random.choice(precision_min)
self.params["recall_min"] = np.random.choice(recall_min)
self.params["max_samples"] = np.random.choice(max_samples)
self.params["max_samples_features"] = np.random.choice(max_samples_features)
self.params["max_depth"] = np.random.choice(max_depth)
self.params["max_features"] = np.random.choice(max_features)
self.params["min_samples_split"] = np.random.choice(min_samples_split)
self.params["bootstrap"] = np.random.choice(bootstrap)
self.params["bootstrap_features"] = np.random.choice(bootstrap_features)
def _create_tmp_folder(self, logger):
tmp_folder = os.path.join(user_dir(), "%s_SKOPE_model_folder" % uuid.uuid4())
if self.context and self.context.experiment_id:
tmp_folder = os.path.join(self.context.experiment_tmp_dir, "%s_SKOPE_model_folder" % uuid.uuid4())
try:
os.mkdir(tmp_folder)
except PermissionError:
# This not occur so log a warning
loggerwarning(logger, "SKOPE was denied temp folder creation rights")
tmp_folder = os.path.join(user_dir(), "%s_SKOPE_model_folder" % uuid.uuid4())
os.mkdir(tmp_folder)
except FileExistsError:
# We should never be here since temp dir name is expected to be unique
loggerwarning(logger, "SKOPE temp folder already exists")
tmp_folder = os.path.join(self.context.experiment_tmp_dir, "%s_SKOPE_model_folder" % uuid.uuid4())
os.mkdir(tmp_folder)
except:
# Revert to temporary file path
tmp_folder = os.path.join(user_dir(), "%s_SKOPE_model_folder" % uuid.uuid4())
os.mkdir(tmp_folder)
loggerinfo(logger, "SKOPE temp folder {}".format(tmp_folder))
return tmp_folder
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):
orig_cols = list(X.names)
import pandas as pd
import numpy as np
from skrules import SkopeRules
from sklearn.preprocessing import OneHotEncoder
from collections import Counter
# Get the logger if it exists
logger = None
if self.context and self.context.experiment_id:
logger = make_experiment_logger(experiment_id=self.context.experiment_id,
tmp_dir=self.context.tmp_dir,
experiment_tmp_dir=self.context.experiment_tmp_dir)
# Set up temp folder
tmp_folder = self._create_tmp_folder(logger)
# Set up model
if self.num_classes >= 2:
lb = LabelEncoder()
lb.fit(self.labels)
y = lb.transform(y)
model = SkopeRules(max_depth_duplication=self.params["max_depth_duplication"],
n_estimators=self.params["n_estimators"],
precision_min=self.params["precision_min"],
recall_min=self.params["recall_min"],
max_samples=self.params["max_samples"],
max_samples_features=self.params["max_samples_features"],
max_depth=self.params["max_depth"],
max_features=self.params["max_features"],
min_samples_split=self.params["min_samples_split"],
bootstrap=self.params["bootstrap"],
bootstrap_features=self.params["bootstrap_features"],
random_state=self.params["random_state"],
feature_names=orig_cols)
else:
# Skopes doesn't work for regression
loggerinfo(logger, "PASS, no skopes model")
pass
X = X.to_pandas()
X.columns = orig_cols
X_datatypes = [str(item) for item in list(X.dtypes)]
for ii in range(len(X_datatypes)):
if X_datatypes[ii] == 'float32':
X = X.astype({orig_cols[ii]: np.float64})
X_datatypes = [str(item) for item in list(X.dtypes)]
self.X_categorical = [orig_cols[col_count] for col_count in range(len(orig_cols)) if
(X_datatypes[col_count] == 'category') or (X_datatypes[col_count] == 'object')]
self.X_numeric = [item for item in orig_cols if item not in self.X_categorical]
self.train_levels = {}
for item in self.X_categorical:
self.train_levels[item] = list(set(X[item]))
self.train_mode[item] = Counter(X[item]).most_common(1)[0][0]
if len(self.X_categorical) > 0:
loggerinfo(logger, "PCategorical encode")
for colname in self.X_categorical:
X[colname] = list(X[colname].fillna("Missing"))
self.enc = OneHotEncoder(handle_unknown='ignore')
self.enc.fit(X[self.X_categorical])
self.encoded_categories = list(self.enc.get_feature_names(input_features=self.X_categorical))
X_enc = self.enc.transform(X[self.X_categorical]).toarray()
X = pd.concat([X[self.X_numeric], pd.DataFrame(X_enc, columns=self.encoded_categories)], axis=1)
if len(self.X_numeric) > 0:
for colname in self.X_numeric:
X[colname] = list(X[colname].fillna(-999))
model.fit(np.array(X), np.array(y))
self.rule_list = model.rules_
var_imp = []
for var in orig_cols:
var_imp.append(sum(int(var in item[0]) for item in self.rule_list))
if max(var_imp) != 0:
importances = list(np.array(var_imp) / max(var_imp))
else:
importances = [1] * len(var_imp)
pd.DataFrame(model.rules_, columns=['Rule', '(Precision, Recall, nb)']).to_csv(
os.path.join(tmp_folder, 'Skope_rules.csv'), index=False)
self.mean_target = np.array(sum(y) / len(y))
self.set_model_properties(model=model,
features=list(X.columns),
importances=importances,
iterations=self.params['n_estimators'])
def predict(self, X, **kwargs):
orig_cols = list(X.names)
import pandas as pd
X = dt.Frame(X)
X = X.to_pandas()
X_datatypes = [str(item) for item in list(X.dtypes)]
for ii in range(len(X_datatypes)):
if X_datatypes[ii] == 'float32':
X = X.astype({orig_cols[ii]: np.float64})
if len(self.X_categorical) > 0:
for colname in self.X_categorical:
X[colname] = list(X[colname].fillna("Missing"))
for label in self.X_categorical:
# Replace anything not in the test set
train_categories = self.train_levels[label]
X_label = np.array(X[label])
mmode = self.train_mode[label]
X_label[~np.isin(X_label, train_categories)] = mmode
X[label] = X_label
# Replace missing values with a missing value code
if len(self.X_numeric) > 0:
for colname in self.X_numeric:
X[colname] = list(X[colname].fillna(-999))
# Get model
model, _, _, _ = self.get_model_properties()
# One hot encode categorical features
if len(self.X_categorical) > 0:
X_enc = self.enc.transform(X[self.X_categorical]).toarray()
X = pd.concat([X[self.X_numeric], pd.DataFrame(X_enc, columns=self.encoded_categories)], axis=1)
# Make predictions on the test set
preds = model.score_top_rules(X) / len(self.rule_list)
preds = np.array(preds)
epsilon = 10 ** (-3)
preds[np.isnan(preds)] = self.mean_target
preds[preds > 1 - epsilon] = 1.0 - epsilon
preds[preds < 0 + epsilon] = 0.0 + epsilon
return preds
| true | true |
f733fdfd6251a8d71a648d7c85c3dac02509dfc4 | 75 | py | Python | cumulogenesis.py | stelligent/cumulogenesis | f5a3587aebd2592642c98cb4ad93d52a927dceeb | [
"MIT"
] | 1 | 2021-03-22T21:50:10.000Z | 2021-03-22T21:50:10.000Z | cumulogenesis.py | stelligent/cumulogenesis | f5a3587aebd2592642c98cb4ad93d52a927dceeb | [
"MIT"
] | 1 | 2021-03-25T22:23:04.000Z | 2021-03-25T22:23:04.000Z | cumulogenesis.py | stelligent/cumulogenesis | f5a3587aebd2592642c98cb4ad93d52a927dceeb | [
"MIT"
] | 1 | 2019-04-03T19:09:34.000Z | 2019-04-03T19:09:34.000Z | #!/usr/bin/env python
from cumulogenesis.interfaces import cli
cli.run()
| 12.5 | 40 | 0.76 |
from cumulogenesis.interfaces import cli
cli.run()
| true | true |
f733fe3381230dc5d35efe6c185f98aa454d97ea | 1,378 | py | Python | scripts/train_volleyball_stage2_dynamic.py | daniel-richter/DIN_GAR | f97759038936ad36359cb8c0d9ff0951d2482e25 | [
"MIT"
] | 14 | 2021-11-29T08:11:07.000Z | 2022-02-26T14:23:28.000Z | scripts/train_volleyball_stage2_dynamic.py | daniel-richter/DIN_GAR | f97759038936ad36359cb8c0d9ff0951d2482e25 | [
"MIT"
] | 9 | 2021-08-31T11:55:49.000Z | 2021-11-21T03:29:33.000Z | scripts/train_volleyball_stage2_dynamic.py | daniel-richter/DIN_GAR | f97759038936ad36359cb8c0d9ff0951d2482e25 | [
"MIT"
] | 6 | 2021-09-16T11:41:54.000Z | 2021-11-10T09:27:19.000Z | import sys
sys.path.append(".")
from train_net_dynamic import *
cfg=Config('volleyball')
cfg.inference_module_name = 'dynamic_volleyball'
cfg.device_list = "0,1"
cfg.use_gpu = True
cfg.use_multi_gpu = True
cfg.training_stage = 2
cfg.train_backbone = True
cfg.test_before_train = False
cfg.test_interval_epoch = 1
# vgg16 setup
cfg.backbone = 'vgg16'
cfg.stage1_model_path = 'result/basemodel_VD_vgg16.pth'
cfg.out_size = 22, 40
cfg.emb_features = 512
# res18 setup
# cfg.backbone = 'res18'
# cfg.stage1_model_path = 'result/basemodel_VD_res18.pth'
# cfg.out_size = 23, 40
# cfg.emb_features = 512
# Dynamic Inference setup
cfg.group = 1
cfg.stride = 1
cfg.ST_kernel_size = [(3, 3)] #[(3, 3),(3, 3),(3, 3),(3, 3)]
cfg.dynamic_sampling = True
cfg.sampling_ratio = [1]
cfg.lite_dim = 128 # None # 128
cfg.scale_factor = True
cfg.beta_factor = False
cfg.hierarchical_inference = False
cfg.parallel_inference = False
cfg.num_DIM = 1
cfg.train_dropout_prob = 0.3
cfg.batch_size = 2
cfg.test_batch_size = 1
cfg.num_frames = 10
cfg.load_backbone_stage2 = True
cfg.train_learning_rate = 1e-4
# cfg.lr_plan = {11: 3e-5, 21: 1e-5}
# cfg.max_epoch = 60
# cfg.lr_plan = {11: 3e-5, 21: 1e-5}
cfg.lr_plan = {11: 1e-5}
cfg.max_epoch = 30
cfg.actions_weights = [[1., 1., 2., 3., 1., 2., 2., 0.2, 1.]]
cfg.exp_note = 'Dynamic Volleyball_stage2_res18_litedim128_reproduce_1'
train_net(cfg)
| 24.607143 | 71 | 0.727866 | import sys
sys.path.append(".")
from train_net_dynamic import *
cfg=Config('volleyball')
cfg.inference_module_name = 'dynamic_volleyball'
cfg.device_list = "0,1"
cfg.use_gpu = True
cfg.use_multi_gpu = True
cfg.training_stage = 2
cfg.train_backbone = True
cfg.test_before_train = False
cfg.test_interval_epoch = 1
cfg.backbone = 'vgg16'
cfg.stage1_model_path = 'result/basemodel_VD_vgg16.pth'
cfg.out_size = 22, 40
cfg.emb_features = 512
cfg.group = 1
cfg.stride = 1
cfg.ST_kernel_size = [(3, 3)]
cfg.dynamic_sampling = True
cfg.sampling_ratio = [1]
cfg.lite_dim = 128 scale_factor = True
cfg.beta_factor = False
cfg.hierarchical_inference = False
cfg.parallel_inference = False
cfg.num_DIM = 1
cfg.train_dropout_prob = 0.3
cfg.batch_size = 2
cfg.test_batch_size = 1
cfg.num_frames = 10
cfg.load_backbone_stage2 = True
cfg.train_learning_rate = 1e-4
cfg.lr_plan = {11: 1e-5}
cfg.max_epoch = 30
cfg.actions_weights = [[1., 1., 2., 3., 1., 2., 2., 0.2, 1.]]
cfg.exp_note = 'Dynamic Volleyball_stage2_res18_litedim128_reproduce_1'
train_net(cfg)
| true | true |
f733ffc85633950fbe996e09698c90caf6a8e6e8 | 8,278 | py | Python | test_tesseract.py | mlissner/tesseract-performance-testing | f0040987ef9ccbaf65eb786301637fcdb00ef3b5 | [
"MIT"
] | 2 | 2016-08-25T23:36:42.000Z | 2018-03-15T20:51:58.000Z | test_tesseract.py | mlissner/tesseract-performance-testing | f0040987ef9ccbaf65eb786301637fcdb00ef3b5 | [
"MIT"
] | null | null | null | test_tesseract.py | mlissner/tesseract-performance-testing | f0040987ef9ccbaf65eb786301637fcdb00ef3b5 | [
"MIT"
] | null | null | null | import glob
import os
import subprocess
import tempfile
import time
import cStringIO
from wand.color import Color
from wand.image import Image
PATH = './test_assets/*.pdf'
def temp_name():
""" returns a temporary file-name """
tmpfile = tempfile.NamedTemporaryFile(prefix="tess_")
return tmpfile.name
def convert_to_txt(tmp_file_prefix):
tess_out = ''
for png in sorted(glob.glob('%s*.png' % tmp_file_prefix)):
tesseract_command = ['tesseract', png, png[:-4], '-l', 'eng']
tess_out = subprocess.check_output(
tesseract_command,
stderr=subprocess.STDOUT
)
return tess_out
def convert_blob_to_text(blob):
"""Do Tesseract work, but use a blob as input instead of a file."""
tesseract_command = ['tesseract', 'stdin', 'stdout', '-l', 'eng']
p = subprocess.Popen(
tesseract_command,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT
)
return p.communicate(input=blob)[0]
def convert_file_to_txt(path):
tesseract_command = ['tesseract', path, 'stdout', '-l', 'eng']
p = subprocess.Popen(
tesseract_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
return p.communicate()[0]
def convert_to_pngs(command):
subprocess.check_output(command,
stderr=subprocess.STDOUT)
def avg(l):
"""Make the average of a list"""
return sum(l) / len(l)
def subprocess_approach():
# Basic approach using subprocess and writing things to disk.
methods = {
'current': ['convert',
'-depth', '4',
'-density', '300',
'-background', 'white', '+matte'],
'grayscale': ['convert',
'-depth', '4',
'-density', '300',
'-background', 'white', '+matte',
'-colorspace', 'Gray'],
'smaller': ['convert',
'-depth', '4',
'-density', '200',
'-background', 'white', '+matte'],
}
for method_name, command in methods.items():
print("\n\nAttempting method: %s" % method_name)
image_cpu_timing = []
tess_cpu_timing = []
image_wall_timing = []
tess_wall_timing = []
for path in sorted(glob.glob(PATH)):
out_name = temp_name()
print(" Doing: %s" % path)
print(" Using temp dir: %s" % out_name)
try:
print(" Doing image conversion.")
full_command = command + [path, '%s-%%03d.png' % out_name]
t1_cpu = time.clock()
t1_wall = time.time()
convert_to_pngs(full_command)
image_cpu_timing.append(time.clock() - t1_cpu)
image_wall_timing.append(time.time() - t1_wall)
print(" Doing tesseract command.")
t1_cpu = time.clock()
t1_wall = time.time()
convert_to_txt(out_name)
tess_cpu_timing.append(time.clock() - t1_cpu)
tess_wall_timing.append(time.time() - t1_wall)
finally:
# Remove tmp_file and the text file
for f in glob.glob('%s*' % out_name):
try:
os.remove(f)
except OSError:
pass
print(u" Sys, Real")
print(u" Average image conversion was %s, %s" % (
avg(image_cpu_timing),
avg(image_wall_timing),
))
print(u" Average tess conversion was %s, %s" % (
avg(tess_cpu_timing),
avg(tess_wall_timing),
))
print(u" Total image time was: %s, %s" % (
sum(image_cpu_timing), sum(image_wall_timing)
))
print(u" Total tess time was: %s, %s" % (
sum(tess_cpu_timing), sum(tess_wall_timing)
))
print(u" Grand total was %s, %s" % (
sum(image_cpu_timing) + sum(tess_cpu_timing),
sum(image_wall_timing) + sum(tess_wall_timing),
))
def wand_approach():
# New Approach using Wand to create files
# Install libmagickwand-dev!
image_cpu_timing = []
tess_cpu_timing = []
image_wall_timing = []
tess_wall_timing = []
for path in sorted(glob.glob(PATH)):
print(" Doing: %s" % path)
all_pages = Image(filename=path, resolution=150)
for i, img in enumerate(all_pages.sequence):
t1_cpu = time.clock()
t1_wall = time.time()
with Image(img) as img_out:
img_out.format = 'png'
img_out.background_color = Color('white')
img_out.alpha_channel = 'remove'
img_out.depth = 4
img_out.type = "grayscale"
img_out.resolution = 150
#img_out.save(filename='%s-%03d.png' % (path[:-4], i))
img_bin = img_out.make_blob('png')
image_cpu_timing.append(time.clock() - t1_cpu)
image_wall_timing.append(time.time() - t1_wall)
# Do Tesseract on the binary data
t1_cpu = time.clock()
t1_wall = time.time()
txt = convert_blob_to_text(img_bin)
tess_cpu_timing.append(time.clock() - t1_cpu)
tess_wall_timing.append(time.time() - t1_wall)
print(u" Sys, Real")
print(u" Average image conversion was %s, %s" % (
avg(image_cpu_timing),
avg(image_wall_timing),
))
print(u" Average tess conversion was %s, %s" % (
avg(tess_cpu_timing),
avg(tess_wall_timing),
))
print(u" Total image time was: %s, %s" % (
sum(image_cpu_timing), sum(image_wall_timing)
))
print(u" Total tess time was: %s, %s" % (
sum(tess_cpu_timing), sum(tess_wall_timing)
))
print(u" Grand total was %s, %s" % (
sum(image_cpu_timing) + sum(tess_cpu_timing),
sum(image_wall_timing) + sum(tess_wall_timing),
))
def multipage_tiff_approach():
"""Theory: Initializing Tesseract for every page takes time.
Hypothesis: Using a multi-page tiff will allow it only to be initialized
once, saving time.
"""
image_cpu_timing = []
tess_cpu_timing = []
image_wall_timing = []
tess_wall_timing = []
for path in sorted(glob.glob(PATH)):
print(" Doing: %s" % path)
all_pages = Image(filename=path, resolution=300)
tiff_out = Image()
t1_cpu = time.clock()
t1_wall = time.time()
for i, img in enumerate(all_pages.sequence):
with Image(img) as img_out:
img_out.background_color = Color('white')
img_out.alpha_channel = 'remove'
img_out.depth = 4
img_out.type = "grayscale"
tiff_out.sequence.append(img_out)
tiff_bin = cStringIO.StringIO()
tiff_out.format = 'tiff'
tiff_out.save(file=tiff_bin)
image_cpu_timing.append(time.clock() - t1_cpu)
image_wall_timing.append(time.time() - t1_wall)
# Do Tesseract on the binary data
t1_cpu = time.clock()
t1_wall = time.time()
txt = convert_blob_to_text(tiff_bin.getvalue())
tess_cpu_timing.append(time.clock() - t1_cpu)
tess_wall_timing.append(time.time() - t1_wall)
print(u" Sys, Real")
print(u" Average image conversion was %s, %s" % (
avg(image_cpu_timing),
avg(image_wall_timing),
))
print(u" Average tess conversion was %s, %s" % (
avg(tess_cpu_timing),
avg(tess_wall_timing),
))
print(u" Total image time was: %s, %s" % (
sum(image_cpu_timing), sum(image_wall_timing)
))
print(u" Total tess time was: %s, %s" % (
sum(tess_cpu_timing), sum(tess_wall_timing)
))
print(u" Grand total was %s, %s" % (
sum(image_cpu_timing) + sum(tess_cpu_timing),
sum(image_wall_timing) + sum(tess_wall_timing),
))
subprocess_approach()
wand_approach()
multipage_tiff_approach()
| 33.51417 | 76 | 0.553395 | import glob
import os
import subprocess
import tempfile
import time
import cStringIO
from wand.color import Color
from wand.image import Image
PATH = './test_assets/*.pdf'
def temp_name():
tmpfile = tempfile.NamedTemporaryFile(prefix="tess_")
return tmpfile.name
def convert_to_txt(tmp_file_prefix):
tess_out = ''
for png in sorted(glob.glob('%s*.png' % tmp_file_prefix)):
tesseract_command = ['tesseract', png, png[:-4], '-l', 'eng']
tess_out = subprocess.check_output(
tesseract_command,
stderr=subprocess.STDOUT
)
return tess_out
def convert_blob_to_text(blob):
tesseract_command = ['tesseract', 'stdin', 'stdout', '-l', 'eng']
p = subprocess.Popen(
tesseract_command,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT
)
return p.communicate(input=blob)[0]
def convert_file_to_txt(path):
tesseract_command = ['tesseract', path, 'stdout', '-l', 'eng']
p = subprocess.Popen(
tesseract_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
return p.communicate()[0]
def convert_to_pngs(command):
subprocess.check_output(command,
stderr=subprocess.STDOUT)
def avg(l):
return sum(l) / len(l)
def subprocess_approach():
methods = {
'current': ['convert',
'-depth', '4',
'-density', '300',
'-background', 'white', '+matte'],
'grayscale': ['convert',
'-depth', '4',
'-density', '300',
'-background', 'white', '+matte',
'-colorspace', 'Gray'],
'smaller': ['convert',
'-depth', '4',
'-density', '200',
'-background', 'white', '+matte'],
}
for method_name, command in methods.items():
print("\n\nAttempting method: %s" % method_name)
image_cpu_timing = []
tess_cpu_timing = []
image_wall_timing = []
tess_wall_timing = []
for path in sorted(glob.glob(PATH)):
out_name = temp_name()
print(" Doing: %s" % path)
print(" Using temp dir: %s" % out_name)
try:
print(" Doing image conversion.")
full_command = command + [path, '%s-%%03d.png' % out_name]
t1_cpu = time.clock()
t1_wall = time.time()
convert_to_pngs(full_command)
image_cpu_timing.append(time.clock() - t1_cpu)
image_wall_timing.append(time.time() - t1_wall)
print(" Doing tesseract command.")
t1_cpu = time.clock()
t1_wall = time.time()
convert_to_txt(out_name)
tess_cpu_timing.append(time.clock() - t1_cpu)
tess_wall_timing.append(time.time() - t1_wall)
finally:
for f in glob.glob('%s*' % out_name):
try:
os.remove(f)
except OSError:
pass
print(u" Sys, Real")
print(u" Average image conversion was %s, %s" % (
avg(image_cpu_timing),
avg(image_wall_timing),
))
print(u" Average tess conversion was %s, %s" % (
avg(tess_cpu_timing),
avg(tess_wall_timing),
))
print(u" Total image time was: %s, %s" % (
sum(image_cpu_timing), sum(image_wall_timing)
))
print(u" Total tess time was: %s, %s" % (
sum(tess_cpu_timing), sum(tess_wall_timing)
))
print(u" Grand total was %s, %s" % (
sum(image_cpu_timing) + sum(tess_cpu_timing),
sum(image_wall_timing) + sum(tess_wall_timing),
))
def wand_approach():
image_cpu_timing = []
tess_cpu_timing = []
image_wall_timing = []
tess_wall_timing = []
for path in sorted(glob.glob(PATH)):
print(" Doing: %s" % path)
all_pages = Image(filename=path, resolution=150)
for i, img in enumerate(all_pages.sequence):
t1_cpu = time.clock()
t1_wall = time.time()
with Image(img) as img_out:
img_out.format = 'png'
img_out.background_color = Color('white')
img_out.alpha_channel = 'remove'
img_out.depth = 4
img_out.type = "grayscale"
img_out.resolution = 150
img_bin = img_out.make_blob('png')
image_cpu_timing.append(time.clock() - t1_cpu)
image_wall_timing.append(time.time() - t1_wall)
t1_cpu = time.clock()
t1_wall = time.time()
txt = convert_blob_to_text(img_bin)
tess_cpu_timing.append(time.clock() - t1_cpu)
tess_wall_timing.append(time.time() - t1_wall)
print(u" Sys, Real")
print(u" Average image conversion was %s, %s" % (
avg(image_cpu_timing),
avg(image_wall_timing),
))
print(u" Average tess conversion was %s, %s" % (
avg(tess_cpu_timing),
avg(tess_wall_timing),
))
print(u" Total image time was: %s, %s" % (
sum(image_cpu_timing), sum(image_wall_timing)
))
print(u" Total tess time was: %s, %s" % (
sum(tess_cpu_timing), sum(tess_wall_timing)
))
print(u" Grand total was %s, %s" % (
sum(image_cpu_timing) + sum(tess_cpu_timing),
sum(image_wall_timing) + sum(tess_wall_timing),
))
def multipage_tiff_approach():
    """Time PDF-to-text conversion via a single in-memory multipage TIFF.

    Each PDF matched by the module-level ``PATH`` glob is converted to one
    grayscale multipage TIFF (so the OCR engine runs once per document rather
    than once per page) and passed to ``convert_blob_to_text``; per-phase CPU
    and wall-clock timings are collected and summarized.
    """
    image_cpu_timing = []
    tess_cpu_timing = []
    image_wall_timing = []
    tess_wall_timing = []
    for path in sorted(glob.glob(PATH)):
        print(" Doing: %s" % path)
        # Fix: release both the source document and the assembled TIFF once
        # the blob has been captured (previously both Image objects leaked).
        with Image(filename=path, resolution=300) as all_pages, \
                Image() as tiff_out:
            t1_cpu = time.clock()
            t1_wall = time.time()
            for img in all_pages.sequence:
                with Image(img) as img_out:
                    img_out.background_color = Color('white')
                    img_out.alpha_channel = 'remove'
                    img_out.depth = 4
                    img_out.type = "grayscale"
                    tiff_out.sequence.append(img_out)
            tiff_bin = cStringIO.StringIO()
            tiff_out.format = 'tiff'
            tiff_out.save(file=tiff_bin)
            image_cpu_timing.append(time.clock() - t1_cpu)
            image_wall_timing.append(time.time() - t1_wall)
        t1_cpu = time.clock()
        t1_wall = time.time()
        # Return value intentionally discarded: only the OCR timing matters.
        convert_blob_to_text(tiff_bin.getvalue())
        tess_cpu_timing.append(time.clock() - t1_cpu)
        tess_wall_timing.append(time.time() - t1_wall)
    print(u" Sys, Real")
    print(u" Average image conversion was %s, %s" % (
        avg(image_cpu_timing),
        avg(image_wall_timing),
    ))
    print(u" Average tess conversion was %s, %s" % (
        avg(tess_cpu_timing),
        avg(tess_wall_timing),
    ))
    print(u" Total image time was: %s, %s" % (
        sum(image_cpu_timing), sum(image_wall_timing)
    ))
    print(u" Total tess time was: %s, %s" % (
        sum(tess_cpu_timing), sum(tess_wall_timing)
    ))
    print(u" Grand total was %s, %s" % (
        sum(image_cpu_timing) + sum(tess_cpu_timing),
        sum(image_wall_timing) + sum(tess_wall_timing),
    ))
# Run each conversion strategy in turn so their printed timing summaries can
# be compared side by side.
subprocess_approach()
wand_approach()
multipage_tiff_approach()
| true | true |
f73400680cc5eaab78a41bb4860f6f92218c7d1e | 1,297 | py | Python | src/test/python/hellopymsdl_test/test__main__.py | St4rG00se/pymsdl_template | 779ffcf0fc249e44a406c6b02f439923f5d4caad | [
"MIT"
] | 1 | 2022-03-04T17:05:16.000Z | 2022-03-04T17:05:16.000Z | src/test/python/hellopymsdl_test/test__main__.py | St4rG00se/pymsdl_template | 779ffcf0fc249e44a406c6b02f439923f5d4caad | [
"MIT"
] | null | null | null | src/test/python/hellopymsdl_test/test__main__.py | St4rG00se/pymsdl_template | 779ffcf0fc249e44a406c6b02f439923f5d4caad | [
"MIT"
] | 1 | 2022-02-22T10:10:47.000Z | 2022-02-22T10:10:47.000Z | """Main file tests"""
from typing import Final
from unittest.mock import patch, MagicMock, call
class TestHello:
    """hello function Tests"""

    class TestNominalCase:

        # Decorators apply bottom-up, so the ``builtins.print`` patch is the
        # first injected argument.
        @patch('hellopymsdl.service.MessageService.MessageService')
        @patch('builtins.print')
        def test_call_hello__should__print_message_from_message_service(
                self, print_mock: MagicMock, message_service_mock: MagicMock
        ) -> None:
            """hello() prints the banner followed by the service's message."""
            # GIVEN: the service only serves the expected resource name.
            expected_message: Final[str] = "My test message"

            def fake_get_message(file_name: str) -> str:
                if file_name == "message.txt":
                    return expected_message
                raise FileNotFoundError

            message_service_mock.return_value.get_message.side_effect = fake_get_message

            # WHEN: import deferred so the patches are active during the call.
            from hellopymsdl.__main__ import hello
            hello()

            # THEN: one service lookup, two prints, in this exact order.
            assert message_service_mock.return_value.get_message.call_count == 1
            assert print_mock.call_count == 2
            print_mock.assert_has_calls([
                call("hello python with Maven Standard Directory Layout"),
                call(expected_message)
            ])
| 34.131579 | 94 | 0.619121 | from typing import Final
from unittest.mock import patch, MagicMock, call
class TestHello:
    """Tests for the ``hello`` entry point of ``hellopymsdl.__main__``."""

    class TestNominalCase:

        # Decorators apply bottom-up, so ``print_mock`` (the inner
        # ``builtins.print`` patch) is the first injected argument.
        @patch('hellopymsdl.service.MessageService.MessageService')
        @patch('builtins.print')
        def test_call_hello__should__print_message_from_message_service(
                self, print_mock: MagicMock, message_service_mock: MagicMock
        ) -> None:
            """hello() should print a banner plus the message-service message."""
            # GIVEN: the mocked service returns a fixed message for
            # "message.txt" and raises for any other file name.
            test_message: Final[str] = "My test message"

            def get_message_mock(file_name: str) -> str:
                if file_name != "message.txt":
                    raise FileNotFoundError
                return test_message

            message_service_mock.return_value.get_message.side_effect = get_message_mock
            # WHEN: import deferred so the patches are active during the call.
            from hellopymsdl.__main__ import hello
            hello()
            # THEN: exactly one service lookup and two ordered print calls.
            assert message_service_mock.return_value.get_message.call_count == 1
            assert print_mock.call_count == 2
            print_mock.assert_has_calls([
                call("hello python with Maven Standard Directory Layout"),
                call(test_message)
            ])
| true | true |
f73400dfe689e91c4c2b457c4be1a0a41380fd6a | 1,555 | py | Python | pandas/tests/arrays/integer/conftest.py | AdrianMastronardi/pandas | 67045903306ac4a1cab108177e92df30d99912b4 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2019-11-01T08:44:40.000Z | 2019-11-01T08:44:40.000Z | pandas/tests/arrays/integer/conftest.py | AdrianMastronardi/pandas | 67045903306ac4a1cab108177e92df30d99912b4 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/arrays/integer/conftest.py | AdrianMastronardi/pandas | 67045903306ac4a1cab108177e92df30d99912b4 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pytest
import pandas as pd
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
@pytest.fixture(
    params=[
        Int8Dtype,
        Int16Dtype,
        Int32Dtype,
        Int64Dtype,
        UInt8Dtype,
        UInt16Dtype,
        UInt32Dtype,
        UInt64Dtype,
    ]
)
def dtype(request):
    """Parametrized fixture returning integer 'dtype'"""
    # Each param is a masked-integer dtype class; instantiate it so tests
    # receive a dtype *instance*.
    return request.param()
@pytest.fixture
def data(dtype):
    """
    Fixture returning 'data' array with valid and missing values according to
    parametrized integer 'dtype'.
    Used to test dtype conversion with and without missing values.
    """
    # 100 entries total, with NaNs at positions 8 and 97.
    values = list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
    return pd.array(values, dtype=dtype)
@pytest.fixture
def data_missing(dtype):
    """
    Fixture returning array with exactly one NaN and one valid integer,
    according to parametrized integer 'dtype'.
    Used to test dtype conversion with and without missing values.
    """
    missing_then_valid = [np.nan, 1]
    return pd.array(missing_then_valid, dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
    """Parametrized fixture returning 'data' or 'data_missing' integer arrays.
    Used to test dtype conversion with and without missing values.
    """
    # Dispatch on the fixture param; only these two values are parametrized.
    if request.param == "data_missing":
        return data_missing
    elif request.param == "data":
        return data
| 22.536232 | 79 | 0.65209 | import numpy as np
import pytest
import pandas as pd
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
@pytest.fixture(
    params=[
        Int8Dtype,
        Int16Dtype,
        Int32Dtype,
        Int64Dtype,
        UInt8Dtype,
        UInt16Dtype,
        UInt32Dtype,
        UInt64Dtype,
    ]
)
def dtype(request):
    """Parametrized fixture returning an instance of each integer dtype."""
    return request.param()
@pytest.fixture
def data(dtype):
    """100-element integer array of the parametrized dtype with two NaNs."""
    return pd.array(
        list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100],
        dtype=dtype,
    )
@pytest.fixture
def data_missing(dtype):
    """Two-element array of the parametrized dtype: one NaN, one integer."""
    return pd.array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
    """Parametrized fixture returning either the 'data' or 'data_missing' array."""
    if request.param == "data":
        return data
    elif request.param == "data_missing":
        return data_missing
| true | true |
f73403c9f579d7c74df8240d768267a178688da7 | 5,629 | py | Python | sonnet/python/modules/layer_norm.py | gaoxuesong/sonnet | 40995a58744bbadc2e875c5c87e744896bdc4249 | [
"Apache-2.0"
] | 1 | 2021-06-04T06:21:24.000Z | 2021-06-04T06:21:24.000Z | sonnet/python/modules/layer_norm.py | gaoxuesong/sonnet | 40995a58744bbadc2e875c5c87e744896bdc4249 | [
"Apache-2.0"
] | null | null | null | sonnet/python/modules/layer_norm.py | gaoxuesong/sonnet | 40995a58744bbadc2e875c5c87e744896bdc4249 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Layer normalization module for Sonnet.
This contains the module LayerNorm, which performs layer normalization on
its inputs.
Original paper: https://arxiv.org/abs/1607.06450.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sonnet.python.modules import base
from sonnet.python.modules import util
import tensorflow as tf
class LayerNorm(base.AbstractModule):
  """Layer normalization module.

  Implementation based on:
  https://arxiv.org/abs/1607.06450

  This module transforms input x into:
    outputs = gamma * (x - mu) / sigma + beta
  where mu and sigma are respectively the mean and standard deviation of x.
  Gamma and beta are trainable parameters for scaling and shifting respectively.
  """

  # These names double as the TF variable names and as the only keys accepted
  # in the initializers/partitioners/regularizers dicts.
  GAMMA = "gamma"  # Layer norm scaling.
  BETA = "beta"  # Layer norm bias.

  POSSIBLE_KEYS = {GAMMA, BETA}

  def __init__(self,
               eps=1e-5,
               initializers=None,
               partitioners=None,
               regularizers=None,
               name="layer_norm"):
    """Constructs a LayerNorm module.

    Args:
      eps: small epsilon to avoid division by zero variance. Defaults to
        1e-5 as used in the paper.
      initializers: Dict containing ops to initialize the scale and bias.
        This dictionary may contain any of the keys in POSSIBLE_KEYS.
      partitioners: Optional dict containing partitioners to partition
        the scale and bias. As a default, no partitioners are used. This
        dict may contain any of the keys in POSSIBLE_KEYS.
      regularizers: Optional dict containing regularizers for the scale and
        bias. As a default, no regularizers are used. This dict may contain
        any of the keys in POSSIBLE_KEYS.
      name: name of the module.

    Raises:
      KeyError: If `initializers`, `partitioners` or `regularizers` contain
        any keys other than `gamma`, `beta`.
      TypeError: If any of the given initializers, partitioners or regularizers
        are not callable.
    """
    super(LayerNorm, self).__init__(name=name)

    self._eps = eps

    # check_* validates the dict keys against POSSIBLE_KEYS and that every
    # value is callable; each returns an (possibly empty) dict.
    self._initializers = util.check_initializers(initializers,
                                                 self.POSSIBLE_KEYS)
    self._partitioners = util.check_partitioners(partitioners,
                                                 self.POSSIBLE_KEYS)
    self._regularizers = util.check_regularizers(regularizers,
                                                 self.POSSIBLE_KEYS)

  def _build(self, inputs):
    """Connects the LayerNorm module into the graph.

    Args:
      inputs: a Tensor of shape `[batch_size, layer_dim]`.

    Returns:
      normalized: layer normalized outputs with same shape as inputs.

    Raises:
      base.NotSupportedError: If `inputs` has data type of `tf.float16`.
    """
    if inputs.dtype == tf.float16:
      raise base.NotSupportedError(
          "LayerNorm does not support `tf.float16`, insufficient "
          "precision for calculating sufficient statistics.")

    if inputs.get_shape().ndims != 2:
      raise base.NotSupportedError(
          "Layer normalization expects inputs of rank 2."
          " Got inputs of rank {}.".format(inputs.get_shape().ndims))

    hidden_size = inputs.get_shape()[1].value

    # Missing initializers are filled in with defaults; note this mutates the
    # dict exposed by the `initializers` property.
    if self.GAMMA not in self._initializers:
      self._initializers[self.GAMMA] = create_gamma_initializer()
    self._gamma = tf.get_variable(
        self.GAMMA,
        shape=[hidden_size],
        dtype=inputs.dtype,
        initializer=self._initializers[self.GAMMA],
        partitioner=self._partitioners.get(self.GAMMA),
        regularizer=self._regularizers.get(self.GAMMA))

    if self.BETA not in self._initializers:
      self._initializers[self.BETA] = create_beta_initializer()
    self._beta = tf.get_variable(
        self.BETA,
        shape=[hidden_size],
        dtype=inputs.dtype,
        initializer=self._initializers[self.BETA],
        partitioner=self._partitioners.get(self.BETA),
        regularizer=self._regularizers.get(self.BETA))

    # Normalize over the feature axis (1), then apply the learned scale/shift
    # via the fused batch-normalization op.
    mean, var = tf.nn.moments(inputs, [1], keep_dims=True)

    normalized = tf.nn.batch_normalization(inputs, mean, var, self._beta,
                                           self._gamma, self._eps)
    return normalized

  @property
  def initializers(self):
    return self._initializers

  @property
  def partitioners(self):
    return self._partitioners

  @property
  def regularizers(self):
    return self._regularizers

  @property
  def beta(self):
    # Only valid after the module has been connected to the graph.
    self._ensure_is_connected()
    return self._beta

  @property
  def gamma(self):
    # Only valid after the module has been connected to the graph.
    self._ensure_is_connected()
    return self._gamma
def create_beta_initializer():
  """Default initializer for the layer-norm `beta` bias: all zeros."""
  beta_initializer = tf.zeros_initializer()
  return beta_initializer
def create_gamma_initializer():
  """Default initializer for the layer-norm `gamma` scale: all ones."""
  gamma_initializer = tf.ones_initializer()
  return gamma_initializer
| 31.982955 | 80 | 0.67099 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sonnet.python.modules import base
from sonnet.python.modules import util
import tensorflow as tf
class LayerNorm(base.AbstractModule):
  """Layer normalization (https://arxiv.org/abs/1607.06450).

  Transforms x into gamma * (x - mean) / std + beta, where gamma (scale) and
  beta (shift) are trainable per-feature parameters.
  """

  GAMMA = "gamma"  # Scale variable name / dict key.
  BETA = "beta"  # Bias variable name / dict key.
  POSSIBLE_KEYS = {GAMMA, BETA}

  def __init__(self,
               eps=1e-5,
               initializers=None,
               partitioners=None,
               regularizers=None,
               name="layer_norm"):
    """Constructs a LayerNorm module.

    Args:
      eps: small epsilon added to the variance to avoid division by zero.
      initializers: optional dict; keys restricted to POSSIBLE_KEYS.
      partitioners: optional dict; keys restricted to POSSIBLE_KEYS.
      regularizers: optional dict; keys restricted to POSSIBLE_KEYS.
      name: name of the module.
    """
    super(LayerNorm, self).__init__(name=name)
    self._eps = eps
    # check_* validates keys and that all values are callable.
    self._initializers = util.check_initializers(initializers,
                                                 self.POSSIBLE_KEYS)
    self._partitioners = util.check_partitioners(partitioners,
                                                 self.POSSIBLE_KEYS)
    self._regularizers = util.check_regularizers(regularizers,
                                                 self.POSSIBLE_KEYS)

  def _build(self, inputs):
    """Applies layer normalization to `inputs` of shape [batch, layer_dim]."""
    # Half precision is rejected: insufficient precision for the statistics.
    if inputs.dtype == tf.float16:
      raise base.NotSupportedError(
          "LayerNorm does not support `tf.float16`, insufficient "
          "precision for calculating sufficient statistics.")
    if inputs.get_shape().ndims != 2:
      raise base.NotSupportedError(
          "Layer normalization expects inputs of rank 2."
          " Got inputs of rank {}.".format(inputs.get_shape().ndims))
    hidden_size = inputs.get_shape()[1].value
    # Fill in default initializers for keys the caller did not supply.
    if self.GAMMA not in self._initializers:
      self._initializers[self.GAMMA] = create_gamma_initializer()
    self._gamma = tf.get_variable(
        self.GAMMA,
        shape=[hidden_size],
        dtype=inputs.dtype,
        initializer=self._initializers[self.GAMMA],
        partitioner=self._partitioners.get(self.GAMMA),
        regularizer=self._regularizers.get(self.GAMMA))
    if self.BETA not in self._initializers:
      self._initializers[self.BETA] = create_beta_initializer()
    self._beta = tf.get_variable(
        self.BETA,
        shape=[hidden_size],
        dtype=inputs.dtype,
        initializer=self._initializers[self.BETA],
        partitioner=self._partitioners.get(self.BETA),
        regularizer=self._regularizers.get(self.BETA))
    # Normalize over the feature axis, then apply the learned scale/shift.
    mean, var = tf.nn.moments(inputs, [1], keep_dims=True)
    normalized = tf.nn.batch_normalization(inputs, mean, var, self._beta,
                                           self._gamma, self._eps)
    return normalized

  @property
  def initializers(self):
    """Dict of initializers in use (may be extended with defaults in _build)."""
    return self._initializers

  @property
  def partitioners(self):
    """Dict of partitioners supplied at construction time."""
    return self._partitioners

  @property
  def regularizers(self):
    """Dict of regularizers supplied at construction time."""
    return self._regularizers

  @property
  def beta(self):
    """The bias variable; only valid after the module is connected."""
    self._ensure_is_connected()
    return self._beta

  @property
  def gamma(self):
    """The scale variable; only valid after the module is connected."""
    self._ensure_is_connected()
    return self._gamma
def create_beta_initializer():
  """Returns the default (all-zeros) initializer for the layer-norm `beta`."""
  return tf.zeros_initializer()
def create_gamma_initializer():
  """Returns the default (all-ones) initializer for the layer-norm `gamma`."""
  return tf.ones_initializer()
| true | true |
f73403edf303e50a99fe3939c97df68dfd5476b7 | 258 | py | Python | manage.py | msattel/strike_me_happy | c4bffa6a61f9b2066d4bf0fa41741f68553ec710 | [
"Apache-2.0"
] | null | null | null | manage.py | msattel/strike_me_happy | c4bffa6a61f9b2066d4bf0fa41741f68553ec710 | [
"Apache-2.0"
] | null | null | null | manage.py | msattel/strike_me_happy | c4bffa6a61f9b2066d4bf0fa41741f68553ec710 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "strike_me_happy.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.454545 | 79 | 0.77907 |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "strike_me_happy.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| true | true |
f734047a23619f9f882b1d1a2e95f92ba54a9bbf | 24,574 | py | Python | tests/server/test_handlers.py | iPieter/kiwi | 76b66872fce68873809a0dea112e2ed552ae5b63 | [
"Apache-2.0"
] | null | null | null | tests/server/test_handlers.py | iPieter/kiwi | 76b66872fce68873809a0dea112e2ed552ae5b63 | [
"Apache-2.0"
] | 1 | 2021-01-24T13:34:51.000Z | 2021-01-24T13:34:51.000Z | tests/server/test_handlers.py | iPieter/kiwi | 76b66872fce68873809a0dea112e2ed552ae5b63 | [
"Apache-2.0"
] | null | null | null | import json
import uuid
import mock
import pytest
import os
import kiwi
from kiwi.entities import ViewType
from kiwi.entities.model_registry import RegisteredModel, ModelVersion, \
RegisteredModelTag, ModelVersionTag
from kiwi.exceptions import MlflowException
from kiwi.protos.databricks_pb2 import INTERNAL_ERROR, INVALID_PARAMETER_VALUE, ErrorCode
from kiwi.server.handlers import get_endpoints, _create_experiment, _get_request_message, \
_search_runs, _log_batch, catch_mlflow_exception, _create_registered_model, \
_update_registered_model, _delete_registered_model, _get_registered_model, \
_list_registered_models, _search_registered_models, \
_get_latest_versions, _create_model_version, _update_model_version, \
_delete_model_version, _get_model_version_download_uri, \
_search_model_versions, _get_model_version, _transition_stage, _rename_registered_model, \
_set_registered_model_tag, _delete_registered_model_tag, _set_model_version_tag, \
_delete_model_version_tag
from kiwi.server import BACKEND_STORE_URI_ENV_VAR, app
from kiwi.store.entities.paged_list import PagedList
from kiwi.protos.service_pb2 import CreateExperiment, SearchRuns
from kiwi.protos.model_registry_pb2 import CreateRegisteredModel, UpdateRegisteredModel, \
DeleteRegisteredModel, ListRegisteredModels, SearchRegisteredModels, GetRegisteredModel, \
GetLatestVersions, CreateModelVersion, UpdateModelVersion, \
DeleteModelVersion, GetModelVersion, GetModelVersionDownloadUri, SearchModelVersions, \
TransitionModelVersionStage, RenameRegisteredModel, SetRegisteredModelTag, \
DeleteRegisteredModelTag, SetModelVersionTag, DeleteModelVersionTag
from kiwi.utils.proto_json_utils import message_to_json
from kiwi.utils.validation import MAX_BATCH_LOG_REQUEST_SIZE
@pytest.fixture()
def mock_get_request_message():
    """Patch the handler-level request parser; yields the patched mock."""
    with mock.patch('mlflow.server.handlers._get_request_message') as m:
        yield m
@pytest.fixture()
def mock_get_request_json():
    """Patch the raw-JSON request accessor; yields the patched mock."""
    with mock.patch('mlflow.server.handlers._get_request_json') as m:
        yield m
@pytest.fixture()
def mock_tracking_store():
    """Patch the tracking-store accessor; yields the MagicMock store it returns."""
    with mock.patch('mlflow.server.handlers._get_tracking_store') as m:
        mock_store = mock.MagicMock()
        m.return_value = mock_store
        yield mock_store
@pytest.fixture()
def mock_model_registry_store():
    """Patch the model-registry-store accessor; yields the MagicMock store."""
    with mock.patch('mlflow.server.handlers._get_model_registry_store') as m:
        mock_store = mock.MagicMock()
        m.return_value = mock_store
        yield mock_store
def test_health():
    """The /health endpoint answers 200 with a plain "OK" body."""
    with app.test_client() as client:
        health_response = client.get("/health")
    assert health_response.status_code == 200
    assert health_response.get_data().decode() == "OK"
def test_get_endpoints():
    """CreateExperiment is registered on all four of its routes."""
    matching = [entry for entry in get_endpoints() if entry[1] == _create_experiment]
    assert len(matching) == 4
def test_all_model_registry_endpoints_available():
    """Every model-registry handler is registered under the expected HTTP method."""
    endpoints = {handler: method for (path, handler, method) in get_endpoints()}
    # Test that each of the handler is enabled as an endpoint with appropriate method.
    expected_endpoints = {
        "POST": [
            _create_registered_model,
            _create_model_version,
            _rename_registered_model,
            _transition_stage
        ],
        "PATCH": [
            _update_registered_model,
            _update_model_version,
        ],
        "DELETE": [
            _delete_registered_model,
            # Fixed copy-paste duplicate: the second DELETE handler is the
            # model-version one, so its registration is actually verified.
            _delete_model_version,
        ],
        "GET": [
            _list_registered_models,
            _search_model_versions,
            _get_latest_versions,
            _get_registered_model,
            _get_model_version,
            _get_model_version_download_uri,
        ]
    }
    # TODO: efficient mechanism to test endpoint path
    for method, handlers in expected_endpoints.items():
        for handler in handlers:
            assert handler in endpoints
            assert endpoints[handler] == [method]
def test_can_parse_json():
    """A POST body parsed by Flask into a dict populates the proto message."""
    flask_request = mock.MagicMock()
    flask_request.method = "POST"
    flask_request.get_json = mock.MagicMock(return_value={"name": "hello"})
    parsed = _get_request_message(CreateExperiment(), flask_request=flask_request)
    assert parsed.name == "hello"
def test_can_parse_post_json_with_unknown_fields():
    """Unknown keys in a POST JSON body are ignored rather than rejected."""
    flask_request = mock.MagicMock()
    flask_request.method = "POST"
    flask_request.get_json = mock.MagicMock(
        return_value={"name": "hello", "WHAT IS THIS FIELD EVEN": "DOING"})
    parsed = _get_request_message(CreateExperiment(), flask_request=flask_request)
    assert parsed.name == "hello"
def test_can_parse_get_json_with_unknown_fields():
    """Unknown query-string parameters on a GET request are ignored."""
    flask_request = mock.MagicMock()
    flask_request.method = "GET"
    flask_request.query_string = b"name=hello&superDuperUnknown=field"
    parsed = _get_request_message(CreateExperiment(), flask_request=flask_request)
    assert parsed.name == "hello"
# Previous versions of the client sent a doubly string encoded JSON blob,
# so this test ensures continued compliance with such clients.
def test_can_parse_json_string():
    """A doubly string-encoded JSON body (legacy clients) is still accepted."""
    flask_request = mock.MagicMock()
    flask_request.method = "POST"
    flask_request.get_json = mock.MagicMock(return_value='{"name": "hello2"}')
    parsed = _get_request_message(CreateExperiment(), flask_request=flask_request)
    assert parsed.name == "hello2"
def test_search_runs_default_view_type(mock_get_request_message, mock_tracking_store):
    """
    Search Runs default view type is filled in as ViewType.ACTIVE_ONLY
    """
    mock_get_request_message.return_value = SearchRuns(experiment_ids=["0"])
    mock_tracking_store.search_runs.return_value = PagedList([], None)
    _search_runs()
    # Third positional argument to the store is the run view type.
    positional_args = mock_tracking_store.search_runs.call_args[0]
    assert positional_args[2] == ViewType.ACTIVE_ONLY
def test_log_batch_api_req(mock_get_request_json):
    """An over-sized batched-logging payload is rejected with a 400 error."""
    oversized_payload = "a" * (MAX_BATCH_LOG_REQUEST_SIZE + 1)
    mock_get_request_json.return_value = oversized_payload
    response = _log_batch()
    body = json.loads(response.get_data())
    assert response.status_code == 400
    assert body["error_code"] == ErrorCode.Name(INVALID_PARAMETER_VALUE)
    expected_fragment = ("Batched logging API requests must be at most %s bytes"
                         % MAX_BATCH_LOG_REQUEST_SIZE)
    assert expected_fragment in body["message"]
def test_catch_mlflow_exception():
    """catch_mlflow_exception turns an MlflowException into a JSON 500 response."""
    def failing_handler():
        raise MlflowException('test error', error_code=INTERNAL_ERROR)

    # Apply the decorator manually instead of with @-syntax.
    wrapped_handler = catch_mlflow_exception(failing_handler)
    response = wrapped_handler()
    body = json.loads(response.get_data())
    assert response.status_code == 500
    assert body['error_code'] == ErrorCode.Name(INTERNAL_ERROR)
    assert body['message'] == 'test error'
@pytest.mark.large
def test_mlflow_server_with_installed_plugin(tmpdir):
    """This test requires the package in tests/resources/mlflow-test-plugin to be installed"""
    from mlflow_test_plugin.file_store import PluginFileStore
    env = {
        BACKEND_STORE_URI_ENV_VAR: "file-plugin:%s" % tmpdir.strpath,
    }
    with mock.patch.dict(os.environ, env):
        # Reset the module-level store cache so the env var above is honored,
        # and reset it again afterwards so other tests are unaffected.
        kiwi.server.handlers._tracking_store = None
        try:
            plugin_file_store = kiwi.server.handlers._get_tracking_store()
        finally:
            kiwi.server.handlers._tracking_store = None
        assert isinstance(plugin_file_store, PluginFileStore)
        assert plugin_file_store.is_plugin
def jsonify(obj):
    """Convert an entity (or list of entities) to a JSON dict via its proto form."""
    def _to_dict(entity):
        return json.loads(message_to_json(entity.to_proto()))

    if isinstance(obj, list):
        return [_to_dict(entity) for entity in obj]
    return _to_dict(obj)
# Tests for Model Registry handlers
def test_create_registered_model(mock_get_request_message, mock_model_registry_store):
    """CreateRegisteredModel forwards name/tags to the store and returns JSON."""
    expected_tags = [
        RegisteredModelTag(key="key", value="value"),
        RegisteredModelTag(key="anotherKey", value="some other value"),
    ]
    mock_get_request_message.return_value = CreateRegisteredModel(
        name="model_1", tags=[t.to_proto() for t in expected_tags])
    created = RegisteredModel("model_1", tags=expected_tags)
    mock_model_registry_store.create_registered_model.return_value = created
    response = _create_registered_model()
    call_kwargs = mock_model_registry_store.create_registered_model.call_args[1]
    assert call_kwargs["name"] == "model_1"
    assert ({t.key: t.value for t in call_kwargs["tags"]}
            == {t.key: t.value for t in expected_tags})
    assert json.loads(response.get_data()) == {"registered_model": jsonify(created)}
def test_get_registered_model(mock_get_request_message, mock_model_registry_store):
    """GetRegisteredModel forwards the name and serializes the stored model."""
    model_name = "model1"
    mock_get_request_message.return_value = GetRegisteredModel(name=model_name)
    stored = RegisteredModel(name=model_name, creation_timestamp=111,
                             last_updated_timestamp=222, description="Test model",
                             latest_versions=[])
    mock_model_registry_store.get_registered_model.return_value = stored
    response = _get_registered_model()
    call_kwargs = mock_model_registry_store.get_registered_model.call_args[1]
    assert call_kwargs == {"name": model_name}
    assert json.loads(response.get_data()) == {"registered_model": jsonify(stored)}
def test_update_registered_model(mock_get_request_message, mock_model_registry_store):
    """UpdateRegisteredModel forwards name/description to the store."""
    model_name = "model_1"
    new_description = "Test model"
    mock_get_request_message.return_value = UpdateRegisteredModel(
        name=model_name, description=new_description)
    updated = RegisteredModel(model_name, description=new_description)
    mock_model_registry_store.update_registered_model.return_value = updated
    response = _update_registered_model()
    call_kwargs = mock_model_registry_store.update_registered_model.call_args[1]
    assert call_kwargs == {"name": model_name, "description": u"Test model"}
    assert json.loads(response.get_data()) == {"registered_model": jsonify(updated)}
def test_rename_registered_model(mock_get_request_message, mock_model_registry_store):
    """RenameRegisteredModel forwards old and new names to the store."""
    old_name = "model_1"
    new_name = "model_2"
    mock_get_request_message.return_value = RenameRegisteredModel(name=old_name,
                                                                  new_name=new_name)
    renamed = RegisteredModel(new_name)
    mock_model_registry_store.rename_registered_model.return_value = renamed
    response = _rename_registered_model()
    call_kwargs = mock_model_registry_store.rename_registered_model.call_args[1]
    assert call_kwargs == {"name": old_name, "new_name": new_name}
    assert json.loads(response.get_data()) == {"registered_model": jsonify(renamed)}
def test_delete_registered_model(mock_get_request_message, mock_model_registry_store):
    """DeleteRegisteredModel forwards the model name to the store."""
    model_name = "model_1"
    mock_get_request_message.return_value = DeleteRegisteredModel(name=model_name)
    _delete_registered_model()
    call_kwargs = mock_model_registry_store.delete_registered_model.call_args[1]
    assert call_kwargs == {"name": model_name}
def test_list_registered_models(mock_get_request_message, mock_model_registry_store):
    """ListRegisteredModels passes pagination positionally and echoes the token."""
    mock_get_request_message.return_value = ListRegisteredModels(max_results=50)
    page = PagedList([
        RegisteredModel(name="model_1", creation_timestamp=111,
                        last_updated_timestamp=222, description="Test model",
                        latest_versions=[]),
        RegisteredModel(name="model_2", creation_timestamp=111,
                        last_updated_timestamp=333, description="Another model",
                        latest_versions=[]),
    ], "next_pt")
    mock_model_registry_store.list_registered_models.return_value = page
    response = _list_registered_models()
    positional_args = mock_model_registry_store.list_registered_models.call_args[0]
    assert positional_args == (50, '')
    assert json.loads(response.get_data()) == {
        "next_page_token": "next_pt",
        "registered_models": jsonify(page)}
def test_search_registered_models(mock_get_request_message, mock_model_registry_store):
    """Search requests forward filter/max_results/order_by/page_token verbatim."""
    registered_models = [
        RegisteredModel(name="model_1", creation_timestamp=111,
                        last_updated_timestamp=222, description="Test model",
                        latest_versions=[]),
        RegisteredModel(name="model_2", creation_timestamp=111,
                        last_updated_timestamp=333, description="Another model",
                        latest_versions=[]),
    ]

    def search(request_message, store_page):
        # Stub the parsed request plus the store result, invoke the handler,
        # and return (parsed response body, kwargs the store received).
        mock_get_request_message.return_value = request_message
        mock_model_registry_store.search_registered_models.return_value = store_page
        response = _search_registered_models()
        kwargs = mock_model_registry_store.search_registered_models.call_args[1]
        return json.loads(response.get_data()), kwargs

    # Default request: empty filter, default pagination, no next-page token.
    body, kwargs = search(SearchRegisteredModels(),
                          PagedList(registered_models, None))
    assert kwargs == {"filter_string": "", "max_results": 100, "order_by": [],
                      "page_token": ""}
    assert body == {"registered_models": jsonify(registered_models)}

    body, kwargs = search(SearchRegisteredModels(filter="hello"),
                          PagedList(registered_models[:1], "tok"))
    assert kwargs == {"filter_string": "hello", "max_results": 100, "order_by": [],
                      "page_token": ""}
    assert body == {"registered_models": jsonify(registered_models[:1]),
                    "next_page_token": "tok"}

    body, kwargs = search(SearchRegisteredModels(filter="hi", max_results=5),
                          PagedList([registered_models[0]], "tik"))
    assert kwargs == {"filter_string": "hi", "max_results": 5, "order_by": [],
                      "page_token": ""}
    assert body == {"registered_models": jsonify([registered_models[0]]),
                    "next_page_token": "tik"}

    body, kwargs = search(SearchRegisteredModels(filter="hey", max_results=500,
                                                 order_by=["a", "B desc"],
                                                 page_token="prev"),
                          PagedList(registered_models, "DONE"))
    assert kwargs == {"filter_string": "hey",
                      "max_results": 500,
                      "order_by": ["a", "B desc"],
                      "page_token": "prev"}
    assert body == {"registered_models": jsonify(registered_models),
                    "next_page_token": "DONE"}
def test_get_latest_versions(mock_get_request_message, mock_model_registry_store):
    """GetLatestVersions forwards name/stages and serializes returned versions."""
    model_name = "model1"
    mock_get_request_message.return_value = GetLatestVersions(name=model_name)
    latest = [
        ModelVersion(name=model_name, version="5", creation_timestamp=1,
                     last_updated_timestamp=12, description="v 5", user_id="u1",
                     current_stage="Production", source="A/B",
                     run_id=uuid.uuid4().hex, status="READY",
                     status_message=None),
        ModelVersion(name=model_name, version="1", creation_timestamp=1,
                     last_updated_timestamp=1200, description="v 1", user_id="u1",
                     current_stage="Archived", source="A/B2",
                     run_id=uuid.uuid4().hex, status="READY",
                     status_message=None),
        ModelVersion(name=model_name, version="12", creation_timestamp=100,
                     last_updated_timestamp=None, description="v 12", user_id="u2",
                     current_stage="Staging", source="A/B3",
                     run_id=uuid.uuid4().hex, status="READY",
                     status_message=None),
    ]
    mock_model_registry_store.get_latest_versions.return_value = latest
    response = _get_latest_versions()
    call_kwargs = mock_model_registry_store.get_latest_versions.call_args[1]
    assert call_kwargs == {"name": model_name, "stages": []}
    assert json.loads(response.get_data()) == {"model_versions": jsonify(latest)}
    # Stage filters, including the empty list, are passed through verbatim.
    for stage_filter in ([], ["None"], ["Staging"], ["Staging", "Production"]):
        mock_get_request_message.return_value = GetLatestVersions(
            name=model_name, stages=stage_filter)
        _get_latest_versions()
        call_kwargs = mock_model_registry_store.get_latest_versions.call_args[1]
        assert call_kwargs == {"name": model_name, "stages": stage_filter}
def test_create_model_version(mock_get_request_message, mock_model_registry_store):
    """CreateModelVersion forwards name/source/run_id/tags to the store."""
    run_id = uuid.uuid4().hex
    expected_tags = [
        ModelVersionTag(key="key", value="value"),
        ModelVersionTag(key="anotherKey", value="some other value"),
    ]
    mock_get_request_message.return_value = CreateModelVersion(
        name="model_1", source="A/B", run_id=run_id,
        tags=[t.to_proto() for t in expected_tags])
    created = ModelVersion(name="model_1", version="12", creation_timestamp=123,
                           tags=expected_tags)
    mock_model_registry_store.create_model_version.return_value = created
    response = _create_model_version()
    call_kwargs = mock_model_registry_store.create_model_version.call_args[1]
    assert call_kwargs["name"] == "model_1"
    assert call_kwargs["source"] == "A/B"
    assert call_kwargs["run_id"] == run_id
    assert ({t.key: t.value for t in call_kwargs["tags"]}
            == {t.key: t.value for t in expected_tags})
    assert json.loads(response.get_data()) == {"model_version": jsonify(created)}
def test_set_registered_model_tag(mock_get_request_message, mock_model_registry_store):
    """SetRegisteredModelTag forwards the reconstructed tag to the store."""
    model_name = "model1"
    tag = RegisteredModelTag(key="some weird key", value="some value")
    mock_get_request_message.return_value = SetRegisteredModelTag(
        name=model_name, key=tag.key, value=tag.value)
    _set_registered_model_tag()
    call_kwargs = mock_model_registry_store.set_registered_model_tag.call_args[1]
    assert call_kwargs == {"name": model_name, "tag": tag}
def test_delete_registered_model_tag(mock_get_request_message, mock_model_registry_store):
    """DeleteRegisteredModelTag forwards the name and tag key to the store."""
    model_name = "model1"
    tag_key = "some weird key"
    mock_get_request_message.return_value = DeleteRegisteredModelTag(name=model_name,
                                                                     key=tag_key)
    _delete_registered_model_tag()
    call_kwargs = mock_model_registry_store.delete_registered_model_tag.call_args[1]
    assert call_kwargs == {"name": model_name, "key": tag_key}
def test_get_model_version_details(mock_get_request_message, mock_model_registry_store):
    """GetModelVersion passes name/version through and serializes the store's answer.

    The stub's version ("5") intentionally differs from the request ("32") —
    the handler must echo whatever the store returns, not the request.
    """
    mock_get_request_message.return_value = GetModelVersion(name="model1", version="32")
    stored_version = ModelVersion(
        name="model1", version="5", creation_timestamp=1,
        last_updated_timestamp=12, description="v 5", user_id="u1",
        current_stage="Production", source="A/B", run_id=uuid.uuid4().hex,
        status="READY", status_message=None)
    mock_model_registry_store.get_model_version.return_value = stored_version
    resp = _get_model_version()
    _, kwargs = mock_model_registry_store.get_model_version.call_args
    assert kwargs == {"name": "model1", "version": "32"}
    assert json.loads(resp.get_data()) == {"model_version": jsonify(stored_version)}
def test_update_model_version(mock_get_request_message, mock_model_registry_store):
    """UpdateModelVersion should pass name, version and description to the store."""
    model_name = "model1"
    model_version = "32"
    new_description = "Great model!"
    mock_get_request_message.return_value = UpdateModelVersion(
        name=model_name, version=model_version, description=new_description)
    updated = ModelVersion(name=model_name, version=model_version,
                           creation_timestamp=123, description=new_description)
    mock_model_registry_store.update_model_version.return_value = updated
    _update_model_version()
    _, kwargs = mock_model_registry_store.update_model_version.call_args
    assert kwargs == {"name": model_name, "version": model_version,
                      "description": new_description}
def test_transition_model_version_stage(mock_get_request_message, mock_model_registry_store):
    """Stage transition forwards name/version/stage; archive flag defaults to False
    when the request message omits it."""
    model_name = "model1"
    model_version = "32"
    target_stage = "Production"
    mock_get_request_message.return_value = TransitionModelVersionStage(
        name=model_name, version=model_version, stage=target_stage)
    transitioned = ModelVersion(name=model_name, version=model_version,
                                creation_timestamp=123, current_stage=target_stage)
    mock_model_registry_store.transition_model_version_stage.return_value = transitioned
    _transition_stage()
    _, kwargs = mock_model_registry_store.transition_model_version_stage.call_args
    assert kwargs == {
        "name": model_name,
        "version": model_version,
        "stage": target_stage,
        "archive_existing_versions": False,
    }
def test_delete_model_version(mock_get_request_message, mock_model_registry_store):
    """DeleteModelVersion request should be forwarded verbatim to the store."""
    model_name = "model1"
    model_version = "32"
    mock_get_request_message.return_value = DeleteModelVersion(name=model_name,
                                                               version=model_version)
    _delete_model_version()
    _, kwargs = mock_model_registry_store.delete_model_version.call_args
    assert kwargs == {"name": model_name, "version": model_version}
def test_get_model_version_download_uri(mock_get_request_message, mock_model_registry_store):
    """Handler returns the artifact URI reported by the store for the requested version."""
    model_name = "model1"
    model_version = "32"
    mock_get_request_message.return_value = GetModelVersionDownloadUri(
        name=model_name, version=model_version)
    mock_model_registry_store.get_model_version_download_uri.return_value = \
        "some/download/path"
    resp = _get_model_version_download_uri()
    _, kwargs = mock_model_registry_store.get_model_version_download_uri.call_args
    assert kwargs == {"name": model_name, "version": model_version}
    assert json.loads(resp.get_data()) == {"artifact_uri": "some/download/path"}
def test_search_model_versions(mock_get_request_message, mock_model_registry_store):
    """The search filter string is passed positionally to the store, and the
    matching versions are serialized under the "model_versions" key."""
    mock_get_request_message.return_value = SearchModelVersions(filter="source_path = 'A/B/CD'")

    def _mv(name, version, created, updated, description, user, stage):
        # Every fixture version shares source "A/B/CD" so it matches the filter.
        return ModelVersion(name=name, version=version, creation_timestamp=created,
                            last_updated_timestamp=updated, description=description,
                            user_id=user, current_stage=stage, source="A/B/CD",
                            run_id=uuid.uuid4().hex, status="READY",
                            status_message=None)

    found_versions = [
        _mv("model_1", "5", 100, 1200, "v 5", "u1", "Production"),
        _mv("model_1", "12", 110, 2000, "v 12", "u2", "Production"),
        _mv("ads_model", "8", 200, 2000, "v 8", "u1", "Staging"),
        _mv("fraud_detection_model", "345", 1000, 1001, "newest version", "u12",
            "None"),
    ]
    mock_model_registry_store.search_model_versions.return_value = found_versions
    resp = _search_model_versions()
    # The handler calls search_model_versions with the filter as a positional arg.
    call_args, _ = mock_model_registry_store.search_model_versions.call_args
    assert call_args == ("source_path = 'A/B/CD'",)
    assert json.loads(resp.get_data()) == {"model_versions": jsonify(found_versions)}
def test_set_model_version_tag(mock_get_request_message, mock_model_registry_store):
    """SetModelVersionTag should reach the store as a ModelVersionTag entity."""
    model_name = "model1"
    model_version = "1"
    expected_tag = ModelVersionTag(key="some weird key", value="some value")
    mock_get_request_message.return_value = SetModelVersionTag(
        name=model_name, version=model_version,
        key=expected_tag.key, value=expected_tag.value)
    _set_model_version_tag()
    _, kwargs = mock_model_registry_store.set_model_version_tag.call_args
    assert kwargs == {"name": model_name, "version": model_version,
                      "tag": expected_tag}
def test_delete_model_version_tag(mock_get_request_message, mock_model_registry_store):
    """DeleteModelVersionTag forwards the name/version/key triple to the store."""
    model_name = "model1"
    model_version = "1"
    tag_key = "some weird key"
    mock_get_request_message.return_value = DeleteModelVersionTag(
        name=model_name, version=model_version, key=tag_key)
    _delete_model_version_tag()
    _, kwargs = mock_model_registry_store.delete_model_version_tag.call_args
    assert kwargs == {"name": model_name, "version": model_version, "key": tag_key}
| 47.348748 | 99 | 0.68601 | import json
import uuid
import mock
import pytest
import os
import kiwi
from kiwi.entities import ViewType
from kiwi.entities.model_registry import RegisteredModel, ModelVersion, \
RegisteredModelTag, ModelVersionTag
from kiwi.exceptions import MlflowException
from kiwi.protos.databricks_pb2 import INTERNAL_ERROR, INVALID_PARAMETER_VALUE, ErrorCode
from kiwi.server.handlers import get_endpoints, _create_experiment, _get_request_message, \
_search_runs, _log_batch, catch_mlflow_exception, _create_registered_model, \
_update_registered_model, _delete_registered_model, _get_registered_model, \
_list_registered_models, _search_registered_models, \
_get_latest_versions, _create_model_version, _update_model_version, \
_delete_model_version, _get_model_version_download_uri, \
_search_model_versions, _get_model_version, _transition_stage, _rename_registered_model, \
_set_registered_model_tag, _delete_registered_model_tag, _set_model_version_tag, \
_delete_model_version_tag
from kiwi.server import BACKEND_STORE_URI_ENV_VAR, app
from kiwi.store.entities.paged_list import PagedList
from kiwi.protos.service_pb2 import CreateExperiment, SearchRuns
from kiwi.protos.model_registry_pb2 import CreateRegisteredModel, UpdateRegisteredModel, \
DeleteRegisteredModel, ListRegisteredModels, SearchRegisteredModels, GetRegisteredModel, \
GetLatestVersions, CreateModelVersion, UpdateModelVersion, \
DeleteModelVersion, GetModelVersion, GetModelVersionDownloadUri, SearchModelVersions, \
TransitionModelVersionStage, RenameRegisteredModel, SetRegisteredModelTag, \
DeleteRegisteredModelTag, SetModelVersionTag, DeleteModelVersionTag
from kiwi.utils.proto_json_utils import message_to_json
from kiwi.utils.validation import MAX_BATCH_LOG_REQUEST_SIZE
@pytest.fixture()
def mock_get_request_message():
with mock.patch('mlflow.server.handlers._get_request_message') as m:
yield m
@pytest.fixture()
def mock_get_request_json():
with mock.patch('mlflow.server.handlers._get_request_json') as m:
yield m
@pytest.fixture()
def mock_tracking_store():
with mock.patch('mlflow.server.handlers._get_tracking_store') as m:
mock_store = mock.MagicMock()
m.return_value = mock_store
yield mock_store
@pytest.fixture()
def mock_model_registry_store():
with mock.patch('mlflow.server.handlers._get_model_registry_store') as m:
mock_store = mock.MagicMock()
m.return_value = mock_store
yield mock_store
def test_health():
with app.test_client() as c:
response = c.get("/health")
assert response.status_code == 200
assert response.get_data().decode() == "OK"
def test_get_endpoints():
endpoints = get_endpoints()
create_experiment_endpoint = [e for e in endpoints if e[1] == _create_experiment]
assert len(create_experiment_endpoint) == 4
def test_all_model_registry_endpoints_available():
endpoints = {handler: method for (path, handler, method) in get_endpoints()}
print(endpoints)
expected_endpoints = {
"POST": [
_create_registered_model,
_create_model_version,
_rename_registered_model,
_transition_stage
],
"PATCH": [
_update_registered_model,
_update_model_version,
],
"DELETE": [
_delete_registered_model,
_delete_registered_model,
],
"GET": [
_list_registered_models,
_search_model_versions,
_get_latest_versions,
_get_registered_model,
_get_model_version,
_get_model_version_download_uri,
]
}
for method, handlers in expected_endpoints.items():
for handler in handlers:
assert handler in endpoints
assert endpoints[handler] == [method]
def test_can_parse_json():
request = mock.MagicMock()
request.method = "POST"
request.get_json = mock.MagicMock()
request.get_json.return_value = {"name": "hello"}
msg = _get_request_message(CreateExperiment(), flask_request=request)
assert msg.name == "hello"
def test_can_parse_post_json_with_unknown_fields():
request = mock.MagicMock()
request.method = "POST"
request.get_json = mock.MagicMock()
request.get_json.return_value = {"name": "hello", "WHAT IS THIS FIELD EVEN": "DOING"}
msg = _get_request_message(CreateExperiment(), flask_request=request)
assert msg.name == "hello"
def test_can_parse_get_json_with_unknown_fields():
request = mock.MagicMock()
request.method = "GET"
request.query_string = b"name=hello&superDuperUnknown=field"
msg = _get_request_message(CreateExperiment(), flask_request=request)
assert msg.name == "hello"
def test_can_parse_json_string():
request = mock.MagicMock()
request.method = "POST"
request.get_json = mock.MagicMock()
request.get_json.return_value = '{"name": "hello2"}'
msg = _get_request_message(CreateExperiment(), flask_request=request)
assert msg.name == "hello2"
def test_search_runs_default_view_type(mock_get_request_message, mock_tracking_store):
mock_get_request_message.return_value = SearchRuns(experiment_ids=["0"])
mock_tracking_store.search_runs.return_value = PagedList([], None)
_search_runs()
args, _ = mock_tracking_store.search_runs.call_args
assert args[2] == ViewType.ACTIVE_ONLY
def test_log_batch_api_req(mock_get_request_json):
mock_get_request_json.return_value = "a" * (MAX_BATCH_LOG_REQUEST_SIZE + 1)
response = _log_batch()
assert response.status_code == 400
json_response = json.loads(response.get_data())
assert json_response["error_code"] == ErrorCode.Name(INVALID_PARAMETER_VALUE)
assert ("Batched logging API requests must be at most %s bytes" % MAX_BATCH_LOG_REQUEST_SIZE
in json_response["message"])
def test_catch_mlflow_exception():
@catch_mlflow_exception
def test_handler():
raise MlflowException('test error', error_code=INTERNAL_ERROR)
response = test_handler()
json_response = json.loads(response.get_data())
assert response.status_code == 500
assert json_response['error_code'] == ErrorCode.Name(INTERNAL_ERROR)
assert json_response['message'] == 'test error'
@pytest.mark.large
def test_mlflow_server_with_installed_plugin(tmpdir):
from mlflow_test_plugin.file_store import PluginFileStore
env = {
BACKEND_STORE_URI_ENV_VAR: "file-plugin:%s" % tmpdir.strpath,
}
with mock.patch.dict(os.environ, env):
kiwi.server.handlers._tracking_store = None
try:
plugin_file_store = kiwi.server.handlers._get_tracking_store()
finally:
kiwi.server.handlers._tracking_store = None
assert isinstance(plugin_file_store, PluginFileStore)
assert plugin_file_store.is_plugin
def jsonify(obj):
def _jsonify(obj):
return json.loads(message_to_json(obj.to_proto()))
if isinstance(obj, list):
return [_jsonify(o) for o in obj]
else:
return _jsonify(obj)
def test_create_registered_model(mock_get_request_message, mock_model_registry_store):
tags = [RegisteredModelTag(key="key", value="value"),
RegisteredModelTag(key="anotherKey", value="some other value")]
mock_get_request_message.return_value = CreateRegisteredModel(name="model_1",
tags=[tag.to_proto()
for tag in tags])
rm = RegisteredModel("model_1", tags=tags)
mock_model_registry_store.create_registered_model.return_value = rm
resp = _create_registered_model()
_, args = mock_model_registry_store.create_registered_model.call_args
assert args["name"] == "model_1"
assert {tag.key: tag.value for tag in args["tags"]} == {tag.key: tag.value for tag in tags}
assert json.loads(resp.get_data()) == {"registered_model": jsonify(rm)}
def test_get_registered_model(mock_get_request_message, mock_model_registry_store):
name = "model1"
mock_get_request_message.return_value = GetRegisteredModel(name=name)
rmd = RegisteredModel(name=name, creation_timestamp=111,
last_updated_timestamp=222, description="Test model",
latest_versions=[])
mock_model_registry_store.get_registered_model.return_value = rmd
resp = _get_registered_model()
_, args = mock_model_registry_store.get_registered_model.call_args
assert args == {"name": name}
assert json.loads(resp.get_data()) == {"registered_model": jsonify(rmd)}
def test_update_registered_model(mock_get_request_message, mock_model_registry_store):
name = "model_1"
description = "Test model"
mock_get_request_message.return_value = UpdateRegisteredModel(name=name,
description=description)
rm2 = RegisteredModel(name, description=description)
mock_model_registry_store.update_registered_model.return_value = rm2
resp = _update_registered_model()
_, args = mock_model_registry_store.update_registered_model.call_args
assert args == {"name": name, "description": u"Test model"}
assert json.loads(resp.get_data()) == {"registered_model": jsonify(rm2)}
def test_rename_registered_model(mock_get_request_message, mock_model_registry_store):
name = "model_1"
new_name = "model_2"
mock_get_request_message.return_value = RenameRegisteredModel(name=name, new_name=new_name)
rm2 = RegisteredModel(new_name)
mock_model_registry_store.rename_registered_model.return_value = rm2
resp = _rename_registered_model()
_, args = mock_model_registry_store.rename_registered_model.call_args
assert args == {"name": name, "new_name": new_name}
assert json.loads(resp.get_data()) == {"registered_model": jsonify(rm2)}
def test_delete_registered_model(mock_get_request_message, mock_model_registry_store):
name = "model_1"
mock_get_request_message.return_value = DeleteRegisteredModel(name=name)
_delete_registered_model()
_, args = mock_model_registry_store.delete_registered_model.call_args
assert args == {"name": name}
def test_list_registered_models(mock_get_request_message, mock_model_registry_store):
mock_get_request_message.return_value = ListRegisteredModels(max_results=50)
rmds = PagedList([
RegisteredModel(name="model_1", creation_timestamp=111,
last_updated_timestamp=222, description="Test model",
latest_versions=[]),
RegisteredModel(name="model_2", creation_timestamp=111,
last_updated_timestamp=333, description="Another model",
latest_versions=[]),
], "next_pt")
mock_model_registry_store.list_registered_models.return_value = rmds
resp = _list_registered_models()
args, _ = mock_model_registry_store.list_registered_models.call_args
assert args == (50, '')
assert json.loads(resp.get_data()) == {
"next_page_token": "next_pt",
"registered_models": jsonify(rmds)}
def test_search_registered_models(mock_get_request_message, mock_model_registry_store):
rmds = [
RegisteredModel(name="model_1", creation_timestamp=111,
last_updated_timestamp=222, description="Test model",
latest_versions=[]),
RegisteredModel(name="model_2", creation_timestamp=111,
last_updated_timestamp=333, description="Another model",
latest_versions=[]),
]
mock_get_request_message.return_value = SearchRegisteredModels()
mock_model_registry_store.search_registered_models.return_value = PagedList(rmds, None)
resp = _search_registered_models()
_, args = mock_model_registry_store.search_registered_models.call_args
assert args == {"filter_string": "", "max_results": 100, "order_by": [], "page_token": ""}
assert json.loads(resp.get_data()) == {"registered_models": jsonify(rmds)}
mock_get_request_message.return_value = SearchRegisteredModels(filter="hello")
mock_model_registry_store.search_registered_models.return_value = PagedList(rmds[:1], "tok")
resp = _search_registered_models()
_, args = mock_model_registry_store.search_registered_models.call_args
assert args == {"filter_string": "hello", "max_results": 100, "order_by": [], "page_token": ""}
assert json.loads(resp.get_data()) == {"registered_models": jsonify(rmds[:1]),
"next_page_token": "tok"}
mock_get_request_message.return_value = SearchRegisteredModels(filter="hi", max_results=5)
mock_model_registry_store.search_registered_models.return_value = PagedList([rmds[0]], "tik")
resp = _search_registered_models()
_, args = mock_model_registry_store.search_registered_models.call_args
assert args == {"filter_string": "hi", "max_results": 5, "order_by": [], "page_token": ""}
assert json.loads(resp.get_data()) == {"registered_models": jsonify([rmds[0]]),
"next_page_token": "tik"}
mock_get_request_message.return_value = SearchRegisteredModels(filter="hey",
max_results=500,
order_by=["a", "B desc"],
page_token="prev")
mock_model_registry_store.search_registered_models.return_value = PagedList(rmds, "DONE")
resp = _search_registered_models()
_, args = mock_model_registry_store.search_registered_models.call_args
assert args == {"filter_string": "hey",
"max_results": 500,
"order_by": ["a", "B desc"],
"page_token": "prev"}
assert json.loads(resp.get_data()) == {"registered_models": jsonify(rmds),
"next_page_token": "DONE"}
def test_get_latest_versions(mock_get_request_message, mock_model_registry_store):
name = "model1"
mock_get_request_message.return_value = GetLatestVersions(name=name)
mvds = [
ModelVersion(name=name, version="5", creation_timestamp=1,
last_updated_timestamp=12, description="v 5", user_id="u1",
current_stage="Production", source="A/B", run_id=uuid.uuid4().hex,
status="READY", status_message=None),
ModelVersion(name=name, version="1", creation_timestamp=1,
last_updated_timestamp=1200, description="v 1", user_id="u1",
current_stage="Archived", source="A/B2", run_id=uuid.uuid4().hex,
status="READY", status_message=None),
ModelVersion(name=name, version="12", creation_timestamp=100,
last_updated_timestamp=None, description="v 12", user_id="u2",
current_stage="Staging", source="A/B3", run_id=uuid.uuid4().hex,
status="READY", status_message=None),
]
mock_model_registry_store.get_latest_versions.return_value = mvds
resp = _get_latest_versions()
_, args = mock_model_registry_store.get_latest_versions.call_args
assert args == {"name": name, "stages": []}
assert json.loads(resp.get_data()) == {"model_versions": jsonify(mvds)}
for stages in [[], ["None"], ["Staging"], ["Staging", "Production"]]:
mock_get_request_message.return_value = GetLatestVersions(name=name,
stages=stages)
_get_latest_versions()
_, args = mock_model_registry_store.get_latest_versions.call_args
assert args == {"name": name, "stages": stages}
def test_create_model_version(mock_get_request_message, mock_model_registry_store):
run_id = uuid.uuid4().hex
tags = [ModelVersionTag(key="key", value="value"),
ModelVersionTag(key="anotherKey", value="some other value")]
mock_get_request_message.return_value = CreateModelVersion(name="model_1",
source="A/B",
run_id=run_id,
tags=[tag.to_proto()
for tag in tags])
mv = ModelVersion(name="model_1", version="12", creation_timestamp=123, tags=tags)
mock_model_registry_store.create_model_version.return_value = mv
resp = _create_model_version()
_, args = mock_model_registry_store.create_model_version.call_args
assert args["name"] == "model_1"
assert args["source"] == "A/B"
assert args["run_id"] == run_id
assert {tag.key: tag.value for tag in args["tags"]} == {tag.key: tag.value for tag in tags}
assert json.loads(resp.get_data()) == {"model_version": jsonify(mv)}
def test_set_registered_model_tag(mock_get_request_message, mock_model_registry_store):
name = "model1"
tag = RegisteredModelTag(key="some weird key", value="some value")
mock_get_request_message.return_value = SetRegisteredModelTag(name=name, key=tag.key,
value=tag.value)
_set_registered_model_tag()
_, args = mock_model_registry_store.set_registered_model_tag.call_args
assert args == {"name": name, "tag": tag}
def test_delete_registered_model_tag(mock_get_request_message, mock_model_registry_store):
name = "model1"
key = "some weird key"
mock_get_request_message.return_value = DeleteRegisteredModelTag(name=name, key=key)
_delete_registered_model_tag()
_, args = mock_model_registry_store.delete_registered_model_tag.call_args
assert args == {"name": name, "key": key}
def test_get_model_version_details(mock_get_request_message, mock_model_registry_store):
mock_get_request_message.return_value = GetModelVersion(name="model1", version="32")
mvd = ModelVersion(name="model1", version="5", creation_timestamp=1,
last_updated_timestamp=12, description="v 5", user_id="u1",
current_stage="Production", source="A/B", run_id=uuid.uuid4().hex,
status="READY", status_message=None)
mock_model_registry_store.get_model_version.return_value = mvd
resp = _get_model_version()
_, args = mock_model_registry_store.get_model_version.call_args
assert args == {"name": "model1", "version": "32"}
assert json.loads(resp.get_data()) == {"model_version": jsonify(mvd)}
def test_update_model_version(mock_get_request_message, mock_model_registry_store):
name = "model1"
version = "32"
description = "Great model!"
mock_get_request_message.return_value = UpdateModelVersion(name=name, version=version,
description=description)
mv = ModelVersion(name=name, version=version, creation_timestamp=123, description=description)
mock_model_registry_store.update_model_version.return_value = mv
_update_model_version()
_, args = mock_model_registry_store.update_model_version.call_args
assert args == {"name": name, "version": version, "description": description}
def test_transition_model_version_stage(mock_get_request_message, mock_model_registry_store):
name = "model1"
version = "32"
stage = "Production"
mock_get_request_message.return_value = TransitionModelVersionStage(name=name, version=version,
stage=stage)
mv = ModelVersion(name=name, version=version, creation_timestamp=123, current_stage=stage)
mock_model_registry_store.transition_model_version_stage.return_value = mv
_transition_stage()
_, args = mock_model_registry_store.transition_model_version_stage.call_args
assert args == {"name": name, "version": version, "stage": stage,
"archive_existing_versions": False}
def test_delete_model_version(mock_get_request_message, mock_model_registry_store):
name = "model1"
version = "32"
mock_get_request_message.return_value = DeleteModelVersion(name=name, version=version)
_delete_model_version()
_, args = mock_model_registry_store.delete_model_version.call_args
assert args == {"name": name, "version": version}
def test_get_model_version_download_uri(mock_get_request_message, mock_model_registry_store):
name = "model1"
version = "32"
mock_get_request_message.return_value = GetModelVersionDownloadUri(name=name, version=version)
mock_model_registry_store.get_model_version_download_uri.return_value = "some/download/path"
resp = _get_model_version_download_uri()
_, args = mock_model_registry_store.get_model_version_download_uri.call_args
assert args == {"name": name, "version": version}
assert json.loads(resp.get_data()) == {"artifact_uri": "some/download/path"}
def test_search_model_versions(mock_get_request_message, mock_model_registry_store):
mock_get_request_message.return_value = SearchModelVersions(filter="source_path = 'A/B/CD'")
mvds = [
ModelVersion(name="model_1", version="5", creation_timestamp=100,
last_updated_timestamp=1200, description="v 5", user_id="u1",
current_stage="Production", source="A/B/CD", run_id=uuid.uuid4().hex,
status="READY", status_message=None),
ModelVersion(name="model_1", version="12", creation_timestamp=110,
last_updated_timestamp=2000, description="v 12", user_id="u2",
current_stage="Production", source="A/B/CD", run_id=uuid.uuid4().hex,
status="READY", status_message=None),
ModelVersion(name="ads_model", version="8", creation_timestamp=200,
last_updated_timestamp=2000, description="v 8", user_id="u1",
current_stage="Staging", source="A/B/CD", run_id=uuid.uuid4().hex,
status="READY", status_message=None),
ModelVersion(name="fraud_detection_model", version="345",
creation_timestamp=1000, last_updated_timestamp=1001,
description="newest version", user_id="u12", current_stage="None",
source="A/B/CD", run_id=uuid.uuid4().hex, status="READY",
status_message=None),
]
mock_model_registry_store.search_model_versions.return_value = mvds
resp = _search_model_versions()
args, _ = mock_model_registry_store.search_model_versions.call_args
assert args == ("source_path = 'A/B/CD'",)
assert json.loads(resp.get_data()) == {"model_versions": jsonify(mvds)}
def test_set_model_version_tag(mock_get_request_message, mock_model_registry_store):
name = "model1"
version = "1"
tag = ModelVersionTag(key="some weird key", value="some value")
mock_get_request_message.return_value = SetModelVersionTag(name=name, version=version,
key=tag.key, value=tag.value)
_set_model_version_tag()
_, args = mock_model_registry_store.set_model_version_tag.call_args
assert args == {"name": name, "version": version, "tag": tag}
def test_delete_model_version_tag(mock_get_request_message, mock_model_registry_store):
name = "model1"
version = "1"
key = "some weird key"
mock_get_request_message.return_value = DeleteModelVersionTag(name=name, version=version,
key=key)
_delete_model_version_tag()
_, args = mock_model_registry_store.delete_model_version_tag.call_args
assert args == {"name": name, "version": version, "key": key}
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.