hexsha
stringlengths 40
40
| size
int64 2
1.02M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 2
1.02M
| avg_line_length
float64 1
417k
| max_line_length
int64 1
987k
| alphanum_fraction
float64 0
1
| content_no_comment
stringlengths 0
1.01M
| is_comment_constant_removed
bool 1
class | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f70e088bf84c45c11008a30e7afba1fe2d2c87f3
| 1,610
|
py
|
Python
|
proxymanager.py
|
PrototypeSapien/OWExcelsiorRaffle
|
a5e38a0060e2c665fee66e695575ae1503bece6c
|
[
"MIT"
] | 2
|
2018-03-06T22:51:06.000Z
|
2018-03-14T12:13:17.000Z
|
proxymanager.py
|
PrototypeSapien/OWExcelsiorRaffle
|
a5e38a0060e2c665fee66e695575ae1503bece6c
|
[
"MIT"
] | null | null | null |
proxymanager.py
|
PrototypeSapien/OWExcelsiorRaffle
|
a5e38a0060e2c665fee66e695575ae1503bece6c
|
[
"MIT"
] | 1
|
2018-06-25T09:44:57.000Z
|
2018-06-25T09:44:57.000Z
|
from utils import*
from random import*
formattedProxies = []
def chooseProxy(tasknum):
    """Return a requests-style proxy mapping for task number *tasknum*.

    Proxies come from the module-global ``proxieslines`` (one proxy per line,
    either ``ip:port`` or ``ip:port:user:pass``).  If ``tasknum`` is beyond
    the end of the list, a random proxy is reused instead.  The formatted
    mapping is appended to the module-global ``formattedProxies`` and returned.

    Raises:
        IndexError: if no proxies have been loaded.
    """
    if not proxieslines:
        # BUG FIX: the original crashed with an UnboundLocalError here;
        # fail loudly with a meaningful message instead.
        raise IndexError('no proxies loaded')
    if tasknum < len(proxieslines):
        proxy = proxieslines[tasknum].rstrip()
    else:
        # BUG FIX: randint(1, len - 1) could never select index 0.
        proxy = proxieslines[randint(0, len(proxieslines) - 1)].rstrip()
    parts = proxy.split(':')
    if len(parts) >= 4:
        # Authenticated proxy: ip:port:user:pass
        host = parts[0] + ':' + parts[1]
        user = parts[2].rstrip()
        # BUG FIX: the original rstripped the user twice and never the password.
        password = parts[3].rstrip()
        proxies = {'http': 'http://' + user + ':' + password + '@' + host,
                   'https': 'https://' + user + ':' + password + '@' + host}
    else:
        # Plain proxy (ip:port).  A malformed 3-part line no longer raises an
        # uncaught IndexError as it did in the original.
        proxies = {'http': 'http://' + proxy,
                   'https': 'https://' + proxy}
    global formattedProxies
    formattedProxies.append(proxies)
    return proxies
def importProxies(proxyfile):
    """Load proxies from ``<proxyfile>.txt`` and pre-format all of them.

    Populates the module-globals ``proxieslines`` and ``formattedProxies``
    and returns the latter.  When the file contains no proxies,
    ``formattedProxies`` becomes ``[None]`` so callers can still index it.
    """
    global proxieslines, formattedProxies
    # BUG FIX: the file handle was never closed; use a context manager.
    with open('{}.txt'.format(proxyfile)) as p:
        proxieslines = p.readlines()
    numproxies = len(proxieslines)
    if numproxies > 0:
        formattedProxies = []
        for i in range(numproxies):
            chooseProxy(i)
    else:
        formattedProxies = [None]
    # print(formattedProxies[0])
    xlpFormat()  # IMPORTANT DO NOT REMOVE OR ELSE SCRIPT WILL BREAK.
    log('%s proxies loaded' % numproxies)
    return formattedProxies
| 28.245614
| 83
| 0.665217
|
from utils import*
from random import*
formattedProxies = []
def chooseProxy(tasknum):
if tasknum + 1 <= len(proxieslines):
proxy = proxieslines[tasknum].rstrip()
if tasknum + 1 > len(proxieslines):
if len(proxieslines) > 1:
a = randint(1, len(proxieslines) - 1)
if len(proxieslines) == 1:
a = 0
proxy = proxieslines[a].rstrip()
try:
proxytest = proxy.split(":")[2]
userpass = True
except IndexError:
userpass = False
if userpass == False:
proxyedit = proxy
if userpass == True:
ip = proxy.split(":")[0]
port = proxy.split(":")[1]
userpassproxy = ip + ':' + port
proxyedit = userpassproxy
proxyuser = proxy.split(":")[2]
proxyuser = proxyuser.rstrip()
proxypass = proxy.split(":")[3]
proxyuser = proxyuser.rstrip()
if userpass == True:
proxies = {'http': 'http://' + proxyuser + ':' + proxypass + '@' + userpassproxy,
'https': 'https://' + proxyuser + ':' + proxypass + '@' + userpassproxy}
if userpass == False:
proxies = {'http': 'http://' + proxy,
'https': 'https://' + proxy}
global formattedProxies
formattedProxies.append(proxies)
return proxies
def importProxies(proxyfile):
p = open('{}.txt'.format(proxyfile))
global proxieslines
proxieslines = p.readlines()
numproxies = len(proxieslines)
global formattedProxies
if numproxies > 0:
formattedProxies = []
for i in range (0,len(proxieslines)):
chooseProxy(i)
if numproxies == 0:
formattedProxies = [None]
xlpFormat()
log('%s proxies loaded' % numproxies)
return formattedProxies
| true
| true
|
f70e090bb5b01942713149493e48bc0e51f7f74b
| 5,620
|
py
|
Python
|
youtube_dl/extractor/ceskatelevize.py
|
Logmytech/youtube-dl-QT
|
1497297719a95c4f70fbfa32e0fa4e38cdd475dc
|
[
"MIT"
] | 1
|
2015-02-19T13:13:47.000Z
|
2015-02-19T13:13:47.000Z
|
youtube_dl/extractor/ceskatelevize.py
|
Logmytech/youtube-dl-QT
|
1497297719a95c4f70fbfa32e0fa4e38cdd475dc
|
[
"MIT"
] | 2
|
2019-05-20T12:46:30.000Z
|
2020-11-07T12:50:32.000Z
|
youtube_dl/extractor/ceskatelevize.py
|
Logmytech/youtube-dl-QT
|
1497297719a95c4f70fbfa32e0fa4e38cdd475dc
|
[
"MIT"
] | 5
|
2020-10-25T09:18:58.000Z
|
2021-05-23T22:57:55.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .subtitles import SubtitlesInfoExtractor
from ..compat import (
compat_urllib_request,
compat_urllib_parse,
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
float_or_none,
)
class CeskaTelevizeIE(SubtitlesInfoExtractor):
    """Extractor for Czech Television (ceskatelevize.cz) iVysilani streams."""
    _VALID_URL = r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(.+/)?(?P<id>[^?#]+)'
    _TESTS = [
        {
            'url': 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220',
            'info_dict': {
                'id': '214411058091220',
                'ext': 'mp4',
                'title': 'Hyde Park Civilizace',
                'description': 'Věda a současná civilizace. Interaktivní pořad - prostor pro vaše otázky a komentáře',
                'thumbnail': 're:^https?://.*\.jpg',
                'duration': 3350,
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/bonus/14716-zpevacka-z-duparny-bobina',
            'info_dict': {
                'id': '14716',
                'ext': 'mp4',
                'title': 'První republika: Zpěvačka z Dupárny Bobina',
                'description': 'Sága mapující atmosféru první republiky od r. 1918 do r. 1945.',
                'thumbnail': 're:^https?://.*\.jpg',
                'duration': 88.4,
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
    ]
    def _real_extract(self, url):
        """Resolve the client playlist for *url* and return the info dict."""
        # /porady/ pages embed the same player as /ivysilani/; normalize the URL.
        url = url.replace('/porady/', '/ivysilani/').replace('/video/', '')
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        # Geo-restriction check: the site renders this message inline when the
        # viewer's territory is blocked.
        NOT_AVAILABLE_STRING = 'This content is not available at your territory due to limited copyright.'
        if '%s</p>' % NOT_AVAILABLE_STRING in webpage:
            raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
        # The page embeds a getPlaylistUrl(...) JS call carrying the playlist
        # type and the episode id; scrape both out of it.
        typ = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],', webpage, 'type')
        episode_id = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],', webpage, 'episode_id')
        # Form payload for the playlist-resolution AJAX endpoint.
        data = {
            'playlist[0][type]': typ,
            'playlist[0][id]': episode_id,
            'requestUrl': compat_urllib_parse_urlparse(url).path,
            'requestSource': 'iVysilani',
        }
        req = compat_urllib_request.Request(
            'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist',
            data=compat_urllib_parse.urlencode(data))
        # Headers presumably required by the AJAX endpoint — kept verbatim.
        req.add_header('Content-type', 'application/x-www-form-urlencoded')
        req.add_header('x-addr', '127.0.0.1')
        req.add_header('X-Requested-With', 'XMLHttpRequest')
        req.add_header('Referer', url)
        playlistpage = self._download_json(req, video_id)
        playlist_url = playlistpage['url']
        # The endpoint signals geo-blocking via a sentinel URL value.
        if playlist_url == 'error_region':
            raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
        req = compat_urllib_request.Request(compat_urllib_parse.unquote(playlist_url))
        req.add_header('Referer', url)
        playlist = self._download_json(req, video_id)
        # Only the first playlist item is used.
        item = playlist['playlist'][0]
        formats = []
        for format_id, stream_url in item['streamUrls'].items():
            formats.extend(self._extract_m3u8_formats(stream_url, video_id, 'mp4'))
        self._sort_formats(formats)
        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        duration = float_or_none(item.get('duration'))
        thumbnail = item.get('previewImageUrl')
        subtitles = {}
        subs = item.get('subtitles')
        if subs:
            # Only the first subtitle track is taken, labelled Czech.
            subtitles['cs'] = subs[0]['url']
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, subtitles)
            return
        subtitles = self._fix_subtitles(self.extract_subtitles(video_id, subtitles))
        return {
            'id': episode_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }
    @staticmethod
    def _fix_subtitles(subtitles):
        """ Convert millisecond-based subtitles to SRT """
        if subtitles is None:
            return subtitles # subtitles not requested
        def _msectotimecode(msec):
            """ Helper utility to convert milliseconds to timecode """
            # Successive divmod-style passes: ms, seconds, minutes, then hours
            # (the final 100 caps hours at two digits — presumably broadcast
            # durations never exceed that).
            components = []
            for divider in [1000, 60, 60, 100]:
                components.append(msec % divider)
                msec //= divider
            # SRT timecode layout: HH:MM:SS,mmm
            return "{3:02}:{2:02}:{1:02},{0:03}".format(*components)
        def _fix_subtitle(subtitle):
            # Source lines look like "index; start stop" (times in ms);
            # rewrite them as an SRT index line plus a "start --> stop" line.
            for line in subtitle.splitlines():
                m = re.match(r"^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$", line)
                if m:
                    yield m.group(1)
                    start, stop = (_msectotimecode(int(t)) for t in m.groups()[1:])
                    yield "{0} --> {1}".format(start, stop)
                else:
                    yield line
        fixed_subtitles = {}
        for k, v in subtitles.items():
            fixed_subtitles[k] = "\r\n".join(_fix_subtitle(v))
        return fixed_subtitles
| 36.025641
| 125
| 0.561922
|
from __future__ import unicode_literals
import re
from .subtitles import SubtitlesInfoExtractor
from ..compat import (
compat_urllib_request,
compat_urllib_parse,
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
float_or_none,
)
class CeskaTelevizeIE(SubtitlesInfoExtractor):
_VALID_URL = r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(.+/)?(?P<id>[^?#]+)'
_TESTS = [
{
'url': 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220',
'info_dict': {
'id': '214411058091220',
'ext': 'mp4',
'title': 'Hyde Park Civilizace',
'description': 'Věda a současná civilizace. Interaktivní pořad - prostor pro vaše otázky a komentáře',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 3350,
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/bonus/14716-zpevacka-z-duparny-bobina',
'info_dict': {
'id': '14716',
'ext': 'mp4',
'title': 'První republika: Zpěvačka z Dupárny Bobina',
'description': 'Sága mapující atmosféru první republiky od r. 1918 do r. 1945.',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 88.4,
},
'params': {
'skip_download': True,
},
},
]
def _real_extract(self, url):
url = url.replace('/porady/', '/ivysilani/').replace('/video/', '')
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
NOT_AVAILABLE_STRING = 'This content is not available at your territory due to limited copyright.'
if '%s</p>' % NOT_AVAILABLE_STRING in webpage:
raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
typ = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],', webpage, 'type')
episode_id = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],', webpage, 'episode_id')
data = {
'playlist[0][type]': typ,
'playlist[0][id]': episode_id,
'requestUrl': compat_urllib_parse_urlparse(url).path,
'requestSource': 'iVysilani',
}
req = compat_urllib_request.Request(
'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist',
data=compat_urllib_parse.urlencode(data))
req.add_header('Content-type', 'application/x-www-form-urlencoded')
req.add_header('x-addr', '127.0.0.1')
req.add_header('X-Requested-With', 'XMLHttpRequest')
req.add_header('Referer', url)
playlistpage = self._download_json(req, video_id)
playlist_url = playlistpage['url']
if playlist_url == 'error_region':
raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
req = compat_urllib_request.Request(compat_urllib_parse.unquote(playlist_url))
req.add_header('Referer', url)
playlist = self._download_json(req, video_id)
item = playlist['playlist'][0]
formats = []
for format_id, stream_url in item['streamUrls'].items():
formats.extend(self._extract_m3u8_formats(stream_url, video_id, 'mp4'))
self._sort_formats(formats)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
duration = float_or_none(item.get('duration'))
thumbnail = item.get('previewImageUrl')
subtitles = {}
subs = item.get('subtitles')
if subs:
subtitles['cs'] = subs[0]['url']
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, subtitles)
return
subtitles = self._fix_subtitles(self.extract_subtitles(video_id, subtitles))
return {
'id': episode_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
@staticmethod
def _fix_subtitles(subtitles):
if subtitles is None:
return subtitles
def _msectotimecode(msec):
components = []
for divider in [1000, 60, 60, 100]:
components.append(msec % divider)
msec //= divider
return "{3:02}:{2:02}:{1:02},{0:03}".format(*components)
def _fix_subtitle(subtitle):
for line in subtitle.splitlines():
m = re.match(r"^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$", line)
if m:
yield m.group(1)
start, stop = (_msectotimecode(int(t)) for t in m.groups()[1:])
yield "{0} --> {1}".format(start, stop)
else:
yield line
fixed_subtitles = {}
for k, v in subtitles.items():
fixed_subtitles[k] = "\r\n".join(_fix_subtitle(v))
return fixed_subtitles
| true
| true
|
f70e090e5514cb7ff3f6119ea53ce414c57ac954
| 2,467
|
py
|
Python
|
profile_generator/feature/colors/hsl/schema.py
|
nethy/profile-generator
|
9bc54bed36b84b45902d75a273739480b4ff2204
|
[
"MIT"
] | null | null | null |
profile_generator/feature/colors/hsl/schema.py
|
nethy/profile-generator
|
9bc54bed36b84b45902d75a273739480b4ff2204
|
[
"MIT"
] | null | null | null |
profile_generator/feature/colors/hsl/schema.py
|
nethy/profile-generator
|
9bc54bed36b84b45902d75a273739480b4ff2204
|
[
"MIT"
] | null | null | null |
from collections.abc import Iterable, Mapping
from typing import Any
from profile_generator.feature.colors.white_balance.schema import DEFAULT
from profile_generator.model.view import raw_therapee
from profile_generator.model.view.raw_therapee import EqPoint, LinearEqPoint
from profile_generator.schema import object_of, range_of
# RawTherapee profile keys for the HSL equalizer curves.
_LC_ENABLED = "LCEnabled"
_HH_CURVE = "HhCurve"
_CH_CURVE = "ChCurve"
_LH_CURVE = "LhCurve"
# NOTE(review): this DEFAULT shadows the DEFAULT imported from
# white_balance.schema above — presumably that import is unused; confirm.
DEFAULT = {
    _LC_ENABLED: "false",
    _HH_CURVE: raw_therapee.CurveType.LINEAR,
    _CH_CURVE: raw_therapee.CurveType.LINEAR,
    _LH_CURVE: raw_therapee.CurveType.LINEAR,
}
# User adjustments run from -7 to +7 steps per color.
_STEPS = 7.0
_COLORS_SCHEMA = object_of(
    {
        "magenta": range_of(-_STEPS, _STEPS),
        "red": range_of(-_STEPS, _STEPS),
        "yellow": range_of(-_STEPS, _STEPS),
        "green": range_of(-_STEPS, _STEPS),
        "cyan": range_of(-_STEPS, _STEPS),
        "blue": range_of(-_STEPS, _STEPS),
    }
)
SCHEMA = object_of(
    {"hue": _COLORS_SCHEMA, "saturation": _COLORS_SCHEMA, "luminance": _COLORS_SCHEMA}
)
# Equalizer y-value meaning "no change".
_BASE_VALUE = 0.5
_COLORS = [
    "red",
    "yellow",
    "green",
    "cyan",
    "blue",
    "magenta",
]
# Hue positions of the six colors on a 0..1 wheel (degrees / 360).
HUES = {
    "red": 0 / 360,
    "yellow": 60 / 360,
    "green": 120 / 360,
    "cyan": 180 / 360,
    "blue": 240 / 360,
    "magenta": 300 / 360,
}
def process(data: Any) -> Mapping[str, str]:
    """Render the HSL equalizer profile entries from raw config *data*."""
    curves: dict[str, str] = {}
    for key, max_adj, template in (
        ("hue", 0.25, _HH_CURVE),
        ("saturation", 0.3, _CH_CURVE),
        ("luminance", 0.07, _LH_CURVE),
    ):
        curves |= _get_eq_curve(data, key, max_adj, template)
    return DEFAULT | curves
def _get_eq_curve(
    data: Any, key_name: str, max_adjustment: float, template_name: str
) -> Mapping[str, str]:
    """Build one RawTherapee curve entry; empty mapping when the curve is flat."""
    points = _get_equalizer(data.get(key_name, {}), max_adjustment)
    if all(point.y == _BASE_VALUE for point in points):
        # Every color sits at the neutral value — nothing to emit.
        return {}
    curve = raw_therapee.CurveType.STANDARD + raw_therapee.present_equalizer(points)
    return {_LC_ENABLED: "true", template_name: curve}
def _get_equalizer(
    config: Mapping[str, int], max_adjustment: float
) -> Iterable[EqPoint]:
    """Map each color's adjustment onto an equalizer point at its hue."""
    points = []
    for color in _COLORS:
        value = _get_value(config, color, max_adjustment)
        points.append(LinearEqPoint(HUES[color], value))
    return points
def _get_value(config: Mapping[str, int], color: str, max_adjustment: float) -> float:
    """Translate a -7..7 user step for *color* into a y-value around 0.5."""
    step = config.get(color, 0)
    return _BASE_VALUE + (step / _STEPS) * max_adjustment
| 26.244681
| 86
| 0.660722
|
from collections.abc import Iterable, Mapping
from typing import Any
from profile_generator.feature.colors.white_balance.schema import DEFAULT
from profile_generator.model.view import raw_therapee
from profile_generator.model.view.raw_therapee import EqPoint, LinearEqPoint
from profile_generator.schema import object_of, range_of
_LC_ENABLED = "LCEnabled"
_HH_CURVE = "HhCurve"
_CH_CURVE = "ChCurve"
_LH_CURVE = "LhCurve"
DEFAULT = {
_LC_ENABLED: "false",
_HH_CURVE: raw_therapee.CurveType.LINEAR,
_CH_CURVE: raw_therapee.CurveType.LINEAR,
_LH_CURVE: raw_therapee.CurveType.LINEAR,
}
_STEPS = 7.0
_COLORS_SCHEMA = object_of(
{
"magenta": range_of(-_STEPS, _STEPS),
"red": range_of(-_STEPS, _STEPS),
"yellow": range_of(-_STEPS, _STEPS),
"green": range_of(-_STEPS, _STEPS),
"cyan": range_of(-_STEPS, _STEPS),
"blue": range_of(-_STEPS, _STEPS),
}
)
SCHEMA = object_of(
{"hue": _COLORS_SCHEMA, "saturation": _COLORS_SCHEMA, "luminance": _COLORS_SCHEMA}
)
_BASE_VALUE = 0.5
_COLORS = [
"red",
"yellow",
"green",
"cyan",
"blue",
"magenta",
]
HUES = {
"red": 0 / 360,
"yellow": 60 / 360,
"green": 120 / 360,
"cyan": 180 / 360,
"blue": 240 / 360,
"magenta": 300 / 360,
}
def process(data: Any) -> Mapping[str, str]:
result: dict[str, str] = {}
result |= _get_eq_curve(data, "hue", 0.25, _HH_CURVE)
result |= _get_eq_curve(data, "saturation", 0.3, _CH_CURVE)
result |= _get_eq_curve(data, "luminance", 0.07, _LH_CURVE)
return DEFAULT | result
def _get_eq_curve(
data: Any, key_name: str, max_adjustment: float, template_name: str
) -> Mapping[str, str]:
config = data.get(key_name, {})
equalizer = _get_equalizer(config, max_adjustment)
if any(p.y != _BASE_VALUE for p in equalizer):
return {
_LC_ENABLED: "true",
template_name: raw_therapee.CurveType.STANDARD
+ raw_therapee.present_equalizer(equalizer),
}
else:
return {}
def _get_equalizer(
config: Mapping[str, int], max_adjustment: float
) -> Iterable[EqPoint]:
return [
LinearEqPoint(HUES[color], _get_value(config, color, max_adjustment))
for color in _COLORS
]
def _get_value(config: Mapping[str, int], color: str, max_adjustment: float) -> float:
adjustment = config.get(color, 0)
return _BASE_VALUE + adjustment / _STEPS * max_adjustment
| true
| true
|
f70e099b3406c30522f6b90e2609a838fa6f001d
| 1,675
|
py
|
Python
|
leetcode/solutions/fibonacci_number/matrix_exponentiation_solution_full_implementation.py
|
ShreckYe/Python-learning-Chinese
|
cfe4ab8fe5e2d68486ddef0b028979aa5cfab69a
|
[
"Apache-2.0"
] | 6
|
2018-09-22T12:45:55.000Z
|
2019-10-08T15:32:25.000Z
|
leetcode/solutions/fibonacci_number/matrix_exponentiation_solution_full_implementation.py
|
ShreckYe/Python-learning-Chinese
|
cfe4ab8fe5e2d68486ddef0b028979aa5cfab69a
|
[
"Apache-2.0"
] | null | null | null |
leetcode/solutions/fibonacci_number/matrix_exponentiation_solution_full_implementation.py
|
ShreckYe/Python-learning-Chinese
|
cfe4ab8fe5e2d68486ddef0b028979aa5cfab69a
|
[
"Apache-2.0"
] | 2
|
2018-09-23T13:12:08.000Z
|
2018-09-26T15:31:12.000Z
|
"""
Author: Shreck Ye
Date: June 16, 2019
Time complexity: O(log(N))
Let's think in the mathematical way. Obviously, the recursion formula represents a linear relationship.
By viewing it as a recursion formula of a single vector F_n = (f_n, f_{n + 1})' with a transition matrix M = (0, 1; 1, 1),
which is (f_{n + 1}, f_{n + 2})' = (0, 1; 1, 1) (f_n, f_{n + 1})' namely F_{n + 1} = M F_n,
we can get the result using matrix exponentiation and reduce the number of recursions.
"""
import copy
F_0 = [[0], [1]]
M = [[0, 1], [1, 1]]
def zero_matrix(m: int, n: int):
    """Create an m-by-n matrix (list of independent row lists) of zeros."""
    return [[0] * n for _ in range(m)]
def matmul(A, B):
    """Multiply matrices A (m x n) and B (n x l), returning a new m x l matrix.

    A and B are lists of row lists; shapes are assumed compatible
    (len(A[0]) == len(B)).  More checks of matrix shapes may be performed.
    """
    # Transposing B once turns each entry into a single dot product and
    # avoids the repeated B[k][j] indexing of the original.
    B_T = list(zip(*B))
    # IDIOM FIX: the original shadowed the builtin `sum` with a local
    # accumulator; use the builtin over a generator instead.
    return [[sum(a * b for a, b in zip(row, col)) for col in B_T]
            for row in A]
def eye(size: int):
    """Return the size-by-size identity matrix."""
    return [[1 if r == c else 0 for c in range(size)] for r in range(size)]
def matrix_power(A, n: int):
    """Return A**n using recursive exponentiation by squaring (O(log n) muls)."""
    if n == 0:
        return eye(len(A))
    if n == 1:
        # Deep copy so the caller's matrix is never aliased.
        return copy.deepcopy(A)
    half = matrix_power(A, n // 2)
    result = matmul(half, half)
    if n % 2 == 1:
        # Odd exponent: one extra multiplication by A.
        result = matmul(result, A)
    return result
class Solution:
    """LeetCode-style adapter: Fibonacci numbers via matrix exponentiation."""

    def fib(self, N: int) -> int:
        # F_N is the first component of M**N applied to F_0 = (0, 1)'.
        transition = matrix_power(M, N)
        return matmul(transition, F_0)[0][0]
# Test cases
# Smoke test: prints 0 1 1 2 3 5 when run as a script.
s = Solution()
print(s.fib(0), s.fib(1), s.fib(2), s.fib(3), s.fib(4), s.fib(5))
| 23.928571
| 122
| 0.549254
|
import copy
F_0 = [[0], [1]]
M = [[0, 1], [1, 1]]
def zero_matrix(m: int, n: int):
rows = [None] * m
row = [0] * n
for i in range(m):
rows[i] = copy.deepcopy(row)
return rows
def matmul(A, B):
m = len(A)
n = len(B)
l = len(B[0])
C = zero_matrix(m, l)
for i in range(m):
for j in range(l):
sum = 0
A_i = A[i]
for k in range(n):
sum += A_i[k] * B[k][j]
C[i][j] = sum
return C
def eye(size: int):
E = zero_matrix(size, size)
for i in range(size):
E[i][i] = 1
return E
def matrix_power(A, n: int):
size = len(A)
if n == 0:
return eye(size)
elif n == 1:
return copy.deepcopy(A)
else:
A_pow_half_n = matrix_power(A, n // 2)
A_pow_n = matmul(A_pow_half_n, A_pow_half_n)
if n % 2:
A_pow_n = matmul(A_pow_n, A)
return A_pow_n
class Solution:
def fib(self, N: int) -> int:
return matmul(matrix_power(M, N), F_0)[0][0]
s = Solution()
print(s.fib(0), s.fib(1), s.fib(2), s.fib(3), s.fib(4), s.fib(5))
| true
| true
|
f70e0a4dc109f4ca82947753b5013560250dda43
| 623
|
py
|
Python
|
data/stacks_data.py
|
NimaAIMLDL/segmentation-app
|
3e13078cbc211f4e1ae915e034825d16edfb2e35
|
[
"MIT"
] | 2
|
2021-05-14T03:47:47.000Z
|
2021-05-14T03:48:10.000Z
|
data/stacks_data.py
|
NimaAIMLDL/segmentation-app
|
3e13078cbc211f4e1ae915e034825d16edfb2e35
|
[
"MIT"
] | null | null | null |
data/stacks_data.py
|
NimaAIMLDL/segmentation-app
|
3e13078cbc211f4e1ae915e034825d16edfb2e35
|
[
"MIT"
] | null | null | null |
# Technology-stack entries shown by the app; every icon lives under
# ../assets/images/ and is named after the lowercased stack name.
stacks_data = [
    {'name': name, 'image': '../assets/images/{}.png'.format(name.lower())}
    for name in ('Python', 'Plotly', 'Dash', 'Pandas',
                 'Keras', 'TensorFlow', 'Sklearn')
]
| 20.096774
| 50
| 0.428571
|
stacks_data = [
{
'name': 'Python',
'image': '../assets/images/python.png'
},
{
'name': 'Plotly',
'image': '../assets/images/plotly.png'
},
{
'name': 'Dash',
'image': '../assets/images/dash.png'
},
{
'name': 'Pandas',
'image': '../assets/images/pandas.png'
},
{
'name': 'Keras',
'image': '../assets/images/keras.png'
},
{
'name': 'TensorFlow',
'image': '../assets/images/tensorflow.png'
},
{
'name': 'Sklearn',
'image': '../assets/images/sklearn.png'
}
]
| true
| true
|
f70e0b2708b01792a275ee48480039747794c660
| 3,377
|
py
|
Python
|
predict_functions.py
|
xXEminenTXx/ImageClassifier
|
e0e63e12108b523270ea7d615afcbfc696b07996
|
[
"MIT"
] | null | null | null |
predict_functions.py
|
xXEminenTXx/ImageClassifier
|
e0e63e12108b523270ea7d615afcbfc696b07996
|
[
"MIT"
] | null | null | null |
predict_functions.py
|
xXEminenTXx/ImageClassifier
|
e0e63e12108b523270ea7d615afcbfc696b07996
|
[
"MIT"
] | null | null | null |
# python imports
import numpy as np
from PIL import Image
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from collections import OrderedDict
from sys import exit
# File containing all of the functions used in the predict program
def load_checkpoint(filepath):
    """Rebuild a saved classifier model from a checkpoint file.

    The checkpoint dict must contain 'arch' ('VGG' or 'Densenet'),
    'class_to_idx', 'classifier' and 'model_state_dict'.  Exits the
    process with status 1 when the arch is unsupported.
    """
    checkpoint = torch.load(filepath)
    if checkpoint["arch"] == 'VGG':
        model = models.vgg16(pretrained=True)
    elif checkpoint["arch"] == 'Densenet':
        model = models.densenet121(pretrained=True)
    else:
        print("Unsupported arch used in checkpoint")
        exit(1)
    # Freeze the pretrained feature weights; only the classifier is swapped in.
    for param in model.parameters():
        param.requires_grad = False
    model.class_to_idx = checkpoint['class_to_idx']
    # Load classifier from checkpoint
    classifier = checkpoint['classifier']
    model.classifier = classifier
    model.load_state_dict(checkpoint['model_state_dict'])
    return model
def process_image(image_path):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns an Numpy array
    '''
    # Process a PIL image for use in a PyTorch model
    pil_image = Image.open(image_path)
    # Resize so the SHORTER side becomes 256 px, preserving aspect ratio
    # (thumbnail caps both dimensions, so 5000 acts as "unbounded").
    if pil_image.size[0] > pil_image.size[1]:
        pil_image.thumbnail((5000, 256))
    else:
        pil_image.thumbnail((256, 5000))
    # Center-crop to 224x224.
    left_margin = (pil_image.width-224)/2
    bottom_margin = (pil_image.height-224)/2
    right_margin = left_margin + 224
    top_margin = bottom_margin + 224
    pil_image = pil_image.crop((left_margin, bottom_margin, right_margin, top_margin))
    # Normalize: scale to [0, 1] then apply the ImageNet channel mean/std.
    np_image = np.array(pil_image)/255
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    np_image = (np_image - mean) / std
    # PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array
    # Color channel needs to be first; retain the order of the other two dimensions.
    np_image = np_image.transpose((2, 0, 1))
    return np_image
def predict(image_path, model, topk, gpu):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Returns (top_probabilities, top_classes): the top-k probabilities as a
    list of floats and the corresponding class labels resolved through the
    model's class_to_idx mapping.
    '''
    image = process_image(image_path)
    if gpu:
        model.to('cuda')
        image = torch.from_numpy(image).type(torch.cuda.FloatTensor)
    else:
        model.to('cpu')
        image = torch.from_numpy(image).type(torch.FloatTensor)
    # Returns a new tensor with a dimension of size one inserted at the specified position.
    image = image.unsqueeze(0)
    # BUG FIX: call the model directly (which runs registered hooks) instead
    # of .forward(), and disable autograd during inference to save memory.
    with torch.no_grad():
        output = model(image)
    # exp() recovers probabilities — presumably the classifier ends in
    # log-softmax; confirm against the training code.
    probabilities = torch.exp(output)
    # Probabilities and the indices of those probabilities corresponding to the classes
    top_probabilities, top_indices = probabilities.topk(topk)
    # Convert to lists
    top_probabilities = top_probabilities.detach().type(torch.FloatTensor).numpy().tolist()[0]
    top_indices = top_indices.detach().type(torch.FloatTensor).numpy().tolist()[0]
    # Convert topk_indices to the actual class labels using class_to_idx
    # Invert the dictionary so you get a mapping from index to class.
    idx_to_class = {value: key for key, value in model.class_to_idx.items()}
    top_classes = [idx_to_class[index] for index in top_indices]
    return top_probabilities, top_classes
| 30.423423
| 127
| 0.695292
|
import numpy as np
from PIL import Image
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from collections import OrderedDict
from sys import exit
def load_checkpoint(filepath):
checkpoint = torch.load(filepath)
if checkpoint["arch"] == 'VGG':
model = models.vgg16(pretrained=True)
elif checkpoint["arch"] == 'Densenet':
model = models.densenet121(pretrained=True)
else:
print("Unsupported arch used in checkpoint")
exit(1)
for param in model.parameters():
param.requires_grad = False
model.class_to_idx = checkpoint['class_to_idx']
classifier = checkpoint['classifier']
model.classifier = classifier
model.load_state_dict(checkpoint['model_state_dict'])
return model
def process_image(image_path):
pil_image = Image.open(image_path)
if pil_image.size[0] > pil_image.size[1]:
pil_image.thumbnail((5000, 256))
else:
pil_image.thumbnail((256, 5000))
left_margin = (pil_image.width-224)/2
bottom_margin = (pil_image.height-224)/2
right_margin = left_margin + 224
top_margin = bottom_margin + 224
pil_image = pil_image.crop((left_margin, bottom_margin, right_margin, top_margin))
np_image = np.array(pil_image)/255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = (np_image - mean) / std
# Color channel needs to be first; retain the order of the other two dimensions.
np_image = np_image.transpose((2, 0, 1))
return np_image
def predict(image_path, model, topk, gpu):
image = process_image(image_path)
if gpu:
model.to('cuda')
image = torch.from_numpy(image).type(torch.cuda.FloatTensor)
else:
model.to('cpu')
image = torch.from_numpy(image).type(torch.FloatTensor)
# Returns a new tensor with a dimension of size one inserted at the specified position.
image = image.unsqueeze(0)
output = model.forward(image)
probabilities = torch.exp(output)
# Probabilities and the indices of those probabilities corresponding to the classes
top_probabilities, top_indices = probabilities.topk(topk)
# Convert to lists
top_probabilities = top_probabilities.detach().type(torch.FloatTensor).numpy().tolist()[0]
top_indices = top_indices.detach().type(torch.FloatTensor).numpy().tolist()[0]
# Convert topk_indices to the actual class labels using class_to_idx
# Invert the dictionary so you get a mapping from index to class.
idx_to_class = {value: key for key, value in model.class_to_idx.items()}
#print(idx_to_class)
top_classes = [idx_to_class[index] for index in top_indices]
return top_probabilities, top_classes
| true
| true
|
f70e0c021691a186693b392f6dbb09438fae2140
| 6,212
|
py
|
Python
|
app/market/models/player_transaction.py
|
zanielyene/krabacus3
|
de985874b4034b13564cfd4af5e58e312fe5d438
|
[
"MIT"
] | 2
|
2020-01-24T14:15:04.000Z
|
2020-04-10T21:49:18.000Z
|
app/market/models/player_transaction.py
|
zanielyene/krabacus3
|
de985874b4034b13564cfd4af5e58e312fe5d438
|
[
"MIT"
] | 3
|
2020-02-12T00:31:59.000Z
|
2021-06-10T21:33:49.000Z
|
app/market/models/player_transaction.py
|
zanielyene/krabacus3
|
de985874b4034b13564cfd4af5e58e312fe5d438
|
[
"MIT"
] | 3
|
2020-01-04T19:02:34.000Z
|
2020-09-13T20:42:43.000Z
|
import logging
from django.contrib.auth.models import User
from django.db import models, transaction
from django.db.models import Q
from django.utils import timezone
from eve_api.models import Structure, EVEPlayerCharacter, ObjectType
from dataclasses import dataclass
from django.apps import apps
from django.core.cache import cache
logger=logging.getLogger(__name__)
class TransactionLinkage(models.Model):
    """Links a quantity of one PlayerTransaction (source) to another
    (destination) along a TradingRoute."""
    id = models.BigAutoField(primary_key=True)
    source_transaction = models.ForeignKey("PlayerTransaction", related_name="source_transaction", on_delete=models.CASCADE)
    destination_transaction = models.ForeignKey("PlayerTransaction", related_name="destination_transaction",on_delete=models.CASCADE)
    # How many units of the source transaction are attributed to the destination.
    quantity_linked = models.BigIntegerField()
    date_linked = models.DateTimeField(default=timezone.now)
    route = models.ForeignKey("TradingRoute", on_delete=models.CASCADE)
    class Meta:
        # Supports per-route, time-ordered linkage queries.
        index_together = [
            ["route", "date_linked"]
        ]
    def save(self, *args, **kwargs):
        ''' On save, update timestamps '''
        # NOTE(review): this sets self.created, but no `created` field is
        # declared on this model — presumably dead code; confirm before
        # relying on it.
        if not self.id:
            self.created = timezone.now()
        return super(TransactionLinkage, self).save(*args, **kwargs)
@dataclass
class TransactionSource:
    # Aggregate of the source linkages accounting for a transaction's goods
    # (built by PlayerTransaction.get_source_value).
    fuzzy: bool          # True when the linked quantity does not exactly cover the requested quantity
    unit_price: float    # quantity-weighted average unit price over the linkages
    total_price: float   # value of the requested quantity at the linked prices
    linkages : list      # the TransactionLinkage rows that contributed
class PlayerTransaction(models.Model):
    """A single market buy or sell executed by a character, keyed by the
    upstream API's transaction id (``ccp_id``).

    The two ``quantity_without_known_*`` counters track how many units have
    not yet been attributed to a matching buy/sell via TransactionLinkage;
    ``link_transactions`` drains them.
    """
    ccp_id = models.BigIntegerField(primary_key=True)
    character = models.ForeignKey(EVEPlayerCharacter, on_delete=models.CASCADE)
    client_id = models.BigIntegerField()
    timestamp = models.DateTimeField()
    is_buy = models.BooleanField()
    is_personal = models.BooleanField()
    journal_ref_id = models.BigIntegerField()
    location = models.ForeignKey(Structure, on_delete=models.CASCADE)
    quantity = models.BigIntegerField()
    object_type = models.ForeignKey(ObjectType, on_delete=models.CASCADE)
    unit_price = models.FloatField()
    # Deliberately unindexed: these two columns change on every linkage, and
    # indexing them would force an index rebuild on every update.
    quantity_without_known_source = models.BigIntegerField()
    quantity_without_known_destination = models.BigIntegerField()
    def __str__(self):
        return "Transaction #{}".format(self.pk)
    @staticmethod
    def exists(ccp_id):
        """
        Cache-backed exists method. Cache only hits for transactions we know
        exist; negative results are never cached, so a later import of the
        transaction is picked up immediately.

        :param ccp_id: upstream transaction id to test for.
        :return: True if a PlayerTransaction with that id exists.
        """
        exists = cache.get("transaction_exists_%s" % ccp_id)
        if exists is not None:
            return True
        else:
            exists_db = PlayerTransaction.objects.filter(pk=ccp_id).exists()
            if exists_db:
                # after 90 days we DGAF -- positive hits are cached 90 days.
                timeout = 86400 * 90
                cache.set("transaction_exists_%s" % ccp_id, True, timeout=timeout)
            return exists_db
    def get_source_value(self, quantity, route):
        """Compute the source cost of ``quantity`` units of this (sell)
        transaction along ``route`` from its recorded linkages.

        :return: a TransactionSource, or None when no linkages exist for the
            route.  When the linkages cover a different number of units than
            requested, the result is marked fuzzy and the total is scaled to
            the requested quantity at the same average unit price.
        :raises Exception: if more units are requested than the transaction
            holds (indicates corrupted linkage bookkeeping).
        """
        if quantity > self.quantity:
            raise Exception("somethings broken with {}".format(self.pk))
        linkages = TransactionLinkage.objects.filter(Q(destination_transaction=self) & Q(route=route))
        if not linkages:
            return None
        ret_links = []
        quant_accounted = 0
        sum_of_products = 0
        for link in linkages:
            quant_accounted += link.quantity_linked
            sum_of_products += link.quantity_linked * link.source_transaction.unit_price
            ret_links.append(link)
        # Fuzzy == the linkages don't account for exactly the requested
        # quantity, so the totals below are proportional estimates.
        fuzzy = False if quant_accounted == quantity else True
        unit_price = sum_of_products / quant_accounted
        if fuzzy:
            # Scale the linked cost to the requested quantity.
            sum_of_products = quantity / quant_accounted * sum_of_products
        return TransactionSource(fuzzy, unit_price, sum_of_products, ret_links)
    class Meta:
        index_together = [
            ["location", "object_type", "character", "timestamp", "is_buy"]
        ]
    def _get_routes_that_apply_to_transaction(self):
        """Return routes whose destination matches this transaction's
        character and structure, i.e. routes this sell could terminate."""
        # Lazy model lookup avoids a circular import with the market app.
        TradingRoute_lazy = apps.get_model('market', 'TradingRoute')
        routes = TradingRoute_lazy.objects.filter(
            destination_character = self.character,
            destination_structure = self.location
        )
        return routes
    def link_transactions(self):
        """Attribute this transaction's unsourced quantity to earlier buys on
        every applicable route, oldest buys first (FIFO), persisting updated
        counters and new linkages atomically.
        """
        # todo: LOCK THE SHIT OUT OF THIS -- concurrent linking of the same
        # transactions could double-attribute quantities.
        routes = self._get_routes_that_apply_to_transaction()
        older_than = self.timestamp
        new_links = []
        transactions_to_save = []
        attributed = 0
        for route in routes:
            # Candidate source buys: same item, at the route's source
            # structure, by the route's source character, no later than this
            # sell, and still holding unattributed quantity.
            transactions = PlayerTransaction.objects.filter(
                location=route.source_structure,
                object_type=self.object_type,
                character=route.source_character,
                timestamp__lte=older_than,
                quantity_without_known_destination__gt=0,
                is_buy=True,
            ).order_by('timestamp')
            for source_txn in transactions:
                # Take as much as this source can supply, capped by what this
                # sell still needs.
                if source_txn.quantity_without_known_destination >= self.quantity_without_known_source:
                    contribution = self.quantity_without_known_source
                else:
                    contribution = source_txn.quantity_without_known_destination
                self.quantity_without_known_source -= contribution
                source_txn.quantity_without_known_destination -= contribution
                attributed += contribution
                link = TransactionLinkage(
                    source_transaction = source_txn,
                    destination_transaction = self,
                    quantity_linked = contribution,
                    route = route
                )
                new_links.append(link)
                transactions_to_save.append(source_txn)
                if not self.quantity_without_known_source:
                    break
            if not self.quantity_without_known_source:
                break
        if transactions_to_save:
            logger.info("Successfully attributed {} units of transaction {}".format(attributed, self.pk))
            # All-or-nothing: counters and linkage rows must agree.
            with transaction.atomic():
                self.save()
                for t in transactions_to_save:
                    t.save()
                TransactionLinkage.objects.bulk_create(new_links)
| 36.116279
| 133
| 0.651642
|
import logging
from django.contrib.auth.models import User
from django.db import models, transaction
from django.db.models import Q
from django.utils import timezone
from eve_api.models import Structure, EVEPlayerCharacter, ObjectType
from dataclasses import dataclass
from django.apps import apps
from django.core.cache import cache
logger=logging.getLogger(__name__)
class TransactionLinkage(models.Model):
    """Attributes a quantity of a destination (sell-side) transaction to a
    source (buy-side) transaction along a specific trading route."""
    id = models.BigAutoField(primary_key=True)
    source_transaction = models.ForeignKey("PlayerTransaction", related_name="source_transaction", on_delete=models.CASCADE)
    destination_transaction = models.ForeignKey("PlayerTransaction", related_name="destination_transaction",on_delete=models.CASCADE)
    # Units of the destination transaction accounted for by this link.
    quantity_linked = models.BigIntegerField()
    date_linked = models.DateTimeField(default=timezone.now)
    route = models.ForeignKey("TradingRoute", on_delete=models.CASCADE)
    class Meta:
        index_together = [
            ["route", "date_linked"]
        ]
    def save(self, *args, **kwargs):
        """Stamp a creation time on first save, then delegate to Django."""
        # NOTE(review): ``created`` is not a declared field on this model, so
        # this write is never persisted -- confirm intent.
        if not self.id:
            self.created = timezone.now()
        return super(TransactionLinkage, self).save(*args, **kwargs)
@dataclass
class TransactionSource:
    """Summary of where, and at what cost, units of a transaction came from."""
    # True when the linkages do not cover exactly the requested quantity.
    fuzzy: bool
    # Weighted-average source unit price across all linkages.
    unit_price: float
    # Total source cost (scaled to the requested quantity when fuzzy).
    total_price: float
    # The TransactionLinkage rows the figures were derived from.
    linkages : list
class PlayerTransaction(models.Model):
    """A single market buy or sell executed by a character, keyed by the
    upstream API's transaction id.  The ``quantity_without_known_*`` counters
    track units not yet attributed via TransactionLinkage."""
    ccp_id = models.BigIntegerField(primary_key=True)
    character = models.ForeignKey(EVEPlayerCharacter, on_delete=models.CASCADE)
    client_id = models.BigIntegerField()
    timestamp = models.DateTimeField()
    is_buy = models.BooleanField()
    is_personal = models.BooleanField()
    journal_ref_id = models.BigIntegerField()
    location = models.ForeignKey(Structure, on_delete=models.CASCADE)
    quantity = models.BigIntegerField()
    object_type = models.ForeignKey(ObjectType, on_delete=models.CASCADE)
    unit_price = models.FloatField()
    # Deliberately unindexed: both change on every linkage made.
    quantity_without_known_source = models.BigIntegerField()
    quantity_without_known_destination = models.BigIntegerField()
    def __str__(self):
        return "Transaction #{}".format(self.pk)
    @staticmethod
    def exists(ccp_id):
        """Cache-backed existence check; only positive results are cached."""
        exists = cache.get("transaction_exists_%s" % ccp_id)
        if exists is not None:
            return True
        else:
            exists_db = PlayerTransaction.objects.filter(pk=ccp_id).exists()
            if exists_db:
                # after 90 days we DGAF
                timeout = 86400 * 90
                cache.set("transaction_exists_%s" % ccp_id, True, timeout=timeout)
            return exists_db
    def get_source_value(self, quantity, route):
        """Compute the source cost of ``quantity`` units along ``route`` from
        recorded linkages; returns None when no linkages exist, a fuzzy
        (proportionally scaled) TransactionSource when coverage is partial."""
        if quantity > self.quantity:
            raise Exception("somethings broken with {}".format(self.pk))
        linkages = TransactionLinkage.objects.filter(Q(destination_transaction=self) & Q(route=route))
        if not linkages:
            return None
        ret_links = []
        quant_accounted = 0
        sum_of_products = 0
        for link in linkages:
            quant_accounted += link.quantity_linked
            sum_of_products += link.quantity_linked * link.source_transaction.unit_price
            ret_links.append(link)
        fuzzy = False if quant_accounted == quantity else True
        unit_price = sum_of_products / quant_accounted
        if fuzzy:
            # Scale the linked cost to the requested quantity.
            sum_of_products = quantity / quant_accounted * sum_of_products
        return TransactionSource(fuzzy, unit_price, sum_of_products, ret_links)
    class Meta:
        index_together = [
            ["location", "object_type", "character", "timestamp", "is_buy"]
        ]
    def _get_routes_that_apply_to_transaction(self):
        """Routes whose destination matches this transaction's character and
        structure.  Lazy model lookup avoids a circular import."""
        TradingRoute_lazy = apps.get_model('market', 'TradingRoute')
        routes = TradingRoute_lazy.objects.filter(
            destination_character = self.character,
            destination_structure = self.location
        )
        return routes
    def link_transactions(self):
        """Attribute this transaction's unsourced quantity to earlier buys on
        every applicable route, oldest first (FIFO), persisting atomically."""
        # todo: LOCK THE SHIT OUT OF THIS
        routes = self._get_routes_that_apply_to_transaction()
        older_than = self.timestamp
        new_links = []
        transactions_to_save = []
        attributed = 0
        for route in routes:
            # Candidate source buys with unattributed quantity remaining.
            transactions = PlayerTransaction.objects.filter(
                location=route.source_structure,
                object_type=self.object_type,
                character=route.source_character,
                timestamp__lte=older_than,
                quantity_without_known_destination__gt=0,
                is_buy=True,
            ).order_by('timestamp')
            for source_txn in transactions:
                if source_txn.quantity_without_known_destination >= self.quantity_without_known_source:
                    contribution = self.quantity_without_known_source
                else:
                    contribution = source_txn.quantity_without_known_destination
                self.quantity_without_known_source -= contribution
                source_txn.quantity_without_known_destination -= contribution
                attributed += contribution
                link = TransactionLinkage(
                    source_transaction = source_txn,
                    destination_transaction = self,
                    quantity_linked = contribution,
                    route = route
                )
                new_links.append(link)
                transactions_to_save.append(source_txn)
                if not self.quantity_without_known_source:
                    break
            if not self.quantity_without_known_source:
                break
        if transactions_to_save:
            logger.info("Successfully attributed {} units of transaction {}".format(attributed, self.pk))
            # All-or-nothing: counters and linkage rows must agree.
            with transaction.atomic():
                self.save()
                for t in transactions_to_save:
                    t.save()
                TransactionLinkage.objects.bulk_create(new_links)
| true
| true
|
f70e0d63198c55093b63b35c874d4605ea1b15a0
| 9,296
|
py
|
Python
|
nova/tests/api/openstack/compute/plugins/v3/test_instance_usage_audit_log.py
|
bopopescu/nova-34
|
b037993984229bb698050f20e8719b8c06ff2be3
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/api/openstack/compute/plugins/v3/test_instance_usage_audit_log.py
|
bopopescu/nova-34
|
b037993984229bb698050f20e8719b8c06ff2be3
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/api/openstack/compute/plugins/v3/test_instance_usage_audit_log.py
|
bopopescu/nova-34
|
b037993984229bb698050f20e8719b8c06ff2be3
|
[
"Apache-2.0"
] | 1
|
2020-07-24T08:52:14.000Z
|
2020-07-24T08:52:14.000Z
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from webob import exc
from nova.api.openstack.compute.plugins.v3 import \
instance_usage_audit_log as ial
from nova import context
from nova import db
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.objects import test_service
from nova import utils
import urllib
# Fixture data for the instance-usage-audit-log controller tests below.
service_base = test_service.fake_service
# Four compute services plus one non-compute ('bogus') service that the
# audit log must exclude when counting hosts.
TEST_COMPUTE_SERVICES = [dict(service_base, host='foo', topic='compute'),
                         dict(service_base, host='bar', topic='compute'),
                         dict(service_base, host='baz', topic='compute'),
                         dict(service_base, host='plonk', topic='compute'),
                         dict(service_base, host='wibble', topic='bogus'),
                         ]
# Three consecutive one-day audit periods (each period's end is the next
# period's beginning).
begin1 = datetime.datetime(2012, 7, 4, 6, 0, 0)
begin2 = end1 = datetime.datetime(2012, 7, 5, 6, 0, 0)
begin3 = end2 = datetime.datetime(2012, 7, 6, 6, 0, 0)
end3 = datetime.datetime(2012, 7, 7, 6, 0, 0)
#test data
TEST_LOGS1 = [
    #all services done, no errors.
    dict(host="plonk", period_beginning=begin1, period_ending=end1,
         state="DONE", errors=0, task_items=23, message="test1"),
    dict(host="baz", period_beginning=begin1, period_ending=end1,
         state="DONE", errors=0, task_items=17, message="test2"),
    dict(host="bar", period_beginning=begin1, period_ending=end1,
         state="DONE", errors=0, task_items=10, message="test3"),
    dict(host="foo", period_beginning=begin1, period_ending=end1,
         state="DONE", errors=0, task_items=7, message="test4"),
]
TEST_LOGS2 = [
    #some still running... (host 'bar' is RUNNING)
    dict(host="plonk", period_beginning=begin2, period_ending=end2,
         state="DONE", errors=0, task_items=23, message="test5"),
    dict(host="baz", period_beginning=begin2, period_ending=end2,
         state="DONE", errors=0, task_items=17, message="test6"),
    dict(host="bar", period_beginning=begin2, period_ending=end2,
         state="RUNNING", errors=0, task_items=10, message="test7"),
    dict(host="foo", period_beginning=begin2, period_ending=end2,
         state="DONE", errors=0, task_items=7, message="test8"),
]
TEST_LOGS3 = [
    #some errors.. (3 errors total across hosts 'baz' and 'foo')
    dict(host="plonk", period_beginning=begin3, period_ending=end3,
         state="DONE", errors=0, task_items=23, message="test9"),
    dict(host="baz", period_beginning=begin3, period_ending=end3,
         state="DONE", errors=2, task_items=17, message="test10"),
    dict(host="bar", period_beginning=begin3, period_ending=end3,
         state="DONE", errors=0, task_items=10, message="test11"),
    dict(host="foo", period_beginning=begin3, period_ending=end3,
         state="DONE", errors=1, task_items=7, message="test12"),
]
def fake_task_log_get_all(context, task_name, begin, end,
                          host=None, state=None):
    """Stub for db.task_log_get_all: return the canned log set for a period.

    Raises AssertionError for any period not covered by the fixtures.
    """
    assert task_name == "instance_usage_audit"
    logs_by_period = {
        (begin1, end1): TEST_LOGS1,
        (begin2, end2): TEST_LOGS2,
        (begin3, end3): TEST_LOGS3,
    }
    try:
        return logs_by_period[(begin, end)]
    except KeyError:
        raise AssertionError("Invalid date %s to %s" % (begin, end))
def fake_last_completed_audit_period(unit=None, before=None):
    """Stub for utils.last_completed_audit_period over the fixture periods.

    Without ``before`` the oldest period is returned; with ``before`` the
    most recent period that ended strictly before it.  Raises AssertionError
    when ``before`` precedes every fixture period.
    """
    if before is None:
        return begin1, end1
    # Newest period first, so the first match is the latest completed one.
    for period_begin, period_end in ((begin3, end3),
                                     (begin2, end2),
                                     (begin1, end1)):
        if before > period_end:
            return period_begin, period_end
    raise AssertionError("Invalid before date %s" % (before))
class InstanceUsageAuditLogTest(test.TestCase):
    """v3 API tests for the instance-usage-audit-log controller.

    All DB and utility calls are stubbed with the module-level fixtures, and
    "now" is frozen per test via timeutils so period selection is stable.
    """
    def setUp(self):
        super(InstanceUsageAuditLogTest, self).setUp()
        self.context = context.get_admin_context()
        # Freeze "now" inside period 2 so the default view covers period 1.
        timeutils.set_time_override(datetime.datetime(2012, 7, 5, 10, 0, 0))
        self.controller = ial.InstanceUsageAuditLogController()
        self.host_api = self.controller.host_api
        def fake_service_get_all(context, disabled):
            self.assertTrue(disabled is None)
            return TEST_COMPUTE_SERVICES
        self.stubs.Set(utils, 'last_completed_audit_period',
                       fake_last_completed_audit_period)
        self.stubs.Set(db, 'service_get_all',
                       fake_service_get_all)
        self.stubs.Set(db, 'task_log_get_all',
                       fake_task_log_get_all)
    def tearDown(self):
        super(InstanceUsageAuditLogTest, self).tearDown()
        # Undo the frozen clock so other test classes see real time.
        timeutils.clear_time_override()
    def test_index(self):
        # Default (no ?before=): the last completed period, TEST_LOGS1.
        req = fakes.HTTPRequestV3.blank('/os-instance_usage_audit_log')
        result = self.controller.index(req)
        self.assertIn('instance_usage_audit_log', result)
        logs = result['instance_usage_audit_log']
        self.assertEquals(57, logs['total_instances'])
        self.assertEquals(0, logs['total_errors'])
        self.assertEquals(4, len(logs['log']))
        self.assertEquals(4, logs['num_hosts'])
        self.assertEquals(4, logs['num_hosts_done'])
        self.assertEquals(0, logs['num_hosts_running'])
        self.assertEquals(0, logs['num_hosts_not_run'])
        self.assertEquals("ALL hosts done. 0 errors.", logs['overall_status'])
    def test_index_with_format1(self):
        # ?before= accepts a second-resolution timestamp.
        before = urllib.quote("2012-07-05 10:00:00")
        req = fakes.HTTPRequestV3.blank(
            '/os-instance_usage_audit_log?before=' + before)
        result = self.controller.index(req)
        self.assertIn('instance_usage_audit_log', result)
        logs = result['instance_usage_audit_log']
        self.assertEquals(57, logs['total_instances'])
        self.assertEquals(0, logs['total_errors'])
        self.assertEquals(4, len(logs['log']))
        self.assertEquals(4, logs['num_hosts'])
        self.assertEquals(4, logs['num_hosts_done'])
        self.assertEquals(0, logs['num_hosts_running'])
        self.assertEquals(0, logs['num_hosts_not_run'])
        self.assertEquals("ALL hosts done. 0 errors.", logs['overall_status'])
    def test_index_with_format2(self):
        # ?before= also accepts fractional seconds.
        before = urllib.quote('2012-07-05 10:00:00.10')
        req = fakes.HTTPRequestV3.blank(
            '/os-instance_usage_audit_log?before=' + before)
        result = self.controller.index(req)
        self.assertIn('instance_usage_audit_log', result)
        logs = result['instance_usage_audit_log']
        self.assertEquals(57, logs['total_instances'])
        self.assertEquals(0, logs['total_errors'])
        self.assertEquals(4, len(logs['log']))
        self.assertEquals(4, logs['num_hosts'])
        self.assertEquals(4, logs['num_hosts_done'])
        self.assertEquals(0, logs['num_hosts_running'])
        self.assertEquals(0, logs['num_hosts_not_run'])
        self.assertEquals("ALL hosts done. 0 errors.", logs['overall_status'])
    def test_index_with_invalid_format(self):
        # An unparseable ?before= must yield 400, not a server error.
        req = fakes.HTTPRequestV3.blank(
            '/os-instance_usage_audit_log?before=abc')
        self.assertRaises(exc.HTTPBadRequest, self.controller.index, req)
    def test_index_with_running(self):
        # Period 2 (TEST_LOGS2): host 'bar' is still RUNNING.
        before = urllib.quote('2012-07-06 10:00:00')
        req = fakes.HTTPRequestV3.blank(
            '/os-instance_usage_audit_log?before=' + before)
        result = self.controller.index(req)
        self.assertIn('instance_usage_audit_log', result)
        logs = result['instance_usage_audit_log']
        self.assertEquals(57, logs['total_instances'])
        self.assertEquals(0, logs['total_errors'])
        self.assertEquals(4, len(logs['log']))
        self.assertEquals(4, logs['num_hosts'])
        self.assertEquals(3, logs['num_hosts_done'])
        self.assertEquals(1, logs['num_hosts_running'])
        self.assertEquals(0, logs['num_hosts_not_run'])
        self.assertEquals("3 of 4 hosts done. 0 errors.",
                          logs['overall_status'])
    def test_index_with_errors(self):
        # Period 3 (TEST_LOGS3): 3 errors across hosts 'baz' and 'foo'.
        before = urllib.quote('2012-07-07 10:00:00')
        req = fakes.HTTPRequestV3.blank(
            '/os-instance_usage_audit_log?before=' + before)
        result = self.controller.index(req)
        self.assertIn('instance_usage_audit_log', result)
        logs = result['instance_usage_audit_log']
        self.assertEquals(57, logs['total_instances'])
        self.assertEquals(3, logs['total_errors'])
        self.assertEquals(4, len(logs['log']))
        self.assertEquals(4, logs['num_hosts'])
        self.assertEquals(4, logs['num_hosts_done'])
        self.assertEquals(0, logs['num_hosts_running'])
        self.assertEquals(0, logs['num_hosts_not_run'])
        self.assertEquals("ALL hosts done. 3 errors.",
                          logs['overall_status'])
| 42.254545
| 78
| 0.656734
|
import datetime
from webob import exc
from nova.api.openstack.compute.plugins.v3 import \
instance_usage_audit_log as ial
from nova import context
from nova import db
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.objects import test_service
from nova import utils
import urllib
# Fixture data for the instance-usage-audit-log controller tests below.
service_base = test_service.fake_service
# Four compute services plus one non-compute ('bogus') service.
TEST_COMPUTE_SERVICES = [dict(service_base, host='foo', topic='compute'),
                         dict(service_base, host='bar', topic='compute'),
                         dict(service_base, host='baz', topic='compute'),
                         dict(service_base, host='plonk', topic='compute'),
                         dict(service_base, host='wibble', topic='bogus'),
                         ]
# Three consecutive one-day audit periods.
begin1 = datetime.datetime(2012, 7, 4, 6, 0, 0)
begin2 = end1 = datetime.datetime(2012, 7, 5, 6, 0, 0)
begin3 = end2 = datetime.datetime(2012, 7, 6, 6, 0, 0)
end3 = datetime.datetime(2012, 7, 7, 6, 0, 0)
# Period 1: all hosts DONE, no errors.
TEST_LOGS1 = [
    dict(host="plonk", period_beginning=begin1, period_ending=end1,
         state="DONE", errors=0, task_items=23, message="test1"),
    dict(host="baz", period_beginning=begin1, period_ending=end1,
         state="DONE", errors=0, task_items=17, message="test2"),
    dict(host="bar", period_beginning=begin1, period_ending=end1,
         state="DONE", errors=0, task_items=10, message="test3"),
    dict(host="foo", period_beginning=begin1, period_ending=end1,
         state="DONE", errors=0, task_items=7, message="test4"),
]
# Period 2: host 'bar' still RUNNING.
TEST_LOGS2 = [
    dict(host="plonk", period_beginning=begin2, period_ending=end2,
         state="DONE", errors=0, task_items=23, message="test5"),
    dict(host="baz", period_beginning=begin2, period_ending=end2,
         state="DONE", errors=0, task_items=17, message="test6"),
    dict(host="bar", period_beginning=begin2, period_ending=end2,
         state="RUNNING", errors=0, task_items=10, message="test7"),
    dict(host="foo", period_beginning=begin2, period_ending=end2,
         state="DONE", errors=0, task_items=7, message="test8"),
]
# Period 3: all DONE, 3 errors total.
TEST_LOGS3 = [
    dict(host="plonk", period_beginning=begin3, period_ending=end3,
         state="DONE", errors=0, task_items=23, message="test9"),
    dict(host="baz", period_beginning=begin3, period_ending=end3,
         state="DONE", errors=2, task_items=17, message="test10"),
    dict(host="bar", period_beginning=begin3, period_ending=end3,
         state="DONE", errors=0, task_items=10, message="test11"),
    dict(host="foo", period_beginning=begin3, period_ending=end3,
         state="DONE", errors=1, task_items=7, message="test12"),
]
def fake_task_log_get_all(context, task_name, begin, end,
                          host=None, state=None):
    """Stub for db.task_log_get_all returning canned logs per audit period."""
    assert task_name == "instance_usage_audit"
    for period_begin, period_end, logs in ((begin1, end1, TEST_LOGS1),
                                           (begin2, end2, TEST_LOGS2),
                                           (begin3, end3, TEST_LOGS3)):
        if begin == period_begin and end == period_end:
            return logs
    raise AssertionError("Invalid date %s to %s" % (begin, end))
def fake_last_completed_audit_period(unit=None, before=None):
    """Stub for utils.last_completed_audit_period over the fixture periods."""
    if before is None:
        # No cutoff: the oldest fixture period.
        return begin1, end1
    # Newest first, so the first match is the latest completed period.
    for period in ((begin3, end3), (begin2, end2), (begin1, end1)):
        if before > period[1]:
            return period
    raise AssertionError("Invalid before date %s" % (before))
class InstanceUsageAuditLogTest(test.TestCase):
    """v3 API tests for the instance-usage-audit-log controller; DB and
    utility calls are stubbed with the module-level fixtures and "now" is
    frozen via timeutils so period selection is stable."""
    def setUp(self):
        super(InstanceUsageAuditLogTest, self).setUp()
        self.context = context.get_admin_context()
        # Freeze "now" inside period 2 so the default view covers period 1.
        timeutils.set_time_override(datetime.datetime(2012, 7, 5, 10, 0, 0))
        self.controller = ial.InstanceUsageAuditLogController()
        self.host_api = self.controller.host_api
        def fake_service_get_all(context, disabled):
            self.assertTrue(disabled is None)
            return TEST_COMPUTE_SERVICES
        self.stubs.Set(utils, 'last_completed_audit_period',
                       fake_last_completed_audit_period)
        self.stubs.Set(db, 'service_get_all',
                       fake_service_get_all)
        self.stubs.Set(db, 'task_log_get_all',
                       fake_task_log_get_all)
    def tearDown(self):
        super(InstanceUsageAuditLogTest, self).tearDown()
        timeutils.clear_time_override()
    def test_index(self):
        # Default (no ?before=): the last completed period, TEST_LOGS1.
        req = fakes.HTTPRequestV3.blank('/os-instance_usage_audit_log')
        result = self.controller.index(req)
        self.assertIn('instance_usage_audit_log', result)
        logs = result['instance_usage_audit_log']
        self.assertEquals(57, logs['total_instances'])
        self.assertEquals(0, logs['total_errors'])
        self.assertEquals(4, len(logs['log']))
        self.assertEquals(4, logs['num_hosts'])
        self.assertEquals(4, logs['num_hosts_done'])
        self.assertEquals(0, logs['num_hosts_running'])
        self.assertEquals(0, logs['num_hosts_not_run'])
        self.assertEquals("ALL hosts done. 0 errors.", logs['overall_status'])
    def test_index_with_format1(self):
        # Second-resolution ?before= timestamp.
        before = urllib.quote("2012-07-05 10:00:00")
        req = fakes.HTTPRequestV3.blank(
            '/os-instance_usage_audit_log?before=' + before)
        result = self.controller.index(req)
        self.assertIn('instance_usage_audit_log', result)
        logs = result['instance_usage_audit_log']
        self.assertEquals(57, logs['total_instances'])
        self.assertEquals(0, logs['total_errors'])
        self.assertEquals(4, len(logs['log']))
        self.assertEquals(4, logs['num_hosts'])
        self.assertEquals(4, logs['num_hosts_done'])
        self.assertEquals(0, logs['num_hosts_running'])
        self.assertEquals(0, logs['num_hosts_not_run'])
        self.assertEquals("ALL hosts done. 0 errors.", logs['overall_status'])
    def test_index_with_format2(self):
        # Fractional-second ?before= timestamp.
        before = urllib.quote('2012-07-05 10:00:00.10')
        req = fakes.HTTPRequestV3.blank(
            '/os-instance_usage_audit_log?before=' + before)
        result = self.controller.index(req)
        self.assertIn('instance_usage_audit_log', result)
        logs = result['instance_usage_audit_log']
        self.assertEquals(57, logs['total_instances'])
        self.assertEquals(0, logs['total_errors'])
        self.assertEquals(4, len(logs['log']))
        self.assertEquals(4, logs['num_hosts'])
        self.assertEquals(4, logs['num_hosts_done'])
        self.assertEquals(0, logs['num_hosts_running'])
        self.assertEquals(0, logs['num_hosts_not_run'])
        self.assertEquals("ALL hosts done. 0 errors.", logs['overall_status'])
    def test_index_with_invalid_format(self):
        # Unparseable ?before= must yield 400.
        req = fakes.HTTPRequestV3.blank(
            '/os-instance_usage_audit_log?before=abc')
        self.assertRaises(exc.HTTPBadRequest, self.controller.index, req)
    def test_index_with_running(self):
        # Period 2 (TEST_LOGS2): host 'bar' is still RUNNING.
        before = urllib.quote('2012-07-06 10:00:00')
        req = fakes.HTTPRequestV3.blank(
            '/os-instance_usage_audit_log?before=' + before)
        result = self.controller.index(req)
        self.assertIn('instance_usage_audit_log', result)
        logs = result['instance_usage_audit_log']
        self.assertEquals(57, logs['total_instances'])
        self.assertEquals(0, logs['total_errors'])
        self.assertEquals(4, len(logs['log']))
        self.assertEquals(4, logs['num_hosts'])
        self.assertEquals(3, logs['num_hosts_done'])
        self.assertEquals(1, logs['num_hosts_running'])
        self.assertEquals(0, logs['num_hosts_not_run'])
        self.assertEquals("3 of 4 hosts done. 0 errors.",
                          logs['overall_status'])
    def test_index_with_errors(self):
        # Period 3 (TEST_LOGS3): 3 errors across hosts 'baz' and 'foo'.
        before = urllib.quote('2012-07-07 10:00:00')
        req = fakes.HTTPRequestV3.blank(
            '/os-instance_usage_audit_log?before=' + before)
        result = self.controller.index(req)
        self.assertIn('instance_usage_audit_log', result)
        logs = result['instance_usage_audit_log']
        self.assertEquals(57, logs['total_instances'])
        self.assertEquals(3, logs['total_errors'])
        self.assertEquals(4, len(logs['log']))
        self.assertEquals(4, logs['num_hosts'])
        self.assertEquals(4, logs['num_hosts_done'])
        self.assertEquals(0, logs['num_hosts_running'])
        self.assertEquals(0, logs['num_hosts_not_run'])
        self.assertEquals("ALL hosts done. 3 errors.",
                          logs['overall_status'])
| true
| true
|
f70e0dc1141a75b9b7795fbc60c21ab2fd20b738
| 1,350
|
py
|
Python
|
tests/kyu_6_tests/test_regexp_basics_is_it_ipv4_address.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
tests/kyu_6_tests/test_regexp_basics_is_it_ipv4_address.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
tests/kyu_6_tests/test_regexp_basics_is_it_ipv4_address.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
import unittest
from katas.kyu_6.regexp_basics_is_it_ipv4_address import ipv4_address
class IPV4AddressTestCase(unittest.TestCase):
    """Tests for ipv4_address(): strict dotted-quad validation.

    The cases below expect acceptance of exactly four dot-separated octets
    in 0-255 with no leading zeros, and rejection of extra/missing octets,
    out-of-range values, and any surrounding whitespace or newlines.
    """
    def test_true(self):
        self.assertTrue(ipv4_address('127.0.0.1'))
    def test_true_2(self):
        self.assertTrue(ipv4_address('0.0.0.0'))
    def test_true_3(self):
        self.assertTrue(ipv4_address('255.255.255.255'))
    def test_true_4(self):
        self.assertTrue(ipv4_address('10.20.30.40'))
    def test_false(self):
        # Empty string.
        self.assertFalse(ipv4_address(''))
    def test_false_2(self):
        # Octet out of range (256 > 255).
        self.assertFalse(ipv4_address('10.256.30.40'))
    def test_false_3(self):
        # Leading zero in an octet.
        self.assertFalse(ipv4_address('10.20.030.40'))
    def test_false_4(self):
        # Too few octets.
        self.assertFalse(ipv4_address('127.0.1'))
    def test_false_5(self):
        # Too many octets.
        self.assertFalse(ipv4_address('127.0.0.0.1'))
    def test_false_6(self):
        # Empty octets.
        self.assertFalse(ipv4_address('..255.255'))
    def test_false_7(self):
        # Trailing newline must not be ignored (beware of $ vs \Z in regex).
        self.assertFalse(ipv4_address('127.0.0.1\n'))
    def test_false_8(self):
        self.assertFalse(ipv4_address('\n127.0.0.1'))
    def test_false_9(self):
        self.assertFalse(ipv4_address(' 127.0.0.1'))
    def test_false_10(self):
        self.assertFalse(ipv4_address('127.0.0.1 '))
    def test_false_11(self):
        self.assertFalse(ipv4_address(' 127.0.0.1 '))
| 26.470588
| 69
| 0.663704
|
import unittest
from katas.kyu_6.regexp_basics_is_it_ipv4_address import ipv4_address
class IPV4AddressTestCase(unittest.TestCase):
    """Tests for ipv4_address(): exactly four dot-separated octets in 0-255,
    no leading zeros, no surrounding whitespace or newlines."""
    def test_true(self):
        self.assertTrue(ipv4_address('127.0.0.1'))
    def test_true_2(self):
        self.assertTrue(ipv4_address('0.0.0.0'))
    def test_true_3(self):
        self.assertTrue(ipv4_address('255.255.255.255'))
    def test_true_4(self):
        self.assertTrue(ipv4_address('10.20.30.40'))
    def test_false(self):
        self.assertFalse(ipv4_address(''))
    def test_false_2(self):
        self.assertFalse(ipv4_address('10.256.30.40'))
    def test_false_3(self):
        self.assertFalse(ipv4_address('10.20.030.40'))
    def test_false_4(self):
        self.assertFalse(ipv4_address('127.0.1'))
    def test_false_5(self):
        self.assertFalse(ipv4_address('127.0.0.0.1'))
    def test_false_6(self):
        self.assertFalse(ipv4_address('..255.255'))
    def test_false_7(self):
        self.assertFalse(ipv4_address('127.0.0.1\n'))
    def test_false_8(self):
        self.assertFalse(ipv4_address('\n127.0.0.1'))
    def test_false_9(self):
        self.assertFalse(ipv4_address(' 127.0.0.1'))
    def test_false_10(self):
        self.assertFalse(ipv4_address('127.0.0.1 '))
    def test_false_11(self):
        self.assertFalse(ipv4_address(' 127.0.0.1 '))
| true
| true
|
f70e0e628ae3abaeb2a0154852fecbc787501f2a
| 1,410
|
py
|
Python
|
xebec/tests/test_validate.py
|
gibsramen/xebec
|
89586fb4b6f6d75fb89ca32cfc1bee0d48705067
|
[
"BSD-3-Clause"
] | 1
|
2022-03-27T04:46:14.000Z
|
2022-03-27T04:46:14.000Z
|
xebec/tests/test_validate.py
|
gibsramen/xebec
|
89586fb4b6f6d75fb89ca32cfc1bee0d48705067
|
[
"BSD-3-Clause"
] | null | null | null |
xebec/tests/test_validate.py
|
gibsramen/xebec
|
89586fb4b6f6d75fb89ca32cfc1bee0d48705067
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import pytest
from xebec.src import _validate as vd
def test_validate_table(data_paths, tmp_path):
    """A non-BIOM file raises ValueError; a missing path FileNotFoundError."""
    bad_table = os.path.join(tmp_path, "err.biom")
    with open(bad_table, "w") as fh:
        fh.write("kachow")
    with pytest.raises(ValueError) as exc_info:
        vd.validate_table(bad_table)
    assert str(exc_info.value) == "Table is empty!"
    with pytest.raises(FileNotFoundError) as exc_info:
        vd.validate_table("NOT A FILE")
    assert str(exc_info.value) == (
        "[Errno 2] No such file or directory: 'NOT A FILE'"
    )
def test_validate_metadata(data_paths, tmp_path):
    """Empty metadata raises ValueError; a missing path FileNotFoundError."""
    bad_md = os.path.join(tmp_path, "err.tsv")
    with open(bad_md, "w") as fh:
        fh.write("kerblam")
    with pytest.raises(ValueError) as exc_info:
        vd.validate_metadata(bad_md)
    assert str(exc_info.value) == "Metadata is empty!"
    with pytest.raises(FileNotFoundError) as exc_info:
        vd.validate_metadata("NOT A FILE")
    assert str(exc_info.value) == (
        "[Errno 2] No such file or directory: 'NOT A FILE'"
    )
def test_validate_tree(data_paths, tmp_path):
    """A missing tree file raises FileNotFoundError.

    Renamed from ``test_validate_metadata``: the original name collided with
    the metadata test defined earlier in the file, so pytest collected only
    this redefinition and silently never ran the metadata test (flake8 F811).
    The body clearly targets ``validate_tree``, so the name now matches.
    """
    with pytest.raises(FileNotFoundError) as exc_info:
        vd.validate_tree("NOT A FILE")
    exp_err_msg = "[Errno 2] No such file or directory: 'NOT A FILE'"
    assert str(exc_info.value) == exp_err_msg
| 31.333333
| 69
| 0.694326
|
import os
import pytest
from xebec.src import _validate as vd
def test_validate_table(data_paths, tmp_path):
    """A non-BIOM file raises ValueError; a missing path FileNotFoundError."""
    broken_table = os.path.join(tmp_path, "err.biom")
    with open(broken_table, "w") as handle:
        handle.write("kachow")
    with pytest.raises(ValueError) as exc_info:
        vd.validate_table(broken_table)
    assert str(exc_info.value) == "Table is empty!"
    with pytest.raises(FileNotFoundError) as exc_info:
        vd.validate_table("NOT A FILE")
    expected = "[Errno 2] No such file or directory: 'NOT A FILE'"
    assert str(exc_info.value) == expected
def test_validate_metadata(data_paths, tmp_path):
    """Empty metadata raises ValueError; a missing path FileNotFoundError."""
    broken_md = os.path.join(tmp_path, "err.tsv")
    with open(broken_md, "w") as handle:
        handle.write("kerblam")
    with pytest.raises(ValueError) as exc_info:
        vd.validate_metadata(broken_md)
    assert str(exc_info.value) == "Metadata is empty!"
    with pytest.raises(FileNotFoundError) as exc_info:
        vd.validate_metadata("NOT A FILE")
    expected = "[Errno 2] No such file or directory: 'NOT A FILE'"
    assert str(exc_info.value) == expected
def test_validate_tree(data_paths, tmp_path):
    """A missing tree file raises FileNotFoundError.

    Renamed from ``test_validate_metadata``: the original name collided with
    the metadata test defined earlier in the file, so pytest collected only
    this redefinition and silently never ran the metadata test (flake8 F811).
    The body targets ``validate_tree``, so the name now matches.
    """
    with pytest.raises(FileNotFoundError) as exc_info:
        vd.validate_tree("NOT A FILE")
    exp_err_msg = "[Errno 2] No such file or directory: 'NOT A FILE'"
    assert str(exc_info.value) == exp_err_msg
| true
| true
|
f70e0efee06270a4b2d8e5234f64981ceaae676f
| 2,248
|
py
|
Python
|
model-optimizer/extensions/front/onnx/pad_ext_test.py
|
Andruxin52rus/openvino
|
d824e371fe7dffb90e6d3d58e4e34adecfce4606
|
[
"Apache-2.0"
] | 2
|
2020-11-18T14:14:06.000Z
|
2020-11-28T04:55:57.000Z
|
model-optimizer/extensions/front/onnx/pad_ext_test.py
|
Andruxin52rus/openvino
|
d824e371fe7dffb90e6d3d58e4e34adecfce4606
|
[
"Apache-2.0"
] | 30
|
2020-11-13T11:44:07.000Z
|
2022-02-21T13:03:16.000Z
|
model-optimizer/extensions/front/onnx/pad_ext_test.py
|
mmakridi/openvino
|
769bb7709597c14debdaa356dd60c5a78bdfa97e
|
[
"Apache-2.0"
] | 1
|
2020-12-18T15:47:45.000Z
|
2020-12-18T15:47:45.000Z
|
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from argparse import Namespace
import onnx
from extensions.front.onnx.pad_ext import PadFrontExtractor
from mo.graph.graph import Graph
from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
class TestPad(BaseExtractorsTestingClass):
    """Front-extractor tests for the ONNX ``Pad`` operation."""
    @staticmethod
    def _create_node(pads=None, value=None, mode=None):
        """Wrap an ONNX Pad node (with optional attribute overrides) in a PB.

        Defaults: pads=[1, 2, 3, 4], value=0.0, mode='constant'.
        """
        pb = onnx.helper.make_node(
            'Pad',
            pads=[1, 2, 3, 4] if pads is None else pads,
            mode='constant' if mode is None else mode,
            value=0.0 if value is None else value,
            inputs=['a'],
            outputs=['b']
        )
        return PB({'pb': pb, 'graph': Graph()})
    def test_ok(self):
        """Default attributes map to constant mode with zero fill."""
        node = self._create_node()
        PadFrontExtractor.extract(node)
        self.res = node
        self.expected = {
            'pads': [[1, 3], [2, 4]],
            'mode': 'constant',
            'fill_value': 0
        }
        self.compare()
    def test_reflect(self):
        """Reflect mode is passed through unchanged."""
        node = self._create_node(mode='reflect')
        PadFrontExtractor.extract(node)
        self.res = node
        self.expected = {
            'pads': [[1, 3], [2, 4]],
            'mode': 'reflect',
            'fill_value': 0
        }
        self.compare()
    def test_non_zero_fill_value(self):
        """A non-zero pad value ends up in fill_value."""
        node = self._create_node(value=1.0)
        PadFrontExtractor.extract(node)
        self.res = node
        self.expected = {
            'pads': [[1, 3], [2, 4]],
            'mode': 'constant',
            'fill_value': 1.0
        }
        self.compare()
| 26.447059
| 73
| 0.58274
|
from argparse import Namespace
import onnx
from extensions.front.onnx.pad_ext import PadFrontExtractor
from mo.graph.graph import Graph
from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
class TestPad(BaseExtractorsTestingClass):
@staticmethod
def _create_node(pads=None, value=None, mode=None):
if pads is None:
pads = [1, 2, 3, 4]
if value is None:
value = 0.0
if mode is None:
mode = 'constant'
pb = onnx.helper.make_node(
'Pad',
pads=pads,
mode=mode,
value=value,
inputs=['a'],
outputs=['b']
)
graph = Graph()
node = PB({'pb': pb, 'graph': graph})
return node
def test_ok(self):
node = self._create_node()
PadFrontExtractor.extract(node)
self.res = node
self.expected = {
'pads': [[1, 3], [2, 4]],
'mode': 'constant',
'fill_value': 0
}
self.compare()
def test_reflect(self):
node = self._create_node(mode='reflect')
PadFrontExtractor.extract(node)
self.res = node
self.expected = {
'pads': [[1, 3], [2, 4]],
'mode': 'reflect',
'fill_value': 0
}
self.compare()
def test_non_zero_fill_value(self):
node = self._create_node(value=1.0)
PadFrontExtractor.extract(node)
self.res = node
self.expected = {
'pads': [[1, 3], [2, 4]],
'mode': 'constant',
'fill_value': 1.0
}
self.compare()
| true
| true
|
f70e0effde4c7b7b31c63232faa9299912fb117a
| 8,791
|
py
|
Python
|
frappe/tests/test_global_search.py
|
erpnext-tm/frappe
|
7b470f28e1cf00b0659c01e06a2d0a4693b28d98
|
[
"MIT"
] | null | null | null |
frappe/tests/test_global_search.py
|
erpnext-tm/frappe
|
7b470f28e1cf00b0659c01e06a2d0a4693b28d98
|
[
"MIT"
] | null | null | null |
frappe/tests/test_global_search.py
|
erpnext-tm/frappe
|
7b470f28e1cf00b0659c01e06a2d0a4693b28d98
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
import frappe.utils
from frappe.desk.page.setup_wizard.install_fixtures import update_global_search_doctypes
from frappe.test_runner import make_test_objects
from frappe.utils import global_search
class TestGlobalSearch(unittest.TestCase):
	"""Integration tests for frappe.utils.global_search, driven through
	the Event doctype and the __global_search table."""

	def setUp(self):
		"""Create the search table and mark Event fields as searchable."""
		update_global_search_doctypes()
		global_search.setup_global_search_table()
		self.assertTrue("__global_search" in frappe.db.get_tables())
		global_search.reset()
		from frappe.custom.doctype.property_setter.property_setter import make_property_setter
		# Flag which Event fields take part in global search (0 = excluded).
		for fieldname, enabled in (
			("subject", 1),
			("event_type", 1),
			("roles", 1),
			("repeat_on", 0),
		):
			make_property_setter("Event", fieldname, "in_global_search", enabled, "Int")

	def tearDown(self):
		"""Remove the test rows and restore the stock Event fixtures."""
		frappe.db.sql("DELETE FROM `tabProperty Setter` WHERE `doc_type`='Event'")
		frappe.clear_cache(doctype="Event")
		frappe.db.sql("DELETE FROM `tabEvent`")
		frappe.db.sql("DELETE FROM `__global_search`")
		make_test_objects("Event")
		frappe.db.commit()

	def insert_test_events(self):
		"""Insert three monthly Events and push them into the search index."""
		frappe.db.sql("DELETE FROM `tabEvent`")
		phrases = [
			'"The Sixth Extinction II: Amor Fati" is the second episode of the seventh season of the American science fiction.',
			"After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. ",
			"Carter explored themes of extraterrestrial involvement in ancient mass extinctions in this episode, the third in a trilogy.",
		]
		for phrase in phrases:
			frappe.get_doc(
				{"doctype": "Event", "subject": phrase, "repeat_on": "Monthly", "starts_on": frappe.utils.now_datetime()}
			).insert()
		global_search.sync_global_search()
		frappe.db.commit()

	def test_search(self):
		"""Single terms and '&'-combined terms both find the right document."""
		self.insert_test_events()
		expectations = (
			("awakens", "After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. "),
			(
				"extraterrestrial",
				"Carter explored themes of extraterrestrial involvement in ancient mass extinctions in this episode, the third in a trilogy.",
			),
			("awakens & duty & alien", "After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. "),
		)
		for query, expected_content in expectations:
			hits = global_search.search(query)
			self.assertTrue(expected_content in hits[0].content)

	def test_update_doc(self):
		"""Editing a subject makes the new text searchable after a sync."""
		self.insert_test_events()
		event = frappe.get_doc("Event", frappe.get_all("Event")[0].name)
		event.subject = "testing global search"
		event.save()
		frappe.db.commit()
		global_search.sync_global_search()
		hits = global_search.search("testing global search")
		self.assertTrue("testing global search" in hits[0].content)

	def test_update_fields(self):
		"""Turning in_global_search on for a field exposes its values."""
		self.insert_test_events()
		self.assertEqual(len(global_search.search("Monthly")), 0)
		from frappe.custom.doctype.property_setter.property_setter import make_property_setter
		make_property_setter("Event", "repeat_on", "in_global_search", 1, "Int")
		global_search.rebuild_for_doctype("Event")
		self.assertEqual(len(global_search.search("Monthly")), 3)

	def test_delete_doc(self):
		"""Deleted documents drop out of the results after a sync."""
		self.insert_test_events()
		event_name = frappe.get_all("Event")[0].name
		subject = frappe.get_doc("Event", event_name).subject
		hits = global_search.search(subject)
		self.assertTrue(
			any(hit["name"] == event_name for hit in hits), msg="Failed to search document by exact name"
		)
		frappe.delete_doc("Event", event_name)
		global_search.sync_global_search()
		hits = global_search.search(subject)
		self.assertTrue(
			all(hit["name"] != event_name for hit in hits),
			msg="Deleted documents appearing in global search.",
		)

	def test_insert_child_table(self):
		"""A batch of inserts keeps the sync pipeline healthy."""
		frappe.db.sql("delete from tabEvent")
		phrases = [
			"Hydrus is a small constellation in the deep southern sky. ",
			"It was first depicted on a celestial atlas by Johann Bayer in his 1603 Uranometria. ",
			"The French explorer and astronomer Nicolas Louis de Lacaille charted the brighter stars and gave their Bayer designations in 1756. ",
			'Its name means "male water snake", as opposed to Hydra, a much larger constellation that represents a female water snake. ',
			"It remains below the horizon for most Northern Hemisphere observers.",
			"The brightest star is the 2.8-magnitude Beta Hydri, also the closest reasonably bright star to the south celestial pole. ",
			"Pulsating between magnitude 3.26 and 3.33, Gamma Hydri is a variable red giant some 60 times the diameter of our Sun. ",
			"Lying near it is VW Hydri, one of the brightest dwarf novae in the heavens. ",
			"Four star systems have been found to have exoplanets to date, most notably HD 10180, which could bear up to nine planetary companions.",
		]
		for phrase in phrases:
			frappe.get_doc(
				{"doctype": "Event", "subject": phrase, "starts_on": frappe.utils.now_datetime()}
			).insert()
		global_search.sync_global_search()
		frappe.db.commit()

	def test_get_field_value(self):
		"""HTML descriptions are stripped of tags, styles and scripts."""
		cases = [
			{
				"case_type": "generic",
				"data": """
				<style type="text/css"> p.p1 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px 'Open Sans';
				-webkit-text-stroke: #000000} span.s1 {font-kerning: none} </style>
				<script>
				var options = {
					foo: "bar"
				}
				</script>
				<p class="p1"><span class="s1">Contrary to popular belief, Lorem Ipsum is not simply random text. It has
				roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock,
				a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur,
				from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source.
				Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero,
				written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum,
				"Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.</span></p>
			""",
				"result": (
					"Description : Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical "
					"Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, "
					"looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word "
					'in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum '
					'et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular '
					'during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.'
				),
			},
			{
				"case_type": "with_style",
				"data": """
				<style type="text/css"> p.p1 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px 'Open Sans';
				-webkit-text-stroke: #000000} span.s1 {font-kerning: none} </style>Lorem Ipsum Dolor Sit Amet
			""",
				"result": "Description : Lorem Ipsum Dolor Sit Amet",
			},
			{
				"case_type": "with_script",
				"data": """
				<script>
				var options = {
					foo: "bar"
				}
				</script>
				Lorem Ipsum Dolor Sit Amet
			""",
				"result": "Description : Lorem Ipsum Dolor Sit Amet",
			},
		]
		for case in cases:
			doc = frappe.get_doc(
				{
					"doctype": "Event",
					"subject": "Lorem Ipsum",
					"starts_on": frappe.utils.now_datetime(),
					"description": case["data"],
				}
			)
			rendered = ""
			for meta_field in doc.meta.fields:
				if meta_field.fieldname == "description":
					rendered = global_search.get_formatted_value(doc.description, meta_field)
			self.assertEqual(case["result"], rendered)

	def test_web_page_index(self):
		"""Web pages are indexed and the scope argument is injection-safe."""
		global_search.update_global_search_for_all_web_pages()
		global_search.sync_global_search()
		frappe.db.commit()
		hits = global_search.web_search("unsubscribe")
		self.assertTrue("Unsubscribe" in hits[0].content)
		hits = global_search.web_search(
			text="unsubscribe", scope='manufacturing" UNION ALL SELECT 1,2,3,4,doctype from __global_search'
		)
		self.assertTrue(hits == [])
| 40.511521
| 144
| 0.723695
|
from __future__ import unicode_literals
import unittest
import frappe
import frappe.utils
from frappe.desk.page.setup_wizard.install_fixtures import update_global_search_doctypes
from frappe.test_runner import make_test_objects
from frappe.utils import global_search
class TestGlobalSearch(unittest.TestCase):
def setUp(self):
update_global_search_doctypes()
global_search.setup_global_search_table()
self.assertTrue("__global_search" in frappe.db.get_tables())
doctype = "Event"
global_search.reset()
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
make_property_setter(doctype, "subject", "in_global_search", 1, "Int")
make_property_setter(doctype, "event_type", "in_global_search", 1, "Int")
make_property_setter(doctype, "roles", "in_global_search", 1, "Int")
make_property_setter(doctype, "repeat_on", "in_global_search", 0, "Int")
def tearDown(self):
frappe.db.sql("DELETE FROM `tabProperty Setter` WHERE `doc_type`='Event'")
frappe.clear_cache(doctype="Event")
frappe.db.sql("DELETE FROM `tabEvent`")
frappe.db.sql("DELETE FROM `__global_search`")
make_test_objects("Event")
frappe.db.commit()
def insert_test_events(self):
frappe.db.sql("DELETE FROM `tabEvent`")
phrases = [
'"The Sixth Extinction II: Amor Fati" is the second episode of the seventh season of the American science fiction.',
"After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. ",
"Carter explored themes of extraterrestrial involvement in ancient mass extinctions in this episode, the third in a trilogy.",
]
for text in phrases:
frappe.get_doc(
dict(doctype="Event", subject=text, repeat_on="Monthly", starts_on=frappe.utils.now_datetime())
).insert()
global_search.sync_global_search()
frappe.db.commit()
def test_search(self):
self.insert_test_events()
results = global_search.search("awakens")
self.assertTrue(
"After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. "
in results[0].content
)
results = global_search.search("extraterrestrial")
self.assertTrue(
"Carter explored themes of extraterrestrial involvement in ancient mass extinctions in this episode, the third in a trilogy."
in results[0].content
)
results = global_search.search("awakens & duty & alien")
self.assertTrue(
"After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. "
in results[0].content
)
def test_update_doc(self):
self.insert_test_events()
test_subject = "testing global search"
event = frappe.get_doc("Event", frappe.get_all("Event")[0].name)
event.subject = test_subject
event.save()
frappe.db.commit()
global_search.sync_global_search()
results = global_search.search("testing global search")
self.assertTrue("testing global search" in results[0].content)
def test_update_fields(self):
self.insert_test_events()
results = global_search.search("Monthly")
self.assertEqual(len(results), 0)
doctype = "Event"
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
make_property_setter(doctype, "repeat_on", "in_global_search", 1, "Int")
global_search.rebuild_for_doctype(doctype)
results = global_search.search("Monthly")
self.assertEqual(len(results), 3)
def test_delete_doc(self):
self.insert_test_events()
event_name = frappe.get_all("Event")[0].name
event = frappe.get_doc("Event", event_name)
test_subject = event.subject
results = global_search.search(test_subject)
self.assertTrue(
any(r["name"] == event_name for r in results), msg="Failed to search document by exact name"
)
frappe.delete_doc("Event", event_name)
global_search.sync_global_search()
results = global_search.search(test_subject)
self.assertTrue(
all(r["name"] != event_name for r in results),
msg="Deleted documents appearing in global search.",
)
def test_insert_child_table(self):
frappe.db.sql("delete from tabEvent")
phrases = [
"Hydrus is a small constellation in the deep southern sky. ",
"It was first depicted on a celestial atlas by Johann Bayer in his 1603 Uranometria. ",
"The French explorer and astronomer Nicolas Louis de Lacaille charted the brighter stars and gave their Bayer designations in 1756. ",
'Its name means "male water snake", as opposed to Hydra, a much larger constellation that represents a female water snake. ',
"It remains below the horizon for most Northern Hemisphere observers.",
"The brightest star is the 2.8-magnitude Beta Hydri, also the closest reasonably bright star to the south celestial pole. ",
"Pulsating between magnitude 3.26 and 3.33, Gamma Hydri is a variable red giant some 60 times the diameter of our Sun. ",
"Lying near it is VW Hydri, one of the brightest dwarf novae in the heavens. ",
"Four star systems have been found to have exoplanets to date, most notably HD 10180, which could bear up to nine planetary companions.",
]
for text in phrases:
doc = frappe.get_doc(
{"doctype": "Event", "subject": text, "starts_on": frappe.utils.now_datetime()}
)
doc.insert()
global_search.sync_global_search()
frappe.db.commit()
def test_get_field_value(self):
cases = [
{
"case_type": "generic",
"data": """
<style type="text/css"> p.p1 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px 'Open Sans';
-webkit-text-stroke: #000000} span.s1 {font-kerning: none} </style>
<script>
var options = {
foo: "bar"
}
</script>
<p class="p1"><span class="s1">Contrary to popular belief, Lorem Ipsum is not simply random text. It has
roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock,
a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur,
from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source.
Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero,
written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum,
"Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.</span></p>
""",
"result": (
"Description : Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical "
"Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, "
"looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word "
'in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum '
'et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular '
'during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.'
),
},
{
"case_type": "with_style",
"data": """
<style type="text/css"> p.p1 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px 'Open Sans';
-webkit-text-stroke: #000000} span.s1 {font-kerning: none} </style>Lorem Ipsum Dolor Sit Amet
""",
"result": "Description : Lorem Ipsum Dolor Sit Amet",
},
{
"case_type": "with_script",
"data": """
<script>
var options = {
foo: "bar"
}
</script>
Lorem Ipsum Dolor Sit Amet
""",
"result": "Description : Lorem Ipsum Dolor Sit Amet",
},
]
for case in cases:
doc = frappe.get_doc(
{
"doctype": "Event",
"subject": "Lorem Ipsum",
"starts_on": frappe.utils.now_datetime(),
"description": case["data"],
}
)
field_as_text = ""
for field in doc.meta.fields:
if field.fieldname == "description":
field_as_text = global_search.get_formatted_value(doc.description, field)
self.assertEqual(case["result"], field_as_text)
def test_web_page_index(self):
global_search.update_global_search_for_all_web_pages()
global_search.sync_global_search()
frappe.db.commit()
results = global_search.web_search("unsubscribe")
self.assertTrue("Unsubscribe" in results[0].content)
results = global_search.web_search(
text="unsubscribe", scope='manufacturing" UNION ALL SELECT 1,2,3,4,doctype from __global_search'
)
self.assertTrue(results == [])
| true
| true
|
f70e0f126b8a9c0eb3c7c60d2741aac4f327bb36
| 16,294
|
py
|
Python
|
Server/server.py
|
Rsky-20/Security-Message-Communication
|
b34d7a3e1ab54f7bed07c9974f7e1a5c4fc7e75c
|
[
"MIT"
] | null | null | null |
Server/server.py
|
Rsky-20/Security-Message-Communication
|
b34d7a3e1ab54f7bed07c9974f7e1a5c4fc7e75c
|
[
"MIT"
] | null | null | null |
Server/server.py
|
Rsky-20/Security-Message-Communication
|
b34d7a3e1ab54f7bed07c9974f7e1a5c4fc7e75c
|
[
"MIT"
] | null | null | null |
#!/bin/python3 -*- coding: utf-8 -*-
"""
@Author : Jessy JOSE -- Pierre VAUDRY
IPSA Aero1 - Prim2
Release date: 09/12/2020
[other information]
Licence: MIT
[Description]
SMC is a security message communication.
This program is the part of server program.
The server uses the socket module to work.
To improve communication between the client and the server, we use the select module to select a specific socket.
The datetime, os and platform modules are used to make the server fully functional. These modules are used to date
operations, and to clean the console if necessary depending on the platform used.
[Functions]:
Clean() -- clear console
documentation() -- make native documentation and basic screen interface
log() -- log all data receive on server
log_connection() -- log all connection make with server
process_server() -- interprets the data received and exploits it
list_log() -- conversion of data inside text file to list
str_log(data) -- conversion of a list to str
consoleCommand() -- make a native console after communication to see log.txt
connection_server() -- main process of the server
run() -- run and launch server
[Global variable]:
{int variable}
PORT
{str variable}
HOST
affichage_logo
{dict variable}
server_data
{list variable}
client_connected
[Other variable]:
Many other constants and variable may be defined; these may be used in calls to
the process_server(), list_log(), str_log(data), consoleCommand() and connection_server() functions
"""
# ---------------------------------------------Import module section-------------------------------------------------- #
import datetime
import select
import socket
import os
import platform
# ------------------------------------------------Global variable----------------------------------------------------- #
# HOST: loopback address the listening socket binds to (local-only server).
HOST = '127.0.0.1'
# PORT: TCP port clients use to reach the server.
PORT = 50100
# Sockets of every client that has connected; entries are appended in
# connection_server() and only closed when the server shuts down.
client_connected = []
# Per-run counters; 'count' seeds the connection counter shown on accept.
server_data = {
    'count': 0
}
# ------------------------------------------------Functions & process------------------------------------------------- #
def Clean():
    """
    [description]
    Clear the console of the current platform.

    Uses ``cls`` on Windows and ``clear`` on Linux and macOS; on any
    other platform this is a deliberate no-op (nothing to clear with).
    :return: none
    """
    system_name = platform.system()
    if system_name == "Windows":
        os.system("cls")
    elif system_name in ("Linux", "Darwin"):
        # macOS reports "Darwin" but uses the same `clear` command as
        # Linux; the original version silently did nothing on macOS.
        os.system("clear")
def documentation():
    """
    [Description]
    Print the ASCII-art logo and a short welcome banner for the server
    administrator's console.  (A fuller help text is still to come, per
    the final "Reste de la doc à venir" line.)
    :return: none
    """
    # NOTE(review): the banner is a regular (non-raw) string, so sequences
    # like "\/" rely on Python preserving invalid escapes verbatim — newer
    # Pythons emit a SyntaxWarning for this; a raw string would silence it.
    affichage_logo = '\033[36m' + """
 ___ _ _ __ __ _____ _ _ _
/ ____| (_) | | \/ | / ____| (_) | | (_)
| (___ ___ ___ _ _ _ __ _| |_ _ _ | \ / | ___ ___ ___ __ _ __ _ ___ | | ___ _ __ ___ _ __ ___ _ _ _ __ _ ___ __ _| |_ _ ___ _ __
\___ \ / _ \/ __| | | | '__| | __| | | | | |\/| |/ _ \/ __/ __|/ _` |/ _` |/ _ \ | | / _ \| '_ ` _ \| '_ ` _ \| | | | '_ \| |/ __/ _` | __| |/ _ \| '_ \
____) | __/ (__| |_| | | | | |_| |_| | | | | | __/\__ \__ \ (_| | (_| | __/ | |___| (_) | | | | | | | | | | | |_| | | | | | (_| (_| | |_| | (_) | | | |
|_____/ \___|\___|\__,_|_| |_|\__|\__, | |_| |_|\___||___/___/\__,_|\__, |\___| \_____\___/|_| |_| |_|_| |_| |_|\__,_|_| |_|_|\___\__,_|\__|_|\___/|_| |_|
 __/ | __/ |
 |___/ |___/
 /%&@@@@@&%/
 @@@@@@@@&&(((((&&@@@@@@@@.
 @@@@@,,,,,,,,,,,,,,,,,,,,,,,@@@@@
 @@@@,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,@@@@
 &@@@,,,,,,,,,,,,,,%@*,,%/%@%***@,,,,,,,&@@&
 @@@(@@@@@@@@@@@*,,,,*,,,,,,,,,,,,,,,,,,,,,@@@
 &@@@@@@@&,,,.....%@@@@@@@@*,,,,,,,,,,,,,,,,,,,,@@@
 (@@@@&(#((***,,,,....,.......@@@@@,,,,,,,,,,,,,,,,,@@@
 @@@@*,#*((/(/(/,,,,,,,,...,.... ,@@@&,,,,,,,,,,,,,,@@@
 @@@,,/,,(*,/%/(((*,,,,.,....,.,.. ,..,@@@%,,,,,,,,,,,&@@
 @@@./. ..*(((/#//***,*,,,,*,,*.. ..,, . @@@,,,,,,,,,@@@
 @@@#,/**..*@@@#(//***#@@&,*,.,,.&@@#. ,,.. .@@%,,,,,,@@@#
 @@(*%(,,/@@@(@@@%/*#@@@/@@@**.@@@/&@@(.. . @@@,,/@@@@
 @@@#.,(/,@@@@@@@(..*@@@@@@@(..@@@@@@@ @@@@@@@
 ,@@(/((*#**#/(/, .*, ..&*////(,, @@@
 @@@ */*(/*/. ..... . ... /*(.,.,, .@@@
 %@@@. . . .., ., . . *,**.*, #@@@
 @@@@(. .., ,. ,. .@@@
 %@@@@@@(,.. ,. .. . .&@@@ , &@@
 %@@@@@@@@@@@@@@@* @@@. (@@
 @@@.@@.
 @@@@.
 @@.
    """ + '\033[39m'
    print(affichage_logo)
    print('\t\t\t\t\t\t\t\t\t\t\t\t#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#')
    print('\t\t\t\t\t\t\t\t\t\t\t\t# ' + '\033[31m' + 'Welcome in SMC server' + '\033[39m' +
          ' #')
    print('\t\t\t\t\t\t\t\t\t\t\t\t#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#')
    print('Reste de la doc à venir °°°')
def log(data):
    """
    [description]
    Append one chat message to the server history file (log.txt).

    "a" mode creates the file on first use and preserves all previous
    history; each message is written on its own line.
    :param data: str message given by a client to the server
    :return: none
    """
    # The with-statement closes the file automatically — the original
    # explicit close() inside the block was redundant.
    with open("log.txt", "a") as history:
        history.write(data + "\n")
def log_connection(client_connected):
    """
    [description]
    Append a timestamped connection record to log_connection.txt.

    :param client_connected: the accepted client socket (its repr is
        written to the log)
    :return: none
    """
    # ISO-format timestamp of the moment the connection was accepted.
    timestamp = datetime.datetime.isoformat(datetime.datetime.now())
    # The with-statement closes the file — no explicit close() needed.
    with open("log_connection.txt", "a") as conn_log:
        conn_log.write(timestamp +
                       "> {} connected on server \n".format(client_connected))
def process_server(data):
    """
    [description]
    Process one message received from a client.

    Every message except the shutdown command '/stop' is appended to
    the history file via log(); the message itself is returned
    unchanged so callers may use it further.
    :param data: decoded str message from a client
    :return: the same message (str)
    """
    # Only regular chat traffic is archived; the control message
    # '/stop' must not appear in the history.  (The original
    # `else: pass` branch was a no-op and has been dropped.)
    if data != '/stop':
        log(data)
    return data
def list_log():
    """
    [description]
    Read log.txt back as a list of '@'-split entries.

    Each line of the file becomes one list of str fields: the trailing
    newline is stripped, then the line is split on '@'.
    :return: list of lists of str built from log.txt
    """
    with open('log.txt', 'r') as history:
        # Strip the newline, then split each record on its '@' separators.
        return [raw_line.strip("\n").split("@") for raw_line in history]
def str_log(data):
    """
    [description]
    Join the most recent log entry back into one comma-separated str.

    Bug fix: the original nested loops assigned ','.join(data[i - 1])
    on every pass, which returned the *second-to-last* entry whenever
    more than one entry existed — contradicting its own documentation
    ("only the last line is returned and use").  This version returns
    the genuinely last entry, and '' for an empty log.
    :param data: list of lists of str (the output of list_log())
    :return: last entry joined with ',' — '' when data is empty
    """
    if not data:
        return ''
    return ','.join(data[-1])
def consoleCommand(event):
    """
    [description]
    Handle one administrator console command.

    '/log.txt' prints the message history to the console; any other
    input terminates the process (the original behaviour).
    :param event: command str typed by the administrator
    :return: none
    """
    if event == '/log.txt':
        # with-block fixes the original leaked file handle and avoids
        # shadowing the module-level log() function with a local name.
        with open("./log.txt", "r") as history:
            print(history.read())
    else:
        exit()
# connection_server both accepts new client connections and relays messages
# between the clients already connected; every accepted connection is
# logged through log_connection().
def connection_server():
    """
    [description]
    Main server loop: bind, listen, then multiplex clients via select().

    Accepts pending connections (each one logged), reads every client
    socket that has data ready, archives each message with
    process_server(), and answers the sender with the last line of
    log.txt.  Receiving the literal message "/stop" ends the loop,
    closes every client socket and the listening socket, and clears
    the console.

    AF_INET is the IPv4 address family; SOCK_STREAM is TCP.
    :return: none
    """
    # Listening TCP socket for the whole server.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allows to reuse the same address
    # s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Bind the module-level (HOST, PORT) pair, then start listening with
    # a backlog of 5 pending connections.
    # s.bind((data_server['HOST'], data_server['PORT']))
    s.bind((HOST, PORT))
    s.listen(5)
    print('{serverver-status}:', '\033[32m', 'Online', '\033[1m')
    print(s)
    # Loop flag: cleared when any client sends "/stop".
    server_ready = True
    # Connection counter, seeded from the module-level server_data dict.
    turn = server_data['count']
    while server_ready:
        # Poll the listening socket (50 ms timeout) for pending clients.
        wait_connections, wlist, xlist = select.select([s], [], [], 0.05)
        # NOTE(review): this second select() repeats the same 50 ms wait
        # and discards its result — it only slows the loop down.
        select.select([s], [], [], 0.05)
        # Accept every pending client, remember its socket and log it.
        for connection in wait_connections:
            connection_client, info_connection = connection.accept()
            client_connected.append(connection_client)
            # NOTE(review): "Prosition" looks like a typo for "Position"
            # in this runtime message.
            print("Prosition : ", turn , " | Client : ", connection_client)
            turn = turn + 1
            log_connection(connection_client)
        ####################################################################################################################
        # Client sockets that have data ready to read this iteration.
        read_client = []
        # select() raises when client_connected holds a closed socket;
        # such errors are deliberately swallowed so the loop keeps running.
        try:
            read_client, wlist, xlist = select.select(client_connected, [], [], 0.05)
        except select.error:
            pass
        else:
            # Drain every readable client and archive its message.
            for client in read_client:
                msg_recv = client.recv(1024)
                msg_recv = msg_recv.decode()
                process_server(msg_recv)
                print('\033[39m', '[', '\033[31m', 'SERVER@', '\033[36m', HOST, '\033[33m', '-p ', str(PORT),
                      '\033[39m', ']: Client send a message. Go to ./log.txt to see more.')
                # The "/stop" control message ends the serving loop.
                if msg_recv == "/stop":
                    server_ready = False
                    break
                ###############################################
                # Re-read the log and fold its last entry back to one str.
                d_l = list_log()
                c2c = str_log(d_l)
                p_server = c2c
                ###############################################
                # Echo the latest log line back to the sending client.
                byte_data = p_server.encode()
                # send message to the client
                client.sendall(byte_data)
    ####################################################################################################################
    #console = input("[" + datetime.datetime.isoformat(datetime.datetime.now()) + "](/log.txt to see log in server)>")
    #consoleCommand(console) # This line, give to the administrator the console to oppen and see log
    print("Close all connections")
    # Disconnect every client that is still attached.
    for client in client_connected:
        client.close()
    # Shut down the listening socket and clear the console.
    s.close()
    Clean()
def run():
    """
    [description]
    Print the server prompt banner, then serve forever by re-entering
    connection_server() each time it returns.
    :return: none (never returns normally)
    """
    banner = ('[' + '\033[31m' + 'SERVER@' + '\033[36m' + HOST + ' '
              + '\033[33m' + '-p ' + str(PORT) + '\033[39m' + ']:\n')
    print(banner)
    while True:
        connection_server()
# -------------------------------------------Run & Start server program----------------------------------------------- #
if __name__ == '__main__':
    # Show the ASCII-art banner and the built-in help screen first...
    documentation()
    # ...then hand control to the blocking serve-forever loop.
    run()
| 39.936275
| 266
| 0.444213
|
import datetime
import select
import socket
import os
import platform
HOST = '127.0.0.1'
PORT = 50100
client_connected = []
server_data = {
'count': 0
}
# ------------------------------------------------Functions & process------------------------------------------------- #
def Clean():
if platform.system() == "Windows":
os.system("cls")
elif platform.system() == "Linux":
os.system("clear")
def documentation():
    """Print the startup banner: ASCII-art logo plus basic usage notes.

    Output is purely cosmetic (ANSI color codes + box-drawing text); there is
    no return value.
    """
    affichage_logo = '\033[36m' + """
     ___                       _ _           __  __                                  _____                      _            _                 _   _             
    / ____|                   (_) |         |  \/  |                                / ____|                    (_)          | |               (_) | |            
   | (___   ___  ___ _   _ _ __ _| |_ _   _ | \  / | ___  ___ ___  __ _  __ _  ___ | |     ___  _ __ ___  _ __ ___  _   _ _ __  _  ___ __ _| |_ _  ___  _ __   
    \___ \ / _ \/ __| | | | '__| | __| | | || |\/| |/ _ \/ __/ __|/ _` |/ _` |/ _ \| |    / _ \| '_ ` _ \| '_ ` _ \| | | | '_ \| |/ __/ _` | __| |/ _ \| '_ \  
    ____) |  __/ (__| |_| | |  | | |_| |_| || |  | |  __/\__ \__ \ (_| | (_| |  __/| |___| (_) | | | | | | | | | | | |_| | | | | | (_| (_| | |_| | (_) | | | | 
   |_____/ \___|\___|\__,_|_|  |_|\__|\__, ||_|  |_|\___||___/___/\__,_|\__, |\___| \_____\___/|_| |_| |_|_| |_| |_|\__,_|_| |_|_|\___\__,_|\__|_|\___/|_| |_| 
                                       __/ |                             __/ |                                                                                  
                                      |___/                             |___/                                                                                   
                                                             /%&@@@@@&%/                                                                
                                                    @@@@@@@@&&(((((&&@@@@@@@@.                                                          
                                                @@@@@,,,,,,,,,,,,,,,,,,,,,,,@@@@@                                                       
                                             @@@@,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,@@@@                                                    
                                           &@@@,,,,,,,,,,,,,,%@*,,%/%@%***@,,,,,,,&@@&                                                  
                                          @@@(@@@@@@@@@@@*,,,,*,,,,,,,,,,,,,,,,,,,,,@@@                                                 
                                        &@@@@@@@&,,,.....%@@@@@@@@*,,,,,,,,,,,,,,,,,,,,@@@                                              
                                       (@@@@&(#((***,,,,....,.......@@@@@,,,,,,,,,,,,,,,,,@@@                                           
                                       @@@@*,#*((/(/(/,,,,,,,,...,.... ,@@@&,,,,,,,,,,,,,,@@@                                           
                                      @@@,,/,,(*,/%/(((*,,,,.,....,.,..  ,..,@@@%,,,,,,,,,,,&@@                                         
                                      @@@./. ..*(((/#//***,*,,,,*,,*..   ..,,  . @@@,,,,,,,,,@@@                                        
                                      @@@#,/**..*@@@#(//***#@@&,*,.,,.&@@#. ,,..   .@@%,,,,,,@@@#                                       
                                       @@(*%(,,/@@@(@@@%/*#@@@/@@@**.@@@/&@@(..  .   @@@,,/@@@@                                         
                                        @@@#.,(/,@@@@@@@(..*@@@@@@@(..@@@@@@@          @@@@@@@                                          
                                         ,@@(/((*#**#/(/,  .*, ..&*////(,,              @@@                                             
                                          @@@ */*(/*/. ..... .  ...  /*(.,.,,          .@@@                                             
                                          %@@@. . . .., ., .    .    *,**.*,           #@@@                                             
                                           @@@@(. ..,  ,.  ,.                          .@@@                                             
                                           %@@@@@@(,.. ,. .. .                  .&@@@ , &@@                                             
                                              %@@@@@@@@@@@@@@@*                    @@@. (@@                                             
                                                                                      @@@.@@.                                           
                                                                                        @@@@.                                           
                                                                                          @@.                                           
    """ + '\033[39m'
    print(affichage_logo)
    print('\t\t\t\t\t\t\t\t\t\t\t\t#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#')
    print('\t\t\t\t\t\t\t\t\t\t\t\t#                    ' + '\033[31m' + 'Welcome in SMC server' + '\033[39m' +
          '                     #')
    print('\t\t\t\t\t\t\t\t\t\t\t\t#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#')
    print('Reste de la doc à venir °°°')
def log(data):
    """Append *data* as one line to log.txt.

    Fix: the explicit close() inside the `with` block was redundant — the
    context manager already closes the file on exit.
    """
    with open("log.txt", "a") as lmsg:
        lmsg.write(data + "\n")
def log_connection(client_connected):
    """Append an ISO-timestamped connection record for *client_connected*.

    Fix: the explicit close() inside the `with` block was redundant — the
    context manager already closes the file on exit.
    """
    with open("log_connection.txt", "a") as lc:
        lc.write(datetime.datetime.isoformat(datetime.datetime.now()) +
                 "> {} connected on server \n".format(client_connected))
def process_server(data):
    """Record *data* in the log unless it is the '/stop' command, then echo it back.

    The '/stop' sentinel is deliberately kept out of the log file.
    """
    if data != '/stop':
        log(data)
    return data
def list_log():
    """Read log.txt and return its lines, each stripped of '\\n' and split on '@'.

    Fixes: redundant close() inside the `with` block removed; the manual
    append loop replaced with the idiomatic comprehension.
    """
    with open('log.txt', 'r') as lg:
        return [line.strip("\n").split("@") for line in lg]
def str_log(data):
    """Join the most recent log entry's fields with commas ('' for an empty log).

    Bug fix: the original nested loops overwrote the result on every pass and
    joined ``data[i - 1]``, so it returned a stale, off-by-one entry. The call
    site's comment ("open last message and convert him") shows the intent is
    the last entry, which this now returns directly.
    """
    if not data:
        return ''
    return ','.join(data[-1])
def consoleCommand(event):
    """Handle an admin console command.

    '/log.txt' prints the log file's contents; any other input exits the
    process (original behavior, preserved).

    Fixes: the file handle was opened without ever being closed (leak), and
    the local name ``log`` shadowed the module-level log() helper.
    """
    if event == '/log.txt':
        with open("./log.txt", "r") as log_file:
            print(log_file.read())
    else:
        exit()
# connection_server is a inspired of send() process but the code is not the same.
def connection_server():
    """Bind to (HOST, PORT), accept clients, and serve the shared log until '/stop'.

    Runs one accept/serve loop: newly accepted sockets are appended to the
    module-level ``client_connected`` list; each received message is recorded
    via process_server(), then the latest log entry is sent back to the
    sender. Receiving the exact text '/stop' ends the loop, after which every
    client socket and the listening socket are closed.
    """
    # Creating a socket, by creating a socket object named s.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allows to reuse the same address
    # s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # s.bind (address,port) binds an address and a port to socket s.
    # The address parameter is a tuple consisting of the IP address of the
    # server and a port number.
    # s.bind((data_server['HOST'], data_server['PORT']))
    s.bind((HOST, PORT))
    # s.listen () makes the server ready to accept connections (backlog of 5).
    s.listen(5)
    print('{serverver-status}:', '\033[32m', 'Online', '\033[1m')
    print(s)
    # Loop flag; cleared when a client sends '/stop'.
    server_ready = True
    # Connection position counter, seeded from the module-level server_data.
    turn = server_data['count']
    while server_ready:
        # wait_connections holds sockets ready to accept; select polls the
        # listening socket every 0.05 s so the loop never blocks.
        wait_connections, wlist, xlist = select.select([s], [], [], 0.05)
        # NOTE(review): this second select call discards its result — it looks
        # redundant (it only adds a 0.05 s delay); confirm before removing.
        select.select([s], [], [], 0.05)
        # For each ready listener, accept the pending client, append it to the
        # shared client_connected list, print it, and record it via
        # log_connection.
        for connection in wait_connections:
            connection_client, info_connection = connection.accept()
            client_connected.append(connection_client)
            print("Prosition : ", turn , " | Client : ", connection_client)
            turn = turn + 1
            log_connection(connection_client)
        ####################################################################################################################
        # Clients with data ready to read this iteration.
        read_client = []
        # select raises on closed/invalid sockets; ignore and keep serving.
        try:
            read_client, wlist, xlist = select.select(client_connected, [], [], 0.05)
        except select.error:
            pass
        else:
            # For each readable client: receive, decode, and record the
            # message via process_server.
            for client in read_client:
                msg_recv = client.recv(1024)
                msg_recv = msg_recv.decode()
                process_server(msg_recv)
                print('\033[39m', '[', '\033[31m', 'SERVER@', '\033[36m', HOST, '\033[33m', '-p ', str(PORT),
                      '\033[39m', ']: Client send a message. Go to ./log.txt to see more.')
                # '/stop' shuts the whole server down without replying.
                if msg_recv == "/stop":
                    server_ready = False
                    break
                ###############################################
                # Re-read the log and convert its last entry to a string.
                d_l = list_log()
                c2c = str_log(d_l)
                p_server = c2c
                ###############################################
                # Encode the reply produced by the server...
                byte_data = p_server.encode()
                # ...and send it back to the client that just wrote.
                client.sendall(byte_data)
    ####################################################################################################################
    #console = input("[" + datetime.datetime.isoformat(datetime.datetime.now()) + "](/log.txt to see log in server)>")
    #consoleCommand(console) # This line, give to the administrator the console to oppen and see log
    print("Close all connections")
    # Disconnect every client that ever connected.
    for client in client_connected:
        client.close()
    # Close the listening socket and clear the terminal.
    s.close()
    Clean()
def run():
    """Print the server prompt banner, then serve forever.

    connection_server() returns after a '/stop'; the while loop immediately
    restarts it, so the server only ends when the process is killed.
    """
    print('[' + '\033[31m' + 'SERVER@' + '\033[36m' + HOST + ' ' + '\033[33m' + '-p ' + str(PORT) + '\033[39m' + ']:\n')
    while True:
        connection_server()
# -------------------------------------------Run & Start server program----------------------------------------------- #
if __name__ == '__main__':
    # Print the banner/usage text once at startup.
    documentation()
    # Start the accept/serve loop (blocks forever).
    run()
| true
| true
|
f70e113091c422b2268db0b5f5723c811cca94c5
| 4,901
|
py
|
Python
|
sample_project/sample_project/settings.py
|
MagicSolutions/django-admin-sortable
|
344927a393262aecf700b830b487430030cffdb2
|
[
"Naumen",
"Condor-1.1",
"Apache-1.1",
"MS-PL"
] | null | null | null |
sample_project/sample_project/settings.py
|
MagicSolutions/django-admin-sortable
|
344927a393262aecf700b830b487430030cffdb2
|
[
"Naumen",
"Condor-1.1",
"Apache-1.1",
"MS-PL"
] | null | null | null |
sample_project/sample_project/settings.py
|
MagicSolutions/django-admin-sortable
|
344927a393262aecf700b830b487430030cffdb2
|
[
"Naumen",
"Condor-1.1",
"Apache-1.1",
"MS-PL"
] | null | null | null |
# Django settings for test_project project.
import os
def map_path(directory_name):
    """Resolve *directory_name* relative to this file's parent directory.

    Backslashes are normalized to forward slashes so the result is usable on
    Windows as well.
    """
    settings_dir = os.path.dirname(__file__)
    joined = os.path.join(settings_dir, '../' + directory_name)
    return joined.replace('\\', '/')
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': map_path('database/test_project.sqlite'),
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '8**a!c8$1x)p@j2pj0yq!*v+dzp24g*$918ws#x@k+gf%0%rct'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'sample_project.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'sample_project.wsgi.application'
TEMPLATE_DIRS = (
map_path('templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'adminsortable',
'app',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| 30.440994
| 79
| 0.697409
|
import os
def map_path(directory_name):
return os.path.join(os.path.dirname(__file__),
'../' + directory_name).replace('\\', '/')
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': map_path('database/test_project.sqlite'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}
}
= []
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = ''
MEDIA_URL = ''
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = (
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '8**a!c8$1x)p@j2pj0yq!*v+dzp24g*$918ws#x@k+gf%0%rct'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'sample_project.urls'
WSGI_APPLICATION = 'sample_project.wsgi.application'
TEMPLATE_DIRS = (
map_path('templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'adminsortable',
'app',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| true
| true
|
f70e115700aac8a6e74283f4cc0b46ff65daf283
| 1,576
|
py
|
Python
|
simstring/measure/overlap.py
|
icfly2/simstring-1
|
e4a57603967c5d138ce021cedc09d509f75e1933
|
[
"MIT"
] | null | null | null |
simstring/measure/overlap.py
|
icfly2/simstring-1
|
e4a57603967c5d138ce021cedc09d509f75e1933
|
[
"MIT"
] | null | null | null |
simstring/measure/overlap.py
|
icfly2/simstring-1
|
e4a57603967c5d138ce021cedc09d509f75e1933
|
[
"MIT"
] | null | null | null |
import math
from typing import Iterable
from .base import BaseMeasure
class OverlapMeasure(BaseMeasure):
    """Similarity measure counting the features shared by two sets (overlap)."""

    def __init__(self, db=None, maxsize: int = 100) -> None:
        super().__init__()
        # A database, when supplied, knows the true maximum feature size.
        self.maxsize = db.max_feature_size() if db else maxsize

    def min_feature_size(self, query_size, alpha) -> int:
        # Never require fewer than one feature.
        return math.floor(query_size * alpha) or 1

    def max_feature_size(self, query_size, alpha) -> int:
        return self.maxsize

    def minimum_common_feature_count(
        self, query_size: int, y_size: int, alpha: float
    ) -> int:
        smaller = min(query_size, y_size)
        return int(math.ceil(alpha * smaller))

    def similarity(self, X: Iterable[str], Y: Iterable[str]) -> int:
        distinct_x = set(X)
        distinct_y = set(Y)
        return min(len(distinct_x), len(distinct_y))
class LeftOverlapMeasure(BaseMeasure):
    """Asymmetric overlap: the fraction of X's features that also appear in Y."""

    def __init__(self, db=None, maxsize: int = 100) -> None:
        super().__init__()
        # A database, when supplied, knows the true maximum feature size.
        self.maxsize = db.max_feature_size() if db else maxsize

    def min_feature_size(self, query_size, alpha) -> int:
        # Never require fewer than one feature.
        return math.floor(query_size * alpha) or 1

    def max_feature_size(self, query_size, alpha) -> int:
        return self.maxsize

    def minimum_common_feature_count(
        self, query_size: int, y_size: int, alpha: float
    ) -> int:
        return math.floor(query_size * alpha) or 1

    def similarity(self, X: Iterable[str], Y: Iterable[str]) -> float:
        distinct_x = set(X)
        outside_y = distinct_x - set(Y)
        return 1 - len(outside_y) / len(distinct_x)
| 30.901961
| 70
| 0.626269
|
import math
from typing import Iterable
from .base import BaseMeasure
class OverlapMeasure(BaseMeasure):
def __init__(self, db=None, maxsize: int = 100) -> None:
super().__init__()
if db:
self.maxsize = db.max_feature_size()
else:
self.maxsize = maxsize
def min_feature_size(self, query_size, alpha) -> int:
* alpha) or 1
def max_feature_size(self, query_size, alpha) -> int:
return self.maxsize
def minimum_common_feature_count(
self, query_size: int, y_size: int, alpha: float
) -> int:
return int(math.ceil(alpha * min(query_size, y_size)))
def similarity(self, X: Iterable[str], Y: Iterable[str]) -> int:
return min(len(set(X)), len(set(Y)))
class LeftOverlapMeasure(BaseMeasure):
def __init__(self, db=None, maxsize: int = 100) -> None:
super().__init__()
if db:
self.maxsize = db.max_feature_size()
else:
self.maxsize = maxsize
def min_feature_size(self, query_size, alpha) -> int:
return math.floor(query_size * alpha) or 1
def max_feature_size(self, query_size, alpha) -> int:
return self.maxsize
def minimum_common_feature_count(
self, query_size: int, y_size: int, alpha: float
) -> int:
return math.floor(query_size * alpha) or 1
def similarity(self, X: Iterable[str], Y: Iterable[str]) -> float:
return 1 - len(set(X) - set(Y)) / len(set(X))
| true
| true
|
f70e11851218fffbf03f0ed4b78743ce5833fc4a
| 1,410
|
py
|
Python
|
webapi/utils/dependencies.py
|
xqhgit/fastapi-vue-blog
|
4f90869c4b8078205239be38d79ac9be6dcb56b8
|
[
"MIT"
] | 3
|
2022-03-10T08:43:24.000Z
|
2022-03-26T09:10:29.000Z
|
webapi/utils/dependencies.py
|
xqhgit/fastapi-vue-blog
|
4f90869c4b8078205239be38d79ac9be6dcb56b8
|
[
"MIT"
] | null | null | null |
webapi/utils/dependencies.py
|
xqhgit/fastapi-vue-blog
|
4f90869c4b8078205239be38d79ac9be6dcb56b8
|
[
"MIT"
] | 2
|
2022-03-11T03:14:38.000Z
|
2022-03-19T07:27:56.000Z
|
from fastapi import Depends, HTTPException, status, Header
from fastapi.security import OAuth2PasswordBearer
from pydantic import ValidationError
from jose import jwt
from webapi.db.config import async_session
from webapi.db import models, schemas
from webapi.db.dals.user_dal import UserDAL
from webapi.setting import settings
from webapi.utils import security
reusable_oauth2 = OAuth2PasswordBearer(
tokenUrl=f'/api/admin/login/access_token/'
)
class DALGetter:
    """FastAPI dependency factory: instantiate with a DAL class, call to get a
    generator that yields the DAL bound to a fresh transactional DB session."""

    def __init__(self, dal_cls):
        # DAL class to construct once per request.
        self.dal_cls = dal_cls

    async def __call__(self):
        # Open the session and its transaction together; both are closed (and
        # the transaction committed or rolled back) when the request ends.
        async with async_session() as session, session.begin():
            yield self.dal_cls(session)
async def get_current_user(
    dal: UserDAL = Depends(DALGetter(UserDAL)), token: str = Depends(reusable_oauth2)
) -> models.User:
    """Resolve the current user from the request's Bearer token.

    Decodes and verifies the JWT with the application's secret and algorithm,
    validates its payload shape, then loads the user named by the ``sub``
    claim. Raises HTTP 401 when the token is invalid, malformed, or names no
    existing user.
    """
    credentials_exception = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )
    try:
        # jwt.decode verifies signature and expiry; TokenPayload validates
        # that the expected claims are present and well-typed.
        payload = jwt.decode(
            token, settings.SECRET_KEY, algorithms=[security.ALGORITHM]
        )
        token_data = schemas.token.TokenPayload(**payload)
    except (jwt.JWTError, ValidationError):
        raise credentials_exception
    # 'sub' carries the user id.
    user = await dal.get(id=token_data.sub)
    if user is None:
        raise credentials_exception
    return user
| 30
| 89
| 0.711348
|
from fastapi import Depends, HTTPException, status, Header
from fastapi.security import OAuth2PasswordBearer
from pydantic import ValidationError
from jose import jwt
from webapi.db.config import async_session
from webapi.db import models, schemas
from webapi.db.dals.user_dal import UserDAL
from webapi.setting import settings
from webapi.utils import security
reusable_oauth2 = OAuth2PasswordBearer(
tokenUrl=f'/api/admin/login/access_token/'
)
class DALGetter:
def __init__(self, dal_cls):
self.dal_cls = dal_cls
async def __call__(self):
async with async_session() as session:
async with session.begin():
yield self.dal_cls(session)
async def get_current_user(
dal: UserDAL = Depends(DALGetter(UserDAL)), token: str = Depends(reusable_oauth2)
) -> models.User:
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(
token, settings.SECRET_KEY, algorithms=[security.ALGORITHM]
)
token_data = schemas.token.TokenPayload(**payload)
except (jwt.JWTError, ValidationError):
raise credentials_exception
user = await dal.get(id=token_data.sub)
if user is None:
raise credentials_exception
return user
| true
| true
|
f70e12e552ce2ce286b2c0216cc508629fb98b15
| 700
|
py
|
Python
|
modules/spaghetti/modules/fingerprints/waf/binarysec.py
|
GDGSNF/PXXTF
|
b1e081348c1e993c61213f3fd5f960894ce91d01
|
[
"MIT"
] | 16
|
2020-10-03T22:01:46.000Z
|
2021-08-18T16:58:56.000Z
|
modules/spaghetti/modules/fingerprints/waf/binarysec.py
|
GDGSNF/PXXTF
|
b1e081348c1e993c61213f3fd5f960894ce91d01
|
[
"MIT"
] | 1
|
2021-10-18T00:13:27.000Z
|
2021-10-18T00:13:31.000Z
|
modules/spaghetti/modules/fingerprints/waf/binarysec.py
|
yezz123/PXXTF
|
b1e081348c1e993c61213f3fd5f960894ce91d01
|
[
"MIT"
] | 5
|
2020-10-16T11:07:45.000Z
|
2021-05-19T23:49:06.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Spaghetti: Web Application Security Scanner
#
# @url: https://github.com/m4ll0k/Spaghetti
# @author: Momo Outaadi (M4ll0k)
# @license: See the file 'doc/LICENSE'
import re
class Binarysec():
    """Fingerprint check for the BinarySEC web application firewall."""

    @staticmethod
    def Run(headers):
        """Inspect HTTP response *headers*; return the WAF name on a match.

        Detection criteria: 'BinarySec' appearing in any header value, or a
        header name of the form 'x-binarysec-via' / 'x-binarysec-nocache'
        (case-insensitive). Returns None when nothing matches.

        Fixes vs. original: the header-name pattern used a character class
        ``[via|nocahe]`` (matching any single character, with a 'nocahe'
        typo) instead of an alternation group; an unreachable ``break`` after
        ``return`` and a needless list() copy of items() were removed.
        """
        try:
            for name, value in headers.items():
                if (re.search(r'BinarySec', value, re.I) is not None
                        or re.search(r'x-binarysec-(?:via|nocache)', name, re.I) is not None):
                    return "BinarySEC Web Application Firewall (BinarySEC)"
        except Exception as ERROR:
            # Best-effort scan: malformed header containers are reported, not fatal.
            print(ERROR)
| 29.166667
| 84
| 0.562857
|
import re
class Binarysec():
@staticmethod
def Run(headers):
_ = False
try:
for item in list(headers.items()):
_ = re.search(r'BinarySec',item[1],re.I) is not None
_ |= re.search(r'x-binarysec-[via|nocahe]',item[0],re.I) is not None
if _:
return "BinarySEC Web Application Firewall (BinarySEC)"
break
except Exception as ERROR:
print(ERROR)
| true
| true
|
f70e13d2cf53fe00db7193010563cbf1f5d8167b
| 870
|
py
|
Python
|
Game/buzzer.py
|
tjddus0403/osscap2020
|
642691e8e30bbf14ec6acd177da1ad58456c3a4c
|
[
"Apache-2.0"
] | 1
|
2020-10-07T12:49:33.000Z
|
2020-10-07T12:49:33.000Z
|
Game/buzzer.py
|
tjddus0403/osscap2020
|
642691e8e30bbf14ec6acd177da1ad58456c3a4c
|
[
"Apache-2.0"
] | 9
|
2020-10-09T08:58:09.000Z
|
2020-11-30T12:21:14.000Z
|
Game/buzzer.py
|
tjddus0403/osscap2020
|
642691e8e30bbf14ec6acd177da1ad58456c3a4c
|
[
"Apache-2.0"
] | 3
|
2020-10-07T12:49:35.000Z
|
2020-10-11T12:53:39.000Z
|
import RPi.GPIO as GPIO
import time
# Reference tables kept (commented out) by the original author: note
# frequencies in Hz indexed 0-13, and an alternative melody as index list.
#0 #1 #2 #3 #4 #5 #6 #7 #8 #9 #10 #11 #12 #13
#list=[261.6256|,293.6648|,329.6276|,349.2282|,391.9954|,440|,493.8833|,523.2511|,587.3295|,659.2551|,698.4565|,783.9909|,880|,987.7666]
#num=[2,4,5,8,8,7,6,5,6,4,6,8,11,11,11,12,10,9,10,5,]
# Melody as PWM frequencies in Hz, one note per half second — presumably the
# "airplane" children's tune (variable name); confirm with the author.
plane=[330, 294, 261, 294, 330, 330, 330, 294, 294, 294,
330, 392, 392]
# BCM pin number driving the piezo buzzer.
buzzer_pin=17
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(buzzer_pin,GPIO.OUT)
try:
    # Start PWM, then hold a 90% duty cycle; the pitch is set per note by
    # changing only the frequency.
    pwm=GPIO.PWM(buzzer_pin,100);
    pwm.start(100)
    pwm.ChangeDutyCycle(90)
    for i in plane:
        pwm.ChangeFrequency(i)
        time.sleep(0.5)
    #for i in range(len(num)):
    #    pwm.ChangeFrequency(list[num[i]])
    #    time.sleep(1)
except KeyboardInterrupt:
    # Ctrl-C: silence the buzzer and release the GPIO pin.
    pwm.stop()
    GPIO.cleanup()
| 33.461538
| 137
| 0.554023
|
import RPi.GPIO as GPIO
import time
| true
| true
|
f70e14360cedd17b4c21de3c4422f52574d413fc
| 1,610
|
py
|
Python
|
samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_sync.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | 1
|
2022-03-30T05:23:29.000Z
|
2022-03-30T05:23:29.000Z
|
samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_sync.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | null | null | null |
samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_sync.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteFeature
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeature_sync]
from google.cloud import aiplatform_v1beta1
def sample_delete_feature():
    """Synchronously delete a Vertex AI Feature and print the operation result.

    Generated sample: replace "name_value" with a real Feature resource name
    before running.
    """
    # Create a client
    client = aiplatform_v1beta1.FeaturestoreServiceClient()
    # Initialize request argument(s)
    request = aiplatform_v1beta1.DeleteFeatureRequest(
        name="name_value",
    )
    # Make the request (returns a long-running operation)
    operation = client.delete_feature(request=request)
    print("Waiting for operation to complete...")
    # Block until the LRO finishes.
    response = operation.result()
    # Handle the response
    print(response)
| 32.2
| 88
| 0.765217
|
from google.cloud import aiplatform_v1beta1
def sample_delete_feature():
client = aiplatform_v1beta1.FeaturestoreServiceClient()
request = aiplatform_v1beta1.DeleteFeatureRequest(
name="name_value",
)
operation = client.delete_feature(request=request)
print("Waiting for operation to complete...")
response = operation.result()
print(response)
| true
| true
|
f70e15283262a46e09c68cb2d40a91404d0eed3e
| 4,016
|
py
|
Python
|
request-management-api/request_api/auth.py
|
bcgov/foi-flow
|
7f9897b3aad4ba91fbc8edcb8f526906efb490df
|
[
"Apache-2.0"
] | null | null | null |
request-management-api/request_api/auth.py
|
bcgov/foi-flow
|
7f9897b3aad4ba91fbc8edcb8f526906efb490df
|
[
"Apache-2.0"
] | 1,579
|
2021-04-14T18:27:45.000Z
|
2022-03-31T23:49:42.000Z
|
request-management-api/request_api/auth.py
|
bcgov/foi-flow
|
7f9897b3aad4ba91fbc8edcb8f526906efb490df
|
[
"Apache-2.0"
] | 1
|
2022-03-01T20:17:47.000Z
|
2022-03-01T20:17:47.000Z
|
# Copyright © 2021 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bring in the common JWT Manager."""
from functools import wraps
from http import HTTPStatus
from flask import g, request
from flask_jwt_oidc import JwtManager
from jose import jwt as josejwt
jwt = (
JwtManager()
) # pylint: disable=invalid-name; lower case name as used by convention in most Flask apps
class Auth:
    """Extending JwtManager to include additional functionalities."""

    @classmethod
    def require(cls, f):
        """Validate the Bearer Token and expose auth info on flask.g."""
        @jwt.requires_auth
        @wraps(f)
        def decorated(*args, **kwargs):
            # Make the raw header and the verified claims available to the view.
            g.authorization_header = request.headers.get("Authorization", None)
            g.token_info = g.jwt_oidc_token_info
            return f(*args, **kwargs)

        return decorated

    @classmethod
    def ismemberofgroups(cls, groups):
        """Check that at least one of the realm groups are in the token.

        Args:
            groups [str,]: Comma separated list of valid roles

        Improvements vs. original: the flag-and-loop membership test is
        replaced by any(); the non-idiomatic ``exists == True`` comparison and
        the always-built retval tuple are gone. Behavior is unchanged:
        ("Unauthorized", 401) is returned when no required group matches.
        """
        def decorated(f):
            # Token signature is NOT verified here; use this decorator in
            # conjunction with Auth.require, which performs verification.
            @wraps(f)
            def wrapper(*args, **kwargs):
                required_groups = groups.split(',')
                token = jwt.get_token_auth_header()
                unverified_claims = josejwt.get_unverified_claims(token)
                # Group names may arrive with a leading '/' (realm path);
                # strip that prefix before comparing.
                usergroups = [
                    usergroup[1:] if usergroup.startswith('/') else usergroup
                    for usergroup in unverified_claims['groups']
                ]
                if any(group in usergroups for group in required_groups):
                    return f(*args, **kwargs)
                return "Unauthorized", 401

            return wrapper

        return decorated
auth = (
Auth()
)
class AuthHelper:
    """Convenience accessors over the (unverified) JWT claims of the current
    request's Bearer token.

    Refactor: the token-extraction/claim-parsing sequence was duplicated in
    all four public methods and the group-prefix normalization was duplicated
    twice; both now live in single helpers. Public method names, signatures,
    and return values are unchanged.
    """

    @classmethod
    def _claims(cls):
        """Return the unverified claims of the request's Authorization token."""
        # NOTE(review): claims are read without signature verification, as in
        # the original — callers must only use this behind verified routes.
        token = request.headers.get("Authorization", None)
        return josejwt.get_unverified_claims(token.partition("Bearer")[2].strip())

    @classmethod
    def getuserid(cls):
        """Return the token's preferred_username claim."""
        return cls._claims()['preferred_username']

    @classmethod
    def getusername(cls):
        """Return the token's display-name claim."""
        return cls._claims()['name']

    @classmethod
    def isministrymember(cls):
        """True when any of the user's groups ends with 'Ministry Team'."""
        return any(group.endswith("Ministry Team") for group in cls.getusergroups())

    @classmethod
    def getusergroups(cls):
        """Return the user's groups with any leading '/' path prefix stripped."""
        groups = cls._claims()['groups']
        return [group[1:] if group.startswith('/') else group for group in groups]
| 36.844037
| 130
| 0.638695
|
from functools import wraps
from http import HTTPStatus
from flask import g, request
from flask_jwt_oidc import JwtManager
from jose import jwt as josejwt
jwt = (
JwtManager()
)
class Auth:
@classmethod
def require(cls, f):
@jwt.requires_auth
@wraps(f)
def decorated(*args, **kwargs):
g.authorization_header = request.headers.get("Authorization", None)
g.token_info = g.jwt_oidc_token_info
return f(*args, **kwargs)
return decorated
@classmethod
def ismemberofgroups(cls, groups):
def decorated(f):
@wraps(f)
def wrapper(*args, **kwargs):
_groups = groups.split(',')
token = jwt.get_token_auth_header()
unverified_claims = josejwt.get_unverified_claims(token)
usergroups = unverified_claims['groups']
usergroups = [usergroup.replace('/','',1) if usergroup.startswith('/') else usergroup for usergroup in usergroups]
exists = False
for group in _groups:
if group in usergroups:
exists = True
retval = "Unauthorized" , 401
if exists == True:
return f(*args, **kwargs)
return retval
return wrapper
return decorated
auth = (
Auth()
)
class AuthHelper:
@classmethod
def getuserid(cls):
token = request.headers.get("Authorization", None)
unverified_claims = josejwt.get_unverified_claims(token.partition("Bearer")[2].strip())
return unverified_claims['preferred_username']
@classmethod
def getusername(cls):
token = request.headers.get("Authorization", None)
unverified_claims = josejwt.get_unverified_claims(token.partition("Bearer")[2].strip())
return unverified_claims['name']
@classmethod
def isministrymember(cls):
token = request.headers.get("Authorization", None)
unverified_claims = josejwt.get_unverified_claims(token.partition("Bearer")[2].strip())
usergroups = unverified_claims['groups']
usergroups = [usergroup.replace('/','',1) if usergroup.startswith('/') else usergroup for usergroup in usergroups]
for group in usergroups:
if group.endswith("Ministry Team"):
return True
return False
@classmethod
def getusergroups(cls):
token = request.headers.get("Authorization", None)
unverified_claims = josejwt.get_unverified_claims(token.partition("Bearer")[2].strip())
usergroups = unverified_claims['groups']
usergroups = [usergroup.replace('/','',1) if usergroup.startswith('/') else usergroup for usergroup in usergroups]
return usergroups
| true
| true
|
f70e1935ab2f3c77a77c5a5dae326c3b14cd1e1d
| 953
|
py
|
Python
|
python/src/weichat/test/catch_message.py
|
weiwei02/Technical--Documentation
|
d53d702b17cbeb9e4940764c6e4a4277382ec0cf
|
[
"Apache-2.0"
] | 2
|
2017-06-25T13:30:40.000Z
|
2017-09-18T16:50:40.000Z
|
python/src/weichat/test/catch_message.py
|
weiwei02/Technical--Documentation
|
d53d702b17cbeb9e4940764c6e4a4277382ec0cf
|
[
"Apache-2.0"
] | null | null | null |
python/src/weichat/test/catch_message.py
|
weiwei02/Technical--Documentation
|
d53d702b17cbeb9e4940764c6e4a4277382ec0cf
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""
1. 实现微信消息的抓取
:author Wang Weiwei <email>weiwei02@vip.qq.com / weiwei.wang@100credit.com</email>
:sine 2017/8/11
:version 1.0
"""
import itchat,time
import queue
import _thread
XIAOBING_ID = 'xiaoice-ms'
msgQueue = queue.Queue(maxsize=100)
@itchat.msg_register(itchat.content.TEXT, isMpChat=True)
def print_content(msg):
if msg["FromUserName"] == XIAOBING_ID:
msgQueue.put(msg["Text"])
print("公众号消息", msg["Text"])
@itchat.msg_register(itchat.content.TEXT, isFriendChat=True)
def print_contents(msg):
    # Forward every friend-chat text message to a fixed user, then return the
    # next reply buffered by print_content above.
    # NOTE(review): msgQueue.get() blocks this handler until a reply arrives —
    # confirm that stalling the itchat callback thread is intended.
    print(msg)
    itchat.send_msg(msg["Text"], toUserName="@3c0f48b3cec6e9d90fe03a8a0edb78eb")
    return msgQueue.get()
# Log in (hotReload reuses a cached session) and run the itchat message loop
# on a background thread so the registered handlers above receive events.
itchat.auto_login(hotReload=True)
itchat.start_receiving()
_thread.start_new_thread(itchat.run, ())
| 22.690476
| 87
| 0.713536
|
import itchat,time
import queue
import _thread
XIAOBING_ID = 'xiaoice-ms'
msgQueue = queue.Queue(maxsize=100)
@itchat.msg_register(itchat.content.TEXT, isMpChat=True)
def print_content(msg):
if msg["FromUserName"] == XIAOBING_ID:
msgQueue.put(msg["Text"])
print("公众号消息", msg["Text"])
@itchat.msg_register(itchat.content.TEXT, isFriendChat=True)
def print_contents(msg):
print(msg)
itchat.send_msg(msg["Text"], toUserName="@3c0f48b3cec6e9d90fe03a8a0edb78eb")
return msgQueue.get()
itchat.auto_login(hotReload=True)
itchat.start_receiving()
_thread.start_new_thread(itchat.run, ())
| true
| true
|
f70e1c7958eb5a1da8353fd4c92b1dd3852bdb9a
| 7,537
|
py
|
Python
|
xarray/tests/test_distributed.py
|
snowman2/xarray
|
d47cf0c850cb70429373782b3c1e0329d14fd05a
|
[
"Apache-2.0"
] | null | null | null |
xarray/tests/test_distributed.py
|
snowman2/xarray
|
d47cf0c850cb70429373782b3c1e0329d14fd05a
|
[
"Apache-2.0"
] | null | null | null |
xarray/tests/test_distributed.py
|
snowman2/xarray
|
d47cf0c850cb70429373782b3c1e0329d14fd05a
|
[
"Apache-2.0"
] | null | null | null |
""" isort:skip_file """
import pickle
import pytest
dask = pytest.importorskip("dask") # isort:skip
distributed = pytest.importorskip("distributed") # isort:skip
from dask.distributed import Client, Lock
from distributed.utils_test import cluster, gen_cluster
from distributed.utils_test import loop
from distributed.client import futures_of
import xarray as xr
from xarray.backends.locks import HDF5_LOCK, CombinedLock
from xarray.tests.test_backends import (
ON_WINDOWS,
create_tmp_file,
create_tmp_geotiff,
open_example_dataset,
)
from xarray.tests.test_dataset import create_test_data
from . import (
assert_allclose,
has_h5netcdf,
has_netCDF4,
requires_rasterio,
has_scipy,
requires_zarr,
requires_cfgrib,
)
# this is to stop isort throwing errors. May have been easier to just use
# `isort:skip` in retrospect
da = pytest.importorskip("dask.array")
loop = loop # loop is an imported fixture, which flake8 has issues ack-ing
@pytest.fixture
def tmp_netcdf_filename(tmpdir):
    """Path of a scratch netCDF file inside pytest's per-test tmpdir."""
    target = tmpdir.join("testfile.nc")
    return str(target)
# netCDF engines available in the current environment.
ENGINES = []
if has_scipy:
    ENGINES.append("scipy")
if has_netCDF4:
    ENGINES.append("netcdf4")
if has_h5netcdf:
    ENGINES.append("h5netcdf")
# File formats each engine is able to write.
NC_FORMATS = {
    "netcdf4": [
        "NETCDF3_CLASSIC",
        "NETCDF3_64BIT_OFFSET",
        "NETCDF3_64BIT_DATA",
        "NETCDF4_CLASSIC",
        "NETCDF4",
    ],
    "scipy": ["NETCDF3_CLASSIC", "NETCDF3_64BIT"],
    "h5netcdf": ["NETCDF4"],
}
# (engine, format) pairs exercised by the parametrized round-trip tests below.
ENGINES_AND_FORMATS = [
    ("netcdf4", "NETCDF3_CLASSIC"),
    ("netcdf4", "NETCDF4_CLASSIC"),
    ("netcdf4", "NETCDF4"),
    ("h5netcdf", "NETCDF4"),
    ("scipy", "NETCDF3_64BIT"),
]
@pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS)
def test_dask_distributed_netcdf_roundtrip(
    loop, tmp_netcdf_filename, engine, nc_format
):
    """Round-trip a chunked dataset through to_netcdf/open_dataset on a dask cluster."""
    if engine not in ENGINES:
        pytest.skip("engine not available")
    chunks = {"dim1": 4, "dim2": 3, "dim3": 6}
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop):
            original = create_test_data().chunk(chunks)
            # scipy cannot write dask-backed data: distributed writes must raise.
            if engine == "scipy":
                with pytest.raises(NotImplementedError):
                    original.to_netcdf(
                        tmp_netcdf_filename, engine=engine, format=nc_format
                    )
                return
            original.to_netcdf(tmp_netcdf_filename, engine=engine, format=nc_format)
            with xr.open_dataset(
                tmp_netcdf_filename, chunks=chunks, engine=engine
            ) as restored:
                assert isinstance(restored.var1.data, da.Array)
                computed = restored.compute()
                assert_allclose(original, computed)
@pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS)
def test_dask_distributed_read_netcdf_integration_test(
    loop, tmp_netcdf_filename, engine, nc_format
):
    """Write a netCDF file eagerly, then read it back chunked through a cluster."""
    if engine not in ENGINES:
        pytest.skip("engine not available")
    chunks = {"dim1": 4, "dim2": 3, "dim3": 6}
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop):
            original = create_test_data()
            original.to_netcdf(tmp_netcdf_filename, engine=engine, format=nc_format)
            with xr.open_dataset(
                tmp_netcdf_filename, chunks=chunks, engine=engine
            ) as restored:
                assert isinstance(restored.var1.data, da.Array)
                computed = restored.compute()
                assert_allclose(original, computed)
@requires_zarr
@pytest.mark.parametrize("consolidated", [True, False])
@pytest.mark.parametrize("compute", [True, False])
def test_dask_distributed_zarr_integration_test(loop, consolidated, compute) -> None:
    """Round-trip a chunked dataset through zarr on a distributed cluster."""
    if consolidated:
        # Consolidated metadata needs a sufficiently recent zarr.
        pytest.importorskip("zarr", minversion="2.2.1.dev2")
        write_kwargs = {"consolidated": True}
        read_kwargs = {"backend_kwargs": {"consolidated": True}}
    else:
        write_kwargs = read_kwargs = {}  # type: ignore
    chunks = {"dim1": 4, "dim2": 3, "dim3": 5}
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop):
            original = create_test_data().chunk(chunks)
            with create_tmp_file(
                allow_cleanup_failure=ON_WINDOWS, suffix=".zarrc"
            ) as filename:
                maybe_futures = original.to_zarr(
                    filename, compute=compute, **write_kwargs
                )
                # With compute=False to_zarr returns a delayed write that
                # must be triggered explicitly.
                if not compute:
                    maybe_futures.compute()
                with xr.open_dataset(
                    filename, chunks="auto", engine="zarr", **read_kwargs
                ) as restored:
                    assert isinstance(restored.var1.data, da.Array)
                    computed = restored.compute()
                    assert_allclose(original, computed)
@requires_rasterio
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
def test_dask_distributed_rasterio_integration_test(loop) -> None:
    """open_rasterio with chunks must yield dask-backed data computable on a cluster."""
    with create_tmp_geotiff() as (tmp_file, expected):
        with cluster() as (s, [a, b]):
            # open_rasterio is deprecated, hence the expected DeprecationWarning.
            with pytest.warns(DeprecationWarning), Client(s["address"], loop=loop):
                da_tiff = xr.open_rasterio(tmp_file, chunks={"band": 1})
                assert isinstance(da_tiff.data, da.Array)
                actual = da_tiff.compute()
                assert_allclose(actual, expected)
@requires_cfgrib
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
def test_dask_distributed_cfgrib_integration_test(loop) -> None:
    """A chunked GRIB dataset must compute on a cluster to the same values as an eager read."""
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop):
            with open_example_dataset(
                "example.grib", engine="cfgrib", chunks={"time": 1}
            ) as ds:
                with open_example_dataset("example.grib", engine="cfgrib") as expected:
                    assert isinstance(ds["t"].data, da.Array)
                    actual = ds.compute()
                    assert_allclose(actual, expected)
@pytest.mark.xfail(reason="https://github.com/pydata/xarray/pull/6211")
@gen_cluster(client=True)
async def test_async(c, s, a, b) -> None:
    """Chunk, persist and asynchronously compute a dataset on an in-process cluster."""
    x = create_test_data()
    assert not dask.is_dask_collection(x)
    y = x.chunk({"dim2": 4}) + 10
    assert dask.is_dask_collection(y)
    assert dask.is_dask_collection(y.var1)
    assert dask.is_dask_collection(y.var2)
    z = y.persist()
    assert str(z)
    assert dask.is_dask_collection(z)
    assert dask.is_dask_collection(z.var1)
    assert dask.is_dask_collection(z.var2)
    # After persist the graph shrinks and the collection is backed by futures.
    assert len(y.__dask_graph__()) > len(z.__dask_graph__())
    assert not futures_of(y)
    assert futures_of(z)
    future = c.compute(z)
    w = await future
    assert not dask.is_dask_collection(w)
    assert_allclose(x + 10, w)
    assert s.tasks
def test_hdf5_lock() -> None:
    """HDF5_LOCK must be a SerializableLock so it can be shipped to dask workers."""
    assert isinstance(HDF5_LOCK, dask.utils.SerializableLock)
@pytest.mark.xfail(reason="https://github.com/pydata/xarray/pull/6211")
@gen_cluster(client=True)
async def test_serializable_locks(c, s, a, b) -> None:
    """Each lock type must be usable from worker tasks and survive pickling."""
    def f(x, lock=None):
        with lock:
            return x + 1
    # note, the creation of Lock needs to be done inside a cluster
    for lock in [
        HDF5_LOCK,
        Lock(),
        Lock("filename.nc"),
        CombinedLock([HDF5_LOCK]),
        CombinedLock([HDF5_LOCK, Lock("filename.nc")]),
    ]:
        futures = c.map(f, list(range(10)), lock=lock)
        await c.gather(futures)
        # Round-trip through pickle must preserve the concrete lock type.
        lock2 = pickle.loads(pickle.dumps(lock))
        assert type(lock) == type(lock2)
| 31.144628
| 87
| 0.63646
|
import pickle
import pytest
dask = pytest.importorskip("dask")
distributed = pytest.importorskip("distributed")
from dask.distributed import Client, Lock
from distributed.utils_test import cluster, gen_cluster
from distributed.utils_test import loop
from distributed.client import futures_of
import xarray as xr
from xarray.backends.locks import HDF5_LOCK, CombinedLock
from xarray.tests.test_backends import (
ON_WINDOWS,
create_tmp_file,
create_tmp_geotiff,
open_example_dataset,
)
from xarray.tests.test_dataset import create_test_data
from . import (
assert_allclose,
has_h5netcdf,
has_netCDF4,
requires_rasterio,
has_scipy,
requires_zarr,
requires_cfgrib,
)
da = pytest.importorskip("dask.array")
loop = loop
@pytest.fixture
def tmp_netcdf_filename(tmpdir):
return str(tmpdir.join("testfile.nc"))
ENGINES = []
if has_scipy:
ENGINES.append("scipy")
if has_netCDF4:
ENGINES.append("netcdf4")
if has_h5netcdf:
ENGINES.append("h5netcdf")
NC_FORMATS = {
"netcdf4": [
"NETCDF3_CLASSIC",
"NETCDF3_64BIT_OFFSET",
"NETCDF3_64BIT_DATA",
"NETCDF4_CLASSIC",
"NETCDF4",
],
"scipy": ["NETCDF3_CLASSIC", "NETCDF3_64BIT"],
"h5netcdf": ["NETCDF4"],
}
ENGINES_AND_FORMATS = [
("netcdf4", "NETCDF3_CLASSIC"),
("netcdf4", "NETCDF4_CLASSIC"),
("netcdf4", "NETCDF4"),
("h5netcdf", "NETCDF4"),
("scipy", "NETCDF3_64BIT"),
]
@pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS)
def test_dask_distributed_netcdf_roundtrip(
loop, tmp_netcdf_filename, engine, nc_format
):
if engine not in ENGINES:
pytest.skip("engine not available")
chunks = {"dim1": 4, "dim2": 3, "dim3": 6}
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop):
original = create_test_data().chunk(chunks)
if engine == "scipy":
with pytest.raises(NotImplementedError):
original.to_netcdf(
tmp_netcdf_filename, engine=engine, format=nc_format
)
return
original.to_netcdf(tmp_netcdf_filename, engine=engine, format=nc_format)
with xr.open_dataset(
tmp_netcdf_filename, chunks=chunks, engine=engine
) as restored:
assert isinstance(restored.var1.data, da.Array)
computed = restored.compute()
assert_allclose(original, computed)
@pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS)
def test_dask_distributed_read_netcdf_integration_test(
loop, tmp_netcdf_filename, engine, nc_format
):
if engine not in ENGINES:
pytest.skip("engine not available")
chunks = {"dim1": 4, "dim2": 3, "dim3": 6}
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop):
original = create_test_data()
original.to_netcdf(tmp_netcdf_filename, engine=engine, format=nc_format)
with xr.open_dataset(
tmp_netcdf_filename, chunks=chunks, engine=engine
) as restored:
assert isinstance(restored.var1.data, da.Array)
computed = restored.compute()
assert_allclose(original, computed)
@requires_zarr
@pytest.mark.parametrize("consolidated", [True, False])
@pytest.mark.parametrize("compute", [True, False])
def test_dask_distributed_zarr_integration_test(loop, consolidated, compute) -> None:
if consolidated:
pytest.importorskip("zarr", minversion="2.2.1.dev2")
write_kwargs = {"consolidated": True}
read_kwargs = {"backend_kwargs": {"consolidated": True}}
else:
write_kwargs = read_kwargs = {}
chunks = {"dim1": 4, "dim2": 3, "dim3": 5}
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop):
original = create_test_data().chunk(chunks)
with create_tmp_file(
allow_cleanup_failure=ON_WINDOWS, suffix=".zarrc"
) as filename:
maybe_futures = original.to_zarr(
filename, compute=compute, **write_kwargs
)
if not compute:
maybe_futures.compute()
with xr.open_dataset(
filename, chunks="auto", engine="zarr", **read_kwargs
) as restored:
assert isinstance(restored.var1.data, da.Array)
computed = restored.compute()
assert_allclose(original, computed)
@requires_rasterio
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
def test_dask_distributed_rasterio_integration_test(loop) -> None:
with create_tmp_geotiff() as (tmp_file, expected):
with cluster() as (s, [a, b]):
with pytest.warns(DeprecationWarning), Client(s["address"], loop=loop):
da_tiff = xr.open_rasterio(tmp_file, chunks={"band": 1})
assert isinstance(da_tiff.data, da.Array)
actual = da_tiff.compute()
assert_allclose(actual, expected)
@requires_cfgrib
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
def test_dask_distributed_cfgrib_integration_test(loop) -> None:
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop):
with open_example_dataset(
"example.grib", engine="cfgrib", chunks={"time": 1}
) as ds:
with open_example_dataset("example.grib", engine="cfgrib") as expected:
assert isinstance(ds["t"].data, da.Array)
actual = ds.compute()
assert_allclose(actual, expected)
@pytest.mark.xfail(reason="https://github.com/pydata/xarray/pull/6211")
@gen_cluster(client=True)
async def test_async(c, s, a, b) -> None:
x = create_test_data()
assert not dask.is_dask_collection(x)
y = x.chunk({"dim2": 4}) + 10
assert dask.is_dask_collection(y)
assert dask.is_dask_collection(y.var1)
assert dask.is_dask_collection(y.var2)
z = y.persist()
assert str(z)
assert dask.is_dask_collection(z)
assert dask.is_dask_collection(z.var1)
assert dask.is_dask_collection(z.var2)
assert len(y.__dask_graph__()) > len(z.__dask_graph__())
assert not futures_of(y)
assert futures_of(z)
future = c.compute(z)
w = await future
assert not dask.is_dask_collection(w)
assert_allclose(x + 10, w)
assert s.tasks
def test_hdf5_lock() -> None:
assert isinstance(HDF5_LOCK, dask.utils.SerializableLock)
@pytest.mark.xfail(reason="https://github.com/pydata/xarray/pull/6211")
@gen_cluster(client=True)
async def test_serializable_locks(c, s, a, b) -> None:
def f(x, lock=None):
with lock:
return x + 1
for lock in [
HDF5_LOCK,
Lock(),
Lock("filename.nc"),
CombinedLock([HDF5_LOCK]),
CombinedLock([HDF5_LOCK, Lock("filename.nc")]),
]:
futures = c.map(f, list(range(10)), lock=lock)
await c.gather(futures)
lock2 = pickle.loads(pickle.dumps(lock))
assert type(lock) == type(lock2)
| true
| true
|
f70e1d37c5742828f7d32c361be3599316abc4b4
| 4,068
|
py
|
Python
|
mkrel.py
|
CDrummond/lms-volumecheck
|
d945705a5570656d9e33b65da026a19da2e6d596
|
[
"MIT"
] | 1
|
2020-09-06T16:03:11.000Z
|
2020-09-06T16:03:11.000Z
|
mkrel.py
|
CDrummond/lms-volumecheck
|
d945705a5570656d9e33b65da026a19da2e6d596
|
[
"MIT"
] | null | null | null |
mkrel.py
|
CDrummond/lms-volumecheck
|
d945705a5570656d9e33b65da026a19da2e6d596
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# LMS-AutoPlay
#
# Copyright (c) 2020 Craig Drummond <craig.p.drummond@gmail.com>
# MIT license.
#
import hashlib
import os
import re
import requests
import shutil
import sys
# Repository metadata file updated on (non-test) releases.
REPO_XML = "repo.xml"
# Directory holding the plugin source; also the plugin's name inside repo.xml.
PLUGIN_NAME = "VolumeCheck"
# GitHub repository name, used for release URLs and ZIP file names.
PLUGIN_GIT_NAME = "lms-volumecheck"
def info(s):
    """Log an informational message to stdout."""
    message = "INFO: %s" % s
    print(message)
def error(s):
    """Log an error message to stdout and abort the script with status -1."""
    message = "ERROR: %s" % s
    print(message)
    exit(-1)
def usage():
    """Print command-line usage and abort the script with status -1."""
    message = "Usage: %s <major>.<minor>.<patch>" % sys.argv[0]
    print(message)
    exit(-1)
def checkVersion(version):
    """Abort unless *version* is exactly <major>.<minor>.<patch> with integer parts.

    The previous implementation used a bare except, left dead locals, and
    silently accepted versions with extra components (e.g. "1.2.3.4")
    because it only inspected the first three. Requiring exactly three
    integer components fixes that while still aborting via error().
    """
    parts = version.split('.')
    try:
        numbers = [int(part) for part in parts]
    except ValueError:
        numbers = []
    if len(numbers) != 3:
        error("Invalid version number")
def releaseUrl(version):
    """Return the GitHub download URL of the plugin ZIP for *version*."""
    template = "https://github.com/CDrummond/%s/releases/download/%s/%s-%s.zip"
    return template % (PLUGIN_GIT_NAME, version, PLUGIN_GIT_NAME, version)
def checkVersionExists(version):
    """Abort if a release ZIP for *version* is already published on GitHub."""
    url = releaseUrl(version)
    info("Checking %s" % url)
    response = requests.head(url)
    # A 200 (or a 302 redirect to the asset) means the release exists.
    if response.status_code in (200, 302):
        error("Version already exists")
def updateLine(line, startStr, endStr, updateStr):
    """Replace the text between startStr and endStr in *line* with updateStr.

    Returns the rewritten line, or None when either marker is missing.
    """
    begin = line.find(startStr)
    if begin == -1:
        return None
    begin += len(startStr)
    finish = line.find(endStr, begin)
    if finish == -1:
        return None
    return "%s%s%s" % (line[:begin], updateStr, line[finish:])
def updateInstallXml(version):
    """Rewrite the <version> element of the plugin's install.xml to *version*.

    Aborts via error() when no <version> tag is found. The previous code
    reused one variable both as updateLine()'s return value and as the
    success flag; a dedicated flag makes the control flow explicit.
    """
    installXml = "%s/install.xml" % PLUGIN_NAME
    info("Updating %s" % installXml)
    with open(installXml, "r") as f:
        lines = f.readlines()
    found = False
    for i, line in enumerate(lines):
        replaced = updateLine(line, "<version>", "</version>", version)
        if replaced:
            lines[i] = replaced
            found = True
            break
    if not found:
        error("Failed to update version in %s" % installXml)
    with open(installXml, "w") as f:
        f.writelines(lines)
def createZip(version):
    """Zip the plugin directory and return the archive's file name."""
    info("Creating ZIP")
    archive_base = "%s-%s" % (PLUGIN_GIT_NAME, version)
    shutil.make_archive(archive_base, 'zip', PLUGIN_NAME)
    return archive_base + ".zip"
def getSha1Sum(zipFile):
    """Return the hex SHA-1 digest of *zipFile*, read in 64 KiB chunks."""
    info("Generating SHA1")
    digest = hashlib.sha1()
    with open(zipFile, 'rb') as f:
        for chunk in iter(lambda: f.read(65535), b''):
            digest.update(chunk)
    return digest.hexdigest()
def updateRepoXml(repo, version, zipFile, sha1, pluginName=None):
    """Rewrite version, download URL and SHA of one plugin entry in *repo*.

    When pluginName is given, edits are restricted to the <plugin name="...">
    section whose opening tag contains that name; otherwise the whole file is
    in scope. Aborts via error() if any of the three fields was not updated.
    NOTE(review): *zipFile* is accepted but never used — confirm whether it
    should feed the URL instead of releaseUrl(version).
    """
    lines=[]
    updatedVersion=False
    updatedUrl=False
    updatedSha=False
    info("Updating %s" % repo)
    # With no pluginName every line is "in section" from the start.
    inSection = pluginName is None
    with open(repo, "r") as f:
        lines=f.readlines()
    for i in range(len(lines)):
        # Entering a new <plugin name="..."> tag toggles whether we are in
        # the section we want to edit.
        if pluginName is not None and '<plugin name="' in lines[i]:
            inSection = pluginName in lines[i]
        if inSection:
            updated = updateLine(lines[i], 'version="', '"', version)
            if updated:
                lines[i]=updated
                updatedVersion=True
            updated = updateLine(lines[i], '<url>', '</url>', releaseUrl(version))
            if updated:
                lines[i]=updated
                updatedUrl=True
            updated = updateLine(lines[i], '<sha>', '</sha>', sha1)
            if updated:
                lines[i]=updated
                updatedSha=True
            # Stop scanning once all three fields have been rewritten.
            if updatedVersion and updatedUrl and updatedSha:
                break
    if not updatedVersion:
        error("Failed to update version in %s" % repo)
    if not updatedUrl:
        error("Failed to url version in %s" % repo)
    if not updatedSha:
        error("Failed to sha version in %s" % repo)
    with open(repo, "w") as f:
        for line in lines:
            f.write(line)
# ---- script entry point ----
if 1==len(sys.argv):
    usage()
version=sys.argv[1]
# The special version "test" builds the ZIP without validating the version,
# bumping install.xml, or touching repo.xml.
if version!="test":
    checkVersion(version)
    checkVersionExists(version)
    updateInstallXml(version)
zipFile = createZip(version)
sha1 = getSha1Sum(zipFile)
if version!="test" and os.path.exists(REPO_XML):
    updateRepoXml(REPO_XML, version, zipFile, sha1, PLUGIN_NAME)
| 25.111111
| 130
| 0.598328
|
import hashlib
import os
import re
import requests
import shutil
import sys
REPO_XML = "repo.xml"
PLUGIN_NAME = "VolumeCheck"
PLUGIN_GIT_NAME = "lms-volumecheck"
def info(s):
print("INFO: %s" %s)
def error(s):
print("ERROR: %s" % s)
exit(-1)
def usage():
print("Usage: %s <major>.<minor>.<patch>" % sys.argv[0])
exit(-1)
def checkVersion(version):
try:
parts=version.split('.')
major=int(parts[0])
minor=int(parts[1])
patch=int(parts[2])
except:
error("Invalid version number")
def releaseUrl(version):
return "https://github.com/CDrummond/%s/releases/download/%s/%s-%s.zip" % (PLUGIN_GIT_NAME, version, PLUGIN_GIT_NAME, version)
def checkVersionExists(version):
url = releaseUrl(version)
info("Checking %s" % url)
request = requests.head(url)
if request.status_code == 200 or request.status_code == 302:
error("Version already exists")
def updateLine(line, startStr, endStr, updateStr):
start=line.find(startStr)
if start!=-1:
start+=len(startStr)
end=line.find(endStr, start)
if end!=-1:
return "%s%s%s" % (line[:start], updateStr, line[end:])
return None
def updateInstallXml(version):
lines=[]
updated=False
installXml = "%s/install.xml" % PLUGIN_NAME
info("Updating %s" % installXml)
with open(installXml, "r") as f:
lines=f.readlines()
for i in range(len(lines)):
updated = updateLine(lines[i], "<version>", "</version>", version)
if updated:
lines[i]=updated
updated=True
break
if not updated:
error("Failed to update version in %s" % installXml)
with open(installXml, "w") as f:
for line in lines:
f.write(line)
def createZip(version):
info("Creating ZIP")
zipFile="%s-%s" % (PLUGIN_GIT_NAME, version)
shutil.make_archive(zipFile, 'zip', PLUGIN_NAME)
zipFile+=".zip"
return zipFile
def getSha1Sum(zipFile):
info("Generating SHA1")
sha1 = hashlib.sha1()
with open(zipFile, 'rb') as f:
while True:
data = f.read(65535)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
def updateRepoXml(repo, version, zipFile, sha1, pluginName=None):
lines=[]
updatedVersion=False
updatedUrl=False
updatedSha=False
info("Updating %s" % repo)
inSection = pluginName is None
with open(repo, "r") as f:
lines=f.readlines()
for i in range(len(lines)):
if pluginName is not None and '<plugin name="' in lines[i]:
inSection = pluginName in lines[i]
if inSection:
updated = updateLine(lines[i], 'version="', '"', version)
if updated:
lines[i]=updated
updatedVersion=True
updated = updateLine(lines[i], '<url>', '</url>', releaseUrl(version))
if updated:
lines[i]=updated
updatedUrl=True
updated = updateLine(lines[i], '<sha>', '</sha>', sha1)
if updated:
lines[i]=updated
updatedSha=True
if updatedVersion and updatedUrl and updatedSha:
break
if not updatedVersion:
error("Failed to update version in %s" % repo)
if not updatedUrl:
error("Failed to url version in %s" % repo)
if not updatedSha:
error("Failed to sha version in %s" % repo)
with open(repo, "w") as f:
for line in lines:
f.write(line)
if 1==len(sys.argv):
usage()
version=sys.argv[1]
if version!="test":
checkVersion(version)
checkVersionExists(version)
updateInstallXml(version)
zipFile = createZip(version)
sha1 = getSha1Sum(zipFile)
if version!="test" and os.path.exists(REPO_XML):
updateRepoXml(REPO_XML, version, zipFile, sha1, PLUGIN_NAME)
| true
| true
|
f70e1d838b73f282995fc08cafd08c13943feeab
| 10,957
|
py
|
Python
|
conan_inquiry/transformers/github.py
|
jsdelivrbot/conan_inquiry-1
|
f218012b02db0fc2abf5e95e2be4a72113b7d820
|
[
"MIT"
] | 11
|
2017-11-14T17:21:51.000Z
|
2021-04-03T13:09:56.000Z
|
conan_inquiry/transformers/github.py
|
jsdelivrbot/conan_inquiry-1
|
f218012b02db0fc2abf5e95e2be4a72113b7d820
|
[
"MIT"
] | 7
|
2018-03-31T12:22:07.000Z
|
2019-12-02T02:25:04.000Z
|
conan_inquiry/transformers/github.py
|
jsdelivrbot/conan_inquiry-1
|
f218012b02db0fc2abf5e95e2be4a72113b7d820
|
[
"MIT"
] | 2
|
2018-07-30T06:09:34.000Z
|
2018-12-07T18:37:49.000Z
|
import json
import os
from base64 import b64decode
from datetime import timedelta
from threading import Semaphore
import github
from dotmap import DotMap
from github import GithubException
from conan_inquiry.transformers.base import BaseGithubTransformer
from conan_inquiry.util.general import render_readme
from conan_inquiry.util.travis import repo_has_travis
class GithubTransformer(BaseGithubTransformer):
    """
    Populates empty urls based on the Github url, if given.

    Combines the GitHub v4 GraphQL API with a few v3 REST calls (via
    self.repo); both responses are cached (2 days) to save rate limit.
    """
    # At most 15 packages may talk to the GitHub API concurrently.
    github_limit = Semaphore(value=15)
    def transform(self, package):
        """Fill in missing urls/stats/authors/keywords of *package* from GitHub.

        Returns the package unchanged when it has no 'github' url or when
        the GitHub API raises a GithubException.
        """
        if 'github' in package.urls:
            with self.github_limit:
                # Drop '.git' from the repo id. NOTE(review): replace()
                # removes any occurrence, not only a trailing suffix.
                github_id = package.urls.github.replace('.git', '')
                try:
                    self._set_repo(github_id)
                    # REST-only fields, cached for two days.
                    v3data = self.cache.get(github_id, timedelta(days=2), 'github_api',
                                            lambda: self._v3_requests(),
                                            locked_getter=False)
                    num_contributors = v3data['num_contributors']
                    latest_commit = v3data['latest_commit']
                    clone_url = v3data['clone_url']
                    repo_owner = v3data['repo_owner']
                    repo_name = v3data['repo_name']
                    # Single GraphQL query for everything else.
                    graph_request = '''
                        query Repo($owner:String!, $name:String!) {
                            repo: repository(owner: $owner, name: $name) {
                                owner {
                                    login
                                    ... on Organization {
                                        name
                                        orgEmail: email
                                        websiteUrl
                                    }
                                    ... on User {
                                        name
                                        userEmail: email
                                        websiteUrl
                                    }
                                }
                                tree: object(expression: "HEAD:") {
                                    ... on Tree {
                                        entries {
                                            name
                                        }
                                    }
                                }
                                repositoryTopics(first: 20) {
                                    totalCount
                                    nodes {
                                        topic {
                                            name
                                        }
                                    }
                                }
                                forks {
                                    totalCount
                                }
                                description
                                hasIssuesEnabled
                                hasWikiEnabled
                                homepageUrl
                                url
                                openIssues: issues(states: OPEN) {
                                    totalCount
                                }
                                closedIssues: issues(states: CLOSED) {
                                    totalCount
                                }
                                openPRs: pullRequests(states: OPEN) {
                                    totalCount
                                }
                                closedPRs: pullRequests(states: CLOSED) {
                                    totalCount
                                }
                                pushedAt
                                stargazers {
                                    totalCount
                                }
                                watchers {
                                    totalCount
                                }
                            }
                            rateLimit {
                                cost
                                remaining
                            }
                        }
                        '''
                    graph_data = self.cache.get(github_id, timedelta(days=2), 'github_graph',
                                                lambda: self.github_graph.execute(
                                                    graph_request,
                                                    dict(owner=repo_owner,
                                                         name=repo_name)),
                                                locked_getter=False)
                    graph = json.loads(graph_data)['data']
                except GithubException:
                    # Repo missing/inaccessible: leave the package untouched.
                    return package
                # Skip descriptions that merely repeat the package name.
                if graph['repo']['description'] != package.name:
                    self._set_unless_exists(package, 'description', graph['repo']['description'])
                self._set_unless_exists(package.urls, 'website', graph['repo']['homepageUrl'])
                self._set_unless_exists(package.urls, 'website', 'https://github.com/' + github_id)
                self._set_unless_exists(package.urls, 'code', graph['repo']['url'])
                if graph['repo']['hasIssuesEnabled']:
                    self._set_unless_exists(package.urls, 'issues', graph['repo']['url'] + '/issues')
                if graph['repo']['hasWikiEnabled']:
                    # TODO: check if there is content in the wiki
                    self._set_unless_exists(package.urls, 'wiki', graph['repo']['url'] + '/wiki')
                if repo_has_travis(github_id, self.http):
                    self._set_unless_exists(package.urls, 'travis',
                                            'https://travis-ci.org/' + github_id)
                self._set_unless_exists(package.urls, 'git', clone_url)
                try:
                    # Rendered README, cached for a week.
                    def get_readme(repo, github):
                        readme = repo.get_readme()
                        rendered = render_readme(readme.path, readme.decoded_content.decode('utf-8'),
                                                 graph['repo']['url'],
                                                 lambda raw: github.render_markdown(raw, repo).decode('utf-8'))
                        return dict(url=readme.html_url, content=rendered)
                    readme = self.cache.get(github_id, timedelta(days=7), 'github_readme',
                                            lambda: get_readme(self.repo, self.github),
                                            locked_getter=False)
                    self._set_unless_exists(package.urls, 'readme', readme['url'])
                    self._set_unless_exists(package.files.readme, 'url', readme['url'])
                    self._set_unless_exists(package.files.readme, 'content', readme['content'])
                except github.UnknownObjectException:
                    # Repository has no README.
                    pass
                # Look for a top-level LICENSE file in the HEAD tree.
                for entry in graph['repo']['tree']['entries']:
                    if os.path.basename(entry['name']).lower() == 'license':
                        def get_file(repo, name):
                            f = repo.get_file_contents(name)
                            return dict(url=f.html_url, string=str(b64decode(f.content)))
                        file = self.cache.get(github_id + '->' + entry['name'], timedelta(days=28), 'github_file',
                                              lambda: get_file(self.repo, entry['name']),
                                              locked_getter=False)
                        self._set_unless_exists(package, 'license', file['url'])
                        self._set_unless_exists(package, '_license_data', file['string'])
                        break
                if 'authors' not in package:
                    owner = graph['repo']['owner']
                    if 'userEmail' in owner:
                        # user-owned repo (GraphQL 'User' fragment above)
                        name = owner['name'] if owner['name'] is not None else owner['login']
                        author = DotMap(name=name,
                                        github=owner['login'])
                        email = owner['userEmail']
                        website = owner['websiteUrl']
                        if email is not None:
                            author.email = email
                        if website is not None:
                            author.website = website
                        package.authors = [author]
                    else:
                        # organization repo (GraphQL 'Organization' fragment above)
                        name = owner['name'] if owner['name'] is not None else owner['login']
                        author = DotMap(name=name,
                                        github=owner['login'])
                        email = owner['orgEmail']
                        website = owner['websiteUrl']
                        if email is not None:
                            author.email = email
                        if website is not None:
                            author.website = website
                        package.authors = [author]
                self._set_unless_exists(package.stats, 'github_prs', graph['repo']['openPRs']['totalCount'])
                self._set_unless_exists(package.stats, 'github_issues', graph['repo']['openIssues']['totalCount'])
                self._set_unless_exists(package.stats, 'github_stars', graph['repo']['stargazers']['totalCount'])
                self._set_unless_exists(package.stats, 'github_watchers', graph['repo']['watchers']['totalCount'])
                self._set_unless_exists(package.stats, 'github_forks', graph['repo']['forks']['totalCount'])
                if num_contributors is not None:
                    # NOTE(review): 'github_commits' stores the summed per-contributor
                    # totals from _v3_requests — confirm the key name is intended.
                    self._set_unless_exists(package.stats, 'github_commits', num_contributors)
                self._set_unless_exists(package.stats, 'github_latest_commit', latest_commit)
                if 'keywords' not in package:
                    package.keywords = []
                package.keywords.extend([r['topic']['name'] for r in graph['repo']['repositoryTopics']['nodes']])
        # Recipe URLs are derived even for packages without a top-level github url.
        for recipie in package.recipies:
            if 'github' in recipie.urls:
                self._set_unless_exists(recipie.urls, 'website', 'https://github.com/' + recipie.urls.github)
                self._set_unless_exists(recipie.urls, 'issues', 'https://github.com/' + recipie.urls.github + '/issues')
        return package
    def _v3_requests(self):
        """Fetch via the v3 REST API the fields the GraphQL query does not cover."""
        # TODO: the number of commits does not seem to be correct and sometimes fetching doesn't work at all
        contributors = self.repo.get_stats_contributors()
        if contributors is not None:
            num_contributors = sum([c.total for c in contributors])
        else:
            num_contributors = None
        commits = self.repo.get_commits()
        latest_commit = commits[0].commit.committer.date.isoformat()
        clone_url = self.repo.clone_url
        repo_owner = self.repo.owner.login
        repo_name = self.repo.name
        return dict(num_contributors=num_contributors, latest_commit=latest_commit,
                    clone_url=clone_url, repo_owner=repo_owner, repo_name=repo_name)
| 49.355856
| 120
| 0.446381
|
import json
import os
from base64 import b64decode
from datetime import timedelta
from threading import Semaphore
import github
from dotmap import DotMap
from github import GithubException
from conan_inquiry.transformers.base import BaseGithubTransformer
from conan_inquiry.util.general import render_readme
from conan_inquiry.util.travis import repo_has_travis
class GithubTransformer(BaseGithubTransformer):
github_limit = Semaphore(value=15)
def transform(self, package):
if 'github' in package.urls:
with self.github_limit:
github_id = package.urls.github.replace('.git', '')
try:
self._set_repo(github_id)
v3data = self.cache.get(github_id, timedelta(days=2), 'github_api',
lambda: self._v3_requests(),
locked_getter=False)
num_contributors = v3data['num_contributors']
latest_commit = v3data['latest_commit']
clone_url = v3data['clone_url']
repo_owner = v3data['repo_owner']
repo_name = v3data['repo_name']
graph_request = '''
query Repo($owner:String!, $name:String!) {
repo: repository(owner: $owner, name: $name) {
owner {
login
... on Organization {
name
orgEmail: email
websiteUrl
}
... on User {
name
userEmail: email
websiteUrl
}
}
tree: object(expression: "HEAD:") {
... on Tree {
entries {
name
}
}
}
repositoryTopics(first: 20) {
totalCount
nodes {
topic {
name
}
}
}
forks {
totalCount
}
description
hasIssuesEnabled
hasWikiEnabled
homepageUrl
url
openIssues: issues(states: OPEN) {
totalCount
}
closedIssues: issues(states: CLOSED) {
totalCount
}
openPRs: pullRequests(states: OPEN) {
totalCount
}
closedPRs: pullRequests(states: CLOSED) {
totalCount
}
pushedAt
stargazers {
totalCount
}
watchers {
totalCount
}
}
rateLimit {
cost
remaining
}
}
'''
graph_data = self.cache.get(github_id, timedelta(days=2), 'github_graph',
lambda: self.github_graph.execute(
graph_request,
dict(owner=repo_owner,
name=repo_name)),
locked_getter=False)
graph = json.loads(graph_data)['data']
except GithubException:
return package
if graph['repo']['description'] != package.name:
self._set_unless_exists(package, 'description', graph['repo']['description'])
self._set_unless_exists(package.urls, 'website', graph['repo']['homepageUrl'])
self._set_unless_exists(package.urls, 'website', 'https://github.com/' + github_id)
self._set_unless_exists(package.urls, 'code', graph['repo']['url'])
if graph['repo']['hasIssuesEnabled']:
self._set_unless_exists(package.urls, 'issues', graph['repo']['url'] + '/issues')
if graph['repo']['hasWikiEnabled']:
self._set_unless_exists(package.urls, 'wiki', graph['repo']['url'] + '/wiki')
if repo_has_travis(github_id, self.http):
self._set_unless_exists(package.urls, 'travis',
'https://travis-ci.org/' + github_id)
self._set_unless_exists(package.urls, 'git', clone_url)
try:
def get_readme(repo, github):
readme = repo.get_readme()
rendered = render_readme(readme.path, readme.decoded_content.decode('utf-8'),
graph['repo']['url'],
lambda raw: github.render_markdown(raw, repo).decode('utf-8'))
return dict(url=readme.html_url, content=rendered)
readme = self.cache.get(github_id, timedelta(days=7), 'github_readme',
lambda: get_readme(self.repo, self.github),
locked_getter=False)
self._set_unless_exists(package.urls, 'readme', readme['url'])
self._set_unless_exists(package.files.readme, 'url', readme['url'])
self._set_unless_exists(package.files.readme, 'content', readme['content'])
except github.UnknownObjectException:
pass
for entry in graph['repo']['tree']['entries']:
if os.path.basename(entry['name']).lower() == 'license':
def get_file(repo, name):
f = repo.get_file_contents(name)
return dict(url=f.html_url, string=str(b64decode(f.content)))
file = self.cache.get(github_id + '->' + entry['name'], timedelta(days=28), 'github_file',
lambda: get_file(self.repo, entry['name']),
locked_getter=False)
self._set_unless_exists(package, 'license', file['url'])
self._set_unless_exists(package, '_license_data', file['string'])
break
if 'authors' not in package:
owner = graph['repo']['owner']
if 'userEmail' in owner:
name = owner['name'] if owner['name'] is not None else owner['login']
author = DotMap(name=name,
github=owner['login'])
email = owner['userEmail']
website = owner['websiteUrl']
if email is not None:
author.email = email
if website is not None:
author.website = website
package.authors = [author]
else:
name = owner['name'] if owner['name'] is not None else owner['login']
author = DotMap(name=name,
github=owner['login'])
email = owner['orgEmail']
website = owner['websiteUrl']
if email is not None:
author.email = email
if website is not None:
author.website = website
package.authors = [author]
self._set_unless_exists(package.stats, 'github_prs', graph['repo']['openPRs']['totalCount'])
self._set_unless_exists(package.stats, 'github_issues', graph['repo']['openIssues']['totalCount'])
self._set_unless_exists(package.stats, 'github_stars', graph['repo']['stargazers']['totalCount'])
self._set_unless_exists(package.stats, 'github_watchers', graph['repo']['watchers']['totalCount'])
self._set_unless_exists(package.stats, 'github_forks', graph['repo']['forks']['totalCount'])
if num_contributors is not None:
self._set_unless_exists(package.stats, 'github_commits', num_contributors)
self._set_unless_exists(package.stats, 'github_latest_commit', latest_commit)
if 'keywords' not in package:
package.keywords = []
package.keywords.extend([r['topic']['name'] for r in graph['repo']['repositoryTopics']['nodes']])
for recipie in package.recipies:
if 'github' in recipie.urls:
self._set_unless_exists(recipie.urls, 'website', 'https://github.com/' + recipie.urls.github)
self._set_unless_exists(recipie.urls, 'issues', 'https://github.com/' + recipie.urls.github + '/issues')
return package
def _v3_requests(self):
contributors = self.repo.get_stats_contributors()
if contributors is not None:
num_contributors = sum([c.total for c in contributors])
else:
num_contributors = None
commits = self.repo.get_commits()
latest_commit = commits[0].commit.committer.date.isoformat()
clone_url = self.repo.clone_url
repo_owner = self.repo.owner.login
repo_name = self.repo.name
return dict(num_contributors=num_contributors, latest_commit=latest_commit,
clone_url=clone_url, repo_owner=repo_owner, repo_name=repo_name)
| true
| true
|
f70e1e3962bb07e74eb6aa882f579281c65d1686
| 1,179
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/appconfiguration/v20191001/__init__.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/appconfiguration/v20191001/__init__.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/appconfiguration/v20191001/__init__.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .configuration_store import *
from .get_configuration_store import *
from .list_configuration_store_key_value import *
from .list_configuration_store_keys import *
from ._inputs import *
from . import outputs
def _register_module():
    """Register this module's resource types with the Pulumi runtime.

    This lets the engine rehydrate resources of this module from their
    URN/type when reading stack state.
    """
    import pulumi
    from ... import _utilities

    class Module(pulumi.runtime.ResourceModule):
        _version = _utilities.get_semver_version()

        def version(self):
            return Module._version

        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            # This module exposes exactly one resource type.
            if typ != "azure-nextgen:appconfiguration/v20191001:ConfigurationStore":
                raise Exception(f"unknown resource type {typ}")
            return ConfigurationStore(name, pulumi.ResourceOptions(urn=urn))

    _module_instance = Module()
    pulumi.runtime.register_resource_module("azure-nextgen", "appconfiguration/v20191001", _module_instance)


_register_module()
| 32.75
| 108
| 0.709075
|
# Export this package's modules as members:
from ._enums import *
from .configuration_store import *
from .get_configuration_store import *
from .list_configuration_store_key_value import *
from .list_configuration_store_keys import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-nextgen:appconfiguration/v20191001:ConfigurationStore":
return ConfigurationStore(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-nextgen", "appconfiguration/v20191001", _module_instance)
_register_module()
| true
| true
|
f70e1ee3749cfb451aaed3f6eeb68e36db20480f
| 918
|
py
|
Python
|
inventory/urls.py
|
dentonya/python-django-sales-inventory-project
|
d0fcdf81136908a022e0f4eeca94fc0357473635
|
[
"Apache-2.0"
] | 1
|
2021-10-18T09:27:03.000Z
|
2021-10-18T09:27:03.000Z
|
inventory/urls.py
|
dentonya/python-django-sales-inventory-project
|
d0fcdf81136908a022e0f4eeca94fc0357473635
|
[
"Apache-2.0"
] | 1
|
2021-08-04T20:11:28.000Z
|
2021-08-04T20:11:28.000Z
|
inventory/urls.py
|
dentonya/python-django-sales-inventory-project
|
d0fcdf81136908a022e0f4eeca94fc0357473635
|
[
"Apache-2.0"
] | null | null | null |
"""inventory URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from .views import dashboard
# URL routing table: Django admin, the dashboard landing page, and the
# per-app routes delegated to the ``users`` and ``store`` URLconfs.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', dashboard, name='dashboard'),
    path('users/', include('users.urls')),
    path('store/', include('store.urls')),
]
| 35.307692
| 77
| 0.697168
|
from django.contrib import admin
from django.urls import path, include
from .views import dashboard
urlpatterns = [
path('admin/', admin.site.urls),
path('', dashboard, name='dashboard'),
path('users/', include('users.urls')),
path('store/', include('store.urls')),
]
| true
| true
|
f70e1eec634ed0c89cd786687c6b726187e816d5
| 11,426
|
py
|
Python
|
src/train.py
|
Gordonbuck/ml-oov-we
|
ce28cd8b556a16125ba36cd41781a3e60bb26422
|
[
"MIT"
] | null | null | null |
src/train.py
|
Gordonbuck/ml-oov-we
|
ce28cd8b556a16125ba36cd41781a3e60bb26422
|
[
"MIT"
] | null | null | null |
src/train.py
|
Gordonbuck/ml-oov-we
|
ce28cd8b556a16125ba36cd41781a3e60bb26422
|
[
"MIT"
] | null | null | null |
import higher
from leap import Leap
import numpy as np
import os
import torch
import torch.nn as nn
import gc
def train(model, source_corpus, char2idx, args, device):
    """Train ``model`` on ``source_corpus`` with a negative-cosine-similarity loss.

    When ``args.lang_model`` is truthy, a cross-entropy word-prediction loss is
    added during training, and both losses are tracked during validation.  The
    model with the best (lowest) validation cosine loss is checkpointed to
    ``args.save_dir/model.pt``.  The LR scheduler reduces the learning rate on
    validation plateaus; training stops early once the LR drops below
    ``args.lr_early_stop``.
    """
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_init)
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=args.lr_decay, patience=args.patience,
                                                              threshold=args.threshold)
    # Cosine losses lie in [-1, 1], so 1 acts as "worse than anything seen".
    best_valid_cosine = 1
    for epoch in np.arange(args.n_epochs):
        valid_cosine = []
        valid_ce = []
        model.train()
        for batch in np.arange(args.n_batch):
            train_contexts, train_targets, train_vocabs, train_inds = source_corpus.get_batch(args.batch_size,
                                                                                              args.n_shot,
                                                                                              char2idx, device,
                                                                                              fixed=args.fixed_shot,
                                                                                              return_inds=True)
            optimizer.zero_grad()
            if args.lang_model:
                pred_emb, pred_ind = model.forward(train_contexts, train_vocabs, lang_model=args.lang_model)
                loss = nn.functional.cross_entropy(pred_ind, train_inds)
                loss += -nn.functional.cosine_similarity(pred_emb, train_targets).mean()
            else:
                pred_emb = model.forward(train_contexts, train_vocabs)
                loss = -nn.functional.cosine_similarity(pred_emb, train_targets).mean()
            loss.backward()
            optimizer.step()
        model.eval()
        with torch.no_grad():
            for batch in np.arange(args.n_batch):
                valid_contexts, valid_targets, valid_vocabs, valid_inds = source_corpus.get_batch(args.batch_size,
                                                                                                  args.n_shot,
                                                                                                  char2idx, device,
                                                                                                  use_valid=True,
                                                                                                  fixed=args.fixed_shot,
                                                                                                  return_inds=True)
                if args.lang_model:
                    pred_emb, pred_ind = model.forward(valid_contexts, valid_vocabs, lang_model=args.lang_model)
                    ce_loss = nn.functional.cross_entropy(pred_ind, valid_inds).mean()
                    valid_ce.append(ce_loss.cpu().numpy())
                else:
                    pred_emb = model.forward(valid_contexts, valid_vocabs)
                # BUG FIX: the cosine loss was previously recorded only in the
                # non-lang-model branch, so with ``args.lang_model`` set,
                # ``valid_cosine`` stayed empty, ``np.average([])`` produced
                # NaN, the scheduler stepped on NaN, and ``NaN < best`` meant
                # the checkpoint was never written.  Track it in both modes,
                # mirroring the combined training objective above.
                cosine_loss = -nn.functional.cosine_similarity(pred_emb, valid_targets).mean()
                valid_cosine.append(cosine_loss.cpu().numpy())
        avg_valid = np.average(valid_cosine)
        lr_scheduler.step(avg_valid)
        if args.lang_model:
            avg_ce = np.average(valid_ce)
            print(f"Average cosine loss: {avg_valid}; Average cross entropy loss: {avg_ce}")
        else:
            print(f"Average cosine loss: {avg_valid}")
        if avg_valid < best_valid_cosine:
            best_valid_cosine = avg_valid
            torch.save(model.state_dict(), os.path.join(args.save_dir, 'model.pt'))
        if optimizer.param_groups[0]['lr'] < args.lr_early_stop:
            print('LR early stop')
            break
def maml_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):
    """Adapt ``model`` from ``source_corpus`` to ``target_corpus`` with MAML.

    Each meta-batch unrolls ``args.n_inner_batch`` differentiable inner-loop
    Adam steps on source batches (via ``higher``), then backpropagates the
    target-task cosine loss through the unrolled updates into the model's
    parameters.  The model with the best target validation cosine loss is
    checkpointed to ``args.save_dir/maml_model.pt``; adaptation stops early
    once the meta learning rate decays below ``args.maml_lr_early_stop``.

    NOTE(review): ``lang_model_n_words`` is unused in this function —
    presumably kept for signature parity with other routines; confirm
    before removing.
    """
    model = model.to(device)
    meta_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_meta_lr_init)
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,
                                                              patience=args.patience, threshold=args.threshold)
    # Cosine losses lie in [-1, 1]; 3 is effectively "worse than anything".
    best_score = 3
    for meta_epoch in np.arange(args.n_meta_epochs):
        # Release graph memory held over from the previous epoch's unrolled
        # inner loops before building new ones.
        gc.collect()
        source_valid_cosine = []
        target_valid_cosine = []
        model.train()
        with torch.backends.cudnn.flags(benchmark=True):
            for meta_batch in np.arange(args.n_meta_batch):
                inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_inner_lr_init)
                meta_optimizer.zero_grad()
                # copy_initial_weights=False lets gradients flow back to the
                # model's own parameters through the inner-loop trajectory.
                with higher.innerloop_ctx(model, inner_optimizer, copy_initial_weights=False) as (fmodel, diffopt):
                    for inner_batch in np.arange(args.n_inner_batch):
                        source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(
                            args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)
                        pred_emb = fmodel.forward(source_train_contexts, source_train_vocabs)
                        loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()
                        diffopt.step(loss)
                    # Outer objective: target-domain loss after the inner
                    # adaptation steps.
                    target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(
                        args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,
                        repeat_ctxs=args.meta_repeat_ctxs)
                    pred_emb = fmodel.forward(target_train_contexts, target_train_vocabs)
                    loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()
                    loss.backward()
                meta_optimizer.step()
        model.eval()
        with torch.no_grad():
            for batch in np.arange(args.n_batch):
                source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(
                    args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)
                pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)
                loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()
                source_valid_cosine += [loss.cpu().numpy()]
                target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(
                    args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,
                    repeat_ctxs=args.meta_repeat_ctxs)
                pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)
                loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()
                target_valid_cosine += [loss.cpu().numpy()]
        # Scheduling and checkpointing key off the target-domain loss only;
        # the source loss is logged for reference.
        avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)
        score = avg_target_valid
        lr_scheduler.step(score)
        print(f"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}")
        if score < best_score:
            best_score = score
            torch.save(model.state_dict(), os.path.join(args.save_dir, 'maml_model.pt'))
        if meta_optimizer.param_groups[0]['lr'] < args.maml_lr_early_stop:
            print('LR early stop')
            break
def leap_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):
    """Adapt ``model`` from ``source_corpus`` to ``target_corpus`` with Leap.

    For each meta-batch, two task trajectories (source, then target) of
    ``args.n_task_steps`` Adam steps are traced; ``leap.update`` accumulates
    the meta-gradient along each trajectory and the meta optimizer then moves
    the shared initialization.  The model with the best target validation
    cosine loss is checkpointed to ``args.save_dir/leap_model.pt``; adaptation
    stops early once the meta LR decays below ``args.leap_lr_early_stop``.

    NOTE(review): ``lang_model_n_words`` is unused in this function —
    presumably kept for signature parity with other routines; confirm
    before removing.
    """
    model = model.to(device)
    leap = Leap(model)
    meta_optimizer = torch.optim.Adam(leap.parameters(), lr=args.leap_meta_lr_init)
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,
                                                              patience=args.patience, threshold=args.threshold)
    # Cosine losses lie in [-1, 1]; 3 is effectively "worse than anything".
    best_score = 3
    for meta_epoch in np.arange(args.n_meta_epochs):
        source_valid_cosine = []
        target_valid_cosine = []
        model.train()
        for meta_batch in np.arange(args.n_meta_batch):
            meta_optimizer.zero_grad()
            # Source task trajectory: reset Leap's per-task state and load the
            # current meta-initialization into the model.
            leap.init_task()
            leap.to(model)
            inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)
            for inner_batch in np.arange(args.n_task_steps):
                inner_optimizer.zero_grad()
                source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(
                    args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)
                pred_emb = model.forward(source_train_contexts, source_train_vocabs)
                loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()
                loss.backward()
                # Accumulate the meta-gradient from this step of the
                # trajectory before the inner optimizer mutates the weights.
                leap.update(loss, model)
                inner_optimizer.step()
            # Target task trajectory, from the same meta-initialization.
            leap.init_task()
            leap.to(model)
            inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)
            for inner_batch in np.arange(args.n_task_steps):
                inner_optimizer.zero_grad()
                target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(
                    args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,
                    repeat_ctxs=args.meta_repeat_ctxs)
                pred_emb = model.forward(target_train_contexts, target_train_vocabs)
                loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()
                loss.backward()
                leap.update(loss, model)
                inner_optimizer.step()
            # Normalize accumulated meta-gradients, step the initialization,
            # and load the updated initialization back into the model.
            leap.normalize()
            meta_optimizer.step()
            leap.to(model)
        model.eval()
        with torch.no_grad():
            for batch in np.arange(args.n_batch):
                source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(
                    args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)
                pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)
                loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()
                source_valid_cosine += [loss.cpu().numpy()]
                target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(
                    args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,
                    repeat_ctxs=args.meta_repeat_ctxs)
                pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)
                loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()
                target_valid_cosine += [loss.cpu().numpy()]
        # Scheduling and checkpointing key off the target-domain loss only;
        # the source loss is logged for reference.
        avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)
        score = avg_target_valid
        lr_scheduler.step(score)
        print(f"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}")
        if score < best_score:
            best_score = score
            torch.save(model.state_dict(), os.path.join(args.save_dir, 'leap_model.pt'))
        if meta_optimizer.param_groups[0]['lr'] < args.leap_lr_early_stop:
            print('LR early stop')
            break
| 52.412844
| 120
| 0.591896
|
import higher
from leap import Leap
import numpy as np
import os
import torch
import torch.nn as nn
import gc
def train(model, source_corpus, char2idx, args, device):
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=args.lr_decay, patience=args.patience,
threshold=args.threshold)
best_valid_cosine = 1
for epoch in np.arange(args.n_epochs):
valid_cosine = []
valid_ce = []
model.train()
for batch in np.arange(args.n_batch):
train_contexts, train_targets, train_vocabs, train_inds = source_corpus.get_batch(args.batch_size,
args.n_shot,
char2idx, device,
fixed=args.fixed_shot,
return_inds=True)
optimizer.zero_grad()
if args.lang_model:
pred_emb, pred_ind = model.forward(train_contexts, train_vocabs, lang_model=args.lang_model)
loss = nn.functional.cross_entropy(pred_ind, train_inds)
loss += -nn.functional.cosine_similarity(pred_emb, train_targets).mean()
else:
pred_emb = model.forward(train_contexts, train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, train_targets).mean()
loss.backward()
optimizer.step()
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
valid_contexts, valid_targets, valid_vocabs, valid_inds = source_corpus.get_batch(args.batch_size,
args.n_shot,
char2idx, device,
use_valid=True,
fixed=args.fixed_shot,
return_inds=True)
if args.lang_model:
pred_emb, pred_ind = model.forward(valid_contexts, valid_vocabs, lang_model=args.lang_model)
loss = nn.functional.cross_entropy(pred_ind, valid_inds).mean()
valid_ce += [loss.cpu().numpy()]
else:
pred_emb = model.forward(valid_contexts, valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, valid_targets).mean()
valid_cosine += [loss.cpu().numpy()]
avg_valid = np.average(valid_cosine)
lr_scheduler.step(avg_valid)
if args.lang_model:
avg_ce = np.average(valid_ce)
print(f"Average cosine loss: {avg_valid}; Average cross entropy loss: {avg_ce}")
else:
print(f"Average cosine loss: {avg_valid}")
if avg_valid < best_valid_cosine:
best_valid_cosine = avg_valid
torch.save(model.state_dict(), os.path.join(args.save_dir, 'model.pt'))
if optimizer.param_groups[0]['lr'] < args.lr_early_stop:
print('LR early stop')
break
def maml_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):
model = model.to(device)
meta_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_meta_lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,
patience=args.patience, threshold=args.threshold)
best_score = 3
for meta_epoch in np.arange(args.n_meta_epochs):
gc.collect()
source_valid_cosine = []
target_valid_cosine = []
model.train()
with torch.backends.cudnn.flags(benchmark=True):
for meta_batch in np.arange(args.n_meta_batch):
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_inner_lr_init)
meta_optimizer.zero_grad()
with higher.innerloop_ctx(model, inner_optimizer, copy_initial_weights=False) as (fmodel, diffopt):
for inner_batch in np.arange(args.n_inner_batch):
source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)
pred_emb = fmodel.forward(source_train_contexts, source_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()
diffopt.step(loss)
target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = fmodel.forward(target_train_contexts, target_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()
loss.backward()
meta_optimizer.step()
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)
pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()
source_valid_cosine += [loss.cpu().numpy()]
target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()
target_valid_cosine += [loss.cpu().numpy()]
avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)
score = avg_target_valid
lr_scheduler.step(score)
print(f"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}")
if score < best_score:
best_score = score
torch.save(model.state_dict(), os.path.join(args.save_dir, 'maml_model.pt'))
if meta_optimizer.param_groups[0]['lr'] < args.maml_lr_early_stop:
print('LR early stop')
break
def leap_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):
model = model.to(device)
leap = Leap(model)
meta_optimizer = torch.optim.Adam(leap.parameters(), lr=args.leap_meta_lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,
patience=args.patience, threshold=args.threshold)
best_score = 3
for meta_epoch in np.arange(args.n_meta_epochs):
source_valid_cosine = []
target_valid_cosine = []
model.train()
for meta_batch in np.arange(args.n_meta_batch):
meta_optimizer.zero_grad()
leap.init_task()
leap.to(model)
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)
for inner_batch in np.arange(args.n_task_steps):
inner_optimizer.zero_grad()
source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)
pred_emb = model.forward(source_train_contexts, source_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()
loss.backward()
leap.update(loss, model)
inner_optimizer.step()
leap.init_task()
leap.to(model)
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)
for inner_batch in np.arange(args.n_task_steps):
inner_optimizer.zero_grad()
target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_train_contexts, target_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()
loss.backward()
leap.update(loss, model)
inner_optimizer.step()
leap.normalize()
meta_optimizer.step()
leap.to(model)
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)
pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()
source_valid_cosine += [loss.cpu().numpy()]
target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()
target_valid_cosine += [loss.cpu().numpy()]
avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)
score = avg_target_valid
lr_scheduler.step(score)
print(f"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}")
if score < best_score:
best_score = score
torch.save(model.state_dict(), os.path.join(args.save_dir, 'leap_model.pt'))
if meta_optimizer.param_groups[0]['lr'] < args.leap_lr_early_stop:
print('LR early stop')
break
| true
| true
|
f70e1f1f5c53199c00231881a0ed05b757d4ed44
| 15,360
|
py
|
Python
|
p2p/connection.py
|
g-r-a-n-t/trinity
|
f108b6cd34ed9aabfcf9e235badd91597650ecd5
|
[
"MIT"
] | null | null | null |
p2p/connection.py
|
g-r-a-n-t/trinity
|
f108b6cd34ed9aabfcf9e235badd91597650ecd5
|
[
"MIT"
] | null | null | null |
p2p/connection.py
|
g-r-a-n-t/trinity
|
f108b6cd34ed9aabfcf9e235badd91597650ecd5
|
[
"MIT"
] | null | null | null |
import asyncio
import collections
import contextlib
import functools
from typing import (
Any,
DefaultDict,
Dict,
List,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
from async_service import Service
from async_service.asyncio import cleanup_tasks
from cached_property import cached_property
from eth_keys import keys
from p2p.abc import (
BehaviorAPI,
CommandAPI,
ConnectionAPI,
HandlerFn,
HandshakeReceiptAPI,
LogicAPI,
MultiplexerAPI,
NodeAPI,
ProtocolAPI,
SessionAPI,
SubscriptionAPI,
THandshakeReceipt,
TLogic,
TProtocol,
)
from p2p.constants import PEER_READY_TIMEOUT
from p2p.disconnect import DisconnectReason
from p2p.exceptions import (
DuplicateAPI,
MalformedMessage,
PeerConnectionLost,
ReceiptNotFound,
UnknownAPI,
UnknownProtocol,
UnknownProtocolCommand,
)
from p2p.asyncio_utils import create_task, wait_first
from p2p.subscription import Subscription
from p2p.p2p_proto import BaseP2PProtocol, DevP2PReceipt, Disconnect
from p2p.typing import Capabilities
from p2p._utils import get_logger
if TYPE_CHECKING:
from p2p.peer import BasePeer # noqa: F401
class Connection(ConnectionAPI, Service):
_protocol_handlers: DefaultDict[
Type[ProtocolAPI],
Set[HandlerFn]
]
_msg_handlers: Set[HandlerFn]
_command_handlers: DefaultDict[
Type[CommandAPI[Any]],
Set[HandlerFn]
]
_logics: Dict[str, LogicAPI]
    def __init__(self,
                 multiplexer: MultiplexerAPI,
                 devp2p_receipt: DevP2PReceipt,
                 protocol_receipts: Sequence[HandshakeReceiptAPI],
                 is_dial_out: bool) -> None:
        """Wrap an already-handshaked, streaming multiplexer in a Connection.

        :param multiplexer: the (already streaming) transport multiplexer.
        :param devp2p_receipt: receipt of the base devp2p handshake.
        :param protocol_receipts: receipts of the sub-protocol handshakes.
        :param is_dial_out: True if we initiated the connection.
        :raises: whatever streaming error the multiplexer has already hit.
        """
        self.logger = get_logger('p2p.connection.Connection')
        # The multiplexer passed to us will have been started when performing the handshake, so it
        # is already reading messages from the transport and storing them in per-protocol queues.
        self._multiplexer = multiplexer
        # Stop early in case the multiplexer is no longer streaming.
        self._multiplexer.raise_if_streaming_error()
        self._devp2p_receipt = devp2p_receipt
        self.protocol_receipts = tuple(protocol_receipts)
        self.is_dial_out = is_dial_out
        # Handler registries: per-protocol, per-command-type, and catch-all.
        self._protocol_handlers = collections.defaultdict(set)
        self._command_handlers = collections.defaultdict(set)
        self._msg_handlers = set()
        # An event that controls when the connection will start reading from
        # the individual multiplexed protocol streams and feeding handlers.
        # This ensures that the connection does not start consuming messages
        # before all necessary handlers have been added
        self._handlers_ready = asyncio.Event()
        # Set by run_behaviors() once every applicable behavior is applied.
        self.behaviors_applied = asyncio.Event()
        # Named LogicAPI extensions attached via add_logic().
        self._logics = {}
def __str__(self) -> str:
return f"Connection-{self.session}"
def __repr__(self) -> str:
return f"<Connection {self.session!r} {self._multiplexer!r} dial_out={self.is_dial_out}>"
    @property
    def is_streaming_messages(self) -> bool:
        # True once start_protocol_streams() has released the handler feeds.
        return self._handlers_ready.is_set()
    def start_protocol_streams(self) -> None:
        # Release the _feed_protocol_handlers() tasks waiting on this event so
        # multiplexed protocol messages start flowing to registered handlers.
        self._handlers_ready.set()
    async def run_behaviors(self, behaviors: Tuple[BehaviorAPI, ...]) -> None:
        """Apply each applicable behavior, then block until the connection ends.

        Sets ``behaviors_applied`` once every behavior's ``apply()`` context
        has been entered; afterwards runs the ``post_apply()`` hooks and waits
        for either the connection to finish or any behavior task to exit.
        """
        async with contextlib.AsyncExitStack() as stack:
            futures: List[asyncio.Task[Any]] = [
                create_task(self.manager.wait_finished(), 'Connection/run_behaviors/wait_finished')]
            for behavior in behaviors:
                if behavior.should_apply_to(self):
                    behavior_exit = await stack.enter_async_context(behavior.apply(self))
                    futures.append(behavior_exit)
            self.behaviors_applied.set()
            # If wait_first() is called, cleanup_tasks() will be a no-op, but if any post_apply()
            # calls raise an exception, it will ensure we don't leak pending tasks that would
            # cause asyncio to complain.
            async with cleanup_tasks(*futures):
                try:
                    for behavior in behaviors:
                        behavior.post_apply()
                    await wait_first(futures)
                except PeerConnectionLost:
                    # Any of our behaviors may propagate a PeerConnectionLost, which is to be
                    # expected as many Connection APIs used by them can raise that. To avoid a
                    # DaemonTaskExit since we're returning silently, ensure we're cancelled.
                    self.manager.cancel()
    async def run_peer(self, peer: 'BasePeer') -> None:
        """
        Run the peer as a child service.
        A peer must always run as a child of the connection so that it has an open connection
        until it finishes its cleanup.
        """
        # Apply the peer's behaviors first and wait until they're all active,
        # so the peer never runs without its behaviors in place.
        self.manager.run_daemon_task(self.run_behaviors, peer.get_behaviors())
        await self.behaviors_applied.wait()
        self.manager.run_daemon_child_service(peer)
        # Bound how long we wait for the peer service to start and signal
        # readiness; a stuck peer should not hang the connection forever.
        await asyncio.wait_for(peer.manager.wait_started(), timeout=PEER_READY_TIMEOUT)
        await asyncio.wait_for(peer.ready.wait(), timeout=PEER_READY_TIMEOUT)
#
# Primary properties of the connection
#
    @cached_property
    def is_dial_in(self) -> bool:
        # Inverse of is_dial_out: True when the remote initiated the connection.
        return not self.is_dial_out
    @cached_property
    def remote(self) -> NodeAPI:
        # The remote node, as known to the underlying multiplexer.
        return self._multiplexer.remote
    @cached_property
    def session(self) -> SessionAPI:
        # The session object identifying this connection instance.
        return self._multiplexer.session
    @property
    def is_closing(self) -> bool:
        # Live (not cached) view of the multiplexer's closing state.
        return self._multiplexer.is_closing
    def __del__(self) -> None:
        # This is necessary because the multiplexer passed to our constructor will be streaming,
        # and if for some reason our run() method is not called, we'd leave the multiplexer
        # streaming indefinitely. We might still get asyncio warnings (about a task being destroyed
        # while still pending) if that happens, but this is the best we can do.
        self._multiplexer.cancel_streaming()
    async def run(self) -> None:
        """Service entry point: feed protocol handlers until streaming ends.

        Spawns one feeder task per negotiated protocol, then waits for the
        multiplexer to stop streaming.  A MalformedMessage from the remote
        triggers a best-effort Disconnect(BAD_PROTOCOL); either way the
        service is cancelled on exit.
        """
        # Our multiplexer will already be streaming in the background (as it was used during
        # handshake), so we do this to ensure we only start if it is still running.
        self._multiplexer.raise_if_streaming_error()
        for protocol in self._multiplexer.get_protocols():
            self.manager.run_daemon_task(self._feed_protocol_handlers, protocol)
        try:
            await self._multiplexer.wait_streaming_finished()
        except PeerConnectionLost:
            # Expected when the remote goes away; nothing to clean up here.
            pass
        except MalformedMessage as err:
            self.logger.debug(
                "Disconnecting peer %s for sending MalformedMessage: %s",
                self.remote,
                err,
                exc_info=True,
            )
            try:
                self.get_base_protocol().send(Disconnect(DisconnectReason.BAD_PROTOCOL))
            except PeerConnectionLost:
                self.logger.debug(
                    "%s went away while trying to disconnect for MalformedMessage",
                    self,
                )
        finally:
            self.manager.cancel()
#
# Subscriptions/Handler API
#
    async def _feed_protocol_handlers(self, protocol: ProtocolAPI) -> None:
        """Dispatch every message of ``protocol`` to the registered handlers.

        Waits (max 10s) for ``start_protocol_streams()`` before consuming,
        then fans each command out to protocol handlers, command-type
        handlers, and catch-all message handlers as fire-and-forget tasks.
        """
        # do not start consuming from the protocol stream until
        # `start_protocol_streams` has been called and the multiplexer is
        # active.
        try:
            await asyncio.wait_for(self._handlers_ready.wait(), timeout=10)
        except asyncio.TimeoutError as err:
            self.logger.warning('Timedout waiting for handler ready signal')
            raise asyncio.TimeoutError(
                "The handlers ready event was never set. Ensure that "
                "`Connection.start_protocol_streams()` is being called"
            ) from err
        async for cmd in self._multiplexer.stream_protocol_messages(protocol):
            self.logger.debug2('Handling command: %s', type(cmd))
            # local copy to prevent multation while iterating
            protocol_handlers = set(self._protocol_handlers[type(protocol)])
            for proto_handler_fn in protocol_handlers:
                self.logger.debug2(
                    'Running protocol handler %s for protocol=%s command=%s',
                    proto_handler_fn,
                    protocol,
                    type(cmd),
                )
                self.manager.run_task(proto_handler_fn, self, cmd)
            # Command-type handlers and catch-all message handlers share one
            # dispatch set (also copied to guard against mutation).
            command_handlers = set(self._command_handlers[type(cmd)])
            command_handlers.update(self._msg_handlers)
            for cmd_handler_fn in command_handlers:
                self.logger.debug2(
                    'Running command handler %s for protocol=%s command=%s',
                    cmd_handler_fn,
                    protocol,
                    type(cmd),
                )
                self.manager.run_task(cmd_handler_fn, self, cmd)
        # XXX: This ugliness is needed because Multiplexer.stream_protocol_messages() stops as
        # soon as the transport is closed, and that may happen immediately after we received a
        # Disconnect+EOF from a remote, but before we've had a chance to process the disconnect,
        # which would cause a DaemonTaskExit error
        # (https://github.com/ethereum/trinity/issues/1733).
        if self.is_closing:
            try:
                await asyncio.wait_for(self.manager.wait_finished(), timeout=2)
            except asyncio.TimeoutError:
                self.logger.error(
                    "stream_protocol_messages() terminated but Connection was never cancelled, "
                    "this will cause the Connection to crash with a DaemonTaskExit")
def add_protocol_handler(self,
protocol_class: Type[ProtocolAPI],
handler_fn: HandlerFn,
) -> SubscriptionAPI:
if not self._multiplexer.has_protocol(protocol_class):
raise UnknownProtocol(
f"Protocol {protocol_class} was not found int he connected "
f"protocols: {self._multiplexer.get_protocols()}"
)
self._protocol_handlers[protocol_class].add(handler_fn)
cancel_fn = functools.partial(
self._protocol_handlers[protocol_class].remove,
handler_fn,
)
return Subscription(cancel_fn)
def add_msg_handler(self, handler_fn: HandlerFn) -> SubscriptionAPI:
self._msg_handlers.add(handler_fn)
cancel_fn = functools.partial(self._msg_handlers.remove, handler_fn)
return Subscription(cancel_fn)
def add_command_handler(self,
command_type: Type[CommandAPI[Any]],
handler_fn: HandlerFn,
) -> SubscriptionAPI:
for protocol in self._multiplexer.get_protocols():
if protocol.supports_command(command_type):
self._command_handlers[command_type].add(handler_fn)
cancel_fn = functools.partial(
self._command_handlers[command_type].remove,
handler_fn,
)
return Subscription(cancel_fn)
else:
raise UnknownProtocolCommand(
f"Command {command_type} was not found in the connected "
f"protocols: {self._multiplexer.get_protocols()}"
)
#
# API extension
#
def add_logic(self, name: str, logic: LogicAPI) -> SubscriptionAPI:
if name in self._logics:
raise DuplicateAPI(
f"There is already an API registered under the name '{name}': "
f"{self._logics[name]}"
)
self._logics[name] = logic
cancel_fn = functools.partial(self.remove_logic, name)
return Subscription(cancel_fn)
def remove_logic(self, name: str) -> None:
self._logics.pop(name)
def has_logic(self, name: str) -> bool:
if self.is_closing:
# This is a safety net, really, as the Peer should never call this if it is no longer
# alive.
raise PeerConnectionLost("Cannot look up subprotocol when connection is closing")
return name in self._logics
def get_logic(self, name: str, logic_type: Type[TLogic]) -> TLogic:
if not self.has_logic(name):
raise UnknownAPI(f"No API registered for the name '{name}'")
logic = self._logics[name]
if isinstance(logic, logic_type):
return logic
else:
raise TypeError(
f"Wrong logic type. expected: {logic_type} got: {type(logic)}"
)
#
# Access to underlying Multiplexer
#
def get_multiplexer(self) -> MultiplexerAPI:
return self._multiplexer
#
# Base Protocol shortcuts
#
def get_base_protocol(self) -> BaseP2PProtocol:
return self._multiplexer.get_base_protocol()
def get_p2p_receipt(self) -> DevP2PReceipt:
return self._devp2p_receipt
#
# Protocol APIS
#
def has_protocol(self, protocol_identifier: Union[ProtocolAPI, Type[ProtocolAPI]]) -> bool:
return self._multiplexer.has_protocol(protocol_identifier)
def get_protocols(self) -> Tuple[ProtocolAPI, ...]:
return self._multiplexer.get_protocols()
def get_protocol_by_type(self, protocol_type: Type[TProtocol]) -> TProtocol:
return self._multiplexer.get_protocol_by_type(protocol_type)
def get_protocol_for_command_type(self, command_type: Type[CommandAPI[Any]]) -> ProtocolAPI:
return self._multiplexer.get_protocol_for_command_type(command_type)
def get_receipt_by_type(self, receipt_type: Type[THandshakeReceipt]) -> THandshakeReceipt:
for receipt in self.protocol_receipts:
if isinstance(receipt, receipt_type):
return receipt
else:
raise ReceiptNotFound(f"Receipt not found: {receipt_type}")
#
# Connection Metadata
#
@cached_property
def remote_capabilities(self) -> Capabilities:
return self._devp2p_receipt.capabilities
@cached_property
def remote_p2p_version(self) -> int:
return self._devp2p_receipt.version
@cached_property
def negotiated_p2p_version(self) -> int:
return self.get_base_protocol().version
@cached_property
def remote_public_key(self) -> keys.PublicKey:
return keys.PublicKey(self._devp2p_receipt.remote_public_key)
@cached_property
def client_version_string(self) -> str:
return self._devp2p_receipt.client_version_string
@cached_property
def safe_client_version_string(self) -> str:
# limit number of chars to be displayed, and try to keep printable ones only
# MAGIC 256: arbitrary, "should be enough for everybody"
if len(self.client_version_string) <= 256:
return self.client_version_string
truncated_client_version_string = self.client_version_string[:253] + '...'
if truncated_client_version_string.isprintable():
return truncated_client_version_string
else:
return repr(truncated_client_version_string)
| 37.832512
| 100
| 0.641667
|
import asyncio
import collections
import contextlib
import functools
from typing import (
Any,
DefaultDict,
Dict,
List,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
from async_service import Service
from async_service.asyncio import cleanup_tasks
from cached_property import cached_property
from eth_keys import keys
from p2p.abc import (
BehaviorAPI,
CommandAPI,
ConnectionAPI,
HandlerFn,
HandshakeReceiptAPI,
LogicAPI,
MultiplexerAPI,
NodeAPI,
ProtocolAPI,
SessionAPI,
SubscriptionAPI,
THandshakeReceipt,
TLogic,
TProtocol,
)
from p2p.constants import PEER_READY_TIMEOUT
from p2p.disconnect import DisconnectReason
from p2p.exceptions import (
DuplicateAPI,
MalformedMessage,
PeerConnectionLost,
ReceiptNotFound,
UnknownAPI,
UnknownProtocol,
UnknownProtocolCommand,
)
from p2p.asyncio_utils import create_task, wait_first
from p2p.subscription import Subscription
from p2p.p2p_proto import BaseP2PProtocol, DevP2PReceipt, Disconnect
from p2p.typing import Capabilities
from p2p._utils import get_logger
if TYPE_CHECKING:
from p2p.peer import BasePeer
class Connection(ConnectionAPI, Service):
_protocol_handlers: DefaultDict[
Type[ProtocolAPI],
Set[HandlerFn]
]
_msg_handlers: Set[HandlerFn]
_command_handlers: DefaultDict[
Type[CommandAPI[Any]],
Set[HandlerFn]
]
_logics: Dict[str, LogicAPI]
def __init__(self,
multiplexer: MultiplexerAPI,
devp2p_receipt: DevP2PReceipt,
protocol_receipts: Sequence[HandshakeReceiptAPI],
is_dial_out: bool) -> None:
self.logger = get_logger('p2p.connection.Connection')
self._multiplexer = multiplexer
self._multiplexer.raise_if_streaming_error()
self._devp2p_receipt = devp2p_receipt
self.protocol_receipts = tuple(protocol_receipts)
self.is_dial_out = is_dial_out
self._protocol_handlers = collections.defaultdict(set)
self._command_handlers = collections.defaultdict(set)
self._msg_handlers = set()
self._handlers_ready = asyncio.Event()
self.behaviors_applied = asyncio.Event()
self._logics = {}
def __str__(self) -> str:
return f"Connection-{self.session}"
def __repr__(self) -> str:
return f"<Connection {self.session!r} {self._multiplexer!r} dial_out={self.is_dial_out}>"
@property
def is_streaming_messages(self) -> bool:
return self._handlers_ready.is_set()
def start_protocol_streams(self) -> None:
self._handlers_ready.set()
async def run_behaviors(self, behaviors: Tuple[BehaviorAPI, ...]) -> None:
async with contextlib.AsyncExitStack() as stack:
futures: List[asyncio.Task[Any]] = [
create_task(self.manager.wait_finished(), 'Connection/run_behaviors/wait_finished')]
for behavior in behaviors:
if behavior.should_apply_to(self):
behavior_exit = await stack.enter_async_context(behavior.apply(self))
futures.append(behavior_exit)
self.behaviors_applied.set()
# cause asyncio to complain.
async with cleanup_tasks(*futures):
try:
for behavior in behaviors:
behavior.post_apply()
await wait_first(futures)
except PeerConnectionLost:
# Any of our behaviors may propagate a PeerConnectionLost, which is to be
# expected as many Connection APIs used by them can raise that. To avoid a
# DaemonTaskExit since we're returning silently, ensure we're cancelled.
self.manager.cancel()
async def run_peer(self, peer: 'BasePeer') -> None:
self.manager.run_daemon_task(self.run_behaviors, peer.get_behaviors())
await self.behaviors_applied.wait()
self.manager.run_daemon_child_service(peer)
await asyncio.wait_for(peer.manager.wait_started(), timeout=PEER_READY_TIMEOUT)
await asyncio.wait_for(peer.ready.wait(), timeout=PEER_READY_TIMEOUT)
#
# Primary properties of the connection
#
@cached_property
def is_dial_in(self) -> bool:
return not self.is_dial_out
@cached_property
def remote(self) -> NodeAPI:
return self._multiplexer.remote
@cached_property
def session(self) -> SessionAPI:
return self._multiplexer.session
@property
def is_closing(self) -> bool:
return self._multiplexer.is_closing
def __del__(self) -> None:
# This is necessary because the multiplexer passed to our constructor will be streaming,
# and if for some reason our run() method is not called, we'd leave the multiplexer
self._multiplexer.cancel_streaming()
async def run(self) -> None:
self._multiplexer.raise_if_streaming_error()
for protocol in self._multiplexer.get_protocols():
self.manager.run_daemon_task(self._feed_protocol_handlers, protocol)
try:
await self._multiplexer.wait_streaming_finished()
except PeerConnectionLost:
pass
except MalformedMessage as err:
self.logger.debug(
"Disconnecting peer %s for sending MalformedMessage: %s",
self.remote,
err,
exc_info=True,
)
try:
self.get_base_protocol().send(Disconnect(DisconnectReason.BAD_PROTOCOL))
except PeerConnectionLost:
self.logger.debug(
"%s went away while trying to disconnect for MalformedMessage",
self,
)
finally:
self.manager.cancel()
async def _feed_protocol_handlers(self, protocol: ProtocolAPI) -> None:
try:
await asyncio.wait_for(self._handlers_ready.wait(), timeout=10)
except asyncio.TimeoutError as err:
self.logger.warning('Timedout waiting for handler ready signal')
raise asyncio.TimeoutError(
"The handlers ready event was never set. Ensure that "
"`Connection.start_protocol_streams()` is being called"
) from err
async for cmd in self._multiplexer.stream_protocol_messages(protocol):
self.logger.debug2('Handling command: %s', type(cmd))
protocol_handlers = set(self._protocol_handlers[type(protocol)])
for proto_handler_fn in protocol_handlers:
self.logger.debug2(
'Running protocol handler %s for protocol=%s command=%s',
proto_handler_fn,
protocol,
type(cmd),
)
self.manager.run_task(proto_handler_fn, self, cmd)
command_handlers = set(self._command_handlers[type(cmd)])
command_handlers.update(self._msg_handlers)
for cmd_handler_fn in command_handlers:
self.logger.debug2(
'Running command handler %s for protocol=%s command=%s',
cmd_handler_fn,
protocol,
type(cmd),
)
self.manager.run_task(cmd_handler_fn, self, cmd)
# which would cause a DaemonTaskExit error
# (https://github.com/ethereum/trinity/issues/1733).
if self.is_closing:
try:
await asyncio.wait_for(self.manager.wait_finished(), timeout=2)
except asyncio.TimeoutError:
self.logger.error(
"stream_protocol_messages() terminated but Connection was never cancelled, "
"this will cause the Connection to crash with a DaemonTaskExit")
def add_protocol_handler(self,
protocol_class: Type[ProtocolAPI],
handler_fn: HandlerFn,
) -> SubscriptionAPI:
if not self._multiplexer.has_protocol(protocol_class):
raise UnknownProtocol(
f"Protocol {protocol_class} was not found int he connected "
f"protocols: {self._multiplexer.get_protocols()}"
)
self._protocol_handlers[protocol_class].add(handler_fn)
cancel_fn = functools.partial(
self._protocol_handlers[protocol_class].remove,
handler_fn,
)
return Subscription(cancel_fn)
def add_msg_handler(self, handler_fn: HandlerFn) -> SubscriptionAPI:
self._msg_handlers.add(handler_fn)
cancel_fn = functools.partial(self._msg_handlers.remove, handler_fn)
return Subscription(cancel_fn)
def add_command_handler(self,
command_type: Type[CommandAPI[Any]],
handler_fn: HandlerFn,
) -> SubscriptionAPI:
for protocol in self._multiplexer.get_protocols():
if protocol.supports_command(command_type):
self._command_handlers[command_type].add(handler_fn)
cancel_fn = functools.partial(
self._command_handlers[command_type].remove,
handler_fn,
)
return Subscription(cancel_fn)
else:
raise UnknownProtocolCommand(
f"Command {command_type} was not found in the connected "
f"protocols: {self._multiplexer.get_protocols()}"
)
#
# API extension
#
def add_logic(self, name: str, logic: LogicAPI) -> SubscriptionAPI:
if name in self._logics:
raise DuplicateAPI(
f"There is already an API registered under the name '{name}': "
f"{self._logics[name]}"
)
self._logics[name] = logic
cancel_fn = functools.partial(self.remove_logic, name)
return Subscription(cancel_fn)
def remove_logic(self, name: str) -> None:
self._logics.pop(name)
def has_logic(self, name: str) -> bool:
if self.is_closing:
# This is a safety net, really, as the Peer should never call this if it is no longer
# alive.
raise PeerConnectionLost("Cannot look up subprotocol when connection is closing")
return name in self._logics
def get_logic(self, name: str, logic_type: Type[TLogic]) -> TLogic:
if not self.has_logic(name):
raise UnknownAPI(f"No API registered for the name '{name}'")
logic = self._logics[name]
if isinstance(logic, logic_type):
return logic
else:
raise TypeError(
f"Wrong logic type. expected: {logic_type} got: {type(logic)}"
)
#
# Access to underlying Multiplexer
#
def get_multiplexer(self) -> MultiplexerAPI:
return self._multiplexer
#
# Base Protocol shortcuts
#
def get_base_protocol(self) -> BaseP2PProtocol:
return self._multiplexer.get_base_protocol()
def get_p2p_receipt(self) -> DevP2PReceipt:
return self._devp2p_receipt
#
# Protocol APIS
#
def has_protocol(self, protocol_identifier: Union[ProtocolAPI, Type[ProtocolAPI]]) -> bool:
return self._multiplexer.has_protocol(protocol_identifier)
def get_protocols(self) -> Tuple[ProtocolAPI, ...]:
return self._multiplexer.get_protocols()
def get_protocol_by_type(self, protocol_type: Type[TProtocol]) -> TProtocol:
return self._multiplexer.get_protocol_by_type(protocol_type)
def get_protocol_for_command_type(self, command_type: Type[CommandAPI[Any]]) -> ProtocolAPI:
return self._multiplexer.get_protocol_for_command_type(command_type)
def get_receipt_by_type(self, receipt_type: Type[THandshakeReceipt]) -> THandshakeReceipt:
for receipt in self.protocol_receipts:
if isinstance(receipt, receipt_type):
return receipt
else:
raise ReceiptNotFound(f"Receipt not found: {receipt_type}")
#
# Connection Metadata
#
@cached_property
def remote_capabilities(self) -> Capabilities:
return self._devp2p_receipt.capabilities
@cached_property
def remote_p2p_version(self) -> int:
return self._devp2p_receipt.version
@cached_property
def negotiated_p2p_version(self) -> int:
return self.get_base_protocol().version
@cached_property
def remote_public_key(self) -> keys.PublicKey:
return keys.PublicKey(self._devp2p_receipt.remote_public_key)
@cached_property
def client_version_string(self) -> str:
return self._devp2p_receipt.client_version_string
@cached_property
def safe_client_version_string(self) -> str:
# limit number of chars to be displayed, and try to keep printable ones only
# MAGIC 256: arbitrary, "should be enough for everybody"
if len(self.client_version_string) <= 256:
return self.client_version_string
truncated_client_version_string = self.client_version_string[:253] + '...'
if truncated_client_version_string.isprintable():
return truncated_client_version_string
else:
return repr(truncated_client_version_string)
| true
| true
|
f70e20602d9329f0b785241b32a1ae744bf6d702
| 119
|
py
|
Python
|
number reverser.py
|
Jayapraveen34/crazy-lover
|
be5bd897c40c31b3e5e6eafe3b6436cb3d888efe
|
[
"BSD-2-Clause"
] | null | null | null |
number reverser.py
|
Jayapraveen34/crazy-lover
|
be5bd897c40c31b3e5e6eafe3b6436cb3d888efe
|
[
"BSD-2-Clause"
] | null | null | null |
number reverser.py
|
Jayapraveen34/crazy-lover
|
be5bd897c40c31b3e5e6eafe3b6436cb3d888efe
|
[
"BSD-2-Clause"
] | null | null | null |
a = str(input('Enter the number you want to reverse:'))
b = (a[::-1])
c = int(b)
print('the reversed number is',c)
| 23.8
| 56
| 0.605042
|
a = str(input('Enter the number you want to reverse:'))
b = (a[::-1])
c = int(b)
print('the reversed number is',c)
| true
| true
|
f70e21c7bb225db44fe1393e288a00a2a5f19261
| 8,936
|
py
|
Python
|
src/pretix/api/serializers/cart.py
|
rstrblstr/pretix
|
a4827fc992bae308e19a295e150998d9f8c17413
|
[
"Apache-2.0"
] | 1,248
|
2015-04-24T13:32:06.000Z
|
2022-03-29T07:01:36.000Z
|
src/pretix/api/serializers/cart.py
|
rstrblstr/pretix
|
a4827fc992bae308e19a295e150998d9f8c17413
|
[
"Apache-2.0"
] | 2,113
|
2015-02-18T18:58:16.000Z
|
2022-03-31T11:12:32.000Z
|
src/pretix/api/serializers/cart.py
|
thegcat/pretix
|
451d3fce0575d85a0ea93fd64aa0631feaced967
|
[
"Apache-2.0"
] | 453
|
2015-05-13T09:29:06.000Z
|
2022-03-24T13:39:16.000Z
|
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
import os
from datetime import timedelta
from django.core.files import File
from django.utils.crypto import get_random_string
from django.utils.timezone import now
from django.utils.translation import gettext_lazy
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from pretix.api.serializers.i18n import I18nAwareModelSerializer
from pretix.api.serializers.order import (
AnswerCreateSerializer, AnswerSerializer, InlineSeatSerializer,
)
from pretix.base.models import Quota, Seat
from pretix.base.models.orders import CartPosition
class CartPositionSerializer(I18nAwareModelSerializer):
answers = AnswerSerializer(many=True)
seat = InlineSeatSerializer()
class Meta:
model = CartPosition
fields = ('id', 'cart_id', 'item', 'variation', 'price', 'attendee_name', 'attendee_name_parts',
'attendee_email', 'voucher', 'addon_to', 'subevent', 'datetime', 'expires', 'includes_tax',
'answers', 'seat')
class CartPositionCreateSerializer(I18nAwareModelSerializer):
answers = AnswerCreateSerializer(many=True, required=False)
expires = serializers.DateTimeField(required=False)
attendee_name = serializers.CharField(required=False, allow_null=True)
seat = serializers.CharField(required=False, allow_null=True)
sales_channel = serializers.CharField(required=False, default='sales_channel')
class Meta:
model = CartPosition
fields = ('cart_id', 'item', 'variation', 'price', 'attendee_name', 'attendee_name_parts', 'attendee_email',
'subevent', 'expires', 'includes_tax', 'answers', 'seat', 'sales_channel')
def create(self, validated_data):
answers_data = validated_data.pop('answers')
if not validated_data.get('cart_id'):
cid = "{}@api".format(get_random_string(48))
while CartPosition.objects.filter(cart_id=cid).exists():
cid = "{}@api".format(get_random_string(48))
validated_data['cart_id'] = cid
if not validated_data.get('expires'):
validated_data['expires'] = now() + timedelta(
minutes=self.context['event'].settings.get('reservation_time', as_type=int)
)
new_quotas = (validated_data.get('variation').quotas.filter(subevent=validated_data.get('subevent'))
if validated_data.get('variation')
else validated_data.get('item').quotas.filter(subevent=validated_data.get('subevent')))
if len(new_quotas) == 0:
raise ValidationError(
gettext_lazy('The product "{}" is not assigned to a quota.').format(
str(validated_data.get('item'))
)
)
for quota in new_quotas:
avail = quota.availability(_cache=self.context['quota_cache'])
if avail[0] != Quota.AVAILABILITY_OK or (avail[1] is not None and avail[1] < 1):
raise ValidationError(
gettext_lazy('There is not enough quota available on quota "{}" to perform '
'the operation.').format(
quota.name
)
)
for quota in new_quotas:
oldsize = self.context['quota_cache'][quota.pk][1]
newsize = oldsize - 1 if oldsize is not None else None
self.context['quota_cache'][quota.pk] = (
Quota.AVAILABILITY_OK if newsize is None or newsize > 0 else Quota.AVAILABILITY_GONE,
newsize
)
attendee_name = validated_data.pop('attendee_name', '')
if attendee_name and not validated_data.get('attendee_name_parts'):
validated_data['attendee_name_parts'] = {
'_legacy': attendee_name
}
seated = validated_data.get('item').seat_category_mappings.filter(subevent=validated_data.get('subevent')).exists()
if validated_data.get('seat'):
if not seated:
raise ValidationError('The specified product does not allow to choose a seat.')
try:
seat = self.context['event'].seats.get(seat_guid=validated_data['seat'], subevent=validated_data.get('subevent'))
except Seat.DoesNotExist:
raise ValidationError('The specified seat does not exist.')
except Seat.MultipleObjectsReturned:
raise ValidationError('The specified seat ID is not unique.')
else:
validated_data['seat'] = seat
if not seat.is_available(
sales_channel=validated_data.get('sales_channel', 'web'),
distance_ignore_cart_id=validated_data['cart_id'],
):
raise ValidationError(gettext_lazy('The selected seat "{seat}" is not available.').format(seat=seat.name))
elif seated:
raise ValidationError('The specified product requires to choose a seat.')
validated_data.pop('sales_channel')
cp = CartPosition.objects.create(event=self.context['event'], **validated_data)
for answ_data in answers_data:
options = answ_data.pop('options')
if isinstance(answ_data['answer'], File):
an = answ_data.pop('answer')
answ = cp.answers.create(**answ_data, answer='')
answ.file.save(os.path.basename(an.name), an, save=False)
answ.answer = 'file://' + answ.file.name
answ.save()
an.close()
else:
answ = cp.answers.create(**answ_data)
answ.options.add(*options)
return cp
def validate_cart_id(self, cid):
if cid and not cid.endswith('@api'):
raise ValidationError('Cart ID should end in @api or be empty.')
return cid
def validate_item(self, item):
if item.event != self.context['event']:
raise ValidationError(
'The specified item does not belong to this event.'
)
if not item.active:
raise ValidationError(
'The specified item is not active.'
)
return item
def validate_subevent(self, subevent):
if self.context['event'].has_subevents:
if not subevent:
raise ValidationError(
'You need to set a subevent.'
)
if subevent.event != self.context['event']:
raise ValidationError(
'The specified subevent does not belong to this event.'
)
elif subevent:
raise ValidationError(
'You cannot set a subevent for this event.'
)
return subevent
def validate(self, data):
if data.get('item'):
if data.get('item').has_variations:
if not data.get('variation'):
raise ValidationError('You should specify a variation for this item.')
else:
if data.get('variation').item != data.get('item'):
raise ValidationError(
'The specified variation does not belong to the specified item.'
)
elif data.get('variation'):
raise ValidationError(
'You cannot specify a variation for this item.'
)
if data.get('attendee_name') and data.get('attendee_name_parts'):
raise ValidationError(
{'attendee_name': ['Do not specify attendee_name if you specified attendee_name_parts.']}
)
return data
| 45.360406
| 129
| 0.620859
|
import os
from datetime import timedelta
from django.core.files import File
from django.utils.crypto import get_random_string
from django.utils.timezone import now
from django.utils.translation import gettext_lazy
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from pretix.api.serializers.i18n import I18nAwareModelSerializer
from pretix.api.serializers.order import (
AnswerCreateSerializer, AnswerSerializer, InlineSeatSerializer,
)
from pretix.base.models import Quota, Seat
from pretix.base.models.orders import CartPosition
class CartPositionSerializer(I18nAwareModelSerializer):
answers = AnswerSerializer(many=True)
seat = InlineSeatSerializer()
class Meta:
model = CartPosition
fields = ('id', 'cart_id', 'item', 'variation', 'price', 'attendee_name', 'attendee_name_parts',
'attendee_email', 'voucher', 'addon_to', 'subevent', 'datetime', 'expires', 'includes_tax',
'answers', 'seat')
class CartPositionCreateSerializer(I18nAwareModelSerializer):
answers = AnswerCreateSerializer(many=True, required=False)
expires = serializers.DateTimeField(required=False)
attendee_name = serializers.CharField(required=False, allow_null=True)
seat = serializers.CharField(required=False, allow_null=True)
sales_channel = serializers.CharField(required=False, default='sales_channel')
class Meta:
model = CartPosition
fields = ('cart_id', 'item', 'variation', 'price', 'attendee_name', 'attendee_name_parts', 'attendee_email',
'subevent', 'expires', 'includes_tax', 'answers', 'seat', 'sales_channel')
def create(self, validated_data):
answers_data = validated_data.pop('answers')
if not validated_data.get('cart_id'):
cid = "{}@api".format(get_random_string(48))
while CartPosition.objects.filter(cart_id=cid).exists():
cid = "{}@api".format(get_random_string(48))
validated_data['cart_id'] = cid
if not validated_data.get('expires'):
validated_data['expires'] = now() + timedelta(
minutes=self.context['event'].settings.get('reservation_time', as_type=int)
)
new_quotas = (validated_data.get('variation').quotas.filter(subevent=validated_data.get('subevent'))
if validated_data.get('variation')
else validated_data.get('item').quotas.filter(subevent=validated_data.get('subevent')))
if len(new_quotas) == 0:
raise ValidationError(
gettext_lazy('The product "{}" is not assigned to a quota.').format(
str(validated_data.get('item'))
)
)
for quota in new_quotas:
avail = quota.availability(_cache=self.context['quota_cache'])
if avail[0] != Quota.AVAILABILITY_OK or (avail[1] is not None and avail[1] < 1):
raise ValidationError(
gettext_lazy('There is not enough quota available on quota "{}" to perform '
'the operation.').format(
quota.name
)
)
for quota in new_quotas:
oldsize = self.context['quota_cache'][quota.pk][1]
newsize = oldsize - 1 if oldsize is not None else None
self.context['quota_cache'][quota.pk] = (
Quota.AVAILABILITY_OK if newsize is None or newsize > 0 else Quota.AVAILABILITY_GONE,
newsize
)
attendee_name = validated_data.pop('attendee_name', '')
if attendee_name and not validated_data.get('attendee_name_parts'):
validated_data['attendee_name_parts'] = {
'_legacy': attendee_name
}
seated = validated_data.get('item').seat_category_mappings.filter(subevent=validated_data.get('subevent')).exists()
if validated_data.get('seat'):
if not seated:
raise ValidationError('The specified product does not allow to choose a seat.')
try:
seat = self.context['event'].seats.get(seat_guid=validated_data['seat'], subevent=validated_data.get('subevent'))
except Seat.DoesNotExist:
raise ValidationError('The specified seat does not exist.')
except Seat.MultipleObjectsReturned:
raise ValidationError('The specified seat ID is not unique.')
else:
validated_data['seat'] = seat
if not seat.is_available(
sales_channel=validated_data.get('sales_channel', 'web'),
distance_ignore_cart_id=validated_data['cart_id'],
):
raise ValidationError(gettext_lazy('The selected seat "{seat}" is not available.').format(seat=seat.name))
elif seated:
raise ValidationError('The specified product requires to choose a seat.')
validated_data.pop('sales_channel')
cp = CartPosition.objects.create(event=self.context['event'], **validated_data)
for answ_data in answers_data:
options = answ_data.pop('options')
if isinstance(answ_data['answer'], File):
an = answ_data.pop('answer')
answ = cp.answers.create(**answ_data, answer='')
answ.file.save(os.path.basename(an.name), an, save=False)
answ.answer = 'file://' + answ.file.name
answ.save()
an.close()
else:
answ = cp.answers.create(**answ_data)
answ.options.add(*options)
return cp
def validate_cart_id(self, cid):
if cid and not cid.endswith('@api'):
raise ValidationError('Cart ID should end in @api or be empty.')
return cid
def validate_item(self, item):
if item.event != self.context['event']:
raise ValidationError(
'The specified item does not belong to this event.'
)
if not item.active:
raise ValidationError(
'The specified item is not active.'
)
return item
def validate_subevent(self, subevent):
if self.context['event'].has_subevents:
if not subevent:
raise ValidationError(
'You need to set a subevent.'
)
if subevent.event != self.context['event']:
raise ValidationError(
'The specified subevent does not belong to this event.'
)
elif subevent:
raise ValidationError(
'You cannot set a subevent for this event.'
)
return subevent
def validate(self, data):
if data.get('item'):
if data.get('item').has_variations:
if not data.get('variation'):
raise ValidationError('You should specify a variation for this item.')
else:
if data.get('variation').item != data.get('item'):
raise ValidationError(
'The specified variation does not belong to the specified item.'
)
elif data.get('variation'):
raise ValidationError(
'You cannot specify a variation for this item.'
)
if data.get('attendee_name') and data.get('attendee_name_parts'):
raise ValidationError(
{'attendee_name': ['Do not specify attendee_name if you specified attendee_name_parts.']}
)
return data
| true
| true
|
f70e21eca2bbf4b33918b8515ca1203bbabbafc7
| 1,515
|
py
|
Python
|
zoo/analytics/migrations/0008_auto_20180907_1348.py
|
uliana291/the-zoo
|
a15a4162c39553abe91224f4feff5d3b66f9413e
|
[
"MIT"
] | null | null | null |
zoo/analytics/migrations/0008_auto_20180907_1348.py
|
uliana291/the-zoo
|
a15a4162c39553abe91224f4feff5d3b66f9413e
|
[
"MIT"
] | null | null | null |
zoo/analytics/migrations/0008_auto_20180907_1348.py
|
uliana291/the-zoo
|
a15a4162c39553abe91224f4feff5d3b66f9413e
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1 on 2018-09-07 13:48
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [("analytics", "0007_dependencyusage_version")]
operations = [
migrations.AddField(
model_name="dependency",
name="timestamp",
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name="dependencyusage",
name="timestamp",
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name="dependencyusage",
name="dependency",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="depusage",
to="analytics.Dependency",
),
),
migrations.AlterField(
model_name="dependencyusage",
name="major_version",
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name="dependencyusage",
name="minor_version",
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name="dependencyusage",
name="patch_version",
field=models.BigIntegerField(blank=True, null=True),
),
]
| 31.5625
| 74
| 0.593399
|
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [("analytics", "0007_dependencyusage_version")]
operations = [
migrations.AddField(
model_name="dependency",
name="timestamp",
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name="dependencyusage",
name="timestamp",
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name="dependencyusage",
name="dependency",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="depusage",
to="analytics.Dependency",
),
),
migrations.AlterField(
model_name="dependencyusage",
name="major_version",
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name="dependencyusage",
name="minor_version",
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name="dependencyusage",
name="patch_version",
field=models.BigIntegerField(blank=True, null=True),
),
]
| true
| true
|
f70e220a1292f15b8385e165c8d12e90fe7821bc
| 4,796
|
py
|
Python
|
pygobo/__main__.py
|
alexmilowski/pygodo
|
1b47dfc03747899919a415cc491e0eaae65d0cc5
|
[
"MIT"
] | 2
|
2020-05-22T21:27:10.000Z
|
2022-03-27T09:56:27.000Z
|
venv/Lib/site-packages/pygobo/__main__.py
|
DoesArt-Studios/RamBrowse
|
a81da53e04d265d17e76855e7affc11130ee6120
|
[
"MIT"
] | 1
|
2021-06-27T09:45:27.000Z
|
2021-06-27T09:45:27.000Z
|
venv/Lib/site-packages/pygobo/__main__.py
|
DoesArt-Studios/RamBrowse
|
a81da53e04d265d17e76855e7affc11130ee6120
|
[
"MIT"
] | 1
|
2021-06-26T09:59:02.000Z
|
2021-06-26T09:59:02.000Z
|
import argparse
import sys
import pprint
from pygobo import OBOParser, query_generate
# CLI driver: parse one or more OBO ontology files and either pretty-print
# them ('parse'), emit Cypher statements ('cypher'), load them into a
# RedisGraph database ('load'), or summarize which keys/properties the
# ontology actually uses ('structure').
if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description='Article importer')
    # Redis / RedisGraph connection options — only used by the 'load' operation.
    argparser.add_argument('--host',help='Redis host',default='0.0.0.0')
    argparser.add_argument('--port',help='Redis port',type=int,default=6379)
    argparser.add_argument('--password',help='Redis password')
    argparser.add_argument('--show-query',help='Show the cypher queries before they are run.',action='store_true',default=False)
    argparser.add_argument('--graph',help='The graph name',default='obo')
    # --scope and --option are repeatable; both are None when never given.
    argparser.add_argument('--scope',help='The scope of the operation',choices=['all','ontology','term','xref','typedef'],action='append')
    argparser.add_argument('--option',help='An option to the operation',choices=['show-xrefs'],action='append')
    argparser.add_argument('operation',help='The operation to perform',choices=['parse','cypher','load','structure'])
    argparser.add_argument('files',nargs='*',help='The files to process.')
    args = argparser.parse_args()
    pp = pprint.PrettyPrinter(indent=2)
    # With no file arguments, read a single ontology from standard input.
    if len(args.files)==0:
        sources = [sys.stdin]
    else:
        sources = args.files
    for source in sources:
        parser = OBOParser()
        # A source is either a filename (open it) or an already-open stream.
        # NOTE(review): the target name 'input' shadows the builtin input();
        # also, when reading from stdin the with-block closes sys.stdin —
        # harmless here because stdin is only ever the single source.
        with open(source,'r') if type(source)==str else source as input:
            ontology = parser.parse(input)
        if args.operation=='parse':
            # Default scope for parse/structure excludes xrefs.
            if args.scope is None or 'all' in args.scope:
                args.scope = ['ontology','term','typedef']
            if 'ontology' in args.scope:
                print('Ontology:')
                pp.pprint(ontology.metadata)
            if 'term' in args.scope:
                print('Terms:')
                pp.pprint(ontology.terms)
            if 'typedef' in args.scope:
                print('Typedefs:')
                pp.pprint(ontology.typedefs)
        elif args.operation=='cypher':
            # Emit each generated Cypher statement followed by a ';' line.
            if args.scope is None or 'all' in args.scope:
                args.scope = ['ontology','term','xref','typedef']
            for query in query_generate(ontology,scope=args.scope):
                print(query)
                print(';')
        elif args.operation=='load':
            # Lazy imports: redis/redisgraph are only required for loading,
            # so the other operations work without them installed.
            import redis
            from redisgraph import Graph
            r = redis.Redis(host=args.host,port=args.port,password=args.password)
            graph = Graph(args.graph,r)
            if args.scope is None or 'all' in args.scope:
                args.scope = ['ontology','term','xref','typedef']
            for query in query_generate(ontology,scope=args.scope):
                if args.show_query:
                    print(query)
                    print(';')
                graph.query(query)
        elif args.operation=='structure':
            # Report which keys/properties actually occur in the ontology,
            # terms, and typedefs (an inventory rather than the full data).
            if args.scope is None or 'all' in args.scope:
                args.scope = ['ontology','term','typedef']
            if 'ontology' in args.scope:
                print('Ontology:')
                for name in sorted(ontology.metadata.keys()):
                    print(' '+name)
                    # Expand the individual property/subset names one level.
                    if name=='property_value':
                        for property in ontology.metadata['property_value'].keys():
                            print(' '+property)
                    elif name=='subsetdef':
                        for property in ontology.metadata['subsetdef'].keys():
                            print(' '+property)
            if 'term' in args.scope:
                print('Term:')
                # Collect the union of keys (and optionally xrefs/properties)
                # used across all terms; dicts are used as ordered sets.
                structure = {}
                xrefs = {}
                properties = {}
                do_xrefs = ('show-xrefs' in args.option) if args.option is not None else False
                for typedef in ontology.terms.keys():
                    term = ontology.terms[typedef]
                    if do_xrefs:
                        for name in term.get('xref',[]):
                            xrefs[name] = True
                    for name,value in term.get('property_value',[]):
                        properties[name] = True
                    for name in term.keys():
                        structure[name] = True
                for name in sorted(structure.keys()):
                    print(' '+name)
                    if do_xrefs and name=='xref':
                        for xref in sorted(xrefs.keys()):
                            print(' '+xref)
                    if name=='property_value':
                        for property in sorted(properties.keys()):
                            print(' '+property)
            if 'typedef' in args.scope:
                print('Typedef:')
                # Union of keys used across all typedefs.
                structure = {}
                for typedef in ontology.typedefs.keys():
                    for name in ontology.typedefs[typedef].keys():
                        structure[name] = True
                for name in sorted(structure.keys()):
                    print(' '+name)
| 42.070175
| 137
| 0.538365
|
import argparse
import sys
import pprint
from pygobo import OBOParser, query_generate
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Article importer')
argparser.add_argument('--host',help='Redis host',default='0.0.0.0')
argparser.add_argument('--port',help='Redis port',type=int,default=6379)
argparser.add_argument('--password',help='Redis password')
argparser.add_argument('--show-query',help='Show the cypher queries before they are run.',action='store_true',default=False)
argparser.add_argument('--graph',help='The graph name',default='obo')
argparser.add_argument('--scope',help='The scope of the operation',choices=['all','ontology','term','xref','typedef'],action='append')
argparser.add_argument('--option',help='An option to the operation',choices=['show-xrefs'],action='append')
argparser.add_argument('operation',help='The operation to perform',choices=['parse','cypher','load','structure'])
argparser.add_argument('files',nargs='*',help='The files to process.')
args = argparser.parse_args()
pp = pprint.PrettyPrinter(indent=2)
if len(args.files)==0:
sources = [sys.stdin]
else:
sources = args.files
for source in sources:
parser = OBOParser()
with open(source,'r') if type(source)==str else source as input:
ontology = parser.parse(input)
if args.operation=='parse':
if args.scope is None or 'all' in args.scope:
args.scope = ['ontology','term','typedef']
if 'ontology' in args.scope:
print('Ontology:')
pp.pprint(ontology.metadata)
if 'term' in args.scope:
print('Terms:')
pp.pprint(ontology.terms)
if 'typedef' in args.scope:
print('Typedefs:')
pp.pprint(ontology.typedefs)
elif args.operation=='cypher':
if args.scope is None or 'all' in args.scope:
args.scope = ['ontology','term','xref','typedef']
for query in query_generate(ontology,scope=args.scope):
print(query)
print(';')
elif args.operation=='load':
import redis
from redisgraph import Graph
r = redis.Redis(host=args.host,port=args.port,password=args.password)
graph = Graph(args.graph,r)
if args.scope is None or 'all' in args.scope:
args.scope = ['ontology','term','xref','typedef']
for query in query_generate(ontology,scope=args.scope):
if args.show_query:
print(query)
print(';')
graph.query(query)
elif args.operation=='structure':
if args.scope is None or 'all' in args.scope:
args.scope = ['ontology','term','typedef']
if 'ontology' in args.scope:
print('Ontology:')
for name in sorted(ontology.metadata.keys()):
print(' '+name)
if name=='property_value':
for property in ontology.metadata['property_value'].keys():
print(' '+property)
elif name=='subsetdef':
for property in ontology.metadata['subsetdef'].keys():
print(' '+property)
if 'term' in args.scope:
print('Term:')
structure = {}
xrefs = {}
properties = {}
do_xrefs = ('show-xrefs' in args.option) if args.option is not None else False
for typedef in ontology.terms.keys():
term = ontology.terms[typedef]
if do_xrefs:
for name in term.get('xref',[]):
xrefs[name] = True
for name,value in term.get('property_value',[]):
properties[name] = True
for name in term.keys():
structure[name] = True
for name in sorted(structure.keys()):
print(' '+name)
if do_xrefs and name=='xref':
for xref in sorted(xrefs.keys()):
print(' '+xref)
if name=='property_value':
for property in sorted(properties.keys()):
print(' '+property)
if 'typedef' in args.scope:
print('Typedef:')
structure = {}
for typedef in ontology.typedefs.keys():
for name in ontology.typedefs[typedef].keys():
structure[name] = True
for name in sorted(structure.keys()):
print(' '+name)
| true
| true
|
f70e227b06c0768f40ed6d950e9fd3ff364bb6b6
| 2,044
|
py
|
Python
|
Reinforcement-Learning-Trader/Agent.py
|
krisbuote/Reinforcement-Learning-Trader
|
ae8c3af0856a480c88546c2a7e478a735585e0af
|
[
"MIT"
] | 2
|
2018-11-05T19:46:51.000Z
|
2020-02-13T16:20:57.000Z
|
Reinforcement-Learning-Trader/Agent.py
|
krisbuote/Reinforcement-Learning-Trader
|
ae8c3af0856a480c88546c2a7e478a735585e0af
|
[
"MIT"
] | null | null | null |
Reinforcement-Learning-Trader/Agent.py
|
krisbuote/Reinforcement-Learning-Trader
|
ae8c3af0856a480c88546c2a7e478a735585e0af
|
[
"MIT"
] | 1
|
2018-11-05T19:46:54.000Z
|
2018-11-05T19:46:54.000Z
|
import keras
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense, LSTM, Dropout
from keras.optimizers import Adam
import numpy as np
import random
from collections import deque
class Agent:
    """Epsilon-greedy DQN trading agent.

    Keeps a bounded replay memory of (state, action, reward, next_state,
    done) transitions and a small dense Q-network that maps a state vector
    to one Q-value per action (0 = buy, 1 = sell).
    """

    def __init__(self, state_size, is_eval=False, model_name=""):
        """Set up hyper-parameters and either build a fresh network or,
        in evaluation mode, restore a saved one from models/<model_name>."""
        self.state_size = state_size          # length of the (normalized) state vector
        self.action_size = 2                  # discrete actions: buy, sell
        self.memory = deque(maxlen=1000)      # replay buffer of transitions
        self.inventory = []                   # open positions
        self.net_worth = []                   # equity history
        self.model_name = model_name
        self.is_eval = is_eval
        # Q-learning / exploration hyper-parameters.
        self.gamma = 0.95
        self.epsilon = 1.0
        self.epsilon_min = 0.08
        self.epsilon_decay = 0.995
        if is_eval:
            self.model = load_model("models/" + model_name)
        else:
            self.model = self._model()

    def _model(self):
        """Build and compile the Q-network (state -> Q-value per action)."""
        net = Sequential()
        net.add(Dense(units=64, input_dim=self.state_size, activation="relu"))
        for width in (32, 8):
            net.add(Dense(units=width, activation="relu"))
        net.add(Dense(self.action_size, activation="linear"))
        net.compile(loss="mse", optimizer=Adam(lr=0.001))
        return net

    def act(self, state):
        """Choose an action for *state*: explore uniformly at random with
        probability epsilon while training, otherwise take the argmax
        Q-value predicted by the network."""
        exploring = (not self.is_eval) and random.random() <= self.epsilon
        if exploring:
            return random.randrange(self.action_size)
        q_values = self.model.predict(state)
        return np.argmax(q_values[0])

    def expReplay(self, batch_size):
        """Fit the network on the most recent batch_size - 1 transitions
        in memory, then decay epsilon toward epsilon_min."""
        total = len(self.memory)
        recent = [self.memory[i] for i in range(total - batch_size + 1, total)]
        for state, action, reward, next_state, done in recent:
            target = reward
            if not done:
                # Bootstrap from the best Q-value of the successor state.
                target = reward + self.gamma * np.amax(self.model.predict(next_state)[0])
            q_update = self.model.predict(state)
            q_update[0][action] = target
            self.model.fit(state, q_update, epochs=1, verbose=0)
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
| 32.967742
| 89
| 0.630626
|
import keras
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense, LSTM, Dropout
from keras.optimizers import Adam
import numpy as np
import random
from collections import deque
class Agent:
def __init__(self, state_size, is_eval=False, model_name=""):
self.state_size = state_size
self.action_size = 2
self.memory = deque(maxlen=1000)
self.inventory = []
self.net_worth = []
self.model_name = model_name
self.is_eval = is_eval
self.gamma = 0.95
self.epsilon = 1.0
self.epsilon_min = 0.08
self.epsilon_decay = 0.995
self.model = load_model("models/" + model_name) if is_eval else self._model()
def _model(self):
model = Sequential()
model.add(Dense(units=64, input_dim=self.state_size, activation="relu"))
model.add(Dense(units=32, activation="relu"))
model.add(Dense(units=8, activation="relu"))
model.add(Dense(self.action_size, activation="linear"))
model.compile(loss="mse", optimizer=Adam(lr=0.001))
return model
def act(self, state):
if not self.is_eval and random.random() <= self.epsilon:
return random.randrange(self.action_size)
options = self.model.predict(state)
return np.argmax(options[0])
def expReplay(self, batch_size):
mini_batch = []
l = len(self.memory)
for i in range(l - batch_size + 1, l):
mini_batch.append(self.memory[i])
for state, action, reward, next_state, done in mini_batch:
target = reward
if not done:
target = reward + self.gamma * np.amax(self.model.predict(next_state)[0])
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
| true
| true
|
f70e22950f5d32c9226094224f16d15f331cb3e4
| 1,816
|
py
|
Python
|
cryptovote/cryptovote/models/authority.py
|
cryptovoting/cryptovote
|
b236cf031a8f9dfa5cca54ff45003313275a0fc8
|
[
"MIT"
] | 8
|
2019-05-14T02:41:34.000Z
|
2021-11-25T08:07:22.000Z
|
cryptovote/cryptovote/models/authority.py
|
cryptovoting/cryptovote
|
b236cf031a8f9dfa5cca54ff45003313275a0fc8
|
[
"MIT"
] | null | null | null |
cryptovote/cryptovote/models/authority.py
|
cryptovoting/cryptovote
|
b236cf031a8f9dfa5cca54ff45003313275a0fc8
|
[
"MIT"
] | 2
|
2019-05-14T20:20:07.000Z
|
2021-11-25T08:07:24.000Z
|
from ..extensions import db
from flask_login import UserMixin as FlaskLoginUser
from uuid import uuid4
from damgard_jurik import keygen
class Authority(db.Model, FlaskLoginUser):
    """ Implements an Authority class that can be accessed by flask-login and
    handled by flask-sqlalchemy. Any human has a unique Authority object
    for each election in which they are an authority. """
    # Identity.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False)
    # Email-confirmation flow: email_key is a random UUID token generated in
    # __init__; email_confirmed presumably flips once the link is followed —
    # TODO confirm against the confirmation view.
    email = db.Column(db.Text, nullable=False)
    email_confirmed = db.Column(db.Boolean, default=False, nullable=False)
    email_key = db.Column(db.Text, unique=True, nullable=False)
    # One Authority row per (person, election) pair.
    election_id = db.Column(db.Integer, db.ForeignKey('election.id'),
                            nullable=False)
    # Pickled Damgard-Jurik key material created in __init__.
    public_key = db.Column(db.PickleType, unique=True, nullable=False)
    private_key_ring = db.Column(db.PickleType, nullable=False)
    # WebAuthn credential fields; nullable because an authority may use a
    # password (pw_hash) instead.
    webauthn = db.Column(db.Boolean, nullable=False)
    ukey = db.Column(db.String(20), unique=True, nullable=True)
    credential_id = db.Column(db.String(250), unique=True, nullable=True)
    pub_key = db.Column(db.String(65), unique=True, nullable=True)
    sign_count = db.Column(db.Integer, default=0)
    rp_id = db.Column(db.String(253), nullable=True)
    icon_url = db.Column(db.String(2083), nullable=True)
    pw_hash = db.Column(db.Text, nullable=True)
    def __init__(self, **kwargs):
        """Create an authority with a fresh email-confirmation token and a
        Damgard-Jurik keypair; remaining columns are set from **kwargs."""
        self.email_key = str(uuid4())
        # Single-share threshold keypair. NOTE(review): n_bits=32 looks
        # demo-sized rather than production-strength — confirm intent.
        keypair = keygen(threshold=1, n_shares=1, n_bits=32)
        self.public_key = keypair[0]
        self.private_key_ring = keypair[1]
        for key, value in kwargs.items():
            setattr(self, key, value)
    def get_id(self):
        # Required by flask-login's UserMixin contract.
        return self.id
    def __repr__(self):
        return f'<Authority {self.id} ({self.name})>'
| 40.355556
| 77
| 0.684471
|
from ..extensions import db
from flask_login import UserMixin as FlaskLoginUser
from uuid import uuid4
from damgard_jurik import keygen
class Authority(db.Model, FlaskLoginUser):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text, nullable=False)
email = db.Column(db.Text, nullable=False)
email_confirmed = db.Column(db.Boolean, default=False, nullable=False)
email_key = db.Column(db.Text, unique=True, nullable=False)
election_id = db.Column(db.Integer, db.ForeignKey('election.id'),
nullable=False)
public_key = db.Column(db.PickleType, unique=True, nullable=False)
private_key_ring = db.Column(db.PickleType, nullable=False)
webauthn = db.Column(db.Boolean, nullable=False)
ukey = db.Column(db.String(20), unique=True, nullable=True)
credential_id = db.Column(db.String(250), unique=True, nullable=True)
pub_key = db.Column(db.String(65), unique=True, nullable=True)
sign_count = db.Column(db.Integer, default=0)
rp_id = db.Column(db.String(253), nullable=True)
icon_url = db.Column(db.String(2083), nullable=True)
pw_hash = db.Column(db.Text, nullable=True)
def __init__(self, **kwargs):
self.email_key = str(uuid4())
keypair = keygen(threshold=1, n_shares=1, n_bits=32)
self.public_key = keypair[0]
self.private_key_ring = keypair[1]
for key, value in kwargs.items():
setattr(self, key, value)
def get_id(self):
return self.id
def __repr__(self):
return f'<Authority {self.id} ({self.name})>'
| true
| true
|
f70e231545b9ef51cfc49f28cfce801ae010f1db
| 21,955
|
py
|
Python
|
tests/_card_render.py
|
JrGoodle/rich
|
8a8250c5ae4da57043e9886643e6d9b7feecf4da
|
[
"MIT"
] | 2
|
2021-07-29T07:25:53.000Z
|
2021-11-14T15:48:11.000Z
|
tests/_card_render.py
|
heavelock/rich
|
8a8250c5ae4da57043e9886643e6d9b7feecf4da
|
[
"MIT"
] | null | null | null |
tests/_card_render.py
|
heavelock/rich
|
8a8250c5ae4da57043e9886643e6d9b7feecf4da
|
[
"MIT"
] | 1
|
2021-05-21T20:34:41.000Z
|
2021-05-21T20:34:41.000Z
|
expected = "\x1b[3m Rich features \x1b[0m\n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Colors \x1b[0m\x1b[1;31m \x1b[0m✓ \x1b[1;32m4-bit color\x1b[0m \x1b[38;2;51;0;0m█\x1b[0m\x1b[38;2;51;5;0m█\x1b[0m\x1b[38;2;51;11;0m█\x1b[0m\x1b[38;2;51;17;0m█\x1b[0m\x1b[38;2;51;23;0m█\x1b[0m\x1b[38;2;51;29;0m█\x1b[0m\x1b[38;2;51;35;0m█\x1b[0m\x1b[38;2;51;41;0m█\x1b[0m\x1b[38;2;51;47;0m█\x1b[0m\x1b[38;2;49;51;0m█\x1b[0m\x1b[38;2;43;51;0m█\x1b[0m\x1b[38;2;37;51;0m█\x1b[0m\x1b[38;2;31;51;0m█\x1b[0m\x1b[38;2;25;51;0m█\x1b[0m\x1b[38;2;19;51;0m█\x1b[0m\x1b[38;2;13;51;0m█\x1b[0m\x1b[38;2;7;51;0m█\x1b[0m\x1b[38;2;1;51;0m█\x1b[0m\x1b[38;2;0;51;3m█\x1b[0m\x1b[38;2;0;51;9m█\x1b[0m\x1b[38;2;0;51;15m█\x1b[0m\x1b[38;2;0;51;21m█\x1b[0m\x1b[38;2;0;51;27m█\x1b[0m\x1b[38;2;0;51;33m█\x1b[0m\x1b[38;2;0;51;39m█\x1b[0m\x1b[38;2;0;51;45m█\x1b[0m\x1b[38;2;0;50;51m█\x1b[0m\x1b[38;2;0;45;51m█\x1b[0m\x1b[38;2;0;39;51m█\x1b[0m\x1b[38;2;0;33;51m█\x1b[0m\x1b[38;2;0;27;51m█\x1b[0m\x1b[38;2;0;21;51m█\x1b[0m\x1b[38;2;0;15;51m█\x1b[0m\x1b[38;2;0;9;51m█\x1b[0m\x1b[38;2;0;3;51m█\x1b[0m\x1b[38;2;1;0;51m█\x1b[0m\x1b[38;2;7;0;51m█\x1b[0m\x1b[38;2;13;0;51m█\x1b[0m\x1b[38;2;19;0;51m█\x1b[0m\x1b[38;2;25;0;51m█\x1b[0m\x1b[38;2;31;0;51m█\x1b[0m\x1b[38;2;37;0;51m█\x1b[0m\x1b[38;2;43;0;51m█\x1b[0m\x1b[38;2;49;0;51m█\x1b[0m\x1b[38;2;51;0;47m█\x1b[0m\x1b[38;2;51;0;41m█\x1b[0m\x1b[38;2;51;0;35m█\x1b[0m\x1b[38;2;51;0;29m█\x1b[0m\x1b[38;2;51;0;23m█\x1b[0m\x1b[38;2;51;0;17m█\x1b[0m\x1b[38;2;51;0;11m█\x1b[0m\x1b[38;2;51;0;5m█\x1b[0m \n ✓ \x1b[1;34m8-bit color\x1b[0m 
\x1b[38;2;122;0;0m█\x1b[0m\x1b[38;2;122;14;0m█\x1b[0m\x1b[38;2;122;28;0m█\x1b[0m\x1b[38;2;122;42;0m█\x1b[0m\x1b[38;2;122;56;0m█\x1b[0m\x1b[38;2;122;70;0m█\x1b[0m\x1b[38;2;122;84;0m█\x1b[0m\x1b[38;2;122;98;0m█\x1b[0m\x1b[38;2;122;112;0m█\x1b[0m\x1b[38;2;117;122;0m█\x1b[0m\x1b[38;2;103;122;0m█\x1b[0m\x1b[38;2;89;122;0m█\x1b[0m\x1b[38;2;75;122;0m█\x1b[0m\x1b[38;2;61;122;0m█\x1b[0m\x1b[38;2;47;122;0m█\x1b[0m\x1b[38;2;32;122;0m█\x1b[0m\x1b[38;2;18;122;0m█\x1b[0m\x1b[38;2;4;122;0m█\x1b[0m\x1b[38;2;0;122;9m█\x1b[0m\x1b[38;2;0;122;23m█\x1b[0m\x1b[38;2;0;122;37m█\x1b[0m\x1b[38;2;0;122;51m█\x1b[0m\x1b[38;2;0;122;65m█\x1b[0m\x1b[38;2;0;122;80m█\x1b[0m\x1b[38;2;0;122;94m█\x1b[0m\x1b[38;2;0;122;108m█\x1b[0m\x1b[38;2;0;122;122m█\x1b[0m\x1b[38;2;0;108;122m█\x1b[0m\x1b[38;2;0;94;122m█\x1b[0m\x1b[38;2;0;80;122m█\x1b[0m\x1b[38;2;0;65;122m█\x1b[0m\x1b[38;2;0;51;122m█\x1b[0m\x1b[38;2;0;37;122m█\x1b[0m\x1b[38;2;0;23;122m█\x1b[0m\x1b[38;2;0;9;122m█\x1b[0m\x1b[38;2;4;0;122m█\x1b[0m\x1b[38;2;18;0;122m█\x1b[0m\x1b[38;2;32;0;122m█\x1b[0m\x1b[38;2;47;0;122m█\x1b[0m\x1b[38;2;61;0;122m█\x1b[0m\x1b[38;2;75;0;122m█\x1b[0m\x1b[38;2;89;0;122m█\x1b[0m\x1b[38;2;103;0;122m█\x1b[0m\x1b[38;2;117;0;122m█\x1b[0m\x1b[38;2;122;0;112m█\x1b[0m\x1b[38;2;122;0;98m█\x1b[0m\x1b[38;2;122;0;84m█\x1b[0m\x1b[38;2;122;0;70m█\x1b[0m\x1b[38;2;122;0;56m█\x1b[0m\x1b[38;2;122;0;42m█\x1b[0m\x1b[38;2;122;0;28m█\x1b[0m\x1b[38;2;122;0;14m█\x1b[0m \n ✓ \x1b[1;35mTruecolor (16.7 million)\x1b[0m 
\x1b[38;2;193;0;0m█\x1b[0m\x1b[38;2;193;22;0m█\x1b[0m\x1b[38;2;193;44;0m█\x1b[0m\x1b[38;2;193;67;0m█\x1b[0m\x1b[38;2;193;89;0m█\x1b[0m\x1b[38;2;193;111;0m█\x1b[0m\x1b[38;2;193;134;0m█\x1b[0m\x1b[38;2;193;156;0m█\x1b[0m\x1b[38;2;193;178;0m█\x1b[0m\x1b[38;2;186;193;0m█\x1b[0m\x1b[38;2;163;193;0m█\x1b[0m\x1b[38;2;141;193;0m█\x1b[0m\x1b[38;2;119;193;0m█\x1b[0m\x1b[38;2;96;193;0m█\x1b[0m\x1b[38;2;74;193;0m█\x1b[0m\x1b[38;2;52;193;0m█\x1b[0m\x1b[38;2;29;193;0m█\x1b[0m\x1b[38;2;7;193;0m█\x1b[0m\x1b[38;2;0;193;14m█\x1b[0m\x1b[38;2;0;193;37m█\x1b[0m\x1b[38;2;0;193;59m█\x1b[0m\x1b[38;2;0;193;81m█\x1b[0m\x1b[38;2;0;193;104m█\x1b[0m\x1b[38;2;0;193;126m█\x1b[0m\x1b[38;2;0;193;149m█\x1b[0m\x1b[38;2;0;193;171m█\x1b[0m\x1b[38;2;0;193;193m█\x1b[0m\x1b[38;2;0;171;193m█\x1b[0m\x1b[38;2;0;149;193m█\x1b[0m\x1b[38;2;0;126;193m█\x1b[0m\x1b[38;2;0;104;193m█\x1b[0m\x1b[38;2;0;81;193m█\x1b[0m\x1b[38;2;0;59;193m█\x1b[0m\x1b[38;2;0;37;193m█\x1b[0m\x1b[38;2;0;14;193m█\x1b[0m\x1b[38;2;7;0;193m█\x1b[0m\x1b[38;2;29;0;193m█\x1b[0m\x1b[38;2;52;0;193m█\x1b[0m\x1b[38;2;74;0;193m█\x1b[0m\x1b[38;2;96;0;193m█\x1b[0m\x1b[38;2;119;0;193m█\x1b[0m\x1b[38;2;141;0;193m█\x1b[0m\x1b[38;2;163;0;193m█\x1b[0m\x1b[38;2;186;0;193m█\x1b[0m\x1b[38;2;193;0;178m█\x1b[0m\x1b[38;2;193;0;156m█\x1b[0m\x1b[38;2;193;0;134m█\x1b[0m\x1b[38;2;193;0;111m█\x1b[0m\x1b[38;2;193;0;89m█\x1b[0m\x1b[38;2;193;0;67m█\x1b[0m\x1b[38;2;193;0;44m█\x1b[0m\x1b[38;2;193;0;22m█\x1b[0m \n ✓ \x1b[1;33mDumb terminals\x1b[0m 
\x1b[38;2;255;10;10m█\x1b[0m\x1b[38;2;255;38;10m█\x1b[0m\x1b[38;2;255;66;10m█\x1b[0m\x1b[38;2;255;94;10m█\x1b[0m\x1b[38;2;255;123;10m█\x1b[0m\x1b[38;2;255;151;10m█\x1b[0m\x1b[38;2;255;179;10m█\x1b[0m\x1b[38;2;255;207;10m█\x1b[0m\x1b[38;2;255;236;10m█\x1b[0m\x1b[38;2;245;255;10m█\x1b[0m\x1b[38;2;217;255;10m█\x1b[0m\x1b[38;2;189;255;10m█\x1b[0m\x1b[38;2;160;255;10m█\x1b[0m\x1b[38;2;132;255;10m█\x1b[0m\x1b[38;2;104;255;10m█\x1b[0m\x1b[38;2;76;255;10m█\x1b[0m\x1b[38;2;47;255;10m█\x1b[0m\x1b[38;2;19;255;10m█\x1b[0m\x1b[38;2;10;255;29m█\x1b[0m\x1b[38;2;10;255;57m█\x1b[0m\x1b[38;2;10;255;85m█\x1b[0m\x1b[38;2;10;255;113m█\x1b[0m\x1b[38;2;10;255;142m█\x1b[0m\x1b[38;2;10;255;170m█\x1b[0m\x1b[38;2;10;255;198m█\x1b[0m\x1b[38;2;10;255;226m█\x1b[0m\x1b[38;2;10;254;255m█\x1b[0m\x1b[38;2;10;226;255m█\x1b[0m\x1b[38;2;10;198;255m█\x1b[0m\x1b[38;2;10;170;255m█\x1b[0m\x1b[38;2;10;142;255m█\x1b[0m\x1b[38;2;10;113;255m█\x1b[0m\x1b[38;2;10;85;255m█\x1b[0m\x1b[38;2;10;57;255m█\x1b[0m\x1b[38;2;10;29;255m█\x1b[0m\x1b[38;2;19;10;255m█\x1b[0m\x1b[38;2;47;10;255m█\x1b[0m\x1b[38;2;76;10;255m█\x1b[0m\x1b[38;2;104;10;255m█\x1b[0m\x1b[38;2;132;10;255m█\x1b[0m\x1b[38;2;160;10;255m█\x1b[0m\x1b[38;2;189;10;255m█\x1b[0m\x1b[38;2;217;10;255m█\x1b[0m\x1b[38;2;245;10;255m█\x1b[0m\x1b[38;2;255;10;236m█\x1b[0m\x1b[38;2;255;10;207m█\x1b[0m\x1b[38;2;255;10;179m█\x1b[0m\x1b[38;2;255;10;151m█\x1b[0m\x1b[38;2;255;10;123m█\x1b[0m\x1b[38;2;255;10;94m█\x1b[0m\x1b[38;2;255;10;66m█\x1b[0m\x1b[38;2;255;10;38m█\x1b[0m \n ✓ \x1b[1;36mAutomatic color conversion\x1b[0m 
\x1b[38;2;255;81;81m█\x1b[0m\x1b[38;2;255;101;81m█\x1b[0m\x1b[38;2;255;121;81m█\x1b[0m\x1b[38;2;255;141;81m█\x1b[0m\x1b[38;2;255;161;81m█\x1b[0m\x1b[38;2;255;181;81m█\x1b[0m\x1b[38;2;255;201;81m█\x1b[0m\x1b[38;2;255;221;81m█\x1b[0m\x1b[38;2;255;241;81m█\x1b[0m\x1b[38;2;248;255;81m█\x1b[0m\x1b[38;2;228;255;81m█\x1b[0m\x1b[38;2;208;255;81m█\x1b[0m\x1b[38;2;188;255;81m█\x1b[0m\x1b[38;2;168;255;81m█\x1b[0m\x1b[38;2;148;255;81m█\x1b[0m\x1b[38;2;128;255;81m█\x1b[0m\x1b[38;2;108;255;81m█\x1b[0m\x1b[38;2;88;255;81m█\x1b[0m\x1b[38;2;81;255;94m█\x1b[0m\x1b[38;2;81;255;114m█\x1b[0m\x1b[38;2;81;255;134m█\x1b[0m\x1b[38;2;81;255;154m█\x1b[0m\x1b[38;2;81;255;174m█\x1b[0m\x1b[38;2;81;255;194m█\x1b[0m\x1b[38;2;81;255;214m█\x1b[0m\x1b[38;2;81;255;234m█\x1b[0m\x1b[38;2;81;254;255m█\x1b[0m\x1b[38;2;81;234;255m█\x1b[0m\x1b[38;2;81;214;255m█\x1b[0m\x1b[38;2;81;194;255m█\x1b[0m\x1b[38;2;81;174;255m█\x1b[0m\x1b[38;2;81;154;255m█\x1b[0m\x1b[38;2;81;134;255m█\x1b[0m\x1b[38;2;81;114;255m█\x1b[0m\x1b[38;2;81;94;255m█\x1b[0m\x1b[38;2;88;81;255m█\x1b[0m\x1b[38;2;108;81;255m█\x1b[0m\x1b[38;2;128;81;255m█\x1b[0m\x1b[38;2;148;81;255m█\x1b[0m\x1b[38;2;168;81;255m█\x1b[0m\x1b[38;2;188;81;255m█\x1b[0m\x1b[38;2;208;81;255m█\x1b[0m\x1b[38;2;228;81;255m█\x1b[0m\x1b[38;2;248;81;255m█\x1b[0m\x1b[38;2;255;81;241m█\x1b[0m\x1b[38;2;255;81;221m█\x1b[0m\x1b[38;2;255;81;201m█\x1b[0m\x1b[38;2;255;81;181m█\x1b[0m\x1b[38;2;255;81;161m█\x1b[0m\x1b[38;2;255;81;141m█\x1b[0m\x1b[38;2;255;81;121m█\x1b[0m\x1b[38;2;255;81;101m█\x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Styles \x1b[0m\x1b[1;31m \x1b[0mAll ansi styles: \x1b[1mbold\x1b[0m, \x1b[2mdim\x1b[0m, \x1b[3mitalic\x1b[0m, \x1b[4munderline\x1b[0m, \x1b[9mstrikethrough\x1b[0m, \x1b[7mreverse\x1b[0m, and even \n \x1b[5mblink\x1b[0m. \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Text \x1b[0m\x1b[1;31m \x1b[0mWord wrap text. Justify \x1b[32mleft\x1b[0m, \x1b[33mcenter\x1b[0m, \x1b[34mright\x1b[0m or \x1b[31mfull\x1b[0m. 
\n \n \x1b[32mLorem ipsum dolor \x1b[0m \x1b[33m Lorem ipsum dolor \x1b[0m \x1b[34m Lorem ipsum dolor\x1b[0m \x1b[31mLorem\x1b[0m\x1b[31m \x1b[0m\x1b[31mipsum\x1b[0m\x1b[31m \x1b[0m\x1b[31mdolor\x1b[0m\x1b[31m \x1b[0m\x1b[31msit\x1b[0m \n \x1b[32msit amet, \x1b[0m \x1b[33m sit amet, \x1b[0m \x1b[34m sit amet,\x1b[0m \x1b[31mamet,\x1b[0m\x1b[31m \x1b[0m\x1b[31mconsectetur\x1b[0m \n \x1b[32mconsectetur \x1b[0m \x1b[33m consectetur \x1b[0m \x1b[34m consectetur\x1b[0m \x1b[31madipiscing\x1b[0m\x1b[31m \x1b[0m\x1b[31melit.\x1b[0m \n \x1b[32madipiscing elit. \x1b[0m \x1b[33m adipiscing elit. \x1b[0m \x1b[34m adipiscing elit.\x1b[0m \x1b[31mQuisque\x1b[0m\x1b[31m \x1b[0m\x1b[31min\x1b[0m\x1b[31m \x1b[0m\x1b[31mmetus\x1b[0m\x1b[31m \x1b[0m\x1b[31msed\x1b[0m \n \x1b[32mQuisque in metus \x1b[0m \x1b[33mQuisque in metus sed\x1b[0m \x1b[34m Quisque in metus\x1b[0m \x1b[31msapien\x1b[0m\x1b[31m \x1b[0m\x1b[31multricies\x1b[0m \n \x1b[32msed sapien \x1b[0m \x1b[33m sapien ultricies \x1b[0m \x1b[34m sed sapien\x1b[0m \x1b[31mpretium\x1b[0m\x1b[31m \x1b[0m\x1b[31ma\x1b[0m\x1b[31m \x1b[0m\x1b[31mat\x1b[0m\x1b[31m \x1b[0m\x1b[31mjusto.\x1b[0m \n \x1b[32multricies pretium a\x1b[0m \x1b[33mpretium a at justo. \x1b[0m \x1b[34multricies pretium a\x1b[0m \x1b[31mMaecenas\x1b[0m\x1b[31m \x1b[0m\x1b[31mluctus\x1b[0m\x1b[31m \x1b[0m\x1b[31mvelit\x1b[0m \n \x1b[32mat justo. Maecenas \x1b[0m \x1b[33m Maecenas luctus \x1b[0m \x1b[34m at justo. Maecenas\x1b[0m \x1b[31met auctor maximus.\x1b[0m \n \x1b[32mluctus velit et \x1b[0m \x1b[33m velit et auctor \x1b[0m \x1b[34m luctus velit et\x1b[0m \n \x1b[32mauctor maximus. \x1b[0m \x1b[33m maximus. \x1b[0m \x1b[34m auctor maximus.\x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31mAsian languages\x1b[0m\x1b[1;31m \x1b[0m🇨🇳 该库支持中文,日文和韩文文本! 
\n 🇯🇵 ライブラリは中国語、日本語、韓国語のテキストをサポートしています \n 🇰🇷 이 라이브러리는 중국어, 일본어 및 한국어 텍스트를 지원합니다 \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31mConsole markup \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;35mRich\x1b[0m supports a simple \x1b[3mbbcode\x1b[0m like \x1b[1mmarkup\x1b[0m for \x1b[33mcolor\x1b[0m, \x1b[4mstyle\x1b[0m, and emoji! 👍 🍎 🐜 … \n 🥖 🚌 \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Tables \x1b[0m\x1b[1;31m \x1b[0m\x1b[1m \x1b[0m\x1b[1;32mDate\x1b[0m\x1b[1m \x1b[0m\x1b[1m \x1b[0m \x1b[1m \x1b[0m\x1b[1;34mTitle\x1b[0m\x1b[1m \x1b[0m\x1b[1m \x1b[0m \x1b[1m \x1b[0m\x1b[1;36mProduction Budget\x1b[0m\x1b[1m \x1b[0m \x1b[1m \x1b[0m\x1b[1m \x1b[0m\x1b[1;35mBox Office\x1b[0m\x1b[1m \x1b[0m \n ────────────────────────────────────────────────────────────────────────────────── \n \x1b[32m \x1b[0m\x1b[32mDec 20, 2019\x1b[0m\x1b[32m \x1b[0m \x1b[34m \x1b[0m\x1b[34mStar Wars: The Rise of \x1b[0m\x1b[34m \x1b[0m \x1b[36m \x1b[0m\x1b[36m $275,000,000\x1b[0m\x1b[36m \x1b[0m \x1b[35m \x1b[0m\x1b[35m $375,126,118\x1b[0m\x1b[35m \x1b[0m \n \x1b[34m \x1b[0m\x1b[34mSkywalker \x1b[0m\x1b[34m \x1b[0m \n \x1b[2;32m \x1b[0m\x1b[2;32mMay 25, 2018\x1b[0m\x1b[2;32m \x1b[0m \x1b[2;34m \x1b[0m\x1b[1;2;34mSolo\x1b[0m\x1b[2;34m: A Star Wars Story \x1b[0m\x1b[2;34m \x1b[0m \x1b[2;36m \x1b[0m\x1b[2;36m $275,000,000\x1b[0m\x1b[2;36m \x1b[0m \x1b[2;35m \x1b[0m\x1b[2;35m $393,151,347\x1b[0m\x1b[2;35m \x1b[0m \n \x1b[32m \x1b[0m\x1b[32mDec 15, 2017\x1b[0m\x1b[32m \x1b[0m \x1b[34m \x1b[0m\x1b[34mStar Wars Ep. VIII: The Last\x1b[0m\x1b[34m \x1b[0m \x1b[36m \x1b[0m\x1b[36m $262,000,000\x1b[0m\x1b[36m \x1b[0m \x1b[35m \x1b[0m\x1b[1;35m$1,332,539,889\x1b[0m\x1b[35m \x1b[0m \n \x1b[34m \x1b[0m\x1b[34mJedi \x1b[0m\x1b[34m \x1b[0m \n \x1b[2;32m \x1b[0m\x1b[2;32mMay 19, 1999\x1b[0m\x1b[2;32m \x1b[0m \x1b[2;34m \x1b[0m\x1b[2;34mStar Wars Ep. 
\x1b[0m\x1b[1;2;34mI\x1b[0m\x1b[2;34m: \x1b[0m\x1b[2;3;34mThe phantom\x1b[0m\x1b[2;34m \x1b[0m \x1b[2;36m \x1b[0m\x1b[2;36m $115,000,000\x1b[0m\x1b[2;36m \x1b[0m \x1b[2;35m \x1b[0m\x1b[2;35m$1,027,044,677\x1b[0m\x1b[2;35m \x1b[0m \n \x1b[2;34m \x1b[0m\x1b[2;3;34mMenace\x1b[0m\x1b[2;34m \x1b[0m\x1b[2;34m \x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Syntax \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 1 \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mdef\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34miter_last\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mIterable\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m[\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mT\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m]\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m \x1b[1m{\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m highlighting \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 2 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;230;219;116;48;2;39;40;34m\"\"\"Iterate and generate a tuple\x1b[0m \x1b[2;32m│ \x1b[0m\x1b[32m'foo'\x1b[0m: \x1b[1m[\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m & \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 3 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m 
\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ \x1b[0m\x1b[1;34m3.1427\x1b[0m, \n\x1b[1;31m \x1b[0m\x1b[1;31m pretty \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 4 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mtry\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ \x1b[0m\x1b[1m(\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m printing \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 5 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mnext\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_\x1b[0m \x1b[2;32m│ │ │ \x1b[0m\x1b[32m'Paul Atriedies'\x1b[0m, \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 6 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mexcept\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34mStopIteration\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ │ \x1b[0m\x1b[32m'Vladimir Harkonnen'\x1b[0m, \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 7 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mreturn\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ │ \x1b[0m\x1b[32m'Thufir Haway'\x1b[0m \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 8 
\x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mfor\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34min\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ \x1b[0m\x1b[1m)\x1b[0m \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 9 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mFalse\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m \x1b[2;32m│ \x1b[0m\x1b[1m]\x1b[0m, \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m10 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ \x1b[0m\x1b[32m'atomic'\x1b[0m: \x1b[1m(\x1b[0m\x1b[3;91mFalse\x1b[0m, \x1b[3;92mTrue\x1b[0m, \x1b[3;35mNone\x1b[0m\x1b[1m)\x1b[0m \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m11 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mTrue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[1m}\x1b[0m \n\x1b[1;31m \x1b[0m 
\n\x1b[1;31m \x1b[0m\x1b[1;31m Markdown \x1b[0m\x1b[1;31m \x1b[0m\x1b[36m# Markdown\x1b[0m ╔══════════════════════════════════════╗ \n ║ \x1b[1mMarkdown\x1b[0m ║ \n \x1b[36mSupports much of the *markdown*, \x1b[0m ╚══════════════════════════════════════╝ \n \x1b[36m__syntax__!\x1b[0m \n Supports much of the \x1b[3mmarkdown\x1b[0m, \x1b[1msyntax\x1b[0m! \n \x1b[36m- Headers\x1b[0m \n \x1b[36m- Basic formatting: **bold**, *italic*, \x1b[0m \x1b[1;33m • \x1b[0mHeaders \n \x1b[36m`code`\x1b[0m \x1b[1;33m • \x1b[0mBasic formatting: \x1b[1mbold\x1b[0m, \x1b[3mitalic\x1b[0m, \x1b[97;40mcode\x1b[0m \n \x1b[36m- Block quotes\x1b[0m \x1b[1;33m • \x1b[0mBlock quotes \n \x1b[36m- Lists, and more...\x1b[0m \x1b[1;33m • \x1b[0mLists, and more... \n \x1b[36m \x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m And more \x1b[0m\x1b[1;31m \x1b[0mProgress bars, columns, styled logging handler, tracebacks, etc... \n\x1b[1;31m \x1b[0m \n"
| 10,977.5
| 21,954
| 0.567069
|
expected = "\x1b[3m Rich features \x1b[0m\n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Colors \x1b[0m\x1b[1;31m \x1b[0m✓ \x1b[1;32m4-bit color\x1b[0m \x1b[38;2;51;0;0m█\x1b[0m\x1b[38;2;51;5;0m█\x1b[0m\x1b[38;2;51;11;0m█\x1b[0m\x1b[38;2;51;17;0m█\x1b[0m\x1b[38;2;51;23;0m█\x1b[0m\x1b[38;2;51;29;0m█\x1b[0m\x1b[38;2;51;35;0m█\x1b[0m\x1b[38;2;51;41;0m█\x1b[0m\x1b[38;2;51;47;0m█\x1b[0m\x1b[38;2;49;51;0m█\x1b[0m\x1b[38;2;43;51;0m█\x1b[0m\x1b[38;2;37;51;0m█\x1b[0m\x1b[38;2;31;51;0m█\x1b[0m\x1b[38;2;25;51;0m█\x1b[0m\x1b[38;2;19;51;0m█\x1b[0m\x1b[38;2;13;51;0m█\x1b[0m\x1b[38;2;7;51;0m█\x1b[0m\x1b[38;2;1;51;0m█\x1b[0m\x1b[38;2;0;51;3m█\x1b[0m\x1b[38;2;0;51;9m█\x1b[0m\x1b[38;2;0;51;15m█\x1b[0m\x1b[38;2;0;51;21m█\x1b[0m\x1b[38;2;0;51;27m█\x1b[0m\x1b[38;2;0;51;33m█\x1b[0m\x1b[38;2;0;51;39m█\x1b[0m\x1b[38;2;0;51;45m█\x1b[0m\x1b[38;2;0;50;51m█\x1b[0m\x1b[38;2;0;45;51m█\x1b[0m\x1b[38;2;0;39;51m█\x1b[0m\x1b[38;2;0;33;51m█\x1b[0m\x1b[38;2;0;27;51m█\x1b[0m\x1b[38;2;0;21;51m█\x1b[0m\x1b[38;2;0;15;51m█\x1b[0m\x1b[38;2;0;9;51m█\x1b[0m\x1b[38;2;0;3;51m█\x1b[0m\x1b[38;2;1;0;51m█\x1b[0m\x1b[38;2;7;0;51m█\x1b[0m\x1b[38;2;13;0;51m█\x1b[0m\x1b[38;2;19;0;51m█\x1b[0m\x1b[38;2;25;0;51m█\x1b[0m\x1b[38;2;31;0;51m█\x1b[0m\x1b[38;2;37;0;51m█\x1b[0m\x1b[38;2;43;0;51m█\x1b[0m\x1b[38;2;49;0;51m█\x1b[0m\x1b[38;2;51;0;47m█\x1b[0m\x1b[38;2;51;0;41m█\x1b[0m\x1b[38;2;51;0;35m█\x1b[0m\x1b[38;2;51;0;29m█\x1b[0m\x1b[38;2;51;0;23m█\x1b[0m\x1b[38;2;51;0;17m█\x1b[0m\x1b[38;2;51;0;11m█\x1b[0m\x1b[38;2;51;0;5m█\x1b[0m \n ✓ \x1b[1;34m8-bit color\x1b[0m 
\x1b[38;2;122;0;0m█\x1b[0m\x1b[38;2;122;14;0m█\x1b[0m\x1b[38;2;122;28;0m█\x1b[0m\x1b[38;2;122;42;0m█\x1b[0m\x1b[38;2;122;56;0m█\x1b[0m\x1b[38;2;122;70;0m█\x1b[0m\x1b[38;2;122;84;0m█\x1b[0m\x1b[38;2;122;98;0m█\x1b[0m\x1b[38;2;122;112;0m█\x1b[0m\x1b[38;2;117;122;0m█\x1b[0m\x1b[38;2;103;122;0m█\x1b[0m\x1b[38;2;89;122;0m█\x1b[0m\x1b[38;2;75;122;0m█\x1b[0m\x1b[38;2;61;122;0m█\x1b[0m\x1b[38;2;47;122;0m█\x1b[0m\x1b[38;2;32;122;0m█\x1b[0m\x1b[38;2;18;122;0m█\x1b[0m\x1b[38;2;4;122;0m█\x1b[0m\x1b[38;2;0;122;9m█\x1b[0m\x1b[38;2;0;122;23m█\x1b[0m\x1b[38;2;0;122;37m█\x1b[0m\x1b[38;2;0;122;51m█\x1b[0m\x1b[38;2;0;122;65m█\x1b[0m\x1b[38;2;0;122;80m█\x1b[0m\x1b[38;2;0;122;94m█\x1b[0m\x1b[38;2;0;122;108m█\x1b[0m\x1b[38;2;0;122;122m█\x1b[0m\x1b[38;2;0;108;122m█\x1b[0m\x1b[38;2;0;94;122m█\x1b[0m\x1b[38;2;0;80;122m█\x1b[0m\x1b[38;2;0;65;122m█\x1b[0m\x1b[38;2;0;51;122m█\x1b[0m\x1b[38;2;0;37;122m█\x1b[0m\x1b[38;2;0;23;122m█\x1b[0m\x1b[38;2;0;9;122m█\x1b[0m\x1b[38;2;4;0;122m█\x1b[0m\x1b[38;2;18;0;122m█\x1b[0m\x1b[38;2;32;0;122m█\x1b[0m\x1b[38;2;47;0;122m█\x1b[0m\x1b[38;2;61;0;122m█\x1b[0m\x1b[38;2;75;0;122m█\x1b[0m\x1b[38;2;89;0;122m█\x1b[0m\x1b[38;2;103;0;122m█\x1b[0m\x1b[38;2;117;0;122m█\x1b[0m\x1b[38;2;122;0;112m█\x1b[0m\x1b[38;2;122;0;98m█\x1b[0m\x1b[38;2;122;0;84m█\x1b[0m\x1b[38;2;122;0;70m█\x1b[0m\x1b[38;2;122;0;56m█\x1b[0m\x1b[38;2;122;0;42m█\x1b[0m\x1b[38;2;122;0;28m█\x1b[0m\x1b[38;2;122;0;14m█\x1b[0m \n ✓ \x1b[1;35mTruecolor (16.7 million)\x1b[0m 
\x1b[38;2;193;0;0m█\x1b[0m\x1b[38;2;193;22;0m█\x1b[0m\x1b[38;2;193;44;0m█\x1b[0m\x1b[38;2;193;67;0m█\x1b[0m\x1b[38;2;193;89;0m█\x1b[0m\x1b[38;2;193;111;0m█\x1b[0m\x1b[38;2;193;134;0m█\x1b[0m\x1b[38;2;193;156;0m█\x1b[0m\x1b[38;2;193;178;0m█\x1b[0m\x1b[38;2;186;193;0m█\x1b[0m\x1b[38;2;163;193;0m█\x1b[0m\x1b[38;2;141;193;0m█\x1b[0m\x1b[38;2;119;193;0m█\x1b[0m\x1b[38;2;96;193;0m█\x1b[0m\x1b[38;2;74;193;0m█\x1b[0m\x1b[38;2;52;193;0m█\x1b[0m\x1b[38;2;29;193;0m█\x1b[0m\x1b[38;2;7;193;0m█\x1b[0m\x1b[38;2;0;193;14m█\x1b[0m\x1b[38;2;0;193;37m█\x1b[0m\x1b[38;2;0;193;59m█\x1b[0m\x1b[38;2;0;193;81m█\x1b[0m\x1b[38;2;0;193;104m█\x1b[0m\x1b[38;2;0;193;126m█\x1b[0m\x1b[38;2;0;193;149m█\x1b[0m\x1b[38;2;0;193;171m█\x1b[0m\x1b[38;2;0;193;193m█\x1b[0m\x1b[38;2;0;171;193m█\x1b[0m\x1b[38;2;0;149;193m█\x1b[0m\x1b[38;2;0;126;193m█\x1b[0m\x1b[38;2;0;104;193m█\x1b[0m\x1b[38;2;0;81;193m█\x1b[0m\x1b[38;2;0;59;193m█\x1b[0m\x1b[38;2;0;37;193m█\x1b[0m\x1b[38;2;0;14;193m█\x1b[0m\x1b[38;2;7;0;193m█\x1b[0m\x1b[38;2;29;0;193m█\x1b[0m\x1b[38;2;52;0;193m█\x1b[0m\x1b[38;2;74;0;193m█\x1b[0m\x1b[38;2;96;0;193m█\x1b[0m\x1b[38;2;119;0;193m█\x1b[0m\x1b[38;2;141;0;193m█\x1b[0m\x1b[38;2;163;0;193m█\x1b[0m\x1b[38;2;186;0;193m█\x1b[0m\x1b[38;2;193;0;178m█\x1b[0m\x1b[38;2;193;0;156m█\x1b[0m\x1b[38;2;193;0;134m█\x1b[0m\x1b[38;2;193;0;111m█\x1b[0m\x1b[38;2;193;0;89m█\x1b[0m\x1b[38;2;193;0;67m█\x1b[0m\x1b[38;2;193;0;44m█\x1b[0m\x1b[38;2;193;0;22m█\x1b[0m \n ✓ \x1b[1;33mDumb terminals\x1b[0m 
\x1b[38;2;255;10;10m█\x1b[0m\x1b[38;2;255;38;10m█\x1b[0m\x1b[38;2;255;66;10m█\x1b[0m\x1b[38;2;255;94;10m█\x1b[0m\x1b[38;2;255;123;10m█\x1b[0m\x1b[38;2;255;151;10m█\x1b[0m\x1b[38;2;255;179;10m█\x1b[0m\x1b[38;2;255;207;10m█\x1b[0m\x1b[38;2;255;236;10m█\x1b[0m\x1b[38;2;245;255;10m█\x1b[0m\x1b[38;2;217;255;10m█\x1b[0m\x1b[38;2;189;255;10m█\x1b[0m\x1b[38;2;160;255;10m█\x1b[0m\x1b[38;2;132;255;10m█\x1b[0m\x1b[38;2;104;255;10m█\x1b[0m\x1b[38;2;76;255;10m█\x1b[0m\x1b[38;2;47;255;10m█\x1b[0m\x1b[38;2;19;255;10m█\x1b[0m\x1b[38;2;10;255;29m█\x1b[0m\x1b[38;2;10;255;57m█\x1b[0m\x1b[38;2;10;255;85m█\x1b[0m\x1b[38;2;10;255;113m█\x1b[0m\x1b[38;2;10;255;142m█\x1b[0m\x1b[38;2;10;255;170m█\x1b[0m\x1b[38;2;10;255;198m█\x1b[0m\x1b[38;2;10;255;226m█\x1b[0m\x1b[38;2;10;254;255m█\x1b[0m\x1b[38;2;10;226;255m█\x1b[0m\x1b[38;2;10;198;255m█\x1b[0m\x1b[38;2;10;170;255m█\x1b[0m\x1b[38;2;10;142;255m█\x1b[0m\x1b[38;2;10;113;255m█\x1b[0m\x1b[38;2;10;85;255m█\x1b[0m\x1b[38;2;10;57;255m█\x1b[0m\x1b[38;2;10;29;255m█\x1b[0m\x1b[38;2;19;10;255m█\x1b[0m\x1b[38;2;47;10;255m█\x1b[0m\x1b[38;2;76;10;255m█\x1b[0m\x1b[38;2;104;10;255m█\x1b[0m\x1b[38;2;132;10;255m█\x1b[0m\x1b[38;2;160;10;255m█\x1b[0m\x1b[38;2;189;10;255m█\x1b[0m\x1b[38;2;217;10;255m█\x1b[0m\x1b[38;2;245;10;255m█\x1b[0m\x1b[38;2;255;10;236m█\x1b[0m\x1b[38;2;255;10;207m█\x1b[0m\x1b[38;2;255;10;179m█\x1b[0m\x1b[38;2;255;10;151m█\x1b[0m\x1b[38;2;255;10;123m█\x1b[0m\x1b[38;2;255;10;94m█\x1b[0m\x1b[38;2;255;10;66m█\x1b[0m\x1b[38;2;255;10;38m█\x1b[0m \n ✓ \x1b[1;36mAutomatic color conversion\x1b[0m 
\x1b[38;2;255;81;81m█\x1b[0m\x1b[38;2;255;101;81m█\x1b[0m\x1b[38;2;255;121;81m█\x1b[0m\x1b[38;2;255;141;81m█\x1b[0m\x1b[38;2;255;161;81m█\x1b[0m\x1b[38;2;255;181;81m█\x1b[0m\x1b[38;2;255;201;81m█\x1b[0m\x1b[38;2;255;221;81m█\x1b[0m\x1b[38;2;255;241;81m█\x1b[0m\x1b[38;2;248;255;81m█\x1b[0m\x1b[38;2;228;255;81m█\x1b[0m\x1b[38;2;208;255;81m█\x1b[0m\x1b[38;2;188;255;81m█\x1b[0m\x1b[38;2;168;255;81m█\x1b[0m\x1b[38;2;148;255;81m█\x1b[0m\x1b[38;2;128;255;81m█\x1b[0m\x1b[38;2;108;255;81m█\x1b[0m\x1b[38;2;88;255;81m█\x1b[0m\x1b[38;2;81;255;94m█\x1b[0m\x1b[38;2;81;255;114m█\x1b[0m\x1b[38;2;81;255;134m█\x1b[0m\x1b[38;2;81;255;154m█\x1b[0m\x1b[38;2;81;255;174m█\x1b[0m\x1b[38;2;81;255;194m█\x1b[0m\x1b[38;2;81;255;214m█\x1b[0m\x1b[38;2;81;255;234m█\x1b[0m\x1b[38;2;81;254;255m█\x1b[0m\x1b[38;2;81;234;255m█\x1b[0m\x1b[38;2;81;214;255m█\x1b[0m\x1b[38;2;81;194;255m█\x1b[0m\x1b[38;2;81;174;255m█\x1b[0m\x1b[38;2;81;154;255m█\x1b[0m\x1b[38;2;81;134;255m█\x1b[0m\x1b[38;2;81;114;255m█\x1b[0m\x1b[38;2;81;94;255m█\x1b[0m\x1b[38;2;88;81;255m█\x1b[0m\x1b[38;2;108;81;255m█\x1b[0m\x1b[38;2;128;81;255m█\x1b[0m\x1b[38;2;148;81;255m█\x1b[0m\x1b[38;2;168;81;255m█\x1b[0m\x1b[38;2;188;81;255m█\x1b[0m\x1b[38;2;208;81;255m█\x1b[0m\x1b[38;2;228;81;255m█\x1b[0m\x1b[38;2;248;81;255m█\x1b[0m\x1b[38;2;255;81;241m█\x1b[0m\x1b[38;2;255;81;221m█\x1b[0m\x1b[38;2;255;81;201m█\x1b[0m\x1b[38;2;255;81;181m█\x1b[0m\x1b[38;2;255;81;161m█\x1b[0m\x1b[38;2;255;81;141m█\x1b[0m\x1b[38;2;255;81;121m█\x1b[0m\x1b[38;2;255;81;101m█\x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Styles \x1b[0m\x1b[1;31m \x1b[0mAll ansi styles: \x1b[1mbold\x1b[0m, \x1b[2mdim\x1b[0m, \x1b[3mitalic\x1b[0m, \x1b[4munderline\x1b[0m, \x1b[9mstrikethrough\x1b[0m, \x1b[7mreverse\x1b[0m, and even \n \x1b[5mblink\x1b[0m. \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Text \x1b[0m\x1b[1;31m \x1b[0mWord wrap text. Justify \x1b[32mleft\x1b[0m, \x1b[33mcenter\x1b[0m, \x1b[34mright\x1b[0m or \x1b[31mfull\x1b[0m. 
\n \n \x1b[32mLorem ipsum dolor \x1b[0m \x1b[33m Lorem ipsum dolor \x1b[0m \x1b[34m Lorem ipsum dolor\x1b[0m \x1b[31mLorem\x1b[0m\x1b[31m \x1b[0m\x1b[31mipsum\x1b[0m\x1b[31m \x1b[0m\x1b[31mdolor\x1b[0m\x1b[31m \x1b[0m\x1b[31msit\x1b[0m \n \x1b[32msit amet, \x1b[0m \x1b[33m sit amet, \x1b[0m \x1b[34m sit amet,\x1b[0m \x1b[31mamet,\x1b[0m\x1b[31m \x1b[0m\x1b[31mconsectetur\x1b[0m \n \x1b[32mconsectetur \x1b[0m \x1b[33m consectetur \x1b[0m \x1b[34m consectetur\x1b[0m \x1b[31madipiscing\x1b[0m\x1b[31m \x1b[0m\x1b[31melit.\x1b[0m \n \x1b[32madipiscing elit. \x1b[0m \x1b[33m adipiscing elit. \x1b[0m \x1b[34m adipiscing elit.\x1b[0m \x1b[31mQuisque\x1b[0m\x1b[31m \x1b[0m\x1b[31min\x1b[0m\x1b[31m \x1b[0m\x1b[31mmetus\x1b[0m\x1b[31m \x1b[0m\x1b[31msed\x1b[0m \n \x1b[32mQuisque in metus \x1b[0m \x1b[33mQuisque in metus sed\x1b[0m \x1b[34m Quisque in metus\x1b[0m \x1b[31msapien\x1b[0m\x1b[31m \x1b[0m\x1b[31multricies\x1b[0m \n \x1b[32msed sapien \x1b[0m \x1b[33m sapien ultricies \x1b[0m \x1b[34m sed sapien\x1b[0m \x1b[31mpretium\x1b[0m\x1b[31m \x1b[0m\x1b[31ma\x1b[0m\x1b[31m \x1b[0m\x1b[31mat\x1b[0m\x1b[31m \x1b[0m\x1b[31mjusto.\x1b[0m \n \x1b[32multricies pretium a\x1b[0m \x1b[33mpretium a at justo. \x1b[0m \x1b[34multricies pretium a\x1b[0m \x1b[31mMaecenas\x1b[0m\x1b[31m \x1b[0m\x1b[31mluctus\x1b[0m\x1b[31m \x1b[0m\x1b[31mvelit\x1b[0m \n \x1b[32mat justo. Maecenas \x1b[0m \x1b[33m Maecenas luctus \x1b[0m \x1b[34m at justo. Maecenas\x1b[0m \x1b[31met auctor maximus.\x1b[0m \n \x1b[32mluctus velit et \x1b[0m \x1b[33m velit et auctor \x1b[0m \x1b[34m luctus velit et\x1b[0m \n \x1b[32mauctor maximus. \x1b[0m \x1b[33m maximus. \x1b[0m \x1b[34m auctor maximus.\x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31mAsian languages\x1b[0m\x1b[1;31m \x1b[0m🇨🇳 该库支持中文,日文和韩文文本! 
\n 🇯🇵 ライブラリは中国語、日本語、韓国語のテキストをサポートしています \n 🇰🇷 이 라이브러리는 중국어, 일본어 및 한국어 텍스트를 지원합니다 \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31mConsole markup \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;35mRich\x1b[0m supports a simple \x1b[3mbbcode\x1b[0m like \x1b[1mmarkup\x1b[0m for \x1b[33mcolor\x1b[0m, \x1b[4mstyle\x1b[0m, and emoji! 👍 🍎 🐜 … \n 🥖 🚌 \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Tables \x1b[0m\x1b[1;31m \x1b[0m\x1b[1m \x1b[0m\x1b[1;32mDate\x1b[0m\x1b[1m \x1b[0m\x1b[1m \x1b[0m \x1b[1m \x1b[0m\x1b[1;34mTitle\x1b[0m\x1b[1m \x1b[0m\x1b[1m \x1b[0m \x1b[1m \x1b[0m\x1b[1;36mProduction Budget\x1b[0m\x1b[1m \x1b[0m \x1b[1m \x1b[0m\x1b[1m \x1b[0m\x1b[1;35mBox Office\x1b[0m\x1b[1m \x1b[0m \n ────────────────────────────────────────────────────────────────────────────────── \n \x1b[32m \x1b[0m\x1b[32mDec 20, 2019\x1b[0m\x1b[32m \x1b[0m \x1b[34m \x1b[0m\x1b[34mStar Wars: The Rise of \x1b[0m\x1b[34m \x1b[0m \x1b[36m \x1b[0m\x1b[36m $275,000,000\x1b[0m\x1b[36m \x1b[0m \x1b[35m \x1b[0m\x1b[35m $375,126,118\x1b[0m\x1b[35m \x1b[0m \n \x1b[34m \x1b[0m\x1b[34mSkywalker \x1b[0m\x1b[34m \x1b[0m \n \x1b[2;32m \x1b[0m\x1b[2;32mMay 25, 2018\x1b[0m\x1b[2;32m \x1b[0m \x1b[2;34m \x1b[0m\x1b[1;2;34mSolo\x1b[0m\x1b[2;34m: A Star Wars Story \x1b[0m\x1b[2;34m \x1b[0m \x1b[2;36m \x1b[0m\x1b[2;36m $275,000,000\x1b[0m\x1b[2;36m \x1b[0m \x1b[2;35m \x1b[0m\x1b[2;35m $393,151,347\x1b[0m\x1b[2;35m \x1b[0m \n \x1b[32m \x1b[0m\x1b[32mDec 15, 2017\x1b[0m\x1b[32m \x1b[0m \x1b[34m \x1b[0m\x1b[34mStar Wars Ep. VIII: The Last\x1b[0m\x1b[34m \x1b[0m \x1b[36m \x1b[0m\x1b[36m $262,000,000\x1b[0m\x1b[36m \x1b[0m \x1b[35m \x1b[0m\x1b[1;35m$1,332,539,889\x1b[0m\x1b[35m \x1b[0m \n \x1b[34m \x1b[0m\x1b[34mJedi \x1b[0m\x1b[34m \x1b[0m \n \x1b[2;32m \x1b[0m\x1b[2;32mMay 19, 1999\x1b[0m\x1b[2;32m \x1b[0m \x1b[2;34m \x1b[0m\x1b[2;34mStar Wars Ep. 
\x1b[0m\x1b[1;2;34mI\x1b[0m\x1b[2;34m: \x1b[0m\x1b[2;3;34mThe phantom\x1b[0m\x1b[2;34m \x1b[0m \x1b[2;36m \x1b[0m\x1b[2;36m $115,000,000\x1b[0m\x1b[2;36m \x1b[0m \x1b[2;35m \x1b[0m\x1b[2;35m$1,027,044,677\x1b[0m\x1b[2;35m \x1b[0m \n \x1b[2;34m \x1b[0m\x1b[2;3;34mMenace\x1b[0m\x1b[2;34m \x1b[0m\x1b[2;34m \x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Syntax \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 1 \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mdef\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34miter_last\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mIterable\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m[\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mT\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m]\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m \x1b[1m{\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m highlighting \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 2 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;230;219;116;48;2;39;40;34m\"\"\"Iterate and generate a tuple\x1b[0m \x1b[2;32m│ \x1b[0m\x1b[32m'foo'\x1b[0m: \x1b[1m[\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m & \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 3 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m 
\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ \x1b[0m\x1b[1;34m3.1427\x1b[0m, \n\x1b[1;31m \x1b[0m\x1b[1;31m pretty \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 4 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mtry\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ \x1b[0m\x1b[1m(\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m printing \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 5 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mnext\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_\x1b[0m \x1b[2;32m│ │ │ \x1b[0m\x1b[32m'Paul Atriedies'\x1b[0m, \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 6 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mexcept\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34mStopIteration\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ │ \x1b[0m\x1b[32m'Vladimir Harkonnen'\x1b[0m, \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 7 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mreturn\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ │ \x1b[0m\x1b[32m'Thufir Haway'\x1b[0m \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 8 
\x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mfor\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34min\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ \x1b[0m\x1b[1m)\x1b[0m \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 9 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mFalse\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m \x1b[2;32m│ \x1b[0m\x1b[1m]\x1b[0m, \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m10 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ \x1b[0m\x1b[32m'atomic'\x1b[0m: \x1b[1m(\x1b[0m\x1b[3;91mFalse\x1b[0m, \x1b[3;92mTrue\x1b[0m, \x1b[3;35mNone\x1b[0m\x1b[1m)\x1b[0m \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m11 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mTrue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[1m}\x1b[0m \n\x1b[1;31m \x1b[0m 
\n\x1b[1;31m \x1b[0m\x1b[1;31m Markdown \x1b[0m\x1b[1;31m \x1b[0m\x1b[36m
| true
| true
|
f70e235b5231d04e223b326cbb3a98dddd6fbd67
| 59
|
py
|
Python
|
auto_argparse/__init__.py
|
Delrom01/auto-argparse
|
30e242979407b87bb3d6bc62e57816078642d7fc
|
[
"MIT"
] | 1
|
2021-10-11T14:43:11.000Z
|
2021-10-11T14:43:11.000Z
|
auto_argparse/__init__.py
|
Delrom01/auto-argparse
|
30e242979407b87bb3d6bc62e57816078642d7fc
|
[
"MIT"
] | 5
|
2020-05-15T16:26:57.000Z
|
2021-01-25T15:47:28.000Z
|
auto_argparse/__init__.py
|
Delrom01/auto-argparse
|
30e242979407b87bb3d6bc62e57816078642d7fc
|
[
"MIT"
] | null | null | null |
from .auto_argparse import make_parser, parse_args_and_run
| 29.5
| 58
| 0.881356
|
from .auto_argparse import make_parser, parse_args_and_run
| true
| true
|
f70e2360f966b6b225b28079c42dbd2f02ff9bfa
| 20,493
|
py
|
Python
|
third_party/mlir/utils/spirv/gen_spirv_dialect.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
third_party/mlir/utils/spirv/gen_spirv_dialect.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
third_party/mlir/utils/spirv/gen_spirv_dialect.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The MLIR Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script for updating SPIR-V dialect by scraping information from SPIR-V
# HTML and JSON specs from the Internet.
#
# For example, to define the enum attribute for SPIR-V memory model:
#
# ./gen_spirv_dialect.py --base-td-path /path/to/SPIRVBase.td \
# --new-enum MemoryModel
#
# The 'operand_kinds' dict of spirv.core.grammar.json contains all supported
# SPIR-V enum classes.
import re
import requests
import textwrap
# Source-of-truth URLs for the SPIR-V spec: the HTML spec supplies the
# per-instruction documentation text; the JSON grammar supplies operand
# kinds, instruction operands, and opcodes.
SPIRV_HTML_SPEC_URL = 'https://www.khronos.org/registry/spir-v/specs/unified1/SPIRV.html'
SPIRV_JSON_SPEC_URL = 'https://raw.githubusercontent.com/KhronosGroup/SPIRV-Headers/master/include/spirv/unified1/spirv.core.grammar.json'
# Separator placed between autogenerated op definitions in SPIRVOps.td.
AUTOGEN_OP_DEF_SEPARATOR = '\n// -----\n\n'
# Markers delimiting the autogenerated sections of SPIRVBase.td; each marker
# appears twice (begin/end), and the update_td_* functions rewrite only the
# text between the two occurrences.
AUTOGEN_ENUM_SECTION_MARKER = 'enum section. Generated from SPIR-V spec; DO NOT MODIFY!'
AUTOGEN_OPCODE_SECTION_MARKER = (
    'opcode section. Generated from SPIR-V spec; DO NOT MODIFY!')
def get_spirv_doc_from_html_spec():
  """Extracts instruction documentation from SPIR-V HTML spec.

  Fetches the spec over the network and scrapes each instruction table's
  first cell, which holds the anchored opname followed by the doc text.

  Returns:
    - A dict mapping from instruction opcode to documentation.
  """
  response = requests.get(SPIRV_HTML_SPEC_URL)
  spec = response.content
  # bs4 is only needed on this scraping path, so import it lazily.
  from bs4 import BeautifulSoup
  spirv = BeautifulSoup(spec, 'html.parser')
  # Anchor id comes from the "Instructions" heading in the spec's HTML.
  section_anchor = spirv.find('h3', {'id': '_a_id_instructions_a_instructions'})
  doc = {}
  for section in section_anchor.parent.find_all('div', {'class': 'sect3'}):
    for table in section.find_all('table'):
      inst_html = table.tbody.tr.td.p
      opname = inst_html.a['id']
      # Ignore the first line, which is just the opname.
      doc[opname] = inst_html.text.split('\n', 1)[1].strip()
  return doc
def get_spirv_grammar_from_json_spec():
  """Extracts operand kind and instruction grammar from SPIR-V JSON spec.

  Returns:
    - A list containing all operand kinds' grammar
    - A list containing all instructions' grammar
  """
  # json is only needed on this path, so import it lazily (matches the
  # lazy bs4 import in the HTML-scraping sibling).
  import json
  raw = requests.get(SPIRV_JSON_SPEC_URL).content
  grammar = json.loads(raw)
  return grammar['operand_kinds'], grammar['instructions']
def split_list_into_sublists(items, offset):
  """Splits the list of items into multiple sublists.

  Each sublist collects items until the accumulated length (each item plus
  a two-character ', ' separator) exceeds 80, so the string later composed
  from one sublist stays near a single line.

  Arguments:
    - items: a list of strings
    - offset: the offset in calculating each sublist's length
      (NOTE(review): currently not used by the computation — the 80-char
      budget is fixed; confirm whether callers expect it to be honored)
  """
  sublists = []
  current = []
  current_len = 0
  for item in items:
    current_len += len(item) + 2  # +2 accounts for the ', ' separator
    if current_len > 80:
      # Budget exceeded: close the current sublist and start a new one
      # seeded with this item.
      sublists.append(current)
      current = [item]
      current_len = len(item) + 2
    else:
      current.append(item)
  if current:
    sublists.append(current)
  return sublists
def uniquify(lst, equality_fn):
  """Returns a list after pruning duplicate elements.

  Arguments:
   - lst: List whose elements are to be uniqued.
   - equality_fn: Function used to derive the comparison key for each
     element of the list.

  Returns:
   - A list with all duplicates removed. The order of elements is same as
     the original list, with only the first occurrence of duplicates
     retained.
  """
  keys = set()
  unique_lst = []
  for elem in lst:
    key = equality_fn(elem)
    # BUG FIX: test the derived key itself for membership. The previous
    # code applied equality_fn a second time (`equality_fn(key)`), which
    # is wrong and can raise for keys equality_fn cannot accept (e.g. the
    # integer enum values this is called with in gen_operand_kind_enum_attr).
    if key not in keys:
      unique_lst.append(elem)
      keys.add(key)
  return unique_lst
def gen_operand_kind_enum_attr(operand_kind):
  """Generates the TableGen I32EnumAttr definition for the given operand kind.
  Returns:
    - The operand kind's name
    - A string containing the TableGen I32EnumAttr definition
  """
  # Kinds without enumerants (e.g. literals, <id>s) have no enum to emit.
  if 'enumerants' not in operand_kind:
    return '', ''
  kind_name = operand_kind['kind']
  # Acronym = the upper-case letters of the kind name, e.g. MemoryModel -> MM.
  kind_acronym = ''.join([c for c in kind_name if c >= 'A' and c <= 'Z'])
  kind_cases = [(case['enumerant'], case['value'])
                for case in operand_kind['enumerants']]
  # Aliased enumerants share one value; keep only the first symbol per value.
  kind_cases = uniquify(kind_cases, lambda x: x[1])
  max_len = max([len(symbol) for (symbol, _) in kind_cases])
  # Generate the definition for each enum case
  fmt_str = 'def SPV_{acronym}_{symbol} {colon:>{offset}} '\
            'I32EnumAttrCase<"{symbol}", {value}>;'
  case_defs = [
      fmt_str.format(
          acronym=kind_acronym,
          symbol=case[0],
          value=case[1],
          colon=':',
          # Right-align ':' so the I32EnumAttrCase columns line up.
          offset=(max_len + 1 - len(case[0]))) for case in kind_cases
  ]
  case_defs = '\n'.join(case_defs)
  # Generate the list of enum case names
  fmt_str = 'SPV_{acronym}_{symbol}';
  case_names = [fmt_str.format(acronym=kind_acronym,symbol=case[0])
                for case in kind_cases]
  # Split them into sublists and concatenate into multiple lines
  case_names = split_list_into_sublists(case_names, 6)
  case_names = ['{:6}'.format('') + ', '.join(sublist)
                for sublist in case_names]
  case_names = ',\n'.join(case_names)
  # Generate the enum attribute definition
  enum_attr = 'def SPV_{name}Attr :\n '\
      'I32EnumAttr<"{name}", "valid SPIR-V {name}", [\n{cases}\n ]> {{\n'\
      ' let returnType = "::mlir::spirv::{name}";\n'\
      ' let convertFromStorage = '\
      '"static_cast<::mlir::spirv::{name}>($_self.getInt())";\n'\
      ' let cppNamespace = "::mlir::spirv";\n}}'.format(
          name=kind_name, cases=case_names)
  return kind_name, case_defs + '\n\n' + enum_attr
def gen_opcode(instructions):
  """ Generates the TableGen definition to map opname to opcode
  Returns:
    - A string containing the TableGen SPV_OpCode definition
  """
  max_len = max([len(inst['opname']) for inst in instructions])
  # One I32EnumAttrCase per instruction; ':' is right-aligned so the
  # generated columns line up.
  def_fmt_str = 'def SPV_OC_{name} {colon:>{offset}} '\
                'I32EnumAttrCase<"{name}", {value}>;'
  opcode_defs = [
      def_fmt_str.format(
          name=inst['opname'],
          value=inst['opcode'],
          colon=':',
          offset=(max_len + 1 - len(inst['opname']))) for inst in instructions
  ]
  opcode_str = '\n'.join(opcode_defs)
  # Build the comma-separated case-name list, wrapped to ~80 columns.
  decl_fmt_str = 'SPV_OC_{name}'
  opcode_list = [
      decl_fmt_str.format(name=inst['opname']) for inst in instructions
  ]
  opcode_list = split_list_into_sublists(opcode_list, 6)
  opcode_list = [
      '{:6}'.format('') + ', '.join(sublist) for sublist in opcode_list
  ]
  opcode_list = ',\n'.join(opcode_list)
  # Emit the enum attribute wrapping all opcode cases.
  enum_attr = 'def SPV_OpcodeAttr :\n'\
      ' I32EnumAttr<"{name}", "valid SPIR-V instructions", [\n'\
      '{lst}\n'\
      ' ]> {{\n'\
      ' let returnType = "::mlir::spirv::{name}";\n'\
      ' let convertFromStorage = '\
      '"static_cast<::mlir::spirv::{name}>($_self.getInt())";\n'\
      ' let cppNamespace = "::mlir::spirv";\n}}'.format(
          name='Opcode', lst=opcode_list)
  return opcode_str + '\n\n' + enum_attr
def update_td_opcodes(path, instructions, filter_list):
  """Updates SPIRBase.td with new generated opcode cases.
  Arguments:
    - path: the path to SPIRBase.td
    - instructions: a list containing all SPIR-V instructions' grammar
    - filter_list: a list containing new opnames to add
  """
  with open(path, 'r') as f:
    content = f.read()
  # The marker appears twice (begin/end): a valid file splits into exactly
  # 3 parts, and only the middle part is regenerated.
  content = content.split(AUTOGEN_OPCODE_SECTION_MARKER)
  assert len(content) == 3
  # Extend opcode list with existing list
  # (k[11:] drops the 'def SPV_OC_' prefix.)
  existing_opcodes = [k[11:] for k in re.findall('def SPV_OC_\w+', content[1])]
  filter_list.extend(existing_opcodes)
  filter_list = list(set(filter_list))
  # Generate the opcode for all instructions in SPIR-V
  filter_instrs = list(
      filter(lambda inst: (inst['opname'] in filter_list), instructions))
  # Sort instruction based on opcode
  filter_instrs.sort(key=lambda inst: inst['opcode'])
  opcode = gen_opcode(filter_instrs)
  # Substitute the opcode
  content = content[0] + AUTOGEN_OPCODE_SECTION_MARKER + '\n\n' + \
      opcode + '\n\n// End ' + AUTOGEN_OPCODE_SECTION_MARKER \
      + content[2]
  with open(path, 'w') as f:
    f.write(content)
def update_td_enum_attrs(path, operand_kinds, filter_list):
  """Updates SPIRBase.td with new generated enum definitions.
  Arguments:
    - path: the path to SPIRBase.td
    - operand_kinds: a list containing all operand kinds' grammar
    - filter_list: a list containing new enums to add
  """
  with open(path, 'r') as f:
    content = f.read()
  # Marker appears twice (begin/end); only the middle part is regenerated.
  content = content.split(AUTOGEN_ENUM_SECTION_MARKER)
  assert len(content) == 3
  # Extend filter list with existing enum definitions
  # (k[8:-4] drops the 'def SPV_' prefix and 'Attr' suffix.)
  existing_kinds = [
      k[8:-4] for k in re.findall('def SPV_\w+Attr', content[1])]
  filter_list.extend(existing_kinds)
  # Generate definitions for all enums in filter list
  defs = [gen_operand_kind_enum_attr(kind)
          for kind in operand_kinds if kind['kind'] in filter_list]
  # Sort alphabetically according to enum name
  defs.sort(key=lambda enum : enum[0])
  # Only keep the definitions from now on
  defs = [enum[1] for enum in defs]
  # Substitute the old section
  content = content[0] + AUTOGEN_ENUM_SECTION_MARKER + '\n\n' + \
      '\n\n'.join(defs) + "\n\n// End " + AUTOGEN_ENUM_SECTION_MARKER \
      + content[2];
  with open(path, 'w') as f:
    f.write(content)
def snake_casify(name):
  """Turns the given name to follow snake_case convention.

  NOTE(review): re.sub(r'\W+', '', ...) removes whitespace along with all
  other non-word characters, so a multi-word name collapses into a single
  lower-cased token before the split/join — confirm this is intended.
  """
  words = re.sub(r'\W+', '', name).split()
  return '_'.join(word.lower() for word in words)
def map_spec_operand_to_ods_argument(operand):
  """Maps an operand in the SPIR-V JSON spec to an op argument in ODS.

  Arguments:
    - operand: a dict containing the operand's kind, quantifier, and name

  Returns:
    - A string containing both the type and name for the argument
  """
  kind = operand['kind']
  quantifier = operand.get('quantifier', '')

  # These instruction "operands" are for encoding the results; they should
  # not be handled here.
  assert kind != 'IdResultType', 'unexpected to handle "IdResultType" kind'
  assert kind != 'IdResult', 'unexpected to handle "IdResult" kind'

  if kind == 'IdRef':
    # Quantifier '' means exactly one, '?' means optional, '*' variadic.
    arg_type = {
        '': 'SPV_Type',
        '?': 'SPV_Optional<SPV_Type>',
    }.get(quantifier, 'Variadic<SPV_Type>')
  elif kind in ('IdMemorySemantics', 'IdScope'):
    # TODO(antiagainst): Need to further constrain 'IdMemorySemantics'
    # and 'IdScope' given that they should be generated from OpConstant.
    assert quantifier == '', ('unexpected to have optional/variadic memory '
                              'semantics or scope <id>')
    arg_type = 'I32'
  elif kind == 'LiteralInteger':
    arg_type = {
        '': 'I32Attr',
        '?': 'OptionalAttr<I32Attr>',
    }.get(quantifier, 'OptionalAttr<I32ArrayAttr>')
  elif kind in ('LiteralString', 'LiteralContextDependentNumber',
                'LiteralExtInstInteger', 'LiteralSpecConstantOpInteger',
                'PairLiteralIntegerIdRef', 'PairIdRefLiteralInteger',
                'PairIdRefIdRef'):
    assert False, '"{}" kind unimplemented'.format(kind)
  else:
    # The rest are all enum operands that we represent with op attributes.
    assert quantifier != '*', 'unexpected to have variadic enum attribute'
    arg_type = 'SPV_{}Attr'.format(kind)
    if quantifier == '?':
      arg_type = 'OptionalAttr<{}>'.format(arg_type)

  # Fall back to the lower-cased kind when the spec gives no operand name.
  name = operand.get('name', '')
  name = snake_casify(name) if name else kind.lower()

  return '{}:${}'.format(arg_type, name)
def get_op_definition(instruction, doc, existing_info):
  """Generates the TableGen op definition for the given SPIR-V instruction.
  Arguments:
    - instruction: the instruction's SPIR-V JSON grammar
    - doc: the instruction's SPIR-V HTML doc
    - existing_info: a dict containing potential manually specified sections for
      this instruction
  Returns:
    - A string containing the TableGen op definition
  """
  # Whitespace inside this template is significant: it is emitted verbatim
  # into SPIRVOps.td and later re-parsed by extract_td_op_info.
  fmt_str = 'def SPV_{opname}Op : SPV_Op<"{opname}", [{traits}]> {{\n'\
            ' let summary = {summary};\n\n'\
            ' let description = [{{\n'\
            '{description}\n\n'\
            ' ### Custom assembly form\n'\
            '{assembly}'\
            '}}];\n\n'\
            ' let arguments = (ins{args});\n\n'\
            ' let results = (outs{results});\n'\
            '{extras}'\
            '}}\n'
  # Strip the 'Op' prefix from the SPIR-V opname.
  opname = instruction['opname'][2:]
  # First doc line is the summary; the rest is the long description.
  summary, description = doc.split('\n', 1)
  wrapper = textwrap.TextWrapper(
      width=76, initial_indent=' ', subsequent_indent=' ')
  # Format summary. If the summary can fit in the same line, we print it out
  # as a "-quoted string; otherwise, wrap the lines using "[{...}]".
  summary = summary.strip();
  if len(summary) + len(' let summary = "";') <= 80:
    summary = '"{}"'.format(summary)
  else:
    summary = '[{{\n{}\n }}]'.format(wrapper.fill(summary))
  # Wrap description
  description = description.split('\n')
  description = [wrapper.fill(line) for line in description if line]
  description = '\n\n'.join(description)
  operands = instruction.get('operands', [])
  # Set op's result
  results = ''
  if len(operands) > 0 and operands[0]['kind'] == 'IdResultType':
    results = '\n    SPV_Type:$result\n  '
    operands = operands[1:]
  # Manually specified results take precedence over the generated default.
  if 'results' in existing_info:
    results = existing_info['results']
  # Ignore the operand standing for the result <id>
  if len(operands) > 0 and operands[0]['kind'] == 'IdResult':
    operands = operands[1:]
  # Set op' argument
  arguments = existing_info.get('arguments', None)
  if arguments is None:
    arguments = [map_spec_operand_to_ods_argument(o) for o in operands]
    arguments = '\n    '.join(arguments)
  if arguments:
    # Prepend and append whitespace for formatting
    arguments = '\n    {}\n  '.format(arguments)
  # Manually written assembly form is kept; otherwise emit a TODO skeleton.
  assembly = existing_info.get('assembly', None)
  if assembly is None:
    assembly = ' ``` {.ebnf}\n'\
               ' [TODO]\n'\
               ' ```\n\n'\
               ' For example:\n\n'\
               ' ```\n'\
               ' [TODO]\n'\
               ' ```\n '
  return fmt_str.format(
      opname=opname,
      traits=existing_info.get('traits', ''),
      summary=summary,
      description=description,
      assembly=assembly,
      args=arguments,
      results=results,
      extras=existing_info.get('extras', ''))
def extract_td_op_info(op_def):
  """Extracts potentially manually specified sections in an op's definition.

  Arguments:
    - op_def: a string containing the op's TableGen definition

  Returns:
    - A dict containing potential manually specified sections
  """
  # Opname: exactly one 'def SPV_<Name>Op' is expected per section.
  matches = [m[8:-2] for m in re.findall(r'def SPV_\w+Op', op_def)]
  assert len(matches) == 1, 'more than one ops in the same section!'
  opname = matches[0]

  # Traits: everything after the first ', ' inside the SPV_Op<...> params.
  tmpl_params = op_def.split('<', 1)[1].split('>', 1)[0].split(', ', 1)
  traits = '' if len(tmpl_params) == 1 else tmpl_params[1].strip('[]')

  # Custom assembly form: text between the marker and 'let arguments'.
  parts = op_def.split('### Custom assembly form\n')
  assert len(parts) == 2, \
      '{}: cannot find "### Custom assembly form"'.format(opname)
  parts = parts[1].split('  let arguments = (ins')
  assert len(parts) == 2, '{}: cannot find arguments'.format(opname)
  assembly = parts[0].rstrip('}];\n')

  # Arguments: up to 'let results'.
  parts = parts[1].split('  let results = (outs')
  assert len(parts) == 2, '{}: cannot find results'.format(opname)
  args = parts[0].rstrip(');\n')

  # Results: up to the closing ');'; whatever follows is extra sections.
  parts = parts[1].split(');', 1)
  assert len(parts) == 2, \
      '{}: cannot find ");" ending results'.format(opname)
  results = parts[0]

  extras = parts[1].strip(' }\n')
  if extras:
    extras = '\n  {}\n'.format(extras)

  return {
      # Prefix with 'Op' to make it consistent with SPIR-V spec
      'opname': 'Op{}'.format(opname),
      'traits': traits,
      'assembly': assembly,
      'arguments': args,
      'results': results,
      'extras': extras
  }
def update_td_op_definitions(path, instructions, docs, filter_list):
  """Updates SPIRVOps.td with newly generated op definition.
  Arguments:
    - path: path to SPIRVOps.td
    - instructions: SPIR-V JSON grammar for all instructions
    - docs: SPIR-V HTML doc for all instructions
    - filter_list: a list containing new opnames to include
  Returns:
    - A string containing all the TableGen op definitions
  """
  with open(path, 'r') as f:
    content = f.read()
  # Split the file into chuncks, each containing one op.
  ops = content.split(AUTOGEN_OP_DEF_SEPARATOR)
  header = ops[0]
  footer = ops[-1]
  ops = ops[1:-1]
  # For each existing op, extract the manually-written sections out to retain
  # them when re-generating the ops. Also append the existing ops to filter
  # list.
  op_info_dict = {}
  for op in ops:
    info_dict = extract_td_op_info(op)
    opname = info_dict['opname']
    op_info_dict[opname] = info_dict
    filter_list.append(opname)
  # Ops are regenerated in sorted, deduplicated opname order.
  filter_list = sorted(list(set(filter_list)))
  op_defs = []
  for opname in filter_list:
    # Find the grammar spec for this op
    # (raises StopIteration if an opname is not in the JSON grammar).
    instruction = next(
        inst for inst in instructions if inst['opname'] == opname)
    op_defs.append(
        get_op_definition(instruction, docs[opname],
                          op_info_dict.get(opname, {})))
  # Substitute the old op definitions
  op_defs = [header] + op_defs + [footer]
  content = AUTOGEN_OP_DEF_SEPARATOR.join(op_defs)
  with open(path, 'w') as f:
    f.write(content)
if __name__ == '__main__':
  import argparse

  # CLI: each of --new-enum / --new-opcodes / --new-inst independently
  # triggers one update pass over the corresponding .td file.
  cli_parser = argparse.ArgumentParser(
      description='Update SPIR-V dialect definitions using SPIR-V spec')
  cli_parser.add_argument(
      '--base-td-path',
      dest='base_td_path',
      type=str,
      default=None,
      help='Path to SPIRVBase.td')
  cli_parser.add_argument(
      '--op-td-path',
      dest='op_td_path',
      type=str,
      default=None,
      help='Path to SPIRVOps.td')
  cli_parser.add_argument(
      '--new-enum',
      dest='new_enum',
      type=str,
      default=None,
      help='SPIR-V enum to be added to SPIRVBase.td')
  cli_parser.add_argument(
      '--new-opcodes',
      dest='new_opcodes',
      type=str,
      default=None,
      nargs='*',
      help='update SPIR-V opcodes in SPIRVBase.td')
  cli_parser.add_argument(
      '--new-inst',
      dest='new_inst',
      type=str,
      default=None,
      help='SPIR-V instruction to be added to SPIRVOps.td')
  args = cli_parser.parse_args()

  # The JSON grammar is needed by every pass; fetch it once up front.
  operand_kinds, instructions = get_spirv_grammar_from_json_spec()

  # Define new enum attr
  if args.new_enum is not None:
    assert args.base_td_path is not None
    # An empty string means "regenerate existing enums only".
    filter_list = [args.new_enum] if args.new_enum else []
    update_td_enum_attrs(args.base_td_path, operand_kinds, filter_list)

  # Define new opcode
  if args.new_opcodes is not None:
    assert args.base_td_path is not None
    update_td_opcodes(args.base_td_path, instructions, args.new_opcodes)

  # Define new op
  if args.new_inst is not None:
    assert args.op_td_path is not None
    filter_list = [args.new_inst] if args.new_inst else []
    # The HTML doc is only needed when generating op definitions.
    docs = get_spirv_doc_from_html_spec()
    update_td_op_definitions(args.op_td_path, instructions, docs, filter_list)

  print('Done. Note that this script just generates a template; ', end='')
  print('please read the spec and update traits, arguments, and ', end='')
  print('results accordingly.')
| 33.213938
| 139
| 0.63246
|
import re
import requests
import textwrap
# URLs of the SPIR-V HTML spec (per-op documentation) and JSON grammar
# (operand kinds, operands, opcodes).
SPIRV_HTML_SPEC_URL = 'https://www.khronos.org/registry/spir-v/specs/unified1/SPIRV.html'
SPIRV_JSON_SPEC_URL = 'https://raw.githubusercontent.com/KhronosGroup/SPIRV-Headers/master/include/spirv/unified1/spirv.core.grammar.json'
# Separator between autogenerated op definitions in SPIRVOps.td.
AUTOGEN_OP_DEF_SEPARATOR = '\n// -----\n\n'
# Begin/end markers around the autogenerated sections of SPIRVBase.td.
AUTOGEN_ENUM_SECTION_MARKER = 'enum section. Generated from SPIR-V spec; DO NOT MODIFY!'
AUTOGEN_OPCODE_SECTION_MARKER = (
    'opcode section. Generated from SPIR-V spec; DO NOT MODIFY!')
def get_spirv_doc_from_html_spec():
  """Scrapes the SPIR-V HTML spec; returns a dict of opname -> doc text."""
  response = requests.get(SPIRV_HTML_SPEC_URL)
  spec = response.content
  # bs4 imported lazily: only this scraping path needs it.
  from bs4 import BeautifulSoup
  spirv = BeautifulSoup(spec, 'html.parser')
  section_anchor = spirv.find('h3', {'id': '_a_id_instructions_a_instructions'})
  doc = {}
  for section in section_anchor.parent.find_all('div', {'class': 'sect3'}):
    for table in section.find_all('table'):
      inst_html = table.tbody.tr.td.p
      opname = inst_html.a['id']
      # Drop the first line of the cell text, which is just the opname.
      doc[opname] = inst_html.text.split('\n', 1)[1].strip()
  return doc
def get_spirv_grammar_from_json_spec():
  """Fetches the SPIR-V JSON grammar; returns (operand kinds, instructions)."""
  response = requests.get(SPIRV_JSON_SPEC_URL)
  spec = response.content
  import json
  spirv = json.loads(spec)
  return spirv['operand_kinds'], spirv['instructions']
def split_list_into_sublists(items, offset):
  """Splits items into sublists of ~80 chars each (', '-separated length).

  NOTE(review): `offset` is not used by the computation — confirm whether
  callers expect it to reduce the 80-character budget.
  """
  chuncks = []
  chunk = []
  chunk_len = 0
  for item in items:
    chunk_len += len(item) + 2  # +2 accounts for the ', ' separator
    if chunk_len > 80:
      # Budget exceeded: close this chunk; the item starts the next one.
      chuncks.append(chunk)
      chunk = []
      chunk_len = len(item) + 2
    chunk.append(item)
  if len(chunk) != 0:
    chuncks.append(chunk)
  return chuncks
def uniquify(lst, equality_fn):
  """Returns lst with duplicates (as keyed by equality_fn) removed.

  Order is preserved; only the first occurrence of each key is retained.
  """
  keys = set()
  unique_lst = []
  for elem in lst:
    key = equality_fn(elem)
    # BUG FIX: test the derived key itself; the previous code applied
    # equality_fn twice (`equality_fn(key)`), which can raise for keys
    # equality_fn cannot accept (e.g. integer enum values).
    if key not in keys:
      unique_lst.append(elem)
      keys.add(key)
  return unique_lst
def gen_operand_kind_enum_attr(operand_kind):
  """Returns (kind name, TableGen I32EnumAttr definition) for an operand kind."""
  # Kinds without enumerants (literals, <id>s) have no enum to emit.
  if 'enumerants' not in operand_kind:
    return '', ''
  kind_name = operand_kind['kind']
  # Acronym = upper-case letters of the kind name, e.g. MemoryModel -> MM.
  kind_acronym = ''.join([c for c in kind_name if c >= 'A' and c <= 'Z'])
  kind_cases = [(case['enumerant'], case['value'])
                for case in operand_kind['enumerants']]
  # Aliased enumerants share one value; keep the first symbol per value.
  kind_cases = uniquify(kind_cases, lambda x: x[1])
  max_len = max([len(symbol) for (symbol, _) in kind_cases])
  # One I32EnumAttrCase per case, with ':' right-aligned for column layout.
  fmt_str = 'def SPV_{acronym}_{symbol} {colon:>{offset}} '\
            'I32EnumAttrCase<"{symbol}", {value}>;'
  case_defs = [
      fmt_str.format(
          acronym=kind_acronym,
          symbol=case[0],
          value=case[1],
          colon=':',
          offset=(max_len + 1 - len(case[0]))) for case in kind_cases
  ]
  case_defs = '\n'.join(case_defs)
  # Comma-separated case-name list, wrapped to ~80 columns.
  fmt_str = 'SPV_{acronym}_{symbol}';
  case_names = [fmt_str.format(acronym=kind_acronym,symbol=case[0])
                for case in kind_cases]
  case_names = split_list_into_sublists(case_names, 6)
  case_names = ['{:6}'.format('') + ', '.join(sublist)
                for sublist in case_names]
  case_names = ',\n'.join(case_names)
  # Wrap everything in the enum attribute definition.
  enum_attr = 'def SPV_{name}Attr :\n '\
      'I32EnumAttr<"{name}", "valid SPIR-V {name}", [\n{cases}\n ]> {{\n'\
      ' let returnType = "::mlir::spirv::{name}";\n'\
      ' let convertFromStorage = '\
      '"static_cast<::mlir::spirv::{name}>($_self.getInt())";\n'\
      ' let cppNamespace = "::mlir::spirv";\n}}'.format(
          name=kind_name, cases=case_names)
  return kind_name, case_defs + '\n\n' + enum_attr
def gen_opcode(instructions):
  """Returns the TableGen definitions mapping opnames to opcodes."""
  max_len = max([len(inst['opname']) for inst in instructions])
  # One I32EnumAttrCase per instruction; ':' right-aligned for columns.
  def_fmt_str = 'def SPV_OC_{name} {colon:>{offset}} '\
                'I32EnumAttrCase<"{name}", {value}>;'
  opcode_defs = [
      def_fmt_str.format(
          name=inst['opname'],
          value=inst['opcode'],
          colon=':',
          offset=(max_len + 1 - len(inst['opname']))) for inst in instructions
  ]
  opcode_str = '\n'.join(opcode_defs)
  # Comma-separated case-name list, wrapped to ~80 columns.
  decl_fmt_str = 'SPV_OC_{name}'
  opcode_list = [
      decl_fmt_str.format(name=inst['opname']) for inst in instructions
  ]
  opcode_list = split_list_into_sublists(opcode_list, 6)
  opcode_list = [
      '{:6}'.format('') + ', '.join(sublist) for sublist in opcode_list
  ]
  opcode_list = ',\n'.join(opcode_list)
  # Wrap all opcode cases in the enum attribute definition.
  enum_attr = 'def SPV_OpcodeAttr :\n'\
      ' I32EnumAttr<"{name}", "valid SPIR-V instructions", [\n'\
      '{lst}\n'\
      ' ]> {{\n'\
      ' let returnType = "::mlir::spirv::{name}";\n'\
      ' let convertFromStorage = '\
      '"static_cast<::mlir::spirv::{name}>($_self.getInt())";\n'\
      ' let cppNamespace = "::mlir::spirv";\n}}'.format(
          name='Opcode', lst=opcode_list)
  return opcode_str + '\n\n' + enum_attr
def update_td_opcodes(path, instructions, filter_list):
  """Regenerates the opcode section of SPIRVBase.td, adding filter_list."""
  with open(path, 'r') as f:
    content = f.read()
  # Marker appears twice (begin/end); only the middle part is regenerated.
  content = content.split(AUTOGEN_OPCODE_SECTION_MARKER)
  assert len(content) == 3
  # Keep all opcodes already present (k[11:] drops 'def SPV_OC_').
  existing_opcodes = [k[11:] for k in re.findall('def SPV_OC_\w+', content[1])]
  filter_list.extend(existing_opcodes)
  filter_list = list(set(filter_list))
  filter_instrs = list(
      filter(lambda inst: (inst['opname'] in filter_list), instructions))
  # Emit in ascending opcode order.
  filter_instrs.sort(key=lambda inst: inst['opcode'])
  opcode = gen_opcode(filter_instrs)
  content = content[0] + AUTOGEN_OPCODE_SECTION_MARKER + '\n\n' + \
      opcode + '\n\n// End ' + AUTOGEN_OPCODE_SECTION_MARKER \
      + content[2]
  with open(path, 'w') as f:
    f.write(content)
def update_td_enum_attrs(path, operand_kinds, filter_list):
  """Regenerates the enum section of SPIRVBase.td, adding filter_list."""
  with open(path, 'r') as f:
    content = f.read()
  # Marker appears twice (begin/end); only the middle part is regenerated.
  content = content.split(AUTOGEN_ENUM_SECTION_MARKER)
  assert len(content) == 3
  # Keep enums already present (k[8:-4] drops 'def SPV_' and 'Attr').
  existing_kinds = [
      k[8:-4] for k in re.findall('def SPV_\w+Attr', content[1])]
  filter_list.extend(existing_kinds)
  defs = [gen_operand_kind_enum_attr(kind)
          for kind in operand_kinds if kind['kind'] in filter_list]
  # Emit alphabetically by enum name; keep only the definition strings.
  defs.sort(key=lambda enum : enum[0])
  defs = [enum[1] for enum in defs]
  content = content[0] + AUTOGEN_ENUM_SECTION_MARKER + '\n\n' + \
      '\n\n'.join(defs) + "\n\n// End " + AUTOGEN_ENUM_SECTION_MARKER \
      + content[2];
  with open(path, 'w') as f:
    f.write(content)
def snake_casify(name):
  """Lower-cases name; note '\W+' removal also strips the spaces, so
  multi-word names collapse to one token before the split/join."""
  name = re.sub('\W+', '', name).split()
  name = [s.lower() for s in name]
  return '_'.join(name)
def map_spec_operand_to_ods_argument(operand):
  """Maps a JSON-spec operand dict to an ODS 'type:$name' argument string."""
  kind = operand['kind']
  # Quantifier '' = exactly one, '?' = optional, '*' = variadic.
  quantifier = operand.get('quantifier', '')
  # Result-encoding operands must be handled by the caller, not here.
  assert kind != 'IdResultType', 'unexpected to handle "IdResultType" kind'
  assert kind != 'IdResult', 'unexpected to handle "IdResult" kind'
  if kind == 'IdRef':
    if quantifier == '':
      arg_type = 'SPV_Type'
    elif quantifier == '?':
      arg_type = 'SPV_Optional<SPV_Type>'
    else:
      arg_type = 'Variadic<SPV_Type>'
  elif kind == 'IdMemorySemantics' or kind == 'IdScope':
    assert quantifier == '', ('unexpected to have optional/variadic memory '
                              'semantics or scope <id>')
    arg_type = 'I32'
  elif kind == 'LiteralInteger':
    if quantifier == '':
      arg_type = 'I32Attr'
    elif quantifier == '?':
      arg_type = 'OptionalAttr<I32Attr>'
    else:
      arg_type = 'OptionalAttr<I32ArrayAttr>'
  elif kind == 'LiteralString' or \
      kind == 'LiteralContextDependentNumber' or \
      kind == 'LiteralExtInstInteger' or \
      kind == 'LiteralSpecConstantOpInteger' or \
      kind == 'PairLiteralIntegerIdRef' or \
      kind == 'PairIdRefLiteralInteger' or \
      kind == 'PairIdRefIdRef':
    assert False, '"{}" kind unimplemented'.format(kind)
  else:
    # The remaining kinds are enums represented as op attributes.
    assert quantifier != '*', 'unexpected to have variadic enum attribute'
    arg_type = 'SPV_{}Attr'.format(kind)
    if quantifier == '?':
      arg_type = 'OptionalAttr<{}>'.format(arg_type)
  # Fall back to the lower-cased kind when the spec gives no operand name.
  name = operand.get('name', '')
  name = snake_casify(name) if name else kind.lower()
  return '{}:${}'.format(arg_type, name)
def get_op_definition(instruction, doc, existing_info):
  """Generates the TableGen op definition for one SPIR-V instruction,
  preferring any manually written sections found in existing_info."""
  # Whitespace in this template is significant: it is emitted verbatim into
  # SPIRVOps.td and later re-parsed by extract_td_op_info.
  fmt_str = 'def SPV_{opname}Op : SPV_Op<"{opname}", [{traits}]> {{\n'\
            ' let summary = {summary};\n\n'\
            ' let description = [{{\n'\
            '{description}\n\n'\
            ' ### Custom assembly form\n'\
            '{assembly}'\
            '}}];\n\n'\
            ' let arguments = (ins{args});\n\n'\
            ' let results = (outs{results});\n'\
            '{extras}'\
            '}}\n'
  # Strip the 'Op' prefix; first doc line is the summary.
  opname = instruction['opname'][2:]
  summary, description = doc.split('\n', 1)
  wrapper = textwrap.TextWrapper(
      width=76, initial_indent=' ', subsequent_indent=' ')
  # Short summaries become a quoted string; long ones a wrapped [{...}].
  summary = summary.strip();
  if len(summary) + len(' let summary = "";') <= 80:
    summary = '"{}"'.format(summary)
  else:
    summary = '[{{\n{}\n }}]'.format(wrapper.fill(summary))
  # Wrap description
  description = description.split('\n')
  description = [wrapper.fill(line) for line in description if line]
  description = '\n\n'.join(description)
  operands = instruction.get('operands', [])
  # Set op's result
  results = ''
  if len(operands) > 0 and operands[0]['kind'] == 'IdResultType':
    results = '\n    SPV_Type:$result\n  '
    operands = operands[1:]
  # Manually specified results take precedence over the generated default.
  if 'results' in existing_info:
    results = existing_info['results']
  # Ignore the operand standing for the result <id>
  if len(operands) > 0 and operands[0]['kind'] == 'IdResult':
    operands = operands[1:]
  # Set op' argument
  arguments = existing_info.get('arguments', None)
  if arguments is None:
    arguments = [map_spec_operand_to_ods_argument(o) for o in operands]
    arguments = '\n    '.join(arguments)
  if arguments:
    # Prepend and append whitespace for formatting
    arguments = '\n    {}\n  '.format(arguments)
  # Keep a manually written assembly form; otherwise emit a TODO skeleton.
  assembly = existing_info.get('assembly', None)
  if assembly is None:
    assembly = ' ``` {.ebnf}\n'\
               ' [TODO]\n'\
               ' ```\n\n'\
               ' For example:\n\n'\
               ' ```\n'\
               ' [TODO]\n'\
               ' ```\n '
  return fmt_str.format(
      opname=opname,
      traits=existing_info.get('traits', ''),
      summary=summary,
      description=description,
      assembly=assembly,
      args=arguments,
      results=results,
      extras=existing_info.get('extras', ''))
def extract_td_op_info(op_def):
    """Parse one existing TableGen op definition into its sections.

    Extracts the pieces that may contain manual edits (traits, custom
    assembly form, arguments, results, and trailing extras) so they can be
    retained when the op definition is regenerated from the spec.

    Args:
        op_def: string containing a single 'def SPV_...Op' record.

    Returns:
        A dict with keys 'opname', 'traits', 'assembly', 'arguments',
        'results', and 'extras'.
    """
    # Get opname. Use a raw string so '\w' is a regex character class, not
    # an invalid string escape (a DeprecationWarning on modern Pythons).
    opname = [o[8:-2] for o in re.findall(r'def SPV_\w+Op', op_def)]
    assert len(opname) == 1, 'more than one ops in the same section!'
    opname = opname[0]

    # Get traits from the SPV_Op<"Name", [traits]> template parameters.
    op_tmpl_params = op_def.split('<', 1)[1].split('>', 1)[0].split(', ', 1)
    if len(op_tmpl_params) == 1:
        traits = ''
    else:
        traits = op_tmpl_params[1].strip('[]')

    # Get custom assembly form
    rest = op_def.split('### Custom assembly form\n')
    assert len(rest) == 2, \
        '{}: cannot find "### Custom assembly form"'.format(opname)
    rest = rest[1].split(' let arguments = (ins')
    assert len(rest) == 2, '{}: cannot find arguments'.format(opname)
    # rstrip with a character set: trims any trailing '}', ']', ';', '\n'.
    assembly = rest[0].rstrip('}];\n')

    # Get arguments
    rest = rest[1].split(' let results = (outs')
    assert len(rest) == 2, '{}: cannot find results'.format(opname)
    args = rest[0].rstrip(');\n')

    # Get results
    rest = rest[1].split(');', 1)
    assert len(rest) == 2, \
        '{}: cannot find ");" ending results'.format(opname)
    results = rest[0]

    # Anything after the results clause (extraClassDeclaration, etc.).
    extras = rest[1].strip(' }\n')
    if extras:
        extras = '\n {}\n'.format(extras)

    return {
        # Prefix with 'Op' to make it consistent with SPIR-V spec
        'opname': 'Op{}'.format(opname),
        'traits': traits,
        'assembly': assembly,
        'arguments': args,
        'results': results,
        'extras': extras
    }
def update_td_op_definitions(path, instructions, docs, filter_list):
    """Regenerate SPIR-V op definitions in the TableGen file at *path*.

    Existing autogenerated ops (delimited by AUTOGEN_OP_DEF_SEPARATOR) are
    parsed first so their hand-written sections survive regeneration; every
    op named in *filter_list* is then (re)generated from the spec data.

    Args:
        path: path to the ops .td file (e.g. SPIRVOps.td).
        instructions: list of instruction dicts from the SPIR-V JSON grammar.
        docs: dict mapping opname to its documentation text.
        filter_list: opnames to generate; mutated in place to also include
            the ops already present in the file.
    """
    with open(path, 'r') as f:
        content = f.read()

    # Split the file into chunks, each containing one op.
    ops = content.split(AUTOGEN_OP_DEF_SEPARATOR)
    header = ops[0]
    footer = ops[-1]
    ops = ops[1:-1]

    # For each existing op, extract the manually-written sections out to retain
    # them when re-generating the ops. Also append the existing ops to filter
    # list.
    op_info_dict = {}
    for op in ops:
        info_dict = extract_td_op_info(op)
        opname = info_dict['opname']
        op_info_dict[opname] = info_dict
        filter_list.append(opname)
    # Dedupe and sort so output ordering is deterministic.
    filter_list = sorted(list(set(filter_list)))

    op_defs = []
    for opname in filter_list:
        # Find the grammar spec for this op
        instruction = next(
            inst for inst in instructions if inst['opname'] == opname)
        op_defs.append(
            get_op_definition(instruction, docs[opname],
                              op_info_dict.get(opname, {})))

    # Substitute the old op definitions
    op_defs = [header] + op_defs + [footer]
    content = AUTOGEN_OP_DEF_SEPARATOR.join(op_defs)

    with open(path, 'w') as f:
        f.write(content)
if __name__ == '__main__':
    import argparse

    # Each optional flag below triggers one independent kind of update; they
    # can be combined in a single invocation.
    cli_parser = argparse.ArgumentParser(
        description='Update SPIR-V dialect definitions using SPIR-V spec')
    cli_parser.add_argument(
        '--base-td-path',
        dest='base_td_path',
        type=str,
        default=None,
        help='Path to SPIRVBase.td')
    cli_parser.add_argument(
        '--op-td-path',
        dest='op_td_path',
        type=str,
        default=None,
        help='Path to SPIRVOps.td')
    cli_parser.add_argument(
        '--new-enum',
        dest='new_enum',
        type=str,
        default=None,
        help='SPIR-V enum to be added to SPIRVBase.td')
    cli_parser.add_argument(
        '--new-opcodes',
        dest='new_opcodes',
        type=str,
        default=None,
        nargs='*',
        help='update SPIR-V opcodes in SPIRVBase.td')
    cli_parser.add_argument(
        '--new-inst',
        dest='new_inst',
        type=str,
        default=None,
        help='SPIR-V instruction to be added to SPIRVOps.td')
    args = cli_parser.parse_args()

    # Pull the machine-readable grammar once; both update paths use it.
    operand_kinds, instructions = get_spirv_grammar_from_json_spec()

    # Define new enum attr
    if args.new_enum is not None:
        assert args.base_td_path is not None
        # An empty string means "refresh existing enums only".
        filter_list = [args.new_enum] if args.new_enum else []
        update_td_enum_attrs(args.base_td_path, operand_kinds, filter_list)

    # Define new opcode
    if args.new_opcodes is not None:
        assert args.base_td_path is not None
        update_td_opcodes(args.base_td_path, instructions, args.new_opcodes)

    # Define new op
    if args.new_inst is not None:
        assert args.op_td_path is not None
        filter_list = [args.new_inst] if args.new_inst else []
        docs = get_spirv_doc_from_html_spec()
        update_td_op_definitions(args.op_td_path, instructions, docs, filter_list)

    print('Done. Note that this script just generates a template; ', end='')
    print('please read the spec and update traits, arguments, and ', end='')
    print('results accordingly.')
| true
| true
|
f70e2365adc065b933c5f54d699042c9f42bc30c
| 641
|
py
|
Python
|
clean.py
|
amraboelela/ertugrul
|
00491d002b7a8989b1ec957c94b187d0490e27fa
|
[
"MIT"
] | null | null | null |
clean.py
|
amraboelela/ertugrul
|
00491d002b7a8989b1ec957c94b187d0490e27fa
|
[
"MIT"
] | null | null | null |
clean.py
|
amraboelela/ertugrul
|
00491d002b7a8989b1ec957c94b187d0490e27fa
|
[
"MIT"
] | null | null | null |
# Batch-clean a range of episodes by shelling out to ./cleanEpisode.
import sys, os

# Usage: clean.py <title> <season> <first_ep> <last_ep> <src_lang> <dst_lang>
if len(sys.argv) > 6:
    title = sys.argv[1]
    s = sys.argv[2]                 # season number, kept as a string
    a = int(sys.argv[3])            # first episode number (inclusive)
    b = int(sys.argv[4])            # last episode number (inclusive)
    sourceLanguage = sys.argv[5]
    targetLanguage = sys.argv[6]
else:
    print("please enter the title, season number, first episode number, last episode numer, source language, and the target language")
    exit(-1)

# Clean every episode in the inclusive range [a, b].
for n in range(a, b+1):
    # NOTE(review): season "2" uses 3-digit episode numbers, all others
    # 2-digit — presumably matching the on-disk naming; confirm.
    if s == "2":
        prefix = title + "-" + s + "-" + str(n).zfill(3)
    else:
        prefix = title + "-" + s + "-" + str(n).zfill(2)
    os.system("./cleanEpisode " + prefix + " " + sourceLanguage + " " + targetLanguage)
| 29.136364
| 134
| 0.583463
|
import sys, os

# Usage: clean.py <title> <season> <first_ep> <last_ep> <src_lang> <dst_lang>
if len(sys.argv) > 6:
    title = sys.argv[1]
    s = sys.argv[2]                 # season number, kept as a string
    a = int(sys.argv[3])            # first episode number (inclusive)
    b = int(sys.argv[4])            # last episode number (inclusive)
    sourceLanguage = sys.argv[5]
    targetLanguage = sys.argv[6]
else:
    print("please enter the title, season number, first episode number, last episode numer, source language, and the target language")
    exit(-1)

# Shell out to ./cleanEpisode for every episode in the inclusive range.
for n in range(a, b+1):
    # NOTE(review): season "2" apparently uses 3-digit episode numbers — confirm.
    if s == "2":
        prefix = title + "-" + s + "-" + str(n).zfill(3)
    else:
        prefix = title + "-" + s + "-" + str(n).zfill(2)
    os.system("./cleanEpisode " + prefix + " " + sourceLanguage + " " + targetLanguage)
| true
| true
|
f70e23b0623fe77fb5807f8ef72c2de3c6d55691
| 1,491
|
py
|
Python
|
example_fuzzers/fuzzing_example.py
|
rohankumardubey/atheris
|
77ef3b3b6be35088ada145b223e2c3500ae00123
|
[
"Apache-2.0"
] | null | null | null |
example_fuzzers/fuzzing_example.py
|
rohankumardubey/atheris
|
77ef3b3b6be35088ada145b223e2c3500ae00123
|
[
"Apache-2.0"
] | null | null | null |
example_fuzzers/fuzzing_example.py
|
rohankumardubey/atheris
|
77ef3b3b6be35088ada145b223e2c3500ae00123
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# Copyright 2020 Google LLC
# Copyright 2021 Fraunhofer FKIE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of fuzzing in Python."""
import atheris
import sys
# This tells Atheris to instrument all functions in the `struct` and
# `example_library` modules.
with atheris.instrument_imports():
import struct
import example_library
@atheris.instrument_func  # Instrument the TestOneInput function itself
def TestOneInput(data):
    """The entry point for our fuzzer.

    This is a callback that will be repeatedly invoked with different arguments
    after Fuzz() is called.

    We translate the arbitrary byte string into a format our function being fuzzed
    can understand, then call it.

    Args:
        data: Bytestring coming from the fuzzing engine.
    """
    if len(data) != 4:
        return  # Input must be 4 byte integer.

    # '<I' = little-endian unsigned 32-bit integer.
    number, = struct.unpack('<I', data)
    example_library.CodeBeingFuzzed(number)


atheris.Setup(sys.argv, TestOneInput)  # register the harness with the engine
atheris.Fuzz()  # start fuzzing; this call does not return
| 29.82
| 80
| 0.753186
|
import atheris
import sys

# Instrument everything imported inside this block for coverage feedback.
with atheris.instrument_imports():
    import struct
    import example_library


@atheris.instrument_func  # instrument the harness itself
def TestOneInput(data):
    """Fuzzer entry point: decode 4 bytes as a uint32 and feed it onward.

    Args:
        data: bytestring supplied by the fuzzing engine.
    """
    if len(data) != 4:
        return  # input must be exactly a 4-byte integer

    # '<I' = little-endian unsigned 32-bit integer.
    number, = struct.unpack('<I', data)
    example_library.CodeBeingFuzzed(number)


atheris.Setup(sys.argv, TestOneInput)
atheris.Fuzz()  # does not return
| true
| true
|
f70e23d3f90e0aebfb749365625873ed78aeb75c
| 1,585
|
py
|
Python
|
tests/collect.py
|
djamaile/mangascan
|
af3bb62237e81ddc8ef9341bc4e697728c266d20
|
[
"MIT"
] | null | null | null |
tests/collect.py
|
djamaile/mangascan
|
af3bb62237e81ddc8ef9341bc4e697728c266d20
|
[
"MIT"
] | null | null | null |
tests/collect.py
|
djamaile/mangascan
|
af3bb62237e81ddc8ef9341bc4e697728c266d20
|
[
"MIT"
] | null | null | null |
import unittest
from core import collect
class TestCollect(unittest.TestCase):
    """Smoke tests for the `core.collect` release scrapers.

    Each test only checks the *shape* of the first scraped release: its key
    set must match EXPECTED_KEYS exactly.  The identical template dict was
    previously duplicated in every test; it is factored out here.
    """

    # The key set every scraped release dict must expose.
    EXPECTED_KEYS = {"name", "img", "link", "publisher"}

    def _assert_release_shape(self, response):
        """Assert the first scraped release exposes exactly the expected keys."""
        # dict_keys compares equal to a set with the same elements.
        assert response[0].keys() == self.EXPECTED_KEYS

    def test_if_we_get_viz_release(self):
        self._assert_release_shape(collect.get_viz())

    def test_if_we_get_yen_release(self):
        self._assert_release_shape(collect.get_yen())

    def test_if_we_get_sevenseas_release(self):
        self._assert_release_shape(collect.get_seven_seas())

    def test_if_we_get_darkhorse_release(self):
        self._assert_release_shape(collect.get_dark_horse())

    def test_if_we_get_kodansha_release(self):
        self._assert_release_shape(collect.get_kodansha())
| 28.818182
| 54
| 0.484543
|
import unittest
from core import collect
class TestCollect(unittest.TestCase):
    """Smoke tests for the `core.collect` release scrapers.

    Each test only checks the *shape* of the first scraped release: its key
    set must match the template dict's keys exactly (values are ignored).
    """

    def test_if_we_get_viz_release(self):
        """Viz scraper returns dicts with the expected keys."""
        # Template: only the key set matters, values are placeholders.
        mock_data = {
            "name": "a",
            "img": "img",
            "link": "link",
            "publisher": "publisher",
        }
        response = collect.get_viz()
        assert response[0].keys() == mock_data.keys()

    def test_if_we_get_yen_release(self):
        """Yen Press scraper returns dicts with the expected keys."""
        mock_data = {
            "name": "a",
            "img": "img",
            "link": "link",
            "publisher": "publisher",
        }
        response = collect.get_yen()
        assert response[0].keys() == mock_data.keys()

    def test_if_we_get_sevenseas_release(self):
        """Seven Seas scraper returns dicts with the expected keys."""
        mock_data = {
            "name": "a",
            "img": "img",
            "link": "link",
            "publisher": "publisher",
        }
        response = collect.get_seven_seas()
        assert response[0].keys() == mock_data.keys()

    def test_if_we_get_darkhorse_release(self):
        """Dark Horse scraper returns dicts with the expected keys."""
        mock_data = {
            "name": "a",
            "img": "img",
            "link": "link",
            "publisher": "publisher",
        }
        response = collect.get_dark_horse()
        assert response[0].keys() == mock_data.keys()

    def test_if_we_get_kodansha_release(self):
        """Kodansha scraper returns dicts with the expected keys."""
        mock_data = {
            "name": "a",
            "img": "img",
            "link": "link",
            "publisher": "publisher",
        }
        response = collect.get_kodansha()
        assert response[0].keys() == mock_data.keys()
| true
| true
|
f70e255f53d9ea5a5e99b9d25e7a1289f6b1f8b7
| 1,915
|
py
|
Python
|
detokenize.py
|
miradel51/preprocess
|
05877ab6da36e068611643b220fed424e3999b3c
|
[
"MIT"
] | 2
|
2018-10-20T12:53:48.000Z
|
2021-06-23T16:05:44.000Z
|
detokenize.py
|
miradel51/convert_lowercase
|
05877ab6da36e068611643b220fed424e3999b3c
|
[
"MIT"
] | null | null | null |
detokenize.py
|
miradel51/convert_lowercase
|
05877ab6da36e068611643b220fed424e3999b3c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#-*-coding:utf-8 -*-
# author: mld
# email: miradel51@126.com
# date : 2017/9/28
import sys
import string
import re
def de_tokenizestr(original_str):
    """Undo tokenization by rejoining punctuation with neighboring words.

    Applies a fixed sequence of literal replacements (e.g. " ," -> ",",
    "( " -> "(", " - " -> "-") and returns the detokenized string.
    """
    # (tokenized form, detokenized form) pairs, applied in this exact order.
    replacements = (
        ("[ ", "["), (" ]", "]"), (" !", "!"),
        (" % ", "%"), (" # ", "#"), (" @ ", "@"), (" ~ ", "~"),
        (" & ", "&"), (" * ", "*"), (' " ', '"'),
        (" .", "."), (" ;", ";"), (" ,", ","), (" ^", "^"),
        ("( ", "("), (" )", ")"), ("{ ", "{"),
        (" >", ">"), ("< ", "<"), (" ?", "?"), (" }", "}"),
        (" - ", "-"), (" : ", ":"), (" = ", "="), (" + ", "+"),
    )
    for tokenized, plain in replacements:
        original_str = original_str.replace(tokenized, plain)
    after_de_tok = original_str
    return after_de_tok
if __name__ == '__main__':
    # Usage: detokenize.py <input_file> <output_file>
    ori_ = sys.argv[1]
    de_tok_ = sys.argv[2]

    ori_file = open(ori_,"r")
    de_tok_file = open(de_tok_,"w")

    context = ""
    # Detokenize the input line by line and write each result on its own line.
    for eachline in ori_file:
        context = eachline.strip()
        # rejoin punctuation with the neighboring words of the current line
        context = de_tokenizestr(context)
        de_tok_file.write(context)
        de_tok_file.write("\n")

    ori_file.close()
    de_tok_file.close()
| 27.753623
| 76
| 0.646997
|
import sys
import string
import re
def de_tokenizestr(original_str):
    """Undo tokenization: rejoin punctuation marks with neighboring words.

    Applies a fixed sequence of literal replacements and returns the
    detokenized string.
    """
    after_de_tok = ""
    # Brackets and terminal punctuation: drop the space on the inner side.
    original_str = original_str.replace("[ ","[")
    original_str = original_str.replace(" ]","]")
    original_str = original_str.replace(" !",'!')
    # Symbols padded on both sides lose both surrounding spaces.
    original_str = original_str.replace(" % ","%")
    original_str = original_str.replace(" # ","#")
    original_str = original_str.replace(" @ ","@")
    original_str = original_str.replace(" ~ ","~")
    original_str = original_str.replace(" & ","&")
    original_str = original_str.replace(" * ","*")
    original_str = original_str.replace(" \" ","\"")
    original_str = original_str.replace(" .",".")
    original_str = original_str.replace(" ;",";")
    original_str = original_str.replace(" ,",",")
    original_str = original_str.replace(" ^","^")
    original_str = original_str.replace("( ","(")
    original_str = original_str.replace(" )",")")
    original_str = original_str.replace("{ ","{")
    original_str = original_str.replace(" >",">")
    original_str = original_str.replace("< ","<")
    original_str = original_str.replace(" ?","?")
    original_str = original_str.replace(" }","}")
    original_str = original_str.replace(" - ","-")
    original_str = original_str.replace(" : ",":")
    original_str = original_str.replace(" = ","=")
    original_str = original_str.replace(" + ","+")
    after_de_tok = original_str
    return after_de_tok


if __name__ == '__main__':
    # Usage: detokenize.py <input_file> <output_file>
    ori_ = sys.argv[1]
    de_tok_ = sys.argv[2]

    ori_file = open(ori_,"r")
    de_tok_file = open(de_tok_,"w")

    context = ""
    # Detokenize line by line; each result is written on its own line.
    for eachline in ori_file:
        context = eachline.strip()
        context = de_tokenizestr(context)
        de_tok_file.write(context)
        de_tok_file.write("\n")

    ori_file.close()
    de_tok_file.close()
| true
| true
|
f70e26550ef5788483a5dda566d84d9dc5230444
| 1,145
|
py
|
Python
|
humanoid_league_misc/humanoid_league_transform/src/humanoid_league_transform/ball_tester.py
|
MosHumanoid/bitbots_thmos_meta
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
[
"MIT"
] | null | null | null |
humanoid_league_misc/humanoid_league_transform/src/humanoid_league_transform/ball_tester.py
|
MosHumanoid/bitbots_thmos_meta
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
[
"MIT"
] | 47
|
2019-03-11T08:58:09.000Z
|
2022-02-26T16:43:32.000Z
|
humanoid_league_misc/humanoid_league_transform/src/humanoid_league_transform/ball_tester.py
|
MosHumanoid/bitbots_thmos_meta
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
[
"MIT"
] | 1
|
2019-07-28T11:26:24.000Z
|
2019-07-28T11:26:24.000Z
|
#!/usr/bin/env python3
"""
Command line tool to publish balls on the /ball_in_image topic
"""
import rospy
from humanoid_league_msgs.msg import BallInImage, BallInImageArray
import sys
import signal
def _signal_term_handler(signal, frame):
    """SIGINT handler: log the keyboard interrupt and exit cleanly."""
    rospy.logerr('User Keyboard interrupt')
    sys.exit(0)


if __name__ == "__main__":
    # handle keyboard interrupts
    signal.signal(signal.SIGINT, _signal_term_handler)

    rospy.init_node("ball_tester")
    pub = rospy.Publisher("balls_in_image", BallInImageArray, queue_size=10)

    # Interactive loop: read (x, y) pixel coordinates from stdin and publish
    # a single fake ball candidate at that position.
    while True:
        x_str = input("x:")
        try:
            x = int(x_str)
        except ValueError:
            print("try again")
            continue
        y_str = input("y:")
        try:
            y = int(y_str)
        except ValueError:
            print("try again")
            continue

        ba = BallInImageArray()
        # NOTE(review): stamped 0.2s in the past — presumably so downstream
        # transform lookups have matching data; confirm.
        ba.header.stamp = rospy.get_rostime() - rospy.Duration(0.2)

        ball = BallInImage()
        ball.confidence = 1
        ball.center.x = x
        ball.center.y = y
        ball.diameter = 0.13  # fixed diameter (0.13 — presumably meters; confirm units)
        ba.candidates.append(ball)
        pub.publish(ba)
| 22.9
| 76
| 0.609607
|
import rospy
from humanoid_league_msgs.msg import BallInImage, BallInImageArray
import sys
import signal
def _signal_term_handler(signal, frame):
    """SIGINT handler: log the keyboard interrupt and exit cleanly."""
    rospy.logerr('User Keyboard interrupt')
    sys.exit(0)


if __name__ == "__main__":
    # Handle keyboard interrupts gracefully.
    signal.signal(signal.SIGINT, _signal_term_handler)

    rospy.init_node("ball_tester")
    pub = rospy.Publisher("balls_in_image", BallInImageArray, queue_size=10)

    # Read (x, y) pixel coordinates from stdin and publish a fake ball there.
    while True:
        x_str = input("x:")
        try:
            x = int(x_str)
        except ValueError:
            print("try again")
            continue
        y_str = input("y:")
        try:
            y = int(y_str)
        except ValueError:
            print("try again")
            continue

        ba = BallInImageArray()
        # NOTE(review): stamped 0.2s in the past — looks like a transform
        # latency workaround; confirm.
        ba.header.stamp = rospy.get_rostime() - rospy.Duration(0.2)

        ball = BallInImage()
        ball.confidence = 1
        ball.center.x = x
        ball.center.y = y
        ball.diameter = 0.13  # fixed diameter (units not shown here; confirm)
        ba.candidates.append(ball)
        pub.publish(ba)
| true
| true
|
f70e2745e5e18fd6fb1ea57f8e78709f6dec0740
| 849
|
py
|
Python
|
code_TSA_SVC_report_version/results/test13/__init__.py
|
aiir-team/code_TSA_SVC_luxembourg
|
c3efd636a82ac8279f5b525b2b117eac76d7f1a0
|
[
"MIT"
] | null | null | null |
code_TSA_SVC_report_version/results/test13/__init__.py
|
aiir-team/code_TSA_SVC_luxembourg
|
c3efd636a82ac8279f5b525b2b117eac76d7f1a0
|
[
"MIT"
] | null | null | null |
code_TSA_SVC_report_version/results/test13/__init__.py
|
aiir-team/code_TSA_SVC_luxembourg
|
c3efd636a82ac8279f5b525b2b117eac76d7f1a0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python #
# ------------------------------------------------------------------------------------------------------#
# Created by "Thieu Nguyen" at 02:05, 15/12/2019 #
# #
# Email: nguyenthieu2102@gmail.com #
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 #
# Github: https://github.com/thieunguyen5991 #
#-------------------------------------------------------------------------------------------------------#
| 84.9
| 105
| 0.191991
| true
| true
|
|
f70e27a5c0a2afaf918ee7b2a97de344a9057bef
| 43,409
|
py
|
Python
|
mkt/developers/tests/test_views_payments.py
|
Joergen/zamboni
|
20a0e22b75cf986aceeb8c4d8c25abb948d97096
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/developers/tests/test_views_payments.py
|
Joergen/zamboni
|
20a0e22b75cf986aceeb8c4d8c25abb948d97096
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/developers/tests/test_views_payments.py
|
Joergen/zamboni
|
20a0e22b75cf986aceeb8c4d8c25abb948d97096
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from django.core.exceptions import ObjectDoesNotExist
import mock
from curling.lib import HttpClientError
from mock import ANY
from nose.tools import eq_, ok_, raises
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.urlresolvers import reverse
from addons.models import (Addon, AddonCategory, AddonDeviceType,
AddonPremium, AddonUpsell, AddonUser, Category)
from constants.payments import (PAYMENT_METHOD_ALL,
PAYMENT_METHOD_CARD,
PAYMENT_METHOD_OPERATOR)
from mkt.constants.payments import ACCESS_PURCHASE, ACCESS_SIMULATE
from mkt.constants.regions import ALL_REGION_IDS
from market.models import Price
from users.models import UserProfile
import mkt
from mkt.developers.models import (AddonPaymentAccount, PaymentAccount,
SolitudeSeller, uri_to_pk, UserInappKey)
from mkt.site.fixtures import fixture
from mkt.webapps.models import AddonExcludedRegion as AER, ContentRating
# Id without any significance but to be different of 1.
TEST_PACKAGE_ID = 2
def setup_payment_account(app, user, uid='uid', package_id=TEST_PACKAGE_ID):
    """Create a seller + payment account for *user* and attach it to *app*.

    Returns the AddonPaymentAccount linking *app* to the new account.
    """
    solitude_seller = SolitudeSeller.objects.create(user=user, uuid=uid)
    account = PaymentAccount.objects.create(
        user=user, solitude_seller=solitude_seller, agreed_tos=True,
        seller_uri=uid, uri=uid, bango_package_id=package_id)
    product_uri = '/path/to/%s/' % app.pk
    return AddonPaymentAccount.objects.create(
        addon=app, product_uri=product_uri, account_uri=account.uri,
        payment_account=account)
class InappTest(amo.tests.TestCase):
    """Shared fixture for in-app payment view tests.

    Makes the fixture app free-with-inapp, owned by self.user (who is logged
    in), with a payment account attached.  self.other is a non-author user.
    """

    def setUp(self):
        self.create_switch('in-app-payments')
        self.app = Addon.objects.get(pk=337141)
        self.app.update(premium_type=amo.ADDON_FREE_INAPP)
        self.user = UserProfile.objects.get(pk=31337)
        self.other = UserProfile.objects.get(pk=999)
        self.login(self.user)
        self.account = setup_payment_account(self.app, self.user)
        self.url = reverse('mkt.developers.apps.in_app_config',
                           args=[self.app.app_slug])

    def set_mocks(self, solitude):
        """Stub the Solitude client's bango/generic product lookups."""
        # bango product lookup resolves to a seller product path.
        get = mock.Mock()
        get.get_object_or_404.return_value = {
            'seller_product': '/path/to/prod-pk/'
        }
        post = mock.Mock()
        post.return_value = get
        solitude.api.bango.product = post

        # generic product lookup yields the in-app key/secret pair.
        get = mock.Mock()
        get.get_object_or_404.return_value = {'resource_pk': 'some-key',
                                              'secret': 'shhh!'}
        post = mock.Mock()
        post.return_value = get
        solitude.api.generic.product = post
@mock.patch('mkt.developers.views_payments.client')
class TestInappConfig(InappTest):
    """Access control and behavior of the in-app config view."""
    fixtures = fixture('webapp_337141', 'user_999')

    @raises(ObjectDoesNotExist)
    def test_not_seller(self, solitude):
        """404 (via ObjectDoesNotExist) when no Solitude product exists."""
        post = mock.Mock()
        post.side_effect = ObjectDoesNotExist
        solitude.api.generic.product = post
        eq_(self.client.get(self.url).status_code, 404)

    def test_key_generation(self, solitude):
        """POSTing resets the secret via a patch to the Solitude product."""
        self.set_mocks(solitude)
        self.client.post(self.url, {})
        args = solitude.api.generic.product().patch.call_args
        assert 'secret' in args[1]['data']

    def test_logged_out(self, solitude):
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))

    def test_different(self, solitude):
        """A non-author user is forbidden."""
        self.login(self.other)
        eq_(self.client.get(self.url).status_code, 403)

    def test_developer(self, solitude):
        self.login(self.other)
        AddonUser.objects.create(addon=self.app, user=self.other,
                                 role=amo.AUTHOR_ROLE_DEV)
        # Developer can read, but not reset.
        eq_(self.client.get(self.url).status_code, 200)
        eq_(self.client.post(self.url).status_code, 403)

    def test_not_inapp(self, solitude):
        """Non-inapp apps get redirected away from the config page."""
        self.app.update(premium_type=amo.ADDON_PREMIUM)
        eq_(self.client.get(self.url).status_code, 302)

    def test_no_account(self, solitude):
        """Apps without a payment account get redirected."""
        self.app.app_payment_account.delete()
        eq_(self.client.get(self.url).status_code, 302)
@mock.patch('mkt.developers.views_payments.client')
class TestInappSecret(InappTest):
    """Access control for the in-app secret endpoint."""
    fixtures = fixture('webapp_337141', 'user_999')

    def setUp(self):
        super(TestInappSecret, self).setUp()
        self.url = reverse('mkt.developers.apps.in_app_secret',
                           args=[self.app.app_slug])

    def test_show_secret(self, solitude):
        """Owner sees the secret fetched through bango -> generic product."""
        self.set_mocks(solitude)
        resp = self.client.get(self.url)
        eq_(resp.content, 'shhh!')
        pk = uri_to_pk(self.account.product_uri)
        solitude.api.bango.product.assert_called_with(pk)
        solitude.api.generic.product.assert_called_with('prod-pk')

    def test_logged_out(self, solitude):
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))

    def test_different(self, solitude):
        """An unrelated user is forbidden."""
        self.client.login(username='regular@mozilla.com', password='password')
        eq_(self.client.get(self.url).status_code, 403)

    def test_developer(self, solitude):
        """Unlike the config reset, developers may read the secret."""
        self.set_mocks(solitude)
        self.login(self.other)
        AddonUser.objects.create(addon=self.app, user=self.other,
                                 role=amo.AUTHOR_ROLE_DEV)
        resp = self.client.get(self.url)
        eq_(resp.content, 'shhh!')
class InappKeysTest(InappTest):
    """Shared fixture for in-app sandbox key tests."""
    fixtures = fixture('webapp_337141', 'user_999')

    def setUp(self):
        super(InappKeysTest, self).setUp()
        self.create_switch('in-app-sandbox')
        self.url = reverse('mkt.developers.apps.in_app_keys')
        # Canned Solitude identifiers used by setup_solitude().
        self.seller_uri = '/seller/1/'
        self.product_pk = 2

    def setup_solitude(self, solitude):
        """Stub Solitude seller/product creation with the canned identifiers."""
        solitude.api.generic.seller.post.return_value = {
            'resource_uri': self.seller_uri}
        solitude.api.generic.product.post.return_value = {
            'resource_pk': self.product_pk}
@mock.patch('mkt.developers.models.client')
class TestInappKeys(InappKeysTest):
    """Creation and reset of per-user in-app sandbox keys."""

    def test_logged_out(self, solitude):
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))

    def test_no_key(self, solitude):
        """Page renders with no key before one is generated."""
        res = self.client.get(self.url)
        eq_(res.status_code, 200)
        eq_(res.context['key'], None)

    def test_key_generation(self, solitude):
        """POST creates a seller + simulation-access product in Solitude."""
        self.setup_solitude(solitude)
        res = self.client.post(self.url)
        ok_(res['Location'].endswith(self.url), res)
        ok_(solitude.api.generic.seller.post.called)
        ok_(solitude.api.generic.product.post.called)
        key = UserInappKey.objects.get()
        eq_(key.solitude_seller.resource_uri, self.seller_uri)
        eq_(key.seller_product_pk, self.product_pk)
        # The product must be created with simulation-only access.
        m = solitude.api.generic.product.post.mock_calls
        eq_(m[0][2]['data']['access'], ACCESS_SIMULATE)

    def test_reset(self, solitude):
        """POST with an existing key patches a new secret onto the product."""
        self.setup_solitude(solitude)
        key = UserInappKey.create(self.user)
        product = mock.Mock()
        solitude.api.generic.product.return_value = product
        self.client.post(self.url)
        product.patch.assert_called_with(data={'secret': ANY})
        solitude.api.generic.product.assert_called_with(key.seller_product_pk)
@mock.patch('mkt.developers.models.client')
class TestInappKeySecret(InappKeysTest):
    """Access control for the per-key secret endpoint."""

    def setUp(self):
        # NOTE(review): this override only delegates to super() and could be
        # removed without changing behavior.
        super(TestInappKeySecret, self).setUp()

    def setup_objects(self, solitude):
        """Create a key for self.user and point self.url at its secret view."""
        self.setup_solitude(solitude)
        key = UserInappKey.create(self.user)
        self.url = reverse('mkt.developers.apps.in_app_key_secret',
                           args=[key.pk])

    def test_logged_out(self, solitude):
        self.setup_objects(solitude)
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))

    def test_different(self, solitude):
        """Another user's key secret is forbidden."""
        self.setup_objects(solitude)
        self.login(self.other)
        eq_(self.client.get(self.url).status_code, 403)

    def test_secret(self, solitude):
        """Owner receives the raw secret fetched from Solitude."""
        self.setup_objects(solitude)
        secret = 'not telling'
        product = mock.Mock()
        product.get.return_value = {'secret': secret}
        solitude.api.generic.product.return_value = product
        res = self.client.get(self.url)
        eq_(res.status_code, 200)
        eq_(res.content, secret)
class TestPayments(amo.tests.TestCase):
    """Tests for the app payments setup view: free/paid toggling, pricing,
    regions, and payment-account association."""
    fixtures = fixture('webapp_337141', 'user_999', 'group_admin',
                       'user_admin', 'user_admin_group', 'prices')

    def setUp(self):
        self.webapp = self.get_webapp()
        AddonDeviceType.objects.create(
            addon=self.webapp, device_type=amo.DEVICE_GAIA.id)
        self.url = self.webapp.get_dev_url('payments')
        self.user = UserProfile.objects.get(pk=31337)
        self.other = UserProfile.objects.get(pk=999)
        self.admin = UserProfile.objects.get(email='admin@mozilla.com')

        # Default to logging in as the app owner.
        self.login(self.user)
        self.price = Price.objects.filter()[0]

        # Stub the Solitude client for every test; stopped in tearDown.
        self.patch = mock.patch('mkt.developers.models.client')
        self.sol = self.patch.start()

    def tearDown(self):
        self.patch.stop()

    def get_webapp(self):
        """Re-fetch the fixture app fresh from the database."""
        return Addon.objects.get(pk=337141)
    def get_region_list(self):
        """Return the region ids present in AddonExcludedRegion rows."""
        return list(AER.objects.values_list('region', flat=True))

    def get_postdata(self, extension):
        """Build a payments-form POST payload; *extension* overrides/adds keys."""
        base = {'regions': self.get_region_list(),
                'free_platforms': ['free-%s' % dt.class_name for dt in
                                   self.webapp.device_types],
                'paid_platforms': ['paid-%s' % dt.class_name for dt in
                                   self.webapp.device_types]}
        base.update(extension)
        return base
def test_free(self):
res = self.client.post(
self.url, self.get_postdata({'toggle-paid': 'free'}), follow=True)
eq_(self.get_webapp().premium_type, amo.ADDON_FREE)
eq_(res.context['is_paid'], False)
def test_premium_passes(self):
self.webapp.update(premium_type=amo.ADDON_FREE)
res = self.client.post(
self.url, self.get_postdata({'toggle-paid': 'paid'}), follow=True)
eq_(self.get_webapp().premium_type, amo.ADDON_PREMIUM)
eq_(res.context['is_paid'], True)
def test_check_api_url_in_context(self):
self.webapp.update(premium_type=amo.ADDON_FREE)
res = self.client.get(self.url)
eq_(res.context['api_pricelist_url'],
reverse('api_dispatch_list', kwargs={'resource_name': 'prices',
'api_name': 'webpay'}))
def test_regions_display_free(self):
self.webapp.update(premium_type=amo.ADDON_FREE)
res = self.client.get(self.url)
pqr = pq(res.content)
eq_(len(pqr('#regions-island')), 1)
eq_(len(pqr('#paid-regions-island')), 0)
def test_regions_display_premium(self):
self.webapp.update(premium_type=amo.ADDON_PREMIUM)
res = self.client.get(self.url)
pqr = pq(res.content)
eq_(len(pqr('#regions-island')), 0)
eq_(len(pqr('#paid-regions-island')), 1)
def test_free_with_in_app_tier_id_in_content(self):
price_tier_zero = Price.objects.create(price='0.00')
self.webapp.update(premium_type=amo.ADDON_PREMIUM)
res = self.client.get(self.url)
pqr = pq(res.content)
eq_(len(pqr('#region-list[data-tier-zero-id]')), 1)
eq_(int(pqr('#region-list').attr(
'data-tier-zero-id')), price_tier_zero.pk)
def test_not_applicable_data_attr_in_content(self):
self.webapp.update(premium_type=amo.ADDON_PREMIUM)
res = self.client.get(self.url)
pqr = pq(res.content)
eq_(len(pqr('#region-list[data-not-applicable-msg]')), 1)
def test_pay_method_ids_in_context(self):
self.webapp.update(premium_type=amo.ADDON_PREMIUM)
res = self.client.get(self.url)
self.assertSetEqual(res.context['payment_methods'].keys(),
[PAYMENT_METHOD_ALL, PAYMENT_METHOD_CARD,
PAYMENT_METHOD_OPERATOR])
def test_free_with_in_app_deletes_upsell(self):
self.make_premium(self.webapp)
new_upsell_app = Addon.objects.create(type=self.webapp.type,
status=self.webapp.status, name='upsell-%s' % self.webapp.id,
premium_type=amo.ADDON_FREE)
new_upsell = AddonUpsell(premium=self.webapp)
new_upsell.free = new_upsell_app
new_upsell.save()
assert self.webapp.upsold is not None
self.client.post(
self.url, self.get_postdata({'price': 'free',
'allow_inapp': 'True',
'regions': ALL_REGION_IDS}),
follow=True)
eq_(self.get_webapp().upsold, None)
eq_(AddonPremium.objects.all().count(), 0)
def test_premium_in_app_passes(self):
self.webapp.update(premium_type=amo.ADDON_FREE)
res = self.client.post(
self.url, self.get_postdata({'toggle-paid': 'paid'}))
self.assert3xx(res, self.url)
res = self.client.post(
self.url, self.get_postdata({'allow_inapp': True,
'price': self.price.pk,
'regions': ALL_REGION_IDS}))
self.assert3xx(res, self.url)
eq_(self.get_webapp().premium_type, amo.ADDON_PREMIUM_INAPP)
def test_later_then_free(self):
self.webapp.update(premium_type=amo.ADDON_PREMIUM,
status=amo.STATUS_NULL,
highest_status=amo.STATUS_PENDING)
self.make_premium(self.webapp)
res = self.client.post(
self.url, self.get_postdata({'toggle-paid': 'free',
'price': self.price.pk}))
self.assert3xx(res, self.url)
eq_(self.get_webapp().status, amo.STATUS_PENDING)
eq_(AddonPremium.objects.all().count(), 0)
def test_premium_price_initial_already_set(self):
Price.objects.create(price='0.00') # Make a free tier for measure.
self.make_premium(self.webapp)
r = self.client.get(self.url)
eq_(pq(r.content)('select[name=price] option[selected]').attr('value'),
str(self.webapp.premium.price.id))
def test_premium_price_initial_use_default(self):
Price.objects.create(price='10.00') # Make one more tier.
self.webapp.update(premium_type=amo.ADDON_FREE)
res = self.client.post(
self.url, self.get_postdata({'toggle-paid': 'paid'}), follow=True)
pqr = pq(res.content)
eq_(pqr('select[name=price] option[selected]').attr('value'),
str(Price.objects.get(price='0.99').id))
def test_starting_with_free_inapp_has_free_selected(self):
self.webapp.update(premium_type=amo.ADDON_FREE_INAPP)
res = self.client.get(self.url)
pqr = pq(res.content)
eq_(pqr('select[name=price] option[selected]').attr('value'), 'free')
def test_made_free_inapp_has_free_selected(self):
self.make_premium(self.webapp)
res = self.client.post(
self.url, self.get_postdata({'price': 'free',
'allow_inapp': 'True'}), follow=True)
pqr = pq(res.content)
eq_(pqr('select[name=price] option[selected]').attr('value'), 'free')
def test_made_free_inapp_then_free(self):
self.webapp.update(premium_type=amo.ADDON_PREMIUM)
self.make_premium(self.webapp)
self.client.post(
self.url, self.get_postdata({'price': 'free',
'allow_inapp': 'True',
'regions': ALL_REGION_IDS}))
eq_(self.get_webapp().premium_type, amo.ADDON_FREE_INAPP)
self.client.post(
self.url, self.get_postdata({'toggle-paid': 'free',
'regions': ALL_REGION_IDS}))
eq_(self.get_webapp().premium_type, amo.ADDON_FREE)
def test_free_with_inapp_without_account_is_incomplete(self):
self.webapp.update(premium_type=amo.ADDON_FREE)
# Toggle to paid
self.client.post(
self.url, self.get_postdata({'toggle-paid': 'paid'}))
res = self.client.post(
self.url, self.get_postdata({'price': 'free',
'allow_inapp': 'True',
'regions': ALL_REGION_IDS}))
self.assert3xx(res, self.url)
eq_(self.get_webapp().status, amo.STATUS_NULL)
eq_(AddonPremium.objects.all().count(), 0)
    def test_paid_app_without_account_is_incomplete(self):
        # A paid app saved without a payment account drops to STATUS_NULL.
        self.webapp.update(premium_type=amo.ADDON_FREE)
        # Toggle to paid
        self.client.post(
            self.url, self.get_postdata({'toggle-paid': 'paid'}))
        res = self.client.post(
            self.url, self.get_postdata({'price': self.price.pk,
                                         'allow_inapp': 'False',
                                         'regions': ALL_REGION_IDS}))
        self.assert3xx(res, self.url)
        eq_(self.get_webapp().status, amo.STATUS_NULL)
def setup_payment_acct(self, make_owner, user=None, bango_id=123):
# Set up Solitude return values.
api = self.sol.api # Set up Solitude return values.
api.generic.product.get_object.side_effect = ObjectDoesNotExist
api.generic.product.post.return_value = {'resource_uri': 'gpuri'}
api.bango.product.get_object.side_effect = ObjectDoesNotExist
api.bango.product.post.return_value = {
'resource_uri': 'bpruri', 'bango_id': 123}
if not user:
user = self.user
amo.set_user(user)
if make_owner:
# Make owner
AddonUser.objects.create(addon=self.webapp,
user=user, role=amo.AUTHOR_ROLE_OWNER)
# Set up an existing bank account.
seller = SolitudeSeller.objects.create(
resource_uri='/path/to/sel', user=user, uuid='uuid-%s' % user.pk)
acct = PaymentAccount.objects.create(
user=user, uri='asdf-%s' % user.pk, name='test', inactive=False,
seller_uri='suri-%s' % user.pk, solitude_seller=seller,
bango_package_id=123, agreed_tos=True)
return acct, api, user
def is_owner(self, user):
return (self.webapp.authors.filter(user=user,
addonuser__role=amo.AUTHOR_ROLE_OWNER).exists())
    def test_associate_acct_to_app_free_inapp(self):
        # An owner can attach a payment account to a free-with-inapp app;
        # no AddonPremium row should be created for it.
        acct, api, user = self.setup_payment_acct(make_owner=True)
        # Must be an app owner to change this.
        assert self.is_owner(user)
        # Associate account with app.
        self.make_premium(self.webapp)
        res = self.client.post(
            self.url, self.get_postdata({'price': 'free',
                                         'allow_inapp': 'True',
                                         'regions': ALL_REGION_IDS,
                                         'accounts': acct.pk}), follow=True)
        self.assertNoFormErrors(res)
        eq_(res.status_code, 200)
        eq_(self.webapp.app_payment_account.payment_account.pk, acct.pk)
        eq_(AddonPremium.objects.all().count(), 0)
    def test_associate_acct_to_app(self):
        # Attaching an account to a premium app pushes premium info to
        # Bango once and creates the product with purchase access.
        self.make_premium(self.webapp, price=self.price.price)
        acct, api, user = self.setup_payment_acct(make_owner=True)
        # Must be an app owner to change this.
        assert self.is_owner(user)
        # Associate account with app.
        res = self.client.post(
            self.url, self.get_postdata({'price': self.price.pk,
                                         'accounts': acct.pk,
                                         'regions': ALL_REGION_IDS}),
            follow=True)
        eq_(api.bango.premium.post.call_count, 1)
        self.assertNoFormErrors(res)
        eq_(res.status_code, 200)
        eq_(self.webapp.app_payment_account.payment_account.pk, acct.pk)
        # The Bango product must be created with a secret ...
        kw = api.bango.product.post.call_args[1]['data']
        ok_(kw['secret'], kw)
        # ... and the generic product with ACCESS_PURCHASE.
        kw = api.generic.product.post.call_args[1]['data']
        eq_(kw['access'], ACCESS_PURCHASE)
    def test_associate_acct_to_app_when_not_owner(self):
        # Non-owners get a 403 and no AddonPaymentAccount is created.
        self.make_premium(self.webapp, price=self.price.price)
        self.login(self.other)
        acct, api, user = self.setup_payment_acct(make_owner=False,
                                                  user=self.other)
        # Check we're not an owner before we start.
        assert not self.is_owner(user)
        # Attempt to associate account with app as non-owner.
        res = self.client.post(
            self.url, self.get_postdata({'accounts': acct.pk}), follow=True)
        # Non-owner posts are forbidden.
        eq_(res.status_code, 403)
        # Payment account shouldn't be set as we're not the owner.
        assert not (AddonPaymentAccount.objects
                                       .filter(addon=self.webapp).exists())
    def test_associate_acct_to_app_when_not_owner_and_an_admin(self):
        # Admins who are not owners can view the page but are blocked with
        # a form error when trying to change the payment account.
        self.make_premium(self.webapp, self.price.price)
        self.login(self.admin)
        acct, api, user = self.setup_payment_acct(make_owner=False,
                                                  user=self.admin)
        # Check we're not an owner before we start.
        assert not self.is_owner(user)
        assert not (AddonPaymentAccount.objects
                                       .filter(addon=self.webapp).exists())
        # Attempt to associate account with app as non-owner admin.
        res = self.client.post(
            self.url, self.get_postdata({'accounts': acct.pk,
                                         'price': self.price.pk,
                                         'regions': ALL_REGION_IDS}),
            follow=True)
        self.assertFormError(res, 'bango_account_list_form', 'accounts',
                             [u'You are not permitted to change payment '
                              'accounts.'])
        # Payment account shouldn't be set as we're not the owner.
        assert not (AddonPaymentAccount.objects
                                       .filter(addon=self.webapp).exists())
        pqr = pq(res.content)
        # Payment field should be disabled.
        eq_(len(pqr('#id_accounts[disabled]')), 1)
        # There's no existing associated account.
        eq_(len(pqr('.current-account')), 0)
    def test_associate_acct_to_app_when_admin_and_owner_acct_exists(self):
        # Even with an owner account already associated, a non-owner admin
        # cannot swap in their own payment account.
        self.make_premium(self.webapp, price=self.price.price)
        owner_acct, api, owner_user = self.setup_payment_acct(make_owner=True)
        assert self.is_owner(owner_user)
        res = self.client.post(
            self.url, self.get_postdata({'accounts': owner_acct.pk,
                                         'price': self.price.pk,
                                         'regions': ALL_REGION_IDS}),
            follow=True)
        assert (AddonPaymentAccount.objects
                                   .filter(addon=self.webapp).exists())
        self.login(self.admin)
        admin_acct, api, admin_user = self.setup_payment_acct(make_owner=False,
                                                              user=self.admin)
        # Check we're not an owner before we start.
        assert not self.is_owner(admin_user)
        res = self.client.post(
            self.url, self.get_postdata({'accounts': admin_acct.pk,
                                         'price': self.price.pk,
                                         'regions': ALL_REGION_IDS}),
            follow=True)
        self.assertFormError(res, 'bango_account_list_form', 'accounts',
                             [u'You are not permitted to change payment '
                              'accounts.'])
def test_one_owner_and_a_second_one_sees_selected_plus_own_accounts(self):
self.make_premium(self.webapp, price=self.price.price)
owner_acct, api, owner = self.setup_payment_acct(make_owner=True)
# Should be an owner.
assert self.is_owner(owner)
res = self.client.post(
self.url, self.get_postdata({'accounts': owner_acct.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
assert (AddonPaymentAccount.objects
.filter(addon=self.webapp).exists())
# Login as other user.
self.login(self.other)
owner_acct2, api, owner2 = self.setup_payment_acct(make_owner=True,
user=self.other)
assert self.is_owner(owner2)
# Should see the saved account plus 2nd owner's own account select
# and be able to save their own account but not the other owners.
res = self.client.get(self.url)
eq_(res.status_code, 200)
pqr = pq(res.content)
# Check we have just our account option present + '----'.
eq_(len(pqr('#id_accounts option')), 2)
eq_(len(pqr('#id_account[disabled]')), 0)
eq_(pqr('.current-account').text(), unicode(owner_acct))
res = self.client.post(
self.url, self.get_postdata({'accounts': owner_acct2.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
eq_(res.status_code, 200)
self.assertNoFormErrors(res)
pqr = pq(res.content)
eq_(len(pqr('.current-account')), 0)
eq_(pqr('#id_accounts option[selected]').text(), unicode(owner_acct2))
# Now there should just be our account.
eq_(len(pqr('#id_accounts option')), 1)
    def test_existing_account_should_be_disabled_for_non_owner(self):
        # A developer (non-owner) sees no selectable accounts; the
        # currently associated account is shown read-only.
        self.make_premium(self.webapp, price=self.price.price)
        acct, api, user = self.setup_payment_acct(make_owner=True)
        # Must be an app owner to change this.
        assert self.is_owner(user)
        # Associate account with app.
        res = self.client.post(
            self.url, self.get_postdata({'accounts': acct.pk,
                                         'price': self.price.pk,
                                         'regions': ALL_REGION_IDS}),
            follow=True)
        amo.set_user(self.other)
        # Make this user a dev so they have access to the payments page.
        AddonUser.objects.create(addon=self.webapp,
                                 user=self.other, role=amo.AUTHOR_ROLE_DEV)
        self.login(self.other)
        # Make sure not an owner.
        assert not self.is_owner(self.other)
        res = self.client.get(self.url)
        eq_(res.status_code, 200)
        pqr = pq(res.content)
        # No accounts setup.
        eq_(len(pqr('.no-accounts')), 1)
        # Currently associated account should be displayed separately.
        eq_(pqr('.current-account').text(), unicode(acct))
    def test_existing_account_should_be_disabled_for_non_owner_admin(self):
        # A non-owner admin sees the accounts field disabled and the
        # associated account displayed read-only.
        self.make_premium(self.webapp, price=self.price.price)
        # Login as regular user
        self.login(self.other)
        owner_acct, api, user = self.setup_payment_acct(make_owner=True,
                                                        user=self.other)
        # Must be an app owner to change this.
        assert self.is_owner(self.other)
        # Associate account with app.
        res = self.client.post(self.url,
                               self.get_postdata({'accounts': owner_acct.pk,
                                                  'price': self.price.pk,
                                                  'regions': ALL_REGION_IDS}),
                               follow=True)
        self.assertNoFormErrors(res)
        # Login as admin.
        self.login(self.admin)
        # Create an account as an admin.
        admin_acct, api, admin_user = self.setup_payment_acct(make_owner=False,
                                                              user=self.admin)
        # Make sure not an owner.
        assert not self.is_owner(self.admin)
        res = self.client.get(self.url)
        eq_(res.status_code, 200)
        pqr = pq(res.content)
        # Payment field should be disabled.
        eq_(len(pqr('#id_accounts[disabled]')), 1)
        # Currently associated account should be displayed separately.
        eq_(pqr('.current-account').text(), unicode(owner_acct))
    def test_deleted_payment_accounts_switch_to_incomplete_apps(self):
        # Deleting an app's payment account makes the app incomplete
        # (STATUS_PUBLIC -> STATUS_NULL).
        self.make_premium(self.webapp, price=self.price.price)
        self.login(self.user)
        addon_account = setup_payment_account(self.webapp, self.user)
        eq_(self.webapp.status, amo.STATUS_PUBLIC)
        self.client.post(reverse('mkt.developers.bango.delete_payment_account',
                                 args=[addon_account.payment_account.pk]))
        eq_(self.webapp.reload().status, amo.STATUS_NULL)
    def setup_bango_portal(self):
        # Shared setup for the Bango portal redirect tests: enable the
        # waffle switch, make the app premium, attach an account.
        self.create_switch('bango-portal')
        self.user = UserProfile.objects.get(pk=31337)
        self.webapp.update(premium_type=amo.ADDON_PREMIUM)
        self.login(self.user)
        self.account = setup_payment_account(self.webapp, self.user)
        self.portal_url = self.webapp.get_dev_url(
            'payments.bango_portal_from_addon')
    def test_bango_portal_links(self):
        # The portal link only appears behind the 'bango-portal' switch.
        payments_url = self.webapp.get_dev_url('payments')
        res = self.client.get(payments_url)
        account_template = self.extract_script_template(
            res.content, '#account-row-template')
        eq_(len(account_template('.portal-account')), 0)
        self.create_switch('bango-portal', db=True)
        res = self.client.get(payments_url)
        account_template = self.extract_script_template(
            res.content, '#account-row-template')
        eq_(len(account_template('.portal-account')), 1)
    @mock.patch('mkt.developers.views_payments.client.api')
    def test_bango_portal_redirect(self, api):
        # A successful Bango login yields a 204 whose Location header
        # carries the auth token and the (url-encoded) email address.
        self.setup_bango_portal()
        authentication_token = u'D0A44686-D4A3-4B2F-9BEB-5E4975E35192'
        api.bango.login.post.return_value = {
            'person_id': 600925,
            'email_address': u'admin@place.com',
            'authentication_token': authentication_token,
        }
        assert self.is_owner(self.user)
        res = self.client.get(self.portal_url)
        eq_(res.status_code, 204)
        eq_(api.bango.login.post.call_args[0][0]['packageId'], TEST_PACKAGE_ID)
        redirect_url = res['Location']
        assert authentication_token in redirect_url, redirect_url
        assert 'emailAddress=admin%40place.com' in redirect_url, redirect_url
    @mock.patch('mkt.developers.views_payments.client.api')
    def test_bango_portal_redirect_api_error(self, api):
        # Bango client errors are surfaced as a 400 with the error body.
        self.setup_bango_portal()
        err = {'errors': 'Something went wrong.'}
        api.bango.login.post.side_effect = HttpClientError(content=err)
        res = self.client.get(self.portal_url)
        eq_(res.status_code, 400)
        eq_(json.loads(res.content), err)
    def test_bango_portal_redirect_role_error(self):
        # Checks that only the owner can access the page (vs. developers).
        self.setup_bango_portal()
        addon_user = self.user.addonuser_set.all()[0]
        addon_user.role = amo.AUTHOR_ROLE_DEV
        addon_user.save()
        assert not self.is_owner(self.user)
        res = self.client.get(self.portal_url)
        eq_(res.status_code, 403)
    def test_bango_portal_redirect_permission_error(self):
        # Checks that the owner of another app can't access the page.
        self.setup_bango_portal()
        self.login(self.other)
        other_webapp = Addon.objects.create(type=self.webapp.type,
            status=self.webapp.status, name='other-%s' % self.webapp.id,
            premium_type=amo.ADDON_PREMIUM)
        AddonUser.objects.create(addon=other_webapp,
                                 user=self.other, role=amo.AUTHOR_ROLE_OWNER)
        res = self.client.get(self.portal_url)
        eq_(res.status_code, 403)
    def test_bango_portal_redirect_solitude_seller_error(self):
        # Checks that the owner has a SolitudeSeller instance for this app.
        self.setup_bango_portal()
        assert self.is_owner(self.user)
        # Reassign the seller to someone else so the check fails.
        (self.webapp.app_payment_account.payment_account.
         solitude_seller.update(user=self.other))
        res = self.client.get(self.portal_url)
        eq_(res.status_code, 403)
class TestRegions(amo.tests.TestCase):
    """Region availability/exclusion behaviour on the payments page."""
    fixtures = ['base/apps', 'base/users', 'webapps/337141-steamcube']

    def setUp(self):
        self.webapp = self.get_webapp()
        AddonDeviceType.objects.create(
            addon=self.webapp, device_type=amo.DEVICE_GAIA.id)
        self.url = self.webapp.get_dev_url('payments')
        self.username = 'admin@mozilla.com'
        assert self.client.login(username=self.username, password='password')
        # Mock out the Solitude client for the duration of each test.
        self.patch = mock.patch('mkt.developers.models.client')
        self.sol = self.patch.start()

    def tearDown(self):
        self.patch.stop()

    def get_webapp(self):
        return Addon.objects.get(pk=337141)

    def get_dict(self, **kwargs):
        # Base form data: all regions enabled, plus any kwargs overrides.
        extension = {'regions': mkt.regions.ALL_REGION_IDS,
                     'other_regions': 'on',
                     'free_platforms': ['free-%s' % dt.class_name for dt in
                                        self.webapp.device_types]}
        extension.update(kwargs)
        return extension

    def get_excluded_ids(self):
        # Region ids currently excluded for this app, sorted.
        return sorted(AER.objects.filter(addon=self.webapp)
                                 .values_list('region', flat=True))

    def test_edit_all_regions_are_not_excluded(self):
        # Keep the category around for good measure.
        Category.objects.create(type=amo.ADDON_WEBAPP, slug='games')
        r = self.client.post(self.url, self.get_dict())
        self.assertNoFormErrors(r)
        eq_(AER.objects.count(), 0)

    def test_games_form_disabled(self):
        # Game apps without content ratings get BR and DE disabled.
        games = Category.objects.create(type=amo.ADDON_WEBAPP, slug='games')
        AddonCategory.objects.create(addon=self.webapp, category=games)
        r = self.client.get(self.url, self.get_dict())
        self.assertNoFormErrors(r)
        td = pq(r.content)('#regions')
        eq_(td.find('div[data-disabled-regions]')
              .attr('data-disabled-regions'),
            '[%d, %d]' % (mkt.regions.BR.id, mkt.regions.DE.id))
        eq_(td.find('.note.disabled-regions').length, 1)

    def test_games_form_enabled_with_content_rating(self):
        # With BR/DE content ratings in place, no regions are disabled.
        for region in (mkt.regions.BR, mkt.regions.DE):
            rb = region.ratingsbodies[0]
            ContentRating.objects.create(
                addon=self.webapp, ratings_body=rb.id, rating=rb.ratings[0].id)
        games = Category.objects.create(type=amo.ADDON_WEBAPP, slug='games')
        AddonCategory.objects.create(addon=self.webapp, category=games)
        r = self.client.get(self.url)
        td = pq(r.content)('#regions')
        eq_(td.find('div[data-disabled-regions]')
              .attr('data-disabled-regions'), '[]')
        eq_(td.find('.note.disabled-regions').length, 0)

    def test_brazil_other_cats_form_enabled(self):
        # Non-game apps have no disabled regions.
        r = self.client.get(self.url)
        td = pq(r.content)('#regions')
        eq_(td.find('div[data-disabled-regions]')
              .attr('data-disabled-regions'), '[]')
        eq_(td.find('.note.disabled-regions').length, 0)
class PaymentsBase(amo.tests.TestCase):
    """Base class: logs in user 999 and creates one payment account."""
    fixtures = fixture('user_editor', 'user_999')

    def setUp(self):
        self.user = UserProfile.objects.get(pk=999)
        self.login(self.user)
        self.account = self.create()

    def create(self):
        # If user is defined on SolitudeSeller, why do we also need it on
        # PaymentAccount? Fewer JOINs.
        seller = SolitudeSeller.objects.create(user=self.user)
        return PaymentAccount.objects.create(user=self.user,
                                             solitude_seller=seller,
                                             uri='/bango/package/123',
                                             name="cvan's cnotes",
                                             agreed_tos=True)
class TestPaymentAccountsAdd(PaymentsBase):
    # TODO: this test provides bare coverage and might need to be expanded.

    def setUp(self):
        super(TestPaymentAccountsAdd, self).setUp()
        self.url = reverse('mkt.developers.bango.add_payment_account')

    def test_login_required(self):
        self.client.logout()
        self.assertLoginRequired(self.client.post(self.url, data={}))

    @mock.patch('mkt.developers.models.client')
    def test_create(self, client):
        # Posting a full Bango registration form returns the new account's
        # pk and an agreement URL, and creates a second PaymentAccount.
        res = self.client.post(self.url, data={
            'bankAccountPayeeName': 'name',
            'companyName': 'company',
            'vendorName': 'vendor',
            'financeEmailAddress': 'a@a.com',
            'adminEmailAddress': 'a@a.com',
            'supportEmailAddress': 'a@a.com',
            'address1': 'address 1',
            'addressCity': 'city',
            'addressState': 'state',
            'addressZipCode': 'zip',
            'addressPhone': '123',
            'countryIso': 'BRA',
            'currencyIso': 'EUR',
            'bankAccountNumber': '123',
            'bankAccountCode': '123',
            'bankName': 'asd',
            'bankAddress1': 'address 2',
            'bankAddressZipCode': '123',
            'bankAddressIso': 'BRA',
            'account_name': 'account'
        })
        output = json.loads(res.content)
        ok_('pk' in output)
        ok_('agreement-url' in output)
        eq_(PaymentAccount.objects.count(), 2)
class TestPaymentAccounts(PaymentsBase):
    """The JSON listing of the logged-in user's payment accounts."""

    def setUp(self):
        super(TestPaymentAccounts, self).setUp()
        self.url = reverse('mkt.developers.bango.payment_accounts')

    def test_login_required(self):
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))

    def test_mine(self):
        res = self.client.get(self.url)
        eq_(res.status_code, 200)
        output = json.loads(res.content)
        eq_(output[0]['id'], self.account.pk)
        ok_(''' in output[0]['name'])  # Was jinja2 escaped.
class TestPaymentPortal(PaymentsBase):
    """Portal URLs in the account listing, behind the bango-portal switch."""

    def setUp(self):
        super(TestPaymentPortal, self).setUp()
        self.create_switch('bango-portal')
        self.app_slug = 'app-slug'

    def test_with_app_slug(self):
        # With an app-slug param, each account carries a portal URL.
        url = reverse('mkt.developers.bango.payment_accounts')
        res = self.client.get(url, {'app-slug': self.app_slug})
        eq_(res.status_code, 200)
        output = json.loads(res.content)
        eq_(output[0]['portal-url'],
            reverse('mkt.developers.apps.payments.bango_portal_from_addon',
                    args=[self.app_slug]))

    def test_without_app_slug(self):
        # No app-slug param: no portal URL in the payload.
        url = reverse('mkt.developers.bango.payment_accounts')
        res = self.client.get(url)
        eq_(res.status_code, 200)
        output = json.loads(res.content)
        ok_('portal-url' not in output[0])
class TestPaymentAccount(PaymentsBase):
    """The single payment-account detail endpoint."""

    def setUp(self):
        super(TestPaymentAccount, self).setUp()
        self.url = reverse('mkt.developers.bango.payment_account',
                           args=[self.account.pk])

    def test_login_required(self):
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))

    @mock.patch('mkt.developers.models.client')
    def test_get(self, client):
        # The view merges Bango package details into the local account data.
        package = mock.Mock()
        package.get.return_value = {'full': {'vendorName': 'testval'}}
        client.api.bango.package.return_value = package
        res = self.client.get(self.url)
        client.api.bango.package.assert_called_with('123')
        eq_(res.status_code, 200)
        output = json.loads(res.content)
        eq_(output['account_name'], self.account.name)
        assert 'vendorName' in output, (
            'Details from Bango not getting merged in: %s' % output)
        eq_(output['vendorName'], 'testval')
class TestPaymentAgreement(PaymentsBase):
    """GET/POST of the Bango SBI terms-of-service agreement."""

    def setUp(self):
        super(TestPaymentAgreement, self).setUp()
        self.url = reverse('mkt.developers.bango.agreement',
                           args=[self.account.pk])

    def test_anon(self):
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))

    @mock.patch('mkt.developers.views_payments.client.api')
    def test_get(self, api):
        # GET returns the agreement text from Solitude.
        api.bango.sbi.agreement.get_object.return_value = {
            'text': 'blah', 'valid': '2010-08-31T00:00:00'}
        res = self.client.get(self.url)
        eq_(res.status_code, 200)
        data = json.loads(res.content)
        eq_(data['text'], 'blah')

    @mock.patch('mkt.developers.views_payments.client.api')
    def test_set(self, api):
        # POST accepts the agreement and echoes back its validity date.
        api.bango.sbi.post.return_value = {
            'expires': '2014-08-31T00:00:00',
            'valid': '2014-08-31T00:00:00'}
        res = self.client.post(self.url)
        eq_(res.status_code, 200)
        data = json.loads(res.content)
        eq_(data['valid'], '2014-08-31T00:00:00')
class TestPaymentAccountsForm(PaymentsBase):
    """The account-list form fragment endpoint."""

    def setUp(self):
        super(TestPaymentAccountsForm, self).setUp()
        self.url = reverse('mkt.developers.bango.payment_accounts_form')

    def test_login_required(self):
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))

    def test_mine(self):
        res = self.client.get(self.url)
        eq_(res.status_code, 200)
        eq_(res.context['bango_account_list_form']
               .fields['accounts'].choices.queryset.get(), self.account)

    def test_mine_disagreed_tos(self):
        # Accounts without agreed_tos are excluded from the choices.
        self.account.update(agreed_tos=False)
        res = self.client.get(self.url)
        eq_(res.status_code, 200)
        self.assertSetEqual(res.context['bango_account_list_form']
                               .fields['accounts'].choices.queryset.all(), [])
class TestPaymentDelete(PaymentsBase):
    """Deleting a payment account (soft delete: marks it inactive)."""

    def setUp(self):
        super(TestPaymentDelete, self).setUp()
        self.url = reverse('mkt.developers.bango.delete_payment_account',
                           args=[self.account.pk])

    def test_login_required(self):
        self.client.logout()
        self.assertLoginRequired(self.client.post(self.url, data={}))

    def test_not_mine(self):
        # Other users' accounts look like 404s.
        self.login(UserProfile.objects.get(pk=5497308))
        eq_(self.client.post(self.url, data={}).status_code, 404)

    def test_mine(self):
        eq_(self.client.post(self.url, data={}).status_code, 200)
        # The row survives but is flagged inactive.
        eq_(PaymentAccount.objects.get(pk=self.account.pk).inactive, True)
| 41.302569
| 79
| 0.60522
|
import json
from django.core.exceptions import ObjectDoesNotExist
import mock
from curling.lib import HttpClientError
from mock import ANY
from nose.tools import eq_, ok_, raises
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.urlresolvers import reverse
from addons.models import (Addon, AddonCategory, AddonDeviceType,
AddonPremium, AddonUpsell, AddonUser, Category)
from constants.payments import (PAYMENT_METHOD_ALL,
PAYMENT_METHOD_CARD,
PAYMENT_METHOD_OPERATOR)
from mkt.constants.payments import ACCESS_PURCHASE, ACCESS_SIMULATE
from mkt.constants.regions import ALL_REGION_IDS
from market.models import Price
from users.models import UserProfile
import mkt
from mkt.developers.models import (AddonPaymentAccount, PaymentAccount,
SolitudeSeller, uri_to_pk, UserInappKey)
from mkt.site.fixtures import fixture
from mkt.webapps.models import AddonExcludedRegion as AER, ContentRating
TEST_PACKAGE_ID = 2
def setup_payment_account(app, user, uid='uid', package_id=TEST_PACKAGE_ID):
    # Create a SolitudeSeller + PaymentAccount for `user` and link them to
    # `app` via an AddonPaymentAccount, which is returned. `uid` doubles as
    # the seller uuid and the account's seller/account URIs.
    seller = SolitudeSeller.objects.create(user=user, uuid=uid)
    payment = PaymentAccount.objects.create(user=user, solitude_seller=seller,
                                            agreed_tos=True, seller_uri=uid,
                                            uri=uid,
                                            bango_package_id=package_id)
    return AddonPaymentAccount.objects.create(addon=app,
        product_uri='/path/to/%s/' % app.pk, account_uri=payment.uri,
        payment_account=payment)
class InappTest(amo.tests.TestCase):
    """Shared setup for the in-app payment configuration tests."""

    def setUp(self):
        self.create_switch('in-app-payments')
        self.app = Addon.objects.get(pk=337141)
        self.app.update(premium_type=amo.ADDON_FREE_INAPP)
        self.user = UserProfile.objects.get(pk=31337)
        self.other = UserProfile.objects.get(pk=999)
        self.login(self.user)
        self.account = setup_payment_account(self.app, self.user)
        self.url = reverse('mkt.developers.apps.in_app_config',
                           args=[self.app.app_slug])

    def set_mocks(self, solitude):
        # Stub the Solitude bango product endpoint ...
        get = mock.Mock()
        get.get_object_or_404.return_value = {
            'seller_product': '/path/to/prod-pk/'
        }
        post = mock.Mock()
        post.return_value = get
        solitude.api.bango.product = post
        # ... and the generic product endpoint (key + secret).
        get = mock.Mock()
        get.get_object_or_404.return_value = {'resource_pk': 'some-key',
                                              'secret': 'shhh!'}
        post = mock.Mock()
        post.return_value = get
        solitude.api.generic.product = post
@mock.patch('mkt.developers.views_payments.client')
class TestInappConfig(InappTest):
    """Access control and key handling for the in-app config page."""
    fixtures = fixture('webapp_337141', 'user_999')

    @raises(ObjectDoesNotExist)
    def test_not_seller(self, solitude):
        # Missing Solitude product propagates as ObjectDoesNotExist.
        post = mock.Mock()
        post.side_effect = ObjectDoesNotExist
        solitude.api.generic.product = post
        eq_(self.client.get(self.url).status_code, 404)

    def test_key_generation(self, solitude):
        # Posting regenerates the product secret via a Solitude patch.
        self.set_mocks(solitude)
        self.client.post(self.url, {})
        args = solitude.api.generic.product().patch.call_args
        assert 'secret' in args[1]['data']

    def test_logged_out(self, solitude):
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))

    def test_different(self, solitude):
        # Unrelated users are forbidden.
        self.login(self.other)
        eq_(self.client.get(self.url).status_code, 403)

    def test_developer(self, solitude):
        # Developers may view but not post.
        self.login(self.other)
        AddonUser.objects.create(addon=self.app, user=self.other,
                                 role=amo.AUTHOR_ROLE_DEV)
        eq_(self.client.get(self.url).status_code, 200)
        eq_(self.client.post(self.url).status_code, 403)

    def test_not_inapp(self, solitude):
        # Non-inapp apps are redirected away.
        self.app.update(premium_type=amo.ADDON_PREMIUM)
        eq_(self.client.get(self.url).status_code, 302)

    def test_no_account(self, solitude):
        # Apps without a payment account are redirected away.
        self.app.app_payment_account.delete()
        eq_(self.client.get(self.url).status_code, 302)
@mock.patch('mkt.developers.views_payments.client')
class TestInappSecret(InappTest):
    """Access control for the in-app secret view."""
    fixtures = fixture('webapp_337141', 'user_999')

    def setUp(self):
        super(TestInappSecret, self).setUp()
        self.url = reverse('mkt.developers.apps.in_app_secret',
                           args=[self.app.app_slug])

    def test_show_secret(self, solitude):
        # The secret is fetched via bango product -> generic product.
        self.set_mocks(solitude)
        resp = self.client.get(self.url)
        eq_(resp.content, 'shhh!')
        pk = uri_to_pk(self.account.product_uri)
        solitude.api.bango.product.assert_called_with(pk)
        solitude.api.generic.product.assert_called_with('prod-pk')

    def test_logged_out(self, solitude):
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))

    def test_different(self, solitude):
        # Unrelated users are forbidden.
        self.client.login(username='regular@mozilla.com', password='password')
        eq_(self.client.get(self.url).status_code, 403)

    def test_developer(self, solitude):
        # Developers can read the secret too.
        self.set_mocks(solitude)
        self.login(self.other)
        AddonUser.objects.create(addon=self.app, user=self.other,
                                 role=amo.AUTHOR_ROLE_DEV)
        resp = self.client.get(self.url)
        eq_(resp.content, 'shhh!')
class InappKeysTest(InappTest):
    """Shared setup for the in-app sandbox key tests."""
    fixtures = fixture('webapp_337141', 'user_999')

    def setUp(self):
        super(InappKeysTest, self).setUp()
        self.create_switch('in-app-sandbox')
        self.url = reverse('mkt.developers.apps.in_app_keys')
        self.seller_uri = '/seller/1/'
        self.product_pk = 2

    def setup_solitude(self, solitude):
        # Canned Solitude responses for seller/product creation.
        solitude.api.generic.seller.post.return_value = {
            'resource_uri': self.seller_uri}
        solitude.api.generic.product.post.return_value = {
            'resource_pk': self.product_pk}
@mock.patch('mkt.developers.models.client')
class TestInappKeys(InappKeysTest):
    """Creating and resetting per-user in-app sandbox keys."""

    def test_logged_out(self, solitude):
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))

    def test_no_key(self, solitude):
        # The page renders with no key until one is generated.
        res = self.client.get(self.url)
        eq_(res.status_code, 200)
        eq_(res.context['key'], None)

    def test_key_generation(self, solitude):
        # POST creates a seller + simulate-access product in Solitude and a
        # matching UserInappKey row locally.
        self.setup_solitude(solitude)
        res = self.client.post(self.url)
        ok_(res['Location'].endswith(self.url), res)
        ok_(solitude.api.generic.seller.post.called)
        ok_(solitude.api.generic.product.post.called)
        key = UserInappKey.objects.get()
        eq_(key.solitude_seller.resource_uri, self.seller_uri)
        eq_(key.seller_product_pk, self.product_pk)
        m = solitude.api.generic.product.post.mock_calls
        eq_(m[0][2]['data']['access'], ACCESS_SIMULATE)

    def test_reset(self, solitude):
        # POSTing with an existing key patches a new secret onto it.
        self.setup_solitude(solitude)
        key = UserInappKey.create(self.user)
        product = mock.Mock()
        solitude.api.generic.product.return_value = product
        self.client.post(self.url)
        product.patch.assert_called_with(data={'secret': ANY})
        solitude.api.generic.product.assert_called_with(key.seller_product_pk)
@mock.patch('mkt.developers.models.client')
class TestInappKeySecret(InappKeysTest):
    """Access control and retrieval of an in-app key's secret."""

    def setUp(self):
        super(TestInappKeySecret, self).setUp()

    def setup_objects(self, solitude):
        # Create a key for self.user and point self.url at its secret view.
        self.setup_solitude(solitude)
        key = UserInappKey.create(self.user)
        self.url = reverse('mkt.developers.apps.in_app_key_secret',
                           args=[key.pk])

    def test_logged_out(self, solitude):
        self.setup_objects(solitude)
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))

    def test_different(self, solitude):
        # Another user's key secret is forbidden.
        self.setup_objects(solitude)
        self.login(self.other)
        eq_(self.client.get(self.url).status_code, 403)

    def test_secret(self, solitude):
        # The owner gets the raw secret from Solitude.
        self.setup_objects(solitude)
        secret = 'not telling'
        product = mock.Mock()
        product.get.return_value = {'secret': secret}
        solitude.api.generic.product.return_value = product
        res = self.client.get(self.url)
        eq_(res.status_code, 200)
        eq_(res.content, secret)
class TestPayments(amo.tests.TestCase):
fixtures = fixture('webapp_337141', 'user_999', 'group_admin',
'user_admin', 'user_admin_group', 'prices')
    def setUp(self):
        # Gaia-only webapp owned by user 31337; Solitude mocked per-test.
        self.webapp = self.get_webapp()
        AddonDeviceType.objects.create(
            addon=self.webapp, device_type=amo.DEVICE_GAIA.id)
        self.url = self.webapp.get_dev_url('payments')
        self.user = UserProfile.objects.get(pk=31337)
        self.other = UserProfile.objects.get(pk=999)
        self.admin = UserProfile.objects.get(email='admin@mozilla.com')
        self.login(self.user)
        self.price = Price.objects.filter()[0]
        self.patch = mock.patch('mkt.developers.models.client')
        self.sol = self.patch.start()
    def tearDown(self):
        # Undo the Solitude client patch started in setUp.
        self.patch.stop()
    def get_webapp(self):
        # Fresh fetch so tests see post-save state.
        return Addon.objects.get(pk=337141)
    def get_region_list(self):
        # Currently excluded region ids (across all apps).
        return list(AER.objects.values_list('region', flat=True))
    def get_postdata(self, extension):
        # Base payments-form data merged with the per-test `extension`.
        base = {'regions': self.get_region_list(),
                'free_platforms': ['free-%s' % dt.class_name for dt in
                                   self.webapp.device_types],
                'paid_platforms': ['paid-%s' % dt.class_name for dt in
                                   self.webapp.device_types]}
        base.update(extension)
        return base
    def test_free(self):
        # Toggling to free sets ADDON_FREE and is_paid False in context.
        res = self.client.post(
            self.url, self.get_postdata({'toggle-paid': 'free'}), follow=True)
        eq_(self.get_webapp().premium_type, amo.ADDON_FREE)
        eq_(res.context['is_paid'], False)
    def test_premium_passes(self):
        # Toggling to paid sets ADDON_PREMIUM and is_paid True in context.
        self.webapp.update(premium_type=amo.ADDON_FREE)
        res = self.client.post(
            self.url, self.get_postdata({'toggle-paid': 'paid'}), follow=True)
        eq_(self.get_webapp().premium_type, amo.ADDON_PREMIUM)
        eq_(res.context['is_paid'], True)
    def test_check_api_url_in_context(self):
        # The webpay price-list API URL is exposed to the template.
        self.webapp.update(premium_type=amo.ADDON_FREE)
        res = self.client.get(self.url)
        eq_(res.context['api_pricelist_url'],
            reverse('api_dispatch_list', kwargs={'resource_name': 'prices',
                                                 'api_name': 'webpay'}))
    def test_regions_display_free(self):
        # Free apps show the free-regions island only.
        self.webapp.update(premium_type=amo.ADDON_FREE)
        res = self.client.get(self.url)
        pqr = pq(res.content)
        eq_(len(pqr('#regions-island')), 1)
        eq_(len(pqr('#paid-regions-island')), 0)
    def test_regions_display_premium(self):
        # Premium apps show the paid-regions island only.
        self.webapp.update(premium_type=amo.ADDON_PREMIUM)
        res = self.client.get(self.url)
        pqr = pq(res.content)
        eq_(len(pqr('#regions-island')), 0)
        eq_(len(pqr('#paid-regions-island')), 1)
    def test_free_with_in_app_tier_id_in_content(self):
        # The $0.00 tier's pk is surfaced as data-tier-zero-id for JS.
        price_tier_zero = Price.objects.create(price='0.00')
        self.webapp.update(premium_type=amo.ADDON_PREMIUM)
        res = self.client.get(self.url)
        pqr = pq(res.content)
        eq_(len(pqr('#region-list[data-tier-zero-id]')), 1)
        eq_(int(pqr('#region-list').attr(
            'data-tier-zero-id')), price_tier_zero.pk)
    def test_not_applicable_data_attr_in_content(self):
        # The region list carries a not-applicable message data attribute.
        self.webapp.update(premium_type=amo.ADDON_PREMIUM)
        res = self.client.get(self.url)
        pqr = pq(res.content)
        eq_(len(pqr('#region-list[data-not-applicable-msg]')), 1)
    def test_pay_method_ids_in_context(self):
        # All three payment-method constants are exposed to the template.
        self.webapp.update(premium_type=amo.ADDON_PREMIUM)
        res = self.client.get(self.url)
        self.assertSetEqual(res.context['payment_methods'].keys(),
                            [PAYMENT_METHOD_ALL, PAYMENT_METHOD_CARD,
                             PAYMENT_METHOD_OPERATOR])
    def test_free_with_in_app_deletes_upsell(self):
        # Going free-with-inapp removes the app's upsell relationship and
        # its AddonPremium row.
        self.make_premium(self.webapp)
        new_upsell_app = Addon.objects.create(type=self.webapp.type,
            status=self.webapp.status, name='upsell-%s' % self.webapp.id,
            premium_type=amo.ADDON_FREE)
        new_upsell = AddonUpsell(premium=self.webapp)
        new_upsell.free = new_upsell_app
        new_upsell.save()
        assert self.webapp.upsold is not None
        self.client.post(
            self.url, self.get_postdata({'price': 'free',
                                         'allow_inapp': 'True',
                                         'regions': ALL_REGION_IDS}),
            follow=True)
        eq_(self.get_webapp().upsold, None)
        eq_(AddonPremium.objects.all().count(), 0)
    def test_premium_in_app_passes(self):
        # Paid + allow_inapp ends up as ADDON_PREMIUM_INAPP.
        self.webapp.update(premium_type=amo.ADDON_FREE)
        res = self.client.post(
            self.url, self.get_postdata({'toggle-paid': 'paid'}))
        self.assert3xx(res, self.url)
        res = self.client.post(
            self.url, self.get_postdata({'allow_inapp': True,
                                         'price': self.price.pk,
                                         'regions': ALL_REGION_IDS}))
        self.assert3xx(res, self.url)
        eq_(self.get_webapp().premium_type, amo.ADDON_PREMIUM_INAPP)
    def test_later_then_free(self):
        # Toggling back to free restores highest_status (PENDING here) and
        # clears AddonPremium.
        self.webapp.update(premium_type=amo.ADDON_PREMIUM,
                           status=amo.STATUS_NULL,
                           highest_status=amo.STATUS_PENDING)
        self.make_premium(self.webapp)
        res = self.client.post(
            self.url, self.get_postdata({'toggle-paid': 'free',
                                         'price': self.price.pk}))
        self.assert3xx(res, self.url)
        eq_(self.get_webapp().status, amo.STATUS_PENDING)
        eq_(AddonPremium.objects.all().count(), 0)
    def test_premium_price_initial_already_set(self):
        # An app with a saved premium price shows that tier preselected.
        Price.objects.create(price='0.00')  # Make a free tier for measure.
        self.make_premium(self.webapp)
        r = self.client.get(self.url)
        eq_(pq(r.content)('select[name=price] option[selected]').attr('value'),
            str(self.webapp.premium.price.id))
    def test_premium_price_initial_use_default(self):
        # With no saved premium, the $0.99 default tier is preselected.
        Price.objects.create(price='10.00')  # Make one more tier.
        self.webapp.update(premium_type=amo.ADDON_FREE)
        res = self.client.post(
            self.url, self.get_postdata({'toggle-paid': 'paid'}), follow=True)
        pqr = pq(res.content)
        eq_(pqr('select[name=price] option[selected]').attr('value'),
            str(Price.objects.get(price='0.99').id))
    def test_starting_with_free_inapp_has_free_selected(self):
        """A free-with-in-app app shows 'free' preselected in the price
        dropdown."""
        self.webapp.update(premium_type=amo.ADDON_FREE_INAPP)
        res = self.client.get(self.url)
        pqr = pq(res.content)
        eq_(pqr('select[name=price] option[selected]').attr('value'), 'free')
def test_made_free_inapp_has_free_selected(self):
self.make_premium(self.webapp)
res = self.client.post(
self.url, self.get_postdata({'price': 'free',
'allow_inapp': 'True'}), follow=True)
pqr = pq(res.content)
eq_(pqr('select[name=price] option[selected]').attr('value'), 'free')
def test_made_free_inapp_then_free(self):
self.webapp.update(premium_type=amo.ADDON_PREMIUM)
self.make_premium(self.webapp)
self.client.post(
self.url, self.get_postdata({'price': 'free',
'allow_inapp': 'True',
'regions': ALL_REGION_IDS}))
eq_(self.get_webapp().premium_type, amo.ADDON_FREE_INAPP)
self.client.post(
self.url, self.get_postdata({'toggle-paid': 'free',
'regions': ALL_REGION_IDS}))
eq_(self.get_webapp().premium_type, amo.ADDON_FREE)
    def test_free_with_inapp_without_account_is_incomplete(self):
        """Enabling in-app without a payment account leaves the app in the
        incomplete (STATUS_NULL) state."""
        self.webapp.update(premium_type=amo.ADDON_FREE)
        self.client.post(
            self.url, self.get_postdata({'toggle-paid': 'paid'}))
        # Save as free + in-app, but never associate a payment account.
        res = self.client.post(
            self.url, self.get_postdata({'price': 'free',
                                         'allow_inapp': 'True',
                                         'regions': ALL_REGION_IDS}))
        self.assert3xx(res, self.url)
        eq_(self.get_webapp().status, amo.STATUS_NULL)
        # No AddonPremium row should exist for a free price.
        eq_(AddonPremium.objects.all().count(), 0)
def test_paid_app_without_account_is_incomplete(self):
self.webapp.update(premium_type=amo.ADDON_FREE)
self.client.post(
self.url, self.get_postdata({'toggle-paid': 'paid'}))
res = self.client.post(
self.url, self.get_postdata({'price': self.price.pk,
'allow_inapp': 'False',
'regions': ALL_REGION_IDS}))
self.assert3xx(res, self.url)
eq_(self.get_webapp().status, amo.STATUS_NULL)
    def setup_payment_acct(self, make_owner, user=None, bango_id=123):
        """Create a PaymentAccount (with mocked solitude API responses) for
        `user` (defaults to self.user), optionally granting them the OWNER
        role on the app.

        Returns a (account, mocked_api, user) tuple.
        """
        api = self.sol.api
        # First lookups raise ObjectDoesNotExist so the products are created
        # via POST rather than fetched.
        api.generic.product.get_object.side_effect = ObjectDoesNotExist
        api.generic.product.post.return_value = {'resource_uri': 'gpuri'}
        api.bango.product.get_object.side_effect = ObjectDoesNotExist
        # NOTE(review): the mocked response and the account below hard-code
        # 123 and ignore the `bango_id` parameter — confirm that is intended.
        api.bango.product.post.return_value = {
            'resource_uri': 'bpruri', 'bango_id': 123}
        if not user:
            user = self.user
        amo.set_user(user)
        if make_owner:
            AddonUser.objects.create(addon=self.webapp,
                                     user=user, role=amo.AUTHOR_ROLE_OWNER)
        seller = SolitudeSeller.objects.create(
            resource_uri='/path/to/sel', user=user, uuid='uuid-%s' % user.pk)
        acct = PaymentAccount.objects.create(
            user=user, uri='asdf-%s' % user.pk, name='test', inactive=False,
            seller_uri='suri-%s' % user.pk, solitude_seller=seller,
            bango_package_id=123, agreed_tos=True)
        return acct, api, user
def is_owner(self, user):
return (self.webapp.authors.filter(user=user,
addonuser__role=amo.AUTHOR_ROLE_OWNER).exists())
def test_associate_acct_to_app_free_inapp(self):
acct, api, user = self.setup_payment_acct(make_owner=True)
assert self.is_owner(user)
self.make_premium(self.webapp)
res = self.client.post(
self.url, self.get_postdata({'price': 'free',
'allow_inapp': 'True',
'regions': ALL_REGION_IDS,
'accounts': acct.pk}), follow=True)
self.assertNoFormErrors(res)
eq_(res.status_code, 200)
eq_(self.webapp.app_payment_account.payment_account.pk, acct.pk)
eq_(AddonPremium.objects.all().count(), 0)
def test_associate_acct_to_app(self):
self.make_premium(self.webapp, price=self.price.price)
acct, api, user = self.setup_payment_acct(make_owner=True)
assert self.is_owner(user)
res = self.client.post(
self.url, self.get_postdata({'price': self.price.pk,
'accounts': acct.pk,
'regions': ALL_REGION_IDS}),
follow=True)
eq_(api.bango.premium.post.call_count, 1)
self.assertNoFormErrors(res)
eq_(res.status_code, 200)
eq_(self.webapp.app_payment_account.payment_account.pk, acct.pk)
kw = api.bango.product.post.call_args[1]['data']
ok_(kw['secret'], kw)
kw = api.generic.product.post.call_args[1]['data']
eq_(kw['access'], ACCESS_PURCHASE)
def test_associate_acct_to_app_when_not_owner(self):
self.make_premium(self.webapp, price=self.price.price)
self.login(self.other)
acct, api, user = self.setup_payment_acct(make_owner=False,
user=self.other)
assert not self.is_owner(user)
# Attempt to associate account with app as non-owner.
res = self.client.post(
self.url, self.get_postdata({'accounts': acct.pk}), follow=True)
# Non-owner posts are forbidden.
eq_(res.status_code, 403)
# Payment account shouldn't be set as we're not the owner.
assert not (AddonPaymentAccount.objects
.filter(addon=self.webapp).exists())
def test_associate_acct_to_app_when_not_owner_and_an_admin(self):
self.make_premium(self.webapp, self.price.price)
self.login(self.admin)
acct, api, user = self.setup_payment_acct(make_owner=False,
user=self.admin)
# Check we're not an owner before we start.
assert not self.is_owner(user)
assert not (AddonPaymentAccount.objects
.filter(addon=self.webapp).exists())
res = self.client.post(
self.url, self.get_postdata({'accounts': acct.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
self.assertFormError(res, 'bango_account_list_form', 'accounts',
[u'You are not permitted to change payment '
'accounts.'])
assert not (AddonPaymentAccount.objects
.filter(addon=self.webapp).exists())
pqr = pq(res.content)
eq_(len(pqr('#id_accounts[disabled]')), 1)
eq_(len(pqr('.current-account')), 0)
def test_associate_acct_to_app_when_admin_and_owner_acct_exists(self):
self.make_premium(self.webapp, price=self.price.price)
owner_acct, api, owner_user = self.setup_payment_acct(make_owner=True)
assert self.is_owner(owner_user)
res = self.client.post(
self.url, self.get_postdata({'accounts': owner_acct.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
assert (AddonPaymentAccount.objects
.filter(addon=self.webapp).exists())
self.login(self.admin)
admin_acct, api, admin_user = self.setup_payment_acct(make_owner=False,
user=self.admin)
# Check we're not an owner before we start.
assert not self.is_owner(admin_user)
res = self.client.post(
self.url, self.get_postdata({'accounts': admin_acct.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
self.assertFormError(res, 'bango_account_list_form', 'accounts',
[u'You are not permitted to change payment '
'accounts.'])
def test_one_owner_and_a_second_one_sees_selected_plus_own_accounts(self):
self.make_premium(self.webapp, price=self.price.price)
owner_acct, api, owner = self.setup_payment_acct(make_owner=True)
assert self.is_owner(owner)
res = self.client.post(
self.url, self.get_postdata({'accounts': owner_acct.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
assert (AddonPaymentAccount.objects
.filter(addon=self.webapp).exists())
self.login(self.other)
owner_acct2, api, owner2 = self.setup_payment_acct(make_owner=True,
user=self.other)
assert self.is_owner(owner2)
# and be able to save their own account but not the other owners.
res = self.client.get(self.url)
eq_(res.status_code, 200)
pqr = pq(res.content)
# Check we have just our account option present + '----'.
eq_(len(pqr('
eq_(len(pqr('
eq_(pqr('.current-account').text(), unicode(owner_acct))
res = self.client.post(
self.url, self.get_postdata({'accounts': owner_acct2.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
eq_(res.status_code, 200)
self.assertNoFormErrors(res)
pqr = pq(res.content)
eq_(len(pqr('.current-account')), 0)
eq_(pqr('
# Now there should just be our account.
eq_(len(pqr('
def test_existing_account_should_be_disabled_for_non_owner(self):
self.make_premium(self.webapp, price=self.price.price)
acct, api, user = self.setup_payment_acct(make_owner=True)
# Must be an app owner to change this.
assert self.is_owner(user)
# Associate account with app.
res = self.client.post(
self.url, self.get_postdata({'accounts': acct.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
amo.set_user(self.other)
# Make this user a dev so they have access to the payments page.
AddonUser.objects.create(addon=self.webapp,
user=self.other, role=amo.AUTHOR_ROLE_DEV)
self.login(self.other)
# Make sure not an owner.
assert not self.is_owner(self.other)
res = self.client.get(self.url)
eq_(res.status_code, 200)
pqr = pq(res.content)
# No accounts setup.
eq_(len(pqr('.no-accounts')), 1)
# Currently associated account should be displayed separately.
eq_(pqr('.current-account').text(), unicode(acct))
def test_existing_account_should_be_disabled_for_non_owner_admin(self):
self.make_premium(self.webapp, price=self.price.price)
# Login as regular user
self.login(self.other)
owner_acct, api, user = self.setup_payment_acct(make_owner=True,
user=self.other)
# Must be an app owner to change this.
assert self.is_owner(self.other)
# Associate account with app.
res = self.client.post(self.url,
self.get_postdata({'accounts': owner_acct.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
self.assertNoFormErrors(res)
# Login as admin.
self.login(self.admin)
# Create an account as an admin.
admin_acct, api, admin_user = self.setup_payment_acct(make_owner=False,
user=self.admin)
# Make sure not an owner.
assert not self.is_owner(self.admin)
res = self.client.get(self.url)
eq_(res.status_code, 200)
pqr = pq(res.content)
# Payment field should be disabled.
eq_(len(pqr('
# Currently associated account should be displayed separately.
eq_(pqr('.current-account').text(), unicode(owner_acct))
    def test_deleted_payment_accounts_switch_to_incomplete_apps(self):
        """Deleting the payment account of a public paid app pushes the app
        back to the incomplete (STATUS_NULL) state."""
        self.make_premium(self.webapp, price=self.price.price)
        self.login(self.user)
        addon_account = setup_payment_account(self.webapp, self.user)
        eq_(self.webapp.status, amo.STATUS_PUBLIC)
        self.client.post(reverse('mkt.developers.bango.delete_payment_account',
                                 args=[addon_account.payment_account.pk]))
        # Reload to pick up the status change made by the view.
        eq_(self.webapp.reload().status, amo.STATUS_NULL)
    def setup_bango_portal(self):
        """Enable the bango-portal waffle switch and set up a premium app
        with an associated payment account and the portal URL."""
        self.create_switch('bango-portal')
        self.user = UserProfile.objects.get(pk=31337)
        self.webapp.update(premium_type=amo.ADDON_PREMIUM)
        self.login(self.user)
        self.account = setup_payment_account(self.webapp, self.user)
        self.portal_url = self.webapp.get_dev_url(
            'payments.bango_portal_from_addon')
def test_bango_portal_links(self):
payments_url = self.webapp.get_dev_url('payments')
res = self.client.get(payments_url)
account_template = self.extract_script_template(
res.content, '
eq_(len(account_template('.portal-account')), 0)
self.create_switch('bango-portal', db=True)
res = self.client.get(payments_url)
account_template = self.extract_script_template(
res.content, '
eq_(len(account_template('.portal-account')), 1)
    @mock.patch('mkt.developers.views_payments.client.api')
    def test_bango_portal_redirect(self, api):
        """A successful Bango login produces a 204 whose Location carries the
        auth token and the (URL-encoded) email address."""
        self.setup_bango_portal()
        authentication_token = u'D0A44686-D4A3-4B2F-9BEB-5E4975E35192'
        api.bango.login.post.return_value = {
            'person_id': 600925,
            'email_address': u'admin@place.com',
            'authentication_token': authentication_token,
        }
        assert self.is_owner(self.user)
        res = self.client.get(self.portal_url)
        eq_(res.status_code, 204)
        # The login call must target the app's Bango package.
        eq_(api.bango.login.post.call_args[0][0]['packageId'], TEST_PACKAGE_ID)
        redirect_url = res['Location']
        assert authentication_token in redirect_url, redirect_url
        assert 'emailAddress=admin%40place.com' in redirect_url, redirect_url
    @mock.patch('mkt.developers.views_payments.client.api')
    def test_bango_portal_redirect_api_error(self, api):
        """A Bango login failure is surfaced as a 400 with the error body
        passed through as JSON."""
        self.setup_bango_portal()
        err = {'errors': 'Something went wrong.'}
        api.bango.login.post.side_effect = HttpClientError(content=err)
        res = self.client.get(self.portal_url)
        eq_(res.status_code, 400)
        eq_(json.loads(res.content), err)
    def test_bango_portal_redirect_role_error(self):
        """Only the app OWNER may access the portal page; a DEV role gets a
        403."""
        self.setup_bango_portal()
        # Demote the user from owner to developer.
        addon_user = self.user.addonuser_set.all()[0]
        addon_user.role = amo.AUTHOR_ROLE_DEV
        addon_user.save()
        assert not self.is_owner(self.user)
        res = self.client.get(self.portal_url)
        eq_(res.status_code, 403)
    def test_bango_portal_redirect_permission_error(self):
        """The owner of a *different* app cannot access this app's portal
        page."""
        self.setup_bango_portal()
        self.login(self.other)
        # Make `other` the owner of an unrelated app.
        other_webapp = Addon.objects.create(type=self.webapp.type,
            status=self.webapp.status, name='other-%s' % self.webapp.id,
            premium_type=amo.ADDON_PREMIUM)
        AddonUser.objects.create(addon=other_webapp,
                                 user=self.other, role=amo.AUTHOR_ROLE_OWNER)
        res = self.client.get(self.portal_url)
        eq_(res.status_code, 403)
    def test_bango_portal_redirect_solitude_seller_error(self):
        """Access is denied (403) when the solitude seller behind the app's
        payment account belongs to a different user."""
        self.setup_bango_portal()
        assert self.is_owner(self.user)
        # Reassign the seller to another user to trigger the mismatch.
        (self.webapp.app_payment_account.payment_account.
            solitude_seller.update(user=self.other))
        res = self.client.get(self.portal_url)
        eq_(res.status_code, 403)
class TestRegions(amo.tests.TestCase):
    """Tests for the region-exclusion section of the app payments page."""
    fixtures = ['base/apps', 'base/users', 'webapps/337141-steamcube']
    def setUp(self):
        self.webapp = self.get_webapp()
        # Make the app a Firefox OS (Gaia) app.
        AddonDeviceType.objects.create(
            addon=self.webapp, device_type=amo.DEVICE_GAIA.id)
        self.url = self.webapp.get_dev_url('payments')
        self.username = 'admin@mozilla.com'
        assert self.client.login(username=self.username, password='password')
        # Mock the solitude payment client for the duration of each test.
        self.patch = mock.patch('mkt.developers.models.client')
        self.sol = self.patch.start()
    def tearDown(self):
        self.patch.stop()
    def get_webapp(self):
        # pk 337141 is the steamcube fixture app.
        return Addon.objects.get(pk=337141)
    def get_dict(self, **kwargs):
        """Base POST data selecting all regions; override fields via kwargs."""
        extension = {'regions': mkt.regions.ALL_REGION_IDS,
                     'other_regions': 'on',
                     'free_platforms': ['free-%s' % dt.class_name for dt in
                                        self.webapp.device_types]}
        extension.update(kwargs)
        return extension
    def get_excluded_ids(self):
        """Sorted region ids currently excluded for the app."""
        return sorted(AER.objects.filter(addon=self.webapp)
                         .values_list('region', flat=True))
    def test_edit_all_regions_are_not_excluded(self):
        """Posting with all regions selected leaves no exclusion rows."""
        Category.objects.create(type=amo.ADDON_WEBAPP, slug='games')
        r = self.client.post(self.url, self.get_dict())
        self.assertNoFormErrors(r)
        eq_(AER.objects.count(), 0)
    def test_games_form_disabled(self):
        """Games without content ratings have BR and DE disabled in the
        region widget."""
        games = Category.objects.create(type=amo.ADDON_WEBAPP, slug='games')
        AddonCategory.objects.create(addon=self.webapp, category=games)
        r = self.client.get(self.url, self.get_dict())
        self.assertNoFormErrors(r)
        td = pq(r.content)('#regions')
        eq_(td.find('div[data-disabled-regions]')
              .attr('data-disabled-regions'),
            '[%d, %d]' % (mkt.regions.BR.id, mkt.regions.DE.id))
        eq_(td.find('.note.disabled-regions').length, 1)
    def test_games_form_enabled_with_content_rating(self):
        """Games with BR and DE content ratings have no disabled regions."""
        for region in (mkt.regions.BR, mkt.regions.DE):
            rb = region.ratingsbodies[0]
            ContentRating.objects.create(
                addon=self.webapp, ratings_body=rb.id, rating=rb.ratings[0].id)
        games = Category.objects.create(type=amo.ADDON_WEBAPP, slug='games')
        AddonCategory.objects.create(addon=self.webapp, category=games)
        r = self.client.get(self.url)
        td = pq(r.content)('#regions')
        eq_(td.find('div[data-disabled-regions]')
              .attr('data-disabled-regions'), '[]')
        eq_(td.find('.note.disabled-regions').length, 0)
    def test_brazil_other_cats_form_enabled(self):
        """Non-game apps have no disabled regions at all."""
        r = self.client.get(self.url)
        td = pq(r.content)('#regions')
        eq_(td.find('div[data-disabled-regions]')
              .attr('data-disabled-regions'), '[]')
        eq_(td.find('.note.disabled-regions').length, 0)
class PaymentsBase(amo.tests.TestCase):
    """Shared setup for payment-account view tests: a logged-in user with one
    Bango payment account."""
    fixtures = fixture('user_editor', 'user_999')
    def setUp(self):
        self.user = UserProfile.objects.get(pk=999)
        self.login(self.user)
        self.account = self.create()
    def create(self):
        """Create and return a PaymentAccount (with its SolitudeSeller) for
        self.user."""
        seller = SolitudeSeller.objects.create(user=self.user)
        return PaymentAccount.objects.create(user=self.user,
                                             solitude_seller=seller,
                                             uri='/bango/package/123',
                                             name="cvan's cnotes",
                                             agreed_tos=True)
class TestPaymentAccountsAdd(PaymentsBase):
    """Tests for the add-payment-account endpoint."""
    # TODO: this test provides bare coverage and might need to be expanded.
    def setUp(self):
        super(TestPaymentAccountsAdd, self).setUp()
        self.url = reverse('mkt.developers.bango.add_payment_account')
    def test_login_required(self):
        self.client.logout()
        self.assertLoginRequired(self.client.post(self.url, data={}))
    @mock.patch('mkt.developers.models.client')
    def test_create(self, client):
        """Posting a full set of Bango fields creates a second account and
        returns its pk plus an agreement URL."""
        res = self.client.post(self.url, data={
            'bankAccountPayeeName': 'name',
            'companyName': 'company',
            'vendorName': 'vendor',
            'financeEmailAddress': 'a@a.com',
            'adminEmailAddress': 'a@a.com',
            'supportEmailAddress': 'a@a.com',
            'address1': 'address 1',
            'addressCity': 'city',
            'addressState': 'state',
            'addressZipCode': 'zip',
            'addressPhone': '123',
            'countryIso': 'BRA',
            'currencyIso': 'EUR',
            'bankAccountNumber': '123',
            'bankAccountCode': '123',
            'bankName': 'asd',
            'bankAddress1': 'address 2',
            'bankAddressZipCode': '123',
            'bankAddressIso': 'BRA',
            'account_name': 'account'
        })
        output = json.loads(res.content)
        ok_('pk' in output)
        ok_('agreement-url' in output)
        # One account from PaymentsBase.setUp plus the one just created.
        eq_(PaymentAccount.objects.count(), 2)
class TestPaymentAccounts(PaymentsBase):
    """Tests for the payment-accounts listing endpoint."""
    def setUp(self):
        super(TestPaymentAccounts, self).setUp()
        self.url = reverse('mkt.developers.bango.payment_accounts')
    def test_login_required(self):
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))
    def test_mine(self):
        """The listing returns the logged-in user's account."""
        res = self.client.get(self.url)
        eq_(res.status_code, 200)
        output = json.loads(res.content)
        eq_(output[0]['id'], self.account.pk)
        # NOTE(review): the next line was truncated in the source dump; the
        # original assertion body is lost and must be restored from VCS.
        ok_('&
class TestPaymentPortal(PaymentsBase):
    """Tests for the Bango portal URL exposed by the accounts listing."""
    def setUp(self):
        super(TestPaymentPortal, self).setUp()
        self.create_switch('bango-portal')
        self.app_slug = 'app-slug'
    def test_with_app_slug(self):
        """When an app-slug is supplied, each account carries a per-app
        portal URL."""
        url = reverse('mkt.developers.bango.payment_accounts')
        res = self.client.get(url, {'app-slug': self.app_slug})
        eq_(res.status_code, 200)
        output = json.loads(res.content)
        eq_(output[0]['portal-url'],
            reverse('mkt.developers.apps.payments.bango_portal_from_addon',
                    args=[self.app_slug]))
    def test_without_app_slug(self):
        """Without an app-slug there is no portal URL in the payload."""
        url = reverse('mkt.developers.bango.payment_accounts')
        res = self.client.get(url)
        eq_(res.status_code, 200)
        output = json.loads(res.content)
        ok_('portal-url' not in output[0])
class TestPaymentAccount(PaymentsBase):
    """Tests for the single payment-account detail endpoint."""
    def setUp(self):
        super(TestPaymentAccount, self).setUp()
        self.url = reverse('mkt.developers.bango.payment_account',
                           args=[self.account.pk])
    def test_login_required(self):
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))
    @mock.patch('mkt.developers.models.client')
    def test_get(self, client):
        """The detail view merges local account data with the Bango package
        details fetched from solitude."""
        package = mock.Mock()
        package.get.return_value = {'full': {'vendorName': 'testval'}}
        client.api.bango.package.return_value = package
        res = self.client.get(self.url)
        # '123' comes from the account uri '/bango/package/123'.
        client.api.bango.package.assert_called_with('123')
        eq_(res.status_code, 200)
        output = json.loads(res.content)
        eq_(output['account_name'], self.account.name)
        assert 'vendorName' in output, (
            'Details from Bango not getting merged in: %s' % output)
        eq_(output['vendorName'], 'testval')
class TestPaymentAgreement(PaymentsBase):
    """Tests for fetching and accepting the Bango SBI agreement."""
    def setUp(self):
        super(TestPaymentAgreement, self).setUp()
        self.url = reverse('mkt.developers.bango.agreement',
                           args=[self.account.pk])
    def test_anon(self):
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))
    @mock.patch('mkt.developers.views_payments.client.api')
    def test_get(self, api):
        """GET returns the agreement text from solitude."""
        api.bango.sbi.agreement.get_object.return_value = {
            'text': 'blah', 'valid': '2010-08-31T00:00:00'}
        res = self.client.get(self.url)
        eq_(res.status_code, 200)
        data = json.loads(res.content)
        eq_(data['text'], 'blah')
    @mock.patch('mkt.developers.views_payments.client.api')
    def test_set(self, api):
        """POST accepts the agreement and returns the validity date."""
        api.bango.sbi.post.return_value = {
            'expires': '2014-08-31T00:00:00',
            'valid': '2014-08-31T00:00:00'}
        res = self.client.post(self.url)
        eq_(res.status_code, 200)
        data = json.loads(res.content)
        eq_(data['valid'], '2014-08-31T00:00:00')
class TestPaymentAccountsForm(PaymentsBase):
    """Tests for the account-selection form fragment."""
    def setUp(self):
        super(TestPaymentAccountsForm, self).setUp()
        self.url = reverse('mkt.developers.bango.payment_accounts_form')
    def test_login_required(self):
        self.client.logout()
        self.assertLoginRequired(self.client.get(self.url))
    def test_mine(self):
        """The form offers the user's own (ToS-agreed) account."""
        res = self.client.get(self.url)
        eq_(res.status_code, 200)
        eq_(res.context['bango_account_list_form']
               .fields['accounts'].choices.queryset.get(), self.account)
    def test_mine_disagreed_tos(self):
        """Accounts without accepted ToS are excluded from the choices."""
        self.account.update(agreed_tos=False)
        res = self.client.get(self.url)
        eq_(res.status_code, 200)
        self.assertSetEqual(res.context['bango_account_list_form']
                               .fields['accounts'].choices.queryset.all(), [])
class TestPaymentDelete(PaymentsBase):
    """Tests for the delete-payment-account endpoint."""
    def setUp(self):
        super(TestPaymentDelete, self).setUp()
        self.url = reverse('mkt.developers.bango.delete_payment_account',
                           args=[self.account.pk])
    def test_login_required(self):
        self.client.logout()
        self.assertLoginRequired(self.client.post(self.url, data={}))
    def test_not_mine(self):
        """Another user's delete attempt 404s rather than leaking existence."""
        self.login(UserProfile.objects.get(pk=5497308))
        eq_(self.client.post(self.url, data={}).status_code, 404)
    def test_mine(self):
        """Deleting marks the account inactive instead of removing the row."""
        eq_(self.client.post(self.url, data={}).status_code, 200)
        eq_(PaymentAccount.objects.get(pk=self.account.pk).inactive, True)
| true
| true
|
f70e283fd6fdb7fa06c81f1feffe487ca33d2235
| 5,232
|
py
|
Python
|
src/packagedcode2/formats.py
|
purna135/scancode-toolkit-contrib
|
ef556c4bb2bfc513f486d5b58d43895c062d44cb
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
src/packagedcode2/formats.py
|
purna135/scancode-toolkit-contrib
|
ef556c4bb2bfc513f486d5b58d43895c062d44cb
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
src/packagedcode2/formats.py
|
purna135/scancode-toolkit-contrib
|
ef556c4bb2bfc513f486d5b58d43895c062d44cb
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
"""
Known packaging and package metadata formats.
https://en.wikipedia.org/wiki/Package_manager
https://en.wikipedia.org/wiki/Package_format
"""
# Map of format key -> (human-readable description, optional reference URL).
# The empty-string URL means no canonical reference has been recorded yet.
package_formats = {
    # mainline Linux distros
    'rpm': ('RPM (Linux)', ''),
    # 'rpm_spec': ('RPM spec file (Linux)', ''),
    'deb': ('Debian (Linux)', ''),
    # 'deb_control': ('Debian control file (Linux)', ''),
    # Java / JVM
    'pom': ('Maven POM (Java)', ''),
    'ivy': ('IVY (Java)', ''),
    'gradle': ('gradle (Groovy/Java)', 'https://gradle.org/'),
    'jboss': ('JBoss (Java)', ''),
    'buildr': ('buildr (Ruby)', 'https://buildr.apache.org/'),
    'osgi': ('OSGi (Eclipse/Java)', ''),
    'sbt': ('sbt (Scala/Java)', 'http://www.scala-sbt.org/'),
    'clojars': ('Clojars (Clojure)', ''),
    'eclipse': ('Eclipse plugin (Eclipse)', ''),
    'netbeans': ('NetBeans plugin (NetBeans)', ''),
    'jenkins': ('Jenkins plugin (Jenkins)', ''),
    # Linux kernel
    'lkm': ('Loadable Kernel Module (Linux)', ''),
    # Perl
    'cpan': ('CPAN (Perl)', ''),
    # Ruby
    'gem': ('RubyGems (Ruby)', ''),
    'gemfile': ('Bundler Gemfile/Gemfile.lock (Ruby)', ''),
    'gemspec': ('RubyGem gemspec file (Ruby)', ''),
    # JavaScript
    'npm': ('npm (JavaScript)', ''),
    'jspm': ('jspm (JavaScript)', ''),
    'bower': ('Bower (JavaScript)', ''),
    # PHP
    'pear': ('PEAR (PHP)', ''),
    'composer': ('Composer (PHP)', ''),
    # Python
    'setup.py': ('Python package (Python)', ''),
    'sdist': ('PyPI (Python)', ''),
    'bdist': ('PyPI (Python)', ''),
    'pypi': ('PyPI (Python)', ''),
    'py': ('Python metadata', ''),  # __version__, __copyright__
    'egg': ('Egg (Python)', ''),
    'wheel': ('Wheel (Python)', ''),
    # Windows / .NET
    'nuget': ('NuGet (.NET)', ''),
    # executable binary formats
    'winpe': ('PE Binary (Windows)', ''),
    'elf': ('ELF binaries (POSIX)', ''),
    'macho': ('Mach-O binaries (MacOSX)', ''),
    # installers
    'mpkg': ('Apple m/package (MacOSX)', ''),
    'msi': ('Windows installer (Windows)', ''),
    # mobile
    'ipa': ('.ipa (iOS)', ''),
    'apk': ('.apk (Android)', ''),
    'modlic': ('MODULE_LICENSE (Android)', ''),
    # Go
    'godoc': ('GoDoc (Go)', ''),
    'godep': ('Godep (Go)', 'https://github.com/tools/godep'),
    # less common Go tools
    'gom': ('Gom (Go)', ''),
    'gondler': ('Gondler (Go)', ''),
    'goop': ('Goop (Go)', ''),
    'dondur': ('dondur (Go)', 'https://github.com/oguzbilgic/dondur'),
    # miscellaneous / less common formats
    'buildout':('buildout (Python)', ''),
    'about': ('AboutCode', 'http://aboutcode.org'),
    'freebsd': ('FreeBSD ports (Unix)', ''),
    'openbsd': ('OpenBSD ports (Unix)', ''),
    'podfile': ('CocoaPods Podfile (Objective-C/Swift)', 'https://cocoapods.org/'),
    'vmdk': ('VMware disk image', ''),
    'vdi': ('VirtualBox disk image', ''),
    'spdx': ('SPDX', ''),
    'doap': ('DOAP', ''),
    'docker': ('Docker Image', ''),
    'bosh': ('BOSH (CloudFoundry)', ''),
    'ebuild': ('Gentoo ebuild(Linux)', ''),
    'nix': ('NixOS (Linux)', ''),
    'conary': ('conary rPath (Linux)', ''),
    'opkg': ('Yocto opkg (Linux)', ''),
    'pacman': ('ArchLinux pacman (Linux)', ''),
    'pkgsrc': ('NetBSD pkgsrc (Unix)', ''),
    'brew': ('Homebrew (MacOSX)', ''),
    'slack': ('Slackware (Linux)', ''),
    'solaris': ('Solaris (Unix)', ''),
    'cran': ('CRAN (R)' , ''),
    'mix': ('Mix (Elixir/Erlang)', 'http://Hex.pm',),
    'melpa': ('MELPA (Emacs)', ''),
    'cabal': ('Cabal (Haskell)', ''),
    'cargo': ('cargo (Rust)', ''),
    'conda': ('Conda (Python)', ''),
    'pypm': ('PyPM (Python)', ''),
    'rocks': ('LuaRocks (Lua)', ''),
    'appdata': ('AppStream (Linux)', 'https://github.com/ximion/appstream'),
    'asdf': ('ASDF (Common Lisp)', ''),
    'ctan': ('CTAN (TeX)', ''),
    'appx': ('.appx (Windows 8)', ''),
    'sublime': ('Sublime plugin (Sublime)', ''),
    'rebar': ('Rebar (Erlang)', ''),
    'cean': ('CEAN (Erlang)', ''),
    'beam': ('Beam (Erlang)', ''),
}
| 34.196078
| 83
| 0.571292
|
package_formats = {
'rpm': ('RPM (Linux)', ''),
'deb': ('Debian (Linux)', ''),
'pom': ('Maven POM (Java)', ''),
'ivy': ('IVY (Java)', ''),
'gradle': ('gradle (Groovy/Java)', 'https://gradle.org/'),
'jboss': ('JBoss (Java)', ''),
'buildr': ('buildr (Ruby)', 'https://buildr.apache.org/'),
'osgi': ('OSGi (Eclipse/Java)', ''),
'sbt': ('sbt (Scala/Java)', 'http://www.scala-sbt.org/'),
'clojars': ('Clojars (Clojure)', ''),
'eclipse': ('Eclipse plugin (Eclipse)', ''),
'netbeans': ('NetBeans plugin (NetBeans)', ''),
'jenkins': ('Jenkins plugin (Jenkins)', ''),
'lkm': ('Loadable Kernel Module (Linux)', ''),
'cpan': ('CPAN (Perl)', ''),
'gem': ('RubyGems (Ruby)', ''),
'gemfile': ('Bundler Gemfile/Gemfile.lock (Ruby)', ''),
'gemspec': ('RubyGem gemspec file (Ruby)', ''),
'npm': ('npm (JavaScript)', ''),
'jspm': ('jspm (JavaScript)', ''),
'bower': ('Bower (JavaScript)', ''),
'pear': ('PEAR (PHP)', ''),
'composer': ('Composer (PHP)', ''),
'setup.py': ('Python package (Python)', ''),
'sdist': ('PyPI (Python)', ''),
'bdist': ('PyPI (Python)', ''),
'pypi': ('PyPI (Python)', ''),
'py': ('Python metadata', ''),
'egg': ('Egg (Python)', ''),
'wheel': ('Wheel (Python)', ''),
'nuget': ('NuGet (.NET)', ''),
'winpe': ('PE Binary (Windows)', ''),
'elf': ('ELF binaries (POSIX)', ''),
'macho': ('Mach-O binaries (MacOSX)', ''),
'mpkg': ('Apple m/package (MacOSX)', ''),
'msi': ('Windows installer (Windows)', ''),
'ipa': ('.ipa (iOS)', ''),
'apk': ('.apk (Android)', ''),
'modlic': ('MODULE_LICENSE (Android)', ''),
'godoc': ('GoDoc (Go)', ''),
'godep': ('Godep (Go)', 'https://github.com/tools/godep'),
'gom': ('Gom (Go)', ''),
'gondler': ('Gondler (Go)', ''),
'goop': ('Goop (Go)', ''),
'dondur': ('dondur (Go)', 'https://github.com/oguzbilgic/dondur'),
'buildout':('buildout (Python)', ''),
'about': ('AboutCode', 'http://aboutcode.org'),
'freebsd': ('FreeBSD ports (Unix)', ''),
'openbsd': ('OpenBSD ports (Unix)', ''),
'podfile': ('CocoaPods Podfile (Objective-C/Swift)', 'https://cocoapods.org/'),
'vmdk': ('VMware disk image', ''),
'vdi': ('VirtualBox disk image', ''),
'spdx': ('SPDX', ''),
'doap': ('DOAP', ''),
'docker': ('Docker Image', ''),
'bosh': ('BOSH (CloudFoundry)', ''),
'ebuild': ('Gentoo ebuild(Linux)', ''),
'nix': ('NixOS (Linux)', ''),
'conary': ('conary rPath (Linux)', ''),
'opkg': ('Yocto opkg (Linux)', ''),
'pacman': ('ArchLinux pacman (Linux)', ''),
'pkgsrc': ('NetBSD pkgsrc (Unix)', ''),
'brew': ('Homebrew (MacOSX)', ''),
'slack': ('Slackware (Linux)', ''),
'solaris': ('Solaris (Unix)', ''),
'cran': ('CRAN (R)' , ''),
'mix': ('Mix (Elixir/Erlang)', 'http://Hex.pm',),
'melpa': ('MELPA (Emacs)', ''),
'cabal': ('Cabal (Haskell)', ''),
'cargo': ('cargo (Rust)', ''),
'conda': ('Conda (Python)', ''),
'pypm': ('PyPM (Python)', ''),
'rocks': ('LuaRocks (Lua)', ''),
'appdata': ('AppStream (Linux)', 'https://github.com/ximion/appstream'),
'asdf': ('ASDF (Common Lisp)', ''),
'ctan': ('CTAN (TeX)', ''),
'appx': ('.appx (Windows 8)', ''),
'sublime': ('Sublime plugin (Sublime)', ''),
'rebar': ('Rebar (Erlang)', ''),
'cean': ('CEAN (Erlang)', ''),
'beam': ('Beam (Erlang)', ''),
}
| true
| true
|
f70e286f5caeb1490537e3a205b606633b39a2ca
| 3,921
|
py
|
Python
|
feat_imp/generate_dataset.py
|
datajms/feature_importance_study
|
a10cf9fc66e0ca7f956457b2628a215b89cbe4e4
|
[
"MIT"
] | null | null | null |
feat_imp/generate_dataset.py
|
datajms/feature_importance_study
|
a10cf9fc66e0ca7f956457b2628a215b89cbe4e4
|
[
"MIT"
] | null | null | null |
feat_imp/generate_dataset.py
|
datajms/feature_importance_study
|
a10cf9fc66e0ca7f956457b2628a215b89cbe4e4
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import openturns as ot
from .conf_file_generation import GENERATION_CONF, post_process_generated_dataset
def sample_from_conf(
    var_conf: dict, corr_conf: dict, n_sample: int, seed: int = None
) -> pd.DataFrame:
    """
    Generate a dataset with n_sample rows from the configuration var_conf.

    The last variable declared in var_conf is treated as the target; all
    other variables are correlated with it through a Gaussian copula.

    Parameters
    ----------
    var_conf: dict
        Per-variable configuration: correlation with the target ('corr'),
        marginal distribution ('marg'), clipping bounds ('bounds') and
        decimal rounding ('round'). The target variable must have
        'corr' set to None.
    corr_conf: dict
        Pairwise correlations between explanatory variables, keyed by
        (var_name_a, var_name_b) tuples.
    n_sample: int
        Number of rows in the output dataset.
    seed: int, optional
        Optional seed for replicability.

    Returns
    -------
    df_sample: pd.DataFrame
        Dataset generated from the configuration, one column per variable.
    """
    ## Retrieve target variable (by convention the last key of var_conf).
    var_list = list(var_conf.keys())
    target_var = var_list[-1]
    i_target_var = len(var_list) - 1
    assert var_conf[target_var]["corr"] is None  # Make sure that correlation
    # parameter is set to None for the target variable.
    ## Map each variable name to its column index.
    var_dict = {}
    for i_var, var in enumerate(var_list):
        var_dict[var] = i_var
    ## Define marginal distributions of each variable.
    marginals = []
    for var in var_list:
        marginals.append(var_conf[var]["marg"])
    ## Define correlations with the target variable.
    R = ot.CorrelationMatrix(len(var_list))
    for i_var, var in enumerate(var_list):
        if var != target_var:
            R[i_var, i_target_var] = var_conf[var]["corr"]
    ## Define correlations within explanatory variables (upper triangle).
    for key, value in corr_conf.items():
        i_min = min(var_dict[key[0]], var_dict[key[1]])
        i_max = max(var_dict[key[0]], var_dict[key[1]])
        R[i_min, i_max] = value
    ## Build the joint distribution (marginals + Gaussian copula) and sample.
    copula = ot.NormalCopula(R)
    distribution = ot.ComposedDistribution(marginals, copula)
    if seed is not None:
        # Must be set right before sampling for replicability.
        ot.RandomGenerator.SetSeed(seed)
    df_sample = pd.DataFrame(
        np.array(distribution.getSample(n_sample)), columns=var_list
    )
    ## Apply clipping bounds where configured.
    for var in var_list:
        if var_conf[var]["bounds"] is not None:
            df_sample[var] = df_sample[var].clip(
                var_conf[var]["bounds"][0], var_conf[var]["bounds"][1]
            )
    ## Apply rounding.
    for var in var_list:
        df_sample[var] = df_sample[var].round(var_conf[var]["round"])
    ## Apply post-processing.
    df_sample = post_process_generated_dataset(df_sample)
    return df_sample
def prepare_ML_sets(
generation_conf: dict, n_sample: int, test_size: float = 0.25, seed: int = None
) -> tuple:
"""
Generate train, eval and test sets in X, y scikit-learn format.
Parameters
----------
generation_conf: dict
Configuration file of dataset
n_sample: int
Number of row in output dataset
test_size: float, optional
Proportion of test_size. Note that eval_size is set to eval_size
seed: int, optional
Returns
-------
output: tuple
tuple of generated datasets with format:
(X_train, y_train, X_eval, y_eval, X_test, y_test)
"""
## Get target_var name
target_var = list(generation_conf["train"]["var"].keys())[-1]
steps = ["train", "eval", "test"]
n_sample_list = [
int(n_sample * (1 - 2 * test_size)),
int(n_sample * test_size),
int(n_sample * test_size),
]
output = []
for i_step, (step, i_sample) in enumerate(zip(steps, n_sample_list)):
if seed is None: # Change seed for each step
current_seed = None
else:
current_seed = seed + i_step
df_step = sample_from_conf(
generation_conf[step]["var"],
generation_conf[step]["corr"],
i_sample, #
seed=current_seed,
)
output += [df_step.drop([target_var], axis=1), df_step[target_var]]
return tuple(output)
| 27.808511
| 83
| 0.630961
|
import numpy as np
import pandas as pd
import openturns as ot
from .conf_file_generation import GENERATION_CONF, post_process_generated_dataset
def sample_from_conf(
var_conf: dict, corr_conf: dict, n_sample: int, seed: int = None
) -> pd.DataFrame:
onf.keys())
target_var = var_list[-1]
i_target_var = len(var_list) - 1
assert var_conf[target_var]["corr"] is None
i_var, var in enumerate(var_list):
var_dict[var] = i_var
marginals.append(var_conf[var]["marg"])
))
for i_var, var in enumerate(var_list):
if var != target_var:
R[i_var, i_target_var] = var_conf[var]["corr"]
i_min = min(var_dict[key[0]], var_dict[key[1]])
i_max = max(var_dict[key[0]], var_dict[key[1]])
R[i_min, i_max] = value
)
distribution = ot.ComposedDistribution(marginals, copula)
if seed is not None:
ot.RandomGenerator.SetSeed(seed)
df_sample = pd.DataFrame(
np.array(distribution.getSample(n_sample)), columns=var_list
)
n var_list:
if var_conf[var]["bounds"] is not None:
df_sample[var] = df_sample[var].clip(
var_conf[var]["bounds"][0], var_conf[var]["bounds"][1]
)
ar_list:
df_sample[var] = df_sample[var].round(var_conf[var]["round"])
process_generated_dataset(df_sample)
return df_sample
def prepare_ML_sets(
generation_conf: dict, n_sample: int, test_size: float = 0.25, seed: int = None
) -> tuple:
t(generation_conf["train"]["var"].keys())[-1]
steps = ["train", "eval", "test"]
n_sample_list = [
int(n_sample * (1 - 2 * test_size)),
int(n_sample * test_size),
int(n_sample * test_size),
]
output = []
for i_step, (step, i_sample) in enumerate(zip(steps, n_sample_list)):
if seed is None:
current_seed = None
else:
current_seed = seed + i_step
df_step = sample_from_conf(
generation_conf[step]["var"],
generation_conf[step]["corr"],
i_sample,
seed=current_seed,
)
output += [df_step.drop([target_var], axis=1), df_step[target_var]]
return tuple(output)
| true
| true
|
f70e28f6600962a660650acd577d5d54522326cb
| 839
|
py
|
Python
|
api/tests/integration/tests/arom/partial_arom_cano.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 204
|
2015-11-06T21:34:34.000Z
|
2022-03-30T16:17:01.000Z
|
api/tests/integration/tests/arom/partial_arom_cano.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 509
|
2015-11-05T13:54:43.000Z
|
2022-03-30T22:15:30.000Z
|
api/tests/integration/tests/arom/partial_arom_cano.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 89
|
2015-11-17T08:22:54.000Z
|
2022-03-17T04:26:28.000Z
|
import os
import sys
sys.path.append(os.path.normpath(os.path.join(os.path.abspath(__file__), '..', '..', '..', "common")))
from env_indigo import *
indigo = Indigo()
for m in indigo.iterateSDFile(joinPathPy('molecules/partial_arom.sdf', __file__)):
print("Smiles: " + m.smiles())
# count number of aromatic bonds
arom_bonds = len([1 for b in m.iterateBonds() if b.bondOrder() == 4])
print(" Aromatic bonds: %d" % arom_bonds)
m2 = indigo.loadMolecule(m.smiles())
print("Reloaded smiles: " + m2.smiles())
arom_bonds2 = len([1 for b in m2.iterateBonds() if b.bondOrder() == 4])
print(" Aromatic bonds: %d" % arom_bonds2)
if arom_bonds != arom_bonds2:
sys.stderr.write("Number of aromatic bonds (%d and %d) is different in %s and %s.\n" %
(arom_bonds, arom_bonds2, m.smiles(), m2.smiles()))
| 41.95
| 102
| 0.65435
|
import os
import sys
sys.path.append(os.path.normpath(os.path.join(os.path.abspath(__file__), '..', '..', '..', "common")))
from env_indigo import *
indigo = Indigo()
for m in indigo.iterateSDFile(joinPathPy('molecules/partial_arom.sdf', __file__)):
print("Smiles: " + m.smiles())
arom_bonds = len([1 for b in m.iterateBonds() if b.bondOrder() == 4])
print(" Aromatic bonds: %d" % arom_bonds)
m2 = indigo.loadMolecule(m.smiles())
print("Reloaded smiles: " + m2.smiles())
arom_bonds2 = len([1 for b in m2.iterateBonds() if b.bondOrder() == 4])
print(" Aromatic bonds: %d" % arom_bonds2)
if arom_bonds != arom_bonds2:
sys.stderr.write("Number of aromatic bonds (%d and %d) is different in %s and %s.\n" %
(arom_bonds, arom_bonds2, m.smiles(), m2.smiles()))
| true
| true
|
f70e294b178871e8ebefeeaeb03d7e597a7a5791
| 12,728
|
py
|
Python
|
package/spack-hdf5/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | 1
|
2018-07-17T07:45:09.000Z
|
2018-07-17T07:45:09.000Z
|
package/spack-hdf5/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
package/spack-hdf5/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import shutil
import sys
from spack import *
class Hdf5(AutotoolsPackage):
"""HDF5 is a data model, library, and file format for storing and managing
data. It supports an unlimited variety of datatypes, and is designed for
flexible and efficient I/O and for high volume and complex data.
"""
homepage = "https://support.hdfgroup.org/HDF5/"
url = "https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.10/hdf5-1.10.1/src/hdf5-1.10.1.tar.gz"
list_url = "https://support.hdfgroup.org/ftp/HDF5/releases"
list_depth = 3
version('1.10.2', '8d4eae84e533efa57496638fd0dca8c3')
version('1.10.1', '43a2f9466702fb1db31df98ae6677f15')
version('1.10.0-patch1', '9180ff0ef8dc2ef3f61bd37a7404f295')
version('1.10.0', 'bdc935337ee8282579cd6bc4270ad199')
version('1.8.19', '7f568e2464d4ab0a74d16b23956d900b')
version('1.8.18', 'dd2148b740713ca0295442ec683d7b1c')
version('1.8.17', '7d572f8f3b798a628b8245af0391a0ca')
version('1.8.16', 'b8ed9a36ae142317f88b0c7ef4b9c618')
version('1.8.15', '03cccb5b33dbe975fdcd8ae9dc021f24')
version('1.8.14', 'a482686e733514a51cde12d6fe5c5d95')
version('1.8.13', 'c03426e9e77d7766944654280b467289')
version('1.8.12', 'd804802feb99b87fc668a90e6fa34411')
version('1.8.10', '710aa9fb61a51d61a7e2c09bf0052157')
variant('debug', default=False,
description='Builds a debug version of the library')
variant('shared', default=True,
description='Builds a shared version of the library')
variant('hl', default=False, description='Enable the high-level library')
variant('cxx', default=False, description='Enable C++ support')
variant('fortran', default=False, description='Enable Fortran support')
variant('threadsafe', default=False,
description='Enable thread-safe capabilities')
variant('mpi', default=True, description='Enable MPI support')
variant('szip', default=False, description='Enable szip support')
variant('pic', default=True,
description='Produce position-independent code (for shared libs)')
depends_on('mpi', when='+mpi')
# numactl does not currently build on darwin
if sys.platform != 'darwin':
depends_on('numactl', when='+mpi+fortran')
depends_on('szip', when='+szip')
depends_on('zlib@1.1.2:')
# There are several officially unsupported combinations of the features:
# 1. Thread safety is not guaranteed via high-level C-API but in some cases
# it works.
# conflicts('+threadsafe+hl')
# 2. Thread safety is not guaranteed via Fortran (CXX) API, but it's
# possible for a dependency tree to contain a package that uses Fortran
# (CXX) API in a single thread and another one that uses low-level C-API
# in multiple threads. To allow for such scenarios, we don't specify the
# following conflicts.
# conflicts('+threadsafe+cxx')
# conflicts('+threadsafe+fortran')
# 3. Parallel features are not supported via CXX API, but for the reasons
# described in #2 we allow for such combination.
# conflicts('+mpi+cxx')
# There are known build failures with intel@18.0.1. This issue is
# discussed and patch is provided at
# https://software.intel.com/en-us/forums/intel-fortran-compiler-for-linux-and-mac-os-x/topic/747951.
patch('h5f90global-mult-obj-same-equivalence-same-common-block.patch',
when='@1.10.1%intel@18')
# Turn line comments into block comments to conform with pre-C99 language
# standards. Versions of hdf5 after 1.8.10 don't require this patch,
# either because they conform to pre-C99 or neglect to ask for pre-C99
# language standards from their compiler. The hdf5 build system adds
# the -ansi cflag (run 'man gcc' for info on -ansi) for some versions
# of some compilers (see hdf5-1.8.10/config/gnu-flags). The hdf5 build
# system does not provide an option to disable -ansi, but since the
# pre-C99 code is restricted to just five lines of line comments in
# three src files, this patch accomplishes the simple task of patching the
# three src files and leaves the hdf5 build system alone.
patch('pre-c99-comments.patch', when='@1.8.10')
filter_compiler_wrappers('h5cc', 'h5c++', 'h5fc', relative_root='bin')
def url_for_version(self, version):
url = "https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-{0}/hdf5-{1}/src/hdf5-{1}.tar.gz"
return url.format(version.up_to(2), version)
@property
def libs(self):
"""HDF5 can be queried for the following parameters:
- "hl": high-level interface
- "cxx": C++ APIs
- "fortran": Fortran APIs
:return: list of matching libraries
"""
query_parameters = self.spec.last_query.extra_parameters
shared = '+shared' in self.spec
# This map contains a translation from query_parameters
# to the libraries needed
query2libraries = {
tuple(): ['libhdf5'],
('cxx', 'fortran', 'hl'): [
'libhdf5hl_fortran',
'libhdf5_hl_cpp',
'libhdf5_hl',
'libhdf5_fortran',
'libhdf5',
],
('cxx', 'hl'): [
'libhdf5_hl_cpp',
'libhdf5_hl',
'libhdf5',
],
('fortran', 'hl'): [
'libhdf5hl_fortran',
'libhdf5_hl',
'libhdf5_fortran',
'libhdf5',
],
('hl',): [
'libhdf5_hl',
'libhdf5',
],
('cxx', 'fortran'): [
'libhdf5_fortran',
'libhdf5_cpp',
'libhdf5',
],
('cxx',): [
'libhdf5_cpp',
'libhdf5',
],
('fortran',): [
'libhdf5_fortran',
'libhdf5',
]
}
# Turn the query into the appropriate key
key = tuple(sorted(query_parameters))
libraries = query2libraries[key]
return find_libraries(
libraries, root=self.prefix, shared=shared, recursive=True
)
@run_before('configure')
def fortran_check(self):
if '+fortran' in self.spec and not self.compiler.fc:
msg = 'cannot build a Fortran variant without a Fortran compiler'
raise RuntimeError(msg)
def configure_args(self):
# Always enable this option. This does not actually enable any
# features: it only *allows* the user to specify certain
# combinations of other arguments. Enabling it just skips a
# sanity check in configure, so this doesn't merit a variant.
extra_args = ['--enable-unsupported']
extra_args += self.enable_or_disable('threadsafe')
extra_args += self.enable_or_disable('cxx')
extra_args += self.enable_or_disable('hl')
extra_args += self.enable_or_disable('fortran')
if '+szip' in self.spec:
extra_args.append('--with-szlib=%s' % self.spec['szip'].prefix)
else:
extra_args.append('--without-szlib')
if self.spec.satisfies('@1.10:'):
if '+debug' in self.spec:
extra_args.append('--enable-build-mode=debug')
else:
extra_args.append('--enable-build-mode=production')
else:
if '+debug' in self.spec:
extra_args.append('--enable-debug=all')
else:
extra_args.append('--enable-production')
# '--enable-fortran2003' no longer exists as of version 1.10.0
if '+fortran' in self.spec:
extra_args.append('--enable-fortran2003')
else:
extra_args.append('--disable-fortran2003')
if '+shared' in self.spec:
extra_args.append('--enable-shared')
else:
extra_args.append('--disable-shared')
extra_args.append('--enable-static-exec')
if '+pic' in self.spec:
extra_args += ['%s=%s' % (f, self.compiler.pic_flag)
for f in ['CFLAGS', 'CXXFLAGS', 'FCFLAGS']]
if '+mpi' in self.spec:
# The HDF5 configure script warns if cxx and mpi are enabled
# together. There doesn't seem to be a real reason for this, except
# that parts of the MPI interface are not accessible via the C++
# interface. Since they are still accessible via the C interface,
# this is not actually a problem.
extra_args += ['--enable-parallel',
'CC=%s' % self.spec['mpi'].mpicc]
if '+cxx' in self.spec:
extra_args.append('CXX=%s' % self.spec['mpi'].mpicxx)
if '+fortran' in self.spec:
extra_args.append('FC=%s' % self.spec['mpi'].mpifc)
extra_args.append('--with-zlib=%s' % self.spec['zlib'].prefix)
return extra_args
@run_after('configure')
def patch_postdeps(self):
if '@:1.8.14' in self.spec:
# On Ubuntu14, HDF5 1.8.12 (and maybe other versions)
# mysteriously end up with "-l -l" in the postdeps in the
# libtool script. Patch this by removing the spurious -l's.
filter_file(
r'postdeps="([^"]*)"',
lambda m: 'postdeps="%s"' % ' '.join(
arg for arg in m.group(1).split(' ') if arg != '-l'),
'libtool')
@run_after('install')
@on_package_attributes(run_tests=True)
def check_install(self):
# Build and run a small program to test the installed HDF5 library
spec = self.spec
print("Checking HDF5 installation...")
checkdir = "spack-check"
with working_dir(checkdir, create=True):
source = r"""
#include <hdf5.h>
#include <assert.h>
#include <stdio.h>
int main(int argc, char **argv) {
unsigned majnum, minnum, relnum;
herr_t herr = H5get_libversion(&majnum, &minnum, &relnum);
assert(!herr);
printf("HDF5 version %d.%d.%d %u.%u.%u\n", H5_VERS_MAJOR, H5_VERS_MINOR,
H5_VERS_RELEASE, majnum, minnum, relnum);
return 0;
}
"""
expected = """\
HDF5 version {version} {version}
""".format(version=str(spec.version.up_to(3)))
with open("check.c", 'w') as f:
f.write(source)
if '+mpi' in spec:
cc = Executable(spec['mpi'].mpicc)
else:
cc = Executable(self.compiler.cc)
cc(*(['-c', "check.c"] + spec['hdf5'].headers.cpp_flags.split()))
cc(*(['-o', "check", "check.o"] +
spec['hdf5'].libs.ld_flags.split()))
try:
check = Executable('./check')
output = check(output=str)
except ProcessError:
output = ""
success = output == expected
if not success:
print("Produced output does not match expected output.")
print("Expected output:")
print('-' * 80)
print(expected)
print('-' * 80)
print("Produced output:")
print('-' * 80)
print(output)
print('-' * 80)
raise RuntimeError("HDF5 install check failed")
shutil.rmtree(checkdir)
| 40.535032
| 108
| 0.594516
|
'--enable-build-mode=debug')
else:
extra_args.append('--enable-build-mode=production')
else:
if '+debug' in self.spec:
extra_args.append('--enable-debug=all')
else:
extra_args.append('--enable-production')
if '+fortran' in self.spec:
extra_args.append('--enable-fortran2003')
else:
extra_args.append('--disable-fortran2003')
if '+shared' in self.spec:
extra_args.append('--enable-shared')
else:
extra_args.append('--disable-shared')
extra_args.append('--enable-static-exec')
if '+pic' in self.spec:
extra_args += ['%s=%s' % (f, self.compiler.pic_flag)
for f in ['CFLAGS', 'CXXFLAGS', 'FCFLAGS']]
if '+mpi' in self.spec:
# that parts of the MPI interface are not accessible via the C++
# interface. Since they are still accessible via the C interface,
# this is not actually a problem.
extra_args += ['--enable-parallel',
'CC=%s' % self.spec['mpi'].mpicc]
if '+cxx' in self.spec:
extra_args.append('CXX=%s' % self.spec['mpi'].mpicxx)
if '+fortran' in self.spec:
extra_args.append('FC=%s' % self.spec['mpi'].mpifc)
extra_args.append('--with-zlib=%s' % self.spec['zlib'].prefix)
return extra_args
@run_after('configure')
def patch_postdeps(self):
if '@:1.8.14' in self.spec:
# On Ubuntu14, HDF5 1.8.12 (and maybe other versions)
# mysteriously end up with "-l -l" in the postdeps in the
# libtool script. Patch this by removing the spurious -l's.
filter_file(
r'postdeps="([^"]*)"',
lambda m: 'postdeps="%s"' % ' '.join(
arg for arg in m.group(1).split(' ') if arg != '-l'),
'libtool')
@run_after('install')
@on_package_attributes(run_tests=True)
def check_install(self):
# Build and run a small program to test the installed HDF5 library
spec = self.spec
print("Checking HDF5 installation...")
checkdir = "spack-check"
with working_dir(checkdir, create=True):
source = r"""
#include <hdf5.h>
#include <assert.h>
#include <stdio.h>
int main(int argc, char **argv) {
unsigned majnum, minnum, relnum;
herr_t herr = H5get_libversion(&majnum, &minnum, &relnum);
assert(!herr);
printf("HDF5 version %d.%d.%d %u.%u.%u\n", H5_VERS_MAJOR, H5_VERS_MINOR,
H5_VERS_RELEASE, majnum, minnum, relnum);
return 0;
}
"""
expected = """\
HDF5 version {version} {version}
""".format(version=str(spec.version.up_to(3)))
with open("check.c", 'w') as f:
f.write(source)
if '+mpi' in spec:
cc = Executable(spec['mpi'].mpicc)
else:
cc = Executable(self.compiler.cc)
cc(*(['-c', "check.c"] + spec['hdf5'].headers.cpp_flags.split()))
cc(*(['-o', "check", "check.o"] +
spec['hdf5'].libs.ld_flags.split()))
try:
check = Executable('./check')
output = check(output=str)
except ProcessError:
output = ""
success = output == expected
if not success:
print("Produced output does not match expected output.")
print("Expected output:")
print('-' * 80)
print(expected)
print('-' * 80)
print("Produced output:")
print('-' * 80)
print(output)
print('-' * 80)
raise RuntimeError("HDF5 install check failed")
shutil.rmtree(checkdir)
| true
| true
|
f70e297f339e3eda72257cd14336ad36b629d4c1
| 7,843
|
py
|
Python
|
cmp3/db_dump.py
|
ericvaandering/cms_consistency
|
8ae26a3e911d3a472945411999fc2c730539e0e3
|
[
"BSD-3-Clause"
] | null | null | null |
cmp3/db_dump.py
|
ericvaandering/cms_consistency
|
8ae26a3e911d3a472945411999fc2c730539e0e3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-15T14:58:45.000Z
|
2021-02-15T14:58:45.000Z
|
cmp3/db_dump.py
|
ericvaandering/cms_consistency
|
8ae26a3e911d3a472945411999fc2c730539e0e3
|
[
"BSD-3-Clause"
] | 4
|
2020-05-05T19:41:40.000Z
|
2022-02-23T21:48:40.000Z
|
import getopt, os, time, re, gzip, json, traceback
import sys, uuid
from config import DBConfig, Config
from part import PartitionedList
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import sessionmaker
from sqlalchemy.dialects.postgresql import UUID, JSONB
from sqlalchemy.dialects.oracle import RAW, CLOB
from sqlalchemy.dialects.mysql import BINARY
from sqlalchemy.types import TypeDecorator, CHAR, String
from stats import Stats
Version = "1.1"
t0 = time.time()
#from sqlalchemy import schema
Usage = """
python db_dump.py [options] -c <config.yaml> <rse_name>
-c <config file> -- required
-d <db config file> -- required - uses rucio.cfg format. Must contain "default" and "schema" under [databse]
-v -- verbose
-n <nparts>
-f <state>:<prefix> -- filter files with given state to the files set with prefix
state can be either combination of capital letters or "*"
can be repeated ( -f A:/path1 -f CD:/path2 )
use "*" for state to send all the files to the output set ( -f *:/path )
-l -- include more columns, otherwise physical path only, automatically on if -a is used
-z -- produce gzipped output
-s <stats file> -- write stats into JSON file
-S <key> -- add dump stats to stats under the key
-m <N files> -- stop after N files
"""
class GUID(TypeDecorator):
"""
Platform-independent GUID type.
Uses PostgreSQL's UUID type,
uses Oracle's RAW type,
uses MySQL's BINARY type,
otherwise uses CHAR(32), storing as stringified hex values.
"""
impl = CHAR
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(UUID())
elif dialect.name == 'oracle':
return dialect.type_descriptor(RAW(16))
elif dialect.name == 'mysql':
return dialect.type_descriptor(BINARY(16))
else:
return dialect.type_descriptor(CHAR(32))
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == 'postgresql':
return str(value).lower()
elif dialect.name == 'oracle':
return uuid.UUID(value).bytes
elif dialect.name == 'mysql':
return uuid.UUID(value).bytes
else:
if not isinstance(value, uuid.UUID):
return "%.32x" % uuid.UUID(value)
else:
# hexstring
return "%.32x" % value
def process_result_value(self, value, dialect):
if value is None:
return value
elif dialect.name == 'oracle':
return str(uuid.UUID(bytes=value)).replace('-', '').lower()
elif dialect.name == 'mysql':
return str(uuid.UUID(bytes=value)).replace('-', '').lower()
else:
return str(uuid.UUID(value)).replace('-', '').lower()
opts, args = getopt.getopt(sys.argv[1:], "f:c:ln:vd:s:S:zm:")
filters = {}
all_states = set()
for opt, val in opts:
if opt == '-f':
states, prefix = val.split(':')
filters[states] = prefix
all_states |= set(states)
opts = dict(opts)
if not args or (not "-c" in opts and not "-d" in opts):
print (Usage)
sys.exit(2)
verbose = "-v" in opts
long_output = "-l" in opts
out_prefix = opts.get("-o")
zout = "-z" in opts
stats_file = opts.get("-s")
stats_key = opts.get("-S", "db_dump")
stop_after = int(opts.get("-m", 0)) or None
rse_name = args[0]
if "-d" in opts:
dbconfig = DBConfig.from_cfg(opts["-d"])
else:
dbconfig = DBConfig.from_yaml(opts["-c"])
#print("dbconfig: url:", dbconfig.DBURL, "schema:", dbconfig.Schema)
config = Config(opts["-c"])
stats = None if stats_file is None else Stats(stats_file)
if stats:
stats[stats_key] = {
"status":"started",
"version":Version,
"rse":rse_name,
"start_time":t0,
"end_time":None,
"files":None,
"elapsed":None,
"directories":None,
"exception":[]
}
try:
Base = declarative_base()
if dbconfig.Schema:
Base.metadata.schema = dbconfig.Schema
class Replica(Base):
__tablename__ = "replicas"
path = Column(String)
state = Column(String)
rse_id = Column(GUID(), primary_key=True)
scope = Column(String, primary_key=True)
name = Column(String, primary_key=True)
class RSE(Base):
__tablename__ = "rses"
id = Column(GUID(), primary_key=True)
rse = Column(String)
if "-n" in opts:
nparts = int(opts["-n"])
else:
nparts = config.nparts(rse_name) or 1
subdir = config.dbdump_root(rse_name) or "/"
if not subdir.endswith("/"): subdir = subdir + "/"
print(f"Filtering files under {subdir} only")
_, ignore_file_patterns = config.ignore_patterns(rse_name)
engine = create_engine(dbconfig.DBURL, echo=verbose)
Session = sessionmaker(bind=engine)
session = Session()
rse = session.query(RSE).filter(RSE.rse == rse_name).first()
if rse is None:
print ("RSE %s not found" % (rse_name,))
sys.exit(1)
rse_id = rse.id
#print ("rse_id:", type(rse_id), rse_id)
batch = 100000
outputs = {
states:PartitionedList.create(nparts, prefix, zout) for states, prefix in filters.items()
}
all_replicas = '*' in all_states
replicas = session.query(Replica).filter(Replica.rse_id==rse_id).yield_per(batch)
if all_replicas:
sys.stderr.write("including all replias\n")
else:
print("including replicas in states:", list(all_states), file=sys.stderr)
replicas = replicas.filter(Replica.state.in_(list(all_states)))
dirs = set()
n = 0
filter_re = config.dbdump_param(rse, "filter")
if filter_re:
filter_re = re.compile(filter_re)
for r in replicas:
path = r.name
state = r.state
if not path.startswith(subdir):
continue
if filter_re is not None:
if not filter_re.search(path):
continue
if any(p.match(path) for p in ignore_file_patterns):
continue
words = path.rsplit("/", 1)
if len(words) == 1:
dirp = "/"
else:
dirp = words[0]
dirs.add(dirp)
for s, out_list in outputs.items():
if state in s or s == '*':
if long_output:
out_list.add("%s\t%s\t%s\t%s\t%s" % (rse_name, r.scope, r.name, path or "null", r.state))
else:
out_list.add(path or "null")
n += 1
if n % batch == 0:
print(n)
if stop_after is not None and n >= stop_after:
print(f"stopped after {stop_after} files", file=sys.stderr)
break
for out_list in outputs.values():
out_list.close()
sys.stderr.write("Found %d files in %d directories\n" % (n, len(dirs)))
t1 = time.time()
t = int(t1 - t0)
s = t % 60
m = t // 60
sys.stderr.write("Elapsed time: %dm%02ds\n" % (m, s))
except:
lines = traceback.format_exc().split("\n")
t1 = time.time()
if stats is not None:
stats[stats_key].update({
"status":"failed",
"end_time":t1,
"exception":lines
})
stats.save()
else:
if stats is not None:
stats[stats_key].update({
"status":"done",
"end_time":t1,
"files":n,
"elapsed":t1-t0,
"directories":len(dirs)
})
stats.save()
| 29.484962
| 112
| 0.580645
|
import getopt, os, time, re, gzip, json, traceback
import sys, uuid
from config import DBConfig, Config
from part import PartitionedList
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import sessionmaker
from sqlalchemy.dialects.postgresql import UUID, JSONB
from sqlalchemy.dialects.oracle import RAW, CLOB
from sqlalchemy.dialects.mysql import BINARY
from sqlalchemy.types import TypeDecorator, CHAR, String
from stats import Stats
Version = "1.1"
t0 = time.time()
Usage = """
python db_dump.py [options] -c <config.yaml> <rse_name>
-c <config file> -- required
-d <db config file> -- required - uses rucio.cfg format. Must contain "default" and "schema" under [databse]
-v -- verbose
-n <nparts>
-f <state>:<prefix> -- filter files with given state to the files set with prefix
state can be either combination of capital letters or "*"
can be repeated ( -f A:/path1 -f CD:/path2 )
use "*" for state to send all the files to the output set ( -f *:/path )
-l -- include more columns, otherwise physical path only, automatically on if -a is used
-z -- produce gzipped output
-s <stats file> -- write stats into JSON file
-S <key> -- add dump stats to stats under the key
-m <N files> -- stop after N files
"""
class GUID(TypeDecorator):
impl = CHAR
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(UUID())
elif dialect.name == 'oracle':
return dialect.type_descriptor(RAW(16))
elif dialect.name == 'mysql':
return dialect.type_descriptor(BINARY(16))
else:
return dialect.type_descriptor(CHAR(32))
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == 'postgresql':
return str(value).lower()
elif dialect.name == 'oracle':
return uuid.UUID(value).bytes
elif dialect.name == 'mysql':
return uuid.UUID(value).bytes
else:
if not isinstance(value, uuid.UUID):
return "%.32x" % uuid.UUID(value)
else:
return "%.32x" % value
def process_result_value(self, value, dialect):
if value is None:
return value
elif dialect.name == 'oracle':
return str(uuid.UUID(bytes=value)).replace('-', '').lower()
elif dialect.name == 'mysql':
return str(uuid.UUID(bytes=value)).replace('-', '').lower()
else:
return str(uuid.UUID(value)).replace('-', '').lower()
opts, args = getopt.getopt(sys.argv[1:], "f:c:ln:vd:s:S:zm:")
filters = {}
all_states = set()
for opt, val in opts:
if opt == '-f':
states, prefix = val.split(':')
filters[states] = prefix
all_states |= set(states)
opts = dict(opts)
if not args or (not "-c" in opts and not "-d" in opts):
print (Usage)
sys.exit(2)
verbose = "-v" in opts
long_output = "-l" in opts
out_prefix = opts.get("-o")
zout = "-z" in opts
stats_file = opts.get("-s")
stats_key = opts.get("-S", "db_dump")
stop_after = int(opts.get("-m", 0)) or None
rse_name = args[0]
if "-d" in opts:
dbconfig = DBConfig.from_cfg(opts["-d"])
else:
dbconfig = DBConfig.from_yaml(opts["-c"])
config = Config(opts["-c"])
stats = None if stats_file is None else Stats(stats_file)
if stats:
stats[stats_key] = {
"status":"started",
"version":Version,
"rse":rse_name,
"start_time":t0,
"end_time":None,
"files":None,
"elapsed":None,
"directories":None,
"exception":[]
}
try:
Base = declarative_base()
if dbconfig.Schema:
Base.metadata.schema = dbconfig.Schema
class Replica(Base):
__tablename__ = "replicas"
path = Column(String)
state = Column(String)
rse_id = Column(GUID(), primary_key=True)
scope = Column(String, primary_key=True)
name = Column(String, primary_key=True)
class RSE(Base):
__tablename__ = "rses"
id = Column(GUID(), primary_key=True)
rse = Column(String)
if "-n" in opts:
nparts = int(opts["-n"])
else:
nparts = config.nparts(rse_name) or 1
subdir = config.dbdump_root(rse_name) or "/"
if not subdir.endswith("/"): subdir = subdir + "/"
print(f"Filtering files under {subdir} only")
_, ignore_file_patterns = config.ignore_patterns(rse_name)
engine = create_engine(dbconfig.DBURL, echo=verbose)
Session = sessionmaker(bind=engine)
session = Session()
rse = session.query(RSE).filter(RSE.rse == rse_name).first()
if rse is None:
print ("RSE %s not found" % (rse_name,))
sys.exit(1)
rse_id = rse.id
batch = 100000
outputs = {
states:PartitionedList.create(nparts, prefix, zout) for states, prefix in filters.items()
}
all_replicas = '*' in all_states
replicas = session.query(Replica).filter(Replica.rse_id==rse_id).yield_per(batch)
if all_replicas:
sys.stderr.write("including all replias\n")
else:
print("including replicas in states:", list(all_states), file=sys.stderr)
replicas = replicas.filter(Replica.state.in_(list(all_states)))
dirs = set()
n = 0
filter_re = config.dbdump_param(rse, "filter")
if filter_re:
filter_re = re.compile(filter_re)
for r in replicas:
path = r.name
state = r.state
if not path.startswith(subdir):
continue
if filter_re is not None:
if not filter_re.search(path):
continue
if any(p.match(path) for p in ignore_file_patterns):
continue
words = path.rsplit("/", 1)
if len(words) == 1:
dirp = "/"
else:
dirp = words[0]
dirs.add(dirp)
for s, out_list in outputs.items():
if state in s or s == '*':
if long_output:
out_list.add("%s\t%s\t%s\t%s\t%s" % (rse_name, r.scope, r.name, path or "null", r.state))
else:
out_list.add(path or "null")
n += 1
if n % batch == 0:
print(n)
if stop_after is not None and n >= stop_after:
print(f"stopped after {stop_after} files", file=sys.stderr)
break
for out_list in outputs.values():
out_list.close()
sys.stderr.write("Found %d files in %d directories\n" % (n, len(dirs)))
t1 = time.time()
t = int(t1 - t0)
s = t % 60
m = t // 60
sys.stderr.write("Elapsed time: %dm%02ds\n" % (m, s))
except:
lines = traceback.format_exc().split("\n")
t1 = time.time()
if stats is not None:
stats[stats_key].update({
"status":"failed",
"end_time":t1,
"exception":lines
})
stats.save()
else:
if stats is not None:
stats[stats_key].update({
"status":"done",
"end_time":t1,
"files":n,
"elapsed":t1-t0,
"directories":len(dirs)
})
stats.save()
| true
| true
|
f70e29d6889aa1ef6b97680c7ad657613003f54a
| 684
|
py
|
Python
|
venv/lib/python3.8/site-packages/setuptools/command/bdist_wininst.py
|
realxwx/leetcode-solve
|
3a7d7d8e92a5fd5fecc347d141a1c532b92e763e
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/setuptools/command/bdist_wininst.py
|
realxwx/leetcode-solve
|
3a7d7d8e92a5fd5fecc347d141a1c532b92e763e
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/setuptools/command/bdist_wininst.py
|
realxwx/leetcode-solve
|
3a7d7d8e92a5fd5fecc347d141a1c532b92e763e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020
# Author: xiaoweixiang
import distutils.command.bdist_wininst as orig
class bdist_wininst(orig.bdist_wininst):
    """Setuptools flavour of distutils' ``bdist_wininst`` command."""

    def reinitialize_command(self, command, reinit_subcommands=0):
        """
        Supplement reinitialize_command to work around
        http://bugs.python.org/issue20819
        """
        reinitialized = self.distribution.reinitialize_command(
            command, reinit_subcommands)
        if command in ('install', 'install_lib'):
            # Clearing install_lib is the actual workaround for issue20819.
            reinitialized.install_lib = None
        return reinitialized

    def run(self):
        # Expose a flag so other code can detect an in-progress wininst build.
        self._is_running = True
        try:
            orig.bdist_wininst.run(self)
        finally:
            self._is_running = False
| 27.36
| 66
| 0.633041
|
import distutils.command.bdist_wininst as orig
class bdist_wininst(orig.bdist_wininst):
def reinitialize_command(self, command, reinit_subcommands=0):
cmd = self.distribution.reinitialize_command(
command, reinit_subcommands)
if command in ('install', 'install_lib'):
cmd.install_lib = None
return cmd
def run(self):
self._is_running = True
try:
orig.bdist_wininst.run(self)
finally:
self._is_running = False
| true
| true
|
f70e29e6bf6eea0771d8185054f6e9499ef78f05
| 5,646
|
py
|
Python
|
test/functional/p2p_invalid_block.py
|
luascoin/luascoin
|
74558007cfd46f9d914e9683dfaac1b0274717a9
|
[
"MIT"
] | null | null | null |
test/functional/p2p_invalid_block.py
|
luascoin/luascoin
|
74558007cfd46f9d914e9683dfaac1b0274717a9
|
[
"MIT"
] | null | null | null |
test/functional/p2p_invalid_block.py
|
luascoin/luascoin
|
74558007cfd46f9d914e9683dfaac1b0274717a9
|
[
"MIT"
] | 1
|
2020-02-04T22:41:33.000Z
|
2020-02-04T22:41:33.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Luascoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid blocks.
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
"""
import copy
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import COIN
from test_framework.mininode import P2PDataStore
from test_framework.test_framework import LuascoinTestFramework
from test_framework.util import assert_equal
class InvalidBlockRequestTest(LuascoinTestFramework):
    """Functional test: submit invalid blocks over p2p and check the node's responses.

    Covers merkle-root malleability (CVE-2012-2459), duplicate-input blocks
    (CVE-2018-17144), and a block with an over-value coinbase.
    """

    def set_test_params(self):
        # Single node on a fresh chain; whitelist localhost so our p2p peer
        # is not disconnected/banned for relaying invalid blocks.
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.extra_args = [["-whitelist=127.0.0.1"]]

    def run_test(self):
        """Drive the invalid-block scenarios described in the module docstring."""
        # Add p2p connection to node0
        node = self.nodes[0]  # convenience reference to the node
        node.add_p2p_connection(P2PDataStore())
        best_block = node.getblock(node.getbestblockhash())
        tip = int(node.getbestblockhash(), 16)
        height = best_block["height"] + 1
        block_time = best_block["time"] + 1
        self.log.info("Create a new block with an anyone-can-spend coinbase")
        height = 1
        block = create_block(tip, create_coinbase(height), block_time)
        block.solve()
        # Save the coinbase for later
        block1 = block
        tip = block.sha256
        node.p2p.send_blocks_and_test([block1], node, success=True)
        self.log.info("Mature the block.")
        # 100 confirmations are needed before a coinbase output is spendable.
        node.generatetoaddress(100, node.get_deterministic_priv_key().address)
        best_block = node.getblock(node.getbestblockhash())
        tip = int(node.getbestblockhash(), 16)
        height = best_block["height"] + 1
        block_time = best_block["time"] + 1
        # Use merkle-root malleability to generate an invalid block with
        # same blockheader (CVE-2012-2459).
        # Manufacture a block with 3 transactions (coinbase, spend of prior
        # coinbase, spend of that spend). Duplicate the 3rd transaction to
        # leave merkle root and blockheader unchanged but invalidate the block.
        # For more information on merkle-root malleability see src/consensus/merkle.cpp.
        self.log.info("Test merkle root malleability.")
        block2 = create_block(tip, create_coinbase(height), block_time)
        block_time += 1
        # b'0x51' is OP_TRUE
        tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=b'\x51', amount=50 * COIN)
        tx2 = create_tx_with_script(tx1, 0, script_sig=b'\x51', amount=50 * COIN)
        block2.vtx.extend([tx1, tx2])
        block2.hashMerkleRoot = block2.calc_merkle_root()
        block2.rehash()
        block2.solve()
        orig_hash = block2.sha256
        block2_orig = copy.deepcopy(block2)
        # Mutate block 2: the duplicated tx changes the tx list but, by the
        # malleability property, not the merkle root or the block hash.
        block2.vtx.append(tx2)
        assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
        assert_equal(orig_hash, block2.rehash())
        assert block2_orig.vtx != block2.vtx
        node.p2p.send_blocks_and_test([block2], node, success=False, reject_reason='bad-txns-duplicate')
        # Check transactions for duplicate inputs (CVE-2018-17144)
        self.log.info("Test duplicate input block.")
        block2_dup = copy.deepcopy(block2_orig)
        block2_dup.vtx[2].vin.append(block2_dup.vtx[2].vin[0])
        block2_dup.vtx[2].rehash()
        block2_dup.hashMerkleRoot = block2_dup.calc_merkle_root()
        block2_dup.rehash()
        block2_dup.solve()
        node.p2p.send_blocks_and_test([block2_dup], node, success=False, reject_reason='bad-txns-inputs-duplicate')
        self.log.info("Test very broken block.")
        block3 = create_block(tip, create_coinbase(height), block_time)
        block_time += 1
        block3.vtx[0].vout[0].nValue = 100 * COIN  # Too high!
        block3.vtx[0].sha256 = None
        block3.vtx[0].calc_sha256()
        block3.hashMerkleRoot = block3.calc_merkle_root()
        block3.rehash()
        block3.solve()
        node.p2p.send_blocks_and_test([block3], node, success=False, reject_reason='bad-cb-amount')
        # Complete testing of CVE-2012-2459 by sending the original block.
        # It should be accepted even though it has the same hash as the mutated one.
        self.log.info("Test accepting original block after rejecting its mutated version.")
        node.p2p.send_blocks_and_test([block2_orig], node, success=True, timeout=5)
        # Update tip info
        height += 1
        block_time += 1
        tip = int(block2_orig.hash, 16)
        # Complete testing of CVE-2018-17144, by checking for the inflation bug.
        # Create a block that spends the output of a tx in a previous block.
        block4 = create_block(tip, create_coinbase(height), block_time)
        tx3 = create_tx_with_script(tx2, 0, script_sig=b'\x51', amount=50 * COIN)
        # Duplicates input
        tx3.vin.append(tx3.vin[0])
        tx3.rehash()
        block4.vtx.append(tx3)
        block4.hashMerkleRoot = block4.calc_merkle_root()
        block4.rehash()
        block4.solve()
        self.log.info("Test inflation by duplicating input")
        node.p2p.send_blocks_and_test([block4], node, success=False, reject_reason='bad-txns-inputs-duplicate')
# Run the functional test directly when this file is executed as a script.
if __name__ == '__main__':
    InvalidBlockRequestTest().main()
| 40.913043
| 115
| 0.68119
|
import copy
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import COIN
from test_framework.mininode import P2PDataStore
from test_framework.test_framework import LuascoinTestFramework
from test_framework.util import assert_equal
class InvalidBlockRequestTest(LuascoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [["-whitelist=127.0.0.1"]]
def run_test(self):
node = self.nodes[0]
node.add_p2p_connection(P2PDataStore())
best_block = node.getblock(node.getbestblockhash())
tip = int(node.getbestblockhash(), 16)
height = best_block["height"] + 1
block_time = best_block["time"] + 1
self.log.info("Create a new block with an anyone-can-spend coinbase")
height = 1
block = create_block(tip, create_coinbase(height), block_time)
block.solve()
block1 = block
tip = block.sha256
node.p2p.send_blocks_and_test([block1], node, success=True)
self.log.info("Mature the block.")
node.generatetoaddress(100, node.get_deterministic_priv_key().address)
best_block = node.getblock(node.getbestblockhash())
tip = int(node.getbestblockhash(), 16)
height = best_block["height"] + 1
block_time = best_block["time"] + 1
self.log.info("Test merkle root malleability.")
block2 = create_block(tip, create_coinbase(height), block_time)
block_time += 1
tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=b'\x51', amount=50 * COIN)
tx2 = create_tx_with_script(tx1, 0, script_sig=b'\x51', amount=50 * COIN)
block2.vtx.extend([tx1, tx2])
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.rehash()
block2.solve()
orig_hash = block2.sha256
block2_orig = copy.deepcopy(block2)
block2.vtx.append(tx2)
assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
assert_equal(orig_hash, block2.rehash())
assert block2_orig.vtx != block2.vtx
node.p2p.send_blocks_and_test([block2], node, success=False, reject_reason='bad-txns-duplicate')
self.log.info("Test duplicate input block.")
block2_dup = copy.deepcopy(block2_orig)
block2_dup.vtx[2].vin.append(block2_dup.vtx[2].vin[0])
block2_dup.vtx[2].rehash()
block2_dup.hashMerkleRoot = block2_dup.calc_merkle_root()
block2_dup.rehash()
block2_dup.solve()
node.p2p.send_blocks_and_test([block2_dup], node, success=False, reject_reason='bad-txns-inputs-duplicate')
self.log.info("Test very broken block.")
block3 = create_block(tip, create_coinbase(height), block_time)
block_time += 1
block3.vtx[0].vout[0].nValue = 100 * COIN
block3.vtx[0].sha256 = None
block3.vtx[0].calc_sha256()
block3.hashMerkleRoot = block3.calc_merkle_root()
block3.rehash()
block3.solve()
node.p2p.send_blocks_and_test([block3], node, success=False, reject_reason='bad-cb-amount')
self.log.info("Test accepting original block after rejecting its mutated version.")
node.p2p.send_blocks_and_test([block2_orig], node, success=True, timeout=5)
height += 1
block_time += 1
tip = int(block2_orig.hash, 16)
block4 = create_block(tip, create_coinbase(height), block_time)
tx3 = create_tx_with_script(tx2, 0, script_sig=b'\x51', amount=50 * COIN)
tx3.vin.append(tx3.vin[0])
tx3.rehash()
block4.vtx.append(tx3)
block4.hashMerkleRoot = block4.calc_merkle_root()
block4.rehash()
block4.solve()
self.log.info("Test inflation by duplicating input")
node.p2p.send_blocks_and_test([block4], node, success=False, reject_reason='bad-txns-inputs-duplicate')
if __name__ == '__main__':
InvalidBlockRequestTest().main()
| true
| true
|
f70e2cf40b2b6d3ff1c3f728e4b8081fbe399996
| 8,208
|
py
|
Python
|
ImageBot/infrastructure/Pipeline.py
|
FraunhoferIAO/Image-Bot
|
951258a78a297f3fb27478f5671f6bc804cd5715
|
[
"MIT"
] | 2
|
2021-12-28T08:33:14.000Z
|
2022-01-06T15:28:19.000Z
|
ImageBot/infrastructure/Pipeline.py
|
FraunhoferIAO/Image-Bot
|
951258a78a297f3fb27478f5671f6bc804cd5715
|
[
"MIT"
] | 1
|
2022-02-17T17:43:11.000Z
|
2022-02-17T17:43:11.000Z
|
ImageBot/infrastructure/Pipeline.py
|
IAORaisierer/Image-Bot
|
951258a78a297f3fb27478f5671f6bc804cd5715
|
[
"MIT"
] | 1
|
2022-02-09T18:24:09.000Z
|
2022-02-09T18:24:09.000Z
|
"""Pipeline class implementing Pipes and Filters pattern.
A generic pipeline to process messages efficiently in a pipes-and-filter manner (multiprocessing possible).
Inspired, but not copied from
https://deparkes.co.uk/2019/12/08/simple-python-pipes-and-filters/
Authors:
- Lukas Block
- Adrian Raiser
Todo:
- Add license boilerplate
"""
import multiprocessing
from functools import partial
import traceback
from collections.abc import Iterable
from typing import Callable
from numpy import sin
class Pipeline(object):
    """A pipes-and-filters processing pipeline.

    Filters are callables chained in series: each receives a message (or,
    in batch mode, an iterable of messages) and returns zero or more
    messages for the next filter.  Executions can optionally run in a
    multiprocessing pool.
    """

    def __init__(self, with_multiprocessing=False, max_no_processes=8):
        """Create a pipeline.

        Args:
            with_multiprocessing (bool, optional): Run executions in a
                process pool. Defaults to False.
            max_no_processes (int, optional): Pool size when multiprocessing
                is enabled. Defaults to 8.
        """
        self._multiprocessing = with_multiprocessing
        if with_multiprocessing:
            # Pool is only created when requested, so non-MP pipelines stay cheap.
            self._pool = multiprocessing.Pool(max_no_processes)
        self._filters = []

    def add(self, filter : Callable, batch_processing=False):
        """Append a filter to the end of the pipeline.

        Args:
            filter (Callable): Callable taking a message object (or an
                Iterable of message objects when batch_processing is True)
                and returning a message, a list of messages, or None.
            batch_processing (bool, optional): The filter accepts the whole
                batch (an Iterable of messages) at once. Defaults to False.
        """
        assert callable(filter)
        self._filters.append((filter, batch_processing))

    def insert(self, index, filter, batch_processing=False):
        """Insert a filter at the given position in the pipeline.

        Args:
            index (int): Position to insert the filter at.
            filter (Callable): Filter to be added (see :meth:`add`).
            batch_processing (bool, optional): The filter supports batch
                processing. Defaults to False.
        """
        assert callable(filter)
        self._filters.insert(index, (filter, batch_processing))

    def execute(self, message, clbck=None, batch_processing=False):
        """Run the pipeline on a message (or, in batch mode, messages).

        With multiprocessing enabled this submits the work to the pool and
        returns immediately; ``clbck`` (if given) is invoked from a pool
        thread when processing finishes.  Without multiprocessing the work
        runs synchronously in the calling process.

        Args:
            message (object|Iterable[object]): Message(s) to be piped.
            clbck (Callable|None, optional): Called with the pipeline result
                when processing finishes. In multiprocessing mode it runs in
                another thread/process, so use e.g. a multiprocessing.Queue
                to move results back and keep it fast. Defaults to None.
            batch_processing (bool, optional): ``message`` is an Iterable of
                messages to process as one batch. Defaults to False.

        Returns:
            multiprocessing.pool.AsyncResult when multiprocessing is enabled,
            otherwise the pipeline result list (None if a filter raised).
        """
        if clbck is not None:
            assert callable(clbck)
        # Batch processing requires an iterable of messages.
        if batch_processing:
            assert isinstance(message, Iterable)

        if self._multiprocessing:
            # Submit to the pool; errors are reported via error_callback
            # instead of being silently swallowed by the pool.
            if clbck is None:
                return self._pool.apply_async(
                    Pipeline.call_fnc, (message, self._filters, batch_processing),
                    error_callback=Pipeline.error_callback)
            return self._pool.apply_async(
                Pipeline.call_fnc, (message, self._filters, batch_processing),
                callback=clbck, error_callback=Pipeline.error_callback)

        # Synchronous path.  Initialize `result` up front: the original code
        # referenced an unbound name (NameError) when a filter raised.
        result = None
        try:
            result = self(message, batch_processing=batch_processing)
        except Exception as ex:
            Pipeline.error_callback(ex)
        if clbck is not None:
            clbck(result)
        return result

    def __call__(self, message, batch_processing=False):
        """Overloads the call operator; see :meth:`execute`.

        Args:
            message (object|Iterable[object]): See :meth:`execute`.
            batch_processing (bool, optional): See :meth:`execute`.

        Returns:
            list[object]: The messages produced by the last filter.
        """
        return Pipeline.call_fnc(message, self._filters, batch_processing=batch_processing)

    def join(self):
        """Block until all started subprocesses of the pipeline finished.

        Returns:
            None: Returns as soon as all pool workers completed.
        """
        if self._multiprocessing:
            self._pool.close()
            self._pool.join()

    @staticmethod
    def error_callback(e):
        """Print an exception that occurred inside the pipeline.

        Args:
            e (BaseException): The exception raised by a filter.
        """
        print("An exception occurred in the pipeline:")
        traceback.print_exception(type(e), e, e.__traceback__)

    @staticmethod
    def call_fnc(message, filters, batch_processing=False):
        """Run the message(s) through the given filters in order.

        A filter returning a list contributes all its elements to the next
        stage; returning None contributes nothing; any other return value is
        passed on as a single message.  A Pipeline is itself a valid filter.

        Args:
            message (object|Iterable[object]): Input message(s).
            filters (list[tuple[Callable, bool]]): (filter, supports_batch)
                pairs, executed in order.
            batch_processing (bool, optional): ``message`` is already an
                iterable of messages. Defaults to False.

        Returns:
            list[object]: Messages produced by the final filter.
        """
        prev_results = message if batch_processing else [message]
        for filter_fn, supports_batch in filters:
            if supports_batch:
                # The filter consumes the whole batch at once.
                prev_results = filter_fn(prev_results)
                continue
            # Apply the filter to each message and flatten the results.
            collected = []
            for item in prev_results:
                produced = filter_fn(item)
                if isinstance(produced, list):
                    collected.extend(produced)
                elif produced is not None:
                    collected.append(produced)
            prev_results = collected
        return prev_results
| 40.235294
| 226
| 0.628411
|
import multiprocessing
from functools import partial
import traceback
from collections.abc import Iterable
from typing import Callable
from numpy import sin
class Pipeline(object):
def __init__(self, with_multiprocessing=False, max_no_processes=8):
self._multiprocessing = with_multiprocessing
if with_multiprocessing:
self._pool = multiprocessing.Pool(max_no_processes)
self._filters = []
def add(self, filter : Callable, batch_processing=False):
assert callable(filter)
self._filters.append((filter, batch_processing))
def insert(self, index, filter, batch_processing=False):
assert callable(filter)
self._filters.insert(index, (filter, batch_processing))
def execute(self, message, clbck=None, batch_processing=False):
if clbck is not None:
assert callable(clbck)
if batch_processing:
assert isinstance(message, Iterable)
if self._multiprocessing:
fnc = partial(Pipeline.call_fnc, filters=self._filters, message=message, batch_processing=batch_processing)
if clbck is None:
return self._pool.apply_async(Pipeline.call_fnc, (message, self._filters, batch_processing), error_callback=Pipeline.error_callback)
else:
return self._pool.apply_async(Pipeline.call_fnc, (message, self._filters ,batch_processing), callback=clbck, error_callback=Pipeline.error_callback)
else:
try:
result = self(message, batch_processing=batch_processing)
except Exception as ex:
Pipeline.error_callback(ex)
if clbck is not None:
clbck(result)
def __call__(self, message, batch_processing=False):
return Pipeline.call_fnc(message, self._filters, batch_processing=batch_processing)
def join(self):
if self._multiprocessing:
self._pool.close()
self._pool.join()
def error_callback(e):
print("An exception occurred in the pipeline:")
traceback.print_exception(type(e), e, e.__traceback__)
def call_fnc(message, filters, batch_processing=False):
prev_results = None
if batch_processing:
prev_results = message
else:
prev_results = [message]
for fb in filters:
f = fb[0]
new_results = []
if not fb[1]:
for pr in prev_results:
single_new_result = f(pr)
if isinstance(single_new_result, list):
new_results.extend(single_new_result)
elif single_new_result is None:
pass
else:
new_results.append(single_new_result)
else:
new_results = f(prev_results)
prev_results = new_results
return prev_results
| true
| true
|
f70e2e152a2e3569ad9ca7208880f6cf943eab0b
| 228
|
py
|
Python
|
setup.py
|
lukovnikov/semparse
|
0fd5fcd9c982b6faac8f08b451f20273d2cc0da7
|
[
"MIT"
] | null | null | null |
setup.py
|
lukovnikov/semparse
|
0fd5fcd9c982b6faac8f08b451f20273d2cc0da7
|
[
"MIT"
] | null | null | null |
setup.py
|
lukovnikov/semparse
|
0fd5fcd9c982b6faac8f08b451f20273d2cc0da7
|
[
"MIT"
] | 1
|
2021-04-06T13:15:01.000Z
|
2021-04-06T13:15:01.000Z
|
from setuptools import setup, find_packages

# Distribution metadata for the "semparse" package.
setup(
    name="semparse",
    description="semparse",
    author="Sum-Ting Wong",
    author_email="sumting@wo.ng",
    install_requires=[],
    packages=["semparse"],
)
| 22.8
| 43
| 0.640351
|
from setuptools import setup, find_packages
setup(name="semparse",
description="semparse",
author="Sum-Ting Wong",
author_email="sumting@wo.ng",
install_requires=[],
packages=["semparse"],
)
| true
| true
|
f70e2f26475b76e1fd7448f5a0f2e18350377df4
| 1,541
|
py
|
Python
|
var/spack/repos/builtin/packages/perl-moose/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/perl-moose/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/perl-moose/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PerlMoose(PerlPackage):
    """A postmodern object system for Perl 5"""

    homepage = "https://metacpan.org/pod/Moose"
    url = "https://search.cpan.org/CPAN/authors/id/E/ET/ETHER/Moose-2.2006.tar.gz"

    # Known releases, pinned by sha256 checksum of the release tarball.
    version('2.2010', sha256='af0905b69f18c27de1177c9bc7778ee495d4ec91be1f223e8ca8333af4de08c5')
    version('2.2009', sha256='63ba8a5e27dbcbdbac2cd8f4162fff50a31e9829d8955a196a5898240c02d194')
    version('2.2007', sha256='bc75a320b55ba26ac9e60e11a77b3471066cb615bf7097537ed22e20df88afe8')
    version('2.2006', sha256='a4e00ab25cc41bebc5e7a11d71375fb5e64b56d5f91159afee225d698e06392b')

    # Perl module dependencies, required both at build and at run time.
    depends_on('perl-cpan-meta-check', type=('build', 'run'))
    depends_on('perl-test-cleannamespaces', type=('build', 'run'))
    depends_on('perl-devel-overloadinfo', type=('build', 'run'))
    depends_on('perl-class-load-xs', type=('build', 'run'))
    depends_on('perl-devel-stacktrace', type=('build', 'run'))
    depends_on('perl-eval-closure', type=('build', 'run'))
    depends_on('perl-sub-name', type=('build', 'run'))
    depends_on('perl-module-runtime-conflicts', type=('build', 'run'))
    depends_on('perl-devel-globaldestruction', type=('build', 'run'))
    depends_on('perl-package-deprecationmanager', type=('build', 'run'))
    depends_on('perl-package-stash-xs', type=('build', 'run'))
| 49.709677
| 96
| 0.72096
|
from spack.package import *
class PerlMoose(PerlPackage):
homepage = "https://metacpan.org/pod/Moose"
url = "https://search.cpan.org/CPAN/authors/id/E/ET/ETHER/Moose-2.2006.tar.gz"
version('2.2010', sha256='af0905b69f18c27de1177c9bc7778ee495d4ec91be1f223e8ca8333af4de08c5')
version('2.2009', sha256='63ba8a5e27dbcbdbac2cd8f4162fff50a31e9829d8955a196a5898240c02d194')
version('2.2007', sha256='bc75a320b55ba26ac9e60e11a77b3471066cb615bf7097537ed22e20df88afe8')
version('2.2006', sha256='a4e00ab25cc41bebc5e7a11d71375fb5e64b56d5f91159afee225d698e06392b')
depends_on('perl-cpan-meta-check', type=('build', 'run'))
depends_on('perl-test-cleannamespaces', type=('build', 'run'))
depends_on('perl-devel-overloadinfo', type=('build', 'run'))
depends_on('perl-class-load-xs', type=('build', 'run'))
depends_on('perl-devel-stacktrace', type=('build', 'run'))
depends_on('perl-eval-closure', type=('build', 'run'))
depends_on('perl-sub-name', type=('build', 'run'))
depends_on('perl-module-runtime-conflicts', type=('build', 'run'))
depends_on('perl-devel-globaldestruction', type=('build', 'run'))
depends_on('perl-package-deprecationmanager', type=('build', 'run'))
depends_on('perl-package-stash-xs', type=('build', 'run'))
| true
| true
|
f70e2fae26d7e5c8e0f91ebb422314b422641faf
| 1,852
|
py
|
Python
|
MRPT/vdz/atoms/V_0/mrpt.py
|
mussard/share_data_benchmark
|
c02bfa4017b9008800cabe47d7c7959f82c26060
|
[
"MIT"
] | null | null | null |
MRPT/vdz/atoms/V_0/mrpt.py
|
mussard/share_data_benchmark
|
c02bfa4017b9008800cabe47d7c7959f82c26060
|
[
"MIT"
] | null | null | null |
MRPT/vdz/atoms/V_0/mrpt.py
|
mussard/share_data_benchmark
|
c02bfa4017b9008800cabe47d7c7959f82c26060
|
[
"MIT"
] | null | null | null |
import json
from pyscf import gto, scf, mcscf, fci, lo, ci, cc
from pyscf.scf import ROHF, UHF, ROKS
import numpy as np
import pandas as pd

# Compute the MRPT (MRLCC) total energy of a single transition-metal atom
# and write the result to <element>.csv.

# ECP/basis data keyed by element symbol; close the file handle promptly
# (the original left it open via json.load(open(...))).
with open("../../../trail.json") as fh:
    df = json.load(fh)

# Number of unpaired electrons of the neutral-atom ground state.
spins = {'Sc': 1, 'Ti': 2, 'V': 3, 'Cr': 6, 'Mn': 5, 'Fe': 4, 'Cu': 1}
# Per-element tuple table -- unused in this script, kept for reference.
nd = {'Sc': (1, 0), 'Ti': (2, 0), 'V': (3, 0), 'Cr': (5, 0), 'Mn': (5, 0), 'Fe': (5, 1), 'Cu': (5, 5)}
# Number of active electrons for the CAS treatment per element.
cas = {'Sc': 3, 'Ti': 4, 'V': 5, 'Cr': 6, 'Mn': 7, 'Fe': 8, 'Cu': 11}

# Output table, one column per CSV field.
datacsv = {}
for nm in ['atom', 'charge', 'method', 'basis', 'pseudopotential',
           'totalenergy', 'totalenergy-stocherr', 'totalenergy-syserr']:
    datacsv[nm] = []

basis = 'vdz'
el = 'V'
charge = 0

# Build the single-atom molecule with the element-specific ECP and basis.
mol = gto.Mole()
mol.ecp = {}
mol.basis = {}
mol.ecp[el] = gto.basis.parse_ecp(df[el]['ecp'])
mol.basis[el] = gto.basis.parse(df[el][basis])
mol.charge = charge
if el == 'Cr' or el == 'Cu':
    # Cr/Cu: ionization removes an unpaired electron -> spin decreases.
    mol.spin = spins[el] - charge
else:
    mol.spin = spins[el] + charge
mol.build(atom="%s 0. 0. 0." % el, verbose=4)

# Restricted open-shell HF, restarted from a converged HF checkpoint.
m = ROHF(mol)
m.level_shift = 1000.0
dm = m.from_chk("../../../../HF/atoms/" + el + basis + str(charge) + ".chk")
hf = m.kernel(dm)
m.analyze()

# SHCI-driven CASSCF with 6 active orbitals and cas[el]-charge electrons.
from pyscf.shciscf import shci
mc = shci.SHCISCF(m, 6, cas[el] - charge)
#mc.fcisolver.conv_tol = 1e-14
mc.fcisolver.mpiprefix = "srun -n20"
mc.fcisolver.num_thrds = 12
mc.verbose = 4
# Use a dedicated name for the CASSCF energy: the original rebound `cas`
# here, clobbering the active-electron table defined above.
e_casscf = mc.kernel()[0]

# Third-order, fully internally contracted MRLCC perturbation correction.
from pyscf.icmpspt import icmpspt
pt = icmpspt.icmpspt(mc, rdmM=500, PTM=1000,
                     pttype="MRLCC",
                     third_order=True,
                     fully_ic=True,
                     do_dm4=True)

# Record the result row and write the CSV.
datacsv['atom'].append(el)
datacsv['charge'].append(charge)
datacsv['method'].append('MRPT')
datacsv['basis'].append(basis)
datacsv['pseudopotential'].append('trail')
datacsv['totalenergy'].append(e_casscf + pt)
datacsv['totalenergy-stocherr'].append(0.0)
datacsv['totalenergy-syserr'].append(0.0)
pd.DataFrame(datacsv).to_csv(el + ".csv", index=False)
| 26.84058
| 86
| 0.607991
|
import json
from pyscf import gto,scf,mcscf, fci, lo, ci, cc
from pyscf.scf import ROHF, UHF,ROKS
import numpy as np
import pandas as pd
df=json.load(open("../../../trail.json"))
spins={'Sc':1, 'Ti':2, 'V':3, 'Cr':6, 'Mn':5, 'Fe':4, 'Cu':1}
nd={'Sc':(1,0), 'Ti':(2,0), 'V':(3,0), 'Cr':(5,0), 'Mn':(5,0), 'Fe':(5,1), 'Cu':(5,5)}
cas={'Sc':3, 'Ti':4, 'V':5, 'Cr':6, 'Mn':7, 'Fe':8, 'Cu':11}
datacsv={}
for nm in ['atom','charge','method','basis','pseudopotential',
'totalenergy','totalenergy-stocherr','totalenergy-syserr']:
datacsv[nm]=[]
basis='vdz'
el='V'
charge=0
mol=gto.Mole()
mol.ecp={}
mol.basis={}
mol.ecp[el]=gto.basis.parse_ecp(df[el]['ecp'])
mol.basis[el]=gto.basis.parse(df[el][basis])
mol.charge=charge
if el == 'Cr' or el == 'Cu':
mol.spin=spins[el]-charge
else:
mol.spin=spins[el]+charge
mol.build(atom="%s 0. 0. 0."%el,verbose=4)
m=ROHF(mol)
m.level_shift=1000.0
dm=m.from_chk("../../../../HF/atoms/"+el+basis+str(charge)+".chk")
hf=m.kernel(dm)
m.analyze()
from pyscf.shciscf import shci
mc = shci.SHCISCF(m, 6, cas[el]-charge)
mc.fcisolver.mpiprefix="srun -n20"
mc.fcisolver.num_thrds=12
mc.verbose = 4
cas=mc.kernel()[0]
from pyscf.icmpspt import icmpspt
pt=icmpspt.icmpspt(mc,rdmM=500, PTM=1000,\
pttype="MRLCC",\
third_order=True,\
fully_ic=True,\
do_dm4=True)
datacsv['atom'].append(el)
datacsv['charge'].append(charge)
datacsv['method'].append('MRPT')
datacsv['basis'].append(basis)
datacsv['pseudopotential'].append('trail')
datacsv['totalenergy'].append(cas+pt)
datacsv['totalenergy-stocherr'].append(0.0)
datacsv['totalenergy-syserr'].append(0.0)
pd.DataFrame(datacsv).to_csv(el+".csv",index=False)
| true
| true
|
f70e2fc47aa0a5ea93392ef38025182ca0cd5b10
| 1,030
|
py
|
Python
|
alu.py
|
jhonatheberson/MIPS-architecture
|
b027502992c115c1529fe9d0ceaaf00e7bdb930e
|
[
"MIT"
] | null | null | null |
alu.py
|
jhonatheberson/MIPS-architecture
|
b027502992c115c1529fe9d0ceaaf00e7bdb930e
|
[
"MIT"
] | null | null | null |
alu.py
|
jhonatheberson/MIPS-architecture
|
b027502992c115c1529fe9d0ceaaf00e7bdb930e
|
[
"MIT"
] | null | null | null |
class ALU():
    """Arithmetic logic unit model for a simplified MIPS datapath.

    Holds the two source register values (Rs, Rt) and the destination
    register (Rd), and applies the operation selected by an opcode.
    """

    def __init__(self):
        # Register contents; None until explicitly set.
        self.Rs = None
        self.Rt = None
        self.Rd = None

    def alu(self, opcode):
        """Apply the operation selected by *opcode*.

        Opcodes 0-2 compute Rd and return it; opcodes 3-6 are
        unimplemented placeholders that only print a message and
        implicitly return None (as does any other opcode).
        """
        if opcode == 0:
            # ADD: Rd = Rs + Rt
            self.Rd = self.Rs + self.Rt
            return self.Rd
        if opcode == 1:
            # SUB: Rd = Rs - Rt
            self.Rd = self.Rs - self.Rt
            return self.Rd
        if opcode == 2:
            # Copy Rt into Rd (0 + Rt).
            self.Rd = 0 + self.Rt
            return self.Rd
        if opcode == 3:  # tipo I == 1
            print('não sei o que "BEQ"')
        elif opcode == 4:  # tipo J == 2
            print('nao sei o que "J"')
        elif opcode == 5:
            print('nao sei o que "J"')
        elif opcode == 6:
            print('nao sei o que "J"')

    def setRs(self, Rs_final):
        self.Rs = Rs_final

    def setRt(self, Rt_final):
        self.Rt = Rt_final

    def setRd(self, Rd_final):
        self.Rd = Rd_final

    def getRs(self):
        return self.Rs

    def getRt(self):
        return self.Rt

    def getRd(self):
        return self.Rd
| 22.391304
| 41
| 0.469903
|
class ALU():
def __init__(self):
self.Rs = None
self.Rt = None
self.Rd = None
def alu(self, opcode):
if (opcode == 0):
self.Rd = self.Rs + self.Rt
return self.Rd
elif (opcode == 1):
self.Rd = self.Rs - self.Rt
return self.Rd
elif (opcode == 2):
self.Rd = int(0) + self.Rt
return self.Rd
elif (opcode == 3):
print('não sei o que "BEQ"')
elif (opcode == 4):
print('nao sei o que "J"')
elif (opcode == 5 ):
print('nao sei o que "J"')
elif(opcode == 6):
print('nao sei o que "J"')
def setRs(self, Rs_final):
self.Rs = Rs_final
def setRt(self, Rt_final):
self.Rt = Rt_final
def setRd(self, Rd_final):
self.Rd = Rd_final
def getRs(self):
return self.Rs
def getRt(self):
return self.Rt
def getRd(self):
return self.Rd
| true
| true
|
f70e3047e3f418e139848f3764890f8c70ffc0e6
| 23
|
py
|
Python
|
__init__.py
|
marcusschiesser/all-intraday
|
8879251896074b0a527d75baa8389a071a81d058
|
[
"MIT"
] | 20
|
2020-02-08T06:41:41.000Z
|
2022-01-06T08:41:01.000Z
|
__init__.py
|
marcusschiesser/all-intraday
|
8879251896074b0a527d75baa8389a071a81d058
|
[
"MIT"
] | null | null | null |
__init__.py
|
marcusschiesser/all-intraday
|
8879251896074b0a527d75baa8389a071a81d058
|
[
"MIT"
] | 5
|
2020-01-15T06:21:49.000Z
|
2021-02-14T16:57:26.000Z
|
from .intraday import *
| 23
| 23
| 0.782609
|
from .intraday import *
| true
| true
|
f70e3222a35b11a54e91bb0d8e4f03eef77080cc
| 1,543
|
py
|
Python
|
osm2rail/plotter.py
|
PariseC/osm2rail
|
dfc373aedba4a82fd144192cb6a855e8a11b0601
|
[
"Apache-2.0"
] | 6
|
2021-04-30T21:18:44.000Z
|
2022-03-07T01:47:53.000Z
|
osm2rail/plotter.py
|
PariseC/osm2rail
|
dfc373aedba4a82fd144192cb6a855e8a11b0601
|
[
"Apache-2.0"
] | null | null | null |
osm2rail/plotter.py
|
PariseC/osm2rail
|
dfc373aedba4a82fd144192cb6a855e8a11b0601
|
[
"Apache-2.0"
] | 4
|
2021-05-02T01:29:15.000Z
|
2022-02-11T02:02:17.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection,PolyCollection
def showNetwork(network,savefig=None):
    """Plot a network: nodes as dots, links as lines, POIs as polygons.

    Args:
        network: object exposing node_dict, link_dict and POI_list.
        savefig: optional dict; may carry 'filename' (default 'network.png')
            and 'dpi' (default 300) to also write the figure to disk.
    """
    # Collect node coordinates.
    xs, ys = [], []
    for node in network.node_dict.values():
        xs.append(node.x_coord)
        ys.append(node.y_coord)

    # Link geometries as coordinate arrays for a LineCollection.
    segments = [np.array(list(link.geometry.coords))
                for link in network.link_dict.values()]

    # POI polygon outlines (exterior rings), if any.
    polygons = []
    if len(network.POI_list):
        polygons = [np.array(list(poi.geometry.exterior.coords))
                    for poi in network.POI_list]

    fig, ax = plt.subplots(figsize=(12, 8))
    # Nodes on top of links (zorder 1 vs 2 keeps links above the POI fill).
    ax.scatter(xs, ys, marker='o', c='red', s=10, zorder=1)
    ax.add_collection(LineCollection(segments, colors='orange', linewidths=1, zorder=2))
    if len(polygons):
        ax.add_collection(PolyCollection(polygons, alpha=0.7, zorder=0))

    # Fit axes to the data and label them.
    ax.autoscale_view()
    plt.xlabel('x_coord')
    plt.ylabel('y_coord')
    plt.tight_layout()
    plt.show()

    # Optionally persist the figure; failures are reported, not raised.
    if savefig:
        try:
            figname = savefig.get('filename', 'network.png')
            dpi = savefig.get('dpi', 300)
            fig.savefig(figname, dpi=dpi, bbox_inches='tight')
        except Exception as e:
            print(e)
| 32.829787
| 92
| 0.650032
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection,PolyCollection
def showNetwork(network,savefig=None):
node_x_coords=[]
node_y_coords=[]
link_coords=[]
poi_coords=[]
for _,node in network.node_dict.items():
node_x_coords.append(node.x_coord)
node_y_coords.append(node.y_coord)
for _,link in network.link_dict.items():
coords = list(link.geometry.coords)
link_coords.append(np.array(coords))
if len(network.POI_list):
for poi in network.POI_list:
coords = list(poi.geometry.exterior.coords)
poi_coords.append(np.array(coords))
fig, ax = plt.subplots(figsize=(12, 8))
ax.scatter(node_x_coords, node_y_coords, marker='o', c='red', s=10, zorder=1)
ax.add_collection(LineCollection(link_coords, colors='orange', linewidths=1, zorder=2))
if len(poi_coords):
coll = PolyCollection(poi_coords, alpha=0.7, zorder=0)
ax.add_collection(coll)
ax.autoscale_view()
plt.xlabel('x_coord')
plt.ylabel('y_coord')
plt.tight_layout()
plt.show()
if savefig:
try:
figname = savefig['filename'] if 'filename' in savefig.keys() else 'network.png'
dpi = savefig['dpi'] if 'dpi' in savefig else 300
fig.savefig(figname, dpi=dpi, bbox_inches='tight')
except Exception as e:
print(e)
| true
| true
|
f70e32309cc4afb69714ebb30f9eb11ce4f0455a
| 18,318
|
py
|
Python
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/global_/graceful_restart/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 64
|
2016-10-20T15:47:18.000Z
|
2021-11-11T11:57:32.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/global_/graceful_restart/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 126
|
2016-10-05T10:36:14.000Z
|
2019-05-15T08:43:23.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/global_/graceful_restart/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 63
|
2016-11-07T15:23:08.000Z
|
2021-09-22T14:41:16.000Z
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class graceful_restart(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/global/graceful-restart. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration and operational state parameters for OSPFv2
graceful restart
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "graceful-restart"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"global",
"graceful-restart",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/config (container)
YANG Description: Configuration parameters relating to OSPFv2 graceful
restart
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to OSPFv2 graceful
restart
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/state (container)
YANG Description: Operational state parameters relating to OSPFv2 graceful
restart
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state parameters relating to OSPFv2 graceful
restart
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
class graceful_restart(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/global/graceful-restart. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration and operational state parameters for OSPFv2
graceful restart
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "graceful-restart"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"global",
"graceful-restart",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/config (container)
YANG Description: Configuration parameters relating to OSPFv2 graceful
restart
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to OSPFv2 graceful
restart
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/state (container)
YANG Description: Operational state parameters relating to OSPFv2 graceful
restart
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state parameters relating to OSPFv2 graceful
restart
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
| 38.1625
| 377
| 0.604324
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class graceful_restart(PybindBase):
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "graceful-restart"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"global",
"graceful-restart",
]
def _get_config(self):
return self.__config
def _set_config(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
return self.__state
def _set_state(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
class graceful_restart(PybindBase):
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "graceful-restart"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"global",
"graceful-restart",
]
def _get_config(self):
return self.__config
def _set_config(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
return self.__state
def _set_state(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
| true
| true
|
f70e3293e3a739d58604ec20d2cb25c87b48e20e
| 3,356
|
py
|
Python
|
bdd/group_steps.py
|
koppeltatyana/python_training
|
bc07671dcb85b783a13a6b00247e4500fc3c2265
|
[
"Apache-2.0"
] | null | null | null |
bdd/group_steps.py
|
koppeltatyana/python_training
|
bc07671dcb85b783a13a6b00247e4500fc3c2265
|
[
"Apache-2.0"
] | null | null | null |
bdd/group_steps.py
|
koppeltatyana/python_training
|
bc07671dcb85b783a13a6b00247e4500fc3c2265
|
[
"Apache-2.0"
] | null | null | null |
import random
from pytest_bdd import given, when, then # пометки
from model.group import Group
# STEPS FOR ADD GROUP
# предусловие
@given('a group list', target_fixture="group_list") # эти штуки представляют собой фикстуры, а их можно передавать в кач-ве параметра, что мы сделали в ф-ции verify_group_added
def group_list(db):
return db.get_group_list()
# предусловие
@given('a group with <name>, <header> and <footer>', target_fixture="new_group")
def new_group(name, header, footer):
return Group(group_name=name, group_header=header, group_footer=footer)
# действие
@when('I add the group to the list') # это тоже фикстура
def add_new_group(app, new_group):
app.group.create(new_group)
# постусловие
@then('the new group list is equal to the old list with the added group') # и это тоже фикстура
def verify_group_added(db, group_list, new_group):
old_groups_list = group_list
new_groups_list = db.get_group_list()
old_groups_list += [new_group]
assert sorted(old_groups_list, key=Group.id_or_max) == sorted(new_groups_list, key=Group.id_or_max)
# STEPS FOR DELETE GROUP
@given('non empty group list', target_fixture="non_empty_group_list") # эти штуки представляют собой фикстуры, а их можно передавать в кач-ве параметра, что мы сделали в ф-ции verify_group_added
def non_empty_group_list(app, db):
if len(db.get_group_list()) == 0:
app.group.create(Group(group_name="name", group_header="header", group_footer="footer"))
return db.get_group_list()
@given('a random group from non empty group list', target_fixture="random_group")
def random_group(non_empty_group_list):
return random.choice(non_empty_group_list)
@when('I delete the group from the list') # это тоже фикстура
def del_some_group(app, random_group):
app.group.delete_group_by_id(random_group.id)
@then('the new group list is equal to the old list without deleted group') # и это тоже фикстура
def verify_group_deleted(db, non_empty_group_list, random_group):
old_groups_list = non_empty_group_list
new_groups_list = db.get_group_list()
old_groups_list.remove(random_group)
assert sorted(old_groups_list, key=Group.id_or_max) == sorted(new_groups_list, key=Group.id_or_max)
# STEPS FOR MODIFY GROUP
@given('a new group with <new_name>, <new_header> and <new_footer>', target_fixture="new_group_for_modify")
def new_group_for_modify(new_name, new_header, new_footer):
return Group(group_name=new_name, group_header=new_header, group_footer=new_footer)
@when('I modify the group from the list') # это тоже фикстура
def modify_some_group(app, random_group, new_group_for_modify):
app.group.modify_group_by_id(random_group.id, new_group_for_modify)
@then('the new group list is equal to the old list with modify group') # и это тоже фикстура
def verify_group_modify(db, non_empty_group_list, random_group, new_group_for_modify):
old_groups_list = non_empty_group_list
new_groups_list = db.get_group_list()
res_old_groups = []
for i in range(len(old_groups_list)):
if str(old_groups_list[i].id) != str(random_group.id):
res_old_groups += [old_groups_list[i]]
if str(old_groups_list[i].id) == str(random_group.id):
res_old_groups += [new_group_for_modify]
assert res_old_groups == sorted(new_groups_list, key=Group.id_or_max)
| 40.926829
| 195
| 0.758939
|
import random
from pytest_bdd import given, when, then
from model.group import Group
@given('a group list', target_fixture="group_list")
def group_list(db):
return db.get_group_list()
@given('a group with <name>, <header> and <footer>', target_fixture="new_group")
def new_group(name, header, footer):
return Group(group_name=name, group_header=header, group_footer=footer)
@when('I add the group to the list')
def add_new_group(app, new_group):
app.group.create(new_group)
@then('the new group list is equal to the old list with the added group')
def verify_group_added(db, group_list, new_group):
old_groups_list = group_list
new_groups_list = db.get_group_list()
old_groups_list += [new_group]
assert sorted(old_groups_list, key=Group.id_or_max) == sorted(new_groups_list, key=Group.id_or_max)
@given('non empty group list', target_fixture="non_empty_group_list")
def non_empty_group_list(app, db):
if len(db.get_group_list()) == 0:
app.group.create(Group(group_name="name", group_header="header", group_footer="footer"))
return db.get_group_list()
@given('a random group from non empty group list', target_fixture="random_group")
def random_group(non_empty_group_list):
return random.choice(non_empty_group_list)
@when('I delete the group from the list')
def del_some_group(app, random_group):
app.group.delete_group_by_id(random_group.id)
@then('the new group list is equal to the old list without deleted group')
def verify_group_deleted(db, non_empty_group_list, random_group):
old_groups_list = non_empty_group_list
new_groups_list = db.get_group_list()
old_groups_list.remove(random_group)
assert sorted(old_groups_list, key=Group.id_or_max) == sorted(new_groups_list, key=Group.id_or_max)
@given('a new group with <new_name>, <new_header> and <new_footer>', target_fixture="new_group_for_modify")
def new_group_for_modify(new_name, new_header, new_footer):
return Group(group_name=new_name, group_header=new_header, group_footer=new_footer)
@when('I modify the group from the list')
def modify_some_group(app, random_group, new_group_for_modify):
app.group.modify_group_by_id(random_group.id, new_group_for_modify)
@then('the new group list is equal to the old list with modify group')
def verify_group_modify(db, non_empty_group_list, random_group, new_group_for_modify):
    """The DB list must match the old snapshot with the chosen group replaced."""
    actual = sorted(db.get_group_list(), key=Group.id_or_max)
    # Rebuild the expected list in original order, swapping in the edited
    # group wherever the id matches the one that was modified.
    expected = [
        new_group_for_modify if str(group.id) == str(random_group.id) else group
        for group in non_empty_group_list
    ]
    assert expected == actual
| true
| true
|
f70e32fa6a639b2df1a93c5c41ceb46b252ddcef
| 557
|
py
|
Python
|
blackbook/migrations/0014_relinking_transactions_to_budgetperiods.py
|
bsiebens/blackbook
|
636d1adc8966db158914abba43e360c6a0d23173
|
[
"MIT"
] | 1
|
2021-05-10T19:15:48.000Z
|
2021-05-10T19:15:48.000Z
|
blackbook/migrations/0014_relinking_transactions_to_budgetperiods.py
|
bsiebens/BlackBook
|
636d1adc8966db158914abba43e360c6a0d23173
|
[
"MIT"
] | 20
|
2020-12-27T15:56:24.000Z
|
2021-09-22T18:25:02.000Z
|
blackbook/migrations/0014_relinking_transactions_to_budgetperiods.py
|
bsiebens/BlackBook
|
636d1adc8966db158914abba43e360c6a0d23173
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.4 on 2020-12-23 21:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter TransactionJournalEntry.budget to reference blackbook.budgetperiod.

    The foreign key is nullable/blank and is cleared (SET_NULL) when the
    referenced budget period is deleted; the reverse accessor is
    ``transactions``.
    """
    # Must run after the migration that added UUIDs to the other models.
    dependencies = [
        ('blackbook', '0013_add_uuid_to_other_models'),
    ]
    operations = [
        migrations.AlterField(
            model_name='transactionjournalentry',
            name='budget',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='transactions', to='blackbook.budgetperiod'),
        ),
    ]
| 27.85
| 163
| 0.675045
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blackbook', '0013_add_uuid_to_other_models'),
]
operations = [
migrations.AlterField(
model_name='transactionjournalentry',
name='budget',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='transactions', to='blackbook.budgetperiod'),
),
]
| true
| true
|
f70e3309c36350f736985ae60d6539bd6ad1bb1e
| 3,254
|
py
|
Python
|
torch_geometric/nn/conv/graph_conv.py
|
Kenneth-Schroeder/pytorch_geometric
|
f7ec9e964bfae1ce5fb21d9b2b30e9e717bf8e24
|
[
"MIT"
] | 12,651
|
2017-10-28T15:14:24.000Z
|
2021-09-12T07:22:57.000Z
|
torch_geometric/nn/conv/graph_conv.py
|
Kenneth-Schroeder/pytorch_geometric
|
f7ec9e964bfae1ce5fb21d9b2b30e9e717bf8e24
|
[
"MIT"
] | 2,472
|
2017-10-30T23:38:47.000Z
|
2021-09-12T06:41:44.000Z
|
torch_geometric/nn/conv/graph_conv.py
|
Kenneth-Schroeder/pytorch_geometric
|
f7ec9e964bfae1ce5fb21d9b2b30e9e717bf8e24
|
[
"MIT"
] | 2,363
|
2017-12-01T13:25:05.000Z
|
2021-09-12T07:23:09.000Z
|
from typing import Union, Tuple
from torch_geometric.typing import OptTensor, OptPairTensor, Adj, Size
from torch import Tensor
from torch_sparse import SparseTensor, matmul
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.nn.conv import MessagePassing
class GraphConv(MessagePassing):
    r"""The graph neural network operator from the `"Weisfeiler and Leman Go
    Neural: Higher-order Graph Neural Networks"
    <https://arxiv.org/abs/1810.02244>`_ paper
    .. math::
        \mathbf{x}^{\prime}_i = \mathbf{\Theta}_1 \mathbf{x}_i +
        \mathbf{\Theta}_2 \sum_{j \in \mathcal{N}(i)} e_{j,i} \cdot
        \mathbf{x}_j
    where :math:`e_{j,i}` denotes the edge weight from source node :obj:`j` to
    target node :obj:`i` (default: :obj:`1`)
    Args:
        in_channels (int or tuple): Size of each input sample, or :obj:`-1` to
            derive the size from the first input(s) to the forward method.
            A tuple corresponds to the sizes of source and target
            dimensionalities.
        out_channels (int): Size of each output sample.
        aggr (string, optional): The aggregation scheme to use
            (:obj:`"add"`, :obj:`"mean"`, :obj:`"max"`).
            (default: :obj:`"add"`)
        bias (bool, optional): If set to :obj:`False`, the layer will not learn
            an additive bias. (default: :obj:`True`)
        **kwargs (optional): Additional arguments of
            :class:`torch_geometric.nn.conv.MessagePassing`.
    """
    def __init__(
        self,
        in_channels: Union[int, Tuple[int, int]],
        out_channels: int,
        aggr: str = 'add',
        bias: bool = True,
        **kwargs,
    ):
        super(GraphConv, self).__init__(aggr=aggr, **kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        # An int means source and target feature sizes are the same.
        if isinstance(in_channels, int):
            in_channels = (in_channels, in_channels)
        # lin_rel transforms the aggregated neighbor features (Theta_2);
        # lin_root transforms the node's own features (Theta_1) without bias,
        # so only one bias term exists in the sum.
        self.lin_rel = Linear(in_channels[0], out_channels, bias=bias)
        self.lin_root = Linear(in_channels[1], out_channels, bias=False)
        self.reset_parameters()
    def reset_parameters(self):
        """Reinitialize the weights of both linear layers."""
        self.lin_rel.reset_parameters()
        self.lin_root.reset_parameters()
    def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,
                edge_weight: OptTensor = None, size: Size = None) -> Tensor:
        """Aggregate (optionally edge-weighted) neighbor features, project
        them, and add the projected root features when target-node features
        are present."""
        # Promote a single feature tensor to a (source, target) pair.
        if isinstance(x, Tensor):
            x: OptPairTensor = (x, x)
        # propagate_type: (x: OptPairTensor, edge_weight: OptTensor)
        out = self.propagate(edge_index, x=x, edge_weight=edge_weight,
                             size=size)
        out = self.lin_rel(out)
        x_r = x[1]
        if x_r is not None:
            out += self.lin_root(x_r)
        return out
    def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor:
        # Scale each neighbor's features by the edge weight, if one is given.
        return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j
    def message_and_aggregate(self, adj_t: SparseTensor,
                              x: OptPairTensor) -> Tensor:
        # Fused fast path for sparse adjacency: one sparse-dense matmul with
        # the configured reduction replaces message() + aggregation.
        return matmul(adj_t, x[0], reduce=self.aggr)
    def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
                                   self.out_channels)
| 36.561798
| 79
| 0.613399
|
from typing import Union, Tuple
from torch_geometric.typing import OptTensor, OptPairTensor, Adj, Size
from torch import Tensor
from torch_sparse import SparseTensor, matmul
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.nn.conv import MessagePassing
class GraphConv(MessagePassing):
def __init__(
self,
in_channels: Union[int, Tuple[int, int]],
out_channels: int,
aggr: str = 'add',
bias: bool = True,
**kwargs,
):
super(GraphConv, self).__init__(aggr=aggr, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
if isinstance(in_channels, int):
in_channels = (in_channels, in_channels)
self.lin_rel = Linear(in_channels[0], out_channels, bias=bias)
self.lin_root = Linear(in_channels[1], out_channels, bias=False)
self.reset_parameters()
def reset_parameters(self):
self.lin_rel.reset_parameters()
self.lin_root.reset_parameters()
def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,
edge_weight: OptTensor = None, size: Size = None) -> Tensor:
if isinstance(x, Tensor):
x: OptPairTensor = (x, x)
out = self.propagate(edge_index, x=x, edge_weight=edge_weight,
size=size)
out = self.lin_rel(out)
x_r = x[1]
if x_r is not None:
out += self.lin_root(x_r)
return out
def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor:
return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j
def message_and_aggregate(self, adj_t: SparseTensor,
x: OptPairTensor) -> Tensor:
return matmul(adj_t, x[0], reduce=self.aggr)
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)
| true
| true
|
f70e34e21bad90e8461570263511bcb5e33ea616
| 303
|
py
|
Python
|
apps/statistics/management/commands/collect_stats.py
|
starsep/NewsBlur
|
6c59416ca82377ca1bbc7d044890bdead3eba904
|
[
"MIT"
] | 2
|
2017-08-14T03:40:11.000Z
|
2019-06-15T12:28:49.000Z
|
apps/statistics/management/commands/collect_stats.py
|
starsep/NewsBlur
|
6c59416ca82377ca1bbc7d044890bdead3eba904
|
[
"MIT"
] | 7
|
2021-02-08T20:32:31.000Z
|
2022-03-11T23:50:47.000Z
|
apps/statistics/management/commands/collect_stats.py
|
starsep/NewsBlur
|
6c59416ca82377ca1bbc7d044890bdead3eba904
|
[
"MIT"
] | 1
|
2020-11-21T08:43:15.000Z
|
2020-11-21T08:43:15.000Z
|
from optparse import make_option
from django.core.management.base import BaseCommand
from apps.statistics.models import MStatistics
class Command(BaseCommand):
    """Management command that triggers site-wide statistics collection."""
    # NOTE: the previous `option_list = BaseCommand.option_list + ()` override
    # was a no-op, and `option_list` itself was removed in Django 1.10 in
    # favor of `add_arguments()`; the command takes no custom options.
    help = "Collect statistics via MStatistics.collect_statistics()."
    def handle(self, *args, **options):
        # Delegate all work to the MStatistics model-level collector.
        MStatistics.collect_statistics()
| 27.545455
| 51
| 0.739274
|
from optparse import make_option
from django.core.management.base import BaseCommand
from apps.statistics.models import MStatistics
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
)
def handle(self, *args, **options):
MStatistics.collect_statistics()
| true
| true
|
f70e357fa181edee335244ff28450013665c1041
| 8,110
|
py
|
Python
|
litex_boards/platforms/alinx_ax7101.py
|
bitinvert/litex-boards
|
907723fff54d0e02dd19c6c5bbca6e4d7165ff6d
|
[
"BSD-2-Clause"
] | null | null | null |
litex_boards/platforms/alinx_ax7101.py
|
bitinvert/litex-boards
|
907723fff54d0e02dd19c6c5bbca6e4d7165ff6d
|
[
"BSD-2-Clause"
] | null | null | null |
litex_boards/platforms/alinx_ax7101.py
|
bitinvert/litex-boards
|
907723fff54d0e02dd19c6c5bbca6e4d7165ff6d
|
[
"BSD-2-Clause"
] | 1
|
2021-12-28T18:05:27.000Z
|
2021-12-28T18:05:27.000Z
|
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 Brendan Christy <brendan.christy@hs-rm.de>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform, VivadoProgrammer
from litex.build.openocd import OpenOCD
# IOs ----------------------------------------------------------------------------------------------
_io = [
    # Clk / Rst
    ("clk200", 0,
        Subsignal("p", Pins("R4"), IOStandard("DIFF_SSTL15")),
        Subsignal("n", Pins("T4"), IOStandard("DIFF_SSTL15"))
    ),
    ("clk125", 0,
        Subsignal("p", Pins("F6"), IOStandard("DIFF_SSTL15")),
        Subsignal("n", Pins("E6"), IOStandard("DIFF_SSTL15"))
    ),
    ("cpu_reset", 0, Pins("T6"), IOStandard("SSTL15")),
    # DDR3 SDRAM
    ("ddram", 0,
        Subsignal("a", Pins("AA4 AB2 AA5 AB5 AB1 U3 W1 T1 V2 U2 Y1 W2 Y2 U1 V3"), IOStandard("SSTL15")),
        Subsignal("ba", Pins("AA3 Y3 Y4"), IOStandard("SSTL15")),
        Subsignal("ras_n", Pins("V4"), IOStandard("SSTL15")),
        Subsignal("cas_n", Pins("W4"), IOStandard("SSTL15")),
        Subsignal("we_n", Pins("AA1"), IOStandard("SSTL15")),
        Subsignal("dm", Pins("D2 G2 M2 M5"), IOStandard("SSTL15")),
        Subsignal("dq", Pins("C2 G1 A1 F3 B2 F1 B1 E2 H3 G3 H2 H5 J1 J5 K1 H4 L4 M3 L3 J6 K3 K6 J4 L5 P1 N4 R1 N2 M6 N5 P6 P2"), IOStandard("SSTL15"), Misc("IN_TERM=UNTUNED_SPLIT_50")),
        Subsignal("dqs_p", Pins("E1 K2 M1 P5"), IOStandard("DIFF_SSTL15"), Misc("IN_TERM=UNTUNED_SPLIT_50")),
        Subsignal("dqs_n", Pins("D1 J2 L1 P4"), IOStandard("DIFF_SSTL15"), Misc("IN_TERM=UNTUNED_SPLIT_50")),
        Subsignal("clk_p", Pins("R3"), IOStandard("DIFF_SSTL15")),
        Subsignal("clk_n", Pins("R2"), IOStandard("DIFF_SSTL15")),
        Subsignal("cke", Pins("T5"), IOStandard("SSTL15")),
        Subsignal("odt", Pins("U5"), IOStandard("SSTL15")),
        Subsignal("cs_n", Pins("AB3"), IOStandard("SSTL15")),
        Subsignal("reset_n", Pins("W6"), IOStandard("LVCMOS15")),
        Misc("SLEW=FAST"),
    ),
    # UART
    ("serial", 0,
        Subsignal("tx", Pins("AB15")),
        Subsignal("rx", Pins("AA15")),
        IOStandard("LVCMOS33"),
    ),
    # GMII Ethernet (four PHYs: one clock group + one signal group each)
    ("eth_clocks_ext", 0,
        Subsignal("tx", Pins("K21")),
        Subsignal("gtx", Pins("G21")),
        Subsignal("rx", Pins("K18")),
        IOStandard("LVCMOS33")
    ),
    ("eth1_clocks_ext", 0,
        Subsignal("tx", Pins("T14")),
        Subsignal("gtx", Pins("M16")),
        Subsignal("rx", Pins("J20")),
        IOStandard("LVCMOS33")
    ),
    ("eth2_clocks_ext", 0,
        Subsignal("tx", Pins("V10")),
        Subsignal("gtx", Pins("AA21")),
        Subsignal("rx", Pins("V13")),
        IOStandard("LVCMOS33")
    ),
    ("eth3_clocks_ext", 0,
        Subsignal("tx", Pins("U16")),
        Subsignal("gtx", Pins("P20")),
        Subsignal("rx", Pins("Y18")),
        IOStandard("LVCMOS33")
    ),
    ("eth", 0,
        Subsignal("rst_n", Pins("G20")),
        Subsignal("int_n", Pins("D14"), Misc("KEEPER = TRUE")),
        Subsignal("mdio", Pins("L16")),
        Subsignal("mdc", Pins("J17")),
        Subsignal("rx_dv", Pins("M22")),
        Subsignal("rx_er", Pins("N18")),
        Subsignal("rx_data", Pins("N22 H18 H17 M21 L21 N20 M20 N19")),
        Subsignal("tx_en", Pins("G22")),
        Subsignal("tx_er", Pins("K17")),
        Subsignal("tx_data", Pins("D22 H20 H22 J22 K22 L19 K19 L20")),
        Subsignal("col", Pins("M18")),
        Subsignal("crs", Pins("L18")),
        IOStandard("LVCMOS33")
    ),
    ("eth", 1,
        Subsignal("rst_n", Pins("L14")),
        Subsignal("int_n", Pins("E14"), Misc("KEEPER = TRUE")),
        Subsignal("mdc", Pins("AB21")),
        Subsignal("mdio", Pins("AB22")),
        Subsignal("rx_dv", Pins("L13")),
        Subsignal("rx_er", Pins("G13")),
        Subsignal("rx_data", Pins("M13 K14 K13 J14 H14 H15 J15 H13")),
        Subsignal("tx_en", Pins("M15")),
        Subsignal("tx_er", Pins("T15")),
        Subsignal("tx_data", Pins("L15 K16 W15 W16 V17 W17 U15 V15")),
        Subsignal("col", Pins("J21")),
        Subsignal("crs", Pins("E22")),
        IOStandard("LVCMOS33")
    ),
    ("eth", 2,
        Subsignal("rst_n", Pins("T20")),
        Subsignal("int_n", Pins("E13"), Misc("KEEPER = TRUE")),
        Subsignal("mdc", Pins("V20")),
        Subsignal("mdio", Pins("V19")),
        Subsignal("rx_dv", Pins("AA20")),
        Subsignal("rx_er", Pins("U21")),
        Subsignal("rx_data", Pins("AB20 AA19 AA18 AB18 Y17 W22 W21 T21")),
        Subsignal("tx_en", Pins("V14")),
        Subsignal("tx_er", Pins("AA9")),
        Subsignal("tx_data", Pins("W11 W12 Y11 Y12 W10 AA11 AA10 AB10")),
        Subsignal("col", Pins("Y21")),
        Subsignal("crs", Pins("Y22")),
        IOStandard("LVCMOS33")
    ),
    ("eth", 3,
        Subsignal("rst_n", Pins("R16")),
        Subsignal("int_n", Pins("F13")),
        Subsignal("mdc", Pins("V18")),
        Subsignal("mdio", Pins("U20")),
        Subsignal("rx_dv", Pins("W20")),
        Subsignal("rx_er", Pins("N13")),
        Subsignal("rx_data", Pins("W19 Y19 V22 U22 T18 R18 R14 P14")),
        Subsignal("tx_en", Pins("P16")),
        Subsignal("tx_er", Pins("R19")),
        Subsignal("tx_data", Pins("R17 P15 N17 P17 T16 U17 U18 P19")),
        Subsignal("col", Pins("N14")),
        Subsignal("crs", Pins("N15")),
        IOStandard("LVCMOS33")
    ),
    # SD Card, SPI mode (shares the F20/A20/J16/B22 pins with the native
    # 4-bit interface below — only one of the two can be used at a time).
    ("spisdcard", 0,
        Subsignal("clk", Pins("J16")),
        Subsignal("mosi", Pins("A20"), Misc("PULLUP true")),
        Subsignal("cs_n", Pins("B22"), Misc("PULLUP true")),
        Subsignal("miso", Pins("F20"), Misc("PULLUP true")),
        Misc("SLEW=FAST"),
        IOStandard("LVCMOS33"),
    ),
    # SD Card, native 4-bit mode.
    ("sdcard", 0,
        Subsignal("data", Pins("F20 C22 B20 B22"), Misc("PULLUP true")),
        Subsignal("cmd", Pins("A20"), Misc("PULLUP true")),
        Subsignal("clk", Pins("J16")),
        Subsignal("cd", Pins("F19")),
        Misc("SLEW=FAST"),
        IOStandard("LVCMOS33"),
    )
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = []
class Platform(XilinxPlatform):
    """Alinx AX7101 board: Artix-7 XC7A100T (FGG484-2), 200 MHz system clock."""
    default_clk_name = "clk200"
    default_clk_period = 1e9/200e6
    def __init__(self) -> None:
        XilinxPlatform.__init__(self, "xc7a100t-fgg484-2", _io, _connectors, toolchain="vivado")
        # 0.75 V internal reference on banks 34/35 (SSTL15 levels; these banks
        # are assumed to carry the DDR3 pins — confirm against the board pinout).
        self.add_platform_command("set_property INTERNAL_VREF 0.750 [get_iobanks 34]")
        self.add_platform_command("set_property INTERNAL_VREF 0.750 [get_iobanks 35]")
    def create_programmer(self):
        """Program via OpenOCD using the matching BSCAN-SPI proxy bitstream."""
        return OpenOCD("openocd_ax7101.cfg", "bscan_spi_xc7a100t.bit")
    def do_finalize(self, fragment):
        """Add timing constraints: 200 MHz system clock, 125 MHz per GMII clock."""
        XilinxPlatform.do_finalize(self, fragment)
        self.add_period_constraint(self.lookup_request("clk200", loose=True), 1e9/200e6)
        # All four GMII PHY clock groups run at 125 MHz; constrain gtx/tx/rx of
        # each in a loop instead of twelve near-identical statements.
        for eth in ("eth_clocks_ext", "eth1_clocks_ext", "eth2_clocks_ext", "eth3_clocks_ext"):
            for clk in ("gtx", "tx", "rx"):
                self.add_period_constraint(
                    self.lookup_request("{}:{}".format(eth, clk), loose=True), 1e9/125e6)
| 42.910053
| 185
| 0.579162
|
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform, VivadoProgrammer
from litex.build.openocd import OpenOCD
_io = [
("clk200", 0,
Subsignal("p", Pins("R4"), IOStandard("DIFF_SSTL15")),
Subsignal("n", Pins("T4"), IOStandard("DIFF_SSTL15"))
),
("clk125", 0,
Subsignal("p", Pins("F6"), IOStandard("DIFF_SSTL15")),
Subsignal("n", Pins("E6"), IOStandard("DIFF_SSTL15"))
),
("cpu_reset", 0, Pins("T6"), IOStandard("SSTL15")),
("ddram", 0,
Subsignal("a", Pins("AA4 AB2 AA5 AB5 AB1 U3 W1 T1 V2 U2 Y1 W2 Y2 U1 V3"), IOStandard("SSTL15")),
Subsignal("ba", Pins("AA3 Y3 Y4"), IOStandard("SSTL15")),
Subsignal("ras_n", Pins("V4"), IOStandard("SSTL15")),
Subsignal("cas_n", Pins("W4"), IOStandard("SSTL15")),
Subsignal("we_n", Pins("AA1"), IOStandard("SSTL15")),
Subsignal("dm", Pins("D2 G2 M2 M5"), IOStandard("SSTL15")),
Subsignal("dq", Pins("C2 G1 A1 F3 B2 F1 B1 E2 H3 G3 H2 H5 J1 J5 K1 H4 L4 M3 L3 J6 K3 K6 J4 L5 P1 N4 R1 N2 M6 N5 P6 P2"), IOStandard("SSTL15"), Misc("IN_TERM=UNTUNED_SPLIT_50")),
Subsignal("dqs_p", Pins("E1 K2 M1 P5"), IOStandard("DIFF_SSTL15"), Misc("IN_TERM=UNTUNED_SPLIT_50")),
Subsignal("dqs_n", Pins("D1 J2 L1 P4"), IOStandard("DIFF_SSTL15"), Misc("IN_TERM=UNTUNED_SPLIT_50")),
Subsignal("clk_p", Pins("R3"), IOStandard("DIFF_SSTL15")),
Subsignal("clk_n", Pins("R2"), IOStandard("DIFF_SSTL15")),
Subsignal("cke", Pins("T5"), IOStandard("SSTL15")),
Subsignal("odt", Pins("U5"), IOStandard("SSTL15")),
Subsignal("cs_n", Pins("AB3"), IOStandard("SSTL15")),
Subsignal("reset_n", Pins("W6"), IOStandard("LVCMOS15")),
Misc("SLEW=FAST"),
),
("serial", 0,
Subsignal("tx", Pins("AB15")),
Subsignal("rx", Pins("AA15")),
IOStandard("LVCMOS33"),
),
("eth_clocks_ext", 0,
Subsignal("tx", Pins("K21")),
Subsignal("gtx", Pins("G21")),
Subsignal("rx", Pins("K18")),
IOStandard("LVCMOS33")
),
("eth1_clocks_ext", 0,
Subsignal("tx", Pins("T14")),
Subsignal("gtx", Pins("M16")),
Subsignal("rx", Pins("J20")),
IOStandard("LVCMOS33")
),
("eth2_clocks_ext", 0,
Subsignal("tx", Pins("V10")),
Subsignal("gtx", Pins("AA21")),
Subsignal("rx", Pins("V13")),
IOStandard("LVCMOS33")
),
("eth3_clocks_ext", 0,
Subsignal("tx", Pins("U16")),
Subsignal("gtx", Pins("P20")),
Subsignal("rx", Pins("Y18")),
IOStandard("LVCMOS33")
),
("eth", 0,
Subsignal("rst_n", Pins("G20")),
Subsignal("int_n", Pins("D14"), Misc("KEEPER = TRUE")),
Subsignal("mdio", Pins("L16")),
Subsignal("mdc", Pins("J17")),
Subsignal("rx_dv", Pins("M22")),
Subsignal("rx_er", Pins("N18")),
Subsignal("rx_data", Pins("N22 H18 H17 M21 L21 N20 M20 N19")),
Subsignal("tx_en", Pins("G22")),
Subsignal("tx_er", Pins("K17")),
Subsignal("tx_data", Pins("D22 H20 H22 J22 K22 L19 K19 L20")),
Subsignal("col", Pins("M18")),
Subsignal("crs", Pins("L18")),
IOStandard("LVCMOS33")
),
("eth", 1,
Subsignal("rst_n", Pins("L14")),
Subsignal("int_n", Pins("E14"), Misc("KEEPER = TRUE")),
Subsignal("mdc", Pins("AB21")),
Subsignal("mdio", Pins("AB22")),
Subsignal("rx_dv", Pins("L13")),
Subsignal("rx_er", Pins("G13")),
Subsignal("rx_data", Pins("M13 K14 K13 J14 H14 H15 J15 H13")),
Subsignal("tx_en", Pins("M15")),
Subsignal("tx_er", Pins("T15")),
Subsignal("tx_data", Pins("L15 K16 W15 W16 V17 W17 U15 V15")),
Subsignal("col", Pins("J21")),
Subsignal("crs", Pins("E22")),
IOStandard("LVCMOS33")
),
("eth", 2,
Subsignal("rst_n", Pins("T20")),
Subsignal("int_n", Pins("E13"), Misc("KEEPER = TRUE")),
Subsignal("mdc", Pins("V20")),
Subsignal("mdio", Pins("V19")),
Subsignal("rx_dv", Pins("AA20")),
Subsignal("rx_er", Pins("U21")),
Subsignal("rx_data", Pins("AB20 AA19 AA18 AB18 Y17 W22 W21 T21")),
Subsignal("tx_en", Pins("V14")),
Subsignal("tx_er", Pins("AA9")),
Subsignal("tx_data", Pins("W11 W12 Y11 Y12 W10 AA11 AA10 AB10")),
Subsignal("col", Pins("Y21")),
Subsignal("crs", Pins("Y22")),
IOStandard("LVCMOS33")
),
("eth", 3,
Subsignal("rst_n", Pins("R16")),
Subsignal("int_n", Pins("F13")),
Subsignal("mdc", Pins("V18")),
Subsignal("mdio", Pins("U20")),
Subsignal("rx_dv", Pins("W20")),
Subsignal("rx_er", Pins("N13")),
Subsignal("rx_data", Pins("W19 Y19 V22 U22 T18 R18 R14 P14")),
Subsignal("tx_en", Pins("P16")),
Subsignal("tx_er", Pins("R19")),
Subsignal("tx_data", Pins("R17 P15 N17 P17 T16 U17 U18 P19")),
Subsignal("col", Pins("N14")),
Subsignal("crs", Pins("N15")),
IOStandard("LVCMOS33")
),
("spisdcard", 0,
Subsignal("clk", Pins("J16")),
Subsignal("mosi", Pins("A20"), Misc("PULLUP true")),
Subsignal("cs_n", Pins("B22"), Misc("PULLUP true")),
Subsignal("miso", Pins("F20"), Misc("PULLUP true")),
Misc("SLEW=FAST"),
IOStandard("LVCMOS33"),
),
("sdcard", 0,
Subsignal("data", Pins("F20 C22 B20 B22"), Misc("PULLUP true")),
Subsignal("cmd", Pins("A20"), Misc("PULLUP true")),
Subsignal("clk", Pins("J16")),
Subsignal("cd", Pins("F19")),
Misc("SLEW=FAST"),
IOStandard("LVCMOS33"),
)
]
_connectors = []
class Platform(XilinxPlatform):
default_clk_name = "clk200"
default_clk_period = 1e9/200e6
def __init__(self) -> None:
XilinxPlatform.__init__(self, "xc7a100t-fgg484-2", _io, _connectors, toolchain="vivado")
self.add_platform_command("set_property INTERNAL_VREF 0.750 [get_iobanks 34]")
self.add_platform_command("set_property INTERNAL_VREF 0.750 [get_iobanks 35]")
def create_programmer(self):
return OpenOCD("openocd_ax7101.cfg", "bscan_spi_xc7a100t.bit")
def do_finalize(self, fragment):
XilinxPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk200", loose=True), 1e9/200e6)
self.add_period_constraint(self.lookup_request("eth_clocks_ext:gtx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth_clocks_ext:tx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth_clocks_ext:rx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth1_clocks_ext:gtx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth1_clocks_ext:tx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth1_clocks_ext:rx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth2_clocks_ext:gtx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth2_clocks_ext:tx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth2_clocks_ext:rx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth3_clocks_ext:gtx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth3_clocks_ext:tx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth3_clocks_ext:rx", loose=True), 1e9/125e6)
| true
| true
|
f70e359018c3901f8ab6fad966adece64040b487
| 12,841
|
py
|
Python
|
django/db/migrations/graph.py
|
ni-ning/django
|
2e7ba6057cfc82a15a22b6021cd60cf307152e2d
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 304
|
2015-01-06T18:02:49.000Z
|
2021-12-11T18:08:37.000Z
|
checkerista/.env/Lib/site-packages/django/db/migrations/graph.py
|
LybaFatimaNasir/CS311S20PID02
|
bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39
|
[
"MIT"
] | 123
|
2019-09-10T14:48:01.000Z
|
2019-11-28T21:24:06.000Z
|
checkerista/.env/Lib/site-packages/django/db/migrations/graph.py
|
LybaFatimaNasir/CS311S20PID02
|
bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39
|
[
"MIT"
] | 145
|
2019-03-14T18:54:45.000Z
|
2022-03-04T20:25:31.000Z
|
from functools import total_ordering
from django.db.migrations.state import ProjectState
from .exceptions import CircularDependencyError, NodeNotFoundError
@total_ordering
class Node:
    """One migration in the dependency digraph, linked to its neighbors in
    both directions via ``parents`` and ``children`` sets."""
    def __init__(self, key):
        self.key = key
        self.parents = set()
        self.children = set()
    # Equality, ordering, hashing, and indexing all delegate to the wrapped
    # key, so a Node is interchangeable with its plain (app, name) tuple.
    def __eq__(self, other):
        return self.key == other
    def __lt__(self, other):
        return self.key < other
    def __hash__(self):
        return hash(self.key)
    def __getitem__(self, item):
        return self.key[item]
    def __str__(self):
        return str(self.key)
    def __repr__(self):
        app_label, migration_name = self.key[0], self.key[1]
        return '<%s: (%r, %r)>' % (self.__class__.__name__, app_label, migration_name)
    def add_child(self, child):
        self.children.add(child)
    def add_parent(self, parent):
        self.parents.add(parent)
class DummyNode(Node):
    """
    A node that doesn't correspond to a migration file on disk.
    (A squashed migration that was removed, for example.)
    After the migration graph is processed, all dummy nodes should be removed.
    If there are any left, a nonexistent dependency error is raised.
    """
    def __init__(self, key, origin, error_message):
        # origin: the migration whose dependency list referenced this
        # nonexistent node; kept so raise_error() can point at the culprit.
        super().__init__(key)
        self.origin = origin
        self.error_message = error_message
    def raise_error(self):
        # Deferred failure: only fired if this dummy survives graph building.
        raise NodeNotFoundError(self.error_message, self.key, origin=self.origin)
class MigrationGraph:
"""
Represent the digraph of all migrations in a project.
Each migration is a node, and each dependency is an edge. There are
no implicit dependencies between numbered migrations - the numbering is
merely a convention to aid file listing. Every new numbered migration
has a declared dependency to the previous number, meaning that VCS
branch merges can be detected and resolved.
Migrations files can be marked as replacing another set of migrations -
this is to support the "squash" feature. The graph handler isn't responsible
for these; instead, the code to load them in here should examine the
migration files and if the replaced migrations are all either unapplied
or not present, it should ignore the replaced ones, load in just the
replacing migration, and repoint any dependencies that pointed to the
replaced migrations to point to the replacing one.
A node should be a tuple: (app_path, migration_name). The tree special-cases
things within an app - namely, root nodes and leaf nodes ignore dependencies
to other apps.
"""
def __init__(self):
self.node_map = {}
self.nodes = {}
def add_node(self, key, migration):
assert key not in self.node_map
node = Node(key)
self.node_map[key] = node
self.nodes[key] = migration
def add_dummy_node(self, key, origin, error_message):
node = DummyNode(key, origin, error_message)
self.node_map[key] = node
self.nodes[key] = None
def add_dependency(self, migration, child, parent, skip_validation=False):
"""
This may create dummy nodes if they don't yet exist. If
`skip_validation=True`, validate_consistency() should be called
afterwards.
"""
if child not in self.nodes:
error_message = (
"Migration %s dependencies reference nonexistent"
" child node %r" % (migration, child)
)
self.add_dummy_node(child, migration, error_message)
if parent not in self.nodes:
error_message = (
"Migration %s dependencies reference nonexistent"
" parent node %r" % (migration, parent)
)
self.add_dummy_node(parent, migration, error_message)
self.node_map[child].add_parent(self.node_map[parent])
self.node_map[parent].add_child(self.node_map[child])
if not skip_validation:
self.validate_consistency()
def remove_replaced_nodes(self, replacement, replaced):
"""
Remove each of the `replaced` nodes (when they exist). Any
dependencies that were referencing them are changed to reference the
`replacement` node instead.
"""
# Cast list of replaced keys to set to speed up lookup later.
replaced = set(replaced)
try:
replacement_node = self.node_map[replacement]
except KeyError as err:
raise NodeNotFoundError(
"Unable to find replacement node %r. It was either never added"
" to the migration graph, or has been removed." % (replacement,),
replacement
) from err
for replaced_key in replaced:
self.nodes.pop(replaced_key, None)
replaced_node = self.node_map.pop(replaced_key, None)
if replaced_node:
for child in replaced_node.children:
child.parents.remove(replaced_node)
# We don't want to create dependencies between the replaced
# node and the replacement node as this would lead to
# self-referencing on the replacement node at a later iteration.
if child.key not in replaced:
replacement_node.add_child(child)
child.add_parent(replacement_node)
for parent in replaced_node.parents:
parent.children.remove(replaced_node)
# Again, to avoid self-referencing.
if parent.key not in replaced:
replacement_node.add_parent(parent)
parent.add_child(replacement_node)
def remove_replacement_node(self, replacement, replaced):
"""
The inverse operation to `remove_replaced_nodes`. Almost. Remove the
replacement node `replacement` and remap its child nodes to `replaced`
- the list of nodes it would have replaced. Don't remap its parent
nodes as they are expected to be correct already.
"""
self.nodes.pop(replacement, None)
try:
replacement_node = self.node_map.pop(replacement)
except KeyError as err:
raise NodeNotFoundError(
"Unable to remove replacement node %r. It was either never added"
" to the migration graph, or has been removed already." % (replacement,),
replacement
) from err
replaced_nodes = set()
replaced_nodes_parents = set()
for key in replaced:
replaced_node = self.node_map.get(key)
if replaced_node:
replaced_nodes.add(replaced_node)
replaced_nodes_parents |= replaced_node.parents
# We're only interested in the latest replaced node, so filter out
# replaced nodes that are parents of other replaced nodes.
replaced_nodes -= replaced_nodes_parents
for child in replacement_node.children:
child.parents.remove(replacement_node)
for replaced_node in replaced_nodes:
replaced_node.add_child(child)
child.add_parent(replaced_node)
for parent in replacement_node.parents:
parent.children.remove(replacement_node)
# NOTE: There is no need to remap parent dependencies as we can
# assume the replaced nodes already have the correct ancestry.
def validate_consistency(self):
"""Ensure there are no dummy nodes remaining in the graph."""
[n.raise_error() for n in self.node_map.values() if isinstance(n, DummyNode)]
def forwards_plan(self, target):
"""
Given a node, return a list of which previous nodes (dependencies) must
be applied, ending with the node itself. This is the list you would
follow if applying the migrations to a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target,), target)
return self.iterative_dfs(self.node_map[target])
def backwards_plan(self, target):
"""
Given a node, return a list of which dependent nodes (dependencies)
must be unapplied, ending with the node itself. This is the list you
would follow if removing the migrations from a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target,), target)
return self.iterative_dfs(self.node_map[target], forwards=False)
def iterative_dfs(self, start, forwards=True):
"""Iterative depth-first search for finding dependencies."""
visited = []
visited_set = set()
stack = [(start, False)]
while stack:
node, processed = stack.pop()
if node in visited_set:
pass
elif processed:
visited_set.add(node)
visited.append(node.key)
else:
stack.append((node, True))
stack += [(n, False) for n in sorted(node.parents if forwards else node.children)]
return visited
def root_nodes(self, app=None):
"""
Return all root nodes - that is, nodes with no dependencies inside
their app. These are the starting point for an app.
"""
roots = set()
for node in self.nodes:
if all(key[0] != node[0] for key in self.node_map[node].parents) and (not app or app == node[0]):
roots.add(node)
return sorted(roots)
def leaf_nodes(self, app=None):
"""
Return all leaf nodes - that is, nodes with no dependents in their app.
These are the "most current" version of an app's schema.
Having more than one per app is technically an error, but one that
gets handled further up, in the interactive command - it's usually the
result of a VCS merge and needs some user input.
"""
leaves = set()
for node in self.nodes:
if all(key[0] != node[0] for key in self.node_map[node].children) and (not app or app == node[0]):
leaves.add(node)
return sorted(leaves)
def ensure_not_cyclic(self):
# Algo from GvR:
# https://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html
todo = set(self.nodes)
while todo:
node = todo.pop()
stack = [node]
while stack:
top = stack[-1]
for child in self.node_map[top].children:
# Use child.key instead of child to speed up the frequent
# hashing.
node = child.key
if node in stack:
cycle = stack[stack.index(node):]
raise CircularDependencyError(", ".join("%s.%s" % n for n in cycle))
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
def __str__(self):
return 'Graph: %s nodes, %s edges' % self._nodes_and_edges()
def __repr__(self):
nodes, edges = self._nodes_and_edges()
return '<%s: nodes=%s, edges=%s>' % (self.__class__.__name__, nodes, edges)
def _nodes_and_edges(self):
return len(self.nodes), sum(len(node.parents) for node in self.node_map.values())
def _generate_plan(self, nodes, at_end):
plan = []
for node in nodes:
for migration in self.forwards_plan(node):
if migration not in plan and (at_end or migration not in nodes):
plan.append(migration)
return plan
def make_state(self, nodes=None, at_end=True, real_apps=None):
"""
Given a migration node or nodes, return a complete ProjectState for it.
If at_end is False, return the state before the migration has run.
If nodes is not provided, return the overall most current project state.
"""
if nodes is None:
nodes = list(self.leaf_nodes())
if not nodes:
return ProjectState()
if not isinstance(nodes[0], tuple):
nodes = [nodes]
plan = self._generate_plan(nodes, at_end)
project_state = ProjectState(real_apps=real_apps)
for node in plan:
project_state = self.nodes[node].mutate_state(project_state, preserve=False)
return project_state
    def __contains__(self, node):
        """Support `key in graph` membership checks against known migrations."""
        return node in self.nodes
| 40.128125
| 110
| 0.613114
|
from functools import total_ordering
from django.db.migrations.state import ProjectState
from .exceptions import CircularDependencyError, NodeNotFoundError
@total_ordering
class Node:
    """
    A single node in the migration graph.  ``key`` is the
    (app_label, migration_name) tuple; ``parents``/``children`` hold direct
    dependency links to other Node objects.
    """
    def __init__(self, key):
        self.key = key
        # Direct dependents and prerequisites of this node.
        self.children = set()
        self.parents = set()
    def __eq__(self, other):
        # Compare by key, so a Node also equals its raw key tuple.
        return self.key == other
    def __lt__(self, other):
        return self.key < other
    def __hash__(self):
        return hash(self.key)
    def __getitem__(self, item):
        # Allow node[0] / node[1] access to the key tuple parts.
        return self.key[item]
    def __str__(self):
        return str(self.key)
    def __repr__(self):
        return '<%s: (%r, %r)>' % (self.__class__.__name__, self.key[0], self.key[1])
    def add_child(self, child):
        self.children.add(child)
    def add_parent(self, parent):
        self.parents.add(parent)
class DummyNode(Node):
    """
    Placeholder for a node referenced by a dependency but never added to the
    graph; raise_error() reports the recorded NodeNotFoundError details.
    """
    def __init__(self, key, origin, error_message):
        super().__init__(key)
        # The migration whose dependency referenced this missing node.
        self.origin = origin
        self.error_message = error_message
    def raise_error(self):
        raise NodeNotFoundError(self.error_message, self.key, origin=self.origin)
class MigrationGraph:
    """
    Directed graph of migration dependencies.  Node keys are
    (app_label, migration_name) tuples; ``nodes`` maps keys to migration
    instances and ``node_map`` maps keys to Node graph objects.
    """
    def __init__(self):
        self.node_map = {}
        self.nodes = {}
    def add_node(self, key, migration):
        assert key not in self.node_map
        node = Node(key)
        self.node_map[key] = node
        self.nodes[key] = migration
    def add_dummy_node(self, key, origin, error_message):
        # Record a missing dependency target; validate_consistency() will
        # surface the error later.
        node = DummyNode(key, origin, error_message)
        self.node_map[key] = node
        self.nodes[key] = None
    def add_dependency(self, migration, child, parent, skip_validation=False):
        """Add an edge meaning *child* depends on *parent*; unknown endpoints
        become dummy nodes so the error can be reported coherently."""
        if child not in self.nodes:
            error_message = (
                "Migration %s dependencies reference nonexistent"
                " child node %r" % (migration, child)
            )
            self.add_dummy_node(child, migration, error_message)
        if parent not in self.nodes:
            error_message = (
                "Migration %s dependencies reference nonexistent"
                " parent node %r" % (migration, parent)
            )
            self.add_dummy_node(parent, migration, error_message)
        self.node_map[child].add_parent(self.node_map[parent])
        self.node_map[parent].add_child(self.node_map[child])
        if not skip_validation:
            self.validate_consistency()
    def remove_replaced_nodes(self, replacement, replaced):
        """Remove every node in *replaced*, rewiring its outside edges onto
        the *replacement* node (used when a squashed migration takes over)."""
        replaced = set(replaced)
        try:
            replacement_node = self.node_map[replacement]
        except KeyError as err:
            raise NodeNotFoundError(
                "Unable to find replacement node %r. It was either never added"
                " to the migration graph, or has been removed." % (replacement,),
                replacement
            ) from err
        for replaced_key in replaced:
            self.nodes.pop(replaced_key, None)
            replaced_node = self.node_map.pop(replaced_key, None)
            if replaced_node:
                for child in replaced_node.children:
                    child.parents.remove(replaced_node)
                    # Skip edges between a replaced node and the replacement
                    # node as this would lead to self-referencing on the
                    # replacement node at a later iteration.
                    if child.key not in replaced:
                        replacement_node.add_child(child)
                        child.add_parent(replacement_node)
                for parent in replaced_node.parents:
                    parent.children.remove(replaced_node)
                    # Again, to avoid self-referencing.
                    if parent.key not in replaced:
                        replacement_node.add_parent(parent)
                        parent.add_child(replacement_node)
    def remove_replacement_node(self, replacement, replaced):
        """Remove the *replacement* node, rewiring its children back onto the
        most recent of the *replaced* nodes (the squash is being undone)."""
        self.nodes.pop(replacement, None)
        try:
            replacement_node = self.node_map.pop(replacement)
        except KeyError as err:
            raise NodeNotFoundError(
                "Unable to remove replacement node %r. It was either never added"
                " to the migration graph, or has been removed already." % (replacement,),
                replacement
            ) from err
        replaced_nodes = set()
        replaced_nodes_parents = set()
        for key in replaced:
            replaced_node = self.node_map.get(key)
            if replaced_node:
                replaced_nodes.add(replaced_node)
                replaced_nodes_parents |= replaced_node.parents
        # We're only interested in the latest replaced node, so filter out
        # replaced nodes that are parents of other replaced nodes.
        replaced_nodes -= replaced_nodes_parents
        for child in replacement_node.children:
            child.parents.remove(replacement_node)
            for replaced_node in replaced_nodes:
                replaced_node.add_child(child)
                child.add_parent(replaced_node)
        for parent in replacement_node.parents:
            parent.children.remove(replacement_node)
    def validate_consistency(self):
        """Raise an error for any dummy (unresolved) node left in the graph."""
        [n.raise_error() for n in self.node_map.values() if isinstance(n, DummyNode)]
    def forwards_plan(self, target):
        """Return the ordered list of node keys to apply, ending at *target*."""
        if target not in self.nodes:
            raise NodeNotFoundError("Node %r not a valid node" % (target,), target)
        return self.iterative_dfs(self.node_map[target])
    def backwards_plan(self, target):
        """Return the ordered list of node keys to unapply, ending at *target*."""
        if target not in self.nodes:
            raise NodeNotFoundError("Node %r not a valid node" % (target,), target)
        return self.iterative_dfs(self.node_map[target], forwards=False)
    def iterative_dfs(self, start, forwards=True):
        """Iterative depth-first search for finding dependencies."""
        visited = []
        visited_set = set()
        # Each stack entry is (node, processed): a node is emitted only on
        # its second visit, after its dependencies were pushed.
        stack = [(start, False)]
        while stack:
            node, processed = stack.pop()
            if node in visited_set:
                pass
            elif processed:
                visited_set.add(node)
                visited.append(node.key)
            else:
                stack.append((node, True))
                stack += [(n, False) for n in sorted(node.parents if forwards else node.children)]
        return visited
    def root_nodes(self, app=None):
        """
        Return all root nodes - that is, nodes with no dependencies inside
        their app. These are the starting point for an app.
        """
        roots = set()
        for node in self.nodes:
            if all(key[0] != node[0] for key in self.node_map[node].parents) and (not app or app == node[0]):
                roots.add(node)
        return sorted(roots)
    def leaf_nodes(self, app=None):
        """
        Return all leaf nodes - that is, nodes with no dependents in their
        app. These are the "most current" version of an app's schema.
        """
        leaves = set()
        for node in self.nodes:
            if all(key[0] != node[0] for key in self.node_map[node].children) and (not app or app == node[0]):
                leaves.add(node)
        return sorted(leaves)
    def ensure_not_cyclic(self):
        """Raise CircularDependencyError if the graph contains a cycle."""
        # Algo from GvR:
        # https://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html
        todo = set(self.nodes)
        while todo:
            node = todo.pop()
            stack = [node]
            while stack:
                top = stack[-1]
                for child in self.node_map[top].children:
                    # Use child.key instead of child to speed up the frequent
                    # hashing.
                    node = child.key
                    if node in stack:
                        cycle = stack[stack.index(node):]
                        raise CircularDependencyError(", ".join("%s.%s" % n for n in cycle))
                    if node in todo:
                        stack.append(node)
                        todo.remove(node)
                        break
                else:
                    # Node fully explored.
                    node = stack.pop()
    def __str__(self):
        return 'Graph: %s nodes, %s edges' % self._nodes_and_edges()
    def __repr__(self):
        nodes, edges = self._nodes_and_edges()
        return '<%s: nodes=%s, edges=%s>' % (self.__class__.__name__, nodes, edges)
    def _nodes_and_edges(self):
        # Edges are counted once each, via the parent links.
        return len(self.nodes), sum(len(node.parents) for node in self.node_map.values())
    def _generate_plan(self, nodes, at_end):
        # Merge the forwards plans of all targets, preserving order and
        # skipping duplicates; targets themselves are skipped unless at_end.
        plan = []
        for node in nodes:
            for migration in self.forwards_plan(node):
                if migration not in plan and (at_end or migration not in nodes):
                    plan.append(migration)
        return plan
    def make_state(self, nodes=None, at_end=True, real_apps=None):
        """
        Given a migration node or nodes, return a complete ProjectState for it.
        If at_end is False, return the state before the migration has run.
        If nodes is not provided, return the overall most current project state.
        """
        if nodes is None:
            nodes = list(self.leaf_nodes())
        if not nodes:
            return ProjectState()
        if not isinstance(nodes[0], tuple):
            nodes = [nodes]
        plan = self._generate_plan(nodes, at_end)
        project_state = ProjectState(real_apps=real_apps)
        for node in plan:
            project_state = self.nodes[node].mutate_state(project_state, preserve=False)
        return project_state
    def __contains__(self, node):
        return node in self.nodes
| true
| true
|
f70e35b4ecfce7e7d8d824aa5520b70979374a28
| 898
|
py
|
Python
|
compare.py
|
zhoukaisspu/py_script
|
784c78a0db541b527fa39f13c1aa4d2d15b8438f
|
[
"BSD-2-Clause"
] | null | null | null |
compare.py
|
zhoukaisspu/py_script
|
784c78a0db541b527fa39f13c1aa4d2d15b8438f
|
[
"BSD-2-Clause"
] | null | null | null |
compare.py
|
zhoukaisspu/py_script
|
784c78a0db541b527fa39f13c1aa4d2d15b8438f
|
[
"BSD-2-Clause"
] | null | null | null |
# compare contents of two files in binary form
import sys
def compareFile(srcFile, destFile):
    """
    Compare the contents of two files byte-by-byte.

    Prints a message when the sizes differ and one line per differing byte
    index within the common prefix, then prints an overall verdict.

    Returns True when the files are byte-identical, False otherwise.
    (New, backward-compatible return value — previously the result was only
    printed, forcing callers to parse stdout.)
    """
    with open(srcFile, "rb") as src:
        srcData = src.read()
    with open(destFile, "rb") as dest:
        destData = dest.read()
    # Tracks whether any difference has been observed.
    checked = False
    if len(srcData) != len(destData):
        print("It unequal between ",srcFile,destFile,". The file size is different")
        checked = True
    # Only the overlapping prefix can be compared when sizes differ.
    for i in range(min(len(srcData), len(destData))):
        if srcData[i] != destData[i]:
            print("unequal index:%d, modleDatata:%d, flashData:%d " % (i,srcData[i],destData[i]))
            checked = True
    if checked:
        print('Check Result: unequal')
    else:
        print('Check Result: equal')
    return not checked
def main():
    """CLI entry point: compare the two files named on the command line."""
    argv = sys.argv
    if len(argv) != 3:
        print('Wrong parameters,need two files')
        return
    compareFile(argv[1], argv[2])


if __name__ == '__main__':
    main()
| 29.933333
| 98
| 0.603563
|
import sys
def compareFile(srcFile,destFile):
    """
    Compare two files byte-by-byte, printing every difference found.

    Reads both files fully into memory, reports a size mismatch and each
    differing byte index in the common prefix, then prints an overall
    equal/unequal verdict.
    """
    with open(srcFile,"rb") as src:
        srcData = src.read()
    with open(destFile,"rb") as dest:
        destData = dest.read()
    # Tracks whether any difference has been observed.
    checked = False
    if(len(srcData)!=len(destData)):
        print("It unequal between ",srcFile,destFile,". The file size is different")
        checked = True
    # Only the overlapping prefix can be compared when sizes differ.
    for i in range(min(len(srcData),len(destData))):
        if(srcData[i] != destData[i]):
            print("unequal index:%d, modleDatata:%d, flashData:%d " % (i,srcData[i],destData[i]))
            checked = True
    if checked:
        print('Check Result: unequal')
    else:
        print('Check Result: equal')
def main():
    """CLI entry point; expects exactly two file-path arguments."""
    if(len(sys.argv) !=3 ):
        print('Wrong parameters,need two files')
        return
    compareFile(sys.argv[1],sys.argv[2])
if __name__ == '__main__':
    main()
| true
| true
|
f70e3629b7c97b51ca470780ecfbfd9073525e0a
| 559
|
py
|
Python
|
DataGeneticsSolutions/reciprocals.py
|
Zaita/SampleCode
|
2b846a3a1cd907d62aea6f5a047a755d19597dc3
|
[
"MIT"
] | null | null | null |
DataGeneticsSolutions/reciprocals.py
|
Zaita/SampleCode
|
2b846a3a1cd907d62aea6f5a047a755d19597dc3
|
[
"MIT"
] | null | null | null |
DataGeneticsSolutions/reciprocals.py
|
Zaita/SampleCode
|
2b846a3a1cd907d62aea6f5a047a755d19597dc3
|
[
"MIT"
] | null | null | null |
import math
import itertools
digits = []
def search(pool=None):
    """
    Look for six values in *pool* (default: the module-level ``digits``)
    whose reciprocals sum to exactly 1.

    Uses exact rational arithmetic (fractions.Fraction) instead of floats:
    the original ``total == 1.0`` test on accumulated binary floats could
    miss genuine solutions whose partial sums are not exactly representable.

    Prints and returns True for the first solution found, else False.
    The new optional *pool* parameter is backward compatible — calling
    search() with no arguments behaves as before.
    """
    from fractions import Fraction
    if pool is None:
        pool = digits
    one = Fraction(1)
    for combo in itertools.combinations(pool, 6):
        total = Fraction(0)
        for x in combo:
            total += Fraction(1, x)
            # Reciprocals are positive, so once past 1 this combo is dead.
            if total > one:
                break
        if total == one:
            print('Solution: ' + str(combo))
            return True
    return False
max_digit = 6
# Widen the candidate pool (1..max_digit) one digit at a time until search()
# finds six values whose reciprocals sum to 1.
while True:
    digits = []
    for i in range(1, max_digit + 1):
        digits.append(i)
    print('Max Digit: ' + str(max_digit))
    if search():
        break
    max_digit += 1
| 17.46875
| 58
| 0.488372
|
import math
import itertools
digits = []
def search():
    """
    Scan every 6-element combination of the global ``digits`` list for one
    whose reciprocals sum to 1; print and return True on success.

    NOTE(review): the ``== 1.0`` comparison accumulates binary floats and can
    therefore miss solutions whose partial sums round — confirm acceptable.
    """
    for perm in itertools.combinations(digits, 6):
        total = 0.0
        for x in perm:
            total += (1 / x)
            # Reciprocals are positive, so once past 1 this combo is dead.
            if total > 1.0:
                break
        if total == 1.0:
            print('Solution: ' + str(perm))
            return True
    return False
max_digit = 6
# Widen the candidate pool (1..max_digit) one digit at a time until search()
# finds six values whose reciprocals sum to 1.
while True:
    digits = []
    for i in range(1, max_digit + 1):
        digits.append(i)
    print('Max Digit: ' + str(max_digit))
    if search():
        break
    max_digit += 1
| true
| true
|
f70e370b1a7824d9b4845d169600862a98161c47
| 15,476
|
py
|
Python
|
probe/modules/database/nsrl/nsrl.py
|
vaginessa/irma
|
02285080b67b25ef983a99a765044683bd43296c
|
[
"Apache-2.0"
] | null | null | null |
probe/modules/database/nsrl/nsrl.py
|
vaginessa/irma
|
02285080b67b25ef983a99a765044683bd43296c
|
[
"Apache-2.0"
] | null | null | null |
probe/modules/database/nsrl/nsrl.py
|
vaginessa/irma
|
02285080b67b25ef983a99a765044683bd43296c
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import json
import os
import pprint
from abc import ABCMeta
# HACK: to avoid an error on import if leveldict is not installed
try:
from leveldict import LevelDictSerialized
except ImportError as e:
# use type and not metaclass because of the singleton
LevelDictSerialized = type
from lib.common.oopatterns import ParametricSingletonMetaClass
log = logging.getLogger(__name__)
# =============
# Serializers
# =============
class NSRLSerializer(object):
    """
    Base (de)serializer mapping NSRL CSV rows to/from compact JSON arrays.

    Subclasses set ``fields``: the column order used to pack a row dict into
    a JSON list and back.  A value may be a single row dict or a list of row
    dicts (when several CSV rows share one database key).
    """
    fields = None

    @classmethod
    def loads(cls, value):
        """Decode a JSON array (or array of arrays) into a row dict (or list of dicts)."""
        value = json.loads(value)
        if isinstance(value[0], list):
            result = [dict((field, col[index])
                           for index, field in
                           enumerate(cls.fields)) for col in value]
        else:
            result = dict((field, value[index])
                          for index, field in
                          enumerate(cls.fields))
        return result

    @classmethod
    def dumps(cls, value):
        """
        Encode a row dict (or list of row dicts) as a JSON array.

        If plain JSON encoding fails (typically undecoded bytes in a
        column), decode each bytes value in place — guessing the charset
        with chardet — and retry.
        """
        try:
            if isinstance(value, list):
                result = json.dumps([[row.get(key) for key in cls.fields] for row in value])
            else:
                result = json.dumps([value.get(col) for col in cls.fields])
        except Exception:
            # Bruteforce fallback: normalize every bytes column, re-encode.
            if isinstance(value, list):
                for row in value:
                    cls._decode_row(row)
                result = json.dumps([[row.get(key) for key in cls.fields] for row in value])
            else:
                cls._decode_row(value)
                result = json.dumps([value.get(col) for col in cls.fields])
        return result

    @staticmethod
    def _decode_row(row):
        """Decode all non-str column values of *row* in place, guessing the charset."""
        # chardet is only needed on the fallback path, so import lazily.
        import chardet
        for colkey, colval in list(row.items()):
            if not isinstance(colval, str):
                charset = chardet.detect(colval)['encoding']
                if charset is None:
                    charset = 'unicode-escape'
                try:
                    row[colkey] = colval.decode(charset)
                except Exception:
                    # Was a bare `except:` (which also swallowed
                    # KeyboardInterrupt); chardet false positives fall back
                    # to unicode-escape as before.
                    row[colkey] = colval.decode('unicode-escape')
class NSRLOsSerializer(NSRLSerializer):
    """Serializer for NSRL operating-system records."""
    fields = ['OpSystemVersion', 'OpSystemName', 'MfgCode']
class NSRLFileSerializer(NSRLSerializer):
    """Serializer for NSRL file records; overrides dumps() because only the
    FileName column is free-form text that may need charset repair."""
    fields = ["MD5", "CRC32", "FileName", "FileSize",
              "ProductCode", "OpSystemCode", "SpecialCode"]
    @classmethod
    def dumps(cls, value):
        """Encode row(s) as JSON; on failure, guess the FileName charset and retry."""
        def detect_charset(string):
            import chardet
            return chardet.detect(string)['encoding']
        try:
            if isinstance(value, list):
                result = json.dumps([[row.get(key) for key in cls.fields] for row in value])
            else:
                result = json.dumps([value.get(col) for col in cls.fields])
        except Exception:
            # failed to json it, bruteforce encoding
            if isinstance(value, list):
                for row in value:
                    fname = row['FileName']
                    if not isinstance(fname, str):
                        charset = detect_charset(fname)
                        charset = 'unicode-escape' if not charset else charset
                        try:
                            row['FileName'] = fname.decode(charset)
                        except:
                            # treat false positive from chardet
                            row['FileName'] = fname.decode('unicode-escape')
                result = json.dumps([[col.get(key) for key in cls.fields] for col in value])
            else:
                fname = value['FileName']
                if not isinstance(fname, str):
                    # try to use chardet to find encoding
                    charset = detect_charset(fname)
                    charset = 'unicode-escape' if not charset else charset
                    try:
                        value['FileName'] = fname.decode(charset)
                    except:
                        # treat false positive from chardet
                        value['FileName'] = fname.decode('unicode-escape')
                result = json.dumps([value.get(col) for col in cls.fields])
        return result
class NSRLManufacturerSerializer(NSRLSerializer):
    """Serializer for NSRL manufacturer records."""
    fields = ["MfgName"]
class NSRLProductSerializer(NSRLSerializer):
    """Serializer for NSRL product records."""
    fields = ["ProductName", "ProductVersion", "OpSystemCode",
              "MfgCode", "Language", "ApplicationType"]
# ==============
# NSRL records
# ==============
# Hack to avoid metaclass conflicts
class LevelDBSingletonMetaClass(ABCMeta, ParametricSingletonMetaClass):
    """Combined metaclass so a class can be both an ABC (via LevelDict) and a
    parametric singleton without a metaclass conflict."""
    pass
# Concrete base class carrying the combined metaclass.
LevelDBSingleton = LevelDBSingletonMetaClass('LevelDBSingleton', (object,), {})
class NSRLLevelDict(LevelDictSerialized, LevelDBSingleton):
    """
    Base LevelDB-backed key/value store for one NSRL record type.

    Subclasses set ``key`` to the CSV column used as the database key.
    Instances are parametric singletons keyed on the database path.
    """
    key = None

    @staticmethod
    def depends_on(cls, *args, **kwargs):
        # singleton depends on the uri parameter
        (db,) = args[0]
        return os.path.abspath(db)

    def __init__(self, db, serializer=json, **kwargs):
        super(NSRLLevelDict, self).__init__(db, serializer, **kwargs)

    @classmethod
    def create_database(cls, dbfile, records, **kwargs):
        """
        Build a database at *dbfile* from the NSRL CSV file *records*.

        Rows sharing the same key are accumulated into a list.  Progress is
        printed every 50000 rows.  Returns the opened database.
        """
        # import specific modules
        from csv import DictReader
        log_threshold = 50000
        # create database
        db = cls(dbfile, **kwargs)
        # Fix: the CSV handle was previously opened without ever being
        # closed; a with-block guarantees closure even on error.
        with open(records, 'r') as csv_file:
            for index, row in enumerate(DictReader(csv_file)):
                key = row.pop(cls.key)
                value = db.get(key, None)
                if not value:
                    db[key] = row
                elif isinstance(value, dict):
                    # Second row for this key: promote the value to a list.
                    db[key] = [value, row]
                else:
                    # db[key].append([row]) is not possible as changes are only
                    # made in memory and __setitem__ is never called
                    db[key] = value + [row]
                if (index % log_threshold) == 0:
                    print(("Current progress: {0}".format(index)))
        return db
# ==================
# NSRL File Record
# ==================
class NSRLFile(NSRLLevelDict):
    """File records, keyed on the SHA-1 column."""
    key = "SHA-1"
    def __init__(self, db, **kwargs):
        super(NSRLFile, self).__init__(db, NSRLFileSerializer, **kwargs)
# =================
#  NSRL OS Record
# =================
class NSRLOs(NSRLLevelDict):
    """Operating-system records, keyed on OpSystemCode."""
    key = "OpSystemCode"
    def __init__(self, db, **kwargs):
        super(NSRLOs, self).__init__(db,
                                     NSRLOsSerializer,
                                     **kwargs)
# ================
#  NSRL OS Record
# ================
class NSRLManufacturer(NSRLLevelDict):
    """Manufacturer records, keyed on MfgCode."""
    key = "MfgCode"
    def __init__(self, db, **kwargs):
        super(NSRLManufacturer, self).__init__(db,
                                               NSRLManufacturerSerializer,
                                               **kwargs)
# =====================
#  NSRL Product Record
# =====================
class NSRLProduct(NSRLLevelDict):
    """Product records, keyed on ProductCode."""
    key = "ProductCode"
    def __init__(self, db, **kwargs):
        super(NSRLProduct, self).__init__(db,
                                          NSRLProductSerializer,
                                          **kwargs)
# =============
# NSRL module
# =============
class NSRL(object):
    """
    Facade over the four NSRL LevelDB databases (file, product, os,
    manufacturer) with cross-database lookup by SHA-1.
    """
    def __init__(self,
                 nsrl_file,
                 nsrl_product,
                 nsrl_os, nsrl_manufacturer,
                 **kwargs):
        # TODO: need to specify paths in constructor,
        # temporary pass via kwargs
        self.nsrl_file = NSRLFile(nsrl_file)
        self.nsrl_product = NSRLProduct(nsrl_product)
        self.nsrl_os = NSRLOs(nsrl_os)
        self.nsrl_manufacturer = NSRLManufacturer(nsrl_manufacturer)

    def _lookup_file(self, sha1sum):
        return self.nsrl_file[sha1sum]

    def _lookup_product(self, product_code):
        return self.nsrl_product[product_code]

    def _lookup_os(self, op_system_code):
        return self.nsrl_os[op_system_code]

    def _lookup_manufacturer(self, manufacturer_code):
        return self.nsrl_manufacturer[manufacturer_code]

    def lookup_by_sha1(self, sha1sum):
        """
        Best-effort resolution of *sha1sum* across all databases.

        Returns a dict keyed by record type ('SHA-1', 'ProductCode',
        'OpSystemCode', 'MfgCode'), each mapping ids to their records.
        Resolution stops silently at the first lookup failure.
        """
        # (seed value, result key, database, dependency key), in resolution
        # order: each later step pulls its foreign keys from an earlier one.
        operations = [
            (sha1sum, 'SHA-1', self.nsrl_file, None),
            (None, 'ProductCode', self.nsrl_product, 'SHA-1'),
            (None, 'OpSystemCode', self.nsrl_os, 'SHA-1'),
            (None, 'MfgCode', self.nsrl_manufacturer, 'ProductCode')
        ]
        entries = dict((name, {}) for (_, name, _, _) in operations)
        try:
            for value, key, database, where in operations:
                if value:
                    entries[key][value] = database[value]
                else:
                    # Collect the foreign keys referenced by the records
                    # already resolved in the 'where' step.
                    subkeys = set()
                    for subkey, subitem in list(entries[where].items()):
                        if not isinstance(subitem, list):
                            subitem = [subitem]
                        subkeys.update([x[key] for x in subitem])
                    for subkey in subkeys:
                        entries[key][subkey] = database[subkey]
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; lookup remains deliberately best-effort.
            pass
        return entries
##############################################################################
# CLI for debug purposes
##############################################################################
if __name__ == '__main__':
    ##########################################################################
    # local import
    ##########################################################################
    import argparse
    ##########################################################################
    # defined functions
    ##########################################################################
    # Registry mapping a record-type name to its database class.
    nsrl_databases = {
        'file': NSRLFile,
        'os': NSRLOs,
        'manufacturer': NSRLManufacturer,
        'product': NSRLProduct,
    }
    def nsrl_create_database(**kwargs):
        # Build a LevelDB database from a raw NSRL CSV record file.
        database_type = kwargs['type']
        nsrl_databases[database_type].create_database(kwargs['database'],
                                                      kwargs['filename'])
    def nsrl_get(**kwargs):
        # Open the database and print the value stored for a single key.
        database_type = kwargs['type']
        database = nsrl_databases[database_type](kwargs['database'],
                                                 block_cache_size=1 << 30,
                                                 max_open_files=3000)
        value = database.get(kwargs['key'])
        print(("key {0}: value {1}".format(kwargs['key'], value)))
    def nsrl_resolve(**kwargs):
        # TODO: handle in a better way NRSL object
        kwargs['nsrl_file_db'] = kwargs['file']
        kwargs['nsrl_prod_db'] = kwargs['product']
        kwargs['nsrl_os_db'] = kwargs['os']
        kwargs['nsrl_mfg_db'] = kwargs['manufacturer']
        handle = NSRL(**kwargs)
        print((pprint.pformat(handle.lookup_by_sha1(kwargs['sha1']))))
    ##########################################################################
    # arguments
    ##########################################################################
    # define command line arguments
    desc_msg = 'NSRL database module CLI mode'
    parser = argparse.ArgumentParser(description=desc_msg)
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        default=0)
    subparsers = parser.add_subparsers(help='sub-command help')
    # create the create parser
    help_msg = 'create NSRL records into a database'
    create_parser = subparsers.add_parser('create',
                                          help=help_msg)
    create_parser.add_argument('-t',
                               '--type',
                               type=str,
                               choices=['file', 'os',
                                        'manufacturer', 'product'],
                               help='type of the record')
    create_parser.add_argument('filename',
                               type=str,
                               help='filename of the NSRL record')
    create_parser.add_argument('database',
                               type=str,
                               help='database to store NSRL records')
    create_parser.set_defaults(func=nsrl_create_database)
    # create the get parser
    get_parser = subparsers.add_parser('get',
                                       help='get the entry from database')
    get_parser.add_argument('-t',
                            '--type',
                            type=str,
                            choices=['file', 'os', 'manufacturer', 'product'],
                            help='type of the record')
    get_parser.add_argument('database',
                            type=str,
                            help='database to read NSRL records')
    get_parser.add_argument('key',
                            type=str,
                            help='key to retreive')
    get_parser.set_defaults(func=nsrl_get)
    # create the resolve parser
    get_parser = subparsers.add_parser('resolve',
                                       help='resolve from sha1')
    get_parser.add_argument('file',
                            type=str,
                            help='filename for file records')
    get_parser.add_argument('product',
                            type=str,
                            help='filename for product records')
    get_parser.add_argument('os',
                            type=str,
                            help='filename for os records')
    get_parser.add_argument('manufacturer',
                            type=str,
                            help='filename for manufacturer records')
    get_parser.add_argument('sha1',
                            type=str,
                            help='sha1 to lookup')
    get_parser.set_defaults(func=nsrl_resolve)
    args = parser.parse_args()
    # set verbosity
    if args.verbose == 1:
        logging.basicConfig(level=logging.INFO)
    elif args.verbose == 2:
        logging.basicConfig(level=logging.DEBUG)
    args = vars(parser.parse_args())
    func = args.pop('func')
    # with 'func' removed, args is now a kwargs
    # with only the specific arguments
    # for each subfunction useful for interactive mode.
    func(**args)
| 34.544643
| 92
| 0.502843
|
import logging
import json
import os
import pprint
from abc import ABCMeta
try:
from leveldict import LevelDictSerialized
except ImportError as e:
LevelDictSerialized = type
from lib.common.oopatterns import ParametricSingletonMetaClass
log = logging.getLogger(__name__)
class NSRLSerializer(object):
    """
    Base (de)serializer mapping NSRL CSV rows to/from compact JSON arrays.
    Subclasses set ``fields``: the column order used to pack a row dict
    into a JSON list and back.
    """
    fields = None
    @classmethod
    def loads(cls, value):
        """Decode a JSON array (or array of arrays) into a row dict (or list)."""
        value = json.loads(value)
        if isinstance(value[0], list):
            result = [dict((field, col[index])
                           for index, field in
                           enumerate(cls.fields)) for col in value]
        else:
            result = dict((field, value[index])
                          for index, field in
                          enumerate(cls.fields))
        return result
    @classmethod
    def dumps(cls, value):
        """Encode row dict(s) as JSON; on failure, guess the charset of each
        bytes column with chardet, decode in place, and retry."""
        def detect_charset(string):
            import chardet
            return chardet.detect(string)['encoding']
        try:
            if isinstance(value, list):
                result = json.dumps([[row.get(key) for key in cls.fields] for row in value])
            else:
                result = json.dumps([value.get(col) for col in cls.fields])
        except Exception:
            # Bruteforce fallback: normalize every bytes column, re-encode.
            if isinstance(value, list):
                for row in value:
                    for colkey, colval in list(row.items()):
                        if not isinstance(colval, str):
                            charset = detect_charset(colval)
                            if charset is None:
                                charset = 'unicode-escape'
                            try:
                                row[colkey] = colval.decode(charset)
                            except:
                                # chardet guessed wrong; last-resort decode.
                                row[colkey] = colval.decode('unicode-escape')
                result = json.dumps([[row.get(key) for key in cls.fields] for row in value])
            else:
                for colkey, colval in list(value.items()):
                    if not isinstance(colval, str):
                        charset = detect_charset(colval)
                        if charset is None:
                            charset = 'unicode-escape'
                        try:
                            value[colkey] = colval.decode(charset)
                        except:
                            # chardet guessed wrong; last-resort decode.
                            value[colkey] = colval.decode('unicode-escape')
                result = json.dumps([value.get(col) for col in cls.fields])
        return result
class NSRLOsSerializer(NSRLSerializer):
    """Serializer for NSRL operating-system records."""
    fields = ['OpSystemVersion', 'OpSystemName', 'MfgCode']
class NSRLFileSerializer(NSRLSerializer):
    """Serializer for NSRL file records; overrides dumps() because only the
    FileName column is free-form text that may need charset repair."""
    fields = ["MD5", "CRC32", "FileName", "FileSize",
              "ProductCode", "OpSystemCode", "SpecialCode"]
    @classmethod
    def dumps(cls, value):
        """Encode row(s) as JSON; on failure, guess the FileName charset and retry."""
        def detect_charset(string):
            import chardet
            return chardet.detect(string)['encoding']
        try:
            if isinstance(value, list):
                result = json.dumps([[row.get(key) for key in cls.fields] for row in value])
            else:
                result = json.dumps([value.get(col) for col in cls.fields])
        except Exception:
            if isinstance(value, list):
                for row in value:
                    fname = row['FileName']
                    if not isinstance(fname, str):
                        charset = detect_charset(fname)
                        charset = 'unicode-escape' if not charset else charset
                        try:
                            row['FileName'] = fname.decode(charset)
                        except:
                            # chardet guessed wrong; last-resort decode.
                            row['FileName'] = fname.decode('unicode-escape')
                result = json.dumps([[col.get(key) for key in cls.fields] for col in value])
            else:
                fname = value['FileName']
                if not isinstance(fname, str):
                    charset = detect_charset(fname)
                    charset = 'unicode-escape' if not charset else charset
                    try:
                        value['FileName'] = fname.decode(charset)
                    except:
                        # chardet guessed wrong; last-resort decode.
                        value['FileName'] = fname.decode('unicode-escape')
                result = json.dumps([value.get(col) for col in cls.fields])
        return result
class NSRLManufacturerSerializer(NSRLSerializer):
    """Serializer for NSRL manufacturer records."""
    fields = ["MfgName"]
class NSRLProductSerializer(NSRLSerializer):
    """Serializer for NSRL product records."""
    fields = ["ProductName", "ProductVersion", "OpSystemCode",
              "MfgCode", "Language", "ApplicationType"]
class LevelDBSingletonMetaClass(ABCMeta, ParametricSingletonMetaClass):
    """Combined metaclass so a class can be both an ABC (via LevelDict) and a
    parametric singleton without a metaclass conflict."""
    pass
# Concrete base class carrying the combined metaclass.
LevelDBSingleton = LevelDBSingletonMetaClass('LevelDBSingleton', (object,), {})
class NSRLLevelDict(LevelDictSerialized, LevelDBSingleton):
    """
    Base LevelDB-backed key/value store for one NSRL record type.
    Subclasses set ``key`` to the CSV column used as the database key;
    instances are parametric singletons keyed on the database path.
    """
    key = None
    @staticmethod
    def depends_on(cls, *args, **kwargs):
        # The singleton identity is the (absolute) database path.
        (db,) = args[0]
        return os.path.abspath(db)
    def __init__(self, db, serializer=json, **kwargs):
        super(NSRLLevelDict, self).__init__(db, serializer, **kwargs)
    @classmethod
    def create_database(cls, dbfile, records, **kwargs):
        """Build a database at *dbfile* from the NSRL CSV file *records*;
        rows sharing a key accumulate into a list. Returns the database."""
        from csv import DictReader
        log_threshold = 50000
        db = cls(dbfile, **kwargs)
        # NOTE(review): csv_file is never closed; a with-block would be safer.
        csv_file = open(records, 'r')
        csv_entries = DictReader(csv_file)
        for index, row in enumerate(csv_entries):
            key = row.pop(cls.key)
            value = db.get(key, None)
            if not value:
                db[key] = row
            else:
                if isinstance(value, dict):
                    # Second row for this key: promote the value to a list.
                    db[key] = [value, row]
                else:
                    # Appending in place would only mutate the in-memory copy;
                    # __setitem__ must run so the change is persisted.
                    db[key] = value + [row]
            if (index % log_threshold) == 0:
                print(("Current progress: {0}".format(index)))
        return db
class NSRLFile(NSRLLevelDict):
    """File records, keyed on the SHA-1 column."""
    key = "SHA-1"
    def __init__(self, db, **kwargs):
        super(NSRLFile, self).__init__(db, NSRLFileSerializer, **kwargs)
class NSRLOs(NSRLLevelDict):
    """Operating-system records, keyed on OpSystemCode."""
    key = "OpSystemCode"
    def __init__(self, db, **kwargs):
        super(NSRLOs, self).__init__(db,
                                     NSRLOsSerializer,
                                     **kwargs)
class NSRLManufacturer(NSRLLevelDict):
    """Manufacturer records, keyed on MfgCode."""
    key = "MfgCode"
    def __init__(self, db, **kwargs):
        super(NSRLManufacturer, self).__init__(db,
                                               NSRLManufacturerSerializer,
                                               **kwargs)
class NSRLProduct(NSRLLevelDict):
    """Product records, keyed on ProductCode."""
    key = "ProductCode"
    def __init__(self, db, **kwargs):
        super(NSRLProduct, self).__init__(db,
                                          NSRLProductSerializer,
                                          **kwargs)
class NSRL(object):
    """Facade over the four NSRL LevelDB databases (file, product, os,
    manufacturer) with cross-database lookup by SHA-1."""
    def __init__(self,
                 nsrl_file,
                 nsrl_product,
                 nsrl_os, nsrl_manufacturer,
                 **kwargs):
        self.nsrl_file = NSRLFile(nsrl_file)
        self.nsrl_product = NSRLProduct(nsrl_product)
        self.nsrl_os = NSRLOs(nsrl_os)
        self.nsrl_manufacturer = NSRLManufacturer(nsrl_manufacturer)
    def _lookup_file(self, sha1sum):
        return self.nsrl_file[sha1sum]
    def _lookup_product(self, product_code):
        return self.nsrl_product[product_code]
    def _lookup_os(self, op_system_code):
        return self.nsrl_os[op_system_code]
    def _lookup_manufacturer(self, manufacturer_code):
        return self.nsrl_manufacturer[manufacturer_code]
    def lookup_by_sha1(self, sha1sum):
        """Best-effort resolution of *sha1sum* across all databases; returns
        a dict keyed by record type, each mapping ids to their records."""
        # (seed value, result key, database, dependency key), in resolution
        # order: each later step pulls its foreign keys from an earlier one.
        operations = [
            (sha1sum, 'SHA-1', self.nsrl_file, None),
            (None, 'ProductCode', self.nsrl_product, 'SHA-1'),
            (None, 'OpSystemCode', self.nsrl_os, 'SHA-1'),
            (None, 'MfgCode', self.nsrl_manufacturer, 'ProductCode')
        ]
        entries = dict((name, {}) for (_, name, _, _) in operations)
        try:
            for value, key, database, where in operations:
                if value:
                    entries[key][value] = database[value]
                else:
                    # Collect the foreign keys referenced by the records
                    # already resolved in the 'where' step.
                    subkeys = set()
                    for subkey, subitem in list(entries[where].items()):
                        if not isinstance(subitem, list):
                            subitem = [subitem]
                        subkeys.update([x[key] for x in subitem])
                    for subkey in subkeys:
                        entries[key][subkey] = database[subkey]
        except:
            # NOTE(review): bare except silently swallows every error
            # (including KeyboardInterrupt); lookup is deliberately
            # best-effort, but a narrower except would be safer.
            pass
        return entries
| true
| true
|
f70e37545e35f5dfcdb3ab4f9338fcecda5848c2
| 1,318
|
py
|
Python
|
gamestonk_terminal/options/chartexchange_view.py
|
jperkins12/GamestonkTerminal
|
43dd95cc49e47ff0a0fd84f6e8a4b75b0f51ed5e
|
[
"MIT"
] | 1
|
2021-12-04T13:21:40.000Z
|
2021-12-04T13:21:40.000Z
|
gamestonk_terminal/options/chartexchange_view.py
|
TenzinJhopee/GamestonkTerminal
|
ebc0bc5963635888a01b2958070a0718249c8275
|
[
"MIT"
] | null | null | null |
gamestonk_terminal/options/chartexchange_view.py
|
TenzinJhopee/GamestonkTerminal
|
ebc0bc5963635888a01b2958070a0718249c8275
|
[
"MIT"
] | null | null | null |
"""Chartexchange view"""
__docformat__ = "numpy"
import os
from tabulate import tabulate
from gamestonk_terminal.helper_funcs import export_data
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.options import chartexchange_model
def display_raw(
    export: str, ticker: str, date: str, call: bool, price: str, num: int = 20
) -> None:
    """Display raw option data [chartexchange].

    Parameters
    ----------
    export : str
        Export data as CSV, JSON, XLSX
    ticker : str
        Ticker for the given option
    date : str
        Date of expiration for the option
    call : bool
        Whether the underlying asset should be a call or a put
    price : float
        The strike of the expiration
    num : int
        Number of rows to show
    """
    history = chartexchange_model.get_option_history(ticker, date, call, price)
    current_dir = os.path.dirname(os.path.abspath(__file__))
    export_data(export, current_dir, "hist", history)
    if gtff.USE_TABULATE_DF:
        table = tabulate(
            history.head(num),
            headers=history.columns,
            tablefmt="fancy_grid",
            showindex=True,
            floatfmt=".2f",
        )
        print(table)
    else:
        print(history.to_string(index=False))
    print("")
| 23.122807
| 78
| 0.597117
|
__docformat__ = "numpy"
import os
from tabulate import tabulate
from gamestonk_terminal.helper_funcs import export_data
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.options import chartexchange_model
def display_raw(
    export: str, ticker: str, date: str, call: bool, price: str, num: int = 20
) -> None:
    """Display raw option history data [chartexchange].

    Parameters
    ----------
    export : str
        Export data as CSV, JSON, XLSX
    ticker : str
        Ticker for the given option
    date : str
        Date of expiration for the option
    call : bool
        Whether the underlying asset should be a call or a put
    price : str
        The strike of the expiration
    num : int
        Number of rows to show
    """
    df = chartexchange_model.get_option_history(ticker, date, call, price)
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "hist",
        df,
    )
    if gtff.USE_TABULATE_DF:
        print(
            tabulate(
                df.head(num),
                headers=df.columns,
                tablefmt="fancy_grid",
                showindex=True,
                floatfmt=".2f",
            )
        )
    else:
        print(df.to_string(index=False))
    print("")
| true
| true
|
f70e377b4642eb62c5792be35937a7c90158949b
| 2,837
|
py
|
Python
|
sdk/AsposeEmailCloudSdk/models/ai_bcr_parse_request.py
|
aspose-email-cloud/aspose-email-cloud-python
|
c5c13839cbbbfa5b6617bd1aedf3cf30cd664227
|
[
"MIT"
] | 1
|
2020-02-26T13:19:06.000Z
|
2020-02-26T13:19:06.000Z
|
sdk/AsposeEmailCloudSdk/models/ai_bcr_parse_request.py
|
aspose-email-cloud/aspose-email-cloud-python
|
c5c13839cbbbfa5b6617bd1aedf3cf30cd664227
|
[
"MIT"
] | null | null | null |
sdk/AsposeEmailCloudSdk/models/ai_bcr_parse_request.py
|
aspose-email-cloud/aspose-email-cloud-python
|
c5c13839cbbbfa5b6617bd1aedf3cf30cd664227
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="ai_bcr_parse_request.py">
# Copyright (c) 2018-2020 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
from AsposeEmailCloudSdk.models import *
class AiBcrParseRequest(object):
    """Request model for the ai_bcr_parse operation.

    Bundles the parameters needed to submit a business-card image for
    parsing.
    """

    def __init__(self, file: str, countries: str = None, languages: str = None, is_single: bool = None):
        """Initialize a new instance.

        :param file: File to parse.
        :param countries: Comma-separated codes of countries.
        :param languages: Comma-separated ISO-639 codes of languages
            (either 639-1 or 639-3; i.e. \"it\" or \"ita\" for Italian);
            it's \"\" by default.
        :param is_single: Determines that image contains single VCard or more.
        """
        # Stash every parameter as a plain attribute, one setattr per field;
        # no validation is performed client-side.
        for attr_name, attr_value in (
            ("file", file),
            ("countries", countries),
            ("languages", languages),
            ("is_single", is_single),
        ):
            setattr(self, attr_name, attr_value)
| 43.646154
| 164
| 0.649277
|
from AsposeEmailCloudSdk.models import *
class AiBcrParseRequest(object):
    """Request model for the ai_bcr_parse operation."""

    def __init__(self, file: str, countries: str = None, languages: str = None, is_single: bool = None):
        """Capture the parse-request parameters as instance attributes.

        :param file: File to parse.
        :param countries: Comma-separated codes of countries.
        :param languages: Comma-separated ISO-639 language codes.
        :param is_single: Determines that image contains single VCard or more.
        """
        # Populate all instance attributes in a single update.
        self.__dict__.update(
            file=file,
            countries=countries,
            languages=languages,
            is_single=is_single,
        )
| true
| true
|
f70e3826342543155d683d371c5acba5b651c0f6
| 4,131
|
py
|
Python
|
src/fbsrankings/domain/service/srs_ranking_service.py
|
mikee385/fbsrankings
|
2b50e26a302b53c21cd8f5c965943d6fbf0680a1
|
[
"MIT"
] | null | null | null |
src/fbsrankings/domain/service/srs_ranking_service.py
|
mikee385/fbsrankings
|
2b50e26a302b53c21cd8f5c965943d6fbf0680a1
|
[
"MIT"
] | null | null | null |
src/fbsrankings/domain/service/srs_ranking_service.py
|
mikee385/fbsrankings
|
2b50e26a302b53c21cd8f5c965943d6fbf0680a1
|
[
"MIT"
] | null | null | null |
from typing import Dict
from typing import List
import numpy
from fbsrankings.domain.model.affiliation import Subdivision
from fbsrankings.domain.model.game import Game
from fbsrankings.domain.model.game import GameStatus
from fbsrankings.domain.model.ranking import Ranking
from fbsrankings.domain.model.ranking import SeasonData
from fbsrankings.domain.model.ranking import TeamRankingRepository
from fbsrankings.domain.model.ranking import TeamRankingService
from fbsrankings.domain.model.team import TeamID
class TeamData:
    """Per-team accumulator for the SRS least-squares system.

    Tracks the team's row/column index in the matrix plus running game
    and point-margin totals.
    """

    def __init__(self, index: int) -> None:
        # Position of this team in the least-squares matrix.
        self.index = index
        # Running totals start empty; add_game() folds games in.
        self.game_total = 0
        self.point_margin = 0

    def add_game(self, point_margin: int) -> None:
        """Record one game and fold its signed point margin into the totals."""
        self.game_total = self.game_total + 1
        self.point_margin = self.point_margin + point_margin
class SRSRankingService(TeamRankingService):
    """Computes Simple Rating System (SRS) rankings for FBS teams.

    Each week, a least-squares system is solved in which every team's
    rating is balanced against its accumulated (adjusted) point margin
    and the ratings of the opponents it has played, with an extra
    equation constraining all ratings to sum to zero.
    """

    name: str = "SRS"

    def __init__(self, repository: TeamRankingRepository) -> None:
        # Rankings are persisted through this repository.
        self._repository = repository

    def calculate_for_season(self, season_data: SeasonData) -> List[Ranking[TeamID]]:
        """Create one ranking per week of the season, plus a final season
        ranking (week=None) once no games remain scheduled; return them all.
        """
        # Assign each FBS team a sequential index: its row/column in the
        # least-squares matrix built below.
        team_data: Dict[TeamID, TeamData] = {}
        for affiliation in season_data.affiliation_map.values():
            if affiliation.subdivision == Subdivision.FBS:
                team_data[affiliation.team_id] = TeamData(len(team_data))
        # Bucket decided games between two tracked (FBS) teams by week;
        # any game still merely scheduled means the season is unfinished.
        season_is_complete = True
        games_by_week: Dict[int, List[Game]] = {}
        for game in season_data.game_map.values():
            winning_data = None
            if game.winning_team_id is not None:
                winning_data = team_data.get(game.winning_team_id)
            losing_data = None
            if game.losing_team_id is not None:
                losing_data = team_data.get(game.losing_team_id)
            # Both participants must be FBS teams for the game to count.
            if winning_data is not None and losing_data is not None:
                week_games = games_by_week.setdefault(game.week, [])
                week_games.append(game)
            elif game.status == GameStatus.SCHEDULED:
                season_is_complete = False
        # a and b accumulate across weeks, so each week's solution reflects
        # all games played up to and including that week. Row n holds the
        # ratings-sum-to-zero constraint.
        n = len(team_data)
        a = numpy.zeros((n + 1, n))
        b = numpy.zeros(n + 1)
        rankings = []
        for week in sorted(games_by_week.keys()):
            for game in games_by_week[week]:
                if (
                    game.home_team_score is not None
                    and game.away_team_score is not None
                ):
                    home_data = team_data[game.home_team_id]
                    away_data = team_data[game.away_team_id]
                    # Clamp blowouts / inflate close games before
                    # accumulating (see _adjust_margin).
                    home_margin = self._adjust_margin(
                        game.home_team_score - game.away_team_score,
                    )
                    home_data.add_game(home_margin)
                    away_data.add_game(-home_margin)
                    # Off-diagonal entries count head-to-head meetings.
                    a[home_data.index, away_data.index] -= 1.0
                    a[away_data.index, home_data.index] -= 1.0
            for data in team_data.values():
                a[data.index, data.index] = data.game_total
                b[data.index] = data.point_margin
                a[n, data.index] = 1.0
            b[n] = 0.0
            # Least-squares solve; rcond=-1 keeps legacy numpy behavior.
            x = numpy.linalg.lstsq(a, b, rcond=-1)[0]
            result = {id_: x[data.index] for id_, data in team_data.items()}
            ranking_values = TeamRankingService._to_values(season_data, result)
            rankings.append(
                self._repository.create(
                    SRSRankingService.name,
                    season_data.season.id_,
                    week,
                    ranking_values,
                ),
            )
        if season_is_complete:
            # NOTE(review): reuses ranking_values from the last iterated
            # week; if no week had a decided game this name is unbound —
            # presumably inputs always contain at least one, but confirm.
            rankings.append(
                self._repository.create(
                    SRSRankingService.name,
                    season_data.season.id_,
                    None,
                    ranking_values,
                ),
            )
        return rankings

    @staticmethod
    def _adjust_margin(margin: int) -> int:
        """Clamp margins to +/-24 and round nonzero margins of fewer than
        7 points up to +/-7; zero margins pass through unchanged."""
        if margin > 24:
            return 24
        if margin < -24:
            return -24
        if 0 < margin < 7:
            return 7
        if 0 > margin > -7:
            return -7
        return margin
| 33.585366
| 85
| 0.574679
|
from typing import Dict
from typing import List
import numpy
from fbsrankings.domain.model.affiliation import Subdivision
from fbsrankings.domain.model.game import Game
from fbsrankings.domain.model.game import GameStatus
from fbsrankings.domain.model.ranking import Ranking
from fbsrankings.domain.model.ranking import SeasonData
from fbsrankings.domain.model.ranking import TeamRankingRepository
from fbsrankings.domain.model.ranking import TeamRankingService
from fbsrankings.domain.model.team import TeamID
class TeamData:
    """Accumulates the game count and point margin for one team, plus the
    team's row/column position in the SRS matrix."""

    def __init__(self, index: int) -> None:
        # Matrix row/column position for this team.
        self.index = index
        self.game_total = 0
        self.point_margin = 0

    def add_game(self, point_margin: int) -> None:
        """Add a single game's signed margin to the running totals."""
        self.game_total, self.point_margin = (
            self.game_total + 1,
            self.point_margin + point_margin,
        )
class SRSRankingService(TeamRankingService):
    """Computes Simple Rating System (SRS) rankings for FBS teams.

    Each week, a least-squares system is solved in which every team's
    rating is balanced against its accumulated (adjusted) point margin
    and the ratings of the opponents it has played, with an extra
    equation constraining all ratings to sum to zero.
    """

    name: str = "SRS"

    def __init__(self, repository: TeamRankingRepository) -> None:
        # Rankings are persisted through this repository.
        self._repository = repository

    def calculate_for_season(self, season_data: SeasonData) -> List[Ranking[TeamID]]:
        """Create one ranking per week of the season, plus a final season
        ranking (week=None) once no games remain scheduled; return them all.
        """
        # Assign each FBS team a sequential index: its row/column in the
        # least-squares matrix built below.
        team_data: Dict[TeamID, TeamData] = {}
        for affiliation in season_data.affiliation_map.values():
            if affiliation.subdivision == Subdivision.FBS:
                team_data[affiliation.team_id] = TeamData(len(team_data))
        # Bucket decided games between two tracked (FBS) teams by week;
        # any game still merely scheduled means the season is unfinished.
        season_is_complete = True
        games_by_week: Dict[int, List[Game]] = {}
        for game in season_data.game_map.values():
            winning_data = None
            if game.winning_team_id is not None:
                winning_data = team_data.get(game.winning_team_id)
            losing_data = None
            if game.losing_team_id is not None:
                losing_data = team_data.get(game.losing_team_id)
            # Both participants must be FBS teams for the game to count.
            if winning_data is not None and losing_data is not None:
                week_games = games_by_week.setdefault(game.week, [])
                week_games.append(game)
            elif game.status == GameStatus.SCHEDULED:
                season_is_complete = False
        # a and b accumulate across weeks, so each week's solution reflects
        # all games played up to and including that week. Row n holds the
        # ratings-sum-to-zero constraint.
        n = len(team_data)
        a = numpy.zeros((n + 1, n))
        b = numpy.zeros(n + 1)
        rankings = []
        for week in sorted(games_by_week.keys()):
            for game in games_by_week[week]:
                if (
                    game.home_team_score is not None
                    and game.away_team_score is not None
                ):
                    home_data = team_data[game.home_team_id]
                    away_data = team_data[game.away_team_id]
                    # Clamp blowouts / inflate close games before
                    # accumulating (see _adjust_margin).
                    home_margin = self._adjust_margin(
                        game.home_team_score - game.away_team_score,
                    )
                    home_data.add_game(home_margin)
                    away_data.add_game(-home_margin)
                    # Off-diagonal entries count head-to-head meetings.
                    a[home_data.index, away_data.index] -= 1.0
                    a[away_data.index, home_data.index] -= 1.0
            for data in team_data.values():
                a[data.index, data.index] = data.game_total
                b[data.index] = data.point_margin
                a[n, data.index] = 1.0
            b[n] = 0.0
            # Least-squares solve; rcond=-1 keeps legacy numpy behavior.
            x = numpy.linalg.lstsq(a, b, rcond=-1)[0]
            result = {id_: x[data.index] for id_, data in team_data.items()}
            ranking_values = TeamRankingService._to_values(season_data, result)
            rankings.append(
                self._repository.create(
                    SRSRankingService.name,
                    season_data.season.id_,
                    week,
                    ranking_values,
                ),
            )
        if season_is_complete:
            # NOTE(review): reuses ranking_values from the last iterated
            # week; if no week had a decided game this name is unbound —
            # presumably inputs always contain at least one, but confirm.
            rankings.append(
                self._repository.create(
                    SRSRankingService.name,
                    season_data.season.id_,
                    None,
                    ranking_values,
                ),
            )
        return rankings

    @staticmethod
    def _adjust_margin(margin: int) -> int:
        """Clamp margins to +/-24 and round nonzero margins of fewer than
        7 points up to +/-7; zero margins pass through unchanged."""
        if margin > 24:
            return 24
        if margin < -24:
            return -24
        if 0 < margin < 7:
            return 7
        if 0 > margin > -7:
            return -7
        return margin
| true
| true
|
f70e38b452c7ddafcc3ca7600de08d0f6aafa0d9
| 1,505
|
py
|
Python
|
setup.py
|
shanedabes/youtube_sm_parser
|
642663abb9ee12c8478796b7ed7b9a01210d1fad
|
[
"Apache-2.0"
] | 2
|
2019-05-26T07:50:35.000Z
|
2020-07-14T22:23:22.000Z
|
setup.py
|
shanedabes/youtube_sm_parser
|
642663abb9ee12c8478796b7ed7b9a01210d1fad
|
[
"Apache-2.0"
] | 35
|
2019-05-18T02:09:25.000Z
|
2019-12-03T20:52:21.000Z
|
setup.py
|
shanedabes/youtube_sm_parser
|
642663abb9ee12c8478796b7ed7b9a01210d1fad
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""The setup script for the youtube_sm_parser package."""

from setuptools import setup, find_packages

# README and HISTORY are concatenated into the long_description below.
with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read()

# Runtime dependencies.
requirements = [
    "requests-futures",
    "xmltodict",
    "PyYAML"
]

# Needed at setup time (pytest-runner enables `setup.py pytest`).
setup_requirements = ['pytest-runner', ]

# Needed only to run the test suite.
test_requirements = ['pytest', ]

setup(
    author="Shane Donohoe",
    author_email='shane@donohoe.cc',
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    description="Output youtube subscriptions using subscription_manager file",
    install_requires=requirements,
    # Installs the `youtube_sm_parser` console command.
    entry_points={
        "console_scripts": [
            'youtube_sm_parser = youtube_sm_parser.youtube_sm_parser:main'
        ]
    },
    license="Apache Software License 2.0",
    long_description=readme + '\n\n' + history,
    include_package_data=True,
    keywords='youtube_sm_parser',
    name='youtube_sm_parser',
    packages=find_packages(include=['youtube_sm_parser']),
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/shanedabes/youtube_sm_parser',
    version='0.1.5',
    zip_safe=False,
)
| 27.363636
| 79
| 0.665116
|
"""Setup script for the youtube_sm_parser package."""

from setuptools import setup, find_packages

# README and HISTORY are concatenated into the long_description below.
with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read()

# Runtime dependencies.
requirements = [
    "requests-futures",
    "xmltodict",
    "PyYAML"
]

# Needed at setup time (pytest-runner enables `setup.py pytest`).
setup_requirements = ['pytest-runner', ]

# Needed only to run the test suite.
test_requirements = ['pytest', ]

setup(
    author="Shane Donohoe",
    author_email='shane@donohoe.cc',
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    description="Output youtube subscriptions using subscription_manager file",
    install_requires=requirements,
    # Installs the `youtube_sm_parser` console command.
    entry_points={
        "console_scripts": [
            'youtube_sm_parser = youtube_sm_parser.youtube_sm_parser:main'
        ]
    },
    license="Apache Software License 2.0",
    long_description=readme + '\n\n' + history,
    include_package_data=True,
    keywords='youtube_sm_parser',
    name='youtube_sm_parser',
    packages=find_packages(include=['youtube_sm_parser']),
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/shanedabes/youtube_sm_parser',
    version='0.1.5',
    zip_safe=False,
)
| true
| true
|
f70e39c79abceb650a8a7542ee15c5d7e83b51f3
| 2,871
|
py
|
Python
|
app/api/routers.py
|
snakrani/discovery
|
99690f186a194cabef6a5d1ad18fca715be1e187
|
[
"CC0-1.0"
] | null | null | null |
app/api/routers.py
|
snakrani/discovery
|
99690f186a194cabef6a5d1ad18fca715be1e187
|
[
"CC0-1.0"
] | null | null | null |
app/api/routers.py
|
snakrani/discovery
|
99690f186a194cabef6a5d1ad18fca715be1e187
|
[
"CC0-1.0"
] | null | null | null |
from django.conf.urls import url
from rest_framework.routers import SimpleRouter, Route
class DiscoveryAPIRouter(SimpleRouter):
    """DRF router exposing read-only list/detail routes plus two extra
    GET routes per viewset: ``.../values/<field_lookup>/`` and
    ``.../count/<field_lookup>/``. The trailing slash is optional
    ('/?') on every route.
    """

    routes = [
        # List route (GET the collection).
        Route(
            url=r'^{prefix}{trailing_slash}$',
            mapping={
                'get': 'list'
            },
            name='{basename}-list',
            initkwargs={'suffix': 'List'}
        ),
        # Detail route (GET a single instance by lookup).
        Route(
            url=r'^{prefix}/{lookup}{trailing_slash}$',
            mapping={
                'get': 'retrieve'
            },
            name='{basename}-detail',
            initkwargs={'suffix': 'Instance'}
        ),
        # Values route: distinct values of the given field.
        Route(
            url=r'^{prefix}/values/{field_lookup}{trailing_slash}$',
            mapping={
                'get': 'values'
            },
            name='{basename}-values',
            initkwargs={'suffix': 'Values'}
        ),
        # Count route: counts for the given field.
        Route(
            url=r'^{prefix}/count/{field_lookup}{trailing_slash}$',
            mapping={
                'get': 'count'
            },
            name='{basename}-count',
            initkwargs={'suffix': 'Count'}
        )
    ]

    def __init__(self):
        # Make the trailing slash optional; calling SimpleRouter's *parent*
        # initializer (super(SimpleRouter, self)) deliberately skips
        # SimpleRouter.__init__, which would otherwise overwrite it.
        self.trailing_slash = '/?'
        super(SimpleRouter, self).__init__()

    def get_field_lookup_regex(self, viewset, lookup_prefix=''):
        """Build the regex fragment capturing the 'field_lookup' URL kwarg,
        reusing the viewset's lookup_value_regex (default '[^/.]+')."""
        base_regex = '(?P<{lookup_prefix}field_lookup>{lookup_value})'
        lookup_value = getattr(viewset, 'lookup_value_regex', '[^/.]+')
        return base_regex.format(
            lookup_prefix=lookup_prefix,
            lookup_value=lookup_value
        )

    def get_urls(self):
        """
        Use the registered viewsets to generate a list of URL patterns.
        """
        ret = []
        for prefix, viewset, basename in self.registry:
            lookup = self.get_lookup_regex(viewset)
            field_lookup = self.get_field_lookup_regex(viewset)
            routes = self.get_routes(viewset)
            for route in routes:
                # Only bind methods the viewset actually implements.
                mapping = self.get_method_map(viewset, route.mapping)
                if not mapping:
                    continue
                # Substitute prefix/lookup placeholders into the route regex.
                regex = route.url.format(
                    prefix=prefix,
                    lookup=lookup,
                    field_lookup=field_lookup,
                    trailing_slash=self.trailing_slash
                )
                # With an empty prefix, strip the spurious leading slash.
                if not prefix and regex[:2] == '^/':
                    regex = '^' + regex[2:]
                initkwargs = route.initkwargs.copy()
                initkwargs.update({
                    'basename': basename,
                })
                view = viewset.as_view(mapping, **initkwargs)
                name = route.name.format(basename=basename)
                ret.append(url(regex, view, name=name))
        return ret
| 29.295918
| 71
| 0.487635
|
from django.conf.urls import url
from rest_framework.routers import SimpleRouter, Route
class DiscoveryAPIRouter(SimpleRouter):
    """DRF router exposing read-only list/detail routes plus two extra
    GET routes per viewset: ``.../values/<field_lookup>/`` and
    ``.../count/<field_lookup>/``. The trailing slash is optional
    ('/?') on every route.
    """

    routes = [
        # List route (GET the collection).
        Route(
            url=r'^{prefix}{trailing_slash}$',
            mapping={
                'get': 'list'
            },
            name='{basename}-list',
            initkwargs={'suffix': 'List'}
        ),
        # Detail route (GET a single instance by lookup).
        Route(
            url=r'^{prefix}/{lookup}{trailing_slash}$',
            mapping={
                'get': 'retrieve'
            },
            name='{basename}-detail',
            initkwargs={'suffix': 'Instance'}
        ),
        # Values route: distinct values of the given field.
        Route(
            url=r'^{prefix}/values/{field_lookup}{trailing_slash}$',
            mapping={
                'get': 'values'
            },
            name='{basename}-values',
            initkwargs={'suffix': 'Values'}
        ),
        # Count route: counts for the given field.
        Route(
            url=r'^{prefix}/count/{field_lookup}{trailing_slash}$',
            mapping={
                'get': 'count'
            },
            name='{basename}-count',
            initkwargs={'suffix': 'Count'}
        )
    ]

    def __init__(self):
        # Make the trailing slash optional; calling SimpleRouter's *parent*
        # initializer (super(SimpleRouter, self)) deliberately skips
        # SimpleRouter.__init__, which would otherwise overwrite it.
        self.trailing_slash = '/?'
        super(SimpleRouter, self).__init__()

    def get_field_lookup_regex(self, viewset, lookup_prefix=''):
        """Build the regex fragment capturing the 'field_lookup' URL kwarg,
        reusing the viewset's lookup_value_regex (default '[^/.]+')."""
        base_regex = '(?P<{lookup_prefix}field_lookup>{lookup_value})'
        lookup_value = getattr(viewset, 'lookup_value_regex', '[^/.]+')
        return base_regex.format(
            lookup_prefix=lookup_prefix,
            lookup_value=lookup_value
        )

    def get_urls(self):
        """Use the registered viewsets to generate a list of URL patterns."""
        ret = []
        for prefix, viewset, basename in self.registry:
            lookup = self.get_lookup_regex(viewset)
            field_lookup = self.get_field_lookup_regex(viewset)
            routes = self.get_routes(viewset)
            for route in routes:
                # Only bind methods the viewset actually implements.
                mapping = self.get_method_map(viewset, route.mapping)
                if not mapping:
                    continue
                # Substitute prefix/lookup placeholders into the route regex.
                regex = route.url.format(
                    prefix=prefix,
                    lookup=lookup,
                    field_lookup=field_lookup,
                    trailing_slash=self.trailing_slash
                )
                # With an empty prefix, strip the spurious leading slash.
                if not prefix and regex[:2] == '^/':
                    regex = '^' + regex[2:]
                initkwargs = route.initkwargs.copy()
                initkwargs.update({
                    'basename': basename,
                })
                view = viewset.as_view(mapping, **initkwargs)
                name = route.name.format(basename=basename)
                ret.append(url(regex, view, name=name))
        return ret
| true
| true
|
f70e3a49ab9c031dbc42fb609e3a4c77a6c367bd
| 22,899
|
py
|
Python
|
build/lib/tracc/tracc.py
|
jamaps/tracc
|
0f71b07b6560ed2f5a9a9f6f94a07e487af254c5
|
[
"MIT"
] | 1
|
2021-04-20T21:19:32.000Z
|
2021-04-20T21:19:32.000Z
|
build/lib/tracc/tracc.py
|
jamaps/tracc
|
0f71b07b6560ed2f5a9a9f6f94a07e487af254c5
|
[
"MIT"
] | null | null | null |
build/lib/tracc/tracc.py
|
jamaps/tracc
|
0f71b07b6560ed2f5a9a9f6f94a07e487af254c5
|
[
"MIT"
] | null | null | null |
import tracc
import pandas as pd
import numpy as np
class costs:
    """Travel-cost (origin-destination) matrix operations.

    Wraps a long-format pandas dataframe of travel costs and provides
    helpers to fill intrazonal and missing costs, compute generalized
    costs, and convert costs into impedence values.
    """

    def __init__(self,
        travelcosts_df,
        columns = None
        ):
        """
        Inputs data and prunes columns if desired

        :param travelcosts_df: long-format pandas dataframe of OD travel costs
        :param columns: optional list of column names to keep
        """
        if columns is not None:
            self.data = travelcosts_df[columns]
        else:
            self.data = travelcosts_df

    def intrazonal(self,
        cost_column,
        origin_column,
        destination_column,
        method = "constant",
        value = 0,
        polygon_file = None,
        polygon_id = None
        ):
        """
        Computes and updates intrazonal travel cost in a travel costs matrix. The output will include a travel cost between any origin or destination location in the matrix to itself.

        Parameters
        ----------
        cost_column : column name for travel costs
        origin_column : column name for origin IDs
        destination_column : column name for destination IDs
        method : "constant" applies a single @value to all intrazonal travel costs. "radius" applies a cost which is proportional to the radius of a circle with the same area as its input polygon
        value : parameters for the method
        polygon_file : file path to an input spatial polygon (e.g. geojson) if needed (it is for method = "radius")
        polygon_id : ID field for the polygon_file needed for joining to the cost matrix
        """

        # making sure ID columns are strings for a merge later on
        self.data[origin_column] = self.data[origin_column].astype(str)
        self.data[destination_column] = self.data[destination_column].astype(str)

        # getting set of unique locations in the dataset
        locations = list(self.data[origin_column].unique()) + list(self.data[destination_column].unique())
        locations = list(set(locations))

        if method == "constant":
            new_times = [value] * len(locations)
            df = pd.DataFrame(
                list(zip(locations, locations, new_times)),
                columns =[origin_column, destination_column, cost_column + "_i"])

        elif method == "radius":
            from tracc.spatial import radius
            # compute based on the equivalent radius of each polygon
            df = radius(polygon_file,polygon_id)
            df[origin_column] = df[polygon_id]
            df[destination_column] = df[polygon_id]
            del df[polygon_id]
            df[cost_column + "_i"] = value * df["radius"]
            del df["radius"]

        else:
            raise Exception("Method can only be 'constant' or 'radius'")

        df[origin_column] = df[origin_column].astype(str)
        df[destination_column] = df[destination_column].astype(str)

        # join in the newly created intrazonal travel times
        self.data = pd.merge(self.data, df, how='outer', left_on=[origin_column, destination_column], right_on = [origin_column, destination_column])

        # replace the older intrazonal travel times
        self.data[cost_column] = np.where((self.data[cost_column + "_i"] >= 0),self.data[cost_column + "_i"],self.data[cost_column])
        del self.data[cost_column + "_i"]

    def fill_missing_costs(
        self,
        cost_column,
        origin_column,
        destination_column,
        spatial_file_path,
        spatial_file_id,
        where = "origin",
        weight_type = "Queen"
        ):
        """
        Completes an OD matrix by filling locations that were missing from the original matrix, based on a neighbourhood spatial weights matrix (of @weight_type, e.g. "Queen" or "Rook"). For example if an origin zone has no travel costs, it presumes its travel costs to destinations are the average of the same costs of its neighbouring zones.
        """

        from tracc.spatial import area

        # get list of zones which are missing from the input costs table
        dfz = area(spatial_file_path, spatial_file_id)
        dfz[spatial_file_id] = dfz[spatial_file_id].astype(str)
        self.data[origin_column] = self.data[origin_column].astype(str)
        li1 = list(self.data[origin_column].unique())
        li2 = list(dfz[spatial_file_id].unique())
        missing = [x for x in li2 if x not in li1]
        del li1,li2

        # nothing to fill
        if len(missing) == 0:
            return None

        if where == "origin":

            # get neighbours for each missing zone
            # (bug fix: previously hardcoded "Queen", ignoring @weight_type)
            from tracc.spatial import get_neighbours
            neighbours = get_neighbours(spatial_file_path, weight_type, spatial_file_id)

            new_times = []

            # for each zone, compute average travel times to other zones based on neighbours
            for location in missing:
                locneigh = neighbours[location]
                temp = self.data[self.data[origin_column].isin(locneigh)]
                temp = pd.DataFrame(temp.groupby([destination_column], as_index=False)[cost_column].mean())
                temp[origin_column] = location
                new_times.append(temp)

            # combine the outputs, and concat to the input times
            new_times = pd.concat(new_times)
            self.data = pd.concat([self.data, new_times])

        elif where == "destination":

            # get neighbours for each missing zone
            # (bug fix: previously hardcoded "Queen", ignoring @weight_type)
            from tracc.spatial import get_neighbours
            neighbours = get_neighbours(spatial_file_path, weight_type, spatial_file_id)

            new_times = []

            # for each zone, compute average travel times from other zones based on neighbours
            for location in missing:
                locneigh = neighbours[location]
                temp = self.data[self.data[destination_column].isin(locneigh)]
                temp = pd.DataFrame(temp.groupby([origin_column], as_index=False)[cost_column].mean())
                temp[destination_column] = location
                new_times.append(temp)

            # combine the outputs, and concat to the input times
            new_times = pd.concat(new_times)
            self.data = pd.concat([self.data, new_times])

        else:
            raise Exception("Input paramater @where should either be 'origin' or 'destination'")

    def generalized_cost(
        self,
        columns,
        coefficients,
        exponents = None,
        prune_output = True,
        output_cost_name = "GC"
        ):
        """
        Computes generalized costs as
        GC = intercept + sum_i coefficients[i] * columns[i] ** exponents[i],
        where the intercept is the last element of @coefficients.
        """

        # need to add a column check warning, and make the intercept = 0 if none is provided

        # set all exponents as 1 if none are inputted
        if exponents is None:
            exponents = [1] * len(columns)

        # compute the generalized cost value, starting from the intercept
        self.data[output_cost_name] = coefficients[len(coefficients) - 1]
        i = 0
        while i < len(columns):
            self.data[output_cost_name] = self.data[output_cost_name] + coefficients[i] * self.data[columns[i]] ** exponents[i]
            i += 1

        # delete initital cost columns if desired
        if prune_output is True:
            for col in list(set(columns)):
                del self.data[col]

    def impedence_calc(
        self,
        cost_column,
        impedence_func,
        impedence_func_params,
        prune_output = False,
        output_col_name = "fCij"
        ):
        """
        Measures impedence given input of travel cost and selected impedence function and parameters

        # To Do: add in more impedence function options
        """

        if impedence_func == "cumulative":
            self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.cumulative,args = (impedence_func_params,))
        elif impedence_func == "linear":
            self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.linear,args = (impedence_func_params,))
        elif impedence_func == "exponential":
            self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.exponential,args = (impedence_func_params,))
        else:
            raise Exception("Please select an appropriate decay function")

        if prune_output is True:
            del self.data[cost_column]

    def impedence_combine(self,
        columns,
        how = "product",
        output_col_name = "fCij",
        prune_output = True
        ):
        """
        If there are multiple impedences, and we want to combine them into a single impedence value. This is similar to generalized cost.

        For example, if we have an impedence value for transit travel time, and we also want to remove any trips based on a fare criteria, it can be applied in this way.
        """

        if how == "product":
            self.data[output_col_name] = 1
            i = 0
            while i < len(columns):
                self.data[output_col_name] = self.data[output_col_name] * self.data[columns[i]]
                i += 1

        elif how == "sum":
            self.data[output_col_name] = 0
            i = 0
            while i < len(columns):
                self.data[output_col_name] = self.data[output_col_name] + self.data[columns[i]]
                i += 1

        else:
            raise Exception('the input @how must be one of "product" or "sum"')

    def max_impedence(self,
        columns,
        imp_col_name = "fCij"
        ):
        """
        Reduces the cost table to only include rows with the maximum impedence value for the set of input columns.

        For example, if there are 3 transit trips from i to j, each with a different computed generalized_cost resulting from different route choices, this function will return the row with the one resulting in the greatest impedence value (i.e. lowest generalized cost)
        """

        self.data = self.data.groupby(columns)[imp_col_name].max().reset_index()
class supply:
    """Wraps a dataframe of opportunity (supply) counts by location."""

    def __init__(self,
        supply_df,
        columns = None
        ):
        """Load the supply dataframe, optionally pruned to @columns."""
        self.data = supply_df if columns is None else supply_df[columns]

    def weight(self,
        columns,
        weights,
        weight_col_name = "Oj",
        prune_output = True
        ):
        """Create a weighted linear combination of @columns.

        Can be used to weight destinations by their desirability.

        Parameters
        ----------------
        columns : columns to feed into the weighting
        weights : linear multipliers, one per column
        weight_col_name : output column name
        prune_output : if True, delete the input columns afterwards
        """
        # Validate inputs before touching the dataframe.
        if len(columns) != len(weights):
            raise Exception("Please make sure columns and weights are lists of the same length")
        if len(columns) < 2:
            raise Exception("Can only weight opportunities if 2 or more are inputted")
        if sum(weights) < 0.999 or sum(weights) > 1.001:
            print("WARNING: the inputted weights do not sum to 1.")

        # Accumulate the weighted sum column-by-column.
        self.data[weight_col_name] = 0
        for col, multiplier in zip(columns, weights):
            self.data[weight_col_name] = self.data[weight_col_name] + multiplier * self.data[col]

        if prune_output is True:
            for col in list(set(columns)):
                del self.data[col]
class demand:
    """Wraps a dataframe of demand (e.g. population) counts by location."""

    def __init__(self,
        demand_df,
        columns = None
        ):
        """Load the demand dataframe, optionally pruned to @columns."""
        if columns is None:
            self.data = demand_df
        else:
            self.data = demand_df[columns]

    def weight(self,
        columns,
        weights,
        weight_col_name = "Pi",
        prune_output = True
        ):
        """Create a weighted linear combination of @columns.

        Can be used to weight population groups by their propensity to
        travel to certain activity types.

        Parameters
        ----------------
        columns : columns to feed into the weighting
        weights : linear multipliers, one per column
        weight_col_name : output column name
        prune_output : if True, delete the input columns afterwards
        """
        # Validate inputs before touching the dataframe.
        if len(columns) != len(weights):
            raise Exception("Please make sure columns and weights are lists of the same length")
        if len(columns) < 2:
            raise Exception("Can only weight opportunities if 2 or more are inputted")
        if sum(weights) < 0.999 or sum(weights) > 1.001:
            print("WARNING: the inputted weights do not sum to 1.")

        # Accumulate the weighted sum column-by-column.
        self.data[weight_col_name] = 0
        for col, multiplier in zip(columns, weights):
            self.data[weight_col_name] += multiplier * self.data[col]

        if prune_output is True:
            for col in list(set(columns)):
                del self.data[col]
class accessibility:
    """Joins travel costs with supply and/or demand tables and computes
    accessibility measures (potential, passive, minimum travel cost)."""

    def __init__(self,
        travelcosts_df,
        supply_df,
        demand_df = None,
        travelcosts_ids = None,
        supply_ids = "destination_id",
        demand_ids = None
        ):
        """
        Parameters
        ----------
        travelcosts_df : a pandas dataframe containing travel costs from a set of locations (e.g. origins) to another set of locations (e.g. destinations). Data should be in a long table format:

        origin_id | destination_id | travel_cost_1 | travel_cost_2 (optional) | etc (optional)

        supply_df : a pandas dataframe containing the number of opportunities (e.g. supply), relational to the destination IDs in travelcosts_df

        demand_df : a pandas dataframe containing the number of agents competing for opportunities (e.g. demand), relational to the origin IDs in travelcosts_df. This is optional since several accessibility measures do not account for demand

        travelcosts_ids : a two item list of the column names for the origin and destination IDs in the travelcosts_df table (defaults to ["origin_id", "destination_id"])

        supply_ids : a single variable string for the destination ID in the supply_df table

        demand_ids : a single variable string for the origin ID in the demand_df table. This is optional since several accessibility measures do not account for demand
        """

        # Bug fix: the default used to be a mutable list literal in the
        # signature, shared across every instance; use a None sentinel.
        if travelcosts_ids is None:
            travelcosts_ids = ["origin_id", "destination_id"]

        self.travelcosts_ids = travelcosts_ids
        self.supply_ids = supply_ids
        self.demand_ids = demand_ids

        if demand_df is None and supply_df is None:
            raise Exception("Please input a supply_df or a demand_df")

        # setting ID columns to strings to aid merging
        travelcosts_df[travelcosts_ids[0]] = travelcosts_df[travelcosts_ids[0]].astype(str)
        travelcosts_df[travelcosts_ids[1]] = travelcosts_df[travelcosts_ids[1]].astype(str)

        # join supply data to the travel costs
        if supply_df is not None and demand_df is None:
            supply_df[supply_ids] = supply_df[supply_ids].astype(str)
            self.data = pd.merge(
                travelcosts_df,
                supply_df,
                left_on=travelcosts_ids[1],
                right_on=self.supply_ids,
                how = 'left'
                )
        # join demand data only, if no supply was given
        elif demand_df is not None and supply_df is None:
            demand_df[demand_ids] = demand_df[demand_ids].astype(str)
            self.data = pd.merge(
                travelcosts_df,
                demand_df,
                left_on=travelcosts_ids[0],
                right_on=self.demand_ids,
                how = 'left'
                )
        # both supply and demand were given: join each in turn
        else:
            supply_df[supply_ids] = supply_df[supply_ids].astype(str)
            demand_df[demand_ids] = demand_df[demand_ids].astype(str)
            self.data = pd.merge(
                travelcosts_df,
                supply_df,
                left_on=travelcosts_ids[1],
                right_on=self.supply_ids,
                how = 'left'
                )
            self.data = pd.merge(
                self.data,
                demand_df,
                left_on=travelcosts_ids[0],
                right_on=self.demand_ids,
                how = 'left'
                )

    def potential(self, opportunity, impedence, output_col_name = None):
        """
        Measures potential accessibility to destinations

        Parameters
        ----------
        opportunity : a string indicating the column name for which opportunity we are measuring access to (e.g. jobs, grocery stores, etc.). This column should be in the supply_df dataframe

        impedence : column from the travel costs object to weight opportunities by

        output_col_name : a string for the column name of the output accessibility measure

        Output
        ----------
        A pandas dataframe with the first column with the IDs of the origin point (self.travelcosts_ids[0]), and the second column accessibility measures based on the input parameters.
        """

        # set the output name for the accessibility measure
        if output_col_name is None:
            A_col_name = "A_" + opportunity + "_" + impedence
        else:
            A_col_name = output_col_name

        # multiply the opportunity by the impedence
        self.data[A_col_name] = self.data[opportunity] * self.data[impedence]

        # sum by the origin locations, then drop the temporary column
        Ai = self.data.groupby(self.travelcosts_ids[0])[[A_col_name]].sum().reset_index()

        del self.data[A_col_name]

        return Ai

    def passive(self, population, impedence, output_col_name = None):
        """
        Measures passive accessibility to destinations

        Parameters
        ----------
        population : a string indicating the column name for which population we are measuring access to (e.g. overall population, employed population, etc.). This column should be in the demand_df dataframe

        impedence : column from the travel costs object to weight opportunities by

        output_col_name : a string for the column name of the output accessibility measure

        Output
        ----------
        A pandas dataframe with the first column with the IDs of the destination point (self.travelcosts_ids[1]), and the second column accessibility measures based on the input parameters.
        """

        # set the output name for the accessibility measure
        if output_col_name is None:
            A_col_name = "A_" + population + "_" + impedence
        else:
            A_col_name = output_col_name

        # multiply the population by the impedence
        self.data[A_col_name] = self.data[population] * self.data[impedence]

        # sum by the destination locations, then drop the temporary column
        Ai = self.data.groupby(self.travelcosts_ids[1])[[A_col_name]].sum().reset_index()

        del self.data[A_col_name]

        return Ai

    def mintravelcost(self, travelcost, opportunity, min_n, output_col_name = None):
        """
        Minimum travel cost needed to reach @min_n opportunities.

        Parameters
        ----------
        opportunity : a string indicating the column name for which opportunity we are measuring access to (e.g. jobs, grocery stores, etc.). This column should be in the supply_df dataframe

        travelcost : a string indicating the column name for which travel cost shall be used (e.g. travel time, monetary cost, etc.). This column should be in the travelcosts_df dataframe

        min_n : an int indicating the number of desired reachable opportunities (e.g. 1 library, 3 grocery stores, 10k jobs, etc.)

        output_col_name : a string for the column name of the output accessibility measure

        Output
        ---------
        A pandas dataframe with the first column with the IDs of the origin point (self.travelcosts_ids[0]), and the second column are the accessibility measures based on the input parameters.
        """

        # set the output name for the accessibility measure
        if output_col_name is None:
            A_col_name = "A_mintravelcost_" + str(travelcost) + "_" + str(opportunity) + "_" + str(min_n)
        else:
            A_col_name = output_col_name

        # internal function: sort by cost, accumulate opportunities, and
        # return the smallest cost at which the cumulative total reaches n
        def get_min(df, tc, o, n):
            df = df.sort_values(by=[tc], ascending=True)
            df["cumsum"] = df[o].cumsum()
            df = df[df["cumsum"] >= n]
            return df[travelcost].min()

        # generating the accessibility measure
        out = pd.DataFrame(self.data.groupby(self.travelcosts_ids[0]).apply(get_min, tc = travelcost, o = opportunity, n = min_n))

        # setting the column name of the output
        out.columns = [A_col_name]

        return out
class summary:
    """
    Summary statistics of accessibility measures, typically broken down by
    population group. Useful for assessing distributions and the equity of
    transport networks.
    """

    def __init__(
        self,
        accessibility_df,
        summary_vars,
        accessibility_id = "id",
        summary_vars_id = "id"
        ):
        # attach the grouping/summary variables to the accessibility table
        self.data = pd.merge(
            accessibility_df,
            summary_vars,
            left_on=accessibility_id,
            right_on=summary_vars_id,
            how = 'left'
        )

    def weighted_mean(self, access_var, group_var):
        """Population-weighted mean of an accessibility column."""
        return tracc.statistics.weighted_mean(self.data, access_var, group_var)

    def weighted_var(self, access_var, group_var):
        """Population-weighted variance of an accessibility column."""
        return tracc.statistics.weighted_var(self.data, access_var, group_var)

    def weighted_sd(self, access_var, group_var):
        """Population-weighted standard deviation of an accessibility column."""
        return tracc.statistics.weighted_sd(self.data, access_var, group_var)

    def weighted_CV(self, access_var, group_var):
        """Population-weighted coefficient of variation."""
        return tracc.statistics.weighted_CV(self.data, access_var, group_var)

    def weighted_Gini(self, access_var, group_var):
        """Population-weighted Gini coefficient of accessibility."""
        return tracc.statistics.weighted_Gini(self.data, access_var, group_var)

    def quantiles(self, access_var, group_vars, nbins = 10, result = "percent"):
        """
        Summarize population groups by weighted quantile bins of
        accessibility, returned either as counts or as shares per bin.
        """
        # name for the quantile-bin column
        bin_col = 'q' + str(nbins) + "_" + (group_vars[0])
        # assign each row to a population-weighted quantile bin
        binned = pd.DataFrame(tracc.statistics.weighted_qcut(self.data[access_var], self.data[group_vars[0]], nbins))
        binned.columns = [bin_col]
        self.data = self.data.join(binned, how='outer')
        # total each group variable within each bin
        totals = self.data.groupby([bin_col])[group_vars].sum()
        if result == "count":
            return totals
        elif result == "percent":
            for var in group_vars:
                totals[var] = totals[var] / totals[var].sum()
            return totals
| 33.974777
| 300
| 0.619197
|
import tracc
import pandas as pd
import numpy as np
class costs:
    """
    Wraps an origin-destination travel cost table and provides utilities
    for cleaning it (intrazonal values, missing zones) and for deriving
    impedance ("impedence" in this API's spelling) columns from the costs.
    """

    def __init__(self,
        travelcosts_df,
        columns = None
        ):
        """
        travelcosts_df : pandas DataFrame of origin-destination travel costs
        columns : optional list of column names to keep
        """
        if columns is not None:
            self.data = travelcosts_df[columns]
        else:
            self.data = travelcosts_df

    def intrazonal(self,
        cost_column,
        origin_column,
        destination_column,
        method = "constant",
        value = 0,
        polygon_file = None,
        polygon_id = None
        ):
        """
        Fill in intrazonal (origin == destination) travel costs.

        method : "constant" assigns `value` to every intrazonal pair;
            "radius" assigns `value` times each zone's radius, computed from
            `polygon_file` / `polygon_id` via tracc.spatial.radius.
        Raises an Exception for any other method.
        """
        # IDs are compared as strings throughout
        self.data[origin_column] = self.data[origin_column].astype(str)
        self.data[destination_column] = self.data[destination_column].astype(str)

        # every zone appearing as an origin or a destination
        locations = list(self.data[origin_column].unique()) + list(self.data[destination_column].unique())
        locations = list(set(locations))

        if method == "constant":
            new_times = [value] * len(locations)
            df = pd.DataFrame(
                list(zip(locations, locations, new_times)),
                columns =[origin_column, destination_column, cost_column + "_i"])
        elif method == "radius":
            from tracc.spatial import radius
            df = radius(polygon_file, polygon_id)
            df[origin_column] = df[polygon_id]
            df[destination_column] = df[polygon_id]
            del df[polygon_id]
            df[cost_column + "_i"] = value * df["radius"]
            del df["radius"]
        else:
            raise Exception("Method can only be 'constant' or 'radius'")

        df[origin_column] = df[origin_column].astype(str)
        df[destination_column] = df[destination_column].astype(str)

        # outer-join the intrazonal rows; any computed intrazonal value
        # (cost_column + "_i") overrides the original cost
        self.data = pd.merge(self.data, df, how='outer', left_on=[origin_column, destination_column], right_on = [origin_column, destination_column])
        self.data[cost_column] = np.where((self.data[cost_column + "_i"] >= 0), self.data[cost_column + "_i"], self.data[cost_column])
        del self.data[cost_column + "_i"]

    def fill_missing_costs(
        self,
        cost_column,
        origin_column,
        destination_column,
        spatial_file_path,
        spatial_file_id,
        where = "origin",
        weight_type = "Queen"
        ):
        """
        Estimate costs for zones present in the spatial file but absent from
        the cost table, as the mean cost of their spatial neighbours.

        where : "origin" fills zones missing as origins; "destination" fills
            zones missing as destinations.
        weight_type : contiguity type passed to tracc.spatial.get_neighbours
            (fix: this parameter was previously ignored and "Queen" was
            hard-coded).
        """
        # validate `where` up front and pick the column the fill applies to
        # (fix: the missing list was previously always computed from the
        # origin column, even for where == "destination")
        if where == "origin":
            id_column = origin_column
            other_column = destination_column
        elif where == "destination":
            id_column = destination_column
            other_column = origin_column
        else:
            raise Exception("Input paramater @where should either be 'origin' or 'destination'")

        from tracc.spatial import area, get_neighbours

        # zones in the spatial file
        dfz = area(spatial_file_path, spatial_file_id)
        dfz[spatial_file_id] = dfz[spatial_file_id].astype(str)
        self.data[id_column] = self.data[id_column].astype(str)

        # zones in the spatial file that are absent from the cost table
        present = set(self.data[id_column].unique())
        missing = [x for x in dfz[spatial_file_id].unique() if x not in present]
        if len(missing) == 0:
            return None

        neighbours = get_neighbours(spatial_file_path, weight_type, spatial_file_id)

        new_rows = []
        for location in missing:
            locneigh = neighbours[location]
            # mean cost of the missing zone's neighbours, per opposite zone
            temp = self.data[self.data[id_column].isin(locneigh)]
            temp = pd.DataFrame(temp.groupby([other_column], as_index=False)[cost_column].mean())
            temp[id_column] = location
            new_rows.append(temp)
        self.data = pd.concat([self.data, pd.concat(new_rows)])

    def generalized_cost(
        self,
        columns,
        coefficients,
        exponents = None,
        prune_output = True,
        output_cost_name = "GC"
        ):
        """
        Combine several cost columns into a single generalized cost:
        GC = coefficients[-1] + sum_i coefficients[i] * columns[i] ** exponents[i]
        (the LAST coefficient is the constant term).
        """
        if exponents is None:
            exponents = [1] * len(columns)
        # start from the constant term, then add each weighted component
        self.data[output_cost_name] = coefficients[len(coefficients) - 1]
        for i, col in enumerate(columns):
            self.data[output_cost_name] = self.data[output_cost_name] + coefficients[i] * self.data[col] ** exponents[i]
        if prune_output is True:
            for col in set(columns):
                del self.data[col]

    def impedence_calc(
        self,
        cost_column,
        impedence_func,
        impedence_func_params,
        prune_output = False,
        output_col_name = "fCij"
        ):
        """
        Apply a distance-decay function to a cost column.

        impedence_func : one of "cumulative", "linear", "exponential"
            (dispatched to the matching tracc.decay function).
        """
        if impedence_func == "cumulative":
            self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.cumulative, args = (impedence_func_params,))
        elif impedence_func == "linear":
            self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.linear, args = (impedence_func_params,))
        elif impedence_func == "exponential":
            self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.exponential, args = (impedence_func_params,))
        else:
            raise Exception("Please select an appropriate decay function")
        if prune_output is True:
            del self.data[cost_column]

    def impedence_combine(self,
        columns,
        how = "product",
        output_col_name = "fCij",
        prune_output = True
        ):
        """
        Combine several impedance columns into one, by product or by sum.
        (NOTE: prune_output is accepted for interface parity but not acted
        on here, matching the original behavior.)
        """
        if how == "product":
            self.data[output_col_name] = 1
            for col in columns:
                self.data[output_col_name] = self.data[output_col_name] * self.data[col]
        elif how == "sum":
            self.data[output_col_name] = 0
            for col in columns:
                self.data[output_col_name] = self.data[output_col_name] + self.data[col]
        else:
            raise Exception('the input @how must be one of "product" or "sum"')

    def max_impedence(self,
        columns,
        imp_col_name = "fCij"
        ):
        """Collapse duplicate rows, keeping the maximum impedance per group of `columns`."""
        self.data = self.data.groupby(columns)[imp_col_name].max().reset_index()
class supply:
    """Table of opportunities (the supply side) at destination locations."""

    def __init__(self,
        supply_df,
        columns = None
        ):
        # keep only the requested columns when a subset is given
        self.data = supply_df if columns is None else supply_df[columns]

    def weight(self,
        columns,
        weights,
        weight_col_name = "Oj",
        prune_output = True
        ):
        """
        Combine several opportunity columns into a single weighted column.
        Requires at least two columns and one weight per column; warns when
        the weights do not sum to 1.
        """
        if len(columns) != len(weights):
            raise Exception("Please make sure columns and weights are lists of the same length")
        if len(columns) < 2:
            raise Exception("Can only weight opportunities if 2 or more are inputted")
        total = sum(weights)
        if total < 0.999 or total > 1.001:
            print("WARNING: the inputted weights do not sum to 1.")
        # weighted sum across the opportunity columns
        combined = 0
        for col, w in zip(columns, weights):
            combined = combined + w * self.data[col]
        self.data[weight_col_name] = combined
        if prune_output is True:
            for col in set(columns):
                del self.data[col]
class demand:
    """Table of population (the demand side) at origin locations."""

    def __init__(self,
        demand_df,
        columns = None
        ):
        # keep only the requested columns when a subset is given
        self.data = demand_df if columns is None else demand_df[columns]

    def weight(self,
        columns,
        weights,
        weight_col_name = "Pi",
        prune_output = True
        ):
        """
        Combine several population columns into a single weighted column.
        Requires at least two columns and one weight per column; warns when
        the weights do not sum to 1.
        """
        if len(columns) != len(weights):
            raise Exception("Please make sure columns and weights are lists of the same length")
        if len(columns) < 2:
            raise Exception("Can only weight opportunities if 2 or more are inputted")
        total = sum(weights)
        if total < 0.999 or total > 1.001:
            print("WARNING: the inputted weights do not sum to 1.")
        # weighted sum across the population columns
        combined = 0
        for col, w in zip(columns, weights):
            combined = combined + w * self.data[col]
        self.data[weight_col_name] = combined
        if prune_output is True:
            for col in set(columns):
                del self.data[col]
class accessibility:
    """
    Joins an origin-destination travel cost table with a supply
    (opportunity) table and/or a demand (population) table, then computes
    accessibility measures from the joined data.
    """
    def __init__(self,
        travelcosts_df,
        supply_df,
        demand_df = None,
        travelcosts_ids = ["origin_id","destination_id"],
        supply_ids = "destination_id",
        demand_ids = None
        ):
        # NOTE(review): the list default arg is shared across calls; fine
        # as long as callers never mutate it — confirm.
        self.travelcosts_ids = travelcosts_ids
        self.supply_ids = supply_ids
        self.demand_ids = demand_ids
        if demand_df is None and supply_df is None:
            raise Exception("Please input a supply_df or a demand_df")
        # IDs are compared as strings throughout
        travelcosts_df[travelcosts_ids[0]] = travelcosts_df[travelcosts_ids[0]].astype(str)
        travelcosts_df[travelcosts_ids[1]] = travelcosts_df[travelcosts_ids[1]].astype(str)
        if supply_df is not None and demand_df is None:
            # supply only: attach opportunities at the destination end
            supply_df[supply_ids] = supply_df[supply_ids].astype(str)
            self.data = pd.merge(
                travelcosts_df,
                supply_df,
                left_on=travelcosts_ids[1],
                right_on=self.supply_ids,
                how = 'left'
            )
        elif demand_df is not None and supply_df is None:
            # demand only: attach population at the origin end
            demand_df[demand_ids] = demand_df[demand_ids].astype(str)
            self.data = pd.merge(
                travelcosts_df,
                demand_df,
                left_on=travelcosts_ids[0],
                right_on=self.demand_ids,
                how = 'left'
            )
        else:
            # both: attach supply at destinations, then demand at origins
            supply_df[supply_ids] = supply_df[supply_ids].astype(str)
            demand_df[demand_ids] = demand_df[demand_ids].astype(str)
            self.data = pd.merge(
                travelcosts_df,
                supply_df,
                left_on=travelcosts_ids[1],
                right_on=self.supply_ids,
                how = 'left'
            )
            self.data = pd.merge(
                self.data,
                demand_df,
                left_on=travelcosts_ids[0],
                right_on=self.demand_ids,
                how = 'left'
            )

    def potential(self, opportunity, impedence, output_col_name = None):
        """
        Potential accessibility: for each origin, the sum over destinations
        of opportunity * impedance. Returns a DataFrame of origin IDs and
        the accessibility column.
        """
        if output_col_name is None:
            A_col_name = "A_" + opportunity + "_" + impedence
        else:
            A_col_name = output_col_name
        # weight each destination's opportunities by the impedance ...
        self.data[A_col_name] = self.data[opportunity] * self.data[impedence]
        # ... then total by origin
        Ai = self.data.groupby(self.travelcosts_ids[0])[[A_col_name]].sum().reset_index()
        # drop the temporary product column from the joined table
        del self.data[A_col_name]
        return Ai

    def passive(self, population, impedence, output_col_name = None):
        """
        Passive accessibility: for each destination, the sum over origins
        of population * impedance (i.e. the population that can reach the
        destination, impedance-weighted).
        """
        if output_col_name is None:
            A_col_name = "A_" + population + "_" + impedence
        else:
            A_col_name = output_col_name
        self.data[A_col_name] = self.data[population] * self.data[impedence]
        # grouped by the DESTINATION id (travelcosts_ids[1]) here
        Ai = self.data.groupby(self.travelcosts_ids[1])[[A_col_name]].sum().reset_index()
        del self.data[A_col_name]
        return Ai

    def mintravelcost(self, travelcost, opportunity, min_n, output_col_name = None):
        """
        For each origin, the minimum travel cost at which at least `min_n`
        opportunities are reachable.
        """
        if output_col_name is None:
            A_col_name = "A_mintravelcost_" + str(travelcost) + "_" + str(opportunity) + "_" + str(min_n)
        else:
            A_col_name = output_col_name
        # min cost where the cumulative opportunity count reaches n
        # (note: the return line closes over `travelcost`, which equals tc)
        def get_min(df, tc, o, n):
            df = df.sort_values(by=[tc], ascending=True)
            df["cumsum"] = df[o].cumsum()
            df = df[df["cumsum"] >= n]
            return df[travelcost].min()
        out = pd.DataFrame(self.data.groupby(self.travelcosts_ids[0]).apply(get_min, tc = travelcost, o = opportunity, n = min_n))
        out.columns = [A_col_name]
        return out
class summary:
    """
    Summary statistics of accessibility, usually with respect to different
    population groups; usable for assessing distributions and equity.
    """
    def __init__(
        self,
        accessibility_df,
        summary_vars,
        accessibility_id = "id",
        summary_vars_id = "id"
        ):
        # attach the grouping/summary variables to the accessibility table
        self.data = pd.merge(
            accessibility_df,
            summary_vars,
            left_on=accessibility_id,
            right_on=summary_vars_id,
            how = 'left'
        )

    def weighted_mean(self, access_var, group_var):
        # population-weighted mean of an accessibility column
        return tracc.statistics.weighted_mean(self.data, access_var, group_var)

    def weighted_var(self, access_var, group_var):
        # population-weighted variance
        return tracc.statistics.weighted_var(self.data, access_var, group_var)

    def weighted_sd(self, access_var, group_var):
        # population-weighted standard deviation
        return tracc.statistics.weighted_sd(self.data, access_var, group_var)

    def weighted_CV(self, access_var, group_var):
        # population-weighted coefficient of variation
        return tracc.statistics.weighted_CV(self.data, access_var, group_var)

    def weighted_Gini(self, access_var, group_var):
        # population-weighted Gini coefficient
        return tracc.statistics.weighted_Gini(self.data, access_var, group_var)

    def quantiles(self, access_var, group_vars, nbins = 10, result = "percent"):
        """
        Summarize population groups by weighted quantile bins of
        accessibility; returns counts or per-bin shares.
        """
        # assign each observation a bin, based on nbins
        dfq = pd.DataFrame( tracc.statistics.weighted_qcut(self.data[access_var], self.data[group_vars[0]], nbins))
        # create a specific name for the quantile column
        q_col_name = 'q' + str(nbins) + "_" + (group_vars[0])
        dfq.columns = [q_col_name]
        # NOTE: mutates self.data by joining the bin assignments onto it
        self.data = self.data.join(dfq, how='outer')
        # group by each bin, summarize each group variable
        dfq = self.data.groupby([q_col_name])[group_vars].sum()
        # return as counts or percent (any other value returns None)
        if result == "count":
            return dfq
        elif result == "percent":
            for var in group_vars:
                dfq[var] = dfq[var] / dfq[var].sum()
            return dfq
| true
| true
|
f70e3a4ee58565709da792d106ab2ed75b268330
| 2,283
|
py
|
Python
|
rlgraph/components/memories/__init__.py
|
RLGraph/RLGraph
|
428fc136a9a075f29a397495b4226a491a287be2
|
[
"Apache-2.0"
] | 290
|
2018-07-29T15:30:57.000Z
|
2022-03-19T02:46:53.000Z
|
rlgraph/components/memories/__init__.py
|
RLGraph/RLGraph
|
428fc136a9a075f29a397495b4226a491a287be2
|
[
"Apache-2.0"
] | 76
|
2018-10-19T08:42:01.000Z
|
2020-05-03T08:34:21.000Z
|
rlgraph/components/memories/__init__.py
|
RLGraph/RLGraph
|
428fc136a9a075f29a397495b4226a491a287be2
|
[
"Apache-2.0"
] | 41
|
2018-10-30T07:05:05.000Z
|
2022-03-01T08:28:24.000Z
|
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from rlgraph import get_backend
from rlgraph.components.memories.memory import Memory
from rlgraph.components.memories.fifo_queue import FIFOQueue
from rlgraph.components.memories.prioritized_replay import PrioritizedReplay
from rlgraph.components.memories.replay_memory import ReplayMemory
from rlgraph.components.memories.ring_buffer import RingBuffer
from rlgraph.components.memories.mem_prioritized_replay import MemPrioritizedReplay
# TODO backend reorg.
# Register the concrete Memory implementations resolvable by lookup string,
# keyed by the aliases users may pass (e.g. Memory from spec "replay").
if get_backend() == "tf":
    # TensorFlow backend: TF-native FIFO queue and prioritized replay,
    # plus the pure in-memory prioritized replay.
    Memory.__lookup_classes__ = dict(
        fifo=FIFOQueue,
        fifoqueue=FIFOQueue,
        prioritized=PrioritizedReplay,
        prioritizedreplay=PrioritizedReplay,
        prioritizedreplaybuffer=PrioritizedReplay,
        mem_prioritized_replay=MemPrioritizedReplay,
        replay=ReplayMemory,
        replaybuffer=ReplayMemory,
        replaymemory=ReplayMemory,
        ringbuffer=RingBuffer
    )
elif get_backend() == "pytorch":
    # PyTorch backend: prioritized aliases map to the in-memory
    # implementation; no FIFO queue aliases are registered here.
    Memory.__lookup_classes__ = dict(
        prioritized=MemPrioritizedReplay,
        prioritizedreplay=MemPrioritizedReplay,
        prioritizedreplaybuffer=MemPrioritizedReplay,
        mem_prioritized_replay=MemPrioritizedReplay,
        replay=ReplayMemory,
        replaybuffer=ReplayMemory,
        replaymemory=ReplayMemory,
        ringbuffer=RingBuffer
    )

# Plain replay memory is the default when no memory type is specified.
Memory.__default_constructor__ = ReplayMemory

# NOTE(review): if get_backend() returns neither "tf" nor "pytorch" and
# Memory.__lookup_classes__ is not set elsewhere, the line below would
# raise — confirm the set of supported backends upstream.
__all__ = ["Memory", "PrioritizedReplay"] + \
          list(set(map(lambda x: x.__name__, Memory.__lookup_classes__.values())))
| 39.362069
| 83
| 0.74113
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from rlgraph import get_backend
from rlgraph.components.memories.memory import Memory
from rlgraph.components.memories.fifo_queue import FIFOQueue
from rlgraph.components.memories.prioritized_replay import PrioritizedReplay
from rlgraph.components.memories.replay_memory import ReplayMemory
from rlgraph.components.memories.ring_buffer import RingBuffer
from rlgraph.components.memories.mem_prioritized_replay import MemPrioritizedReplay
# Register the concrete Memory implementations resolvable by lookup string,
# keyed by the aliases users may pass.
if get_backend() == "tf":
    # TensorFlow backend: TF-native FIFO queue and prioritized replay.
    Memory.__lookup_classes__ = dict(
        fifo=FIFOQueue,
        fifoqueue=FIFOQueue,
        prioritized=PrioritizedReplay,
        prioritizedreplay=PrioritizedReplay,
        prioritizedreplaybuffer=PrioritizedReplay,
        mem_prioritized_replay=MemPrioritizedReplay,
        replay=ReplayMemory,
        replaybuffer=ReplayMemory,
        replaymemory=ReplayMemory,
        ringbuffer=RingBuffer
    )
elif get_backend() == "pytorch":
    # PyTorch backend: prioritized aliases use the in-memory implementation.
    Memory.__lookup_classes__ = dict(
        prioritized=MemPrioritizedReplay,
        prioritizedreplay=MemPrioritizedReplay,
        prioritizedreplaybuffer=MemPrioritizedReplay,
        mem_prioritized_replay=MemPrioritizedReplay,
        replay=ReplayMemory,
        replaybuffer=ReplayMemory,
        replaymemory=ReplayMemory,
        ringbuffer=RingBuffer
    )

# Plain replay memory is the default when no type is specified.
Memory.__default_constructor__ = ReplayMemory

__all__ = ["Memory", "PrioritizedReplay"] + \
          list(set(map(lambda x: x.__name__, Memory.__lookup_classes__.values())))
| true
| true
|
f70e3acd46ac2e732b53b6c2e72d9d4d2ad9cafd
| 5,174
|
py
|
Python
|
tests/test_endpoint.py
|
mister-px/aioauth
|
5b2e0d7856fd7fb154744cd58580953d776a3be2
|
[
"MIT"
] | null | null | null |
tests/test_endpoint.py
|
mister-px/aioauth
|
5b2e0d7856fd7fb154744cd58580953d776a3be2
|
[
"MIT"
] | null | null | null |
tests/test_endpoint.py
|
mister-px/aioauth
|
5b2e0d7856fd7fb154744cd58580953d776a3be2
|
[
"MIT"
] | null | null | null |
import time
from http import HTTPStatus
from typing import Dict, List, Optional, Type
import pytest
from aioauth.storage import BaseStorage
from aioauth.config import Settings
from aioauth.models import Token
from aioauth.requests import Post, Request
from aioauth.server import AuthorizationServer
from aioauth.types import ErrorType, GrantType, RequestMethod
from aioauth.utils import (
catch_errors_and_unavailability,
encode_auth_headers,
generate_token,
)
from .models import Defaults
@pytest.mark.asyncio
async def test_internal_server_error():
    """An unexpected exception inside an endpoint wrapped with
    catch_errors_and_unavailability is surfaced as HTTP 400."""
    class EndpointClass:
        available: Optional[bool] = True

        def __init__(self, available: Optional[bool] = None):
            if available is not None:
                self.available = available

        @catch_errors_and_unavailability
        async def server(self, request):
            # always blows up, to exercise the error handler
            raise Exception()

    e = EndpointClass()
    response = await e.server(Request(method=RequestMethod.POST))
    assert response.status_code == HTTPStatus.BAD_REQUEST
@pytest.mark.asyncio
async def test_invalid_token(server: AuthorizationServer, defaults: Defaults):
    """Introspecting an unknown token string returns HTTP 200 with
    active == False (per RFC 7662, introspection does not error)."""
    user_id = defaults.user_id
    client_id = defaults.client_id
    client_secret = defaults.client_secret
    request_url = "https://localhost"
    # token string that was never issued
    token = "invalid token"

    post = Post(token=token)
    request = Request(
        user_id=user_id,
        url=request_url,
        post=post,
        method=RequestMethod.POST,
        headers=encode_auth_headers(client_id, client_secret),
    )

    response = await server.create_token_introspection_response(request)
    assert not response.content["active"]
    assert response.status_code == HTTPStatus.OK
@pytest.mark.asyncio
async def test_expired_token(
    server: AuthorizationServer, storage: Dict[str, List], defaults: Defaults
):
    """A token issued TOKEN_EXPIRES_IN seconds in the past introspects as
    inactive (HTTP 200, active == False)."""
    user_id = defaults.user_id
    client_id = defaults.client_id
    client_secret = defaults.client_secret
    settings = Settings(INSECURE_TRANSPORT=True)
    token = Token(
        user_id=user_id,
        client_id=client_id,
        expires_in=settings.TOKEN_EXPIRES_IN,
        refresh_token_expires_in=settings.REFRESH_TOKEN_EXPIRES_IN,
        access_token=generate_token(42),
        refresh_token=generate_token(48),
        # issued exactly one lifetime ago, so the token is already expired
        issued_at=int(time.time() - settings.TOKEN_EXPIRES_IN),
        scope=defaults.scope,
    )

    storage["tokens"].append(token)

    post = Post(token=token.access_token)
    request = Request(
        user_id=user_id,
        settings=settings,
        post=post,
        method=RequestMethod.POST,
        headers=encode_auth_headers(client_id, client_secret),
    )

    response = await server.create_token_introspection_response(request)
    assert response.status_code == HTTPStatus.OK
    assert not response.content["active"]
@pytest.mark.asyncio
async def test_valid_token(
    server: AuthorizationServer,
    storage: Dict[str, List],
    defaults: Defaults,
    settings: Settings,
):
    """A stored, unexpired token introspects as active (HTTP 200)."""
    user_id = defaults.user_id
    client_id = defaults.client_id
    client_secret = defaults.client_secret
    # use the pre-seeded token from the storage fixture
    token = storage["tokens"][0]

    post = Post(token=token.access_token)
    request = Request(
        user_id=user_id,
        post=post,
        method=RequestMethod.POST,
        headers=encode_auth_headers(client_id, client_secret),
        settings=settings,
    )

    response = await server.create_token_introspection_response(request)
    assert response.status_code == HTTPStatus.OK
    assert response.content["active"]
@pytest.mark.asyncio
async def test_introspect_revoked_token(
    server: AuthorizationServer,
    storage: Dict[str, List],
    defaults: Defaults,
    settings: Settings,
):
    """Using the refresh grant rotates the token pair; the old access token
    must then introspect as inactive."""
    user_id = defaults.user_id
    client_id = defaults.client_id
    client_secret = defaults.client_secret
    request_url = "https://localhost"
    token = storage["tokens"][0]

    # exchange the refresh token for a new token pair
    post = Post(
        grant_type=GrantType.TYPE_REFRESH_TOKEN,
        refresh_token=token.refresh_token,
    )
    request = Request(
        user_id=user_id,
        settings=settings,
        url=request_url,
        post=post,
        method=RequestMethod.POST,
        headers=encode_auth_headers(client_id, client_secret),
    )
    response = await server.create_token_response(request)
    assert response.status_code == HTTPStatus.OK

    # Check that refreshed token was revoked
    post = Post(token=token.access_token)
    request = Request(
        settings=settings,
        post=post,
        method=RequestMethod.POST,
        headers=encode_auth_headers(client_id, client_secret),
    )
    response = await server.create_token_introspection_response(request)
    assert not response.content["active"], "The refresh_token must be revoked"
@pytest.mark.asyncio
async def test_endpoint_availability(db_class: Type[BaseStorage]):
    """With AVAILABLE=False, the introspection endpoint responds
    HTTP 400 with error temporarily_unavailable."""
    server = AuthorizationServer(storage=db_class())
    request = Request(method=RequestMethod.POST, settings=Settings(AVAILABLE=False))
    response = await server.create_token_introspection_response(request)
    assert response.status_code == HTTPStatus.BAD_REQUEST
    assert response.content["error"] == ErrorType.TEMPORARILY_UNAVAILABLE
| 30.25731
| 84
| 0.71724
|
import time
from http import HTTPStatus
from typing import Dict, List, Optional, Type
import pytest
from aioauth.storage import BaseStorage
from aioauth.config import Settings
from aioauth.models import Token
from aioauth.requests import Post, Request
from aioauth.server import AuthorizationServer
from aioauth.types import ErrorType, GrantType, RequestMethod
from aioauth.utils import (
catch_errors_and_unavailability,
encode_auth_headers,
generate_token,
)
from .models import Defaults
@pytest.mark.asyncio
async def test_internal_server_error():
    """Unexpected endpoint exceptions are converted to HTTP 400."""
    class EndpointClass:
        available: Optional[bool] = True

        def __init__(self, available: Optional[bool] = None):
            if available is not None:
                self.available = available

        @catch_errors_and_unavailability
        async def server(self, request):
            raise Exception()

    e = EndpointClass()
    response = await e.server(Request(method=RequestMethod.POST))
    assert response.status_code == HTTPStatus.BAD_REQUEST
@pytest.mark.asyncio
async def test_invalid_token(server: AuthorizationServer, defaults: Defaults):
    """An unknown token introspects as inactive with HTTP 200."""
    user_id = defaults.user_id
    client_id = defaults.client_id
    client_secret = defaults.client_secret
    request_url = "https://localhost"
    token = "invalid token"
    post = Post(token=token)
    request = Request(
        user_id=user_id,
        url=request_url,
        post=post,
        method=RequestMethod.POST,
        headers=encode_auth_headers(client_id, client_secret),
    )
    response = await server.create_token_introspection_response(request)
    assert not response.content["active"]
    assert response.status_code == HTTPStatus.OK
@pytest.mark.asyncio
async def test_expired_token(
    server: AuthorizationServer, storage: Dict[str, List], defaults: Defaults
):
    """A token issued one full lifetime ago introspects as inactive."""
    user_id = defaults.user_id
    client_id = defaults.client_id
    client_secret = defaults.client_secret
    settings = Settings(INSECURE_TRANSPORT=True)
    token = Token(
        user_id=user_id,
        client_id=client_id,
        expires_in=settings.TOKEN_EXPIRES_IN,
        refresh_token_expires_in=settings.REFRESH_TOKEN_EXPIRES_IN,
        access_token=generate_token(42),
        refresh_token=generate_token(48),
        issued_at=int(time.time() - settings.TOKEN_EXPIRES_IN),
        scope=defaults.scope,
    )
    storage["tokens"].append(token)
    post = Post(token=token.access_token)
    request = Request(
        user_id=user_id,
        settings=settings,
        post=post,
        method=RequestMethod.POST,
        headers=encode_auth_headers(client_id, client_secret),
    )
    response = await server.create_token_introspection_response(request)
    assert response.status_code == HTTPStatus.OK
    assert not response.content["active"]
@pytest.mark.asyncio
async def test_valid_token(
    server: AuthorizationServer,
    storage: Dict[str, List],
    defaults: Defaults,
    settings: Settings,
):
    """A stored, unexpired token introspects as active."""
    user_id = defaults.user_id
    client_id = defaults.client_id
    client_secret = defaults.client_secret
    token = storage["tokens"][0]
    post = Post(token=token.access_token)
    request = Request(
        user_id=user_id,
        post=post,
        method=RequestMethod.POST,
        headers=encode_auth_headers(client_id, client_secret),
        settings=settings,
    )
    response = await server.create_token_introspection_response(request)
    assert response.status_code == HTTPStatus.OK
    assert response.content["active"]
@pytest.mark.asyncio
async def test_introspect_revoked_token(
    server: AuthorizationServer,
    storage: Dict[str, List],
    defaults: Defaults,
    settings: Settings,
):
    """After a refresh-grant exchange, the old access token is inactive."""
    user_id = defaults.user_id
    client_id = defaults.client_id
    client_secret = defaults.client_secret
    request_url = "https://localhost"
    token = storage["tokens"][0]
    # exchange the refresh token for a new token pair
    post = Post(
        grant_type=GrantType.TYPE_REFRESH_TOKEN,
        refresh_token=token.refresh_token,
    )
    request = Request(
        user_id=user_id,
        settings=settings,
        url=request_url,
        post=post,
        method=RequestMethod.POST,
        headers=encode_auth_headers(client_id, client_secret),
    )
    response = await server.create_token_response(request)
    assert response.status_code == HTTPStatus.OK
    # the original access token must now introspect as inactive
    post = Post(token=token.access_token)
    request = Request(
        settings=settings,
        post=post,
        method=RequestMethod.POST,
        headers=encode_auth_headers(client_id, client_secret),
    )
    response = await server.create_token_introspection_response(request)
    assert not response.content["active"], "The refresh_token must be revoked"
@pytest.mark.asyncio
async def test_endpoint_availability(db_class: Type[BaseStorage]):
    """AVAILABLE=False yields HTTP 400 temporarily_unavailable."""
    server = AuthorizationServer(storage=db_class())
    request = Request(method=RequestMethod.POST, settings=Settings(AVAILABLE=False))
    response = await server.create_token_introspection_response(request)
    assert response.status_code == HTTPStatus.BAD_REQUEST
    assert response.content["error"] == ErrorType.TEMPORARILY_UNAVAILABLE
| true
| true
|
f70e3aebcb471040075ff1af77b959a95a47b028
| 76
|
py
|
Python
|
ABC051/ABC051d.py
|
VolgaKurvar/AtCoder
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
[
"Unlicense"
] | null | null | null |
ABC051/ABC051d.py
|
VolgaKurvar/AtCoder
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
[
"Unlicense"
] | null | null | null |
ABC051/ABC051d.py
|
VolgaKurvar/AtCoder
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
[
"Unlicense"
] | null | null | null |
#ABC051d
import sys
# Fast input: rebind the builtin `input` to sys.stdin.readline
# (note: lines returned this way keep their trailing newline).
input = sys.stdin.readline
# Allow deep recursion (competitive-programming boilerplate).
sys.setrecursionlimit(10**6)
| 15.2
| 28
| 0.789474
|
import sys
# Fast input: rebind the builtin `input` to sys.stdin.readline
# (note: lines returned this way keep their trailing newline).
input = sys.stdin.readline
# Allow deep recursion (competitive-programming boilerplate).
sys.setrecursionlimit(10**6)
| true
| true
|
f70e3bfbca3e4580c37fe084878699c2caa37041
| 761
|
py
|
Python
|
migrations/versions/0256_set_postage_tmplt_hstr.py
|
tlwr/notifications-api
|
88a6b7729edb9be41ce3e7c027f1452b7b6d00d2
|
[
"MIT"
] | 51
|
2016-04-03T23:36:17.000Z
|
2022-03-21T20:04:52.000Z
|
migrations/versions/0256_set_postage_tmplt_hstr.py
|
tlwr/notifications-api
|
88a6b7729edb9be41ce3e7c027f1452b7b6d00d2
|
[
"MIT"
] | 1,335
|
2015-12-15T14:28:50.000Z
|
2022-03-30T16:24:27.000Z
|
migrations/versions/0256_set_postage_tmplt_hstr.py
|
tlwr/notifications-api
|
88a6b7729edb9be41ce3e7c027f1452b7b6d00d2
|
[
"MIT"
] | 30
|
2016-01-08T19:05:32.000Z
|
2021-12-20T16:37:23.000Z
|
"""
Revision ID: 0256_set_postage_tmplt_hstr
Revises: 0255_another_letter_org
Create Date: 2019-02-05 14:51:30.808067
"""
from alembic import op
import sqlalchemy as sa
revision = '0256_set_postage_tmplt_hstr'
down_revision = '0255_another_letter_org'
def upgrade():
    """Backfill templates_history.postage from the owning service's postage
    setting, for letter templates only."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.execute(
        """UPDATE templates_history SET postage = services.postage
        FROM services WHERE template_type = 'letter' AND service_id = services.id"""
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert: clear postage on all letter rows in templates_history."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.execute("UPDATE templates_history SET postage = null WHERE template_type = 'letter'")
    # ### end Alembic commands ###
| 26.241379
| 92
| 0.703022
|
from alembic import op
import sqlalchemy as sa
revision = '0256_set_postage_tmplt_hstr'
down_revision = '0255_another_letter_org'
def upgrade():
| true
| true
|
f70e3d7e95f5b635f46315e7f8048937f6857a83
| 6,096
|
py
|
Python
|
Filters/Geometry/Testing/Python/geomFilter.py
|
cclauss/VTK
|
f62a52cce9044159efb4adb7cc0cfd7ec0bc8b6d
|
[
"BSD-3-Clause"
] | 1,755
|
2015-01-03T06:55:00.000Z
|
2022-03-29T05:23:26.000Z
|
Filters/Geometry/Testing/Python/geomFilter.py
|
cclauss/VTK
|
f62a52cce9044159efb4adb7cc0cfd7ec0bc8b6d
|
[
"BSD-3-Clause"
] | 29
|
2015-04-23T20:58:30.000Z
|
2022-03-02T16:16:42.000Z
|
Filters/Geometry/Testing/Python/geomFilter.py
|
cclauss/VTK
|
f62a52cce9044159efb4adb7cc0cfd7ec0bc8b6d
|
[
"BSD-3-Clause"
] | 1,044
|
2015-01-05T22:48:27.000Z
|
2022-03-31T02:38:26.000Z
|
#!/usr/bin/env python
import vtk
# Exercise vtkGeometryFilter on structured, poly, unstructured and
# rectilinear datasets, with and without extent/point-id/cell-id clipping,
# then check that cell data is correctly mapped to the output polydata.
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
import sys
# create pipeline - structured grid
#
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin")
pl3d.SetQFileName("" + str(VTK_DATA_ROOT) + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
output = pl3d.GetOutput().GetBlock(0)
# unclipped geometry of the structured grid
gf = vtk.vtkGeometryFilter()
gf.SetInputData(output)
gMapper = vtk.vtkPolyDataMapper()
gMapper.SetInputConnection(gf.GetOutputPort())
gMapper.SetScalarRange(output.GetScalarRange())
gActor = vtk.vtkActor()
gActor.SetMapper(gMapper)
# same grid, additionally clipped by extent, point ids and cell ids
gf2 = vtk.vtkGeometryFilter()
gf2.SetInputData(output)
gf2.ExtentClippingOn()
gf2.SetExtent(10,17,-6,6,23,37)
gf2.PointClippingOn()
gf2.SetPointMinimum(0)
gf2.SetPointMaximum(10000)
gf2.CellClippingOn()
gf2.SetCellMinimum(0)
gf2.SetCellMaximum(7500)
g2Mapper = vtk.vtkPolyDataMapper()
g2Mapper.SetInputConnection(gf2.GetOutputPort())
g2Mapper.SetScalarRange(output.GetScalarRange())
g2Actor = vtk.vtkActor()
g2Actor.SetMapper(g2Mapper)
g2Actor.AddPosition(0,15,0)
# create pipeline - poly data
#
gf3 = vtk.vtkGeometryFilter()
gf3.SetInputConnection(gf.GetOutputPort())
g3Mapper = vtk.vtkPolyDataMapper()
g3Mapper.SetInputConnection(gf3.GetOutputPort())
g3Mapper.SetScalarRange(output.GetScalarRange())
g3Actor = vtk.vtkActor()
g3Actor.SetMapper(g3Mapper)
g3Actor.AddPosition(0,0,15)
# polydata input, clipped by extent, point ids and cell ids
gf4 = vtk.vtkGeometryFilter()
gf4.SetInputConnection(gf2.GetOutputPort())
gf4.ExtentClippingOn()
gf4.SetExtent(10,17,-6,6,23,37)
gf4.PointClippingOn()
gf4.SetPointMinimum(0)
gf4.SetPointMaximum(10000)
gf4.CellClippingOn()
gf4.SetCellMinimum(0)
gf4.SetCellMaximum(7500)
g4Mapper = vtk.vtkPolyDataMapper()
g4Mapper.SetInputConnection(gf4.GetOutputPort())
g4Mapper.SetScalarRange(output.GetScalarRange())
g4Actor = vtk.vtkActor()
g4Actor.SetMapper(g4Mapper)
g4Actor.AddPosition(0,15,15)
# create pipeline - unstructured grid
#
s = vtk.vtkSphere()
s.SetCenter(output.GetCenter())
s.SetRadius(100.0)
#everything
eg = vtk.vtkExtractGeometry()
eg.SetInputData(output)
eg.SetImplicitFunction(s)
gf5 = vtk.vtkGeometryFilter()
gf5.SetInputConnection(eg.GetOutputPort())
g5Mapper = vtk.vtkPolyDataMapper()
g5Mapper.SetInputConnection(gf5.GetOutputPort())
g5Mapper.SetScalarRange(output.GetScalarRange())
g5Actor = vtk.vtkActor()
g5Actor.SetMapper(g5Mapper)
g5Actor.AddPosition(0,0,30)
# unstructured grid input, clipped by extent, point ids and cell ids
gf6 = vtk.vtkGeometryFilter()
gf6.SetInputConnection(eg.GetOutputPort())
gf6.ExtentClippingOn()
gf6.SetExtent(10,17,-6,6,23,37)
gf6.PointClippingOn()
gf6.SetPointMinimum(0)
gf6.SetPointMaximum(10000)
gf6.CellClippingOn()
gf6.SetCellMinimum(0)
gf6.SetCellMaximum(7500)
g6Mapper = vtk.vtkPolyDataMapper()
g6Mapper.SetInputConnection(gf6.GetOutputPort())
g6Mapper.SetScalarRange(output.GetScalarRange())
g6Actor = vtk.vtkActor()
g6Actor.SetMapper(g6Mapper)
g6Actor.AddPosition(0,15,30)
# create pipeline - rectilinear grid
#
rgridReader = vtk.vtkRectilinearGridReader()
rgridReader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/RectGrid2.vtk")
rgridReader.Update()
gf7 = vtk.vtkGeometryFilter()
gf7.SetInputConnection(rgridReader.GetOutputPort())
g7Mapper = vtk.vtkPolyDataMapper()
g7Mapper.SetInputConnection(gf7.GetOutputPort())
g7Mapper.SetScalarRange(rgridReader.GetOutput().GetScalarRange())
g7Actor = vtk.vtkActor()
g7Actor.SetMapper(g7Mapper)
g7Actor.SetScale(3,3,3)
# rectilinear grid input, clipped by extent, point ids and cell ids
gf8 = vtk.vtkGeometryFilter()
gf8.SetInputConnection(rgridReader.GetOutputPort())
gf8.ExtentClippingOn()
gf8.SetExtent(0,1,-2,2,0,4)
gf8.PointClippingOn()
gf8.SetPointMinimum(0)
gf8.SetPointMaximum(10000)
gf8.CellClippingOn()
gf8.SetCellMinimum(0)
gf8.SetCellMaximum(7500)
g8Mapper = vtk.vtkPolyDataMapper()
g8Mapper.SetInputConnection(gf8.GetOutputPort())
g8Mapper.SetScalarRange(rgridReader.GetOutput().GetScalarRange())
g8Actor = vtk.vtkActor()
g8Actor.SetMapper(g8Mapper)
g8Actor.SetScale(3,3,3)
g8Actor.AddPosition(0,15,0)
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(gActor)
ren1.AddActor(g2Actor)
ren1.AddActor(g3Actor)
ren1.AddActor(g4Actor)
ren1.AddActor(g5Actor)
ren1.AddActor(g6Actor)
ren1.AddActor(g7Actor)
ren1.AddActor(g8Actor)
renWin.SetSize(340,550)
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(84,174)
cam1.SetFocalPoint(5.22824,6.09412,35.9813)
cam1.SetPosition(100.052,62.875,102.818)
cam1.SetViewUp(-0.307455,-0.464269,0.830617)
iren.Initialize()
# prevent the tk window from showing up then start the event loop
# test that the cell data is properly mapped in the output
ug = vtk.vtkUnstructuredGrid()
p = vtk.vtkPoints()
p.InsertNextPoint(0, 0, 0)
p.InsertNextPoint(1, 0, 0)
p.InsertNextPoint(2, 0, 0)
p.InsertNextPoint(3, 0, 0)
ug.SetPoints(p)
ug.GetNumberOfPoints()
ug.Allocate(4)
# insert two lines and two vertices interleaved, so the output cell order
# (verts first, then lines) differs from the input order
lpts = [0, 1]
ug.InsertNextCell(vtk.VTK_LINE, 2, lpts)
vpts = [1]
ug.InsertNextCell(vtk.VTK_VERTEX, 1, vpts)
lpts = [2, 3]
ug.InsertNextCell(vtk.VTK_LINE, 2, lpts)
vpts = [3]
ug.InsertNextCell(vtk.VTK_VERTEX, 1, vpts)
# one cell-data value per input cell, equal to the input cell index
aa = vtk.vtkIntArray()
aa.InsertNextValue(0)
aa.InsertNextValue(1)
aa.InsertNextValue(2)
aa.InsertNextValue(3)
aa.SetName('testarray')
ug.GetCellData().AddArray(aa)
gf = vtk.vtkGeometryFilter()
gf.SetInputData(ug)
gf.Update()
pd = gf.GetOutput()
oa = pd.GetCellData().GetArray('testarray')
# Check that the ordering of polydata arrays is correct. Verts should come before
# lines.
correctcelldata = [1, 3, 0, 2]
if oa.GetValue(0) != correctcelldata[0] and oa.GetValue(0) != correctcelldata[1]:
    print('Bad celldata of test array')
    sys.exit(1)
if oa.GetValue(1) != correctcelldata[0] and oa.GetValue(1) != correctcelldata[1]:
    print('Bad celldata of test array')
    sys.exit(1)
if oa.GetValue(2) != correctcelldata[2] and oa.GetValue(2) != correctcelldata[3]:
    print('Bad celldata of test array')
    sys.exit(1)
if oa.GetValue(3) != correctcelldata[2] and oa.GetValue(3) != correctcelldata[3]:
    print('Bad celldata of test array')
    sys.exit(1)
# --- end of script --
| 30.633166
| 81
| 0.781988
|
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
import sys
# Exercise vtkGeometryFilter on structured, poly, unstructured and
# rectilinear datasets, with and without extent/point-id/cell-id clipping,
# then check that cell data is correctly mapped to the output polydata.
# --- structured grid pipeline ---
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin")
pl3d.SetQFileName("" + str(VTK_DATA_ROOT) + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
output = pl3d.GetOutput().GetBlock(0)
# unclipped geometry of the structured grid
gf = vtk.vtkGeometryFilter()
gf.SetInputData(output)
gMapper = vtk.vtkPolyDataMapper()
gMapper.SetInputConnection(gf.GetOutputPort())
gMapper.SetScalarRange(output.GetScalarRange())
gActor = vtk.vtkActor()
gActor.SetMapper(gMapper)
# same grid, additionally clipped by extent, point ids and cell ids
gf2 = vtk.vtkGeometryFilter()
gf2.SetInputData(output)
gf2.ExtentClippingOn()
gf2.SetExtent(10,17,-6,6,23,37)
gf2.PointClippingOn()
gf2.SetPointMinimum(0)
gf2.SetPointMaximum(10000)
gf2.CellClippingOn()
gf2.SetCellMinimum(0)
gf2.SetCellMaximum(7500)
g2Mapper = vtk.vtkPolyDataMapper()
g2Mapper.SetInputConnection(gf2.GetOutputPort())
g2Mapper.SetScalarRange(output.GetScalarRange())
g2Actor = vtk.vtkActor()
g2Actor.SetMapper(g2Mapper)
g2Actor.AddPosition(0,15,0)
# --- poly data pipeline (geometry filter fed from another geometry filter) ---
gf3 = vtk.vtkGeometryFilter()
gf3.SetInputConnection(gf.GetOutputPort())
g3Mapper = vtk.vtkPolyDataMapper()
g3Mapper.SetInputConnection(gf3.GetOutputPort())
g3Mapper.SetScalarRange(output.GetScalarRange())
g3Actor = vtk.vtkActor()
g3Actor.SetMapper(g3Mapper)
g3Actor.AddPosition(0,0,15)
# polydata input, clipped by extent, point ids and cell ids
gf4 = vtk.vtkGeometryFilter()
gf4.SetInputConnection(gf2.GetOutputPort())
gf4.ExtentClippingOn()
gf4.SetExtent(10,17,-6,6,23,37)
gf4.PointClippingOn()
gf4.SetPointMinimum(0)
gf4.SetPointMaximum(10000)
gf4.CellClippingOn()
gf4.SetCellMinimum(0)
gf4.SetCellMaximum(7500)
g4Mapper = vtk.vtkPolyDataMapper()
g4Mapper.SetInputConnection(gf4.GetOutputPort())
g4Mapper.SetScalarRange(output.GetScalarRange())
g4Actor = vtk.vtkActor()
g4Actor.SetMapper(g4Mapper)
g4Actor.AddPosition(0,15,15)
# --- unstructured grid pipeline (sphere extraction of the structured grid) ---
s = vtk.vtkSphere()
s.SetCenter(output.GetCenter())
s.SetRadius(100.0)
# radius large enough to extract everything
eg = vtk.vtkExtractGeometry()
eg.SetInputData(output)
eg.SetImplicitFunction(s)
gf5 = vtk.vtkGeometryFilter()
gf5.SetInputConnection(eg.GetOutputPort())
g5Mapper = vtk.vtkPolyDataMapper()
g5Mapper.SetInputConnection(gf5.GetOutputPort())
g5Mapper.SetScalarRange(output.GetScalarRange())
g5Actor = vtk.vtkActor()
g5Actor.SetMapper(g5Mapper)
g5Actor.AddPosition(0,0,30)
# unstructured grid input, clipped by extent, point ids and cell ids
gf6 = vtk.vtkGeometryFilter()
gf6.SetInputConnection(eg.GetOutputPort())
gf6.ExtentClippingOn()
gf6.SetExtent(10,17,-6,6,23,37)
gf6.PointClippingOn()
gf6.SetPointMinimum(0)
gf6.SetPointMaximum(10000)
gf6.CellClippingOn()
gf6.SetCellMinimum(0)
gf6.SetCellMaximum(7500)
g6Mapper = vtk.vtkPolyDataMapper()
g6Mapper.SetInputConnection(gf6.GetOutputPort())
g6Mapper.SetScalarRange(output.GetScalarRange())
g6Actor = vtk.vtkActor()
g6Actor.SetMapper(g6Mapper)
g6Actor.AddPosition(0,15,30)
# --- rectilinear grid pipeline ---
rgridReader = vtk.vtkRectilinearGridReader()
rgridReader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/RectGrid2.vtk")
rgridReader.Update()
gf7 = vtk.vtkGeometryFilter()
gf7.SetInputConnection(rgridReader.GetOutputPort())
g7Mapper = vtk.vtkPolyDataMapper()
g7Mapper.SetInputConnection(gf7.GetOutputPort())
g7Mapper.SetScalarRange(rgridReader.GetOutput().GetScalarRange())
g7Actor = vtk.vtkActor()
g7Actor.SetMapper(g7Mapper)
g7Actor.SetScale(3,3,3)
# rectilinear grid input, clipped by extent, point ids and cell ids
gf8 = vtk.vtkGeometryFilter()
gf8.SetInputConnection(rgridReader.GetOutputPort())
gf8.ExtentClippingOn()
gf8.SetExtent(0,1,-2,2,0,4)
gf8.PointClippingOn()
gf8.SetPointMinimum(0)
gf8.SetPointMaximum(10000)
gf8.CellClippingOn()
gf8.SetCellMinimum(0)
gf8.SetCellMaximum(7500)
g8Mapper = vtk.vtkPolyDataMapper()
g8Mapper.SetInputConnection(gf8.GetOutputPort())
g8Mapper.SetScalarRange(rgridReader.GetOutput().GetScalarRange())
g8Actor = vtk.vtkActor()
g8Actor.SetMapper(g8Mapper)
g8Actor.SetScale(3,3,3)
g8Actor.AddPosition(0,15,0)
# --- render window, renderer and all actors ---
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(gActor)
ren1.AddActor(g2Actor)
ren1.AddActor(g3Actor)
ren1.AddActor(g4Actor)
ren1.AddActor(g5Actor)
ren1.AddActor(g6Actor)
ren1.AddActor(g7Actor)
ren1.AddActor(g8Actor)
renWin.SetSize(340,550)
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(84,174)
cam1.SetFocalPoint(5.22824,6.09412,35.9813)
cam1.SetPosition(100.052,62.875,102.818)
cam1.SetViewUp(-0.307455,-0.464269,0.830617)
iren.Initialize()
# --- test that cell data is properly mapped in the output ---
ug = vtk.vtkUnstructuredGrid()
p = vtk.vtkPoints()
p.InsertNextPoint(0, 0, 0)
p.InsertNextPoint(1, 0, 0)
p.InsertNextPoint(2, 0, 0)
p.InsertNextPoint(3, 0, 0)
ug.SetPoints(p)
ug.GetNumberOfPoints()
ug.Allocate(4)
# insert two lines and two vertices interleaved, so the output cell order
# (verts first, then lines) differs from the input order
lpts = [0, 1]
ug.InsertNextCell(vtk.VTK_LINE, 2, lpts)
vpts = [1]
ug.InsertNextCell(vtk.VTK_VERTEX, 1, vpts)
lpts = [2, 3]
ug.InsertNextCell(vtk.VTK_LINE, 2, lpts)
vpts = [3]
ug.InsertNextCell(vtk.VTK_VERTEX, 1, vpts)
# one cell-data value per input cell, equal to the input cell index
aa = vtk.vtkIntArray()
aa.InsertNextValue(0)
aa.InsertNextValue(1)
aa.InsertNextValue(2)
aa.InsertNextValue(3)
aa.SetName('testarray')
ug.GetCellData().AddArray(aa)
gf = vtk.vtkGeometryFilter()
gf.SetInputData(ug)
gf.Update()
pd = gf.GetOutput()
oa = pd.GetCellData().GetArray('testarray')
# Verts should come before lines in the output, so the first two output
# cells carry the vertex data (1, 3) and the last two the line data (0, 2).
correctcelldata = [1, 3, 0, 2]
if oa.GetValue(0) != correctcelldata[0] and oa.GetValue(0) != correctcelldata[1]:
    print('Bad celldata of test array')
    sys.exit(1)
if oa.GetValue(1) != correctcelldata[0] and oa.GetValue(1) != correctcelldata[1]:
    print('Bad celldata of test array')
    sys.exit(1)
if oa.GetValue(2) != correctcelldata[2] and oa.GetValue(2) != correctcelldata[3]:
    print('Bad celldata of test array')
    sys.exit(1)
if oa.GetValue(3) != correctcelldata[2] and oa.GetValue(3) != correctcelldata[3]:
    print('Bad celldata of test array')
    sys.exit(1)
| true
| true
|
f70e3dd26401020ef35bf9b9bb3093878a91618e
| 7,540
|
py
|
Python
|
algorithms/refinement/parameterisation/scan_varying_crystal_parameters.py
|
Anthchirp/dials
|
211cf7646d6711769b86643b010cb2fe5aaf71b9
|
[
"BSD-3-Clause"
] | null | null | null |
algorithms/refinement/parameterisation/scan_varying_crystal_parameters.py
|
Anthchirp/dials
|
211cf7646d6711769b86643b010cb2fe5aaf71b9
|
[
"BSD-3-Clause"
] | 2
|
2020-07-31T22:37:30.000Z
|
2020-07-31T23:08:55.000Z
|
algorithms/refinement/parameterisation/scan_varying_crystal_parameters.py
|
Anthchirp/dials
|
211cf7646d6711769b86643b010cb2fe5aaf71b9
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, division, print_function
from scitbx import matrix
from dials.algorithms.refinement.parameterisation.scan_varying_model_parameters import (
ScanVaryingParameterSet,
ScanVaryingModelParameterisation,
GaussianSmoother,
)
from dials.algorithms.refinement.parameterisation.crystal_parameters import (
CrystalOrientationMixin,
CrystalUnitCellMixin,
)
from dials.algorithms.refinement.refinement_helpers import CrystalOrientationCompose
class ScanVaryingCrystalOrientationParameterisation(
    ScanVaryingModelParameterisation, CrystalOrientationMixin
):
    """Scan-varying parameterisation for crystal orientation, with angles
    expressed in mrad"""

    def __init__(self, crystal, t_range, num_intervals, experiment_ids=None):
        """Initialise the parameterisation.

        Args:
            crystal: crystal model providing the initial [U] matrix.
            t_range: (start, end) image number range of the scan.
            num_intervals: number of Gaussian smoother intervals over t_range.
            experiment_ids: experiment indices this parameterisation applies
                to (defaults to [0]).
        """
        if experiment_ids is None:
            experiment_ids = [0]

        # The state of a scan varying crystal orientation parameterisation
        # is an orientation
        # matrix '[U](t)', expressed as a function of image number 't'
        # in a sequential scan.
        #
        # The initial state is a snapshot of the crystal orientation
        # at the point of initialisation '[U0]', which is independent of
        # image number.
        #
        # Future states are composed by
        # rotations around axes of the phi-axis frame by Tait-Bryan angles.
        #
        # [U](t) = [Phi3](t)[Phi2](t)[Phi1](t)[U0]

        # Set up the smoother
        smoother = GaussianSmoother(t_range, num_intervals)
        nv = smoother.num_values()

        # Set up the initial state
        istate = matrix.sqr(crystal.get_U())
        self._U_at_t = istate

        # Factory function to provide to _build_p_list
        def parameter_type(value, axis, ptype, name):
            return ScanVaryingParameterSet(value, nv, axis, ptype, name)

        # Build the parameter list
        p_list = self._build_p_list(parameter_type)

        # Set up the base class
        ScanVaryingModelParameterisation.__init__(
            self, crystal, istate, p_list, smoother, experiment_ids=experiment_ids
        )

        return

    def compose(self, t):
        """calculate state and derivatives for model at image number t"""

        # Extract orientation from the initial state
        U0 = self._initial_state

        # extract parameter sets from the internal list
        phi1_set, phi2_set, phi3_set = self._param

        # extract angles and other data at time t using the smoother
        phi1, phi1_weights, phi1_sumweights = self._smoother.value_weight(t, phi1_set)
        phi2, phi2_weights, phi2_sumweights = self._smoother.value_weight(t, phi2_set)
        phi3, phi3_weights, phi3_sumweights = self._smoother.value_weight(t, phi3_set)

        # calculate derivatives of angles wrt underlying parameters.
        # NOTE(review): the weights appear to be sparse vectors — iteration
        # below yields (index, value) pairs and .size is the dense length.
        dphi1_dp = phi1_weights * (1.0 / phi1_sumweights)
        dphi2_dp = phi2_weights * (1.0 / phi2_sumweights)
        dphi3_dp = phi3_weights * (1.0 / phi3_sumweights)

        # calculate state and derivatives using the helper class
        coc = CrystalOrientationCompose(
            U0, phi1, phi1_set.axis, phi2, phi2_set.axis, phi3, phi3_set.axis
        )
        self._U_at_t = coc.U()
        dU_dphi1 = coc.dU_dphi1()
        dU_dphi2 = coc.dU_dphi2()
        dU_dphi3 = coc.dU_dphi3()

        # calculate derivatives of state wrt underlying parameters via the
        # chain rule, filling only the entries with non-zero weight
        dU_dp1 = [None] * dphi1_dp.size
        for (i, v) in dphi1_dp:
            dU_dp1[i] = dU_dphi1 * v
        dU_dp2 = [None] * dphi2_dp.size
        for (i, v) in dphi2_dp:
            dU_dp2[i] = dU_dphi2 * v
        dU_dp3 = [None] * dphi3_dp.size
        for (i, v) in dphi3_dp:
            dU_dp3[i] = dU_dphi3 * v

        # store derivatives as list-of-lists
        self._dstate_dp = [dU_dp1, dU_dp2, dU_dp3]

        return

    def get_state(self):
        """Return crystal orientation matrix [U] at image number t"""

        # only a single crystal is parameterised here, so no multi_state_elt
        # argument is allowed
        return self._U_at_t
class ScanVaryingCrystalUnitCellParameterisation(
    ScanVaryingModelParameterisation, CrystalUnitCellMixin
):
    """Scan-varying parameterisation for the crystal unit cell"""

    def __init__(
        self,
        crystal,
        t_range,
        num_intervals,
        experiment_ids=None,
        set_state_uncertainties=False,
    ):
        """Initialise the parameterisation.

        Args:
            crystal: crystal model providing the initial [B] matrix.
            t_range: (start, end) image number range of the scan.
            num_intervals: number of Gaussian smoother intervals over t_range.
            experiment_ids: experiment indices this parameterisation applies
                to (defaults to [0]).
            set_state_uncertainties: if True, set_state_uncertainties() will
                forward B matrix variance-covariance data to the model.
        """
        self._set_state_uncertainties = set_state_uncertainties

        if experiment_ids is None:
            experiment_ids = [0]

        # The state of a scan-varying unit cell parameterisation is the
        # reciprocal space orthogonalisation matrix '[B](t)', expressed as a
        # function of image number 't' in a sequential scan.

        # Other comments from CrystalUnitCellParameterisation are relevant here

        # Set up the smoother
        smoother = GaussianSmoother(t_range, num_intervals)
        nv = smoother.num_values()

        # Set up the initial state. There is no initial state matrix for this
        # parameterisation; the current [B] is tracked in _B_at_t instead.
        # (matrix comes from the module-level import; the previous redundant
        # local 'from scitbx import matrix' has been removed.)
        istate = None
        self._B_at_t = matrix.sqr(crystal.get_B())

        # Factory function to provide to _build_p_list
        def parameter_type(value, name):
            return ScanVaryingParameterSet(value, nv, name=name)

        # Build the parameter list
        p_list = self._build_p_list(crystal, parameter_type)

        # Set up the base class
        ScanVaryingModelParameterisation.__init__(
            self, crystal, istate, p_list, smoother, experiment_ids=experiment_ids
        )

        return

    def compose(self, t):
        """calculate state and derivatives for model at image number t"""

        # extract values and weights at time t using the smoother
        vals, weights, sumweights = zip(
            *(self._smoother.value_weight(t, pset) for pset in self._param)
        )

        # calculate derivatives of metrical matrix parameters wrt underlying
        # scan-varying parameters
        inv_sumw = [1.0 / sw for sw in sumweights]
        dvals_dp = [e * isw for e, isw in zip(weights, inv_sumw)]

        # calculate new B and derivatives
        self._B_at_t, dB_dval = self._compose_core(vals)

        # calculate derivatives of state wrt underlying parameters.
        # FIX: a dense computation via as_dense_vector() previously built
        # this list and was immediately overwritten by the sparse loop below;
        # that dead (and wasted) computation has been removed.
        self._dstate_dp = [[None] * e.size for e in dvals_dp]
        for i, (dv, dB) in enumerate(zip(dvals_dp, dB_dval)):
            for j, e in dv:
                self._dstate_dp[i][j] = e * dB

        return

    def get_state(self):
        """Return crystal orthogonalisation matrix [B] at image number t"""

        # only a single crystal is parameterised here, so no multi_state_elt
        # argument is allowed
        return self._B_at_t

    def set_state_uncertainties(self, var_cov_list):
        """Send the calculated variance-covariance of the elements of the B matrix
        for all scan points back to the crystal model, if required
        """
        if not self._set_state_uncertainties:
            return

        # Convert list of 9*9 matrices to a 3d array
        from scitbx.array_family import flex

        B_cov = flex.double(flex.grid(len(var_cov_list), 9, 9))
        for i, v in enumerate(var_cov_list):
            v = v.as_flex_double_matrix()
            v.reshape(flex.grid(1, 9, 9))
            B_cov[i : (i + 1), :, :] = v

        # Pass it back to the model
        self._model.set_B_covariance_at_scan_points(B_cov)
| 34.907407
| 88
| 0.650265
|
from __future__ import absolute_import, division, print_function
from scitbx import matrix
from dials.algorithms.refinement.parameterisation.scan_varying_model_parameters import (
ScanVaryingParameterSet,
ScanVaryingModelParameterisation,
GaussianSmoother,
)
from dials.algorithms.refinement.parameterisation.crystal_parameters import (
CrystalOrientationMixin,
CrystalUnitCellMixin,
)
from dials.algorithms.refinement.refinement_helpers import CrystalOrientationCompose
class ScanVaryingCrystalOrientationParameterisation(
    ScanVaryingModelParameterisation, CrystalOrientationMixin
):
    """Scan-varying parameterisation for crystal orientation, with rotation
    angles expressed in mrad. State is [U](t) = [Phi3](t)[Phi2](t)[Phi1](t)[U0]
    as a function of image number t."""
    def __init__(self, crystal, t_range, num_intervals, experiment_ids=None):
        """Initialise from a crystal model, the scan's image number range and
        the number of Gaussian smoother intervals."""
        if experiment_ids is None:
            experiment_ids = [0]
        # smoother interpolating parameter values over image number
        smoother = GaussianSmoother(t_range, num_intervals)
        nv = smoother.num_values()
        # initial state: snapshot of the crystal orientation [U0]
        istate = matrix.sqr(crystal.get_U())
        self._U_at_t = istate
        # factory function provided to _build_p_list
        def parameter_type(value, axis, ptype, name):
            return ScanVaryingParameterSet(value, nv, axis, ptype, name)
        p_list = self._build_p_list(parameter_type)
        ScanVaryingModelParameterisation.__init__(
            self, crystal, istate, p_list, smoother, experiment_ids=experiment_ids
        )
        return
    def compose(self, t):
        """Calculate the state [U](t) and its derivatives at image number t."""
        U0 = self._initial_state
        # the three Tait-Bryan rotation parameter sets
        phi1_set, phi2_set, phi3_set = self._param
        # smoothed angle values, weights and weight sums at time t
        phi1, phi1_weights, phi1_sumweights = self._smoother.value_weight(t, phi1_set)
        phi2, phi2_weights, phi2_sumweights = self._smoother.value_weight(t, phi2_set)
        phi3, phi3_weights, phi3_sumweights = self._smoother.value_weight(t, phi3_set)
        # derivatives of each angle wrt the underlying parameters
        dphi1_dp = phi1_weights * (1.0 / phi1_sumweights)
        dphi2_dp = phi2_weights * (1.0 / phi2_sumweights)
        dphi3_dp = phi3_weights * (1.0 / phi3_sumweights)
        # compose [U](t) and its angle derivatives via the helper class
        coc = CrystalOrientationCompose(
            U0, phi1, phi1_set.axis, phi2, phi2_set.axis, phi3, phi3_set.axis
        )
        self._U_at_t = coc.U()
        dU_dphi1 = coc.dU_dphi1()
        dU_dphi2 = coc.dU_dphi2()
        dU_dphi3 = coc.dU_dphi3()
        # chain rule: iterate the (index, value) pairs of each sparse dphi_dp
        dU_dp1 = [None] * dphi1_dp.size
        for (i, v) in dphi1_dp:
            dU_dp1[i] = dU_dphi1 * v
        dU_dp2 = [None] * dphi2_dp.size
        for (i, v) in dphi2_dp:
            dU_dp2[i] = dU_dphi2 * v
        dU_dp3 = [None] * dphi3_dp.size
        for (i, v) in dphi3_dp:
            dU_dp3[i] = dU_dphi3 * v
        # store derivatives as list-of-lists, one per parameter set
        self._dstate_dp = [dU_dp1, dU_dp2, dU_dp3]
        return
    def get_state(self):
        """Return the crystal orientation matrix [U] for the image number last
        passed to compose()."""
        return self._U_at_t
class ScanVaryingCrystalUnitCellParameterisation(
    ScanVaryingModelParameterisation, CrystalUnitCellMixin
):
    """Scan-varying parameterisation for the crystal unit cell: state is the
    reciprocal space orthogonalisation matrix [B](t) as a function of image
    number t."""
    def __init__(
        self,
        crystal,
        t_range,
        num_intervals,
        experiment_ids=None,
        set_state_uncertainties=False,
    ):
        """Initialise from a crystal model, the scan's image number range and
        the number of Gaussian smoother intervals. If set_state_uncertainties
        is True, set_state_uncertainties() forwards B matrix covariances to
        the model."""
        self._set_state_uncertainties = set_state_uncertainties
        # the redundant local 'from scitbx import matrix' was removed; the
        # module-level import provides matrix
        if experiment_ids is None:
            experiment_ids = [0]
        smoother = GaussianSmoother(t_range, num_intervals)
        nv = smoother.num_values()
        # no initial state matrix; the current [B] is tracked in _B_at_t
        istate = None
        self._B_at_t = matrix.sqr(crystal.get_B())
        # factory function provided to _build_p_list
        def parameter_type(value, name):
            return ScanVaryingParameterSet(value, nv, name=name)
        p_list = self._build_p_list(crystal, parameter_type)
        ScanVaryingModelParameterisation.__init__(
            self, crystal, istate, p_list, smoother, experiment_ids=experiment_ids
        )
        return
    def compose(self, t):
        """Calculate the state [B](t) and its derivatives at image number t."""
        # smoothed values, weights and weight sums for each parameter set
        vals, weights, sumweights = zip(
            *(self._smoother.value_weight(t, pset) for pset in self._param)
        )
        # derivatives of metrical matrix parameters wrt underlying parameters
        inv_sumw = [1.0 / sw for sw in sumweights]
        dvals_dp = [e * isw for e, isw in zip(weights, inv_sumw)]
        # new B and its derivatives wrt the metrical matrix parameters
        self._B_at_t, dB_dval = self._compose_core(vals)
        # FIX: a dense computation via as_dense_vector() previously built
        # _dstate_dp here and was immediately overwritten by the sparse loop
        # below; that dead (and wasted) computation has been removed.
        self._dstate_dp = [[None] * e.size for e in dvals_dp]
        for i, (dv, dB) in enumerate(zip(dvals_dp, dB_dval)):
            for j, e in dv:
                self._dstate_dp[i][j] = e * dB
        return
    def get_state(self):
        """Return the [B] matrix for the image number last passed to compose()."""
        return self._B_at_t
    def set_state_uncertainties(self, var_cov_list):
        """Send the variance-covariance of the B matrix elements for all scan
        points back to the crystal model, if enabled at construction."""
        if not self._set_state_uncertainties:
            return
        # convert the list of 9*9 matrices to a 3d flex array
        from scitbx.array_family import flex
        B_cov = flex.double(flex.grid(len(var_cov_list), 9, 9))
        for i, v in enumerate(var_cov_list):
            v = v.as_flex_double_matrix()
            v.reshape(flex.grid(1, 9, 9))
            B_cov[i : (i + 1), :, :] = v
        self._model.set_B_covariance_at_scan_points(B_cov)
| true
| true
|
f70e40e0d8599737f2ce1ae11e3f0c3f5397ee5f
| 12,933
|
py
|
Python
|
electrumsv/gui/qt/console.py
|
Breavyn/electrumsv
|
c28f3dcbf7da29877ff0d90fe61c86a07f84e314
|
[
"MIT"
] | null | null | null |
electrumsv/gui/qt/console.py
|
Breavyn/electrumsv
|
c28f3dcbf7da29877ff0d90fe61c86a07f84e314
|
[
"MIT"
] | null | null | null |
electrumsv/gui/qt/console.py
|
Breavyn/electrumsv
|
c28f3dcbf7da29877ff0d90fe61c86a07f84e314
|
[
"MIT"
] | null | null | null |
# ElectrumSV - lightweight BitcoinSV client
# Copyright (C) 2012 thomasv@gitorious
# Copyright (C) 2019-2020 The ElectrumSV Developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# source: http://stackoverflow.com/questions/2758159
import os
import re
import sys
import traceback
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtGui import QFont, QTextCursor, QTextOption
from electrumsv import util
from electrumsv.i18n import _
from electrumsv.logs import logs
from electrumsv.platform import platform
# Module-level logger for the console widget.
logger = logs.get_logger("console")
class OverlayLabel(QtWidgets.QLabel):
    """Dismissible warning banner overlaid on the console widget."""
    STYLESHEET = '''
    QLabel, QLabel link {
        color: rgb(0, 0, 0);
        background-color: rgb(248, 240, 200);
        border: 1px solid;
        border-color: rgb(255, 114, 47);
        padding: 2px;
    }
    '''
    def __init__(self, text, parent):
        """Create the overlay as a child of `parent` (the console)."""
        super().__init__(text, parent)
        self.setMinimumHeight(150)
        self.setGeometry(0, 0, self.width(), self.height())
        self.setStyleSheet(self.STYLESHEET)
        self.setMargin(0)
        parent.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setWordWrap(True)
    def mousePressEvent(self, e):
        """Clicking anywhere on the overlay dismisses it."""
        self.hide()
    def on_resize(self, w):
        """Track the parent widget's width as it is resized."""
        padding = 2  # px, from the stylesheet above
        self.setFixedWidth(w - padding)
class Console(QtWidgets.QPlainTextEdit):
    """Interactive Python console widget.

    Supports command history, tab completion, multi-line constructs
    (blocks whose opening line ends in ':') and evaluation of commands
    in a configurable namespace.
    """

    def __init__(self, prompt='>> ', startup_message='', parent=None):
        QtWidgets.QPlainTextEdit.__init__(self, parent)

        self.prompt = prompt
        self.history = []
        # FIX: initialise the history cursor here so Up/Down navigation
        # cannot raise AttributeError before the first command is recorded.
        self.history_index = 0
        self.namespace = {}
        self.construct = []

        self.setGeometry(50, 75, 600, 400)
        self.setWordWrapMode(QTextOption.WrapAnywhere)
        self.setUndoRedoEnabled(False)
        self.document().setDefaultFont(QFont(platform.monospace_font, 10, QFont.Normal))
        self.showMessage(startup_message)

        self.updateNamespace({'run': self.run_script})
        self.set_json(False)

        warning_text = "<h1><center>{}</center></h1><br>{}<br><br>{}<br><br>{}".format(
            _("Warning!"),
            _("Do not run code here that you don't understand. Running bad or malicious code "
              "could lead to your coins being irreversibly lost."),
            _("Text shown here is sent by the server and may be malicious; ignore anything it "
              "might be asking you to do."),
            _("Click here to hide this message.")
        )
        self.messageOverlay = OverlayLabel(warning_text, self)

    def resizeEvent(self, e):
        """Keep the warning overlay sized to the visible text width."""
        super().resizeEvent(e)
        # isVisible() is 0/1, so the scrollbar width is subtracted only
        # when the scrollbar is actually shown.
        scrollbar_width = self.verticalScrollBar().width() * self.verticalScrollBar().isVisible()
        self.messageOverlay.on_resize(self.width() - scrollbar_width)

    def set_json(self, b):
        """Set whether the next command result is printed as JSON."""
        self.is_json = b

    def run_script(self, filename):
        """Execute a Python script file within the console namespace."""
        with open(filename) as f:
            script = f.read()

        # FIX: script files contain statements, which eval() cannot handle
        # (it evaluates expressions only) — exec() is required here.
        # exec is generally considered bad practice. use it wisely!
        # pylint: disable=exec-used
        exec(script, self.namespace, self.namespace)

    def updateNamespace(self, namespace):
        """Merge extra names into the evaluation namespace."""
        self.namespace.update(namespace)

    def showMessage(self, message):
        """Append a message to the console and show a fresh prompt."""
        self.appendPlainText(message)
        self.newPrompt()

    def clear(self):
        """Clear all console text and show a fresh prompt."""
        self.setPlainText('')
        self.newPrompt()

    def newPrompt(self):
        """Print a prompt ('...'-style while inside a multi-line construct)."""
        if self.construct:
            prompt = '.' * len(self.prompt)
        else:
            prompt = self.prompt
        self.completions_pos = self.textCursor().position()
        self.completions_visible = False
        self.appendPlainText(prompt)
        self.moveCursor(QTextCursor.End)

    def getCommand(self):
        """Return the command text typed after the current prompt."""
        doc = self.document()
        curr_line = doc.findBlockByLineNumber(doc.lineCount() - 1).text()
        curr_line = curr_line.rstrip()
        curr_line = curr_line[len(self.prompt):]
        return curr_line

    def setCommand(self, command):
        """Replace the text after the prompt with `command`."""
        if self.getCommand() == command:
            return
        doc = self.document()
        curr_line = doc.findBlockByLineNumber(doc.lineCount() - 1).text()
        self.moveCursor(QTextCursor.End)
        for i in range(len(curr_line) - len(self.prompt)):
            self.moveCursor(QTextCursor.Left, QTextCursor.KeepAnchor)
        self.textCursor().removeSelectedText()
        self.textCursor().insertText(command)
        self.moveCursor(QTextCursor.End)

    def show_completions(self, completions):
        """Display candidate completions below the current line."""
        if self.completions_visible:
            self.hide_completions()
        c = self.textCursor()
        c.setPosition(self.completions_pos)
        completions = [x.split('.')[-1] for x in completions]
        t = '\n' + ' '.join(completions)
        if len(t) > 500:
            t = t[:500] + '...'
        c.insertText(t)
        self.completions_end = c.position()
        self.moveCursor(QTextCursor.End)
        self.completions_visible = True

    def hide_completions(self):
        """Remove any completion text previously inserted."""
        if not self.completions_visible:
            return
        c = self.textCursor()
        c.setPosition(self.completions_pos)
        for x in range(self.completions_end - self.completions_pos):
            c.deleteChar()
        self.moveCursor(QTextCursor.End)
        self.completions_visible = False

    def getConstruct(self, command):
        """Accumulate lines of a multi-line construct.

        Returns the joined construct once terminated by two consecutive
        empty lines, '' while still accumulating, or the command itself
        when not inside a construct.
        """
        if self.construct:
            prev_command = self.construct[-1]
            self.construct.append(command)
            if not prev_command and not command:
                ret_val = '\n'.join(self.construct)
                self.construct = []
                return ret_val
            else:
                return ''
        else:
            if command and command[-1] == (':'):
                self.construct.append(command)
                return ''
            else:
                return command

    def getHistory(self):
        """Return the command history list."""
        return self.history

    def setHistory(self, history):
        """Replace the command history and reset the navigation index."""
        self.history = history
        self.history_index = len(self.history)

    def setHisory(self, history):
        """Deprecated misspelling of setHistory(); kept for existing callers."""
        self.setHistory(history)

    def addToHistory(self, command):
        """Record a command (commands starting with a space are not recorded)."""
        if command[0:1] == ' ':
            return
        if command and (not self.history or self.history[-1] != command):
            self.history.append(command)
        self.history_index = len(self.history)

    def getPrevHistoryEntry(self):
        """Step back through history; returns '' when history is empty."""
        if self.history:
            self.history_index = max(0, self.history_index - 1)
            return self.history[self.history_index]
        return ''

    def getNextHistoryEntry(self):
        """Step forward through history; returns '' past the newest entry."""
        if self.history:
            hist_len = len(self.history)
            self.history_index = min(hist_len, self.history_index + 1)
            if self.history_index < hist_len:
                return self.history[self.history_index]
        return ''

    def getCursorPosition(self):
        """Cursor offset within the command (0 = just after the prompt)."""
        c = self.textCursor()
        return c.position() - c.block().position() - len(self.prompt)

    def setCursorPosition(self, position):
        """Place the cursor at `position` within the command text."""
        self.moveCursor(QTextCursor.StartOfLine)
        for i in range(len(self.prompt) + position):
            self.moveCursor(QTextCursor.Right)

    def register_command(self, c, func):
        """Expose `func` in the evaluation namespace under the name `c`."""
        methods = {c: func}
        self.updateNamespace(methods)

    def runCommand(self):
        """Evaluate the current command and print its result or traceback."""
        command = self.getCommand()
        self.addToHistory(command)

        command = self.getConstruct(command)

        if command:
            tmp_stdout = sys.stdout

            class stdoutProxy():
                """Redirects writes into the console widget."""
                def __init__(self, write_func):
                    self.write_func = write_func
                    self.skip = False

                def flush(self):
                    pass

                def write(self, text):
                    if not self.skip:
                        stripped_text = text.rstrip('\n')
                        self.write_func(stripped_text)
                        QtCore.QCoreApplication.processEvents()
                    # print() calls write() twice (payload, then '\n');
                    # alternate calls are skipped to avoid blank lines.
                    self.skip = not self.skip

            sys.stdout = stdoutProxy(self.appendPlainText)
            try:
                try:
                    # eval is generally considered bad practice. use it wisely!
                    # pylint: disable=eval-used
                    result = eval(command, self.namespace, self.namespace)
                    if result is not None:
                        if self.is_json:
                            print(util.json_encode(result))
                        else:
                            self.appendPlainText(repr(result))
                except SyntaxError:
                    # Statements (assignments, loops, ...) are not valid
                    # eval() input; fall back to exec.
                    # exec is generally considered bad practice. use it wisely!
                    # pylint: disable=exec-used
                    exec(command, self.namespace, self.namespace)
            except SystemExit:
                self.close()
            except Exception:
                # Catch errors in the network layer as well, as long as it uses Exception.
                traceback_lines = traceback.format_exc().split('\n')
                # Remove traceback mentioning this file, and a linebreak
                for i in (3, 2, 1, -1):
                    traceback_lines.pop(i)
                self.appendPlainText('\n'.join(traceback_lines))
            sys.stdout = tmp_stdout

        self.newPrompt()
        self.set_json(False)

    def keyPressEvent(self, event):
        """Intercept keys for completion, history navigation and prompt protection."""
        if event.key() == QtCore.Qt.Key_Tab:
            self.completions()
            return

        self.hide_completions()

        if event.key() in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
            self.runCommand()
            return
        if event.key() == QtCore.Qt.Key_Home:
            self.setCursorPosition(0)
            return
        if event.key() == QtCore.Qt.Key_PageUp:
            return
        elif event.key() in (QtCore.Qt.Key_Left, QtCore.Qt.Key_Backspace):
            # Do not allow deleting or moving into the prompt itself.
            if self.getCursorPosition() == 0:
                return
        elif event.key() == QtCore.Qt.Key_Up:
            self.setCommand(self.getPrevHistoryEntry())
            return
        elif event.key() == QtCore.Qt.Key_Down:
            self.setCommand(self.getNextHistoryEntry())
            return
        elif event.key() == QtCore.Qt.Key_L and event.modifiers() == QtCore.Qt.ControlModifier:
            self.clear()

        super(Console, self).keyPressEvent(event)

    def completions(self):
        """Tab-complete the last (possibly dotted) word of the current command."""
        cmd = self.getCommand()
        lastword = re.split(r' |\(|\)', cmd)[-1]
        beginning = cmd[0: -len(lastword)]

        path = lastword.split('.')
        prefix = '.'.join(path[:-1])
        prefix = (prefix + '.') if prefix else prefix
        ns = self.namespace.keys()

        if len(path) > 1:
            # Dotted name: walk attributes to find the object to complete on.
            obj = self.namespace.get(path[0])
            try:
                for attr in path[1:-1]:
                    obj = getattr(obj, attr)
            except AttributeError:
                ns = []
            else:
                ns = dir(obj)

        completions = []
        for name in ns:
            if name[0] == '_':
                # Hide private names from completion.
                continue
            if name.startswith(path[-1]):
                completions.append(prefix + name)
        completions.sort()

        if not completions:
            self.hide_completions()
        elif len(completions) == 1:
            self.hide_completions()
            self.setCommand(beginning + completions[0])
        else:
            # find common prefix
            p = os.path.commonprefix(completions)
            if len(p) > len(lastword):
                self.hide_completions()
                self.setCommand(beginning + p)
            else:
                self.show_completions(completions)
# Standalone demo: run this module directly to try the console widget.
welcome_message = '''
   ---------------------------------------------------------------
   Welcome to a primitive Python interpreter.
   ---------------------------------------------------------------
'''
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    console = Console(startup_message=welcome_message)
    # expose a couple of example names in the interpreter namespace
    console.updateNamespace({'myVar1': app, 'myVar2': 1234})
    console.show()
    sys.exit(app.exec_())
| 33.856021
| 97
| 0.586716
|
import os
import re
import sys
import traceback
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtGui import QFont, QTextCursor, QTextOption
from electrumsv import util
from electrumsv.i18n import _
from electrumsv.logs import logs
from electrumsv.platform import platform
# Module-level logger for the console widget.
logger = logs.get_logger("console")
class OverlayLabel(QtWidgets.QLabel):
    """Dismissible warning banner overlaid on the console widget."""
    STYLESHEET = '''
    QLabel, QLabel link {
        color: rgb(0, 0, 0);
        background-color: rgb(248, 240, 200);
        border: 1px solid;
        border-color: rgb(255, 114, 47);
        padding: 2px;
    }
    '''
    def __init__(self, text, parent):
        """Create the overlay as a child of `parent` (the console)."""
        super().__init__(text, parent)
        self.setMinimumHeight(150)
        self.setGeometry(0, 0, self.width(), self.height())
        self.setStyleSheet(self.STYLESHEET)
        self.setMargin(0)
        parent.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setWordWrap(True)
    def mousePressEvent(self, e):
        """Clicking anywhere on the overlay dismisses it."""
        self.hide()
    def on_resize(self, w):
        """Track the parent widget's width as it is resized."""
        padding = 2  # px, matches the padding in STYLESHEET
        self.setFixedWidth(w - padding)
class Console(QtWidgets.QPlainTextEdit):
    """Interactive Python console widget.

    Maintains an eval/exec namespace, a per-session command history and a
    line-oriented prompt inside a QPlainTextEdit.  Multi-line constructs
    (a line ending in ':') are buffered in self.construct until the user
    closes them with blank lines.
    """
    def __init__(self, prompt='>> ', startup_message='', parent=None):
        QtWidgets.QPlainTextEdit.__init__(self, parent)
        self.prompt = prompt
        self.history = []
        # Names visible to code eval'd/exec'd in this console.
        self.namespace = {}
        # Buffered lines of an unfinished multi-line construct.
        self.construct = []
        self.setGeometry(50, 75, 600, 400)
        self.setWordWrapMode(QTextOption.WrapAnywhere)
        self.setUndoRedoEnabled(False)
        self.document().setDefaultFont(QFont(platform.monospace_font, 10, QFont.Normal))
        self.showMessage(startup_message)
        self.updateNamespace({'run': self.run_script})
        self.set_json(False)
        warning_text = "<h1><center>{}</center></h1><br>{}<br><br>{}<br><br>{}".format(
            _("Warning!"),
            _("Do not run code here that you don't understand. Running bad or malicious code "
              "could lead to your coins being irreversibly lost."),
            _("Text shown here is sent by the server and may be malicious; ignore anything it "
              "might be asking you to do."),
            _("Click here to hide this message.")
        )
        self.messageOverlay = OverlayLabel(warning_text, self)
    def resizeEvent(self, e):
        # Keep the warning overlay sized to the visible text area.
        super().resizeEvent(e)
        scrollbar_width = self.verticalScrollBar().width() * self.verticalScrollBar().isVisible()
        self.messageOverlay.on_resize(self.width() - scrollbar_width)
    def set_json(self, b):
        # When True, the next command's result is printed JSON-encoded.
        self.is_json = b
    def run_script(self, filename):
        """Execute the contents of *filename* inside the console namespace."""
        with open(filename) as f:
            script = f.read()
        # eval is generally considered bad practice. use it wisely!
        # pylint: disable=eval-used
        eval(script, self.namespace, self.namespace)
    def updateNamespace(self, namespace):
        """Merge *namespace* (a dict) into the console's eval namespace."""
        self.namespace.update(namespace)
    def showMessage(self, message):
        self.appendPlainText(message)
        self.newPrompt()
    def clear(self):
        self.setPlainText('')
        self.newPrompt()
    def newPrompt(self):
        # Continuation lines of a construct get a dotted prompt of the
        # same width as the normal prompt.
        if self.construct:
            prompt = '.' * len(self.prompt)
        else:
            prompt = self.prompt
        self.completions_pos = self.textCursor().position()
        self.completions_visible = False
        self.appendPlainText(prompt)
        self.moveCursor(QTextCursor.End)
    def getCommand(self):
        """Return the text of the current command line, prompt stripped."""
        doc = self.document()
        curr_line = doc.findBlockByLineNumber(doc.lineCount() - 1).text()
        curr_line = curr_line.rstrip()
        curr_line = curr_line[len(self.prompt):]
        return curr_line
    def setCommand(self, command):
        """Replace the current command line with *command*."""
        if self.getCommand() == command:
            return
        doc = self.document()
        curr_line = doc.findBlockByLineNumber(doc.lineCount() - 1).text()
        self.moveCursor(QTextCursor.End)
        # Select everything after the prompt, then overwrite it.
        for i in range(len(curr_line) - len(self.prompt)):
            self.moveCursor(QTextCursor.Left, QTextCursor.KeepAnchor)
        self.textCursor().removeSelectedText()
        self.textCursor().insertText(command)
        self.moveCursor(QTextCursor.End)
    def show_completions(self, completions):
        """Insert a (truncated) completion list below the command line."""
        if self.completions_visible:
            self.hide_completions()
        c = self.textCursor()
        c.setPosition(self.completions_pos)
        completions = [x.split('.')[-1] for x in completions]
        t = '\n' + ' '.join(completions)
        if len(t) > 500:
            t = t[:500] + '...'
        c.insertText(t)
        self.completions_end = c.position()
        self.moveCursor(QTextCursor.End)
        self.completions_visible = True
    def hide_completions(self):
        """Delete a previously inserted completion list, if any."""
        if not self.completions_visible:
            return
        c = self.textCursor()
        c.setPosition(self.completions_pos)
        for x in range(self.completions_end - self.completions_pos):
            c.deleteChar()
        self.moveCursor(QTextCursor.End)
        self.completions_visible = False
    def getConstruct(self, command):
        """Accumulate multi-line constructs.

        Returns the full construct once it is closed (two consecutive
        blank lines entered), '' while it is still being collected, or
        *command* unchanged for ordinary single-line input.
        """
        if self.construct:
            prev_command = self.construct[-1]
            self.construct.append(command)
            if not prev_command and not command:
                ret_val = '\n'.join(self.construct)
                self.construct = []
                return ret_val
            else:
                return ''
        else:
            # A trailing ':' starts a new multi-line construct.
            if command and command[-1] == (':'):
                self.construct.append(command)
                return ''
            else:
                return command
    def getHistory(self):
        return self.history
    def setHisory(self, history):
        # NOTE(review): method name looks like a typo for "setHistory";
        # kept as-is since external callers may rely on it.
        self.history = history
    def addToHistory(self, command):
        # A leading space suppresses history recording (shell-style).
        if command[0:1] == ' ':
            return
        if command and (not self.history or self.history[-1] != command):
            self.history.append(command)
        self.history_index = len(self.history)
    def getPrevHistoryEntry(self):
        if self.history:
            self.history_index = max(0, self.history_index - 1)
            return self.history[self.history_index]
        return ''
    def getNextHistoryEntry(self):
        if self.history:
            hist_len = len(self.history)
            self.history_index = min(hist_len, self.history_index + 1)
            if self.history_index < hist_len:
                return self.history[self.history_index]
        return ''
    def getCursorPosition(self):
        """Cursor column relative to the start of the editable command."""
        c = self.textCursor()
        return c.position() - c.block().position() - len(self.prompt)
    def setCursorPosition(self, position):
        self.moveCursor(QTextCursor.StartOfLine)
        for i in range(len(self.prompt) + position):
            self.moveCursor(QTextCursor.Right)
    def register_command(self, c, func):
        """Expose callable *func* in the console namespace under name *c*."""
        methods = {c: func}
        self.updateNamespace(methods)
    def runCommand(self):
        """Evaluate the current line (or completed construct) and show output."""
        command = self.getCommand()
        self.addToHistory(command)
        command = self.getConstruct(command)
        if command:
            tmp_stdout = sys.stdout
            # Redirect stdout into the widget while user code runs.
            class stdoutProxy():
                def __init__(self, write_func):
                    self.write_func = write_func
                    self.skip = False
                def flush(self):
                    pass
                def write(self, text):
                    # Every other write() call is skipped — presumably to
                    # drop the separate '\n' writes print() emits; confirm.
                    if not self.skip:
                        stripped_text = text.rstrip('\n')
                        self.write_func(stripped_text)
                        QtCore.QCoreApplication.processEvents()
                    self.skip = not self.skip
            sys.stdout = stdoutProxy(self.appendPlainText)
            try:
                try:
                    # eval is generally considered bad practice. use it wisely!
                    # pylint: disable=eval-used
                    result = eval(command, self.namespace, self.namespace)
                    if result is not None:
                        if self.is_json:
                            print(util.json_encode(result))
                        else:
                            self.appendPlainText(repr(result))
                except SyntaxError:
                    # Statements (assignments, defs, ...) are not valid
                    # eval expressions; fall back to exec.
                    # exec is generally considered bad practice. use it wisely!
                    # pylint: disable=exec-used
                    exec(command, self.namespace, self.namespace)
            except SystemExit:
                self.close()
            except Exception:
                # Catch errors in the network layer as well, as long as it uses Exception.
                traceback_lines = traceback.format_exc().split('\n')
                # Remove traceback mentioning this file, and a linebreak
                for i in (3, 2, 1, -1):
                    traceback_lines.pop(i)
                self.appendPlainText('\n'.join(traceback_lines))
            sys.stdout = tmp_stdout
        self.newPrompt()
        self.set_json(False)
    def keyPressEvent(self, event):
        """Intercept editing keys: Tab completion, history, prompt guards."""
        if event.key() == QtCore.Qt.Key_Tab:
            self.completions()
            return
        self.hide_completions()
        if event.key() in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
            self.runCommand()
            return
        if event.key() == QtCore.Qt.Key_Home:
            self.setCursorPosition(0)
            return
        if event.key() == QtCore.Qt.Key_PageUp:
            return
        elif event.key() in (QtCore.Qt.Key_Left, QtCore.Qt.Key_Backspace):
            # Do not allow the cursor to move/delete into the prompt.
            if self.getCursorPosition() == 0:
                return
        elif event.key() == QtCore.Qt.Key_Up:
            self.setCommand(self.getPrevHistoryEntry())
            return
        elif event.key() == QtCore.Qt.Key_Down:
            self.setCommand(self.getNextHistoryEntry())
            return
        elif event.key() == QtCore.Qt.Key_L and event.modifiers() == QtCore.Qt.ControlModifier:
            self.clear()
        super(Console, self).keyPressEvent(event)
    def completions(self):
        """Tab-complete the last word against the namespace (or dir(obj))."""
        cmd = self.getCommand()
        lastword = re.split(r' |\(|\)', cmd)[-1]
        beginning = cmd[0: -len(lastword)]
        path = lastword.split('.')
        prefix = '.'.join(path[:-1])
        prefix = (prefix + '.') if prefix else prefix
        ns = self.namespace.keys()
        if len(path) > 1:
            # Dotted path: resolve all but the final attribute, then
            # complete against dir() of the resolved object.
            obj = self.namespace.get(path[0])
            try:
                for attr in path[1:-1]:
                    obj = getattr(obj, attr)
            except AttributeError:
                ns = []
            else:
                ns = dir(obj)
        completions = []
        for name in ns:
            # Skip private/dunder names.
            if name[0] == '_':
                continue
            if name.startswith(path[-1]):
                completions.append(prefix + name)
        completions.sort()
        if not completions:
            self.hide_completions()
        elif len(completions) == 1:
            self.hide_completions()
            self.setCommand(beginning + completions[0])
        else:
            # find common prefix
            p = os.path.commonprefix(completions)
            if len(p) > len(lastword):
                self.hide_completions()
                self.setCommand(beginning + p)
            else:
                self.show_completions(completions)
# Banner text shown inside the console widget at startup.
welcome_message = '''
   ---------------------------------------------------------------
     Welcome to a primitive Python interpreter.
   ---------------------------------------------------------------
'''
if __name__ == '__main__':
    # Manual test harness: run this module directly to get a standalone
    # console window preloaded with a couple of demo variables.
    app = QtWidgets.QApplication(sys.argv)
    console = Console(startup_message=welcome_message)
    console.updateNamespace({'myVar1': app, 'myVar2': 1234})
    console.show()
    sys.exit(app.exec_())
| true
| true
|
f70e414416ca8304fe30e26ffa6f6350cd2d5994
| 686
|
py
|
Python
|
text/main_part.py
|
Goodjooy/AzurLane-GirlFrontLine-PaintingRestore
|
1245ff752c88ca2b2ffe6fa31ea049e2ed6f379b
|
[
"MIT"
] | 49
|
2018-11-25T08:58:19.000Z
|
2022-01-07T12:07:08.000Z
|
text/main_part.py
|
swordfate3/AzurLane-PaintingExtract
|
4702f3529d6e86a8aa0e9fde7b62ac54b4b2c9b4
|
[
"MIT"
] | 4
|
2018-12-30T05:42:12.000Z
|
2021-06-03T11:05:28.000Z
|
text/main_part.py
|
swordfate3/AzurLane-PaintingExtract
|
4702f3529d6e86a8aa0e9fde7b62ac54b4b2c9b4
|
[
"MIT"
] | 9
|
2019-02-13T05:53:55.000Z
|
2020-07-19T09:43:43.000Z
|
# Batch driver: cut every painting texture in ./texture2D that has a
# matching atlas description in ./textAsset (paired by base file name).
import pygame
import os
import holder as ch
import time
textAsset = []
texture2D = os.listdir('texture2D')
textAsset = os.listdir('textAsset')
# Strip extensions: keep only the first dot-separated component of each name.
text_ = []
for text in textAsset:
    text_.append(text.split('.'))
textAsset = []
for text in text_:
    textAsset.append(text[0])
# Set for O(1) membership tests in the loop below.
textAsset = set(textAsset)
text_ = []
for text in texture2D:
    text_.append(text.split('.'))
texture2D = []
for text in text_:
    texture2D.append(text[0])
# Cut each texture with an atlas file; report the ones without one.
# (Messages are user-facing Chinese strings: "atlas file missing, add
# <name>.atlas.txt to TextAsset" / "finished one" / "done, closing in 15 s".)
for name in texture2D:
    if name not in textAsset:
        print("切分文件丢失,请添加【"+name+".atlas.txt】至TextAsset文件夹" )
    else:
        ch.body_cut(name)
        print('完成一个,为'+name)
print("完成,将于15s后关闭")
time.sleep(15)
| 19.6
| 62
| 0.637026
|
# Batch driver: cut every painting texture in ./texture2D that has a
# matching atlas description in ./textAsset (paired by base file name).
import pygame
import os
import holder as ch
import time
# Texture base names in directory-listing order (extension stripped).
texture_names = [entry.split('.')[0] for entry in os.listdir('texture2D')]
# Atlas base names as a set for fast membership tests.
atlas_names = {entry.split('.')[0] for entry in os.listdir('textAsset')}
for name in texture_names:
    if name in atlas_names:
        ch.body_cut(name)
        print('完成一个,为' + name)
    else:
        print("切分文件丢失,请添加【" + name + ".atlas.txt】至TextAsset文件夹")
print("完成,将于15s后关闭")
time.sleep(15)
| true
| true
|
f70e4148ccdd54d9e830afa9548c8b7779db1fd7
| 1,659
|
py
|
Python
|
pyramid_oereb/standard/xtf_import/public_law_restriction.py
|
pvalsecc/pyramid_oereb
|
3d9a8a68952641e923c34e086768630e21a559ed
|
[
"BSD-2-Clause"
] | null | null | null |
pyramid_oereb/standard/xtf_import/public_law_restriction.py
|
pvalsecc/pyramid_oereb
|
3d9a8a68952641e923c34e086768630e21a559ed
|
[
"BSD-2-Clause"
] | 3
|
2019-12-26T17:00:44.000Z
|
2022-03-21T22:16:54.000Z
|
pyramid_oereb/standard/xtf_import/public_law_restriction.py
|
pvalsecc/pyramid_oereb
|
3d9a8a68952641e923c34e086768630e21a559ed
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from pyramid_oereb.standard.xtf_import.util import parse_string, parse_multilingual_text, parse_ref
class PublicLawRestriction(object):
    """Imports PublicLawRestriction elements from an XTF transfer file.

    Each XML element is mapped onto the configured model class and staged
    on the session.  The TAG_* constants mirror the (German) XTF schema
    element names.
    """
    TAG_INFORMATION = 'Aussage'
    TAG_SUB_THEME = 'SubThema'
    TAG_OTHER_THEME = 'WeiteresThema'
    TAG_TYPE_CODE = 'ArtCode'
    TAG_TYPE_CODE_LIST = 'ArtCodeliste'
    TAG_LAW_STATUS = 'Rechtsstatus'
    TAG_PUBLISHED_FROM = 'publiziertAb'
    TAG_VIEW_SERVICE = 'DarstellungsDienst'
    TAG_RESPONSIBLE_OFFICE = 'ZustaendigeStelle'
    def __init__(self, session, model, topic_code):
        """Store the session, the target model class and the topic code."""
        self._session = session
        self._model = model
        self._topic_code = topic_code
    def parse(self, public_law_restriction):  # pragma: no cover
        """Map one XTF element onto the model and add it to the session."""
        element = public_law_restriction
        values = {
            'id': element.attrib['TID'],
            'information': parse_multilingual_text(element, self.TAG_INFORMATION),
            'topic': self._topic_code,
            'sub_theme': parse_string(element, self.TAG_SUB_THEME),
            'other_theme': parse_string(element, self.TAG_OTHER_THEME),
            'type_code': parse_string(element, self.TAG_TYPE_CODE),
            'type_code_list': parse_string(element, self.TAG_TYPE_CODE_LIST),
            'law_status': parse_string(element, self.TAG_LAW_STATUS),
            'published_from': parse_string(element, self.TAG_PUBLISHED_FROM),
            'view_service_id': parse_ref(element, self.TAG_VIEW_SERVICE),
            'office_id': parse_ref(element, self.TAG_RESPONSIBLE_OFFICE),
        }
        self._session.add(self._model(**values))
| 44.837838
| 99
| 0.729355
|
from pyramid_oereb.standard.xtf_import.util import parse_string, parse_multilingual_text, parse_ref
class PublicLawRestriction(object):
    """Imports PublicLawRestriction elements from an XTF transfer file.

    Each XML element is mapped onto the configured model class and staged
    on the session.
    """
    # XTF schema element names (German identifiers).
    TAG_INFORMATION = 'Aussage'
    TAG_SUB_THEME = 'SubThema'
    TAG_OTHER_THEME = 'WeiteresThema'
    TAG_TYPE_CODE = 'ArtCode'
    TAG_TYPE_CODE_LIST = 'ArtCodeliste'
    TAG_LAW_STATUS = 'Rechtsstatus'
    TAG_PUBLISHED_FROM = 'publiziertAb'
    TAG_VIEW_SERVICE = 'DarstellungsDienst'
    TAG_RESPONSIBLE_OFFICE = 'ZustaendigeStelle'
    def __init__(self, session, model, topic_code):
        """Store the session, the target model class and the topic code."""
        self._session = session
        self._model = model
        self._topic_code = topic_code
    def parse(self, public_law_restriction):
        """Map one XTF element onto the model and add it to the session."""
        instance = self._model(
            id=public_law_restriction.attrib['TID'],
            information=parse_multilingual_text(public_law_restriction, self.TAG_INFORMATION),
            topic=self._topic_code,
            sub_theme=parse_string(public_law_restriction, self.TAG_SUB_THEME),
            other_theme=parse_string(public_law_restriction, self.TAG_OTHER_THEME),
            type_code=parse_string(public_law_restriction, self.TAG_TYPE_CODE),
            type_code_list=parse_string(public_law_restriction, self.TAG_TYPE_CODE_LIST),
            law_status=parse_string(public_law_restriction, self.TAG_LAW_STATUS),
            published_from=parse_string(public_law_restriction, self.TAG_PUBLISHED_FROM),
            view_service_id=parse_ref(public_law_restriction, self.TAG_VIEW_SERVICE),
            office_id=parse_ref(public_law_restriction, self.TAG_RESPONSIBLE_OFFICE)
        )
        self._session.add(instance)
| true
| true
|
f70e414afae0a9937a03b34fc3512400c7b8aae8
| 5,396
|
py
|
Python
|
SUP_INS_ANCH.py
|
AndrzejLach89/Plant-3D-scripts
|
7c85dc1d3dba498b27f292266235f30d405f86b9
|
[
"MIT"
] | null | null | null |
SUP_INS_ANCH.py
|
AndrzejLach89/Plant-3D-scripts
|
7c85dc1d3dba498b27f292266235f30d405f86b9
|
[
"MIT"
] | null | null | null |
SUP_INS_ANCH.py
|
AndrzejLach89/Plant-3D-scripts
|
7c85dc1d3dba498b27f292266235f30d405f86b9
|
[
"MIT"
] | null | null | null |
# Created by Andrzej Lach @ 2021
# https://github.com/AndrzejLach89
from aqa.math import *
from varmain.primitiv import *
from varmain.custom import *
import math
@activate(Group="Support", Ports=1, TooltipShort="Support - insulated, anchor", TooltipLong="Support - insulated, anchor", LengthUnit="mm")
@group("MainDimensions")
@param(D=LENGTH, TooltipShort="Pipe diameter")
@param(H=LENGTH, TooltipShort="Height", Ask4Dist=True)
@param(CL=LENGTH, TooltipShort="Clamp length")
@param(CT=LENGTH, TooltipShort="Clamp thickness")
@param(CW=LENGTH, TooltipShort="Clamp width")
@param(CO=LENGTH, TooltipShort="Clamp offset")
@param(W=LENGTH, TooltipShort="Bottom plate width")
@param(L=LENGTH, TooltipShort="Bottom plate length")
@param(T=LENGTH, TooltipShort="Plate thickness")
@param(NUT=LENGTH, TooltipShort="Nut size (Mxx)")
@param(PA=LENGTH, TooltipShort="Front/back plate width")
@param(PT=LENGTH, TooltipShort="Front/back plate thickness")
@param(LT=LENGTH, TooltipShort="Total length")
def SUP_INS_ANCH(s, D=114.3, H=192, CL=50, CT=8, W=100, L=200, T=11, CW=226, CO=10, NUT=16, PA=60, PT=8, LT=230, ID='SUP_INS_ANCH', **kw):
    """Build an insulated anchor pipe support: a T-shaped pedestal with
    front/back plates and two bolted clamps around the (insulated) pipe.

    All dimensions are in mm; returns None (silently) for non-positive
    main dimensions.  Changes vs. previous revision: removed the unused
    ``bolts`` list and commented-out dead code, and replaced manual loop
    counters with ``enumerate`` — geometry output is unchanged.
    """
    # Metric nut dimensions: h = height, d = corner-to-corner diameter,
    # x = width across flats.
    nutSizes = {
        8: {'h': 6.500, 'd': 13.000, 'x': 7.5056},
        12: {'h': 10.000, 'd': 18.000, 'x': 10.3923},
        16: {'h': 13.000, 'd': 24.000, 'x': 13.8564},
        20: {'h': 16.000, 'd': 30.000, 'x': 17.3205},
        24: {'h': 19.000, 'd': 36.000, 'x': 20.7846}
    }
    # Snap an unknown nut size to the nearest catalogue entry.
    if NUT not in nutSizes:
        NUT = min(nutSizes, key=lambda size: abs(size - NUT))
    nutType = nutSizes[NUT]
    # Reject non-positive main dimensions.
    if D <= 0 or H <= 0 or CL <= 0 or CT <= 0 or T <= 0 or PA <= 0 or PT <= 0:
        return
    # Clamp dependent dimensions to geometrically valid minimums.
    if LT < L + 2*PT:
        LT = L + 2*PT
    if W < T:
        W = T
    # Vertical web plate united with the horizontal bottom plate.
    body = BOX(s, L=T, W=H-D/2-T/2, H=L).translate((0, 0, (H-D/2-T/2)/2-H))
    hPlate = BOX(s, L=W, W=T, H=L).translate((0, 0, T/2 - H))
    body.uniteWith(hPlate)
    hPlate.erase()
    # Front/back plates with a circular cut-out for the insulated pipe.
    endPlateTranslations = ((L/2+PT/2, 0, -H/2+T/4), (-(L/2+PT/2), 0, -H/2+T/4))
    endPlates = []
    for i in endPlateTranslations:
        endPlates.append(BOX(s, L=PA, W=H, H=PT).translate(i))
    for i in endPlates:
        endPlateCut = CYLINDER(s, R=D/2 + CT, H=LT, O=0).rotateY(90).translate((-LT/2, 0, 0))
        i.subtractFrom(endPlateCut)
        endPlateCut.erase()
    for i in endPlates:
        body.uniteWith(i)
        i.erase()
    endPlates.clear()
    # Two pipe clamps, one near each end of the support.
    clamps = []
    clampOffset = ((CL/2 - LT/2, 0, 0), (-CL/2 + LT/2, 0, 0))
    nutHeight = nutType['h']
    nutLength = nutType['d']
    nutWidth = nutType['x']
    # Radius of the sphere used to chamfer the hex nut.
    cutRadius = math.sqrt(math.pow(nutHeight, 2) + math.pow(nutLength/2, 2))
    for cnt, clamp_pos in enumerate(clampOffset):
        clamps.append(CYLINDER(s, R=D/2+CT, H=CL, O=D/2).rotateY(90).translate((-CL/2, 0, 0)))
        clampH = BOX(s, L=CW, W=2*CT+CO, H=CL)
        vPlateCut = CYLINDER(s, R=D/2, H=CL, O=0).rotateY(90).translate((-CL/2, 0, 0))
        clampH.subtractFrom(vPlateCut)
        clamps[cnt].uniteWith(clampH)
        # Split the clamp into upper/lower halves separated by offset CO.
        if CO > 0:
            clampCut = BOX(s, L=CW, W=CO, H=CL)
            clamps[cnt].subtractFrom(clampCut)
        clamps[cnt].translate(clamp_pos)
        # One bolt on each side of the pipe, centred in the clamp ear.
        mainOffsets = ((0, CW/2-(CW/2 - D/2 - CT)/2, 0), (0, -CW/2+(CW/2 - D/2 - CT)/2, 0))
        boltH = 2*nutHeight + 2*CT + CO + 5
        boltR = NUT/2
        boltOffset = (0, 0, -boltH + CO/2 + CT + nutHeight)
        nut1offset = (0, 0, nutHeight/2 + CO/2 + CT)
        nut2offset = (0, 0, -nutHeight/2 - CO/2 - CT)
        nutOffsets = (nut1offset, nut2offset)
        for x in mainOffsets:
            bolt = CYLINDER(s, R=boltR, H=boltH, O=0).translate(boltOffset)
            boltHole = CYLINDER(s, R=boltR+0.5, H=boltH, O=0).translate(boltOffset)
            nutParts = []
            for nc, i in enumerate(nutOffsets):
                # Hex nut = three rotated boxes intersected with a sphere
                # to chamfer the corners.
                nutParts.append(BOX(s, L=nutWidth, W=nutHeight, H=nutLength).translate(i))
                p1 = BOX(s, L=nutWidth, W=nutHeight, H=nutLength).rotateZ(60).translate(i)
                p2 = BOX(s, L=nutWidth, W=nutHeight, H=nutLength).rotateZ(120).translate(i)
                c1 = HALFSPHERE(s, R=cutRadius).translate(i).translate((0, 0, -nutHeight/2))
                nutParts[nc].uniteWith(p1)
                nutParts[nc].uniteWith(p2)
                nutParts[nc].intersectWith(c1)
                p1.erase()
                p2.erase()
                c1.erase()
                if nc == 1:
                    # Lower nut is chamfered on both faces.
                    c2 = HALFSPHERE(s, R=cutRadius).rotateX(180).translate(i).translate((0, 0, nutHeight/2))
                    nutParts[nc].intersectWith(c2)
                    c2.erase()
            for i in nutParts:
                bolt.uniteWith(i)
            bolt.translate(x)
            boltHole.translate(x)
            bolt.translate(clamp_pos)
            boltHole.translate(clamp_pos)
            clamps[cnt].subtractFrom(boltHole)
            clamps[cnt].uniteWith(bolt)
            bolt.erase()
            boltHole.erase()
        body.uniteWith(clamps[cnt])
    clamps.clear()
    s.setPoint((0.000, 0.000, 0.000), (1.000, 0.000, 0.000))
    s.setLinearDimension('H', (0, 0, 0), (0, 0, -H))
| 40.571429
| 140
| 0.561527
|
from aqa.math import *
from varmain.primitiv import *
from varmain.custom import *
import math
@activate(Group="Support", Ports=1, TooltipShort="Support - insulated, anchor", TooltipLong="Support - insulated, anchor", LengthUnit="mm")
@group("MainDimensions")
@param(D=LENGTH, TooltipShort="Pipe diameter")
@param(H=LENGTH, TooltipShort="Height", Ask4Dist=True)
@param(CL=LENGTH, TooltipShort="Clamp length")
@param(CT=LENGTH, TooltipShort="Clamp thickness")
@param(CW=LENGTH, TooltipShort="Clamp width")
@param(CO=LENGTH, TooltipShort="Clamp offset")
@param(W=LENGTH, TooltipShort="Bottom plate width")
@param(L=LENGTH, TooltipShort="Bottom plate length")
@param(T=LENGTH, TooltipShort="Plate thickness")
@param(NUT=LENGTH, TooltipShort="Nut size (Mxx)")
@param(PA=LENGTH, TooltipShort="Front/back plate width")
@param(PT=LENGTH, TooltipShort="Front/back plate thickness")
@param(LT=LENGTH, TooltipShort="Total length")
def SUP_INS_ANCH(s, D=114.3, H=192, CL=50, CT=8, W=100, L=200, T=11, CW= 226, CO=10, NUT=16, PA=60, PT=8, LT=230, ID='SUP_INS_ANCH', **kw):
    """Build an insulated anchor pipe support: a T-shaped pedestal with
    front/back plates and two bolted clamps around the pipe.

    Dimensions are in mm; returns None silently when a main dimension is
    non-positive.
    """
    # Metric nut dimensions: h = height, d = corner-to-corner diameter,
    # x = width across flats.
    nutSizes = {
        8: {'h': 6.500, 'd': 13.000, 'x': 7.5056},
        12: {'h': 10.000, 'd': 18.000, 'x': 10.3923},
        16: {'h': 13.000, 'd': 24.000, 'x': 13.8564},
        20: {'h': 16.000, 'd': 30.000, 'x': 17.3205},
        24: {'h': 19.000, 'd': 36.000, 'x': 20.7846}
    }
    # Snap an unknown nut size to the nearest catalogue entry.
    if NUT not in nutSizes:
        NUT = min(nutSizes, key=lambda x:abs(x-NUT))
    nutType = nutSizes[NUT]
    if D <= 0 or H <=0 or CL <= 0 or CT <= 0 or T<=0 or PA<=0 or PT<=0:
        return
    # Clamp dependent dimensions to geometrically valid minimums.
    if LT < L + 2*PT:
        LT = L + 2*PT
    if W < T:
        W = T
    # Vertical web plate united with the horizontal bottom plate.
    body = BOX(s, L=T, W=H-D/2-T/2, H=L).translate((0, 0, (H-D/2-T/2)/2-H))
    hPlate = BOX(s, L=W, W=T, H=L).translate((0, 0, T/2 - H))
    body.uniteWith(hPlate)
    hPlate.erase()
    # Front/back plates with a circular cut-out for the insulated pipe.
    endPlateTranslations = ((L/2+PT/2, 0, -H/2+T/4), (-(L/2+PT/2), 0, -H/2+T/4))
    endPlates = []
    for i in endPlateTranslations:
        endPlates.append(BOX(s,L=PA, W=H, H=PT).translate(i))
    for i in endPlates:
        endPlateCut = CYLINDER(s, R=D/2 + CT, H=LT, O=0).rotateY(90).translate((-LT/2, 0, 0))
        i.subtractFrom(endPlateCut)
        endPlateCut.erase()
    for i in endPlates:
        body.uniteWith(i)
        i.erase()
    endPlates.clear()
    # Two pipe clamps, one near each end of the support.
    clamps = []
    cnt = 0
    clampOffset = ((CL/2 - LT/2, 0, 0), (-CL/2 + LT/2, 0, 0))
    nutHeight = nutType['h']
    nutLength = nutType['d']
    nutWidth = nutType['x']
    # Radius of the sphere used to chamfer the hex nuts.
    cutRadius = math.sqrt(math.pow(nutHeight, 2) + math.pow(nutLength/2, 2))
    for off in clampOffset:
        clamps.append(CYLINDER(s, R=D/2+CT, H=CL, O=D/2).rotateY(90).translate((-CL/2, 0, 0)))
        clampH = BOX(s, L=CW, W=2*CT+CO, H=CL)
        vPlateCut = CYLINDER(s, R=D/2, H=CL, O=0).rotateY(90).translate((-CL/2, 0, 0))
        clampH.subtractFrom(vPlateCut)
        clamps[cnt].uniteWith(clampH)
        # Split the clamp into upper/lower halves separated by offset CO.
        if CO > 0:
            clampCut = BOX(s, L=CW, W=CO, H=CL)
            clamps[cnt].subtractFrom(clampCut)
        clamps[cnt].translate(clampOffset[cnt])
        # One bolt on each side of the pipe, centred in the clamp ear.
        mainOffsets = ((0, CW/2-(CW/2 - D/2 - CT)/2, 0), (0, -CW/2+(CW/2 - D/2 - CT)/2, 0))
        boltH = 2*nutHeight + 2*CT + CO + 5
        boltR = NUT/2
        boltOffset = (0, 0, -boltH + CO/2 + CT + nutHeight)
        bolts = []
        nut1offset = (0, 0, nutHeight/2 + CO/2 + CT)
        nut2offset = (0, 0, -nutHeight/2 - CO/2 - CT)
        nutOffsets = (nut1offset, nut2offset)
        for x in mainOffsets:
            bolt = CYLINDER(s, R=boltR, H=boltH, O=0).translate(boltOffset)
            boltHole = CYLINDER(s, R=boltR+0.5, H=boltH, O=0).translate(boltOffset)
            nutParts = []
            nc = 0
            for i in nutOffsets:
                # Hex nut = three rotated boxes intersected with a sphere
                # to chamfer the corners.
                nutParts.append(BOX(s, L=nutWidth, W=nutHeight, H=nutLength).translate(i))
                p1 = BOX(s, L=nutWidth, W=nutHeight, H=nutLength).rotateZ(60).translate(i)
                p2 = BOX(s, L=nutWidth, W=nutHeight, H=nutLength).rotateZ(120).translate(i)
                c1 = HALFSPHERE(s, R=cutRadius).translate(i).translate((0, 0, -nutHeight/2))
                nutParts[nc].uniteWith(p1)
                nutParts[nc].uniteWith(p2)
                nutParts[nc].intersectWith(c1)
                p1.erase()
                p2.erase()
                c1.erase()
                if nc == 1:
                    # Lower nut is chamfered on both faces.
                    c2 = HALFSPHERE(s, R=cutRadius).rotateX(180).translate(i).translate((0, 0, nutHeight/2))
                    nutParts[nc].intersectWith(c2)
                    c2.erase()
                nc += 1
            for i in nutParts:
                bolt.uniteWith(i)
            bolt.translate(x)
            boltHole.translate(x)
            bolt.translate(clampOffset[cnt])
            boltHole.translate(clampOffset[cnt])
            clamps[cnt].subtractFrom(boltHole)
            clamps[cnt].uniteWith(bolt)
            bolt.erase()
            boltHole.erase()
        body.uniteWith(clamps[cnt])
        cnt += 1
    clamps.clear()
    # Insertion point on the pipe axis; 'H' is exposed as an editable
    # linear dimension pointing down to the bottom plate.
    s.setPoint((0.000, 0.000, 0.000), (1.000, 0.000, 0.000))
    s.setLinearDimension('H',(0, 0, 0), (0, 0, -H))
| true
| true
|
f70e415380567b1032e640d739af9444d9165b7a
| 443
|
py
|
Python
|
packages/util/galaxy/project_galaxy_util.py
|
lesperry/Metagenomics
|
a1d8b7d96b32ab83cebe513e889b6ef82f7c1dd6
|
[
"CC-BY-3.0"
] | null | null | null |
packages/util/galaxy/project_galaxy_util.py
|
lesperry/Metagenomics
|
a1d8b7d96b32ab83cebe513e889b6ef82f7c1dd6
|
[
"CC-BY-3.0"
] | null | null | null |
packages/util/galaxy/project_galaxy_util.py
|
lesperry/Metagenomics
|
a1d8b7d96b32ab83cebe513e889b6ef82f7c1dd6
|
[
"CC-BY-3.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Distribution metadata constants for the galaxy-util package.
__version__ = '20.9.1.dev0'
PROJECT_NAME = "galaxy-util"
# NOTE: "USERAME" reads like a typo for "USERNAME", but the name is public
# and referenced below, so it is preserved.
PROJECT_OWNER = PROJECT_USERAME = "galaxyproject"
PROJECT_URL = "https://github.com/galaxyproject/galaxy"
PROJECT_AUTHOR = 'Galaxy Project and Community'
PROJECT_DESCRIPTION = 'Galaxy Generic Utilities'
PROJECT_EMAIL = 'galaxy-committers@lists.galaxyproject.org'
# Base URL for fetching raw files from the project's master branch.
RAW_CONTENT_URL = "https://raw.github.com/{0}/{1}/master/".format(
    PROJECT_USERAME, PROJECT_NAME
)
| 31.642857
| 60
| 0.751693
|
# Distribution metadata constants for the galaxy-util package.
__version__ = '20.9.1.dev0'
PROJECT_NAME = "galaxy-util"
# NOTE(review): "USERAME" looks like a typo for "USERNAME"; kept because the
# name is public and referenced below.
PROJECT_OWNER = PROJECT_USERAME = "galaxyproject"
PROJECT_URL = "https://github.com/galaxyproject/galaxy"
PROJECT_AUTHOR = 'Galaxy Project and Community'
PROJECT_DESCRIPTION = 'Galaxy Generic Utilities'
PROJECT_EMAIL = 'galaxy-committers@lists.galaxyproject.org'
# Base URL for fetching raw files from the project's master branch.
RAW_CONTENT_URL = "https://raw.github.com/%s/%s/master/" % (
    PROJECT_USERAME, PROJECT_NAME
)
| true
| true
|
f70e422d00b10925ca83c3d4c99af22670910ada
| 1,280
|
py
|
Python
|
pyta/hypothesis/extra/django/__init__.py
|
AbChatt/Tweet-Analyser-Python
|
2953137b021a71d65fe6a83e6d4b87be36d4039b
|
[
"MIT"
] | 1
|
2020-11-29T20:02:41.000Z
|
2020-11-29T20:02:41.000Z
|
pyta/hypothesis/extra/django/__init__.py
|
AbChatt/Tweet-Analyser-Python
|
2953137b021a71d65fe6a83e6d4b87be36d4039b
|
[
"MIT"
] | null | null | null |
pyta/hypothesis/extra/django/__init__.py
|
AbChatt/Tweet-Analyser-Python
|
2953137b021a71d65fe6a83e6d4b87be36d4039b
|
[
"MIT"
] | null | null | null |
# coding=utf-8
#
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis-python
#
# Most of this work is copyright (C) 2013-2018 David R. MacIver
# (david@drmaciver.com), but it contains contributions by others. See
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER
import unittest
import django.test as dt
class HypothesisTestCase(object):
    """Mixin wiring Hypothesis' per-example lifecycle into Django tests.

    Hypothesis invokes setup_example/teardown_example around every generated
    example; these call Django's per-test fixture hooks (_pre_setup /
    _post_teardown) so each example runs against fresh test state.
    """
    def setup_example(self):
        self._pre_setup()
    def teardown_example(self, example):
        self._post_teardown()
    def __call__(self, result=None):
        # Hypothesis-driven tests handle Django setup/teardown per example
        # (above), so skip SimpleTestCase.__call__, which would otherwise
        # perform it once around the whole test method.
        testMethod = getattr(self, self._testMethodName)
        if getattr(testMethod, u'is_hypothesis_test', False):
            return unittest.TestCase.__call__(self, result)
        else:
            return dt.SimpleTestCase.__call__(self, result)
class TestCase(HypothesisTestCase, dt.TestCase):
    """Hypothesis-aware drop-in replacement for django.test.TestCase."""
    pass
class TransactionTestCase(HypothesisTestCase, dt.TransactionTestCase):
    """Hypothesis-aware drop-in replacement for django.test.TransactionTestCase."""
    pass
| 28.444444
| 78
| 0.723438
|
import unittest
import django.test as dt
class HypothesisTestCase(object):
    """Mixin wiring Hypothesis' per-example lifecycle into Django tests.

    setup_example/teardown_example are called by Hypothesis around every
    generated example and trigger Django's per-test fixture hooks.
    """
    def setup_example(self):
        self._pre_setup()
    def teardown_example(self, example):
        self._post_teardown()
    def __call__(self, result=None):
        # Hypothesis tests manage Django setup/teardown per example, so
        # bypass SimpleTestCase.__call__ for them.
        testMethod = getattr(self, self._testMethodName)
        if getattr(testMethod, u'is_hypothesis_test', False):
            return unittest.TestCase.__call__(self, result)
        else:
            return dt.SimpleTestCase.__call__(self, result)
class TestCase(HypothesisTestCase, dt.TestCase):
    """Hypothesis-aware drop-in replacement for django.test.TestCase."""
    pass
class TransactionTestCase(HypothesisTestCase, dt.TransactionTestCase):
    """Hypothesis-aware drop-in replacement for django.test.TransactionTestCase."""
    pass
| true
| true
|
f70e42ee92403a8eba3c8360dae72840b023f8ee
| 18,724
|
py
|
Python
|
pygeotools/lib/iolib.py
|
ShashankBice/pygeotools
|
5bc74f96cf79f3089572cab7e4f3632ca36b22bc
|
[
"MIT"
] | null | null | null |
pygeotools/lib/iolib.py
|
ShashankBice/pygeotools
|
5bc74f96cf79f3089572cab7e4f3632ca36b22bc
|
[
"MIT"
] | null | null | null |
pygeotools/lib/iolib.py
|
ShashankBice/pygeotools
|
5bc74f96cf79f3089572cab7e4f3632ca36b22bc
|
[
"MIT"
] | 1
|
2018-09-21T03:10:31.000Z
|
2018-09-21T03:10:31.000Z
|
#! /usr/bin/env python
"""
Functions for IO, mostly wrapped around GDAL
Note: This was all written before RasterIO existed, which might be a better choice.
"""
import os
import subprocess
import numpy as np
from osgeo import gdal, gdal_array, osr
# GDAL driver singletons reused throughout the module.
mem_drv = gdal.GetDriverByName('MEM')
gtif_drv = gdal.GetDriverByName('GTiff')
vrt_drv = gdal.GetDriverByName("VRT")
# Default GTiff creation options.
gdal_opt = ['COMPRESS=LZW', 'TILED=YES', 'BIGTIFF=IF_SAFER']
# gdal_opt += ['BLOCKXSIZE=1024', 'BLOCKYSIZE=1024']
# Same options as '-co KEY=VAL' argument pairs, for building command lines.
# (Previously built with a list comprehension used only for its side effect.)
gdal_opt_co = [arg for opt in gdal_opt for arg in ('-co', opt)]
#Add methods to load ma from OpenCV, PIL, etc.
#These formats should be directly readable as np arrays
#Note: want to modify to import all bands as separate arrays in ndarray
#Unless the user requests a single band, or range of bands
#Check for file existence
def fn_check(fn):
    """Return True when *fn* names an existing filesystem path.

    Thin wrapper around os.path.exists kept for readable call sites.

    Parameters
    ----------
    fn : str
        Input filename string.

    Returns
    -------
    bool
        True if the path exists, False otherwise.
    """
    exists = os.path.exists(fn)
    return exists
def fn_check_full(fn):
    """Check that *fn* is an existing, openable regular file.

    Unlike fn_check, this actually opens the file, narrowing the race
    between an existence test and a later open.  Slower than
    os.path.exists.  Fix vs. previous revision: the probe handle opened
    by ``open(fn)`` was never closed (resource leak).

    Parameters
    ----------
    fn : str
        Input filename string.

    Returns
    -------
    status
        True if file exists and can be opened, False otherwise.
    """
    if not os.path.isfile(fn):
        return False
    try:
        # Close the probe handle immediately.
        with open(fn):
            pass
    except IOError:
        return False
    return True
def fn_list_check(fn_list):
    """Return True only if every filename in *fn_list* exists on disk.

    Deliberately does not short-circuit: a warning is printed for each
    missing file so all problems are reported in one pass.
    """
    all_found = True
    for fn in fn_list:
        # Inlined fn_check (os.path.exists) to keep this self-contained.
        if not os.path.exists(fn):
            print('Unable to find: %s' % fn)
            all_found = False
    return all_found
def fn_list_valid(fn_list):
    """Filter *fn_list* down to the filenames that exist on disk.

    Prints the input/output counts and a warning per missing file.
    """
    print('%i input fn' % len(fn_list))
    valid = []
    for fn in fn_list:
        # Inlined fn_check (os.path.exists) to keep this self-contained.
        if os.path.exists(fn):
            valid.append(fn)
        else:
            print('Unable to find: %s' % fn)
    print('%i output fn' % len(valid))
    return valid
#Wrapper around gdal.Open
def fn_getds(fn):
    """Open *fn* as a read-only GDAL Dataset, or return None if missing.

    Wrapper around gdal.Open() with an existence check and warning.
    """
    # Inlined fn_check (os.path.exists) to keep this self-contained.
    if not os.path.exists(fn):
        print("Unable to find %s" % fn)
        return None
    return gdal.Open(fn, gdal.GA_ReadOnly)
def fn_getma(fn, bnum=1):
    """Read band *bnum* of the raster file *fn* as a masked array.

    Parameters
    ----------
    fn : str
        Input filename string
    bnum : int, optional
        Band number (1-based)

    Returns
    -------
    np.ma.array
        Masked array containing raster values
    """
    dataset = fn_getds(fn)
    return ds_getma(dataset, bnum=bnum)
#Given input dataset, return a masked array for the input band
def ds_getma(ds, bnum=1):
    """Return band *bnum* of GDAL Dataset *ds* as a masked array.

    Parameters
    ----------
    ds : gdal.Dataset
        Input GDAL Dataset
    bnum : int, optional
        Band number (1-based)

    Returns
    -------
    np.ma.array
        Masked array containing raster values
    """
    band = ds.GetRasterBand(bnum)
    return b_getma(band)
#Given input band, return a masked array
def b_getma(b):
    """Return GDAL Band *b* as a masked array with nodata masked out.

    Parameters
    ----------
    b : gdal.Band
        Input GDAL Band

    Returns
    -------
    np.ma.array
        Masked array containing raster values
    """
    nodata = get_ndv_b(b)
    # masked_values (rather than masked_equal) tolerates float-precision
    # differences in the nodata value.
    return np.ma.masked_values(b.ReadAsArray(), nodata)
def get_sub_dim(src_ds, scale=None, maxdim=1024):
    """Compute output dimensions for a subsampled dataset.

    When *scale* is None it is derived so that neither axis exceeds
    *maxdim* pixels; a computed or supplied scale <= 1 leaves the
    dimensions unchanged.

    Parameters
    ----------
    src_ds : gdal.Dataset
        Input GDAL Dataset
    scale : int, optional
        Scaling factor
    maxdim : int, optional
        Maximum dimension along either axis, in pixels

    Returns
    -------
    tuple
        (samples, lines, scale): subsampled width, height and the final
        scaling factor actually applied.
    """
    samples = src_ds.RasterXSize
    lines = src_ds.RasterYSize
    maxdim = float(maxdim)
    if scale is None:
        scale = max(samples / maxdim, lines / maxdim)
    # Only shrink; never upsample when the raster already fits.
    if scale > 1:
        samples = int(round(samples / scale))
        lines = int(round(lines / scale))
    return samples, lines, scale
def fn_getma_sub(fn, bnum=1, scale=None, maxdim=1024., return_ds=False):
    """Filename wrapper around ds_getma_sub: load a subsampled masked array.

    NOTE(review): unlike fn_getds, this opens without GA_ReadOnly and
    without an existence check — presumably callers pass valid paths.
    """
    ds = gdal.Open(fn)
    return ds_getma_sub(ds, bnum=bnum, scale=scale, maxdim=maxdim, return_ds=return_ds)
#Load a subsampled array
#Can specify scale factor or max dimension
#No need to load the entire dataset for stats computation
def ds_getma_sub(src_ds, bnum=1, scale=None, maxdim=1024., return_ds=False):
    """Load a subsampled array, rather than full resolution

    This is useful when working with large rasters

    Uses buf_xsize and buf_ysize options from GDAL ReadAsArray method.

    Parameters
    ----------
    src_ds : gdal.Dataset
        Input GDAL Dataset
    bnum : int, optional
        Band number
    scale : int, optional
        Scaling factor
    maxdim : int, optional
        Maximum dimension along either axis, in pixels
    return_ds : bool, optional
        Also build and return an in-memory GDAL dataset holding the
        subsampled band (with a rescaled geotransform).

    Returns
    -------
    np.ma.array
        Masked array containing raster values; when return_ds is True,
        a (masked_array, mem_dataset) tuple instead.
    """
    #print src_ds.GetFileList()[0]
    b = src_ds.GetRasterBand(bnum)
    b_ndv = get_ndv_b(b)
    ns, nl, scale = get_sub_dim(src_ds, scale, maxdim)
    #The buf_size parameters determine the final array dimensions
    b_array = b.ReadAsArray(buf_xsize=ns, buf_ysize=nl)
    bma = np.ma.masked_values(b_array, b_ndv)
    out = bma
    if return_ds:
        dtype = src_ds.GetRasterBand(1).DataType
        src_ds_sub = gdal.GetDriverByName('MEM').Create('', ns, nl, 1, dtype)
        # Pixel sizes (gt[1], gt[5]) grow by the subsampling factor so the
        # smaller raster still covers the same geographic extent.
        gt = np.array(src_ds.GetGeoTransform())
        gt[[1,5]] = gt[[1,5]]*scale
        src_ds_sub.SetGeoTransform(list(gt))
        src_ds_sub.SetProjection(src_ds.GetProjection())
        b = src_ds_sub.GetRasterBand(1)
        b.WriteArray(bma)
        b.SetNoDataValue(b_ndv)
        out = (bma, src_ds_sub)
    return out
#Note: need to consolidate with warplib.writeout (takes ds, not ma)
#Add option to build overviews when writing GTiff
#Input proj must be WKT
def writeGTiff(a, dst_fn, src_ds=None, bnum=1, ndv=None, gt=None, proj=None, create=False, sparse=False):
"""Write input array to disk as GeoTiff
Parameters
----------
a : np.array or np.ma.array
Input array
dst_fn : str
Output filename
src_ds: GDAL Dataset, optional
Source Dataset to use for creating copy
bnum : int, optional
Output band
ndv : float, optional
Output NoData Value
gt : list, optional
Output GeoTransform
proj : str, optional
Output Projection (OGC WKT or PROJ.4 format)
create : bool, optional
Create new dataset
sparse : bool, optional
Output should be created with sparse options
"""
#If input is not np.ma, this creates a new ma, which has default filL_value of 1E20
#Must manually override with ndv
#Also consumes a lot of memory
#Should bypass if input is bool
from pygeotools.lib.malib import checkma
a = checkma(a, fix=False)
#Want to preserve fill_value if already specified
if ndv is not None:
a.set_fill_value(ndv)
driver = gtif_drv
#Currently only support writing singleband rasters
#if a.ndim > 2:
# np_nbands = a.shape[2]
# if src_ds.RasterCount np_nbands:
# for bnum in np_nbands:
nbands = 1
np_dt = a.dtype.name
if src_ds is not None:
#If this is a fn, get a ds
#Note: this saves a lot of unnecessary iolib.fn_getds calls
if isinstance(src_ds, str):
src_ds = fn_getds(src_ds)
#if isinstance(src_ds, gdal.Dataset):
src_dt = gdal.GetDataTypeName(src_ds.GetRasterBand(bnum).DataType)
src_gt = src_ds.GetGeoTransform()
#This is WKT
src_proj = src_ds.GetProjection()
#src_srs = osr.SpatialReference()
#src_srs.ImportFromWkt(src_ds.GetProjectionRef())
#Probably a cleaner way to handle this
if gt is None:
gt = src_gt
if proj is None:
proj = src_proj
#Need to create a new copy of the default options
opt = list(gdal_opt)
#Note: packbits is better for sparse data
if sparse:
opt.remove('COMPRESS=LZW')
opt.append('COMPRESS=PACKBITS')
#Not sure if VW can handle sparse tif
#opt.append('SPARSE_OK=TRUE')
#Use predictor=3 for floating point data
if 'float' in np_dt.lower() and 'COMPRESS=LZW' in opt:
opt.append('PREDICTOR=3')
#If input ma is same as src_ds, write out array using CreateCopy from existing dataset
#if not create and (src_ds is not None) and ((a.shape[0] == src_ds.RasterYSize) and (a.shape[1] == src_ds.RasterXSize) and (np_dt.lower() == src_dt.lower())):
#Should compare srs.IsSame(src_srs)
if not create and (src_ds is not None) and ((a.shape[0] == src_ds.RasterYSize) and (a.shape[1] == src_ds.RasterXSize) and (np_dt.lower() == src_dt.lower())) and (src_gt == gt) and (src_proj == proj):
#Note: third option is strict flag, set to false
dst_ds = driver.CreateCopy(dst_fn, src_ds, 0, options=opt)
#Otherwise, use Create
else:
a_dtype = a.dtype
gdal_dtype = np2gdal_dtype(a_dtype)
if a_dtype.name == 'bool':
#Set ndv to 0
a.fill_value = False
opt.remove('COMPRESS=LZW')
opt.append('COMPRESS=DEFLATE')
#opt.append('NBITS=1')
#Create(fn, nx, ny, nbands, dtype, opt)
dst_ds = driver.Create(dst_fn, a.shape[1], a.shape[0], nbands, gdal_dtype, options=opt)
#Note: Need GeoMA here to make this work, or accept gt as argument
#Could also do ds creation in calling script
if gt is not None:
dst_ds.SetGeoTransform(gt)
if proj is not None:
dst_ds.SetProjection(proj)
dst_ds.GetRasterBand(bnum).WriteArray(a.filled())
dst_ds.GetRasterBand(bnum).SetNoDataValue(float(a.fill_value))
dst_ds = None
def writevrt(out_csv,srs='EPSG:4326',x='field_1',y='field_2'):
"""
Write out a vrt to accompany a csv of points
"""
out_vrt = os.path.splitext(out_csv)[0]+'.vrt'
out_csv = os.path.split(out_csv)[-1]
f = open(out_vrt, 'w')
f.write('<OGRVRTDataSource>\n')
f.write(' <OGRVRTLayer name="%s">\n' % os.path.splitext(out_csv)[0])
f.write(' <SrcDataSource>%s</SrcDataSource>\n' % out_csv)
f.write(' <GeometryType>wkbPoint</GeometryType>\n')
f.write(' <LayerSRS>%s</LayerSRS>\n' % srs)
f.write(' <GeometryField encoding="PointFromColumns" x="%s" y="%s"/>\n' % (x, y))
f.write(' </OGRVRTLayer>\n')
f.write('</OGRVRTDataSource>\n')
f.close()
#Move to geolib?
#Look up equivalent GDAL data type
def np2gdal_dtype(d):
"""
Get GDAL RasterBand datatype that corresponds with NumPy datatype
Input should be numpy array or numpy dtype
"""
dt_dict = gdal_array.codes
if isinstance(d, (np.ndarray, np.generic)):
d = d.dtype
#This creates dtype from another built-in type
#d = np.dtype(d)
if isinstance(d, np.dtype):
if d.name == 'int8':
gdal_dt = 1
elif d.name == 'bool':
#Write out as Byte
gdal_dt = 1
else:
gdal_dt = list(dt_dict.keys())[list(dt_dict.values()).index(d)]
else:
print("Input must be NumPy array or NumPy dtype")
gdal_dt = None
return gdal_dt
def gdal2np_dtype(b):
"""
Get NumPy datatype that corresponds with GDAL RasterBand datatype
Input can be filename, GDAL Dataset, GDAL RasterBand, or GDAL integer dtype
"""
dt_dict = gdal_array.codes
if isinstance(b, str):
b = gdal.Open(b)
if isinstance(b, gdal.Dataset):
b = b.GetRasterBand(1)
if isinstance(b, gdal.Band):
b = b.DataType
if isinstance(b, int):
np_dtype = dt_dict[b]
else:
np_dtype = None
print("Input must be GDAL Dataset or RasterBand object")
return np_dtype
#Replace nodata value in GDAL band
def replace_ndv(b, new_ndv):
b_ndv = get_ndv_b(b)
bma = np.ma.masked_values(b.ReadAsArray(), b_ndv)
bma.set_fill_value(new_ndv)
b.WriteArray(bma.filled())
b.SetNoDataValue(new_ndv)
return b
def set_ndv(dst_fn, ndv):
    """Set the NoData value for every band of an existing raster file.

    Parameters
    ----------
    dst_fn : str
        Filename of raster to update in place
    ndv : float
        NoData value to set
    """
    dst_ds = gdal.Open(dst_fn, gdal.GA_Update)
    for n in range(1, dst_ds.RasterCount+1):
        #Bug fix: previously fetched band 1 on every iteration, so the
        #NoData value was never set for bands 2..N of multiband rasters
        b = dst_ds.GetRasterBand(n)
        b.SetNoDataValue(ndv)
    #Close/flush the dataset
    dst_ds = None
#Should overload these functions to handle fn, ds, or b
#Perhaps abstract, as many functions will need this functionality
def get_ndv_fn(fn):
ds = gdal.Open(fn, gdal.GA_ReadOnly)
return get_ndv_ds(ds)
#Want to modify to handle multi-band images and return list of ndv
def get_ndv_ds(ds, bnum=1):
b = ds.GetRasterBand(bnum)
return get_ndv_b(b)
#Return nodata value for GDAL band
def get_ndv_b(b):
"""Get NoData value for GDAL band.
If NoDataValue is not set in the band,
extract upper left and lower right pixel values.
Otherwise assume NoDataValue is 0.
Parameters
----------
b : GDALRasterBand object
This is the input band.
Returns
-------
b_ndv : float
NoData value
"""
b_ndv = b.GetNoDataValue()
if b_ndv is None:
#Check ul pixel for ndv
ns = b.XSize
nl = b.YSize
ul = float(b.ReadAsArray(0, 0, 1, 1))
#ur = float(b.ReadAsArray(ns-1, 0, 1, 1))
lr = float(b.ReadAsArray(ns-1, nl-1, 1, 1))
#ll = float(b.ReadAsArray(0, nl-1, 1, 1))
#Probably better to use 3/4 corner criterion
#if ul == ur == lr == ll:
if np.isnan(ul) or ul == lr:
b_ndv = ul
else:
#Assume ndv is 0
b_ndv = 0
elif np.isnan(b_ndv):
b_dt = gdal.GetDataTypeName(b.DataType)
if 'Float' in b_dt:
b_ndv = np.nan
else:
b_ndv = 0
return b_ndv
#Write out a recarray as a csv
def write_recarray(outfn, ra):
    """Write a NumPy recarray to CSV, with the field names as a header row."""
    with open(outfn, 'w') as out:
        out.write(','.join(str(name) for name in ra.dtype.names) + '\n')
        for rec in ra:
            out.write(','.join(str(val) for val in rec) + '\n')
#Check to make sure image doesn't contain errors
def image_check(fn):
ds = gdal.Open(fn)
status = True
for i in range(ds.RasterCount):
ds.GetRasterBand(i+1).Checksum()
if gdal.GetLastErrorType() != 0:
status = False
return status
#Return number of CPUs
#Logical is "virtual" cpu count with hyperthreading
#Set to False for physical cpu count
def cpu_count(logical=True):
"""Return system CPU count
"""
if logical:
from multiprocessing import cpu_count
ncpu=cpu_count()
else:
import psutil
ncpu=psutil.cpu_count(logical=False)
return ncpu
def setstripe(dir, threads=cpu_count()):
#import socket
#if 'nasa' in socket.getfqdn():
#Better to use 'df -T' to determine filesystem of directory
#Can do this with psutil Python lib, but need to also find mount point of file
if dir is not None:
if 'lustre' in str(subprocess.check_output(['df','-T'])):
if os.path.exists(dir):
if threads is None:
threads = cpu_count()
cmd = ['lfs', 'setstripe', dir, '-c', str(threads)]
print(' '.join(cmd))
subprocess.call(cmd)
#This is a shared directory for files like LULC, used by multiple tools
#Default location is $HOME/data
#Can specify in ~/.bashrc or ~/.profile
#export DATADIR=$HOME/data
def get_datadir():
    """Return the shared data directory, creating it if necessary.

    Uses the DATADIR environment variable when set, otherwise $HOME/data.
    """
    default = os.path.join(os.path.expanduser('~'), 'data')
    datadir = os.environ.get('DATADIR', default)
    if not os.path.exists(datadir):
        os.makedirs(datadir)
    return datadir
#Function to get files using urllib
#This works with ftp
def getfile(url, outdir=None):
"""Function to fetch files using urllib
Works with ftp
"""
fn = os.path.split(url)[-1]
if outdir is not None:
fn = os.path.join(outdir, fn)
if not os.path.exists(fn):
#Find appropriate urlretrieve for Python 2 and 3
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
print("Retrieving: %s" % url)
#Add progress bar
urlretrieve(url, fn)
return fn
#Function to get files using requests
#Works with https authentication
def getfile2(url, auth=None, outdir=None):
    """Function to fetch files using requests
    Works with https authentication

    Parameters
    ----------
    url : str
        URL to fetch
    auth : requests auth object, optional
        e.g. HTTPDigestAuth credentials
    outdir : str, optional
        Directory for the downloaded file

    Returns
    -------
    str
        Local filename of the downloaded file
    """
    import requests
    print("Retrieving: %s" % url)
    fn = os.path.split(url)[-1]
    if outdir is not None:
        fn = os.path.join(outdir, fn)
    if auth is not None:
        r = requests.get(url, stream=True, auth=auth)
    else:
        r = requests.get(url, stream=True)
    #Stream to disk in 1 MB chunks to limit memory use
    chunk_size = 1000000
    with open(fn, 'wb') as fd:
        for chunk in r.iter_content(chunk_size):
            fd.write(chunk)
    #Bug fix: return the local filename for parity with getfile()
    return fn
#Get necessary credentials to access MODSCAG products - hopefully this will soon be archived with NSIDC
def get_auth():
"""Get authorization token for https
"""
import getpass
from requests.auth import HTTPDigestAuth
#This binds raw_input to input for Python 2
input_func = input
try:
input_func = raw_input
except NameError:
pass
uname = input_func("MODSCAG Username:")
pw = getpass.getpass("MODSCAG Password:")
auth = HTTPDigestAuth(uname, pw)
#wget -A'h8v4*snow_fraction.tif' --user=uname --password=pw
return auth
def readcsv(fn):
    """
    Wrapper to read arbitrary csv, check for header

    Needs some work to be more robust, quickly added for demcoreg sampling
    """
    import csv
    #Check first line for header
    with open(fn, 'r') as f:
        reader = csv.DictReader(f)
        hdr = reader.fieldnames
    #Assume there is a header on first line, check
    skiprows = 1
    #Bug fix: original used np.all() over a generator object, which is always
    #truthy, so the header row was never skipped; also avoid shadowing the
    #file handle `f` inside the generator expression
    if hdr is not None and all(field.isdigit() for field in hdr):
        hdr = None
        skiprows = 0
    #Should probably do genfromtxt here if header exists and dtype of cols is variable
    pts = np.loadtxt(fn, delimiter=',', skiprows=skiprows, dtype=None)
    return pts
| 30.00641
| 203
| 0.624653
|
import os
import subprocess
import numpy as np
from osgeo import gdal, gdal_array, osr
mem_drv = gdal.GetDriverByName('MEM')
gtif_drv = gdal.GetDriverByName('GTiff')
vrt_drv = gdal.GetDriverByName("VRT")
gdal_opt = ['COMPRESS=LZW', 'TILED=YES', 'BIGTIFF=IF_SAFER']
gdal_opt_co = []
[gdal_opt_co.extend(('-co', i)) for i in gdal_opt]
def fn_check(fn):
return os.path.exists(fn)
def fn_check_full(fn):
status = True
if not os.path.isfile(fn):
status = False
else:
try:
open(fn)
except IOError:
status = False
return status
def fn_list_check(fn_list):
status = True
for fn in fn_list:
if not fn_check(fn):
print('Unable to find: %s' % fn)
status = False
return status
def fn_list_valid(fn_list):
print('%i input fn' % len(fn_list))
out_list = []
for fn in fn_list:
if not fn_check(fn):
print('Unable to find: %s' % fn)
else:
out_list.append(fn)
print('%i output fn' % len(out_list))
return out_list
def fn_getds(fn):
ds = None
if fn_check(fn):
ds = gdal.Open(fn, gdal.GA_ReadOnly)
else:
print("Unable to find %s" % fn)
return ds
def fn_getma(fn, bnum=1):
ds = fn_getds(fn)
return ds_getma(ds, bnum=bnum)
def ds_getma(ds, bnum=1):
b = ds.GetRasterBand(bnum)
return b_getma(b)
def b_getma(b):
b_ndv = get_ndv_b(b)
bma = np.ma.masked_values(b.ReadAsArray(), b_ndv)
return bma
def get_sub_dim(src_ds, scale=None, maxdim=1024):
    """Compute subsampled raster dimensions for a GDAL dataset.

    When scale is None, it is derived so the larger dimension fits maxdim.
    Dimensions are only reduced when the resulting scale exceeds 1.
    Returns (nsamples, nlines, scale).
    """
    xsize = src_ds.RasterXSize
    ysize = src_ds.RasterYSize
    maxdim = float(maxdim)
    if scale is None:
        scale = max(xsize / maxdim, ysize / maxdim)
    if scale > 1:
        xsize = int(round(xsize / scale))
        ysize = int(round(ysize / scale))
    return xsize, ysize, scale
def fn_getma_sub(fn, bnum=1, scale=None, maxdim=1024., return_ds=False):
ds = gdal.Open(fn)
return ds_getma_sub(ds, bnum=bnum, scale=scale, maxdim=maxdim, return_ds=return_ds)
def ds_getma_sub(src_ds, bnum=1, scale=None, maxdim=1024., return_ds=False):
b = src_ds.GetRasterBand(bnum)
b_ndv = get_ndv_b(b)
ns, nl, scale = get_sub_dim(src_ds, scale, maxdim)
b_array = b.ReadAsArray(buf_xsize=ns, buf_ysize=nl)
bma = np.ma.masked_values(b_array, b_ndv)
out = bma
if return_ds:
dtype = src_ds.GetRasterBand(1).DataType
src_ds_sub = gdal.GetDriverByName('MEM').Create('', ns, nl, 1, dtype)
gt = np.array(src_ds.GetGeoTransform())
gt[[1,5]] = gt[[1,5]]*scale
src_ds_sub.SetGeoTransform(list(gt))
src_ds_sub.SetProjection(src_ds.GetProjection())
b = src_ds_sub.GetRasterBand(1)
b.WriteArray(bma)
b.SetNoDataValue(b_ndv)
out = (bma, src_ds_sub)
return out
def writeGTiff(a, dst_fn, src_ds=None, bnum=1, ndv=None, gt=None, proj=None, create=False, sparse=False):
    """Write input array to disk as GeoTiff

    Parameters
    ----------
    a : np.array or np.ma.array
        Input array
    dst_fn : str
        Output filename
    src_ds : GDAL Dataset or str, optional
        Source dataset (or filename) providing default geotransform/projection
    bnum : int, optional
        Output band number
    ndv : float, optional
        Output NoData value
    gt : list, optional
        Output geotransform (defaults to src_ds geotransform)
    proj : str, optional
        Output projection, WKT (defaults to src_ds projection)
    create : bool, optional
        Force creation of a new dataset rather than CreateCopy
    sparse : bool, optional
        Use creation options better suited to sparse data
    """
    #Promote input to a masked array (can consume memory for large inputs)
    from pygeotools.lib.malib import checkma
    a = checkma(a, fix=False)
    #Preserve existing fill_value unless ndv is explicitly given
    if ndv is not None:
        a.set_fill_value(ndv)
    driver = gtif_drv
    #Only single-band output is supported here
    nbands = 1
    np_dt = a.dtype.name
    if src_ds is not None:
        #Accept a filename in place of an open dataset
        if isinstance(src_ds, str):
            src_ds = fn_getds(src_ds)
        src_dt = gdal.GetDataTypeName(src_ds.GetRasterBand(bnum).DataType)
        src_gt = src_ds.GetGeoTransform()
        #This is WKT
        src_proj = src_ds.GetProjection()
        #Fall back to source georeferencing when not given explicitly
        if gt is None:
            gt = src_gt
        if proj is None:
            proj = src_proj
    #Work on a copy of the module-level default creation options
    opt = list(gdal_opt)
    #PACKBITS compresses sparse data better than LZW
    if sparse:
        opt.remove('COMPRESS=LZW')
        opt.append('COMPRESS=PACKBITS')
    #Predictor 3 improves LZW compression of floating point data
    if 'float' in np_dt.lower() and 'COMPRESS=LZW' in opt:
        opt.append('PREDICTOR=3')
    #If the array matches src_ds shape/dtype/georeferencing, clone via CreateCopy
    if not create and (src_ds is not None) and ((a.shape[0] == src_ds.RasterYSize) and (a.shape[1] == src_ds.RasterXSize) and (np_dt.lower() == src_dt.lower())) and (src_gt == gt) and (src_proj == proj):
        #Third argument is the strict flag, set to false
        dst_ds = driver.CreateCopy(dst_fn, src_ds, 0, options=opt)
    #Otherwise, build a new dataset with Create
    else:
        a_dtype = a.dtype
        gdal_dtype = np2gdal_dtype(a_dtype)
        if a_dtype.name == 'bool':
            #Booleans are written as Byte with 0 (False) as NoData
            a.fill_value = False
            opt.remove('COMPRESS=LZW')
            opt.append('COMPRESS=DEFLATE')
        dst_ds = driver.Create(dst_fn, a.shape[1], a.shape[0], nbands, gdal_dtype, options=opt)
    if gt is not None:
        dst_ds.SetGeoTransform(gt)
    if proj is not None:
        dst_ds.SetProjection(proj)
    dst_ds.GetRasterBand(bnum).WriteArray(a.filled())
    dst_ds.GetRasterBand(bnum).SetNoDataValue(float(a.fill_value))
    #Close/flush the dataset
    dst_ds = None
def writevrt(out_csv,srs='EPSG:4326',x='field_1',y='field_2'):
out_vrt = os.path.splitext(out_csv)[0]+'.vrt'
out_csv = os.path.split(out_csv)[-1]
f = open(out_vrt, 'w')
f.write('<OGRVRTDataSource>\n')
f.write(' <OGRVRTLayer name="%s">\n' % os.path.splitext(out_csv)[0])
f.write(' <SrcDataSource>%s</SrcDataSource>\n' % out_csv)
f.write(' <GeometryType>wkbPoint</GeometryType>\n')
f.write(' <LayerSRS>%s</LayerSRS>\n' % srs)
f.write(' <GeometryField encoding="PointFromColumns" x="%s" y="%s"/>\n' % (x, y))
f.write(' </OGRVRTLayer>\n')
f.write('</OGRVRTDataSource>\n')
f.close()
def np2gdal_dtype(d):
    """Return the GDAL data type code corresponding to a NumPy dtype.

    Input may be a NumPy array or a NumPy dtype; prints a message and
    returns None for any other input.
    """
    dt_dict = gdal_array.codes
    #Reduce an array (or scalar) to its dtype
    if isinstance(d, (np.ndarray, np.generic)):
        d = d.dtype
    if isinstance(d, np.dtype):
        if d.name == 'int8':
            #Map to Byte (code 1); classic GDAL has no signed 8-bit type
            gdal_dt = 1
        elif d.name == 'bool':
            #Write out as Byte
            gdal_dt = 1
        else:
            #Reverse lookup in the GDAL code -> NumPy dtype mapping
            gdal_dt = list(dt_dict.keys())[list(dt_dict.values()).index(d)]
    else:
        print("Input must be NumPy array or NumPy dtype")
        gdal_dt = None
    return gdal_dt
def gdal2np_dtype(b):
dt_dict = gdal_array.codes
if isinstance(b, str):
b = gdal.Open(b)
if isinstance(b, gdal.Dataset):
b = b.GetRasterBand(1)
if isinstance(b, gdal.Band):
b = b.DataType
if isinstance(b, int):
np_dtype = dt_dict[b]
else:
np_dtype = None
print("Input must be GDAL Dataset or RasterBand object")
return np_dtype
def replace_ndv(b, new_ndv):
b_ndv = get_ndv_b(b)
bma = np.ma.masked_values(b.ReadAsArray(), b_ndv)
bma.set_fill_value(new_ndv)
b.WriteArray(bma.filled())
b.SetNoDataValue(new_ndv)
return b
def set_ndv(dst_fn, ndv):
    """Set the NoData value for every band of an existing raster file.

    Parameters
    ----------
    dst_fn : str
        Filename of raster to update in place
    ndv : float
        NoData value to set
    """
    dst_ds = gdal.Open(dst_fn, gdal.GA_Update)
    for n in range(1, dst_ds.RasterCount+1):
        #Bug fix: previously fetched band 1 on every iteration, so the
        #NoData value was never set for bands 2..N of multiband rasters
        b = dst_ds.GetRasterBand(n)
        b.SetNoDataValue(ndv)
    #Close/flush the dataset
    dst_ds = None
def get_ndv_fn(fn):
ds = gdal.Open(fn, gdal.GA_ReadOnly)
return get_ndv_ds(ds)
def get_ndv_ds(ds, bnum=1):
b = ds.GetRasterBand(bnum)
return get_ndv_b(b)
def get_ndv_b(b):
    """Return the NoData value for a GDAL band.

    If the band has no NoDataValue set, infer it from the upper-left and
    lower-right corner pixels, otherwise fall back to 0. A NaN NoDataValue
    is normalized to np.nan for float bands and 0 for integer bands.
    """
    b_ndv = b.GetNoDataValue()
    if b_ndv is None:
        #Check corner pixels for a likely nodata value
        ns = b.XSize
        nl = b.YSize
        ul = float(b.ReadAsArray(0, 0, 1, 1))
        lr = float(b.ReadAsArray(ns-1, nl-1, 1, 1))
        #Matching (or NaN) corners are assumed to be nodata
        if np.isnan(ul) or ul == lr:
            b_ndv = ul
        else:
            #Assume ndv is 0
            b_ndv = 0
    elif np.isnan(b_ndv):
        #Integer bands cannot represent NaN
        b_dt = gdal.GetDataTypeName(b.DataType)
        if 'Float' in b_dt:
            b_ndv = np.nan
        else:
            b_ndv = 0
    return b_ndv
def write_recarray(outfn, ra):
with open(outfn,'w') as f:
f.write(','.join([str(item) for item in ra.dtype.names])+'\n')
for row in ra:
f.write(','.join([str(item) for item in row])+'\n')
def image_check(fn):
ds = gdal.Open(fn)
status = True
for i in range(ds.RasterCount):
ds.GetRasterBand(i+1).Checksum()
if gdal.GetLastErrorType() != 0:
status = False
return status
#Return number of CPUs
#Logical is "virtual" cpu count with hyperthreading
#Set to False for physical cpu count
def cpu_count(logical=True):
    """Return the system CPU count.

    logical=True counts "virtual" CPUs (hyperthreading) via multiprocessing;
    logical=False returns the physical core count via psutil.
    """
    if logical:
        from multiprocessing import cpu_count as mp_cpu_count
        return mp_cpu_count()
    import psutil
    return psutil.cpu_count(logical=False)
def setstripe(dir, threads=cpu_count()):
#import socket
#if 'nasa' in socket.getfqdn():
#Better to use 'df -T' to determine filesystem of directory
#Can do this with psutil Python lib, but need to also find mount point of file
if dir is not None:
if 'lustre' in str(subprocess.check_output(['df','-T'])):
if os.path.exists(dir):
if threads is None:
threads = cpu_count()
cmd = ['lfs', 'setstripe', dir, '-c', str(threads)]
print(' '.join(cmd))
subprocess.call(cmd)
#This is a shared directory for files like LULC, used by multiple tools
#Default location is $HOME/data
#Can specify in ~/.bashrc or ~/.profile
#export DATADIR=$HOME/data
def get_datadir():
default_datadir = os.path.join(os.path.expanduser('~'), 'data')
datadir = os.environ.get('DATADIR', default_datadir)
if not os.path.exists(datadir):
os.makedirs(datadir)
return datadir
#Function to get files using urllib
#This works with ftp
def getfile(url, outdir=None):
fn = os.path.split(url)[-1]
if outdir is not None:
fn = os.path.join(outdir, fn)
if not os.path.exists(fn):
#Find appropriate urlretrieve for Python 2 and 3
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
print("Retrieving: %s" % url)
#Add progress bar
urlretrieve(url, fn)
return fn
#Function to get files using requests
#Works with https authentication
def getfile2(url, auth=None, outdir=None):
    """Fetch a file using requests (works with https authentication).

    Parameters
    ----------
    url : str
        URL to fetch
    auth : requests auth object, optional
        e.g. HTTPDigestAuth credentials
    outdir : str, optional
        Directory for the downloaded file

    Returns
    -------
    str
        Local filename of the downloaded file
    """
    import requests
    print("Retrieving: %s" % url)
    fn = os.path.split(url)[-1]
    if outdir is not None:
        fn = os.path.join(outdir, fn)
    if auth is not None:
        r = requests.get(url, stream=True, auth=auth)
    else:
        r = requests.get(url, stream=True)
    #Stream to disk in 1 MB chunks to limit memory use
    chunk_size = 1000000
    with open(fn, 'wb') as fd:
        for chunk in r.iter_content(chunk_size):
            fd.write(chunk)
    #Bug fix: return the local filename for parity with getfile()
    return fn
#Get necessary credentials to access MODSCAG products - hopefully this will soon be archived with NSIDC
def get_auth():
import getpass
from requests.auth import HTTPDigestAuth
#This binds raw_input to input for Python 2
input_func = input
try:
input_func = raw_input
except NameError:
pass
uname = input_func("MODSCAG Username:")
pw = getpass.getpass("MODSCAG Password:")
auth = HTTPDigestAuth(uname, pw)
#wget -A'h8v4*snow_fraction.tif' --user=uname --password=pw
return auth
def readcsv(fn):
    """Wrapper to read an arbitrary csv file, checking for a header row.

    Returns the parsed points as a NumPy array.
    """
    import csv
    #Check first line for header
    with open(fn, 'r') as f:
        reader = csv.DictReader(f)
        hdr = reader.fieldnames
    #Assume there is a header on first line, check
    skiprows = 1
    #Bug fix: np.all() over a generator object is always truthy, so the
    #header was never skipped; use builtin all() and don't shadow f
    if hdr is not None and all(field.isdigit() for field in hdr):
        hdr = None
        skiprows = 0
    #Should probably do genfromtxt here if header exists and dtype of cols is variable
    pts = np.loadtxt(fn, delimiter=',', skiprows=skiprows, dtype=None)
    return pts
| true
| true
|
f70e43504e13a15cffa9cc6d318634ad2265bc25
| 2,824
|
py
|
Python
|
Contributor Corner/Priya/LinkedList/Question2.py
|
hitu1304/interview-corner
|
97503d1967c646f731275ae3665f142814c6a9d7
|
[
"MIT"
] | 39
|
2020-11-01T13:58:48.000Z
|
2021-02-12T08:39:37.000Z
|
Contributor Corner/Priya/LinkedList/Question2.py
|
hitu1304/interview-corner
|
97503d1967c646f731275ae3665f142814c6a9d7
|
[
"MIT"
] | 86
|
2020-09-25T07:20:40.000Z
|
2021-02-18T20:36:29.000Z
|
Contributor Corner/Priya/LinkedList/Question2.py
|
hitu1304/interview-corner
|
97503d1967c646f731275ae3665f142814c6a9d7
|
[
"MIT"
] | 43
|
2020-12-18T03:32:42.000Z
|
2021-02-19T18:08:19.000Z
|
# check if the list contains 1 or more nodes
def getLink(head):
    """Return the last node of a singly linked list (None for an empty list)."""
    node = head
    while node is not None and node.next is not None:
        node = node.next
    return node
#initialize the pivot ,newHead and newLink to the partition function
def quickSortRec(head, link):
if head is None or head == link:
return head
newHead = None
newLink = None
pivot, newHead, newLink = partition(head, link, newHead, newLink)
if newHead != pivot:
temp = newHead
while temp.next != pivot:
temp = temp.next
temp.next = None
newHead = quickSortRec(newHead, temp)
temp = getLink(newHead)
temp.next = pivot
pivot.next = quickSortRec(pivot.next, newLink)
return newHead
#divide the entire list into two parts
#where the left of the pivot value will have the values less than pivot
# and right of the pivot value will have the values greater than pivot
def partition(head, Link, newHead, newLink):
pivot = Link
prev = None
curr = head
end = pivot
while curr is not pivot:
if curr.data < pivot.data:
if newHead is None:
newHead = curr
prev = curr
curr = curr.next
else:
if prev:
prev.next = curr.next
temp = curr.next
curr.next = None
end.next = curr
end = curr
curr = temp
if newHead is None:
newHead = pivot
newLink = end
return pivot, newHead, newLink
#Driver's code
from collections import defaultdict
class Node:
def __init__(self,data):
self.data=data
self.next=None
class Llist:
def __init__(self):
self.head=None
def insert(self,data,tail):
node=Node(data)
if not self.head:
self.head=node
return node
tail.next=node
return node
def nodeID(head,dic):
while head:
dic[head.data].append(id(head))
head=head.next
def printList(head,dic):
while head:
if id(head) not in dic[head.data]:
print("Do'nt swap data, swap pointer/node")
return
print(head.data,end=' ')
head=head.next
if __name__ == '__main__':
    #Driver: read t test cases; each is a length line followed by values
    t = int(input())
    for i in range(t):
        n = int(input())
        arr = [int(x) for x in input().split()]
        ll = Llist()
        tail = None
        for nodeData in arr:
            tail = ll.insert(nodeData, tail)
        dic = defaultdict(list)  # dictionary to keep data and id of node
        nodeID(ll.head, dic)  # putting data and its id
        #Bug fix: quickSort() was never defined (NameError at runtime);
        #call quickSortRec with the list head and its tail node
        resHead = quickSortRec(ll.head, getLink(ll.head))
        printList(resHead, dic)  # verifying and printing
        print()
| 25.672727
| 71
| 0.561261
|
def getLink(head):
temp = head
while temp is not None and temp.next is not None:
temp = temp.next
return temp
def quickSortRec(head, link):
    """Recursive quicksort on the linked-list span [head..link].

    Sorts by relinking nodes (no data swaps); returns the new head.
    """
    #Base case: empty or single-node span
    if head is None or head == link:
        return head
    newHead = None
    newLink = None
    #Partition around the last node; newHead/newLink bound the partitioned span
    pivot, newHead, newLink = partition(head, link, newHead, newLink)
    #Recurse on the sublist left of the pivot, unless pivot is already the head
    if newHead != pivot:
        #Detach the left sublist just before the pivot
        temp = newHead
        while temp.next != pivot:
            temp = temp.next
        temp.next = None
        newHead = quickSortRec(newHead, temp)
        #Reattach the pivot after the sorted left sublist
        temp = getLink(newHead)
        temp.next = pivot
    #Recurse on the sublist right of the pivot
    pivot.next = quickSortRec(pivot.next, newLink)
    return newHead
def partition(head, Link, newHead, newLink):
    """Partition the linked-list span [head..Link] around the last node.

    Nodes smaller than the pivot keep their relative order before it;
    larger-or-equal nodes are unlinked and appended after the pivot.
    Returns (pivot, newHead, newLink): the pivot node plus the new head
    and new tail of the partitioned span.
    """
    #The last node is the pivot
    pivot = Link
    prev = None
    curr = head
    end = pivot
    while curr is not pivot:
        if curr.data < pivot.data:
            #First node smaller than the pivot becomes the new head
            if newHead is None:
                newHead = curr
            prev = curr
            curr = curr.next
        else:
            #Unlink curr and append it after the current end (past the pivot)
            if prev:
                prev.next = curr.next
            temp = curr.next
            curr.next = None
            end.next = curr
            end = curr
            curr = temp
    #If the pivot is the smallest element, it becomes the new head
    if newHead is None:
        newHead = pivot
    newLink = end
    return pivot, newHead, newLink
from collections import defaultdict
class Node:
def __init__(self,data):
self.data=data
self.next=None
class Llist:
def __init__(self):
self.head=None
def insert(self,data,tail):
node=Node(data)
if not self.head:
self.head=node
return node
tail.next=node
return node
def nodeID(head,dic):
while head:
dic[head.data].append(id(head))
head=head.next
def printList(head,dic):
while head:
if id(head) not in dic[head.data]:
print("Do'nt swap data, swap pointer/node")
return
print(head.data,end=' ')
head=head.next
if __name__ == '__main__':
    #Driver: read t test cases; each is a length line followed by values
    t = int(input())
    for i in range(t):
        n = int(input())
        arr = [int(x) for x in input().split()]
        ll = Llist()
        tail = None
        for nodeData in arr:
            tail = ll.insert(nodeData, tail)
        #Dictionary mapping node data to node ids, used to verify that
        #nodes (not just data values) were moved
        dic = defaultdict(list)
        nodeID(ll.head, dic)
        #Bug fix: quickSort() was never defined (NameError at runtime);
        #call quickSortRec with the list head and its tail node
        resHead = quickSortRec(ll.head, getLink(ll.head))
        printList(resHead, dic)
        print()
| true
| true
|
f70e44cfa550ef5447e7d7fbe888bb3e387e4c72
| 9,588
|
py
|
Python
|
homeassistant/components/hassio/__init__.py
|
billyburly/home-assistant
|
9795449d22783e77a0ca7b745f15c89a830c5cc6
|
[
"Apache-2.0"
] | 5
|
2020-09-17T10:48:51.000Z
|
2021-11-22T00:08:17.000Z
|
homeassistant/components/hassio/__init__.py
|
billyburly/home-assistant
|
9795449d22783e77a0ca7b745f15c89a830c5cc6
|
[
"Apache-2.0"
] | 7
|
2016-04-09T20:56:30.000Z
|
2016-04-19T21:28:46.000Z
|
homeassistant/components/hassio/__init__.py
|
billyburly/home-assistant
|
9795449d22783e77a0ca7b745f15c89a830c5cc6
|
[
"Apache-2.0"
] | 6
|
2019-12-01T19:06:52.000Z
|
2020-09-17T00:57:06.000Z
|
"""Support for Hass.io."""
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components.homeassistant import SERVICE_CHECK_CONFIG
import homeassistant.config as conf_util
from homeassistant.const import (
ATTR_NAME,
EVENT_CORE_CONFIG_UPDATE,
SERVICE_HOMEASSISTANT_RESTART,
SERVICE_HOMEASSISTANT_STOP,
)
from homeassistant.core import DOMAIN as HASS_DOMAIN, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.loader import bind_hass
from homeassistant.util.dt import utcnow
from .addon_panel import async_setup_addon_panel
from .auth import async_setup_auth_view
from .discovery import async_setup_discovery_view
from .handler import HassIO, HassioAPIError
from .http import HassIOView
from .ingress import async_setup_ingress_view
_LOGGER = logging.getLogger(__name__)
DOMAIN = "hassio"
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CONF_FRONTEND_REPO = "development_repo"
CONFIG_SCHEMA = vol.Schema(
{vol.Optional(DOMAIN): vol.Schema({vol.Optional(CONF_FRONTEND_REPO): cv.isdir})},
extra=vol.ALLOW_EXTRA,
)
DATA_HOMEASSISTANT_VERSION = "hassio_hass_version"
HASSIO_UPDATE_INTERVAL = timedelta(minutes=55)
SERVICE_ADDON_START = "addon_start"
SERVICE_ADDON_STOP = "addon_stop"
SERVICE_ADDON_RESTART = "addon_restart"
SERVICE_ADDON_STDIN = "addon_stdin"
SERVICE_HOST_SHUTDOWN = "host_shutdown"
SERVICE_HOST_REBOOT = "host_reboot"
SERVICE_SNAPSHOT_FULL = "snapshot_full"
SERVICE_SNAPSHOT_PARTIAL = "snapshot_partial"
SERVICE_RESTORE_FULL = "restore_full"
SERVICE_RESTORE_PARTIAL = "restore_partial"
ATTR_ADDON = "addon"
ATTR_INPUT = "input"
ATTR_SNAPSHOT = "snapshot"
ATTR_ADDONS = "addons"
ATTR_FOLDERS = "folders"
ATTR_HOMEASSISTANT = "homeassistant"
ATTR_PASSWORD = "password"
SCHEMA_NO_DATA = vol.Schema({})
SCHEMA_ADDON = vol.Schema({vol.Required(ATTR_ADDON): cv.slug})
SCHEMA_ADDON_STDIN = SCHEMA_ADDON.extend(
{vol.Required(ATTR_INPUT): vol.Any(dict, cv.string)}
)
SCHEMA_SNAPSHOT_FULL = vol.Schema(
{vol.Optional(ATTR_NAME): cv.string, vol.Optional(ATTR_PASSWORD): cv.string}
)
SCHEMA_SNAPSHOT_PARTIAL = SCHEMA_SNAPSHOT_FULL.extend(
{
vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]),
}
)
SCHEMA_RESTORE_FULL = vol.Schema(
{vol.Required(ATTR_SNAPSHOT): cv.slug, vol.Optional(ATTR_PASSWORD): cv.string}
)
SCHEMA_RESTORE_PARTIAL = SCHEMA_RESTORE_FULL.extend(
{
vol.Optional(ATTR_HOMEASSISTANT): cv.boolean,
vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]),
}
)
MAP_SERVICE_API = {
SERVICE_ADDON_START: ("/addons/{addon}/start", SCHEMA_ADDON, 60, False),
SERVICE_ADDON_STOP: ("/addons/{addon}/stop", SCHEMA_ADDON, 60, False),
SERVICE_ADDON_RESTART: ("/addons/{addon}/restart", SCHEMA_ADDON, 60, False),
SERVICE_ADDON_STDIN: ("/addons/{addon}/stdin", SCHEMA_ADDON_STDIN, 60, False),
SERVICE_HOST_SHUTDOWN: ("/host/shutdown", SCHEMA_NO_DATA, 60, False),
SERVICE_HOST_REBOOT: ("/host/reboot", SCHEMA_NO_DATA, 60, False),
SERVICE_SNAPSHOT_FULL: ("/snapshots/new/full", SCHEMA_SNAPSHOT_FULL, 300, True),
SERVICE_SNAPSHOT_PARTIAL: (
"/snapshots/new/partial",
SCHEMA_SNAPSHOT_PARTIAL,
300,
True,
),
SERVICE_RESTORE_FULL: (
"/snapshots/{snapshot}/restore/full",
SCHEMA_RESTORE_FULL,
300,
True,
),
SERVICE_RESTORE_PARTIAL: (
"/snapshots/{snapshot}/restore/partial",
SCHEMA_RESTORE_PARTIAL,
300,
True,
),
}
@callback
@bind_hass
def get_homeassistant_version(hass):
    """Return the latest available Home Assistant version (async friendly)."""
    hass_data = hass.data
    return hass_data.get(DATA_HOMEASSISTANT_VERSION)
@callback
@bind_hass
def is_hassio(hass):
    """Return True when the Hass.io component is loaded (async friendly)."""
    loaded_components = hass.config.components
    return DOMAIN in loaded_components
async def async_setup(hass, config):
    """Set up the Hass.io component.

    Registers the Hass.io HTTP view, frontend panel, service handlers,
    and periodic version polling. Returns False if the supervisor
    environment variables are missing.
    """
    # Check local setup: both env vars are supplied by the supervisor
    for env in ("HASSIO", "HASSIO_TOKEN"):
        if os.environ.get(env):
            continue
        _LOGGER.error("Missing %s environment variable.", env)
        return False

    host = os.environ["HASSIO"]
    websession = hass.helpers.aiohttp_client.async_get_clientsession()
    hass.data[DOMAIN] = hassio = HassIO(hass.loop, websession, host)

    # A failed connection check is logged but does not abort setup
    if not await hassio.is_connected():
        _LOGGER.warning("Not connected with Hass.io / system to busy!")

    store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
    data = await store.async_load()

    if data is None:
        data = {}

    refresh_token = None
    if "hassio_user" in data:
        user = await hass.auth.async_get_user(data["hassio_user"])
        if user and user.refresh_tokens:
            refresh_token = list(user.refresh_tokens.values())[0]

            # Migrate old Hass.io users to be admin.
            if not user.is_admin:
                await hass.auth.async_update_user(user, group_ids=[GROUP_ID_ADMIN])

    # No stored user/token: create a fresh system user and persist it
    if refresh_token is None:
        user = await hass.auth.async_create_system_user("Hass.io", [GROUP_ID_ADMIN])
        refresh_token = await hass.auth.async_create_refresh_token(user)
        data["hassio_user"] = user.id
        await store.async_save(data)

    # This overrides the normal API call that would be forwarded
    development_repo = config.get(DOMAIN, {}).get(CONF_FRONTEND_REPO)
    if development_repo is not None:
        hass.http.register_static_path(
            "/api/hassio/app", os.path.join(development_repo, "hassio/build"), False
        )

    hass.http.register_view(HassIOView(host, websession))

    if "frontend" in hass.config.components:
        await hass.components.panel_custom.async_register_panel(
            frontend_url_path="hassio",
            webcomponent_name="hassio-main",
            sidebar_title="Hass.io",
            sidebar_icon="hass:home-assistant",
            js_url="/api/hassio/app/entrypoint.js",
            embed_iframe=True,
            require_admin=True,
        )

    await hassio.update_hass_api(config.get("http", {}), refresh_token)

    async def push_config(_):
        """Push core config to Hass.io."""
        await hassio.update_hass_timezone(str(hass.config.time_zone))

    hass.bus.async_listen(EVENT_CORE_CONFIG_UPDATE, push_config)

    await push_config(None)

    async def async_service_handler(service):
        """Handle service calls for Hass.io."""
        api_command = MAP_SERVICE_API[service.service][0]
        data = service.data.copy()
        addon = data.pop(ATTR_ADDON, None)
        snapshot = data.pop(ATTR_SNAPSHOT, None)
        payload = None

        # Pass data to Hass.io API
        if service.service == SERVICE_ADDON_STDIN:
            payload = data[ATTR_INPUT]
        elif MAP_SERVICE_API[service.service][3]:
            payload = data

        # Call API; MAP_SERVICE_API index 2 is the per-service timeout
        try:
            await hassio.send_command(
                api_command.format(addon=addon, snapshot=snapshot),
                payload=payload,
                timeout=MAP_SERVICE_API[service.service][2],
            )
        except HassioAPIError as err:
            _LOGGER.error("Error on Hass.io API: %s", err)

    for service, settings in MAP_SERVICE_API.items():
        hass.services.async_register(
            DOMAIN, service, async_service_handler, schema=settings[1]
        )

    async def update_homeassistant_version(now):
        """Update last available Home Assistant version."""
        try:
            data = await hassio.get_homeassistant_info()
            hass.data[DATA_HOMEASSISTANT_VERSION] = data["last_version"]
        except HassioAPIError as err:
            _LOGGER.warning("Can't read last version: %s", err)

        # Re-schedule the next poll after HASSIO_UPDATE_INTERVAL
        hass.helpers.event.async_track_point_in_utc_time(
            update_homeassistant_version, utcnow() + HASSIO_UPDATE_INTERVAL
        )

    # Fetch last version
    await update_homeassistant_version(None)

    async def async_handle_core_service(call):
        """Service handler for handling core services."""
        if call.service == SERVICE_HOMEASSISTANT_STOP:
            await hassio.stop_homeassistant()
            return

        try:
            errors = await conf_util.async_check_ha_config_file(hass)
        except HomeAssistantError:
            return

        # Abort restart and notify the user when the config is invalid
        if errors:
            _LOGGER.error(errors)
            hass.components.persistent_notification.async_create(
                "Config error. See [the logs](/developer-tools/logs) for details.",
                "Config validating",
                f"{HASS_DOMAIN}.check_config",
            )
            return

        if call.service == SERVICE_HOMEASSISTANT_RESTART:
            await hassio.restart_homeassistant()

    # Mock core services
    for service in (
        SERVICE_HOMEASSISTANT_STOP,
        SERVICE_HOMEASSISTANT_RESTART,
        SERVICE_CHECK_CONFIG,
    ):
        hass.services.async_register(HASS_DOMAIN, service, async_handle_core_service)

    # Init discovery Hass.io feature
    async_setup_discovery_view(hass, hassio)

    # Init auth Hass.io feature
    async_setup_auth_view(hass, user)

    # Init ingress Hass.io feature
    async_setup_ingress_view(hass, host)

    # Init add-on ingress panels
    await async_setup_addon_panel(hass, hassio)

    return True
| 31.748344
| 85
| 0.690134
|
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components.homeassistant import SERVICE_CHECK_CONFIG
import homeassistant.config as conf_util
from homeassistant.const import (
ATTR_NAME,
EVENT_CORE_CONFIG_UPDATE,
SERVICE_HOMEASSISTANT_RESTART,
SERVICE_HOMEASSISTANT_STOP,
)
from homeassistant.core import DOMAIN as HASS_DOMAIN, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.loader import bind_hass
from homeassistant.util.dt import utcnow
from .addon_panel import async_setup_addon_panel
from .auth import async_setup_auth_view
from .discovery import async_setup_discovery_view
from .handler import HassIO, HassioAPIError
from .http import HassIOView
from .ingress import async_setup_ingress_view
_LOGGER = logging.getLogger(__name__)
DOMAIN = "hassio"
# Persistent-storage slot used to remember the Hass.io system user id.
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
# Optional config key pointing at a local frontend checkout (development only).
CONF_FRONTEND_REPO = "development_repo"
CONFIG_SCHEMA = vol.Schema(
    {vol.Optional(DOMAIN): vol.Schema({vol.Optional(CONF_FRONTEND_REPO): cv.isdir})},
    extra=vol.ALLOW_EXTRA,
)
# hass.data key caching the last available Home Assistant version.
DATA_HOMEASSISTANT_VERSION = "hassio_hass_version"
# Poll interval for refreshing DATA_HOMEASSISTANT_VERSION.
HASSIO_UPDATE_INTERVAL = timedelta(minutes=55)
# Names of the services registered under the "hassio" domain.
SERVICE_ADDON_START = "addon_start"
SERVICE_ADDON_STOP = "addon_stop"
SERVICE_ADDON_RESTART = "addon_restart"
SERVICE_ADDON_STDIN = "addon_stdin"
SERVICE_HOST_SHUTDOWN = "host_shutdown"
SERVICE_HOST_REBOOT = "host_reboot"
SERVICE_SNAPSHOT_FULL = "snapshot_full"
SERVICE_SNAPSHOT_PARTIAL = "snapshot_partial"
SERVICE_RESTORE_FULL = "restore_full"
SERVICE_RESTORE_PARTIAL = "restore_partial"
# Service-call attribute names.
ATTR_ADDON = "addon"
ATTR_INPUT = "input"
ATTR_SNAPSHOT = "snapshot"
ATTR_ADDONS = "addons"
ATTR_FOLDERS = "folders"
ATTR_HOMEASSISTANT = "homeassistant"
ATTR_PASSWORD = "password"
# Validation schemas for the service payloads above.
SCHEMA_NO_DATA = vol.Schema({})
SCHEMA_ADDON = vol.Schema({vol.Required(ATTR_ADDON): cv.slug})
SCHEMA_ADDON_STDIN = SCHEMA_ADDON.extend(
    {vol.Required(ATTR_INPUT): vol.Any(dict, cv.string)}
)
SCHEMA_SNAPSHOT_FULL = vol.Schema(
    {vol.Optional(ATTR_NAME): cv.string, vol.Optional(ATTR_PASSWORD): cv.string}
)
SCHEMA_SNAPSHOT_PARTIAL = SCHEMA_SNAPSHOT_FULL.extend(
    {
        vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]),
    }
)
SCHEMA_RESTORE_FULL = vol.Schema(
    {vol.Required(ATTR_SNAPSHOT): cv.slug, vol.Optional(ATTR_PASSWORD): cv.string}
)
SCHEMA_RESTORE_PARTIAL = SCHEMA_RESTORE_FULL.extend(
    {
        vol.Optional(ATTR_HOMEASSISTANT): cv.boolean,
        vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]),
    }
)
# Map of service name -> (supervisor API endpoint, payload schema,
# timeout in seconds, whether remaining service data is sent as payload).
# Consumed by async_service_handler in async_setup below.
MAP_SERVICE_API = {
    SERVICE_ADDON_START: ("/addons/{addon}/start", SCHEMA_ADDON, 60, False),
    SERVICE_ADDON_STOP: ("/addons/{addon}/stop", SCHEMA_ADDON, 60, False),
    SERVICE_ADDON_RESTART: ("/addons/{addon}/restart", SCHEMA_ADDON, 60, False),
    SERVICE_ADDON_STDIN: ("/addons/{addon}/stdin", SCHEMA_ADDON_STDIN, 60, False),
    SERVICE_HOST_SHUTDOWN: ("/host/shutdown", SCHEMA_NO_DATA, 60, False),
    SERVICE_HOST_REBOOT: ("/host/reboot", SCHEMA_NO_DATA, 60, False),
    SERVICE_SNAPSHOT_FULL: ("/snapshots/new/full", SCHEMA_SNAPSHOT_FULL, 300, True),
    SERVICE_SNAPSHOT_PARTIAL: (
        "/snapshots/new/partial",
        SCHEMA_SNAPSHOT_PARTIAL,
        300,
        True,
    ),
    SERVICE_RESTORE_FULL: (
        "/snapshots/{snapshot}/restore/full",
        SCHEMA_RESTORE_FULL,
        300,
        True,
    ),
    SERVICE_RESTORE_PARTIAL: (
        "/snapshots/{snapshot}/restore/partial",
        SCHEMA_RESTORE_PARTIAL,
        300,
        True,
    ),
}
@callback
@bind_hass
def get_homeassistant_version(hass):
    """Return the cached last-available Home Assistant version.

    May be None until the first successful supervisor poll stores
    data["last_version"] (see update_homeassistant_version in async_setup).
    """
    return hass.data.get(DATA_HOMEASSISTANT_VERSION)
@callback
@bind_hass
def is_hassio(hass):
    """Return True if the Hass.io component is loaded."""
    return DOMAIN in hass.config.components
async def async_setup(hass, config):
    """Set up the Hass.io component.

    Validates the supervisor environment, provisions a system user /
    refresh token, registers the frontend panel, proxies Hass.io services,
    and wires up discovery/auth/ingress views. Returns False when the
    required environment variables are missing, True otherwise.
    """
    # Both variables are injected by the supervisor; abort setup otherwise.
    for env in ("HASSIO", "HASSIO_TOKEN"):
        if os.environ.get(env):
            continue
        _LOGGER.error("Missing %s environment variable.", env)
        return False
    host = os.environ["HASSIO"]
    websession = hass.helpers.aiohttp_client.async_get_clientsession()
    hass.data[DOMAIN] = hassio = HassIO(hass.loop, websession, host)
    # Only a warning: setup continues even if the supervisor is busy.
    if not await hassio.is_connected():
        _LOGGER.warning("Not connected with Hass.io / system to busy!")
    store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
    data = await store.async_load()
    if data is None:
        data = {}
    refresh_token = None
    if "hassio_user" in data:
        user = await hass.auth.async_get_user(data["hassio_user"])
        if user and user.refresh_tokens:
            refresh_token = list(user.refresh_tokens.values())[0]
            # Migrate old Hass.io users that predate the admin group.
            if not user.is_admin:
                await hass.auth.async_update_user(user, group_ids=[GROUP_ID_ADMIN])
    # First run (or stale user record): create the system user + token.
    if refresh_token is None:
        user = await hass.auth.async_create_system_user("Hass.io", [GROUP_ID_ADMIN])
        refresh_token = await hass.auth.async_create_refresh_token(user)
        data["hassio_user"] = user.id
        await store.async_save(data)
    # Serve a local frontend build instead of the bundled one (dev only).
    development_repo = config.get(DOMAIN, {}).get(CONF_FRONTEND_REPO)
    if development_repo is not None:
        hass.http.register_static_path(
            "/api/hassio/app", os.path.join(development_repo, "hassio/build"), False
        )
    hass.http.register_view(HassIOView(host, websession))
    if "frontend" in hass.config.components:
        await hass.components.panel_custom.async_register_panel(
            frontend_url_path="hassio",
            webcomponent_name="hassio-main",
            sidebar_title="Hass.io",
            sidebar_icon="hass:home-assistant",
            js_url="/api/hassio/app/entrypoint.js",
            embed_iframe=True,
            require_admin=True,
        )
    # Hand the supervisor our HTTP settings and the refresh token it
    # should use to talk back to this Home Assistant instance.
    await hassio.update_hass_api(config.get("http", {}), refresh_token)
    async def push_config(_):
        """Push core config to Hass.io."""
        await hassio.update_hass_timezone(str(hass.config.time_zone))
    hass.bus.async_listen(EVENT_CORE_CONFIG_UPDATE, push_config)
    # Push once now so the supervisor starts out in sync.
    await push_config(None)
    async def async_service_handler(service):
        """Forward a hassio.* service call to the supervisor API."""
        api_command = MAP_SERVICE_API[service.service][0]
        data = service.data.copy()
        addon = data.pop(ATTR_ADDON, None)
        snapshot = data.pop(ATTR_SNAPSHOT, None)
        payload = None
        # addon_stdin sends only the "input" field; other services send the
        # remaining data when their MAP_SERVICE_API flag says so.
        if service.service == SERVICE_ADDON_STDIN:
            payload = data[ATTR_INPUT]
        elif MAP_SERVICE_API[service.service][3]:
            payload = data
        try:
            await hassio.send_command(
                api_command.format(addon=addon, snapshot=snapshot),
                payload=payload,
                timeout=MAP_SERVICE_API[service.service][2],
            )
        except HassioAPIError as err:
            _LOGGER.error("Error on Hass.io API: %s", err)
    for service, settings in MAP_SERVICE_API.items():
        hass.services.async_register(
            DOMAIN, service, async_service_handler, schema=settings[1]
        )
    async def update_homeassistant_version(now):
        """Update last available Home Assistant version and reschedule."""
        try:
            data = await hassio.get_homeassistant_info()
            hass.data[DATA_HOMEASSISTANT_VERSION] = data["last_version"]
        except HassioAPIError as err:
            _LOGGER.warning("Can't read last version: %s", err)
        # Reschedules itself; runs every HASSIO_UPDATE_INTERVAL.
        hass.helpers.event.async_track_point_in_utc_time(
            update_homeassistant_version, utcnow() + HASSIO_UPDATE_INTERVAL
        )
    # Fetch last version
    await update_homeassistant_version(None)
    async def async_handle_core_service(call):
        """Service handler for handling core stop/restart/check_config."""
        if call.service == SERVICE_HOMEASSISTANT_STOP:
            await hassio.stop_homeassistant()
            return
        try:
            errors = await conf_util.async_check_ha_config_file(hass)
        except HomeAssistantError:
            return
        # Refuse to restart on a broken configuration; surface the error.
        if errors:
            _LOGGER.error(errors)
            hass.components.persistent_notification.async_create(
                "Config error. See [the logs](/developer-tools/logs) for details.",
                "Config validating",
                f"{HASS_DOMAIN}.check_config",
            )
            return
        if call.service == SERVICE_HOMEASSISTANT_RESTART:
            await hassio.restart_homeassistant()
    # Mock core services
    for service in (
        SERVICE_HOMEASSISTANT_STOP,
        SERVICE_HOMEASSISTANT_RESTART,
        SERVICE_CHECK_CONFIG,
    ):
        hass.services.async_register(HASS_DOMAIN, service, async_handle_core_service)
    # Init discovery Hass.io feature
    async_setup_discovery_view(hass, hassio)
    # Init auth Hass.io feature
    async_setup_auth_view(hass, user)
    # Init ingress Hass.io feature
    async_setup_ingress_view(hass, host)
    # Init add-on ingress panels
    await async_setup_addon_panel(hass, hassio)
    return True
| true
| true
|
f70e45028620215655edff86e95adc79d831e0d7
| 8,254
|
py
|
Python
|
gateapi-python/gate_api/models/margin_account_book.py
|
jarenmt/IEOPUMP
|
220f7f612d299f7305e82fe6c33661e6871f2d86
|
[
"MIT"
] | null | null | null |
gateapi-python/gate_api/models/margin_account_book.py
|
jarenmt/IEOPUMP
|
220f7f612d299f7305e82fe6c33661e6871f2d86
|
[
"MIT"
] | null | null | null |
gateapi-python/gate_api/models/margin_account_book.py
|
jarenmt/IEOPUMP
|
220f7f612d299f7305e82fe6c33661e6871f2d86
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Gate API v4
Welcome to Gate.io API APIv4 provides spot, margin and futures trading operations. There are public APIs to retrieve the real-time market statistics, and private APIs which needs authentication to trade on user's behalf. # noqa: E501
Contact: support@mail.gate.io
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from gate_api.configuration import Configuration
class MarginAccountBook(object):
    """Margin account balance-change record.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'id': 'str',
        'time': 'str',
        'time_ms': 'int',
        'currency': 'str',
        'currency_pair': 'str',
        'change': 'str',
        'balance': 'str',
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'id': 'id',
        'time': 'time',
        'time_ms': 'time_ms',
        'currency': 'currency',
        'currency_pair': 'currency_pair',
        'change': 'change',
        'balance': 'balance',
    }

    def __init__(
        self,
        id=None,
        time=None,
        time_ms=None,
        currency=None,
        currency_pair=None,
        change=None,
        balance=None,
        local_vars_configuration=None,
    ):  # noqa: E501
        """MarginAccountBook - a model defined in OpenAPI.

        Every field is optional; only arguments that are not None are
        stored, via the corresponding property setter.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self.discriminator = None

        # All backing fields start out unset.
        for field_name in self.openapi_types:
            setattr(self, "_" + field_name, None)

        provided = (
            ("id", id),
            ("time", time),
            ("time_ms", time_ms),
            ("currency", currency),
            ("currency_pair", currency_pair),
            ("change", change),
            ("balance", balance),
        )
        for field_name, value in provided:
            if value is not None:
                setattr(self, field_name, value)

    @property
    def id(self):
        """Balance change record ID."""
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def time(self):
        """Balance changed timestamp."""
        return self._time

    @time.setter
    def time(self, time):
        self._time = time

    @property
    def time_ms(self):
        """The timestamp of the change (in milliseconds)."""
        return self._time_ms

    @time_ms.setter
    def time_ms(self, time_ms):
        self._time_ms = time_ms

    @property
    def currency(self):
        """Currency changed."""
        return self._currency

    @currency.setter
    def currency(self, currency):
        self._currency = currency

    @property
    def currency_pair(self):
        """Account currency pair."""
        return self._currency_pair

    @currency_pair.setter
    def currency_pair(self, currency_pair):
        self._currency_pair = currency_pair

    @property
    def change(self):
        """Amount changed. Positive means transferring in, negative out."""
        return self._change

    @change.setter
    def change(self, change):
        self._change = change

    @property
    def balance(self):
        """Balance after change."""
        return self._balance

    @balance.setter
    def balance(self, balance):
        self._balance = balance

    def to_dict(self):
        """Return the model properties as a dict."""

        def _serialize(value):
            # One level of model-aware conversion, matching the generator's
            # conventions: nested models expose their own to_dict().
            if isinstance(value, list):
                return [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            return value

        return {
            name: _serialize(getattr(self, name)) for name in self.openapi_types
        }

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        return (
            isinstance(other, MarginAccountBook)
            and self.to_dict() == other.to_dict()
        )

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| 27.513333
| 239
| 0.579961
|
import pprint
import re
import six
from gate_api.configuration import Configuration
class MarginAccountBook(object):
    """Margin account balance-change record.

    NOTE: auto-generated OpenAPI model (openapi-generator.tech); prefer
    regenerating over editing by hand.
    """
    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'id': 'str',
        'time': 'str',
        'time_ms': 'int',
        'currency': 'str',
        'currency_pair': 'str',
        'change': 'str',
        'balance': 'str',
    }
    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'id': 'id',
        'time': 'time',
        'time_ms': 'time_ms',
        'currency': 'currency',
        'currency_pair': 'currency_pair',
        'change': 'change',
        'balance': 'balance',
    }
    def __init__(
        self,
        id=None,
        time=None,
        time_ms=None,
        currency=None,
        currency_pair=None,
        change=None,
        balance=None,
        local_vars_configuration=None,
    ):
        """MarginAccountBook - a model defined in OpenAPI.

        Every field is optional; only arguments that are not None are
        assigned, via the corresponding property setter.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Backing fields for the properties below; None means "unset".
        self._id = None
        self._time = None
        self._time_ms = None
        self._currency = None
        self._currency_pair = None
        self._change = None
        self._balance = None
        self.discriminator = None
        if id is not None:
            self.id = id
        if time is not None:
            self.time = time
        if time_ms is not None:
            self.time_ms = time_ms
        if currency is not None:
            self.currency = currency
        if currency_pair is not None:
            self.currency_pair = currency_pair
        if change is not None:
            self.change = change
        if balance is not None:
            self.balance = balance
    @property
    def id(self):
        """Balance change record ID."""
        return self._id
    @id.setter
    def id(self, id):
        """Set the balance change record ID."""
        self._id = id
    @property
    def time(self):
        """Balance changed timestamp."""
        return self._time
    @time.setter
    def time(self, time):
        """Set the balance changed timestamp."""
        self._time = time
    @property
    def time_ms(self):
        """The timestamp of the change (in milliseconds)."""
        return self._time_ms
    @time_ms.setter
    def time_ms(self, time_ms):
        """Set the timestamp of the change (in milliseconds)."""
        self._time_ms = time_ms
    @property
    def currency(self):
        """Currency changed."""
        return self._currency
    @currency.setter
    def currency(self, currency):
        """Set the currency changed."""
        self._currency = currency
    @property
    def currency_pair(self):
        """Account currency pair."""
        return self._currency_pair
    @currency_pair.setter
    def currency_pair(self, currency_pair):
        """Set the account currency pair."""
        self._currency_pair = currency_pair
    @property
    def change(self):
        """Amount changed. Positive means transferring in, negative out."""
        return self._change
    @change.setter
    def change(self, change):
        """Set the amount changed."""
        self._change = change
    @property
    def balance(self):
        """Balance after change."""
        return self._balance
    @balance.setter
    def balance(self, balance):
        """Set the balance after change."""
        self._balance = balance
    def to_dict(self):
        """Return the model properties as a dict (nested models converted)."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item,
                        value.items(),
                    )
                )
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, MarginAccountBook):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Return True if both objects are not equal."""
        if not isinstance(other, MarginAccountBook):
            return True
        return self.to_dict() != other.to_dict()
| true
| true
|
f70e4517d1db8c8e699000ac090e9cb2e6804237
| 68
|
py
|
Python
|
leo/test/unittest/at-nosent-test.py
|
ATikhonov2/leo-editor
|
225aac990a9b2804aaa9dea29574d6e072e30474
|
[
"MIT"
] | 2
|
2020-01-19T18:11:05.000Z
|
2020-01-19T18:12:07.000Z
|
leo/test/unittest/at-nosent-test.py
|
ATikhonov2/leo-editor
|
225aac990a9b2804aaa9dea29574d6e072e30474
|
[
"MIT"
] | 1
|
2020-06-19T02:28:25.000Z
|
2020-06-19T02:28:25.000Z
|
leo/test/unittest/at-nosent-test.py
|
ATikhonov2/leo-editor
|
225aac990a9b2804aaa9dea29574d6e072e30474
|
[
"MIT"
] | null | null | null |
def spam():
    # NOTE(review): unit-test fixture — the non-ASCII character in the
    # comment below appears to be the test payload; keep it byte-identical.
    pass # Unicode test: Ã after.
def eggs():
    # Intentionally empty companion fixture.
    pass
| 11.333333
| 33
| 0.573529
|
def spam():
    # Intentionally empty test fixture.
    pass
def eggs():
    # Intentionally empty test fixture.
    pass
| true
| true
|
f70e48341f8af8da92f5b40473129505df7071a9
| 6,330
|
py
|
Python
|
python/onshape_client/oas/models/btp_top_level_import285_all_of.py
|
toebes/onshape-clients
|
a26cf6a77cfc7901321e603d5a097e23eb51e35c
|
[
"MIT"
] | 14
|
2019-06-23T08:47:41.000Z
|
2021-11-29T16:28:45.000Z
|
python/onshape_client/oas/models/btp_top_level_import285_all_of.py
|
toebes/onshape-clients
|
a26cf6a77cfc7901321e603d5a097e23eb51e35c
|
[
"MIT"
] | 40
|
2019-05-22T14:39:46.000Z
|
2022-03-10T10:36:17.000Z
|
python/onshape_client/oas/models/btp_top_level_import285_all_of.py
|
toebes/onshape-clients
|
a26cf6a77cfc7901321e603d5a097e23eb51e35c
|
[
"MIT"
] | 24
|
2019-06-02T01:03:41.000Z
|
2022-03-29T13:25:36.000Z
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btp_identifier8
except ImportError:
btp_identifier8 = sys.modules["onshape_client.oas.models.btp_identifier8"]
try:
from onshape_client.oas.models import btp_module_id235
except ImportError:
btp_module_id235 = sys.modules["onshape_client.oas.models.btp_module_id235"]
try:
from onshape_client.oas.models import btp_space10
except ImportError:
btp_space10 = sys.modules["onshape_client.oas.models.btp_space10"]
class BTPTopLevelImport285AllOf(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum restrictions or extra validations on this model.
    allowed_values = {}
    validations = {}
    additional_properties_type = None

    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "bt_type": (str,),  # noqa: E501
            "combined_namespace_path_and_version": (str,),  # noqa: E501
            "import_microversion": (str,),  # noqa: E501
            "module_id": (btp_module_id235.BTPModuleId235,),  # noqa: E501
            "namespace": ([btp_identifier8.BTPIdentifier8],),  # noqa: E501
            "namespace_string": (str,),  # noqa: E501
            "space_before_import": (btp_space10.BTPSpace10,),  # noqa: E501
        }

    @staticmethod
    def discriminator():
        # This model defines no discriminator field.
        return None

    attribute_map = {
        "bt_type": "btType",  # noqa: E501
        "combined_namespace_path_and_version": "combinedNamespacePathAndVersion",  # noqa: E501
        "import_microversion": "importMicroversion",  # noqa: E501
        "module_id": "moduleId",  # noqa: E501
        "namespace": "namespace",  # noqa: E501
        "namespace_string": "namespaceString",  # noqa: E501
        "space_before_import": "spaceBeforeImport",  # noqa: E501
    }

    @staticmethod
    def _composed_schemas():
        # No allOf/oneOf/anyOf composition on this leaf schema.
        return None

    # Internal bookkeeping attributes that must never be treated as model
    # properties by the base class.
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
        ]
    )

    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """btp_top_level_import285_all_of.BTPTopLevelImport285AllOf - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _from_server (bool): True if the data is from the server
                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            bt_type (str): [optional]  # noqa: E501
            combined_namespace_path_and_version (str): [optional]  # noqa: E501
            import_microversion (str): [optional]  # noqa: E501
            module_id (btp_module_id235.BTPModuleId235): [optional]  # noqa: E501
            namespace ([btp_identifier8.BTPIdentifier8]): [optional]  # noqa: E501
            namespace_string (str): [optional]  # noqa: E501
            space_before_import (btp_space10.BTPSpace10): [optional]  # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # Unknown key and the configuration asks to drop unknowns:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| 36.171429
| 96
| 0.622433
|
from __future__ import absolute_import
import re
import sys
import six
import nulltype
from onshape_client.oas.model_utils import (
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btp_identifier8
except ImportError:
btp_identifier8 = sys.modules["onshape_client.oas.models.btp_identifier8"]
try:
from onshape_client.oas.models import btp_module_id235
except ImportError:
btp_module_id235 = sys.modules["onshape_client.oas.models.btp_module_id235"]
try:
from onshape_client.oas.models import btp_space10
except ImportError:
btp_space10 = sys.modules["onshape_client.oas.models.btp_space10"]
class BTPTopLevelImport285AllOf(ModelNormal):
    """Auto-generated OpenAPI model (openapi-generator.tech).

    Do not edit manually; regenerate from the API definition instead.
    """
    # No enum restrictions or extra validations on this model.
    allowed_values = {}
    validations = {}
    additional_properties_type = None
    @staticmethod
    def openapi_types():
        """Return attribute name -> accepted type tuple.

        A method (not a class attribute) so the mapping may reference model
        classes without creating cyclic imports.
        """
        return {
            "bt_type": (str,),
            "combined_namespace_path_and_version": (str,),
            "import_microversion": (str,),
            "module_id": (btp_module_id235.BTPModuleId235,),
            "namespace": ([btp_identifier8.BTPIdentifier8],),
            "namespace_string": (str,),
            "space_before_import": (btp_space10.BTPSpace10,),
        }
    @staticmethod
    def discriminator():
        """No discriminator field on this model."""
        return None
    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        "bt_type": "btType",
        "combined_namespace_path_and_version": "combinedNamespacePathAndVersion",
        "import_microversion": "importMicroversion",
        "module_id": "moduleId",
        "namespace": "namespace",
        "namespace_string": "namespaceString",
        "space_before_import": "spaceBeforeImport",
    }
    @staticmethod
    def _composed_schemas():
        """No allOf/oneOf/anyOf composition on this leaf schema."""
        return None
    # Internal bookkeeping attributes the base class must not treat as
    # model properties.
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
        ]
    )
    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):
        """Create the model; model fields are passed as keyword arguments.

        Unknown keyword arguments are discarded when the configuration sets
        discard_unknown_keys and no additional properties are allowed.
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                continue
            setattr(self, var_name, var_value)
| true
| true
|
f70e4872b40d326f3b83fe49a7d3cc1f2e93f6c3
| 5,886
|
py
|
Python
|
data/utils.py
|
chenxiaoyu523/RPNet-Pytorch
|
7beceb9f39e66eba5283536b478f86523fcc96c7
|
[
"MIT"
] | 38
|
2019-04-25T09:46:14.000Z
|
2021-10-11T04:35:46.000Z
|
data/utils.py
|
chenxiaoyu523/RPNet-Pytorch
|
7beceb9f39e66eba5283536b478f86523fcc96c7
|
[
"MIT"
] | 6
|
2019-08-29T13:20:55.000Z
|
2022-03-11T23:45:32.000Z
|
data/utils.py
|
chenxiaoyu523/RPNet-Pytorch
|
7beceb9f39e66eba5283536b478f86523fcc96c7
|
[
"MIT"
] | 10
|
2019-04-27T02:29:48.000Z
|
2022-03-22T07:49:41.000Z
|
import os
from PIL import Image
import numpy as np
def get_files(folder, name_filter=None, extension_filter=None):
    """Helper function that returns the list of files in a specified folder
    with a specified extension.

    Keyword arguments:
    - folder (``string``): The path to a folder.
    - name_filter (``string``, optional): The returned files must contain
    this substring in their filename. Default: None; files are not filtered.
    - extension_filter (``string``, optional): The desired file extension.
    Default: None; files are not filtered.

    Raises RuntimeError when ``folder`` is not a directory. Files are
    returned as full paths, sorted per directory for deterministic output.
    """
    if not os.path.isdir(folder):
        raise RuntimeError("\"{0}\" is not a folder.".format(folder))

    # Single predicate instead of the previous pair of assigned lambdas
    # (PEP 8 E731); a missing filter always matches.
    def _matches(filename):
        if name_filter is not None and name_filter not in filename:
            return False
        if extension_filter is not None and not filename.endswith(extension_filter):
            return False
        return True

    filtered_files = []
    # Explore the directory tree to get files that contain "name_filter"
    # and end with "extension_filter".
    for path, _, files in os.walk(folder):
        files.sort()
        for file in files:
            if _matches(file):
                filtered_files.append(os.path.join(path, file))
    return filtered_files
def pil_loader(data_path, label_path):
    """Load a sample and its ground-truth label as PIL images.

    Keyword arguments:
    - data_path (``string``): The filepath to the image.
    - label_path (``string``): The filepath to the ground-truth image.

    Returns the image and the label as PIL images.
    """
    return Image.open(data_path), Image.open(label_path)
def remap(image, old_values, new_values):
    """Replace every occurrence of ``old_values[i]`` in ``image`` with
    ``new_values[i]``.

    Values not listed in ``old_values`` come out as 0. Accepts a
    ``PIL.Image`` or ``numpy.ndarray``; returns a ``PIL.Image``.
    """
    assert isinstance(
        image, (Image.Image, np.ndarray)
    ), "image must be of type PIL.Image or numpy.ndarray"
    assert type(new_values) is tuple, "new_values must be of type tuple"
    assert type(old_values) is tuple, "old_values must be of type tuple"
    assert len(new_values) == len(
        old_values
    ), "new_values and old_values must have the same length"

    # Work on a numpy array regardless of the input type.
    pixels = np.array(image) if isinstance(image, Image.Image) else image

    # Starting from zeros means unlisted values map to 0, and mappings
    # whose target is 0 need no explicit write.
    remapped = np.zeros_like(pixels)
    for old, new in zip(old_values, new_values):
        if new != 0:
            remapped[pixels == old] = new
    return Image.fromarray(remapped)
def enet_weighing(dataloader, num_classes, c=1.02):
    """Computes class weights as described in the ENet paper:

        w_class = 1 / (ln(c + p_class)),

    where c is usually 1.02 and p_class is the propensity score of that
    class: propensity_score = freq_class / total_pixels.

    References: https://arxiv.org/abs/1606.02147

    Keyword arguments:
    - dataloader (``data.Dataloader``): A data loader to iterate over the
    dataset.
    - num_classes (``int``): The number of classes.
    - c (``int``, optional): An additional hyper-parameter which restricts
    the interval of values for the weights. Default: 1.02.
    """
    per_class_pixels = 0
    pixel_total = 0
    for _, label in dataloader:
        # Accumulate per-class pixel counts and the overall pixel total.
        flat = label.cpu().numpy().flatten()
        per_class_pixels = per_class_pixels + np.bincount(flat, minlength=num_classes)
        pixel_total += flat.size
    # Propensity score of each class, then the ENet weighting.
    propensity_score = per_class_pixels / pixel_total
    return 1 / np.log(c + propensity_score)
def median_freq_balancing(dataloader, num_classes):
    """Compute class weights using median frequency balancing as described
    in https://arxiv.org/abs/1411.4734:

        w_class = median_freq / freq_class,

    where ``freq_class`` is the number of pixels of a given class divided
    by the total number of pixels in images where that class is present,
    and ``median_freq`` is the median of ``freq_class``.

    Keyword arguments:
    - dataloader (``data.Dataloader``): A data loader to iterate over the
      dataset.
    - num_classes (``int``): The number of classes.
    """
    pixel_counts = 0
    present_totals = 0
    for _, label in dataloader:
        flat = label.cpu().numpy().flatten()
        counts = np.bincount(flat, minlength=num_classes)
        # A label contributes its full pixel count only to the classes
        # that actually appear in it (counts > 0 acts as the mask).
        present_totals += (counts > 0) * flat.size
        pixel_counts += counts

    freq = pixel_counts / present_totals
    return np.median(freq) / freq
| 33.443182
| 78
| 0.668705
|
import os
from PIL import Image
import numpy as np
def get_files(folder, name_filter=None, extension_filter=None):
    """Walk *folder* recursively and return the paths of files whose names
    contain *name_filter* (if given) and end with *extension_filter*
    (if given), sorted per directory.

    Raises ``RuntimeError`` when *folder* is not a directory.
    """
    if not os.path.isdir(folder):
        raise RuntimeError("\"{0}\" is not a folder.".format(folder))

    def matches(filename):
        # A filter that was not supplied always matches.
        name_ok = name_filter is None or name_filter in filename
        ext_ok = extension_filter is None or filename.endswith(extension_filter)
        return name_ok and ext_ok

    collected = []
    for dirpath, _, filenames in os.walk(folder):
        for filename in sorted(filenames):
            if matches(filename):
                collected.append(os.path.join(dirpath, filename))
    return collected
def pil_loader(data_path, label_path):
    """Load the image at *data_path* and its ground-truth image at
    *label_path*, returning them as a ``(data, label)`` pair of PIL
    images."""
    return Image.open(data_path), Image.open(label_path)
def remap(image, old_values, new_values):
assert isinstance(image, Image.Image) or isinstance(
image, np.ndarray), "image must be of type PIL.Image or numpy.ndarray"
assert type(new_values) is tuple, "new_values must be of type tuple"
assert type(old_values) is tuple, "old_values must be of type tuple"
assert len(new_values) == len(
old_values), "new_values and old_values must have the same length"
if isinstance(image, Image.Image):
image = np.array(image)
tmp = np.zeros_like(image)
for old, new in zip(old_values, new_values):
if new != 0:
tmp[image == old] = new
return Image.fromarray(tmp)
def enet_weighing(dataloader, num_classes, c=1.02):
class_count = 0
total = 0
for _, label in dataloader:
label = label.cpu().numpy()
flat_label = label.flatten()
class_count += np.bincount(flat_label, minlength=num_classes)
total += flat_label.size
propensity_score = class_count / total
class_weights = 1 / (np.log(c + propensity_score))
return class_weights
def median_freq_balancing(dataloader, num_classes):
class_count = 0
total = 0
for _, label in dataloader:
label = label.cpu().numpy()
flat_label = label.flatten()
bincount = np.bincount(flat_label, minlength=num_classes)
mask = bincount > 0
total += mask * flat_label.size
class_count += bincount
freq = class_count / total
med = np.median(freq)
return med / freq
| true
| true
|
f70e4a685313e46875726d300d749d557e2869a3
| 437
|
py
|
Python
|
replay.py
|
pengg307/yygh
|
6937ca9b9a9e6f79ed406fafa513cdf3b2b7869e
|
[
"Apache-2.0"
] | null | null | null |
replay.py
|
pengg307/yygh
|
6937ca9b9a9e6f79ed406fafa513cdf3b2b7869e
|
[
"Apache-2.0"
] | null | null | null |
replay.py
|
pengg307/yygh
|
6937ca9b9a9e6f79ed406fafa513cdf3b2b7869e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = 'pengg'
from datetime import date
from tqsdk import TqApi, TqAuth, TqReplay
'''
复盘模式示例: 指定日期行情完全复盘
复盘 2020-05-26 行情
'''
# Replay-mode example: passing a TqReplay instance as `backtest` when
# creating the TqApi puts the API into full-replay mode for the given
# date (here 2020-10-15).
api = TqApi(backtest=TqReplay(date(2020, 10, 15)), auth=TqAuth("aimoons", "112411"))
# Subscribe to the SHFE copper contract cu2101.
quote = api.get_quote("SHFE.cu2101")
while True:
    # Block until new data arrives, then print the latest price when the
    # quote has changed ("最新价" = "latest price").
    api.wait_update()
    if api.is_changing(quote):
        print("最新价", quote.datetime, quote.last_price)
| 23
| 84
| 0.691076
|
__author__ = 'pengg'
from datetime import date
from tqsdk import TqApi, TqAuth, TqReplay
api = TqApi(backtest=TqReplay(date(2020, 10, 15)), auth=TqAuth("aimoons", "112411"))
quote = api.get_quote("SHFE.cu2101")
while True:
api.wait_update()
if api.is_changing(quote):
print("最新价", quote.datetime, quote.last_price)
| true
| true
|
f70e4a94c07d2052afd857b27b3cf1c18682c06a
| 1,052
|
py
|
Python
|
setup.py
|
stefan-jansen/pipeline-live
|
1d654f544d748d5cc9fb927bff1f8f4c3a592cc6
|
[
"Apache-2.0"
] | 2
|
2020-09-18T22:54:28.000Z
|
2020-12-06T13:58:19.000Z
|
setup.py
|
stefan-jansen/pipeline-live
|
1d654f544d748d5cc9fb927bff1f8f4c3a592cc6
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
stefan-jansen/pipeline-live
|
1d654f544d748d5cc9fb927bff1f8f4c3a592cc6
|
[
"Apache-2.0"
] | 2
|
2020-09-09T22:40:26.000Z
|
2021-02-22T09:58:58.000Z
|
#!/usr/bin/env python
import ast
import re
from setuptools import setup, find_packages
# Extract __version__ from the package's _version.py without importing the
# package (importing could pull in heavy/unavailable dependencies).
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('pipeline_live/_version.py', 'rb') as f:
    version = str(ast.literal_eval(_version_re.search(
        f.read().decode('utf-8')).group(1)))
# Use the README as the PyPI long description.
with open('README.md') as readme_file:
    README = readme_file.read()
setup(
    name='pipeline-live',
    version=version,
    description='Zipline Pipeline extension for live trade',
    long_description=README,
    long_description_content_type='text/markdown',
    author='Alpaca',
    author_email='oss@alpaca.markets',
    url='https://github.com/alpacahq/pipeline_live',
    keywords='financial,zipline,pipeline,stock,screening,api,trade',
    packages=find_packages(),
    install_requires=[
        'alpaca-trade-api>=0.29',
        'iexfinance>=0.4.1',
        'zipline==1.3.0',
        'numpy==1.16.1',
    ],
    tests_require=[
        'pytest',
        'pytest-cov',
        'flake8',
    ],
    setup_requires=['pytest-runner', 'flake8'],
)
| 26.3
| 68
| 0.648289
|
import ast
import re
from setuptools import setup, find_packages
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('pipeline_live/_version.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
with open('README.md') as readme_file:
README = readme_file.read()
setup(
name='pipeline-live',
version=version,
description='Zipline Pipeline extension for live trade',
long_description=README,
long_description_content_type='text/markdown',
author='Alpaca',
author_email='oss@alpaca.markets',
url='https://github.com/alpacahq/pipeline_live',
keywords='financial,zipline,pipeline,stock,screening,api,trade',
packages=find_packages(),
install_requires=[
'alpaca-trade-api>=0.29',
'iexfinance>=0.4.1',
'zipline==1.3.0',
'numpy==1.16.1',
],
tests_require=[
'pytest',
'pytest-cov',
'flake8',
],
setup_requires=['pytest-runner', 'flake8'],
)
| true
| true
|
f70e4a9f20f1a0e853bf6a37f58d1f6ba991ea33
| 3,124
|
py
|
Python
|
tests/test_domains.py
|
geraxe/dolib
|
2728db044a65b0bba15e7bfbc633d24a21b955d0
|
[
"MIT"
] | 5
|
2020-05-30T05:20:06.000Z
|
2021-05-21T21:42:34.000Z
|
tests/test_domains.py
|
geraxe/dolib
|
2728db044a65b0bba15e7bfbc633d24a21b955d0
|
[
"MIT"
] | 17
|
2020-05-30T08:17:10.000Z
|
2021-06-20T13:26:37.000Z
|
tests/test_domains.py
|
geraxe/dolib
|
2728db044a65b0bba15e7bfbc633d24a21b955d0
|
[
"MIT"
] | 3
|
2020-05-30T05:28:08.000Z
|
2021-04-10T17:07:02.000Z
|
import pytest
from dolib.client import AsyncClient, Client
from dolib.models import Domain
@pytest.mark.vcr
@pytest.mark.block_network()
def test_crud_domains(client: Client) -> None:
    """Exercise the full domain + record CRUD cycle against recorded
    (VCR) responses. The API call order must match the cassette."""
    domain = Domain(name="test.dolib.io")

    # Create the domain and confirm the API echoes its name back.
    created = client.domains.create(domain=domain)
    assert created.name == "test.dolib.io"

    # Fetch it again and sanity-check the returned fields.
    fetched = client.domains.get(name=domain.name)
    assert fetched.name == domain.name
    assert fetched.ttl > 0
    assert len(fetched.zone_file) > 0

    # The account listing must not be empty.
    domain_list = client.domains.all()
    assert len(domain_list) > 0

    # Add an A record pointing at 8.8.8.8; default TTL is 1800.
    record = Domain.Record(type="A", name="@", data="8.8.8.8")
    record = client.domains.create_record(name=domain.name, record=record)
    assert record.id > 0
    assert record.ttl == 1800

    # Rename the record and shorten its TTL.
    record.name = "test"
    record.ttl = 60
    record = client.domains.update_record(name=domain.name, record=record)
    assert record.ttl == 60
    assert record.name == "test"

    # Unfiltered listing is non-empty; the filtered one finds exactly ours.
    all_records = client.domains.records(name=domain.name)
    assert len(all_records) > 0
    filtered = client.domains.records(
        name=domain.name, record_name="test.test.dolib.io", record_type="A"
    )
    assert len(filtered) == 1

    # Clean up: the record first, then the domain itself.
    client.domains.delete_record(name=domain.name, record=record)
    client.domains.delete(domain=created)
@pytest.mark.vcr
@pytest.mark.block_network()
@pytest.mark.asyncio
async def test_async_crud_domains(async_client: AsyncClient) -> None:
    """Async twin of ``test_crud_domains``: same CRUD cycle via the
    AsyncClient against recorded (VCR) responses."""
    domain = Domain(name="test.dolib.io")

    # Create the domain and confirm the API echoes its name back.
    created = await async_client.domains.create(domain=domain)
    assert created.name == "test.dolib.io"

    # Fetch it again and sanity-check the returned fields.
    fetched = await async_client.domains.get(name=domain.name)
    assert fetched.name == domain.name
    assert fetched.ttl > 0
    assert len(fetched.zone_file) > 0

    # The account listing must not be empty.
    domain_list = await async_client.domains.all()
    assert len(domain_list) > 0

    # Add an A record pointing at 8.8.8.8; default TTL is 1800.
    record = Domain.Record(type="A", name="@", data="8.8.8.8")
    record = await async_client.domains.create_record(name=domain.name, record=record)
    assert record.id > 0
    assert record.ttl == 1800

    # Rename the record and shorten its TTL.
    record.name = "test"
    record.ttl = 60
    record = await async_client.domains.update_record(name=domain.name, record=record)
    assert record.ttl == 60
    assert record.name == "test"

    # Unfiltered listing is non-empty; the filtered one finds exactly ours.
    all_records = await async_client.domains.records(name=domain.name)
    assert len(all_records) > 0
    filtered = await async_client.domains.records(
        name=domain.name, record_name="test.test.dolib.io", record_type="A"
    )
    assert len(filtered) == 1

    # Clean up: the record first, then the domain itself.
    await async_client.domains.delete_record(name=domain.name, record=record)
    await async_client.domains.delete(domain=created)
| 30.627451
| 86
| 0.702625
|
import pytest
from dolib.client import AsyncClient, Client
from dolib.models import Domain
@pytest.mark.vcr
@pytest.mark.block_network()
def test_crud_domains(client: Client) -> None:
domain = Domain(name="test.dolib.io")
created_domain = client.domains.create(domain=domain)
assert created_domain.name == "test.dolib.io"
read_domain = client.domains.get(name=domain.name)
assert read_domain.name == domain.name
assert read_domain.ttl > 0
assert len(read_domain.zone_file) > 0
domains = client.domains.all()
assert len(domains) > 0
record = Domain.Record(type="A", name="@", data="8.8.8.8")
record = client.domains.create_record(name=domain.name, record=record)
assert record.id > 0
assert record.ttl == 1800
record.name = "test"
record.ttl = 60
record = client.domains.update_record(name=domain.name, record=record)
assert record.ttl == 60
assert record.name == "test"
records = client.domains.records(name=domain.name)
len_records = len(records)
assert len_records > 0
filtered_records = client.domains.records(
name=domain.name, record_name="test.test.dolib.io", record_type="A"
)
assert len(filtered_records) == 1
client.domains.delete_record(name=domain.name, record=record)
client.domains.delete(domain=created_domain)
@pytest.mark.vcr
@pytest.mark.block_network()
@pytest.mark.asyncio
async def test_async_crud_domains(async_client: AsyncClient) -> None:
domain = Domain(name="test.dolib.io")
created_domain = await async_client.domains.create(domain=domain)
assert created_domain.name == "test.dolib.io"
read_domain = await async_client.domains.get(name=domain.name)
assert read_domain.name == domain.name
assert read_domain.ttl > 0
assert len(read_domain.zone_file) > 0
domains = await async_client.domains.all()
assert len(domains) > 0
record = Domain.Record(type="A", name="@", data="8.8.8.8")
record = await async_client.domains.create_record(name=domain.name, record=record)
assert record.id > 0
assert record.ttl == 1800
record.name = "test"
record.ttl = 60
record = await async_client.domains.update_record(name=domain.name, record=record)
assert record.ttl == 60
assert record.name == "test"
records = await async_client.domains.records(name=domain.name)
len_records = len(records)
assert len_records > 0
filtered_records = await async_client.domains.records(
name=domain.name, record_name="test.test.dolib.io", record_type="A"
)
assert len(filtered_records) == 1
await async_client.domains.delete_record(name=domain.name, record=record)
await async_client.domains.delete(domain=created_domain)
| true
| true
|
f70e4acdef1254afc4d5607c4f0f8b0a22414499
| 1,522
|
py
|
Python
|
cfgov/scripts/create_careers_pages.py
|
higs4281/cfgov-refresh
|
a02b193fb2373d443265c21845adf8a196e05675
|
[
"CC0-1.0"
] | 1
|
2019-11-26T20:18:22.000Z
|
2019-11-26T20:18:22.000Z
|
cfgov/scripts/create_careers_pages.py
|
higs4281/cfgov-refresh
|
a02b193fb2373d443265c21845adf8a196e05675
|
[
"CC0-1.0"
] | 8
|
2021-03-11T00:55:51.000Z
|
2022-02-13T21:10:14.000Z
|
cfgov/scripts/create_careers_pages.py
|
higs4281/cfgov-refresh
|
a02b193fb2373d443265c21845adf8a196e05675
|
[
"CC0-1.0"
] | 1
|
2019-12-28T14:04:07.000Z
|
2019-12-28T14:04:07.000Z
|
import logging
from django.db import transaction
from wagtail.wagtailcore.models import Page, Site
from v1.models import BrowsePage, LandingPage, SublandingPage
from v1.tests.wagtail_pages.helpers import save_new_page
logger = logging.getLogger(__name__)
@transaction.atomic
def run():
    """Ensure the About Us / Careers page tree exists under the default
    site, creating any missing page as an unpublished draft."""
    site = Site.objects.get(is_default_site=True)
    root = site.root_page

    # About Us landing page under the site root.
    try:
        about_us = Page.objects.get(slug='about-us')
    except Page.DoesNotExist:
        logger.info('Creating page: About Us')
        about_us = LandingPage(title='About Us', slug='about-us', live=False)
        save_new_page(about_us, root=root)

    # Careers sublanding page under About Us.
    try:
        careers = Page.objects.get(slug='careers')
    except Page.DoesNotExist:
        logger.info('Creating page: Careers')
        careers = SublandingPage(title='Careers', slug='careers', live=False)
        save_new_page(careers, root=about_us)

    # Browse pages nested under Careers: (title, slug) pairs.
    for child_title, child_slug in (
        ('Working at the CFPB', 'working-at-cfpb'),
        ('Job Application Process', 'application-process'),
        ('Students and Graduates', 'students-and-graduates'),
        ('Current Openings', 'current-openings'),
    ):
        try:
            Page.objects.get(slug=child_slug)
        except Page.DoesNotExist:
            logger.info('Creating page: {}'.format(child_title))
            child = BrowsePage(title=child_title, slug=child_slug, live=False)
            save_new_page(child, careers)
# Allow running this module directly as a script.
# Fixed the non-idiomatic Yoda condition ('__main__' == __name__).
if __name__ == '__main__':
    run()
| 29.843137
| 77
| 0.670171
|
import logging
from django.db import transaction
from wagtail.wagtailcore.models import Page, Site
from v1.models import BrowsePage, LandingPage, SublandingPage
from v1.tests.wagtail_pages.helpers import save_new_page
logger = logging.getLogger(__name__)
@transaction.atomic
def run():
default_site = Site.objects.get(is_default_site=True)
root_page = default_site.root_page
try:
about_us = Page.objects.get(slug='about-us')
except Page.DoesNotExist:
logger.info('Creating page: About Us')
about_us = LandingPage(title='About Us', slug='about-us', live=False)
save_new_page(about_us, root=root_page)
try:
careers = Page.objects.get(slug='careers')
except Page.DoesNotExist:
logger.info('Creating page: Careers')
careers = SublandingPage(title='Careers', slug='careers', live=False)
save_new_page(careers, root=about_us)
child_pages = [
('Working at the CFPB', 'working-at-cfpb'),
('Job Application Process', 'application-process'),
('Students and Graduates', 'students-and-graduates'),
('Current Openings', 'current-openings'),
]
for title, slug in child_pages:
try:
child_page = Page.objects.get(slug=slug)
except Page.DoesNotExist:
logger.info('Creating page: {}'.format(title))
child_page = BrowsePage(title=title, slug=slug, live=False)
save_new_page(child_page, careers)
if '__main__' == __name__:
run()
| true
| true
|
f70e4b0adb4d0a26505c85ebbd4d38ee6b69c8c0
| 657
|
py
|
Python
|
pyof/v0x05/asynchronous/request_forward.py
|
mhaji007/python-openflow
|
25f032d660e648501d1e732969b6f91357ef5b66
|
[
"MIT"
] | null | null | null |
pyof/v0x05/asynchronous/request_forward.py
|
mhaji007/python-openflow
|
25f032d660e648501d1e732969b6f91357ef5b66
|
[
"MIT"
] | null | null | null |
pyof/v0x05/asynchronous/request_forward.py
|
mhaji007/python-openflow
|
25f032d660e648501d1e732969b6f91357ef5b66
|
[
"MIT"
] | null | null | null |
"""Defines a Request Forward Message."""
# System imports
from enum import IntEnum
# Local source tree imports
from pyof.foundation.base import GenericMessage
from pyof.v0x05.common.header import Header,Type
# Enums
class RequestForwardReason(IntEnum):
    """Reason codes carried by an OpenFlow request-forward message."""

    #: Forward Group Mod requests
    OFPRFR_GROUP_MOD = 0
    #: Forward meter mod requests
    OFPRFR_METER_MOD = 1
# Classes
class RequestForwardHeader(GenericMessage):
    """OpenFlow request-forward message (type OFPT_REQUESTFORWARD).

    Carries its own header plus the header of the request being
    forwarded.
    """
    #: Message header; type is fixed to OFPT_REQUESTFORWARD
    header = Header(message_type=Type.OFPT_REQUESTFORWARD)
    #: Request being forwarded
    request = Header()
| 21.9
| 58
| 0.727549
|
from enum import IntEnum
from pyof.foundation.base import GenericMessage
from pyof.v0x05.common.header import Header,Type
class RequestForwardReason(IntEnum):
OFPRFR_GROUP_MOD = 0
OFPRFR_METER_MOD = 1
class RequestForwardHeader(GenericMessage):
header = Header(message_type=Type.OFPT_REQUESTFORWARD)
request = Header()
| true
| true
|
f70e4b4ef58486cb4a40ecd6a8b27bfdc8a0ad74
| 6,463
|
py
|
Python
|
normalizer_mysql.py
|
miandu/es-mysql-report-generation
|
d2d92dc2cd864e951763f674e306889e47a485aa
|
[
"MIT"
] | null | null | null |
normalizer_mysql.py
|
miandu/es-mysql-report-generation
|
d2d92dc2cd864e951763f674e306889e47a485aa
|
[
"MIT"
] | null | null | null |
normalizer_mysql.py
|
miandu/es-mysql-report-generation
|
d2d92dc2cd864e951763f674e306889e47a485aa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import json,datetime,time,argparse,logging,sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), "libs"))
from boto3.dynamodb.conditions import Attr
import general_storage,sqs,utils,query,general_storage_mysql
from progress.bar import Bar
from pprint import pprint
class Normalizer():
    """Base class for normalizing a source (input) object into a target
    (output) object.

    Subclasses define ``target_source_rule``, a mapping from each target
    key to either a source key (string) or a callable that receives the
    normalizer instance and returns the derived value.
    """

    # Class-level defaults kept for backward compatibility; __init__
    # shadows the mutable dicts with per-instance copies.
    #: source (input) of normalization
    source = {}
    #: target (output) of normalization
    target = {}
    #: mapping from target key to source key or lambda function
    target_source_rule = {}

    def __init__(self):
        # Bug fix: the original mutated the *class-level* dicts through
        # ``self.target[s] = ...``, so state leaked across every instance
        # of a subclass. Give each instance its own dicts instead.
        self.source = {}
        self.target = {}

    def set_source(self, source):
        """Set the source (input) object."""
        self.source = source

    def set_target(self, target):
        """Set the target (output) object."""
        self.target = target

    def get_source_value(self, s):
        """Resolve target key *s* via ``target_source_rule``: a string
        rule is looked up in the source (None if missing); a callable
        rule is invoked with this normalizer instance."""
        mapping = self.target_source_rule[s]
        if isinstance(mapping, str):
            return self.source.get(mapping)
        else:
            return mapping(self)

    def get_info(self, item):
        """Build the serialized 'info' field from a DynamoDB item."""
        author = self.get_author(item)
        return utils.fix_data_to_string({
            "created_time" : item["created_time"],
            "message":item['message'],
            "from" : author
        })

    def get_author(self, item):
        """Build the serialized 'author' field from a DynamoDB item."""
        return utils.fix_data_to_string({"id":item["user_id"],
            "name":item.get("user_name","unknown"),
            "profile_picture_url":item['original_data'].get("user",{}).get("profile_image_url_https","")})

    def normalize_source_to_target(self, cf, source):
        """Normalize *source* into ``self.target`` using the rule mapping.

        *cf* (client config) is accepted for interface compatibility but
        not used by the base implementation.
        """
        self.set_source(source)
        if self.source:
            for s in self.target_source_rule:
                self.target[s] = self.get_source_value(s)
        else:
            print("No source specified")
class Normalizer_post_dynomodb_mysql(Normalizer):
    """Normalizer for 'post' objects going from DynamoDB to MySQL
    (rows for the ``twit_posts_<client>`` table)."""
    # Table-name component used by the caller when building the query.
    name="posts"
    ## source(input) of normalization
    source={}
    ## target(output) of normalization
    target={}
    # MySQL column -> DynamoDB key (string) or callable deriving the value.
    target_source_rule={'page_id':'asset_id',
            'sub_page_id':'asset_id',
            'post_id':'object_id',
            'updated_time':'updated_time',
            'created_time':'created_time',
            'info':lambda x: x.get_info(x.source),
            'json_search':'',
            'author':lambda x:x.get_author(x.source),
            'tags':'',
            'task_ids':''
            }
class Normalizer_comment_dynomodb_mysql(Normalizer):
    """Normalizer for 'comment' objects going from DynamoDB to MySQL
    (rows for the ``twit_comments_<client>`` table)."""
    # Table-name component used by the caller when building the query.
    name="comments"
    ## source(input) of normalization
    source={}
    ## target(output) of normalization
    target={}
    # MySQL column -> DynamoDB key (string) or callable deriving the value.
    target_source_rule={'page_id':'asset_id',
            'sub_page_id':'asset_id',
            'message':'message',
            'post_id':'post_id',
            'comment_id':'object_id',
            #'parent_id':'post_id',
            #'updated_time':'updated_time',
            'created_time':'created_time',
            'info':lambda x: x.get_info(x.source),
            'json_search':'',
            'author':lambda x:x.get_author(x.source),
            'tags':'',
            'task_ids':''
            }
def insert_dynamodb_item_into_mysql(cf,i):
    """Normalize a DynamoDB item (post or comment) and insert it into the
    client's MySQL table.

    - cf: client config (provides client_short_name and DB settings).
    - i: DynamoDB item dict; i['object_type'] selects the normalizer
      (anything other than 'post' is treated as a comment).
    """
    if i['object_type']=='post':
        nl = Normalizer_post_dynomodb_mysql()
    else:
        nl = Normalizer_comment_dynomodb_mysql()
    nl.normalize_source_to_target(cf,i)
    connection = general_storage_mysql.create_connection(cf)
    attributes,values = general_storage_mysql.simple_json_to_mysql_query(nl.target)
    # NOTE(review): the query is built by string interpolation, which is
    # vulnerable to SQL injection if any normalized value contains quotes;
    # prefer parameterized queries if general_storage_mysql supports them.
    query="insert into twit_%s_%s(%s) values(%s)" %(nl.name,cf.client_short_name,attributes,values)
    print(query)
    # NOTE(review): the connection is never closed here — confirm whether
    # general_storage_mysql manages the connection lifetime.
    general_storage_mysql.execute_query(connection,query)
def delete_mysql_item(cf, i):
    """Delete a post or comment row from the client's MySQL tables.

    - cf: client config (provides client_short_name and DB settings).
    - i: DynamoDB item dict; i['object_type'] selects the table and
      i['object_id'] identifies the row.
    """
    # Bug fix: the original format strings contained a stray "(%s)" after
    # the table name — invalid SQL, and three %s placeholders for only two
    # arguments, which raised TypeError at formatting time.
    # NOTE(review): still built by string interpolation — vulnerable to SQL
    # injection; prefer parameterized queries if the storage layer allows.
    if i['object_type'] == 'post':
        query = "delete from twit_posts_%s where post_id=%s" % (
            cf.client_short_name, i['object_id'])
    else:
        query = "delete from twit_comments_%s where comment_id=%s" % (
            cf.client_short_name, i['object_id'])
    connection = general_storage_mysql.create_connection(cf)
    general_storage_mysql.execute_query(connection, query)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Normalizer for twitter between DynamoDB and mysql')
    parser.add_argument('config', type=str, help='an config file for normalizer')
    parser.add_argument('--query', type=str, default=None, help='query to get data for normalizer')
    parser.add_argument('--type', type=str, default="own", help='general or own. general:get everything using query; own:get own post and all replies')
    args = parser.parse_args()
    # The positional argument names a Python module providing Config().
    config = __import__(args.config)
    cf =config.Config()
    if args.type=="own":
        # "own" mode: restrict the query to this client's own posts, then
        # mirror each post plus all of its comments into MySQL.
        query_str = args.query
        if query_str:
            query_str = query_str + " AND user_id:%s AND object_type:post" %(cf.twitter_user_id)
        else:
            query_str="user_id:%s AND object_type:post" %(cf.twitter_user_id)
        total,posts = query.query_items(cf,query_str)
        if total>0:
            for post_id in [x["id"] for x in posts]:
                post_with_comments=general_storage.get_item_and_comments(cf,post_id)
                #print("%s comments" %(len(post_with_comments["comments"])))
                insert_dynamodb_item_into_mysql(cf,post_with_comments["item"])
                for comment in post_with_comments["comments"]:
                    insert_dynamodb_item_into_mysql(cf,comment)
    elif args.type=="general":
        #utils.run_until_finish(lambda: utils.process_sqs_rerun(cf,queue_name,process_clara,cf.clara_batch_size))
        # NOTE(review): `items` is undefined in this branch, so running with
        # --type general raises NameError — presumably the ES query result
        # was meant to be fetched here first; confirm the intended source.
        db_items=general_storage.get_items_by_ids(cf,query.es_outputs_to_ids(items))
        for i in db_items:
            insert_dynamodb_item_into_mysql(cf,i)
| 42.24183
| 151
| 0.617515
|
import json,datetime,time,argparse,logging,sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), "libs"))
from boto3.dynamodb.conditions import Attr
import general_storage,sqs,utils,query,general_storage_mysql
from progress.bar import Bar
from pprint import pprint
class Normalizer():
et_source_value(self,s):
isinstance(mapping,str):
rce.get(mapping)
else:
f)
def get_info(self,item):
self.get_author(item)
return utils.fix_data_to_string({
"created_time" : item["created_time"],
"message":item['message'],
"from" : author
})
def get_author(self,item):
ils.fix_data_to_string({"id":item["user_id"],
"name":item.get("user_name","unknown"),
"profile_picture_url":item['original_data'].get("user",{}).get("profile_image_url_https","")})
def normalize_source_to_target(self,cf,source):
.source:
for s in self.target_source_rule:
self.target[s] = self.get_source_value(s)
else:
print("No source specified")
class Normalizer_post_dynomodb_mysql(Normalizer):
'sub_page_id':'asset_id',
'post_id':'object_id',
'updated_time':'updated_time',
'created_time':'created_time',
'info':lambda x: x.get_info(x.source),
'json_search':'',
'author':lambda x:x.get_author(x.source),
'tags':'',
'task_ids':''
}
class Normalizer_comment_dynomodb_mysql(Normalizer):
'sub_page_id':'asset_id',
'message':'message',
'post_id':'post_id',
'comment_id':'object_id',
'created_time':'created_time',
'info':lambda x: x.get_info(x.source),
'json_search':'',
'author':lambda x:x.get_author(x.source),
'tags':'',
'task_ids':''
}
def insert_dynamodb_item_into_mysql(cf,i):
nl.normalize_source_to_target(cf,i)
connection = general_storage_mysql.create_connection(cf)
attributes,values = general_storage_mysql.simple_json_to_mysql_query(nl.target)
query="insert into twit_%s_%s(%s) values(%s)" %(nl.name,cf.client_short_name,attributes,values)
print(query)
general_storage_mysql.execute_query(connection,query)
def delete_mysql_item(cf,i):
ete from twit_posts_%s(%s) where post_id=%s" %(cf.client_short_name,i['object_id'])
else:
query="delete from twit_comments_%s(%s) where comment_id=%s" %(cf.client_short_name,i['object_id'])
connection = general_storage_mysql.create_connection(cf)
general_storage_mysql.execute_query(connection,query)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Normalizer for twitter between DynamoDB and mysql')
parser.add_argument('config', type=str, help='an config file for normalizer')
parser.add_argument('--query', type=str, default=None, help='query to get data for normalizer')
parser.add_argument('--type', type=str, default="own", help='general or own. general:get everything using query; own:get own post and all replies')
args = parser.parse_args()
config = __import__(args.config)
cf =config.Config()
if args.type=="own":
query_str = args.query
if query_str:
query_str = query_str + " AND user_id:%s AND object_type:post" %(cf.twitter_user_id)
else:
query_str="user_id:%s AND object_type:post" %(cf.twitter_user_id)
total,posts = query.query_items(cf,query_str)
if total>0:
for post_id in [x["id"] for x in posts]:
post_with_comments=general_storage.get_item_and_comments(cf,post_id)
insert_dynamodb_item_into_mysql(cf,post_with_comments["item"])
for comment in post_with_comments["comments"]:
insert_dynamodb_item_into_mysql(cf,comment)
elif args.type=="general":
db_items=general_storage.get_items_by_ids(cf,query.es_outputs_to_ids(items))
for i in db_items:
insert_dynamodb_item_into_mysql(cf,i)
| true
| true
|
f70e4b96622b6b392ae2ae9b4baec1637da6e2b6
| 2,915
|
py
|
Python
|
src/png2ico/app.py
|
lpchg1992/png2ico
|
d379a72207717b2d242ec4ba128620024ac47c2c
|
[
"BSD-3-Clause"
] | 1
|
2020-03-15T14:22:57.000Z
|
2020-03-15T14:22:57.000Z
|
src/png2ico/app.py
|
lpchg1992/png2ico
|
d379a72207717b2d242ec4ba128620024ac47c2c
|
[
"BSD-3-Clause"
] | null | null | null |
src/png2ico/app.py
|
lpchg1992/png2ico
|
d379a72207717b2d242ec4ba128620024ac47c2c
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Png to Ico
"""
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
from PIL import Image
import random
import os
class Png2Ico(toga.App):
    """Small Toga GUI application that converts a selected PNG to ICO."""

    def startup(self):
        """Build the main window: a file-picker row (label + button +
        read-only path field), a status label, and a convert button."""
        # Status message shown in the label below the picker row.
        self.msg = '请开始操作'
        main_box = toga.Box(style=Pack(direction=COLUMN))
        img_path_box = toga.Box(style=Pack(direction=ROW))
        labelPath = toga.Label(
            '请选择文件',
            style=Pack(padding=(5, 5))
        )
        self.labelMsg = toga.Label(
            self.msg,
            style=Pack(padding=(5, 5))
        )
        buttonDir = toga.Button(
            '选择图像',
            on_press=self.select_png,
            style=Pack(padding=5)
        )
        buttonExec = toga.Button(
            '执行转换',
            on_press=self.png_to_ico,
            style=Pack(padding=5)
        )
        # Read-only field that displays the selected file path.
        self.dirInput = toga.TextInput(style=Pack(flex=1), readonly=True)
        img_path_box.add(labelPath)
        img_path_box.add(buttonDir)
        img_path_box.add(self.dirInput)
        main_box.add(img_path_box)
        main_box.add(self.labelMsg)
        main_box.add(buttonExec)
        self.main_window = toga.MainWindow(title=self.formal_name)
        self.main_window.content = main_box
        self.main_window.show()

    def select_png(self, widget):
        """Handler for the select button: open a file dialog and remember
        the chosen .png path in self.pngPath and the path field."""
        try:
            pngPath_ = self.main_window.open_file_dialog(
                '请选择png图片', file_types="(*.png)|*.png")
            if pngPath_:
                if pngPath_.split('.').pop() == 'png':
                    self.pngPath = pngPath_
                    self.msg = '已成功选择图片!转换后将保存到相同目录!'
                else:
                    # Not a .png extension: clear the selection.
                    self.msg = '请选择png格式的图片!'
                    self.pngPath = ''
        except Exception as e:
            # Dialog cancelled or failed: drop any previous selection.
            # NOTE(review): the exception is swallowed and self.msg is left
            # unchanged here — confirm this is intentional. If no file was
            # ever selected and the dialog returns None (no exception),
            # self.pngPath may be unset below — verify against toga's API.
            self.pngPath = ''
        self.labelMsg.text = self.msg
        self.dirInput.value = self.pngPath

    def png_to_ico(self, widget):
        """Handler for the convert button: validate the stored path and
        save the PNG as an .ico file in the same directory."""
        try:
            # Probe that a file was selected (AttributeError if never set),
            # and that the displayed path still matches the stored one.
            self.pngPath
            goOn = 1
            if self.dirInput.value == self.pngPath:
                goOn = 1
            else:
                goOn = 0
        except Exception:
            goOn = 0
        if goOn and os.path.exists(self.pngPath):
            if self.pngPath:
                # Derive the destination directory from the source path.
                # NOTE(review): splits on '\\', which assumes Windows-style
                # paths — confirm behavior on other platforms.
                preList_ = self.pngPath.split('\\')
                preList_.pop()
                self.saveDir = '\\'.join(preList_)+'\\'
                self.msg = '成功选择存储路径,转换中...'
                goOn = 1
            else:
                self.msg = '存储路径不存在'
                goOn = 0
            self.labelMsg.text = self.msg
            if goOn == 1:
                toIco = Image.open(self.pngPath)
                # Random 5-digit suffix avoids overwriting earlier output.
                toIco.save(self.saveDir+'transfromed' +
                           str(random.randint(10000, 99999)) + '.ico')
                self.msg = '转换成功!'
            else:
                pass
        else:
            self.msg = '操作路径不存在,请重新选择!'
        self.labelMsg.text = self.msg
def main():
    """Build and return the Png2Ico application instance (toga entry
    point)."""
    return Png2Ico()
| 29.744898
| 73
| 0.504288
|
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
from PIL import Image
import random
import os
class Png2Ico(toga.App):
def startup(self):
self.msg = '请开始操作'
main_box = toga.Box(style=Pack(direction=COLUMN))
img_path_box = toga.Box(style=Pack(direction=ROW))
labelPath = toga.Label(
'请选择文件',
style=Pack(padding=(5, 5))
)
self.labelMsg = toga.Label(
self.msg,
style=Pack(padding=(5, 5))
)
buttonDir = toga.Button(
'选择图像',
on_press=self.select_png,
style=Pack(padding=5)
)
buttonExec = toga.Button(
'执行转换',
on_press=self.png_to_ico,
style=Pack(padding=5)
)
self.dirInput = toga.TextInput(style=Pack(flex=1), readonly=True)
img_path_box.add(labelPath)
img_path_box.add(buttonDir)
img_path_box.add(self.dirInput)
main_box.add(img_path_box)
main_box.add(self.labelMsg)
main_box.add(buttonExec)
self.main_window = toga.MainWindow(title=self.formal_name)
self.main_window.content = main_box
self.main_window.show()
def select_png(self, widget):
try:
pngPath_ = self.main_window.open_file_dialog(
'请选择png图片', file_types="(*.png)|*.png")
if pngPath_:
if pngPath_.split('.').pop() == 'png':
self.pngPath = pngPath_
self.msg = '已成功选择图片!转换后将保存到相同目录!'
else:
self.msg = '请选择png格式的图片!'
self.pngPath = ''
except Exception as e:
self.pngPath = ''
self.labelMsg.text = self.msg
self.dirInput.value = self.pngPath
def png_to_ico(self, widget):
try:
self.pngPath
goOn = 1
if self.dirInput.value == self.pngPath:
goOn = 1
else:
goOn = 0
except Exception:
goOn = 0
if goOn and os.path.exists(self.pngPath):
if self.pngPath:
preList_ = self.pngPath.split('\\')
preList_.pop()
self.saveDir = '\\'.join(preList_)+'\\'
self.msg = '成功选择存储路径,转换中...'
goOn = 1
else:
self.msg = '存储路径不存在'
goOn = 0
self.labelMsg.text = self.msg
if goOn == 1:
toIco = Image.open(self.pngPath)
toIco.save(self.saveDir+'transfromed' +
str(random.randint(10000, 99999)) + '.ico')
self.msg = '转换成功!'
else:
pass
else:
self.msg = '操作路径不存在,请重新选择!'
self.labelMsg.text = self.msg
def main():
return Png2Ico()
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.