Dataset columns (dtype and observed value range; "nullable" marks columns that contain null values):

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 10 to 805k |
| ext | string | 6 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 176 |
| max_stars_repo_name | string | lengths 7 to 114 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 4 to 176 |
| max_issues_repo_name | string | lengths 7 to 114 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 48.5k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 4 to 176 |
| max_forks_repo_name | string | lengths 7 to 114 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 10 to 805k |
| avg_line_length | float64 | 5.53 to 11k |
| max_line_length | int64 | 10 to 129k |
| alphanum_fraction | float64 | 0.13 to 0.93 |
| content_no_comment | string | lengths 0 to 449k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
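The three derived statistics columns can be recomputed from `content`. Their exact definitions are not stated on this page, so the sketch below is only an assumption (line lengths measured on split lines, alphanumeric fraction measured over all characters):

```python
# Hedged sketch: recompute the derived statistics columns from a row's
# `content` string. The exact definitions used to build the dataset are an
# assumption here (e.g. whether trailing newlines count toward line length).
def content_stats(content: str) -> dict:
    lines = content.splitlines()
    return {
        "avg_line_length": sum(len(line) for line in lines) / max(len(lines), 1),
        "max_line_length": max((len(line) for line in lines), default=0),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / max(len(content), 1),
    }
```

Whether this reproduces the stored values exactly depends on those counting choices, but it shows what each column measures.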
hexsha: f7182d6e6d3b349d21daf564822adcc7676043c2 | size: 763 | ext: py | lang: Python
max_stars_repo_path: osmaxx/contrib/auth/migrations/0005_auto_20170511_1100.py | max_stars_repo_name: tyrasd/osmaxx | max_stars_repo_head_hexsha: da4454083d17b2ef8b0623cad62e39992b6bd52a | max_stars_repo_licenses: ["MIT"] | max_stars_count: 27 | max_stars_repo_stars_event_min_datetime: 2015-03-30T14:17:26.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-19T17:30:44.000Z
max_issues_repo_path: osmaxx/contrib/auth/migrations/0005_auto_20170511_1100.py | max_issues_repo_name: tyrasd/osmaxx | max_issues_repo_head_hexsha: da4454083d17b2ef8b0623cad62e39992b6bd52a | max_issues_repo_licenses: ["MIT"] | max_issues_count: 483 | max_issues_repo_issues_event_min_datetime: 2015-03-09T16:58:03.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-14T09:29:06.000Z
max_forks_repo_path: osmaxx/contrib/auth/migrations/0005_auto_20170511_1100.py | max_forks_repo_name: tyrasd/osmaxx | max_forks_repo_head_hexsha: da4454083d17b2ef8b0623cad62e39992b6bd52a | max_forks_repo_licenses: ["MIT"] | max_forks_count: 6 | max_forks_repo_forks_event_min_datetime: 2015-04-07T07:38:30.000Z | max_forks_repo_forks_event_max_datetime: 2020-04-01T12:45:53.000Z
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-11 09:00
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0004_add_high_priority_user_group'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
),
]
avg_line_length: 34.681818 | max_line_length: 317 | alphanum_fraction: 0.684142
content_no_comment:
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0004_add_high_priority_user_group'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
),
]
is_comment_constant_removed: true | is_sharp_comment_removed: true
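Comparing `content` with `content_no_comment` in the rows on this page, the latter drops `#` comments, standalone string constants such as docstrings, and the blank lines left behind; the two boolean flags record that both removals were applied. The dataset's actual preprocessing code is not shown here, so the snippet below is only a rough illustration of the sharp-comment half of that transform, using Python's standard tokenize module:

```python
import io
import tokenize


def strip_sharp_comments(source: str) -> str:
    """Rough illustration only (not the dataset's actual pipeline): blank out
    '#' comment tokens, then drop lines left empty. Removing docstring-like
    constants (is_comment_constant_removed) would need an extra AST pass."""
    lines = source.splitlines()
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.COMMENT:
            row, col = tok.start  # 1-based row, 0-based column of the comment
            lines[row - 1] = lines[row - 1][:col].rstrip()
    return "\n".join(line for line in lines if line.strip())
```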
hexsha: f7182d8eab34c12e2650f3389e5bc285665ac5d6 | size: 82 | ext: py | lang: Python
max_stars_repo_path: PyRacing2/gym_race/envs/__init__.py | max_stars_repo_name: CeVauDe/bugfree-game-about-bugs | max_stars_repo_head_hexsha: ff1b3e07188fca1775fcc1ce95b59b188c29cee2 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: PyRacing2/gym_race/envs/__init__.py | max_issues_repo_name: CeVauDe/bugfree-game-about-bugs | max_issues_repo_head_hexsha: ff1b3e07188fca1775fcc1ce95b59b188c29cee2 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 3 | max_issues_repo_issues_event_min_datetime: 2021-07-09T21:32:37.000Z | max_issues_repo_issues_event_max_datetime: 2021-07-09T21:55:02.000Z
max_forks_repo_path: PyRacing2/gym_race/envs/__init__.py | max_forks_repo_name: CeVauDe/bugfree-game-about-bugs | max_forks_repo_head_hexsha: ff1b3e07188fca1775fcc1ce95b59b188c29cee2 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from gym_race.envs.race_env import *
from gym_race.envs.pyrace_2d import PyRace2D
avg_line_length: 27.333333 | max_line_length: 44 | alphanum_fraction: 0.841463
content_no_comment:
from gym_race.envs.race_env import *
from gym_race.envs.pyrace_2d import PyRace2D
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f7182d96424929dbe947667e2add89639bbe42ee | size: 2,472 | ext: py | lang: Python
max_stars_repo_path: markwiki/util.py | max_stars_repo_name: cabalamat/markwiki | max_stars_repo_head_hexsha: 7c18c3c52eee51ee4544eceee570db6b63782152 | max_stars_repo_licenses: ["BSD-2-Clause"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2019-09-18T12:05:44.000Z | max_stars_repo_stars_event_max_datetime: 2019-09-18T12:05:44.000Z
max_issues_repo_path: markwiki/util.py | max_issues_repo_name: cabalamat/markwiki | max_issues_repo_head_hexsha: 7c18c3c52eee51ee4544eceee570db6b63782152 | max_issues_repo_licenses: ["BSD-2-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: markwiki/util.py | max_forks_repo_name: cabalamat/markwiki | max_forks_repo_head_hexsha: 7c18c3c52eee51ee4544eceee570db6b63782152 | max_forks_repo_licenses: ["BSD-2-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# Copyright (c) 2016, Matt Layman
'''The junk drawer. A place for methods that don't logically fit elsewhere.'''
import os
import random
import shutil
import string
import sys
from werkzeug import security
from markwiki.models.user import User
def boolify(value):
'''Check the string value for boolean-like behavior and return a bool.'''
return value.lower().startswith('t')
def bootstrap(app):
'''Bootstrap the wiki with some basic content.'''
here = os.path.abspath(os.path.dirname(__file__))
# Copy all the help content.
wiki_path = app.config['WIKI_PATH']
markwiki_help = os.path.join(here, 'templates', 'MarkWiki')
shutil.copytree(markwiki_help, os.path.join(wiki_path, 'MarkWiki'))
# Populate the wiki with the main page.
home_source = os.path.join(markwiki_help, 'Introduction.md')
shutil.copy(home_source, os.path.join(wiki_path, 'Home.md'))
token = os.path.join(app.config['MARKWIKI_HOME'],
app.bootstrapped_token_file)
with open(token, 'w') as f:
f.write('Bootstrapping is complete. Do not delete this file.')
def bootstrap_auth(app):
'''Bootstrap all the necessary authentication support if it is enabled.'''
# Check that the admin credentials are valid.
if not app.config.get('ADMINISTRATOR'):
sys.exit('You did not provide an administrator username.')
if not app.config.get('ADMIN_PASSWORD'):
sys.exit('You did not provide an administrator password.')
# Store the credentials of the admin account.
admin = app.user_storage.find_by_name(app.config['ADMINISTRATOR'])
if admin is None:
pwhash = security.generate_password_hash(app.config['ADMIN_PASSWORD'])
# No admin for this account name so create one.
admin = User(app.config['ADMINISTRATOR'],
'', # The admin does not use email.
'password',
pwhash)
app.user_storage.create(admin)
else:
# The configuration file may have changed the password so always update
# the administrator's password.
pwhash = security.generate_password_hash(app.config['ADMIN_PASSWORD'])
admin.password_digest = pwhash
app.user_storage.update(admin)
def generate_password():
'''Generate a random password.'''
chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
return ''.join(random.choice(chars) for i in xrange(12))
avg_line_length: 35.314286 | max_line_length: 79 | alphanum_fraction: 0.678803
content_no_comment:
import os
import random
import shutil
import string
import sys
from werkzeug import security
from markwiki.models.user import User
def boolify(value):
return value.lower().startswith('t')
def bootstrap(app):
here = os.path.abspath(os.path.dirname(__file__))
wiki_path = app.config['WIKI_PATH']
markwiki_help = os.path.join(here, 'templates', 'MarkWiki')
shutil.copytree(markwiki_help, os.path.join(wiki_path, 'MarkWiki'))
home_source = os.path.join(markwiki_help, 'Introduction.md')
shutil.copy(home_source, os.path.join(wiki_path, 'Home.md'))
token = os.path.join(app.config['MARKWIKI_HOME'],
app.bootstrapped_token_file)
with open(token, 'w') as f:
f.write('Bootstrapping is complete. Do not delete this file.')
def bootstrap_auth(app):
if not app.config.get('ADMINISTRATOR'):
sys.exit('You did not provide an administrator username.')
if not app.config.get('ADMIN_PASSWORD'):
sys.exit('You did not provide an administrator password.')
admin = app.user_storage.find_by_name(app.config['ADMINISTRATOR'])
if admin is None:
pwhash = security.generate_password_hash(app.config['ADMIN_PASSWORD'])
admin = User(app.config['ADMINISTRATOR'],
'',
'password',
pwhash)
app.user_storage.create(admin)
else:
pwhash = security.generate_password_hash(app.config['ADMIN_PASSWORD'])
admin.password_digest = pwhash
app.user_storage.update(admin)
def generate_password():
chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
return ''.join(random.choice(chars) for i in xrange(12))
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f7182f57d6af96c12b32a662d6d180925830beae | size: 398 | ext: py | lang: Python
max_stars_repo_path: server/urbanity/wsgi.py | max_stars_repo_name: zoek1/urbanity | max_stars_repo_head_hexsha: 33fef559645183c76527df2d7982dee5fcde28f7 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: server/urbanity/wsgi.py | max_issues_repo_name: zoek1/urbanity | max_issues_repo_head_hexsha: 33fef559645183c76527df2d7982dee5fcde28f7 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: server/urbanity/wsgi.py | max_forks_repo_name: zoek1/urbanity | max_forks_repo_head_hexsha: 33fef559645183c76527df2d7982dee5fcde28f7 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
"""
WSGI config for firey_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "urbanity.settings")
application = get_wsgi_application()
avg_line_length: 23.411765 | max_line_length: 78 | alphanum_fraction: 0.788945
content_no_comment:
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "urbanity.settings")
application = get_wsgi_application()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f71834accc3ef2b84d9e02493e984df9c647ca15 | size: 12,864 | ext: py | lang: Python
max_stars_repo_path: make_cv.py | max_stars_repo_name: toritamantaro/yaml_cv_py | max_stars_repo_head_hexsha: 40bc07b90873bb47aad08975495c2c52e2e0e1cd | max_stars_repo_licenses: ["MIT"] | max_stars_count: 10 | max_stars_repo_stars_event_min_datetime: 2018-09-05T15:34:21.000Z | max_stars_repo_stars_event_max_datetime: 2021-02-11T05:15:34.000Z
max_issues_repo_path: make_cv.py | max_issues_repo_name: toritamantaro/yaml_cv_py | max_issues_repo_head_hexsha: 40bc07b90873bb47aad08975495c2c52e2e0e1cd | max_issues_repo_licenses: ["MIT"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2021-03-18T08:14:09.000Z | max_issues_repo_issues_event_max_datetime: 2021-04-22T04:26:27.000Z
max_forks_repo_path: make_cv.py | max_forks_repo_name: toritamantaro/yaml_cv_py | max_forks_repo_head_hexsha: 40bc07b90873bb47aad08975495c2c52e2e0e1cd | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-06-15T23:58:02.000Z | max_forks_repo_forks_event_max_datetime: 2020-06-15T23:58:02.000Z
content:
# -*- coding: utf-8 -*-
from typing import Dict, Tuple, Any, Optional
import re
import string
import ast
import codecs
import argparse
import yaml
from reportlab.pdfgen import canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.cidfonts import UnicodeCIDFont
from reportlab.pdfbase.ttfonts import TTFont, TTFError
from reportlab.lib.pagesizes import A3, A4, B5 # 用紙サイズを読み込む
from reportlab.lib.pagesizes import landscape, portrait # 横向き、縦向き
from reportlab.lib.units import inch, cm, mm, pica
from text2yaml import TextConverter
# Windowsで利用可能なフォントファイル
DEPENDENT_FONT_FILE = (
('msmincho', 'msmincho.ttc'),
('msgothic', 'msgothic.ttc'),
('yugothl', 'YuGothL.ttc'),
('meiryo', 'meiryo.ttc'),
)
DEFAULT_FONT_FACE = 'msmincho'
DEFAULT_FONT_SIZE = 12
DEFAULT_LINE_WIDTH = 0.5
class PdfMaker(object):
def __init__(self, add_font: Optional[Tuple[str]]):
self._input_data = None
self._canvas = None # reportlab.pdfgen.canvas
self._fonts = [] # 登録したフォント名
# ReprotLabで用意されている日本語フォントを登録する
pdfmetrics.registerFont(UnicodeCIDFont("HeiseiKakuGo-W5"))
pdfmetrics.registerFont(UnicodeCIDFont("HeiseiMin-W3"))
self._fonts = ['HeiseiKakuGo-W5', 'HeiseiMin-W3']
self.check_font(DEPENDENT_FONT_FILE)
if add_font is not None:
self.register_font(add_font)
def check_font(self, font_file: Tuple[Tuple[str]]):
'''
任意のフォントファイルを登録する
:param font_file:
登録するフォント名・ファイル名を、(フォント名,ファイル名)というタプルにして渡す。
e.g. (('msmincho', 'msmincho.ttc'), ... ,('msmincho', 'msmincho.ttc'))
'''
for t in font_file:
if (not isinstance(t, Tuple)) or len(t) < 2:
continue
self.register_font(t)
def register_font(self, font_info: Tuple[str]):
try:
pdfmetrics.registerFont(TTFont(font_info[0], font_info[1]))
except TTFError:
print("フォント名:'{0}'_フォントファイル:'{1}'の登録に失敗しました。".format(font_info[0], font_info[1]))
else:
self._fonts.append(font_info[0])
def get_value(self, params: Dict) -> str:
value = params.get('value', '')
s_values = re.search(r'\$(.*)$', value)
if s_values:
key = s_values.group(1)
val = self._input_data.get(key, '')
# キーワード引数を使ったテンプレートによる置換
t = string.Template(s_values.group(0))
value = t.substitute(**{key: val})
return value
def get_font(self, params: Dict) -> Tuple[float, str]:
font_size = float(params.get('font_size', DEFAULT_FONT_SIZE))
font_face = params.get('font_face', DEFAULT_FONT_FACE)
if font_face not in self._fonts:
face = DEFAULT_FONT_FACE if (DEFAULT_FONT_FACE in self._fonts) else self._fonts[0]
print("フォント名'{}'は指定できません。'{}'が適用されます。".format(font_face, face))
font_face = face
return float(font_size), font_face
def put_string(self, x: float, y: float, char: str, font_size: float, font_face: str):
if not char:
return
self._canvas.setFont(font_face, font_size)
self._canvas.drawString(x, y, char)
def put_textbox(self, x: float, y: float, char: str, font_size: float, font_face: str):
if not char:
return
text_obj = self._canvas.beginText()
text_obj.setTextOrigin(x, y)
text_obj.setFont(font_face, font_size)
text_obj.textLines(char)
self._canvas.drawText(text_obj)
def string(self, params: Dict):
x = self.unit2dot(params.get('x'))
y = self.unit2dot(params.get('y'))
value = self.get_value(params)
font_size, font_face = self.get_font(params)
self.put_string(x, y, value, font_size, font_face)
def box(self, params: Dict):
self.line_style(params)
x = self.unit2dot(params.get('x'))
y = self.unit2dot(params.get('y'))
w = self.unit2dot(params.get('width'))
h = self.unit2dot(params.get('height'))
self._canvas.rect(x, y, w, h)
def line(self, params: Dict):
self.line_style(params)
x = self.unit2dot(params.get('x'))
y = self.unit2dot(params.get('y'))
dx = self.unit2dot(params.get('dx'))
dy = self.unit2dot(params.get('dy'))
self._canvas.line(x, y, x + dx, y + dy)
def lines(self, params: Dict):
self.line_style(params)
points = params.get('points')
x = self.unit2dot(points[0].get('x'))
y = self.unit2dot(points[0].get('y'))
close = params.get('close')
path = self._canvas.beginPath() # 描画用のパスを生成
path.moveTo(x, y)
for dct in points[1:]:
dx = self.unit2dot(dct.get('x'))
dy = self.unit2dot(dct.get('y'))
x = x + dx
y = y + dy
# print(dx, ',', dy)
path.lineTo(x, y)
if close:
path.close()
self._canvas.drawPath(path)
def multi_lines(self, params: Dict):
self.line_style(params)
x = self.unit2dot(params.get('x'))
y = self.unit2dot(params.get('y'))
dx = self.unit2dot(params.get('dx'))
dy = self.unit2dot(params.get('dy'))
sx = self.unit2dot(params.get('sx'))
sy = self.unit2dot(params.get('sy'))
num = int(params.get('num'))
self._canvas.line(x, y, x + dx, y + dy)
for i in range(num):
self._canvas.line(x, y, x + dx, y + dy)
x = x + sx
y = y + sy
def line_style(self, params: Dict):
# 線のスタイル
line_styles = {
'dashed': [2, 2], # 線と間隔の長さを任意に定義
'chain': [2, 2, 10, 2], # 線と間隔の長さを任意に定義
'solid': [], # 空のリスト -> 実線
}
style = params.get('line_style', '')
style_lst = line_styles.get(style, []) # 対応するキーがない場合は空のリストを渡す -> 実線
self._canvas.setDash(style_lst)
# 線の幅
width = params.get('line_width')
try:
width = float(width)
except:
width = DEFAULT_LINE_WIDTH
self._canvas.setLineWidth(width)
def unit2dot(self, s: Any) -> float:
'''
「数値+単位」(文字列型)で指定された値をドットに変換する
'''
inch_val = re.search(r'\s*(-?[0-9\.]+)\s*inch', s)
mm_val = re.search(r'\s*(-?[0-9\.]+)\s*mm', s)
cm_val = re.search(r'\s*(-?[0-9\.]+)\s*cm', s)
pica_val = re.search(r'\s*(-?[0-9\.]+)\s*pica', s)
if mm_val:
return float(mm_val.group(1)) * mm
elif cm_val:
return float(cm_val.group(1)) * cm
elif inch_val:
return float(inch_val.group(1)) * inch
elif pica_val:
return float(pica_val.group(1)) * pica
else:
# 単位が指定されていない場合
try:
return float(s)
except TypeError:
return None
def new_page(self, params: Dict = None):
self._canvas.showPage() # これまでのページを確定
def save(self):
self._canvas.save()
def education_experience(self, params: Dict):
y = self.unit2dot(params.get('y'))
year_x = self.unit2dot(params.get('year_x'))
month_x = self.unit2dot(params.get('month_x'))
value_x = self.unit2dot(params.get('value_x'))
ijo_x = self.unit2dot(params.get('ijo_x'))
dy = self.unit2dot(params.get('dy'))
caption_x = self.unit2dot(params.get('caption_x'))
font_size, font_face = self.get_font(params)
# 学歴
self.put_string(caption_x, y, '学歴', font_size, font_face)
y = y - dy
education = self._input_data.get('education', [])
for d in education:
year = str(d.get('year', ''))
month = str(d.get('month', ''))
self.put_string(year_x, y, year, font_size, font_face)
x = month_x - (len(month) - 1) * font_size * 0.3
self.put_string(x, y, month, font_size, font_face)
self.put_string(value_x, y, str(d.get('value', '')), font_size, font_face)
y = y - dy
# 職歴
self.put_string(caption_x, y, '職歴', font_size, font_face)
y = y - dy
experience = self._input_data.get('experience', [])
for d in experience:
year = str(d.get('year', ''))
month = str(d.get('month', ''))
self.put_string(year_x, y, year, font_size, font_face)
x = month_x - (len(month) - 1) * font_size * 0.3
self.put_string(x, y, month, font_size, font_face)
self.put_string(value_x, y, str(d.get('value', '')), font_size, font_face)
y = y - dy
# 以上
self.put_string(ijo_x, y, '以上', font_size, font_face)
def license_certification(self, params: Dict):
y = self.unit2dot(params.get('y'))
year_x = self.unit2dot(params.get('year_x'))
month_x = self.unit2dot(params.get('month_x'))
value_x = self.unit2dot(params.get('value_x'))
dy = self.unit2dot(params.get('dy'))
value = self.get_value(params)
font_size, font_face = self.get_font(params)
try:
data_dcts = ast.literal_eval(value) # 文字列を辞書型に変換
except ValueError:
print('免許・資格情報の読み込みに失敗しました。')
return
for d in data_dcts:
year = str(d.get('year', ''))
month = str(d.get('month', ''))
self.put_string(year_x, y, year, font_size, font_face)
x = month_x - (len(month) - 1) * font_size * 0.3
self.put_string(x, y, month, font_size, font_face)
self.put_string(value_x, y, str(d.get('value', '')), font_size, font_face)
y = y + dy
def textbox(self, params: Dict):
x = self.unit2dot(params.get('x'))
y = self.unit2dot(params.get('y'))
value = self.get_value(params)
font_size, font_face = self.get_font(params)
self.put_textbox(x, y, value, font_size, font_face)
def generate(self, input_file: str, style_file: str, output_file: str):
# 記述内容の読み込み
if not re.search(r'\.(YAML|YML)$', input_file.upper()):
print("ファイル名の拡張子は「*.yaml」もしくは「*.yml」にしてください。"
"現在のファイル名:{0}".format(input_file))
return
with codecs.open(input_file, 'r', 'utf-8') as yaml_file:
self._input_data = yaml.load(yaml_file, Loader=yaml.SafeLoader)
# PDFファイルの生成
if not re.search(r'\.PDF$', output_file.upper()):
print("ファイル名の拡張子は「*.pdf」にしてください。"
"現在のファイル名:{0}".format(output_file))
return
self._canvas = canvas.Canvas(
"./{0}".format(output_file),
# bottomup=False, # buutomup=Trueは左下が原点という意味。Falseのときは左上原点
pagesize=B5,
# pagesize=landscape(A3), # 横向き(サイズも指定)
)
# 指定されたスタイルの読み込み
data = []
if re.search(r'\.(TXT|CSV)$', style_file.upper()):
converter = TextConverter()
data = converter.convert(style_file)
elif re.search(r'\.(YAML|YML)$', style_file.upper()):
with codecs.open(style_file, 'r', 'utf-8') as yaml_file:
data = yaml.load(yaml_file, Loader=yaml.SafeLoader)
else:
print("ファイル名:{0}の読み込みに失敗しました。".format(style_file))
return
# PDFファイルへの描画
for dct in data:
# e.g. dct = {'font_size': '9', 'type': 'string', 'value': '$name_kana', 'x': '30mm', 'y': '238mm'}
try:
getattr(self, dct.get('type'))(dct)
except AttributeError as e:
print(e.args)
self.save()
def parse_option():
dc = 'This script is ...'
parser = argparse.ArgumentParser(description=dc)
parser.add_argument('-i', action='store', type=str, dest='input',
default='data.yaml',
help='set input file path. e.g. hoge.yaml')
parser.add_argument('-s', action='store', type=str, dest='style',
default='style.yaml',
help='set style file path. e.g. hoge.yaml or hoge.txt')
parser.add_argument('-o', action='store', type=str, dest='output',
default='output.pdf',
help='set output file path. e.g. hoge.pdf')
parser.add_argument('-f', action='store', type=str, dest='font', nargs=2,
help='set font name and font file. e.g. msgothic msgothic.ttc')
return parser.parse_args()
def main():
args = parse_option()
input_file = args.input
style_file = args.style
output_file = args.output
font_file_info = None if args.font is None else tuple(args.font)
maker = PdfMaker(add_font=font_file_info)
maker.generate(input_file, style_file, output_file)
if __name__ == '__main__':
main()
avg_line_length: 36.965517 | max_line_length: 111 | alphanum_fraction: 0.569108
content_no_comment:
from typing import Dict, Tuple, Any, Optional
import re
import string
import ast
import codecs
import argparse
import yaml
from reportlab.pdfgen import canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.cidfonts import UnicodeCIDFont
from reportlab.pdfbase.ttfonts import TTFont, TTFError
from reportlab.lib.pagesizes import A3, A4, B5
from reportlab.lib.pagesizes import landscape, portrait
from reportlab.lib.units import inch, cm, mm, pica
from text2yaml import TextConverter
DEPENDENT_FONT_FILE = (
('msmincho', 'msmincho.ttc'),
('msgothic', 'msgothic.ttc'),
('yugothl', 'YuGothL.ttc'),
('meiryo', 'meiryo.ttc'),
)
DEFAULT_FONT_FACE = 'msmincho'
DEFAULT_FONT_SIZE = 12
DEFAULT_LINE_WIDTH = 0.5
class PdfMaker(object):
def __init__(self, add_font: Optional[Tuple[str]]):
self._input_data = None
self._canvas = None
self._fonts = []
pdfmetrics.registerFont(UnicodeCIDFont("HeiseiKakuGo-W5"))
pdfmetrics.registerFont(UnicodeCIDFont("HeiseiMin-W3"))
self._fonts = ['HeiseiKakuGo-W5', 'HeiseiMin-W3']
self.check_font(DEPENDENT_FONT_FILE)
if add_font is not None:
self.register_font(add_font)
def check_font(self, font_file: Tuple[Tuple[str]]):
for t in font_file:
if (not isinstance(t, Tuple)) or len(t) < 2:
continue
self.register_font(t)
def register_font(self, font_info: Tuple[str]):
try:
pdfmetrics.registerFont(TTFont(font_info[0], font_info[1]))
except TTFError:
print("フォント名:'{0}'_フォントファイル:'{1}'の登録に失敗しました。".format(font_info[0], font_info[1]))
else:
self._fonts.append(font_info[0])
def get_value(self, params: Dict) -> str:
value = params.get('value', '')
s_values = re.search(r'\$(.*)$', value)
if s_values:
key = s_values.group(1)
val = self._input_data.get(key, '')
t = string.Template(s_values.group(0))
value = t.substitute(**{key: val})
return value
def get_font(self, params: Dict) -> Tuple[float, str]:
font_size = float(params.get('font_size', DEFAULT_FONT_SIZE))
font_face = params.get('font_face', DEFAULT_FONT_FACE)
if font_face not in self._fonts:
face = DEFAULT_FONT_FACE if (DEFAULT_FONT_FACE in self._fonts) else self._fonts[0]
print("フォント名'{}'は指定できません。'{}'が適用されます。".format(font_face, face))
font_face = face
return float(font_size), font_face
def put_string(self, x: float, y: float, char: str, font_size: float, font_face: str):
if not char:
return
self._canvas.setFont(font_face, font_size)
self._canvas.drawString(x, y, char)
def put_textbox(self, x: float, y: float, char: str, font_size: float, font_face: str):
if not char:
return
text_obj = self._canvas.beginText()
text_obj.setTextOrigin(x, y)
text_obj.setFont(font_face, font_size)
text_obj.textLines(char)
self._canvas.drawText(text_obj)
def string(self, params: Dict):
x = self.unit2dot(params.get('x'))
y = self.unit2dot(params.get('y'))
value = self.get_value(params)
font_size, font_face = self.get_font(params)
self.put_string(x, y, value, font_size, font_face)
def box(self, params: Dict):
self.line_style(params)
x = self.unit2dot(params.get('x'))
y = self.unit2dot(params.get('y'))
w = self.unit2dot(params.get('width'))
h = self.unit2dot(params.get('height'))
self._canvas.rect(x, y, w, h)
def line(self, params: Dict):
self.line_style(params)
x = self.unit2dot(params.get('x'))
y = self.unit2dot(params.get('y'))
dx = self.unit2dot(params.get('dx'))
dy = self.unit2dot(params.get('dy'))
self._canvas.line(x, y, x + dx, y + dy)
def lines(self, params: Dict):
self.line_style(params)
points = params.get('points')
x = self.unit2dot(points[0].get('x'))
y = self.unit2dot(points[0].get('y'))
close = params.get('close')
path = self._canvas.beginPath()
path.moveTo(x, y)
for dct in points[1:]:
dx = self.unit2dot(dct.get('x'))
dy = self.unit2dot(dct.get('y'))
x = x + dx
y = y + dy
path.lineTo(x, y)
if close:
path.close()
self._canvas.drawPath(path)
def multi_lines(self, params: Dict):
self.line_style(params)
x = self.unit2dot(params.get('x'))
y = self.unit2dot(params.get('y'))
dx = self.unit2dot(params.get('dx'))
dy = self.unit2dot(params.get('dy'))
sx = self.unit2dot(params.get('sx'))
sy = self.unit2dot(params.get('sy'))
num = int(params.get('num'))
self._canvas.line(x, y, x + dx, y + dy)
for i in range(num):
self._canvas.line(x, y, x + dx, y + dy)
x = x + sx
y = y + sy
def line_style(self, params: Dict):
line_styles = {
'dashed': [2, 2],
'chain': [2, 2, 10, 2],
'solid': [],
}
style = params.get('line_style', '')
style_lst = line_styles.get(style, [])
self._canvas.setDash(style_lst)
width = params.get('line_width')
try:
width = float(width)
except:
width = DEFAULT_LINE_WIDTH
self._canvas.setLineWidth(width)
def unit2dot(self, s: Any) -> float:
inch_val = re.search(r'\s*(-?[0-9\.]+)\s*inch', s)
mm_val = re.search(r'\s*(-?[0-9\.]+)\s*mm', s)
cm_val = re.search(r'\s*(-?[0-9\.]+)\s*cm', s)
pica_val = re.search(r'\s*(-?[0-9\.]+)\s*pica', s)
if mm_val:
return float(mm_val.group(1)) * mm
elif cm_val:
return float(cm_val.group(1)) * cm
elif inch_val:
return float(inch_val.group(1)) * inch
elif pica_val:
return float(pica_val.group(1)) * pica
else:
try:
return float(s)
except TypeError:
return None
def new_page(self, params: Dict = None):
self._canvas.showPage()
def save(self):
self._canvas.save()
def education_experience(self, params: Dict):
y = self.unit2dot(params.get('y'))
year_x = self.unit2dot(params.get('year_x'))
month_x = self.unit2dot(params.get('month_x'))
value_x = self.unit2dot(params.get('value_x'))
ijo_x = self.unit2dot(params.get('ijo_x'))
dy = self.unit2dot(params.get('dy'))
caption_x = self.unit2dot(params.get('caption_x'))
font_size, font_face = self.get_font(params)
self.put_string(caption_x, y, '学歴', font_size, font_face)
y = y - dy
education = self._input_data.get('education', [])
for d in education:
year = str(d.get('year', ''))
month = str(d.get('month', ''))
self.put_string(year_x, y, year, font_size, font_face)
x = month_x - (len(month) - 1) * font_size * 0.3
self.put_string(x, y, month, font_size, font_face)
self.put_string(value_x, y, str(d.get('value', '')), font_size, font_face)
y = y - dy
self.put_string(caption_x, y, '職歴', font_size, font_face)
y = y - dy
experience = self._input_data.get('experience', [])
for d in experience:
year = str(d.get('year', ''))
month = str(d.get('month', ''))
self.put_string(year_x, y, year, font_size, font_face)
x = month_x - (len(month) - 1) * font_size * 0.3
self.put_string(x, y, month, font_size, font_face)
self.put_string(value_x, y, str(d.get('value', '')), font_size, font_face)
y = y - dy
self.put_string(ijo_x, y, '以上', font_size, font_face)
def license_certification(self, params: Dict):
y = self.unit2dot(params.get('y'))
year_x = self.unit2dot(params.get('year_x'))
month_x = self.unit2dot(params.get('month_x'))
value_x = self.unit2dot(params.get('value_x'))
dy = self.unit2dot(params.get('dy'))
value = self.get_value(params)
font_size, font_face = self.get_font(params)
try:
data_dcts = ast.literal_eval(value)
except ValueError:
print('免許・資格情報の読み込みに失敗しました。')
return
for d in data_dcts:
year = str(d.get('year', ''))
month = str(d.get('month', ''))
self.put_string(year_x, y, year, font_size, font_face)
x = month_x - (len(month) - 1) * font_size * 0.3
self.put_string(x, y, month, font_size, font_face)
self.put_string(value_x, y, str(d.get('value', '')), font_size, font_face)
y = y + dy
def textbox(self, params: Dict):
x = self.unit2dot(params.get('x'))
y = self.unit2dot(params.get('y'))
value = self.get_value(params)
font_size, font_face = self.get_font(params)
self.put_textbox(x, y, value, font_size, font_face)
def generate(self, input_file: str, style_file: str, output_file: str):
if not re.search(r'\.(YAML|YML)$', input_file.upper()):
print("ファイル名の拡張子は「*.yaml」もしくは「*.yml」にしてください。"
"現在のファイル名:{0}".format(input_file))
return
with codecs.open(input_file, 'r', 'utf-8') as yaml_file:
self._input_data = yaml.load(yaml_file, Loader=yaml.SafeLoader)
if not re.search(r'\.PDF$', output_file.upper()):
print("ファイル名の拡張子は「*.pdf」にしてください。"
"現在のファイル名:{0}".format(output_file))
return
self._canvas = canvas.Canvas(
"./{0}".format(output_file),
pagesize=B5,
)
data = []
if re.search(r'\.(TXT|CSV)$', style_file.upper()):
converter = TextConverter()
data = converter.convert(style_file)
elif re.search(r'\.(YAML|YML)$', style_file.upper()):
with codecs.open(style_file, 'r', 'utf-8') as yaml_file:
data = yaml.load(yaml_file, Loader=yaml.SafeLoader)
else:
print("ファイル名:{0}の読み込みに失敗しました。".format(style_file))
return
for dct in data:
try:
getattr(self, dct.get('type'))(dct)
except AttributeError as e:
print(e.args)
self.save()
def parse_option():
dc = 'This script is ...'
parser = argparse.ArgumentParser(description=dc)
parser.add_argument('-i', action='store', type=str, dest='input',
default='data.yaml',
help='set input file path. e.g. hoge.yaml')
parser.add_argument('-s', action='store', type=str, dest='style',
default='style.yaml',
help='set style file path. e.g. hoge.yaml or hoge.txt')
parser.add_argument('-o', action='store', type=str, dest='output',
default='output.pdf',
help='set output file path. e.g. hoge.pdf')
parser.add_argument('-f', action='store', type=str, dest='font', nargs=2,
help='set font name and font file. e.g. msgothic msgothic.ttc')
return parser.parse_args()
def main():
args = parse_option()
input_file = args.input
style_file = args.style
output_file = args.output
font_file_info = None if args.font is None else tuple(args.font)
maker = PdfMaker(add_font=font_file_info)
maker.generate(input_file, style_file, output_file)
if __name__ == '__main__':
main()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f71835403421c46fd7d2e1a64c97d3ad46ed7a63 | size: 691 | ext: py | lang: Python
max_stars_repo_path: docker_registry/storage/__init__.py | max_stars_repo_name: kirat-singh/docker-registry | max_stars_repo_head_hexsha: ca53d728fb57302606892362820dfaa8aed105c5 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1,568 | max_stars_repo_stars_event_min_datetime: 2015-01-01T02:12:42.000Z | max_stars_repo_stars_event_max_datetime: 2020-03-10T06:24:39.000Z
max_issues_repo_path: docker_registry/storage/__init__.py | max_issues_repo_name: kirat-singh/docker-registry | max_issues_repo_head_hexsha: ca53d728fb57302606892362820dfaa8aed105c5 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 316 | max_issues_repo_issues_event_min_datetime: 2015-01-01T01:15:21.000Z | max_issues_repo_issues_event_max_datetime: 2018-09-10T21:19:04.000Z
max_forks_repo_path: docker_registry/storage/__init__.py | max_forks_repo_name: kirat-singh/docker-registry | max_forks_repo_head_hexsha: ca53d728fb57302606892362820dfaa8aed105c5 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 596 | max_forks_repo_forks_event_min_datetime: 2015-01-03T03:54:42.000Z | max_forks_repo_forks_event_max_datetime: 2020-03-05T14:40:55.000Z
content:
# -*- coding: utf-8 -*-
import docker_registry.core.driver as engine
import tempfile
from ..lib import config
__all__ = ['load']
def temp_store_handler():
tmpf = tempfile.TemporaryFile()
def fn(buf):
tmpf.write(buf)
return tmpf, fn
_storage = {}
def load(kind=None):
"""Returns the right storage class according to the configuration."""
global _storage
cfg = config.load()
if not kind:
kind = cfg.storage.lower()
if kind == 'local':
kind = 'file'
if kind in _storage:
return _storage[kind]
_storage[kind] = engine.fetch(kind)(
path=cfg.storage_path,
config=cfg)
return _storage[kind]
avg_line_length: 16.853659 | max_line_length: 73 | alphanum_fraction: 0.620839
content_no_comment:
import docker_registry.core.driver as engine
import tempfile
from ..lib import config
__all__ = ['load']
def temp_store_handler():
tmpf = tempfile.TemporaryFile()
def fn(buf):
tmpf.write(buf)
return tmpf, fn
_storage = {}
def load(kind=None):
global _storage
cfg = config.load()
if not kind:
kind = cfg.storage.lower()
if kind == 'local':
kind = 'file'
if kind in _storage:
return _storage[kind]
_storage[kind] = engine.fetch(kind)(
path=cfg.storage_path,
config=cfg)
return _storage[kind]
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f7183651e5c9a3365c1a19a88fe1f26f07d9c6f3 | size: 1,745 | ext: py | lang: Python
max_stars_repo_path: q2_pepsirf/actions/link.py | max_stars_repo_name: LadnerLab/q2-pepsirf | max_stars_repo_head_hexsha: 47de628294cb47d1c1c5881b825e3807b1b5fa02 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: q2_pepsirf/actions/link.py | max_issues_repo_name: LadnerLab/q2-pepsirf | max_issues_repo_head_hexsha: 47de628294cb47d1c1c5881b825e3807b1b5fa02 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: q2_pepsirf/actions/link.py | max_forks_repo_name: LadnerLab/q2-pepsirf | max_forks_repo_head_hexsha: 47de628294cb47d1c1c5881b825e3807b1b5fa02 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import subprocess, os
import tempfile, qiime2
from q2_pepsirf.format_types import PeptideFastaFmt, ProteinFastaFmt, PepsirfLinkTSVFormat
# Name: link
# Process: runs pepsirf's link module
# Method inputs/parameters: protein_file, peptide_file, meta,
# kmer_size, kmer_redundancy_control, outfile, pepsirf_binary
# Method outputs/Returned: the link tsv
# Dependencies: subprocess, os, tempfile
def link(
protein_file: ProteinFastaFmt,
peptide_file: PeptideFastaFmt,
meta: str,
kmer_size: int,
kmer_redundancy_control: bool = False,
outfile: str = "./link.out",
pepsirf_binary: str = "pepsirf") -> PepsirfLinkTSVFormat:
#collect filepath for TSVFormat
tsv_output = PepsirfLinkTSVFormat()
#collect absolute filepaths for input files and binary if it is a file
protein_file = "'%s'" % (str(protein_file))
peptide_file = "'%s'" % (str(peptide_file))
if os.path.isfile(pepsirf_binary):
pepsirf_binary = "'%s'" % (os.path.abspath(pepsirf_binary))
#create a temp directory to run pepsirf in
with tempfile.TemporaryDirectory() as tempdir:
#start command with required/defualt parameters
cmd = "%s link --protein_file %s --peptide_file %s --meta %s -k %s -o %s" % (
pepsirf_binary, protein_file, peptide_file, meta, str(kmer_size), tsv_output
)
#check if optional parameters are inputted and add to command
if kmer_redundancy_control:
cmd += " -r"
#add outfile to command
cmd += ' >> %s' % (outfile)
#run command in the command line
subprocess.run(cmd, shell=True, check=True)
#return norm output
return tsv_output
avg_line_length: 32.314815 | max_line_length: 91 | alphanum_fraction: 0.664183
content_no_comment:
import subprocess, os
import tempfile, qiime2
from q2_pepsirf.format_types import PeptideFastaFmt, ProteinFastaFmt, PepsirfLinkTSVFormat
# Method inputs/parameters: protein_file, peptide_file, meta,
# kmer_size, kmer_redundancy_control, outfile, pepsirf_binary
# Method outputs/Returned: the link tsv
# Dependencies: subprocess, os, tempfile
def link(
protein_file: ProteinFastaFmt,
peptide_file: PeptideFastaFmt,
meta: str,
kmer_size: int,
kmer_redundancy_control: bool = False,
outfile: str = "./link.out",
pepsirf_binary: str = "pepsirf") -> PepsirfLinkTSVFormat:
#collect filepath for TSVFormat
tsv_output = PepsirfLinkTSVFormat()
#collect absolute filepaths for input files and binary if it is a file
protein_file = "'%s'" % (str(protein_file))
peptide_file = "'%s'" % (str(peptide_file))
if os.path.isfile(pepsirf_binary):
pepsirf_binary = "'%s'" % (os.path.abspath(pepsirf_binary))
#create a temp directory to run pepsirf in
with tempfile.TemporaryDirectory() as tempdir:
#start command with required/defualt parameters
cmd = "%s link --protein_file %s --peptide_file %s --meta %s -k %s -o %s" % (
pepsirf_binary, protein_file, peptide_file, meta, str(kmer_size), tsv_output
)
#check if optional parameters are inputted and add to command
if kmer_redundancy_control:
cmd += " -r"
#add outfile to command
cmd += ' >> %s' % (outfile)
#run command in the command line
subprocess.run(cmd, shell=True, check=True)
#return norm output
return tsv_output
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f71836fb5482752ab213272525c889404b51a0e6 | size: 953 | ext: py | lang: Python
max_stars_repo_path: trading_calendars/exchange_calendar_twse.py | max_stars_repo_name: playma/stockAI-trading_calendars | max_stars_repo_head_hexsha: 97aa9451961b000ef38e791c394c450015f4724d | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: trading_calendars/exchange_calendar_twse.py | max_issues_repo_name: playma/stockAI-trading_calendars | max_issues_repo_head_hexsha: 97aa9451961b000ef38e791c394c450015f4724d | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: trading_calendars/exchange_calendar_twse.py | max_forks_repo_name: playma/stockAI-trading_calendars | max_forks_repo_head_hexsha: 97aa9451961b000ef38e791c394c450015f4724d | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from datetime import time
import pandas as pd
from pytz import timezone
from .precomputed_trading_calendar import PrecomputedTradingCalendar
precomputed_taiwan_holidays = pd.to_datetime([
"1999-01-01",
"1999-02-10",
"1999-02-11",
"1999-02-12",
"1999-02-15",
"1999-02-16"
# TODO
])
class TWSEExchangeCalendar(PrecomputedTradingCalendar):
"""
Exchange calendar for the Taiwan Stock Exchange (TWSE).
Open time: 9:00 Asia/Taipei
Close time: 13:30 Asia/Taipei
Due to the complexity around the Taiwan exchange holidays, we are
hardcoding a list of holidays covering 1999-2025, inclusive. There are
no known early closes or late opens.
"""
name = "TWSE"
tz = timezone("Asia/Taipei")
open_times = (
(None, time(9, 1)),
)
close_times = (
(None, time(13, 30)),
)
@property
def precomputed_holidays(self):
return precomputed_taiwan_holidays
avg_line_length: 23.243902 | max_line_length: 74 | alphanum_fraction: 0.667366
content_no_comment:
from datetime import time
import pandas as pd
from pytz import timezone
from .precomputed_trading_calendar import PrecomputedTradingCalendar
precomputed_taiwan_holidays = pd.to_datetime([
"1999-01-01",
"1999-02-10",
"1999-02-11",
"1999-02-12",
"1999-02-15",
"1999-02-16"
])
class TWSEExchangeCalendar(PrecomputedTradingCalendar):
name = "TWSE"
tz = timezone("Asia/Taipei")
open_times = (
(None, time(9, 1)),
)
close_times = (
(None, time(13, 30)),
)
@property
def precomputed_holidays(self):
return precomputed_taiwan_holidays
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f71838ec9f6985fcdd4b5b4edd492ad64896a68e | size: 178 | ext: py | lang: Python
max_stars_repo_path: examples/cg_example_pkg/main.py | max_stars_repo_name: SMAT-Lab/Scalpel | max_stars_repo_head_hexsha: 1022200043f2d9e8c24256821b863997ab34dd49 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 102 | max_stars_repo_stars_event_min_datetime: 2021-12-15T09:08:48.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-24T15:15:25.000Z
max_issues_repo_path: examples/cg_example_pkg/main.py | max_issues_repo_name: StarWatch27/Scalpel | max_issues_repo_head_hexsha: 8853e6e84f318f3cfeda0e03d274748b2fbe30fa | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 11 | max_issues_repo_issues_event_min_datetime: 2021-12-04T11:48:31.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-21T09:21:45.000Z
max_forks_repo_path: examples/cg_example_pkg/main.py | max_forks_repo_name: StarWatch27/Scalpel | max_forks_repo_head_hexsha: 8853e6e84f318f3cfeda0e03d274748b2fbe30fa | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 11 | max_forks_repo_forks_event_min_datetime: 2021-12-04T11:47:41.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-06T09:04:39.000Z
content:
from .sub_folder1.module1 import Module1
from .sub_folder1.module2 import Module2
module1 = Module1()
do_add = module1.add(1,1)
module2 = Module2()
do_minus = module2.minus(1,1)
avg_line_length: 25.428571 | max_line_length: 40 | alphanum_fraction: 0.775281
content_no_comment:
from .sub_folder1.module1 import Module1
from .sub_folder1.module2 import Module2
module1 = Module1()
do_add = module1.add(1,1)
module2 = Module2()
do_minus = module2.minus(1,1)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f718392acc40e2659410454bda12b4e661825d9c | size: 1,537 | ext: py | lang: Python
max_stars_repo_path: CustomExtension.extension/STVTools.tab/Experiment.panel/Test.pulldown/Tag Seleted.pushbutton/script.py | max_stars_repo_name: Melca-G/Aeolus | max_stars_repo_head_hexsha: e014cdbbffc1c650d569efd8750480bc5a4cdc3b | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: CustomExtension.extension/STVTools.tab/Experiment.panel/Test.pulldown/Tag Seleted.pushbutton/script.py | max_issues_repo_name: Melca-G/Aeolus | max_issues_repo_head_hexsha: e014cdbbffc1c650d569efd8750480bc5a4cdc3b | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: CustomExtension.extension/STVTools.tab/Experiment.panel/Test.pulldown/Tag Seleted.pushbutton/script.py | max_forks_repo_name: Melca-G/Aeolus | max_forks_repo_head_hexsha: e014cdbbffc1c650d569efd8750480bc5a4cdc3b | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import sys, clr
import ConfigParser
from os.path import expanduser
# Set system path
home = expanduser("~")
cfgfile = open(home + "\\STVTools.ini", 'r')
config = ConfigParser.ConfigParser()
config.read(home + "\\STVTools.ini")
# Master Path
syspath1 = config.get('SysDir','MasterPackage')
sys.path.append(syspath1)
# Built Path
syspath2 = config.get('SysDir','SecondaryPackage')
sys.path.append(syspath2)
import Selection
clr.AddReference('System')
from Autodesk.Revit.DB import Document, FilteredElementCollector, GraphicsStyle, Transaction, BuiltInCategory,\
RevitLinkInstance, UV, XYZ, SpatialElementBoundaryOptions, CurveArray, ElementId, View, RevitLinkType, WorksetTable,\
Workset, FilteredWorksetCollector, WorksetKind, RevitLinkType, RevitLinkInstance, View3D, ViewType,ElementClassFilter,\
ViewFamilyType, ViewFamily, BuiltInParameter, IndependentTag, Reference, TagMode, TagOrientation
from pyrevit import revit, DB, forms
clr. AddReferenceByPartialName('PresentationCore')
clr.AddReferenceByPartialName('PresentationFramework')
clr.AddReferenceByPartialName('System.Windows.Forms')
import System.Windows.Forms
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
t = Transaction(doc, 'Tag Selected')
t.Start()
selection = Selection.get_selected_elements(doc)
for a in selection:
location = a.Location
IndependentTag.Create(doc, doc.ActiveView.Id, Reference(a), True, TagMode.TM_ADDBY_MULTICATEGORY, TagOrientation.Horizontal, location.Point)
print(location.Point)
t.Commit()
avg_line_length: 39.410256 | max_line_length: 144 | alphanum_fraction: 0.798308
content_no_comment:
import sys, clr
import ConfigParser
from os.path import expanduser
home = expanduser("~")
cfgfile = open(home + "\\STVTools.ini", 'r')
config = ConfigParser.ConfigParser()
config.read(home + "\\STVTools.ini")
syspath1 = config.get('SysDir','MasterPackage')
sys.path.append(syspath1)
syspath2 = config.get('SysDir','SecondaryPackage')
sys.path.append(syspath2)
import Selection
clr.AddReference('System')
from Autodesk.Revit.DB import Document, FilteredElementCollector, GraphicsStyle, Transaction, BuiltInCategory,\
RevitLinkInstance, UV, XYZ, SpatialElementBoundaryOptions, CurveArray, ElementId, View, RevitLinkType, WorksetTable,\
Workset, FilteredWorksetCollector, WorksetKind, RevitLinkType, RevitLinkInstance, View3D, ViewType,ElementClassFilter,\
ViewFamilyType, ViewFamily, BuiltInParameter, IndependentTag, Reference, TagMode, TagOrientation
from pyrevit import revit, DB, forms
clr. AddReferenceByPartialName('PresentationCore')
clr.AddReferenceByPartialName('PresentationFramework')
clr.AddReferenceByPartialName('System.Windows.Forms')
import System.Windows.Forms
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
t = Transaction(doc, 'Tag Selected')
t.Start()
selection = Selection.get_selected_elements(doc)
for a in selection:
location = a.Location
IndependentTag.Create(doc, doc.ActiveView.Id, Reference(a), True, TagMode.TM_ADDBY_MULTICATEGORY, TagOrientation.Horizontal, location.Point)
print(location.Point)
t.Commit()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f718393cecda836a590a6dc97b77a13ca4ce20f5 | size: 70,177 | ext: py | lang: Python
max_stars_repo_path: cathpy/core/align.py | max_stars_repo_name: shouldsee/cathpy | max_stars_repo_head_hexsha: 5f7fa1322434b2d254f0158c5840f029b12dbafe | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: cathpy/core/align.py | max_issues_repo_name: shouldsee/cathpy | max_issues_repo_head_hexsha: 5f7fa1322434b2d254f0158c5840f029b12dbafe | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: cathpy/core/align.py | max_forks_repo_name: shouldsee/cathpy | max_forks_repo_head_hexsha: 5f7fa1322434b2d254f0158c5840f029b12dbafe | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
"""
Manipulate protein sequences and alignments
"""
# core
import io
import gzip
import logging
import re
import functools
# pip
import dendropy
# local
from cathpy.core import error as err
from cathpy.core.tests import is_valid_domain_id
from cathpy.core.models import AminoAcid, AminoAcids, Residue, Segment
LOG = logging.getLogger(__name__)
class Sequence(object):
"""Class to represent a protein sequence."""
re_gap_chars = r'[.\-]'
has_warned_about_deprecated_sequence_headers = False
def __init__(self, hdr: str, seq: str, *, meta=None, description=None):
self._hdr = hdr
self._seq = seq
try:
hdr_info = Sequence.split_hdr(hdr)
except:
raise err.GeneralError('caught error while parsing sequence header: '+hdr)
self._id = hdr_info['id']
self.accession = hdr_info['accession']
self.description = description
self.id_type = hdr_info['id_type']
self.id_ver = hdr_info['id_ver']
self.segs = hdr_info['segs']
self.meta = hdr_info['meta']
if meta:
for key, val in meta.items():
self.meta[key] = val
@property
def uid(self):
"""Returns the unique id for this Sequence"""
return self._id
def set_uid(self, _id):
"""Sets the unique id of the current Sequence object"""
self._id = _id
@property
def is_cath_domain(self):
"""Returns whether this Sequence is a CATH domain."""
return self.id_type == 'domain'
def get_residues(self):
"""
Returns an array of Residue objects based on this sequence.
Note: if segment information has been specified then this
will be used to calculate the `seq_num` attribute.
Raises:
OutOfBoundsError: problem mapping segment info to sequence
"""
residues = []
segs = self.segs
if not segs:
segs = [Segment(1, len(self.seq_no_gaps))]
current_seg_offset = 0
def next_seg():
nonlocal current_seg_offset
if current_seg_offset < len(segs):
seg = segs[current_seg_offset]
current_seg_offset += 1
return seg
else:
return None
# theoretical length according to segment info vs length according to sequence
seg_length = 0
for seg in segs:
seg_length += seg.stop - seg.start + 1
actual_length = len(self.seq_no_gaps)
if seg_length != actual_length:
# should this be a warning? (with 1-n numbering as fallback?)
raise err.OutOfBoundsError(
('segment information {} suggests that the sequence '
'length should be {}, but the sequence has {} (non-gap) characters: {}').format(
repr(segs), seg_length, actual_length, self.seq))
current_seg = next_seg()
seq_num = current_seg.start
for offset, aa in enumerate(self.seq, 0):
if current_seg and seq_num > current_seg.stop:
current_seg = next_seg()
if not current_seg:
if not Sequence.is_gap(aa):
raise err.OutOfBoundsError(
('unable to map segment ({}) to sequence: '
'the final segment ends at {}, but the sequence has {} residues '
'(offset: {}, aa: {})').format(
repr(current_seg), seq_num-1, len(self.seq_no_gaps), offset, aa
))
else:
seq_num = None
else:
seq_num = current_seg.start
if Sequence.is_gap(aa):
res = Residue(aa)
else:
res = Residue(aa, seq_num)
seq_num += 1
residues.append(res)
return residues
def get_res_at_offset(self, offset):
"""Return the residue character at the given offset (includes gaps)."""
try:
res = self.seq[offset]
except:
raise err.SeqIOError((
"Error: failed to get residue at offset {} from sequence "
"with length {}: '{}'").format(offset, self.length(), self.seq))
return res
def get_res_at_seq_position(self, seq_pos):
"""Return the residue character at the given sequence position (ignores gaps)."""
seq_nogap = re.sub(Sequence.re_gap_chars, '', self.seq)
try:
res = seq_nogap[seq_pos-1]
except:
raise err.SeqIOError((
"Error: failed to get residue at position {} from sequence with {} "
"non-gap sequence positions: '{}'").format(
seq_pos, len(seq_nogap), self.seq))
return res
def get_seq_position_at_offset(self, offset):
"""Returns sequence position (ignoring gaps) of the given residue (may include gaps)."""
seq_to_offset = self.seq[:offset+1]
if re.match(seq_to_offset[-1], Sequence.re_gap_chars):
raise err.GapError(
"Cannot get sequence position at offset {} since this corresponds to a gap".format(
offset))
seq_nogap = re.sub(Sequence.re_gap_chars, '', seq_to_offset)
return len(seq_nogap)
def get_offset_at_seq_position(self, seq_pos):
"""Return the offset (with gaps) of the given sequence position (ignores gaps)."""
current_seq_pos = 0
for offset in range(len(self.seq)):
if not re.match(Sequence.re_gap_chars, self.seq[offset]):
current_seq_pos += 1
if current_seq_pos == seq_pos:
return offset
raise err.OutOfBoundsError("failed to find offset at sequence position {}".format(seq_pos))
def length(self):
"""Return the length of the sequence."""
return len(self.seq)
@property
def seq(self):
"""Return the amino acid sequence as a string."""
return self._seq
@property
def seq_no_gaps(self):
"""Return the amino acid sequence as a string (after removing all gaps)."""
seq = re.sub(self.re_gap_chars, '', self._seq)
return seq
def set_sequence(self, seq):
"""Sets the AA residues for this Sequence."""
self._seq = seq
def set_cluster_id(self, id_str):
"""Sets the cluster id for this Sequence."""
self.meta['CLUSTER_ID'] = id_str
@property
def cluster_id(self):
"""Returns the cluster id for this Sequence."""
return self.meta['CLUSTER_ID'] if 'CLUSTER_ID' in self.meta else None
@classmethod
def split_hdr(cls, hdr: str) -> dict:
"""
Splits a sequence header into meta information.
Args:
hdr (str): header string (eg `'domain|4_2_0|1cukA01/3-23_56-123'`)
Returns:
info (dict): header info
::
{
'id': 'domain|4_2_0|1cukA01/3-23_56-123',
'accession': '1cukA01',
'id_type': 'domain',
'id_ver': '4_2_0',
'segs': [Segment(3, 23), Segment(56,123)],
'meta': {}
}
"""
accession = None
id_type = None
id_ver = None
segs = []
meta = {}
if not hdr:
raise err.ParamError('hdr seems to be empty')
# split meta features (after whitespace)
hdr_parts = hdr.split(maxsplit=1)
id_with_segs_str = hdr_parts[0]
meta_str = hdr_parts[1] if len(hdr_parts) > 1 else None
# split id / segments
id_with_segs_parts = id_with_segs_str.split('/', maxsplit=1)
id_str = id_with_segs_parts[0]
segs_str = id_with_segs_parts[1] if len(id_with_segs_parts) > 1 else None
# split id into type, id, version
id_parts = id_str.split('|')
# 1cukA01/23-123
if len(id_parts) == 1:
accession = id_parts[0]
if is_valid_domain_id(accession):
id_type = 'domain'
# domain|1cukA01/23-123
if len(id_parts) == 2:
id_type, accession = id_parts
# cath|4_2_0|5lhzA00/886-963
# cath|current|5lhzA00/886-963
if len(id_parts) == 3:
id_type, id_ver, accession = id_parts
if is_valid_domain_id(id_ver):
if not __class__.has_warned_about_deprecated_sequence_headers:
LOG.warning(
("Warning: found an old sequence header with TYPE|ID|VERSION '%s'. "
"Parsing this as TYPE|VERSION|ID for now, but this is a hack, and "
"may be deprecated in future versions (fix structural cluster reps?)"
"(ignoring all future occurrences in this runtime)"), id_parts)
__class__.has_warned_about_deprecated_sequence_headers = True
id_type, accession, id_ver = id_parts
# segments
if segs_str:
for seg_str in segs_str.split('_'):
(start, stop) = seg_str.split('-')
seg = Segment(int(start), int(stop))
segs.append(seg)
# features
if meta_str:
meta_parts = meta_str.split()
for f in meta_parts.split('=', maxsplit=1):
if len(f) == 2:
meta[f[0]] = f[1]
else:
LOG.warning("failed to parse meta feature from string %s", meta_str)
return({'accession': accession, 'id': id_with_segs_str, 'id_type': id_type,
'id_ver': id_ver, 'segs': segs, 'meta': meta})
def to_fasta(self, wrap_width=80):
"""Return a string for this Sequence in FASTA format."""
fasta_str = ""
fasta_str += '>' + self.uid + '\n'
if wrap_width:
for line in Sequence._chunker(self.seq, wrap_width):
fasta_str += line + '\n'
else:
fasta_str += self.seq + '\n'
return fasta_str
def to_pir(self, wrap_width=60, use_accession=False):
"""Return a string for this Sequence in PIR format."""
pir_str = ""
pir_str += '>P1;{}\n'.format(self.uid if not use_accession else self.accession)
desc = self.description or self.accession
pir_str += desc + '\n'
seq = self.seq + '*'
if wrap_width:
for line in Sequence._chunker(seq, wrap_width):
pir_str += line + '\n'
else:
pir_str += seq + '\n'
return pir_str
def copy(self):
"""Provide a deep copy of this sequence."""
s = Sequence(self._hdr, self.seq, meta=self.meta)
return s
def insert_gap_at_offset(self, offset, gap_char="-"):
"""Insert a gap into the current sequence at a given offset."""
new_seq = self.seq[:offset] + gap_char + self.seq[offset:]
self.set_sequence(new_seq)
def set_gap_char_at_offset(self, offset, gap_char):
"""
Set the gap character at the given offset.
If the residue at a given position is a gap, then override
the gap char with the given character.
"""
residues = list(self.seq)
if Sequence.is_gap(residues[offset]) and residues[offset] != gap_char:
residues[offset] = gap_char
self.set_sequence("".join(residues))
def lower_case_at_offset(self, start, stop=None):
"""Lower case the residues in the given sequence window."""
if stop is None:
stop = start + 1
old_seq = self.seq
new_seq = old_seq[:start] + old_seq[start:stop].lower() + old_seq[stop:]
self.set_sequence(new_seq)
def set_all_gap_chars(self, gap_char='-'):
"""Sets all gap characters."""
seqstr = re.sub(self.re_gap_chars, gap_char, self.seq)
self.set_sequence(seqstr)
def set_lower_case_to_gap(self, gap_char='-'):
"""Set all lower-case characters to gap."""
seqstr = re.sub(r'[a-z]', gap_char, self.seq)
self.set_sequence(seqstr)
def slice_seq(self, start, stop=None):
"""Return a slice of this sequence."""
return self.seq[start:stop]
@staticmethod
def _chunker(text_str, width):
return (text_str[pos:pos + width] for pos in range(0, len(text_str), width))
@staticmethod
def is_gap(res_char):
"""Test whether a character is considered a gap."""
return res_char in ['-', '.']
@property
def accession_and_seginfo(self):
"""Returns accession and segment info for this Sequence."""
segs_str = self.seginfo
if segs_str:
return self.accession + '/' + segs_str
else:
return self.accession
@property
def seginfo(self):
"""Returns the segment info for this Sequence."""
segs_str = '_'.join(['-'.join([str(s.start), str(s.stop)]) for s in self.segs])
return segs_str
def apply_segments(self, segs):
"""
Returns a subset of the current sequence, chopped by the segments.
Args:
segs ([]): [Segment] or [[START, STOP], ...]
Returns:
seq (:class:`Sequence`): sequence object
"""
if self.segs:
raise Exception("cannot apply segments as Sequence already has segments defined")
seq = self.seq
acc = self.accession
startstops = [(seg[0], seg[1]) for seg in segs]
seq_range = '_'.join(['{}-{}'.format(ss[0],ss[1]) for ss in startstops])
seq_parts = [seq[ss[0]-1:ss[1]] for ss in startstops]
subseq = Sequence(hdr="{}/{}".format(acc, seq_range), seq="".join(seq_parts))
return subseq
def __str__(self):
"""Represents this Sequence as a string."""
return '{:<30} {}'.format(self.uid, self.seq)
def __len__(self):
return len(self.seq)
class Correspondence(object):
"""
Provides a mapping between ATOM and SEQRES residues.
A correspondence is a type of alignment that provides the equivalences
between the residues in the protein sequence (eg ``SEQRES`` records) and
the residues actually observed in the structure (eg ``ATOM`` records).
Within CATH, this is most commonly initialised from a GCF file:
::
aln = Correspondence.from_gcf('/path/to/<uid>.gcf')
TODO: allow this to be created from PDBe API endpoint.
"""
GCF_GAP_CHAR = '*'
FASTA_GAP_CHAR = '-'
def __init__(self, uid=None, *, hdr=None, residues=None,):
"""Create a new Correspondence object."""
self._uid = uid
self._hdr = hdr
self.residues = residues if residues else []
super().__init__()
@property
def uid(self):
"""Returns the unique id of the current Correspondence object."""
return self._uid
@classmethod
def from_gcf(cls, gcf_io):
"""Create a new Correspondence object from a GCF io / filename / string.
This provides a correspondence between SEQRES and ATOM records for a given
protein structure.
Example format:
::
>gi|void|ref1
A 1 5 A
K 2 6 K
G 3 7 G
H 4 8 H
P 5 9 P
G 6 10 G
P 7 10A P
K 8 10B K
A 9 11 A
P 10 * *
G 11 * *
...
"""
if isinstance(gcf_io, str):
if gcf_io[0] == '>':
gcf_io = io.StringIO(gcf_io)
else:
gcf_io = open(gcf_io)
try:
hdr = gcf_io.readline().strip()
hdr = hdr[1:] # remove '>'
uid = hdr.split('|')[-1]
except AttributeError:
# make a potentially confusing error slightly less so
raise err.SeqIOError(
"encountered an error trying to readline() on GCF io ({})".format(gcf_io))
line_no = 1
residues = []
for line in gcf_io:
line_no += 1
try:
seqres_aa, seqres_num, pdb_label, pdb_aa = line.split()
if pdb_aa is not seqres_aa and pdb_aa is not Correspondence.GCF_GAP_CHAR:
LOG.warning("pdb_aa '%s' does not match seqres_aa '%s' (line: %s)",
pdb_aa, seqres_aa, line_no)
except:
raise err.SeqIOError("Error: failed to parse GCF '{}' ({}:{})".format(
line, str(gcf_io), line_no))
if pdb_label is Correspondence.GCF_GAP_CHAR:
pdb_label = None
pdb_aa = None
res = Residue(seqres_aa, int(seqres_num), pdb_label, pdb_aa=pdb_aa)
residues.extend([res])
gcf_io.close()
corr = Correspondence(uid=uid, hdr=hdr, residues=residues)
return corr
@property
def seqres_length(self) -> int:
"""Returns the number of `SEQRES` residues"""
return len(self.residues)
@property
def atom_length(self) -> int:
"""Returns the number of `ATOM` residues"""
atom_residues = [res for res in self.residues if res.pdb_label is not None]
return len(atom_residues)
def get_res_at_offset(self, offset: int) -> Residue:
"""Returns the :class:`Residue` at the given offset (zero-based)"""
return self.residues[offset]
def get_res_by_seq_num(self, seq_num: int) -> Residue:
"""Return the :class:`Residue` with the given sequence number"""
res = next((res for res in self.residues if res.seq_num == seq_num), None)
return res
def get_res_by_pdb_label(self, pdb_label: str) -> Residue:
"""Returns the :class:`Residue` that matches `pdb_label`"""
res = next((res for res in self.residues if res.pdb_label == pdb_label), None)
return res
def get_res_by_atom_pos(self, pos: int) -> Residue:
"""Returns Residue corresponding to position in the ATOM sequence (ignores gaps)."""
assert isinstance(pos, int)
assert pos >= 1
atom_residues = [res for res in self.residues if res.pdb_label is not None]
res = atom_residues[pos-1]
return res
def get_res_offset_by_atom_pos(self, pos: int) -> Residue:
"""
Returns offset of Residue at given position in the ATOM sequence (ignoring gaps).
Raises:
:class:`cathpy.error.OutOfBoundsError`
"""
assert isinstance(pos, int)
assert pos >= 1
atom_pos = 0
for offset, res in enumerate(self.residues):
if res.pdb_label is not None:
atom_pos += 1
# LOG.debug("pos({}) -> res: offset: {}, res: {}, atom_pos: {}".format(
# pos, offset, repr(res), atom_pos))
if atom_pos == pos:
return offset
atom_residues = [res for res in self.residues if res.pdb_label is not None]
raise err.OutOfBoundsError(
"failed to find residue in atom pos {}, last atom residue is {} (position {})".format(
pos, repr(atom_residues[-1]), atom_pos))
@property
def first_residue(self) -> Residue:
"""Returns the first residue in the correspondence."""
return self.get_res_at_offset(0)
@property
def last_residue(self) -> Residue:
"""Returns the last residue in the correspondence."""
return self.get_res_at_offset(-1)
@property
def atom_sequence(self) -> Sequence:
"""Returns a Sequence corresponding to the ATOM records."""
_id = "atom|{}".format(self.uid)
res = [res.pdb_aa if res.pdb_label else Correspondence.FASTA_GAP_CHAR
for res in self.residues]
return Sequence(_id, "".join(res))
@property
def seqres_sequence(self) -> Sequence:
"""Returns a Sequence corresponding to the SEQRES records."""
_id = "seqres|{}".format(self.uid)
res = [res.aa for res in self.residues]
return Sequence(_id, "".join(res))
def apply_seqres_segments(self, segs):
"""Returns a new correspondence from just the residues within the segments."""
current_seg_offset = 0
def next_seg():
nonlocal current_seg_offset
# LOG.debug("apply_seqres_segments.next_seg: current={} segs={}".format(
# current_seg_offset, repr(segs) ))
if current_seg_offset < len(segs):
seg = segs[current_seg_offset]
current_seg_offset += 1
return seg
current_seg = next_seg()
selected_residues = []
for res in self.residues:
# advance to the next segment once this residue is past the current one
# (checking the residue again afterwards, so residues that fall at the
# start of the next segment are not skipped)
while current_seg and res.seq_num > current_seg.stop:
current_seg = next_seg()
if not current_seg:
break
if current_seg.start <= res.seq_num <= current_seg.stop:
selected_residues.append(res)
corr = __class__(uid=self.uid, hdr=self._hdr, residues=selected_residues)
return corr
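# Illustrative usage of `apply_seqres_segments` (a sketch; assumes `corr` is a
# Correspondence covering SEQRES numbers 1-11 and uses the `Segment` model
# imported by this module):
#
#     sub_corr = corr.apply_seqres_segments([Segment(2, 5), Segment(8, 10)])
#     # sub_corr now only contains the residues with seq_num 2-5 and 8-10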
def to_gcf(self) -> str:
"""Renders the current object as a GCF string.
Example format:
::
>gi|void|ref1
A 1 5 A
K 2 6 K
G 3 7 G
H 4 8 H
P 5 9 P
G 6 10 G
P 7 10A P
K 8 10B K
A 9 11 A
P 10 * *
G 11 * *
...
"""
hdr = self._hdr if self._hdr else self.uid
gcf_str = '>' + hdr + '\n'
for res in self.residues:
if res.pdb_label:
pdb_label = '{}{}'.format(res.pdb_residue_num, res.pdb_insert_code if res.pdb_insert_code else ' ')
vals = [res.aa, res.seq_num, pdb_label, res.pdb_aa]
else:
vals = [res.aa, res.seq_num, '* ', '*']
gcf_str += '{} {:>3} {:>4} {}\n'.format(*vals)
return gcf_str
def to_sequences(self) -> tuple:
"""Returns the Correspondence as a (seqres, atom) pair of `Sequence` objects"""
seqs = (self.seqres_sequence, self.atom_sequence)
return seqs
def to_fasta(self, **kwargs) -> str:
"""Returns the Correspondence as a string (FASTA format)."""
seqs = self.to_sequences()
return seqs[0].to_fasta(**kwargs) + seqs[1].to_fasta(**kwargs)
def to_aln(self):
"""Returns the Correspondence as an Align object."""
seqs = self.to_sequences()
return Align(seqs=seqs)
def __str__(self):
return self.to_fasta()
def __repr__(self):
return self.to_fasta()
class AlignMetaSummary(object):
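"""Summary of the meta data attached to an alignment (EC/GO term counts, CATH domain count, DOPS score, organism tree)."""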
def __init__(self, *, seq_count, ec_term_counts=None, go_term_counts=None,
cath_domain_count=0, dops_score=None, organism_newick=None):
self.seq_count = seq_count
self.ec_term_counts = ec_term_counts
self.go_term_counts = go_term_counts
self.cath_domain_count = cath_domain_count
self.dops_score = dops_score
self.organism_newick = organism_newick
class Align(object):
"""
Object representing a protein sequence alignment.
The only required field is `sequences`, otherwise all fields are optional
and are mainly here to satisfy the named fields in `STOCKHOLM` alignment
format.
Args:
seqs ([:class:`Sequence`]): aligned sequences (required)
uid (str): unique identifier for this alignment
accession (str): accession for this alignment
author (str): person responsible for creating this alignment
cath_version (str | :class:`CathVersion`): CATH version
dops_score (float): sequence diversity score (0 low, 100 high)
description (str): description to associate with this alignment
aln_type (str): type of alignment (eg cluster type)
min_bitscore (float): minimum bitscore for sequences in this alignment
tree_nhx (str): store the tree (NHX format)
tree_id (str): identifier of the tree
"""
REF_GAP_CHAR = '-'
MERGE_GAP_CHAR = '.'
STO_META_TO_ATTR = [
# required
('ID', '_uid'),
('AC', 'accession'),
('DE', 'description'),
('AU', 'author'),
('SE', 'meta.source_seed'),
('SS', 'meta.source_structure'),
('BM', 'meta.build_method'),
('SM', 'meta.search_method'),
('GA', 'meta.gathering_threshold'),
('TC', 'meta.trusted_cutoff'),
('NC', 'meta.noise_cutoff'),
('AC', 'accession'),
('TP', 'aln_type'),
('TC', 'min_bitscore'),
('SQ', None),
# optional
('DC', 'meta.db_comment'),
('DR', {
'CATH': 'cath_version',
'DOPS': 'dops_score',
'INTERPRO': 'interpro',
}),
('RC', 'meta.ref_comment'),
('RN', 'meta.ref_number'),
('RM', 'meta.ref_medline'),
('RT', 'meta.ref_title'),
('RA', 'meta.ref_author'),
('RL', 'meta.ref_location'),
('PI', 'meta.prev_id'),
('KW', 'meta.keywords'),
('CC', 'meta.comment'),
('NE', 'meta.pfam_accession'),
('NL', 'meta.seq_location'),
('WK', 'meta.wikipedia_link'),
('CL', 'meta.pfam_clan'),
('MB', 'meta.pfam_clan_membership'),
# trees
('NH', 'tree_nhx'),
('TN', 'tree_id'),
]
def __init__(self, seqs=None, *, uid=None, accession=None, author=None,
cath_version=None, dops_score=None, description=None,
aln_type=None, min_bitscore=None, tree_nhx=None, tree_id=None):
self.meta = {} # per file meta data
self.seq_meta = {} # consensus sequence-based meta data
self.__seq_ids = set()
self._uid = uid
self.accession = accession
self.author = author
self.description = description
self.cath_version = cath_version
self.dops_score = dops_score
self.accession = accession
self.aln_type = aln_type
self.min_bitscore = min_bitscore
self.tree_nhx = tree_nhx
self.tree_id = tree_id
self.seqs = seqs if seqs else []
self.__aln_positions = 0
self._merge_counter = 0
@property
def uid(self):
"""Returns the id of this Align object."""
return self._uid
def set_uid(self, uid):
"""Sets the id of this Align object."""
self._uid = uid
def _next_merge_id(self):
self._merge_counter += 1
return self._merge_counter
@property
def sequences(self):
"""Provides access to the Sequence objects in the alignment."""
return self.seqs
@property
def aln_positions(self):
"""Returns the number of alignment positions."""
return self.__aln_positions
@aln_positions.setter
def aln_positions(self, value):
self.__aln_positions = value
@property
def count_sequences(self):
"""Returns the number of sequences in the alignment."""
return len(self.seqs)
@property
def total_gap_positions(self):
"""Returns the total number of gaps in the alignment."""
total_gaps = 0
for s in self.seqs:
total_gaps += s.seq.count(self.REF_GAP_CHAR)
total_gaps += s.seq.count(self.MERGE_GAP_CHAR)
return total_gaps
@property
def total_positions(self):
"""Returns the total number of positions in the alignment."""
return self.count_sequences * self.aln_positions
def find_first_seq_by_accession(self, acc):
"""Returns the first Sequence with the given accession."""
seqs_with_acc = [seq for seq in self.seqs if seq.accession == acc]
return seqs_with_acc[0]
def find_seq_by_id(self, _id):
"""Returns the Sequence corresponding to the provided id."""
seqs_with_id = [seq for seq in self.seqs if seq.uid == _id]
if len(seqs_with_id) > 1:
raise err.SeqIOError("Found more than one ({}) sequence matching id '{}'".format(
len(seqs_with_id), _id))
if not seqs_with_id: # ie empty list
raise err.NoMatchesError('failed to find sequence with id {} in alignment'.format(_id))
return seqs_with_id[0]
def find_seq_by_accession(self, acc):
"""Returns the Sequence corresponding to the provided id."""
seqs_with_acc = [seq for seq in self.seqs if seq.accession == acc]
if len(seqs_with_acc) > 1:
raise err.TooManyMatchesError(
"Found more than one ({}) sequence matching accession '{}'".format(
len(seqs_with_acc), acc),)
if len(seqs_with_acc) == 0:
raise err.NoMatchesError(
'failed to find sequence with accession {} in alignment'.format(acc))
return seqs_with_acc[0]
def get_seq_at_offset(self, offset):
"""Returns the Sequence at the given offset (zero-based)."""
return self.seqs[offset]
@classmethod
def from_fasta(cls, fasta_io):
"""Initialises an alignment object from a FASTA file / string / io"""
aln = Align()
aln.read_sequences_from_fasta(fasta_io)
return aln
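# Illustrative usage of `from_fasta` (a sketch; the same call also accepts a
# filename or an open file handle -- a leading '>' marks the input as content):
#
#     aln = Align.from_fasta(">seq1\nAK-GH\n>seq2\nAKPGH\n")
#     assert aln.count_sequences == 2
#     assert aln.aln_positions == 5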
@classmethod
def from_pir(cls, pir_io):
"""Initialises an alignment object from a PIR file / string / io"""
aln = Align()
aln.read_sequences_from_pir(pir_io)
return aln
@staticmethod
def _get_io_from_file_or_string(file_or_string):
filename = str(file_or_string)
if isinstance(file_or_string, str):
if file_or_string[0] in ('>', '#'):  # fasta or stockholm content
filename = '<string>'
_io = io.StringIO(file_or_string)
elif file_or_string.endswith('.gz'):
_io = gzip.open(file_or_string, 'rt')
else:
_io = open(file_or_string, 'rt')
elif isinstance(file_or_string, io.IOBase):
_io = file_or_string
else:
_io = file_or_string
LOG.warning("unexpected io type: %s", repr(file_or_string))
return _io, filename
@classmethod
def from_stockholm(cls, sto_io, *, nowarnings=False):
"""Initialises an alignment object from a STOCKHOLM file / string / io"""
sto_io, sto_filename = cls._get_io_from_file_or_string(sto_io)
aln = cls()
sto_header = sto_io.readline()
assert sto_header.startswith('# STOCKHOLM 1.0')
aln_meta = {}
aln_seq_meta = {}
seq_meta_by_id = {}
seq_aa_by_id = {}
aln_meta_unrecognised_features = {}
gc_meta_to_attr = {meta: attr for (meta, attr) in cls.STO_META_TO_ATTR}
line_count = 0
for line in sto_io:
line_count += 1
line = line.strip()
if line.startswith('#=GF'):
try:
_, feature, per_file_ann = line.split(None, 2)
except ValueError:
if not nowarnings:
LOG.warning('ignoring GF record with incorrect columns (%s:%s "%s")',
sto_filename, line_count, line)
continue
except:
raise err.ParseError('failed to parse line {} "{}"'.format(
line_count, line))
if feature not in gc_meta_to_attr:
raise err.ParseError(
'encountered unexpected GF tag {} in line {} "{}" (known tags: {})'.format(
feature, line_count, line, repr(gc_meta_to_attr)))
attr = gc_meta_to_attr[feature]
if isinstance(attr, dict):
key, val = re.compile(r'[;:]\s+').split(per_file_ann, maxsplit=1)
per_file_ann = val
if key in attr:
attr = attr[key]
else:
LOG.warning('encountered unexpected GF tag %s->%s in line %s "%s" (known tags: %s)',
feature, key, line_count, line, repr(attr))
if feature not in aln_meta_unrecognised_features:
aln_meta_unrecognised_features[feature] = []
aln_meta_unrecognised_features[feature].extend([per_file_ann])
attr = None
if attr:
if attr.startswith('meta.'):
attr = attr[len('meta.'):]
aln_meta[attr] = per_file_ann
else:
LOG.debug('setting aln attr "%s" to "%s"', attr, per_file_ann)
setattr(aln, attr, per_file_ann)
elif line.startswith('#=GC'):
try:
_, feature, per_col_ann = line.split(None, 2)
aln_seq_meta[feature] = per_col_ann
except ValueError:
if not nowarnings:
LOG.warning('ignoring GC record with incorrect columns (%s:%s "%s")',
sto_filename, line_count, line)
except:
raise err.ParseError('failed to parse line {} "{}"'.format(
line_count, line))
elif line.startswith('#=GS'):
try:
_, seq_id, feature, per_seq_ann = line.split(None, 3)
if feature == 'DR':
dr_type, per_seq_ann = per_seq_ann.split(None, 1)
dr_type = dr_type.rstrip(';')
feature = feature + '_' + dr_type
if seq_id not in seq_meta_by_id:
seq_meta_by_id[seq_id] = {}
seq_meta_by_id[seq_id][feature] = per_seq_ann
except ValueError:
if not nowarnings:
LOG.warning('ignoring GS record with incorrect columns (%s:%s "%s")',
sto_filename, line_count, line)
except:
raise err.ParseError('failed to parse line {} "{}"'.format(
line_count, line))
elif line.startswith('#=GR'):
_, seq_id, feature, per_res_ann = line.split(None, 3)
if seq_id not in seq_meta_by_id:
seq_meta_by_id[seq_id] = {}
seq_meta_by_id[seq_id][feature] = per_res_ann
elif line.startswith('//'):
pass
else:
seq_id, seq_aa = line.split()
if seq_id not in seq_aa_by_id:
seq_aa_by_id[seq_id] = ''
seq_aa_by_id[seq_id] += seq_aa
for seq_id, seq_aa in seq_aa_by_id.items():
seq_meta = seq_meta_by_id[seq_id] if seq_id in seq_meta_by_id else {}
seq = Sequence(seq_id, seq_aa, meta=seq_meta)
aln.add_sequence(seq)
for key, val in aln_meta.items():
aln.meta[key] = val
for key, val in aln_seq_meta.items():
aln.seq_meta[key] = val
sto_io.close()
return aln
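# Illustrative usage of `from_stockholm` (a sketch; assumes 'funfam.sto' is a
# STOCKHOLM 1.0 file on disk -- '.gz' files are opened transparently):
#
#     aln = Align.from_stockholm('funfam.sto')
#     print(aln.uid, aln.dops_score, aln.count_sequences)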
def read_sequences_from_fasta(self, fasta_io):
"""Parses aligned sequences from FASTA (str, file, io) and adds them to the current
Align object. Returns the number of sequences that are added."""
fasta_io, fasta_filename = __class__._get_io_from_file_or_string(fasta_io)
re_seqstr = re.compile(r'^[a-zA-Z.\-]+$')
seq_added = 0
current_hdr = None
current_seq = ''
line_count = 0
for line in fasta_io:
line_count += 1
line = line.rstrip()
if line == "":
break
if line[0] == '>':
if current_seq:
seq = Sequence(current_hdr, current_seq)
self.add_sequence(seq)
current_seq = ''
seq_added += 1
current_hdr = line[1:]
else:
if not re_seqstr.match(line):
raise err.SeqIOError(
('encountered an error parsing FASTA: '
'string "{}" does not look like a sequence ({}:{})').format(
line, fasta_filename, line_count))
if not current_hdr:
raise err.SeqIOError(
('encountered an error parsing FASTA: '
'found sequence "{}" without a header ({}:{})').format(
line, fasta_filename, line_count))
current_seq += str(line)
fasta_io.close()
if current_seq:
seq = Sequence(current_hdr, current_seq)
self.add_sequence(seq)
seq_added += 1
return seq_added
def read_sequences_from_pir(self, pir_io):
"""Parse aligned sequences from PIR (str, file, io) and adds them to the current
Align object. Returns the number of sequences that are added."""
pir_io, pir_filename = __class__._get_io_from_file_or_string(pir_io)
re_seqstr = re.compile(r'^[a-zA-Z.\-]+\*?$')
seq_added = 0
current_hdr = None
current_desc = None
current_seq = ''
line_count = 0
for line in pir_io:
line_count += 1
line = line.rstrip()
if line == "":
continue
if line[0] == '>':
# following line is description as free text
if current_seq:
current_seq = current_seq.replace("*", "")
seq = Sequence(current_hdr, current_seq, description=current_desc)
self.add_sequence(seq)
current_seq = ''
seq_added += 1
seq_type, current_hdr = line[1:].split(';')
line = next(pir_io).rstrip()
current_desc = line
else:
if not re_seqstr.match(line):
raise err.SeqIOError(
('encountered an error parsing PIR: '
'string "{}" does not look like a sequence ({}:{})').format(
line, pir_filename, line_count))
if not current_hdr:
raise err.SeqIOError(
('encountered an error parsing PIR: '
'found sequence "{}" without a header ({}:{})').format(
line, pir_filename, line_count))
current_seq += str(line)
pir_io.close()
if current_seq:
current_seq = current_seq.replace("*", "")
seq = Sequence(current_hdr, current_seq, description=current_desc)
self.add_sequence(seq)
seq_added += 1
return seq_added
def _reindex_seq_ids(self):
self.__seq_ids = set()
for seq in self.seqs:
self.__seq_ids.add(seq.uid)
def add_sequence(self, seq: Sequence, *, offset: int = None):
"""
Add a sequence to this alignment.
Args:
seq (Sequence): the aligned sequence to add
offset (int): the index in the list where the sequence should be added (default: append)
"""
if offset is None:
offset = len(self.sequences)
if seq.uid in self.__seq_ids:
raise err.SeqIOError((
"Error: cannot add a sequence with id {}, "
"since this alignment already has a sequence with that id. [{}]").format(
seq.uid, ",".join(self.__seq_ids)))
if self.aln_positions:
if self.aln_positions != seq.length():
raise err.SeqIOError((
"Error: cannot add a sequence (id:{}) "
"with {} positions to an alignment with {} positions.").format(
seq.uid, seq.length(), self.aln_positions))
else:
self.__aln_positions = seq.length()
self.seqs.insert(offset, seq)
self.__seq_ids.add(seq.uid)
return seq
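# Illustrative usage of `add_sequence` (a sketch; every sequence added to an
# alignment must have the same number of alignment positions):
#
#     aln = Align()
#     aln.add_sequence(Sequence('seq1', 'AK-GH'))
#     aln.add_sequence(Sequence('seq2', 'AKPGH'))
#     aln.add_sequence(Sequence('seq3', 'AKP'))   # raises SeqIOError (3 != 5)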
def subset(self, ids, *, collapse_gaps=True):
"""
Returns a subset of the alignment containing just the sequences with the given ids
"""
seqs = [self.find_seq_by_id(i) for i in ids]
new_align = Align(seqs=seqs)
if collapse_gaps:
new_align = new_align.remove_alignment_gaps()
return new_align
def remove_sequence_by_id(self, seq_id: str):
"""Removes a sequence from the alignment."""
for idx, seq in enumerate(self.seqs):
if seq.uid == seq_id:
LOG.info("Removing sequence with '{}' from alignment".format(seq_id))
del self.seqs[idx]
return seq
raise err.NoMatchesError('failed to find sequence with id {}'.format(seq_id))
def remove_alignment_gaps(self):
"""Return a new alignment after removing alignment positions
that contain a gap for all sequences."""
seqs = self.seqs
seq_length = seqs[0].length()
new_seq_strings = ["" for s in range(len(seqs))]
for aln_offset in range(seq_length):
total_gaps = 0
for seq in seqs:
if seq.seq[aln_offset] == '-' or seq.seq[aln_offset] == '.':
total_gaps += 1
if total_gaps < len(seqs):
for seq_pos in range(len(seqs)):
res = seqs[seq_pos].seq[aln_offset]
# print( "seq[{}:{}] pos:{} res:{}".format(
# aln_offset, seqs[seq_pos].uid, seq_pos, res) )
new_seq_strings[seq_pos] += res
else:
LOG.debug("Removing complete gap from alignment offset: %s", aln_offset)
new_aln = Align()
for seq_pos in range(len(new_seq_strings)):
hdr = seqs[seq_pos]._hdr
seq_str = new_seq_strings[seq_pos]
seq = Sequence(hdr, seq_str)
new_aln.add_sequence(seq)
return new_aln
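# Illustrative usage of `remove_alignment_gaps` (a sketch; only columns that
# are gaps in every sequence are dropped):
#
#     aln = Align()
#     aln.add_sequence(Sequence('seq1', 'A-K-G'))
#     aln.add_sequence(Sequence('seq2', 'A-KP-'))
#     new_aln = aln.remove_alignment_gaps()
#     # column 2 is a gap in both sequences, so it is removed; columns 4 and 5
#     # survive because at least one sequence has a residue there:
#     #   seq1: 'AK-G'
#     #   seq2: 'AKP-'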
def insert_gap_at_offset(self, offset, gap_char='-'):
"""Insert a gap char at the given offset (zero-based)."""
self.__aln_positions += 1
for s in self.seqs:
s.insert_gap_at_offset(offset, gap_char)
def set_gap_char_at_offset(self, offset, gap_char):
"""Override the gap char for all sequences at a given offset."""
for s in self.seqs:
s.set_gap_char_at_offset(offset, gap_char)
def lower_case_at_offset(self, start, stop=None):
"""Lower case all the residues in the given alignment window."""
for s in self.seqs:
s.lower_case_at_offset(start, stop)
def slice_seqs(self, start, stop=None):
"""Return an array of Sequence objects from start to end."""
return [Sequence(s._hdr, s.slice_seq(start, stop)) for s in self.seqs]
def merge_alignment(self, merge_aln, ref_seq_acc: str,
ref_correspondence: Correspondence = None,
*, cluster_label=None, merge_ref_id=False, self_ref_id=False):
"""
Merges aligned sequences into the current object via a reference sequence.
Sequences in ``merge_aln`` are brought into the current alignment using
the equivalences identified in reference sequence ``ref_seq_acc`` (which
must exist in both the ``self`` and ``merge_aln``).
This function was originally written to merge FunFam alignments
according to structural equivalences identified by CORA (a multiple
structural alignment tool). Moving between structure and sequence
provides the added complication that
sequences in the structural alignment (CORA) are based on ATOM records,
whereas sequences in the merge alignment (FunFams) are based on SEQRES
records. The ``ref_correspondence`` argument allows this mapping to be
taken into account.
Args:
merge_aln (Align): An Align containing the reference
sequence and any additional sequences to merge.
ref_seq_acc (str): The accession that will be used to find the
reference sequence in the current alignment and merge_aln
ref_correspondence (Correspondence): An optional Correspondence
object that provides a mapping between the reference
sequence found in ``self`` (ATOM records) and reference
sequence as it appears in ``merge_aln`` (SEQRES records).
cluster_label (str): Provide a label to differentiate the sequences
being merged (eg for groupsim calculations). A default label
is provided if this is ``None``.
self_ref_id (str): Specify the id to use when adding the ref sequence
from the current alignment.
merge_ref_id (str): Specify the id to use when adding the ref sequence
from the merge alignment. By default this sequence is only included
in the final alignment (as ``<id>_merge``) if a custom
correspondence is provided.
Returns:
[Sequence]: Array of Sequences added to the current alignment.
Raises:
MergeCorrespondenceError: problem mapping reference
sequence between alignment and correspondence
"""
merge_aln = merge_aln.copy()
if not cluster_label:
cluster_label = self._next_merge_id()
for seq in merge_aln.seqs:
seq.set_cluster_id(cluster_label)
ref_seq_in_ref = self.find_seq_by_accession(ref_seq_acc)
ref_seq_in_ref.set_cluster_id(cluster_label)
ref_seq_in_merge = merge_aln.find_seq_by_accession(ref_seq_acc)
if self_ref_id:
ref_seq_in_ref.set_uid(self_ref_id)
# if the merge_ref_id has been specified, or there is not a 1:1 correspondence
# between reference sequence in the alignments, then the merged ref sequence
# will be included in the final alignment. Otherwise it will be removed.
if merge_ref_id:
ref_seq_in_merge.set_uid(merge_ref_id)
else:
ref_seq_in_merge.accession += '_merge'
ref_id = ref_seq_in_merge.accession_and_seginfo
ref_seq_in_merge.set_uid(ref_id)
del ref_id
if ref_seq_in_ref.uid == ref_seq_in_merge.uid:
raise err.DuplicateSequenceError((
'sequence in ref alignment [{}] cannot have the same id as '
'sequence in merge alignment [{}] (consider specifying self_ref_id '
'or merge_ref_id)').format(ref_seq_in_ref.uid, ref_seq_in_merge.uid))
self._reindex_seq_ids()
if ref_correspondence or merge_ref_id:
merge_id_to_remove = None
else:
merge_id_to_remove = ref_seq_in_merge.uid
if ref_correspondence is None:
# fake a 1:1 correspondence for internal use
# ignore any residue that does not have a seq_num (ie gap)
residues = [res for res in ref_seq_in_ref.get_residues() if res.seq_num]
for r in residues:
r.set_pdb_label(str(r.seq_num))
# LOG.debug("fake correspondence: residue={}".format(repr(r)))
ref_correspondence = Correspondence(ref_seq_acc, residues=residues)
# check: ref sequence (in self) must match the ATOM sequence in Correspondence
ref_no_gaps = ref_seq_in_ref.seq_no_gaps
corr_no_gaps = ref_correspondence.atom_sequence.seq_no_gaps
if ref_no_gaps != corr_no_gaps:
raise err.MergeCorrespondenceError(
seq_id=ref_seq_acc, aln_type='current', seq_type='ATOM',
ref_no_gaps=ref_no_gaps, corr_no_gaps=corr_no_gaps)
# check: ref sequence (in merge) must match the SEQRES sequence in Correspondence
ref_no_gaps = ref_seq_in_merge.seq_no_gaps
corr_no_gaps = ref_correspondence.seqres_sequence.seq_no_gaps
if ref_no_gaps != corr_no_gaps:
raise err.MergeCorrespondenceError(
seq_id=ref_seq_acc, aln_type='merge', seq_type='SEQRES',
ref_no_gaps=ref_no_gaps, corr_no_gaps=corr_no_gaps)
# clean up
del ref_no_gaps
del corr_no_gaps
ref_aln_pos = 0
ref_corr_pos = 0
merge_aln_pos = 0
correspondence_length = ref_correspondence.seqres_length
LOG.debug("ref_alignment.positions: {}".format(self.aln_positions))
LOG.debug("merge_alignment.positions: {}".format(merge_aln.aln_positions))
LOG.debug("ref_seq_in_ref: {}".format(str(ref_seq_in_ref)))
LOG.debug("ref_seq_in_merge: {}".format(str(ref_seq_in_merge)))
while True:
if merge_aln_pos >= merge_aln.aln_positions \
and ref_aln_pos >= self.aln_positions \
and ref_corr_pos >= correspondence_length:
break
LOG.debug("REF %s/%s; CORRESPONDENCE %s/%s; MERGE %s/%s",
ref_aln_pos, self.aln_positions, ref_corr_pos,
correspondence_length, merge_aln_pos, merge_aln.aln_positions)
# sort the gaps in the reference alignment
if ref_aln_pos < self.aln_positions:
for seq in self.slice_seqs(0, ref_aln_pos):
LOG.debug( "{:<10} {}".format("REF", str(seq)) )
ref_res_in_ref = ref_seq_in_ref.get_res_at_offset(ref_aln_pos)
LOG.debug("REF_POSITION {:>3} of {:>3} => '{}'".format(
ref_aln_pos, self.aln_positions, ref_res_in_ref))
# insert all the gaps in the reference alignment into the merge sequences
# keep doing this until we don't have any more gaps
if Sequence.is_gap(ref_res_in_ref):
LOG.debug(("GAP '{}' in ref sequence in REF alignment [{}], "
"inserting gap '{}' at position [{}] in all merge sequences").format(
ref_res_in_ref, ref_aln_pos, ref_res_in_ref, merge_aln_pos))
merge_aln.insert_gap_at_offset(merge_aln_pos, gap_char=ref_res_in_ref)
# this is a gap: do NOT increment ref_corr_pos
ref_aln_pos += 1
merge_aln_pos += 1
continue
# sort the gaps in the merge alignment
if merge_aln_pos < merge_aln.aln_positions:
# for seq in merge_aln.slice_seqs(0, merge_aln_pos):
# LOG.debug( "{:<10} {}".format("MERGE", str(seq)) )
ref_res_in_merge = ref_seq_in_merge.get_res_at_offset(merge_aln_pos)
LOG.debug("MERGE_POSITION {:>3} of {:>3} => '{}'".format(
ref_aln_pos, self.aln_positions, ref_res_in_ref))
# insert all the gaps in the merge alignment into the ref sequences
# keep doing this until we don't have any more gaps
if Sequence.is_gap(ref_res_in_merge):
LOG.debug(("GAP '{}' in ref sequence in MERGE alignment [{}], "
"inserting gap '{}' at position [{}] in all ref sequences").format(
ref_res_in_merge, merge_aln_pos, Align.MERGE_GAP_CHAR, merge_aln_pos))
self.insert_gap_at_offset(ref_aln_pos, gap_char=Align.MERGE_GAP_CHAR)
merge_aln.lower_case_at_offset(merge_aln_pos)
merge_aln.set_gap_char_at_offset(merge_aln_pos, '.')
#ref_corr_pos += 1
ref_aln_pos += 1
merge_aln_pos += 1
continue
# if there are gaps in the correspondence then we add gaps to the ref sequence here
if ref_corr_pos < correspondence_length:
for seq in ref_correspondence.to_sequences():
seq = seq.slice_seq(0, ref_corr_pos)
LOG.debug( "{:<10} {}".format("CORR", str(seq)) )
ref_res_in_corr = ref_correspondence.get_res_at_offset(ref_corr_pos)
if ref_res_in_corr.pdb_label is None:
LOG.debug(("GAP '{}' in ATOM records of correspondence [{}], "
"inserting gap '{}' at position [{}] in ref sequences").format(
'*', ref_corr_pos, Align.MERGE_GAP_CHAR, ref_aln_pos))
#merge_aln.insert_gap_at_offset(merge_aln_pos, gap_char=Align.MERGE_GAP_CHAR)
self.insert_gap_at_offset(ref_aln_pos, gap_char=Align.MERGE_GAP_CHAR)
merge_aln.lower_case_at_offset(merge_aln_pos)
merge_aln.set_gap_char_at_offset(merge_aln_pos, '.')
# IMPORTANT: do not increment merge_aln_pos
ref_corr_pos += 1
ref_aln_pos += 1
merge_aln_pos += 1
continue
ref_corr_pos += 1
ref_aln_pos += 1
merge_aln_pos += 1
LOG.info("FINISHED MERGE")
# for seq in ref_correspondence.to_sequences():
# seq = seq.slice_seq(0, ref_corr_pos)
# LOG.debug( "{:<10} {}".format("CORR", str(seq)) )
# for seq in self.seqs:
# LOG.debug( "{:<10} {}".format("REF", str(seq)) )
# for seq in merge_aln.seqs:
# LOG.debug( "{:<10} {}".format("MERGE", str(seq)) )
# add the merged sequences into this alignment
for seq in merge_aln.seqs:
self.add_sequence(seq)
# for seq in self.seqs:
# LOG.debug( "{:<10} {}".format("MERGED", str(seq)) )
# test the final, merged alignment
# 1. get sequences that correspond to the input aln
# 2. remove alignment positions where there's a gap in the reference sequence
LOG.debug("Checking merge results for %s (%s) ...",
ref_seq_acc, repr(ref_seq_in_merge._hdr))
for original_seq in merge_aln.seqs:
# searching by accession is necessary for CATH domains (since the headers
# in the structure-based alignment do not have segment information),
# however uniprot accessions can appear multiple times so we need to use
# the full id
if original_seq.is_cath_domain:
seq = self.find_seq_by_accession(original_seq.accession)
else:
seq = self.find_seq_by_id(original_seq.uid)
# LOG.debug('Working on sequence: {}'.format(str(original_seq)))
# this provides the residues in the merge alignment with seqres numbering
ref_merge_residues = ref_seq_in_merge.get_residues()
# the lookup lets us go from the seq numbering to the sequence offset
ref_merge_seqnum_to_seqpos = {}
for seq_pos, res in enumerate([res for res in ref_merge_residues if res.seq_num], 1):
ref_merge_seqnum_to_seqpos[res.seq_num] = seq_pos
if not seq:
raise err.SeqIOError("failed to find sequence with id '{}' in merge aln".format(original_seq.uid))
for aln_offset in range(self.aln_positions):
ref_res = ref_seq_in_ref.get_res_at_offset(aln_offset)
merged_res_at_aln_offset = seq.get_res_at_offset(aln_offset)
if ref_res == self.MERGE_GAP_CHAR:
# everything else should be a '.' or a lowercase residue
assert merged_res_at_aln_offset == '.' or re.match(r'[a-z]', merged_res_at_aln_offset)
elif ref_res == self.REF_GAP_CHAR:
# everything else should be a '-' or an uppercase residue
assert merged_res_at_aln_offset == '-' or re.match(r'[A-Z]', merged_res_at_aln_offset)
else:
# find the sequence offset of this aln position in the ref sequence
ref_seq_pos_in_ref = ref_seq_in_ref.get_seq_position_at_offset(aln_offset)
# use the correspondence to find the equivalent reference residue in the merge alignment
ref_corr_res = ref_correspondence.get_res_by_atom_pos(ref_seq_pos_in_ref)
ref_seq_num_in_merge = ref_corr_res.seq_num
if ref_seq_num_in_merge is None:
raise err.GeneralError(('weird... found a residue without a seq_num in the correspondence record '
' ref_seq_pos_in_ref: {}, res: {}, corr: {}').format(
ref_seq_pos_in_ref, repr(ref_corr_res), repr(ref_correspondence)))
if ref_seq_num_in_merge not in ref_merge_seqnum_to_seqpos:
raise err.OutOfBoundsError(('failed to find seq_num {} ({}) in seqnum/seqpos '
'lookup: {}\ncorrespondence (length: {})').format(
ref_seq_num_in_merge, repr(ref_corr_res), ref_merge_seqnum_to_seqpos,
ref_correspondence.seqres_length, ))
# find out where this seq_num occurs in the merge sequence (account for segment numbering)
ref_seq_pos_in_merge = ref_merge_seqnum_to_seqpos[ref_seq_num_in_merge]
# find the aln offset for the equivalent position in the original merge alignment
ref_merge_offset = ref_seq_in_merge.get_offset_at_seq_position(ref_seq_pos_in_merge)
# LOG.debug("ref_seq_pos (ref): {}, ref_seq_pos (merge): {}, correspondence_res: {}, ref_merge_offset: {}".format(
# ref_seq_pos_in_ref, ref_seq_pos_in_merge, repr(ref_corr_res), ref_merge_offset
# ))
# find the residue at the equivalent position in the merge alignment
original_res = original_seq.get_res_at_offset(ref_merge_offset)
if merged_res_at_aln_offset != original_res:
raise err.MergeCheckError(("Expected the merged residue '{}' to "
"match the original residue '{}' at alignment "
"offset {} (sequence: '{}')\n\n"
"CORR_ATOM: {}\n"
"CORR_SEQRES: {}\n"
"\n\n"
"REF_SEQ_IN_REF: {}\n"
"REF_SEQ_IN_MERGE: {}\n"
"ORIGINAL_SEQ: {}\n"
" {aln_pointer:>{merge_pos}}\n"
"MERGED_SEQ: {}\n"
" {aln_pointer:>{aln_pos}}\n"
"(aln_offset={}, seq_pos(ref)={}, seq_num(merge)={}, seq_pos(merge)={}, ref_merge_offset={})"
).format(
merged_res_at_aln_offset, original_res, aln_offset, seq.uid,
ref_correspondence.atom_sequence,
ref_correspondence.seqres_sequence,
ref_seq_in_ref.seq,
ref_seq_in_merge.seq,
original_seq.seq,
seq.seq,
aln_offset, ref_seq_pos_in_ref, ref_seq_num_in_merge, ref_seq_pos_in_merge, ref_merge_offset,
aln_pointer='^', aln_pos=(aln_offset+1), merge_pos=(ref_merge_offset+1)
))
LOG.info("Finshed checking merge for {} ({})".format(ref_seq_acc, repr(ref_seq_in_merge._hdr)))
# if we have not been given a correspondence then there's no point
# adding the reference sequence from the reference alignment (since
# there is a 1:1 mapping)
if merge_id_to_remove:
LOG.info("Removing reference sequence '%s' from alignment (because 'merge_ref_id' or 'ref_correspondence' is not set)",
merge_id_to_remove)
self.remove_sequence_by_id(merge_id_to_remove)
seqs_by_cluster_id = {}
for seq in self.seqs:
if seq.cluster_id not in seqs_by_cluster_id:
seqs_by_cluster_id[seq.cluster_id] = []
seqs_by_cluster_id[seq.cluster_id].extend([seq])
for cluster_id in seqs_by_cluster_id:
seq_ids = ', '.join([s.uid for s in seqs_by_cluster_id[cluster_id]])
LOG.debug("Cluster %s: %s", cluster_id, seq_ids)
return merge_aln.seqs
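# Illustrative usage of `merge_alignment` (a sketch; both alignments contain
# the reference accession 'ref1' and no explicit Correspondence is supplied,
# so a 1:1 SEQRES/ATOM mapping is assumed):
#
#     ref_aln = Align.from_fasta(">ref1\nAKGH\n>other\nA-GH\n")
#     funfam_aln = Align.from_fasta(">ref1\nAKGH\n>ff_member\nAKGH\n")
#     added_seqs = ref_aln.merge_alignment(funfam_aln, 'ref1')
#     # ref_aln now also contains 'ff_member'; the duplicated reference copy
#     # ('ref1_merge') is removed again because no correspondence was given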
def copy(self):
"""Return a deep copy of this object."""
new_aln = Align()
new_seqs = [s.copy() for s in self.seqs]
new_aln.seqs = new_seqs
new_aln._reindex_seq_ids()
new_aln.aln_positions = new_seqs[0].length() if new_seqs else 0
return new_aln
def to_fasta(self, wrap_width=80):
"""Returns the alignment as a string (FASTA format)"""
fasta_str = ''
for seq in self.seqs:
fasta_str += seq.to_fasta(wrap_width=wrap_width)
return fasta_str
def to_pir(self, wrap_width=80):
"""Returns the alignment as a string (PIR format)"""
pir_str = ''
for seq in self.seqs:
pir_str += seq.to_pir(wrap_width=wrap_width)
return pir_str
def write_fasta(self, fasta_file, wrap_width=80):
"""Write the alignment to a file in FASTA format."""
with open(fasta_file, 'w') as f:
for seq in self.seqs:
f.write(seq.to_fasta(wrap_width=wrap_width))
def write_pir(self, pir_file, wrap_width=80, *, use_accession=False):
"""Write the alignment to a file in PIR format."""
with open(pir_file, 'w') as f:
for seq in self.seqs:
f.write(seq.to_pir(wrap_width=wrap_width, use_accession=use_accession))
def add_scorecons(self):
"""Add scorecons annotation to this alignment."""
from cathpy.core.util import ScoreconsRunner
scons = ScoreconsRunner()
LOG.info("Calculating scorecons / DOPS ...")
# output alignment to tmp fasta file
scons_result = scons.run_alignment(self)
self.dops_score = scons_result.dops
self.seq_meta['scorecons'] = scons_result.to_string
def add_groupsim(self):
"""Add groupsim annotation to this alignment."""
from cathpy.core.util import GroupsimRunner
gs = GroupsimRunner()
LOG.info("Calculating GroupSim ...")
# output alignment to tmp fasta file
gs_result = gs.run_alignment(self)
self.seq_meta['groupsim'] = gs_result.to_string
def write_sto(self, sto_file, *, meta=None):
"""Write the alignment to a file in STOCKHOLM format."""
# putting these here to separate the data from the formatting
sto_format = '1.0'
# allow meta keys to be provided in args, otherwise fill with the
# appropriate alignment attributes
aln_meta = {}
if meta:
for key, _attr in self.STO_META_TO_ATTR:
if key in meta:
aln_meta[key] = meta[key]
comment_pad = 0
for seq in self.seqs:
comment_pad = max(comment_pad, len(seq.uid) + 1)
seq_pad = comment_pad + 8
gc_pad = seq_pad - 5
# single data point about the file
def _GF(f, key, val):
f.write('#=GF {} {}\n'.format(key, val))
# single data point about each sequence
def _GS(f, seq_id, key, val):
if key.startswith('DR_'):
val = "{}; {}".format(key[3:], val)
key = 'DR'
f.write('#=GS {:<{comment_pad}} {} {}\n'.format(seq_id, key, val, comment_pad=comment_pad))
# positional data about the file
def _GC(f, key, per_pos_str):
f.write('#=GC {:<{gc_pad}} {}\n'.format(key, per_pos_str,
gc_pad=gc_pad))
# positional data about each sequence
def _GR(f, seq_id, key, per_pos_str):
f.write('#=GR {:<{comment_pad}} {} {}\n'.format(seq_id, key, per_pos_str, comment_pad=comment_pad))
def _SEQ(f, seq):
f.write('{:<{seq_pad}} {}\n'.format(seq.uid, seq.seq, seq_pad=seq_pad))
def _START(f):
f.write('# STOCKHOLM {}\n'.format(sto_format))
def _END(f):
f.write('//\n')
with open(sto_file, 'w') as f:
_START(f)
_GF(f, 'ID', aln_meta.get('ID', self.uid))
_GF(f, 'DE', aln_meta.get('DE', self.description))
_GF(f, 'AC', aln_meta.get('AC', self.accession))
_GF(f, 'TP', aln_meta.get('TP', self.aln_type))
if self.cath_version:
_GF(f, 'DR', 'CATH: ' + self.cath_version)
if self.dops_score:
_GF(f, 'DR', 'DOPS: {:.3f}'.format(float(self.dops_score)))
for key, val in sorted(self.meta.items()):
_GF(f, key, val)
for seq in self.seqs:
for key, val in seq.meta.items():
_GS(f, seq.uid, key, val)
if self.min_bitscore:
_GF(f, 'TC', self.min_bitscore)
_GF(f, 'SQ', self.count_sequences)
for seq in self.seqs:
_SEQ(f, seq)
for key, val in sorted(self.seq_meta.items()):
_GC(f, key, val)
_END(f)
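# Illustrative usage of `write_sto` (a sketch; entries in the optional `meta`
# mapping, keyed by STOCKHOLM tag, override the alignment's own attributes):
#
#     aln.write_sto('out.sto', meta={'ID': 'my_alignment'})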
def get_meta_summary(self):
"""
Returns a summary of the meta data associated with this alignment.
This makes some assumptions about the formatting of certain `GS DR` records in
stockholm files.
"""
uniq_go_counts = {}
uniq_ec_counts = {}
cath_domain_count = 0
nodes_by_id = {}
tree = dendropy.Tree()
nodes_by_id['ROOT'] = tree.seed_node
all_taxon_terms = set()
for seq in self.seqs:
go_terms = []
ec_terms = []
org_terms = []
if seq.is_cath_domain:
cath_domain_count += 1
if 'DR_GO' in seq.meta:
go_terms = list(filter(None,
[s.strip() for s in seq.meta['DR_GO'].split(';')]))
if 'DR_EC' in seq.meta:
ec_terms = list(filter(None,
[s.strip() for s in seq.meta['DR_EC'].split(';')]))
if 'DR_ORG' in seq.meta:
org_terms = list(filter(None,
[s.strip() for s in seq.meta['DR_ORG'].split(';')]))
for go_term in go_terms:
if go_term not in uniq_go_counts:
uniq_go_counts[go_term] = 0
uniq_go_counts[go_term] += 1
for ec_term in ec_terms:
if ec_term not in uniq_ec_counts:
uniq_ec_counts[ec_term] = 0
uniq_ec_counts[ec_term] += 1
for org_term in org_terms:
all_taxon_terms.add(org_term)
for idx in range(len(org_terms)-1, 0, -1):
org_term = org_terms[idx]
parent_org_term = org_terms[idx-1] if idx > 1 else 'ROOT'
node_id = '/'.join(org_terms[:idx])
if node_id not in nodes_by_id:
nodes_by_id[node_id] = dendropy.Node(label=org_term)
node = nodes_by_id[node_id]
parent_node_id = '/'.join(org_terms[:idx-1]) if idx > 1 else 'ROOT'
if parent_node_id not in nodes_by_id:
nodes_by_id[parent_node_id] = dendropy.Node(label=parent_org_term)
parent_node = nodes_by_id[parent_node_id]
parent_node.add_child(node)
if not hasattr(node, 'sequence_count'):
setattr(node, 'sequence_count', 0)
if not hasattr(parent_node, 'sequence_count'):
setattr(parent_node, 'sequence_count', 0)
node.sequence_count += 1
taxon_namespace = dendropy.TaxonNamespace(all_taxon_terms)
tree.taxon_namespace = taxon_namespace
for node_id, node in nodes_by_id.items():
taxon_id = node_id.split('/')[-1]
node.taxon = taxon_namespace.get_taxon(taxon_id)
node.label = "{} ({})".format(node.label, node.sequence_count)
tree.seed_node.label = "ROOT ({})".format(self.count_sequences)
# LOG.info("tree:\n{}".format(tree.as_ascii_plot(show_internal_node_labels=True)))
# LOG.info("newick: {}".format(tree.as_string(schema="newick")))
organism_newick = tree.as_string(schema="newick").strip()
uniq_ec_counts = uniq_ec_counts if uniq_ec_counts else None
uniq_go_counts = uniq_go_counts if uniq_go_counts else None
return AlignMetaSummary(
ec_term_counts=uniq_ec_counts,
go_term_counts=uniq_go_counts,
cath_domain_count=cath_domain_count,
seq_count=self.count_sequences,
dops_score=float(self.dops_score) if self.dops_score is not None else None,
organism_newick=organism_newick,
)
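# Illustrative usage of `get_meta_summary` (a sketch; the term counts are only
# populated when the source STOCKHOLM file carried '#=GS ... DR' records):
#
#     summary = aln.get_meta_summary()
#     print(summary.seq_count, summary.cath_domain_count)
#     print(summary.ec_term_counts, summary.go_term_counts)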
def __str__(self):
return "\n".join([str(seq) for seq in self.seqs])
| 38.139674
| 134
| 0.561281
|
import io
import gzip
import logging
import re
import functools
import dendropy
from cathpy.core import error as err
from cathpy.core.tests import is_valid_domain_id
from cathpy.core.models import AminoAcid, AminoAcids, Residue, Segment
LOG = logging.getLogger(__name__)
class Sequence(object):
re_gap_chars = r'[.\-]'
has_warned_about_deprecated_sequence_headers = False
def __init__(self, hdr: str, seq: str, *, meta=None, description=None):
self._hdr = hdr
self._seq = seq
try:
hdr_info = Sequence.split_hdr(hdr)
except:
raise err.GeneralError('caught error while parsing sequence header: '+hdr)
self._id = hdr_info['id']
self.accession = hdr_info['accession']
self.description = description
self.id_type = hdr_info['id_type']
self.id_ver = hdr_info['id_ver']
self.segs = hdr_info['segs']
self.meta = hdr_info['meta']
if meta:
for key, val in meta.items():
self.meta[key] = val
@property
def uid(self):
return self._id
def set_uid(self, _id):
self._id = _id
@property
def is_cath_domain(self):
return self.id_type == 'domain'
def get_residues(self):
residues = []
segs = self.segs
if not segs:
segs = [Segment(1, len(self.seq_no_gaps))]
current_seg_offset = 0
def next_seg():
nonlocal current_seg_offset
if current_seg_offset < len(segs):
seg = segs[current_seg_offset]
current_seg_offset += 1
return seg
else:
return None
seg_length = 0
for seg in segs:
seg_length += seg.stop - seg.start + 1
actual_length = len(self.seq_no_gaps)
if seg_length != actual_length:
raise err.OutOfBoundsError(
('segment information {} suggests that the sequence '
'length should be {}, but the sequence has {} (non-gap) characters: {}').format(
repr(segs), seg_length, actual_length, self.seq))
current_seg = next_seg()
seq_num = current_seg.start
for offset, aa in enumerate(self.seq, 0):
if current_seg and seq_num > current_seg.stop:
current_seg = next_seg()
if not current_seg:
if not Sequence.is_gap(aa):
raise err.OutOfBoundsError(
('unable to map segment ({}) to sequence: '
'the final segment ends at {}, but the sequence has {} residues '
'(offset: {}, aa: {})').format(
repr(current_seg), seq_num-1, len(self.seq_no_gaps), offset, aa
))
else:
seq_num = None
else:
seq_num = current_seg.start
if Sequence.is_gap(aa):
res = Residue(aa)
else:
res = Residue(aa, seq_num)
seq_num += 1
residues.append(res)
return residues
def get_res_at_offset(self, offset):
try:
res = self.seq[offset]
except:
raise err.SeqIOError((
"Error: failed to get residue at offset {} from sequence "
"with length {}: '{}'").format(offset, self.length(), self.seq))
return res
def get_res_at_seq_position(self, seq_pos):
seq_nogap = re.sub(Sequence.re_gap_chars, '', self.seq)
try:
res = seq_nogap[seq_pos-1]
except:
raise err.SeqIOError((
"Error: failed to get residue at position {} from sequence with {} "
"non-gap sequence positions: '{}'").format(
seq_pos, len(seq_nogap), self.seq))
return res
def get_seq_position_at_offset(self, offset):
seq_to_offset = self.seq[:offset+1]
if re.match(seq_to_offset[-1], Sequence.re_gap_chars):
raise err.GapError(
"Cannot get sequence position at offset {} since this corresponds to a gap".format(
offset))
seq_nogap = re.sub(Sequence.re_gap_chars, '', seq_to_offset)
return len(seq_nogap)
def get_offset_at_seq_position(self, seq_pos):
current_seq_pos = 0
for offset in range(len(self.seq)):
if not re.match(Sequence.re_gap_chars, self.seq[offset]):
current_seq_pos += 1
if current_seq_pos == seq_pos:
return offset
raise err.OutOfBoundsError("failed to find offset at sequence position {}".format(seq_pos))
def length(self):
return len(self.seq)
@property
def seq(self):
return self._seq
@property
def seq_no_gaps(self):
seq = re.sub(self.re_gap_chars, '', self._seq)
return seq
def set_sequence(self, seq):
self._seq = seq
def set_cluster_id(self, id_str):
self.meta['CLUSTER_ID'] = id_str
@property
def cluster_id(self):
return self.meta['CLUSTER_ID'] if 'CLUSTER_ID' in self.meta else None
@classmethod
def split_hdr(cls, hdr: str) -> dict:
accession = None
id_type = None
id_ver = None
segs = []
meta = {}
if not hdr:
raise err.ParamError('hdr seems to be empty')
hdr_parts = hdr.split(maxsplit=1)
id_with_segs_str = hdr_parts[0]
meta_str = hdr_parts[1] if len(hdr_parts) > 1 else None
id_with_segs_parts = id_with_segs_str.split('/', maxsplit=1)
id_str = id_with_segs_parts[0]
segs_str = id_with_segs_parts[1] if len(id_with_segs_parts) > 1 else None
id_parts = id_str.split('|')
if len(id_parts) == 1:
accession = id_parts[0]
if is_valid_domain_id(accession):
id_type = 'domain'
if len(id_parts) == 2:
id_type, accession = id_parts
if len(id_parts) == 3:
id_type, id_ver, accession = id_parts
if is_valid_domain_id(id_ver):
if not __class__.has_warned_about_deprecated_sequence_headers:
LOG.warning(
("Warning: found an old sequence header with TYPE|ID|VERSION '%s'. "
"Parsing this as TYPE|VERSION|ID for now, but this is a hack, and "
"may be deprecated in future versions (fix structural cluster reps?)"
"(ignoring all future occurrences in this runtime)"), id_parts)
__class__.has_warned_about_deprecated_sequence_headers = True
id_type, accession, id_ver = id_parts
if segs_str:
for seg_str in segs_str.split('_'):
(start, stop) = seg_str.split('-')
seg = Segment(int(start), int(stop))
segs.append(seg)
if meta_str:
meta_parts = meta_str.split()
for f in meta_parts.split('=', maxsplit=1):
if len(f) == 2:
meta[f[0]] = f[1]
else:
LOG.warning("failed to parse meta feature from string %s", meta_str)
return({'accession': accession, 'id': id_with_segs_str, 'id_type': id_type,
'id_ver': id_ver, 'segs': segs, 'meta': meta})
def to_fasta(self, wrap_width=80):
fasta_str = ""
fasta_str += '>' + self.uid + '\n'
if wrap_width:
for line in Sequence._chunker(self.seq, wrap_width):
fasta_str += line + '\n'
else:
fasta_str += self.seq + '\n'
return fasta_str
def to_pir(self, wrap_width=60, use_accession=False):
pir_str = ""
pir_str += '>P1;{}\n'.format(self.uid if not use_accession else self.accession)
desc = self.description or self.accession
pir_str += desc + '\n'
seq = self.seq + '*'
if wrap_width:
for line in Sequence._chunker(seq, wrap_width):
pir_str += line + '\n'
else:
pir_str += seq + '\n'
return pir_str
def copy(self):
s = Sequence(self._hdr, self.seq, meta=self.meta)
return s
def insert_gap_at_offset(self, offset, gap_char="-"):
new_seq = self.seq[:offset] + gap_char + self.seq[offset:]
self.set_sequence(new_seq)
def set_gap_char_at_offset(self, offset, gap_char):
residues = list(self.seq)
if Sequence.is_gap(residues[offset]) and residues[offset] != gap_char:
residues[offset] = gap_char
self.set_sequence("".join(residues))
def lower_case_at_offset(self, start, stop=None):
if stop is None:
stop = start + 1
old_seq = self.seq
new_seq = old_seq[:start] + old_seq[start:stop].lower() + old_seq[stop:]
self.set_sequence(new_seq)
def set_all_gap_chars(self, gap_char='-'):
seqstr = re.sub(self.re_gap_chars, gap_char, self.seq)
self.set_sequence(seqstr)
def set_lower_case_to_gap(self, gap_char='-'):
seqstr = re.sub(r'[a-z]', gap_char, self.seq)
self.set_sequence(seqstr)
def slice_seq(self, start, stop=None):
return self.seq[start:stop]
@staticmethod
def _chunker(text_str, width):
return (text_str[pos:pos + width] for pos in range(0, len(text_str), width))
@staticmethod
def is_gap(res_char):
return res_char in ['-', '.']
@property
def accession_and_seginfo(self):
segs_str = self.seginfo
if segs_str:
return self.accession + '/' + segs_str
else:
return self.accession
@property
def seginfo(self):
segs_str = '_'.join(['-'.join([str(s.start), str(s.stop)]) for s in self.segs])
return segs_str
def apply_segments(self, segs):
if self.segs:
raise Exception("cannot apply segments as Sequence already has segments defined")
seq = self.seq
acc = self.accession
startstops = [(seg[0], seg[1]) for seg in segs]
seq_range = '_'.join(['{}-{}'.format(ss[0],ss[1]) for ss in startstops])
seq_parts = [seq[ss[0]-1:ss[1]] for ss in startstops]
subseq = Sequence(hdr="{}/{}".format(acc, seq_range), seq="".join(seq_parts))
return subseq
def __str__(self):
return '{:<30} {}'.format(self.uid, self.seq)
def __len__(self):
return len(self.seq)
class Correspondence(object):
GCF_GAP_CHAR = '*'
FASTA_GAP_CHAR = '-'
def __init__(self, uid=None, *, hdr=None, residues=None,):
self._uid = uid
self._hdr = hdr
self.residues = residues if residues else []
super().__init__()
@property
def uid(self):
return self._uid
@classmethod
def from_gcf(cls, gcf_io):
if isinstance(gcf_io, str):
if gcf_io[0] == '>':
gcf_io = io.StringIO(gcf_io)
else:
gcf_io = open(gcf_io)
try:
hdr = gcf_io.readline().strip()
hdr = hdr[1:]
uid = hdr.split('|')[-1]
except AttributeError:
raise err.SeqIOError(
"encountered an error trying to readline() on GCF io ({})".format(gcf_io))
line_no = 1
residues = []
for line in gcf_io:
line_no += 1
try:
seqres_aa, seqres_num, pdb_label, pdb_aa = line.split()
if pdb_aa is not seqres_aa and pdb_aa is not Correspondence.GCF_GAP_CHAR:
LOG.warning("pdb_aa '%s' does not match seqres_aa '%s' (line: %s)",
pdb_aa, seqres_aa, line_no)
except:
raise err.SeqIOError("Error: failed to parse GCF '{}' ({}:{})".format(
line, str(gcf_io), line_no))
if pdb_label is Correspondence.GCF_GAP_CHAR:
pdb_label = None
pdb_aa = None
res = Residue(seqres_aa, int(seqres_num), pdb_label, pdb_aa=pdb_aa)
residues.extend([res])
gcf_io.close()
corr = Correspondence(uid=uid, hdr=hdr, residues=residues)
return corr
@property
def seqres_length(self) -> int:
return len(self.residues)
@property
def atom_length(self) -> int:
atom_residues = [res for res in self.residues if res.pdb_label is not None]
return len(atom_residues)
def get_res_at_offset(self, offset: int) -> Residue:
return self.residues[offset]
def get_res_by_seq_num(self, seq_num: int) -> Residue:
res = next((res for res in self.residues if res.seq_num == seq_num), None)
return res
def get_res_by_pdb_label(self, pdb_label: str) -> Residue:
res = next((res for res in self.residues if res.pdb_label == pdb_label), None)
return res
def get_res_by_atom_pos(self, pos: int) -> Residue:
assert isinstance(pos, int)
assert pos >= 1
atom_residues = [res for res in self.residues if res.pdb_label is not None]
res = atom_residues[pos-1]
return res
def get_res_offset_by_atom_pos(self, pos: int) -> Residue:
assert isinstance(pos, int)
assert pos >= 1
atom_pos = 0
for offset, res in enumerate(self.residues):
if res.pdb_label is not None:
atom_pos += 1
if atom_pos == pos:
return offset
atom_residues = [res for res in self.residues if res.pdb_label is not None]
raise err.OutOfBoundsError(
"failed to find residue in atom pos {}, last atom residue is {} (position {})".format(
pos, repr(atom_residues[-1]), atom_pos))
@property
def first_residue(self) -> Residue:
return self.get_res_at_offset(0)
@property
def last_residue(self) -> Residue:
return self.get_res_at_offset(-1)
@property
def atom_sequence(self) -> Sequence:
_id = "atom|{}".format(self.uid)
res = [res.pdb_aa if res.pdb_label else Correspondence.FASTA_GAP_CHAR
for res in self.residues]
return Sequence(_id, "".join(res))
@property
def seqres_sequence(self) -> Sequence:
_id = "seqres|{}".format(self.uid)
res = [res.aa for res in self.residues]
return Sequence(_id, "".join(res))
def apply_seqres_segments(self, segs):
current_seg_offset = 0
def next_seg():
nonlocal current_seg_offset
if current_seg_offset < len(segs):
seg = segs[current_seg_offset]
current_seg_offset += 1
return seg
current_seg = next_seg()
selected_residues = []
for res in self.residues:
if res.seq_num >= current_seg.start and res.seq_num <= current_seg.stop:
selected_residues.append(res)
elif res.seq_num < current_seg.start:
pass
elif res.seq_num > current_seg.stop:
current_seg = next_seg()
if not current_seg:
break
else:
raise err.SeqIOError("unexpected error - shouldn't be able to reach this code")
corr = __class__(uid=self.uid, hdr=self._hdr, residues=selected_residues)
return corr
def to_gcf(self) -> str:
hdr = self._hdr if self._hdr else self.uid
gcf_str = '>' + hdr + '\n'
for res in self.residues:
if res.pdb_label:
pdb_label = '{}{}'.format(res.pdb_residue_num, res.pdb_insert_code if res.pdb_insert_code else ' ')
vals = [res.aa, res.seq_num, pdb_label, res.pdb_aa]
else:
vals = [res.aa, res.seq_num, '* ', '*']
gcf_str += '{} {:>3} {:>4} {}\n'.format(*vals)
return gcf_str
def to_sequences(self) -> [Sequence]:
seqs = (self.seqres_sequence, self.atom_sequence)
return seqs
def to_fasta(self, **kwargs) -> str:
seqs = self.to_sequences()
return seqs[0].to_fasta(**kwargs) + seqs[1].to_fasta(**kwargs)
def to_aln(self):
seqs = self.to_sequences()
return Align(seqs=seqs)
def __str__(self):
return self.to_fasta()
def __repr__(self):
return self.to_fasta()
class AlignMetaSummary(object):
def __init__(self, *, seq_count, ec_term_counts=None, go_term_counts=None,
cath_domain_count=0, dops_score=None, organism_newick=None):
self.seq_count = seq_count
self.ec_term_counts = ec_term_counts
self.go_term_counts = go_term_counts
self.cath_domain_count = cath_domain_count
self.dops_score = dops_score
self.organism_newick = organism_newick
class Align(object):
REF_GAP_CHAR = '-'
MERGE_GAP_CHAR = '.'
STO_META_TO_ATTR = [
# required
('ID', '_uid'),
('AC', 'accession'),
('DE', 'description'),
('AU', 'author'),
('SE', 'meta.source_seed'),
('SS', 'meta.source_structure'),
('BM', 'meta.build_method'),
('SM', 'meta.search_method'),
('GA', 'meta.gathering_threshold'),
('TC', 'meta.trusted_cutoff'),
('NC', 'meta.noise_cutoff'),
('AC', 'accession'),
('TP', 'aln_type'),
('TC', 'min_bitscore'),
('SQ', None),
# optional
('DC', 'meta.db_comment'),
('DR', {
'CATH': 'cath_version',
'DOPS': 'dops_score',
'INTERPRO': 'interpro',
}),
('RC', 'meta.ref_comment'),
('RN', 'meta.ref_number'),
('RM', 'meta.ref_medline'),
('RT', 'meta.ref_title'),
('RA', 'meta.ref_author'),
('RL', 'meta.ref_location'),
('PI', 'meta.prev_id'),
('KW', 'meta.keywords'),
('CC', 'meta.comment'),
('NE', 'meta.pfam_accession'),
('NL', 'meta.seq_location'),
('WK', 'meta.wikipedia_link'),
('CL', 'meta.pfam_clan'),
('MB', 'meta.pfam_clan_membership'),
# trees
('NH', 'tree_nhx'),
('TN', 'tree_id'),
]
def __init__(self, seqs=None, *, uid=None, accession=None, author=None,
cath_version=None, dops_score=None, description=None,
aln_type=None, min_bitscore=None, tree_nhx=None, tree_id=None):
self.meta = {} # per file meta data
self.seq_meta = {} # consensus sequence-based meta data
self.__seq_ids = set()
self._uid = uid
self.accession = accession
self.author = author
self.description = description
self.cath_version = cath_version
self.dops_score = dops_score
self.accession = accession
self.aln_type = aln_type
self.min_bitscore = min_bitscore
self.tree_nhx = tree_nhx
self.tree_id = tree_id
self.seqs = seqs if seqs else []
self.__aln_positions = 0
self._merge_counter = 0
@property
def uid(self):
return self._uid
def set_uid(self, uid):
self._uid = uid
def _next_merge_id(self):
self._merge_counter += 1
return self._merge_counter
@property
def sequences(self):
return self.seqs
@property
def aln_positions(self):
return self.__aln_positions
@aln_positions.setter
def aln_positions(self, value):
self.__aln_positions = value
@property
def count_sequences(self):
return len(self.seqs)
@property
def total_gap_positions(self):
total_gaps = 0
for s in self.seqs:
total_gaps += s.seq.count(self.REF_GAP_CHAR)
total_gaps += s.seq.count(self.MERGE_GAP_CHAR)
return total_gaps
@property
def total_positions(self):
return self.count_sequences * self.aln_positions
def find_first_seq_by_accession(self, acc):
seqs_with_acc = [seq for seq in self.seqs if seq.accession == acc]
return seqs_with_acc[0]
def find_seq_by_id(self, _id):
seqs_with_id = [seq for seq in self.seqs if seq.uid == _id]
if len(seqs_with_id) > 1:
raise err.SeqIOError("Found more than one ({}) sequence matching id '{}'".format(
len(seqs_with_id), _id))
if not seqs_with_id: # ie empty list
raise err.NoMatchesError('failed to find sequence with id {} in alignment'.format(_id))
return seqs_with_id[0]
def find_seq_by_accession(self, acc):
seqs_with_acc = [seq for seq in self.seqs if seq.accession == acc]
if len(seqs_with_acc) > 1:
raise err.TooManyMatchesError(
"Found more than one ({}) sequence matching accession '{}'".format(
len(seqs_with_acc), acc),)
if len(seqs_with_acc) == 0:
raise err.NoMatchesError(
'failed to find sequence with accession {} in alignment'.format(acc))
return seqs_with_acc[0]
def get_seq_at_offset(self, offset):
return self.seqs[offset]
@classmethod
def from_fasta(cls, fasta_io):
aln = Align()
aln.read_sequences_from_fasta(fasta_io)
return aln
@classmethod
def from_pir(cls, pir_io):
aln = Align()
aln.read_sequences_from_pir(pir_io)
return aln
@staticmethod
def _get_io_from_file_or_string(file_or_string):
filename = str(file_or_string)
if isinstance(file_or_string, str):
filename = '<string>'
if file_or_string[0] in ('>', '
_io = io.StringIO(file_or_string)
elif file_or_string.endswith('.gz'):
_io = gzip.open(file_or_string, 'rt')
else:
_io = open(file_or_string, 'rt')
elif isinstance(file_or_string, io.IOBase):
_io = file_or_string
else:
_io = file_or_string
LOG.warning("unexpected io type: %s", repr(file_or_string))
return _io, filename
@classmethod
def from_stockholm(cls, sto_io, *, nowarnings=False):
sto_io, sto_filename = cls._get_io_from_file_or_string(sto_io)
aln = cls()
sto_header = sto_io.readline()
assert sto_header.startswith('
aln_meta = {}
aln_seq_meta = {}
seq_meta_by_id = {}
seq_aa_by_id = {}
aln_meta_unrecognised_features = {}
gc_meta_to_attr = {meta: attr for (meta, attr) in cls.STO_META_TO_ATTR}
line_count = 0
for line in sto_io:
line_count += 1
line = line.strip()
if line.startswith('
try:
_, feature, per_file_ann = line.split(None, 2)
except ValueError:
if not nowarnings:
LOG.warning('ignoring GF record with incorrect columns (%s:%s "%s")',
sto_filename, line_count, line)
except:
raise err.ParseError('failed to parse line {} "{}"'.format(
line_count, line))
if feature not in gc_meta_to_attr:
raise err.ParseError(
'encountered unexpected GF tag {} in line {} "{}" (known tags: {})'.format(
feature, line_count, line, repr(gc_meta_to_attr)))
attr = gc_meta_to_attr[feature]
if type(attr) is dict:
key, val = re.compile(r'[;:]\s+').split(per_file_ann, maxsplit=1)
per_file_ann = val
if key in attr:
attr = attr[key]
else:
LOG.warning('encountered unexpected GF tag %s->%s in line %s "%s" (known tags: %s)',
feature, key, line_count, line, repr(attr))
if feature not in aln_meta_unrecognised_features:
aln_meta_unrecognised_features[feature] = []
aln_meta_unrecognised_features[feature].extend([per_file_ann])
attr = None
if attr:
if attr.startswith('meta.'):
attr = attr[len('meta.'):]
aln_meta[attr] = per_file_ann
else:
LOG.debug('setting aln attr "%s" to "%s"', attr, per_file_ann)
setattr(aln, attr, per_file_ann)
elif line.startswith('
try:
_, feature, per_col_ann = line.split(None, 2)
aln_seq_meta[feature] = per_col_ann
except ValueError:
if not nowarnings:
LOG.warning('ignoring GC record with incorrect columns (%s:%s "%s")',
sto_filename, line_count, line)
except:
raise err.ParseError('failed to parse line {} "{}"'.format(
line_count, line))
elif line.startswith('
try:
_, seq_id, feature, per_seq_ann = line.split(None, 3)
if feature == 'DR':
dr_type, per_seq_ann = per_seq_ann.split(None, 1)
dr_type = dr_type.rstrip(';')
feature = feature + '_' + dr_type
if seq_id not in seq_meta_by_id:
seq_meta_by_id[seq_id] = {}
seq_meta_by_id[seq_id][feature] = per_seq_ann
except ValueError:
if not nowarnings:
LOG.warning('ignoring GS record with incorrect columns (%s:%s "%s")',
sto_filename, line_count, line)
except:
raise err.ParseError('failed to parse line {} "{}"'.format(
line_count, line))
            elif line.startswith('#=GR'):
_, seq_id, feature, per_res_ann = line.split(None, 3)
seq_meta_by_id[seq_id][feature] = per_res_ann
elif line.startswith('//'):
pass
else:
seq_id, seq_aa = line.split()
if seq_id not in seq_aa_by_id:
seq_aa_by_id[seq_id] = ''
seq_aa_by_id[seq_id] += seq_aa
for seq_id, seq_aa in seq_aa_by_id.items():
seq_meta = seq_meta_by_id[seq_id] if seq_id in seq_meta_by_id else {}
seq = Sequence(seq_id, seq_aa, meta=seq_meta)
aln.add_sequence(seq)
for key, val in aln_meta.items():
aln.meta[key] = val
for key, val in aln_seq_meta.items():
aln.seq_meta[key] = val
sto_io.close()
return aln
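    # ------------------------------------------------------------------
    # Illustrative usage sketch (not part of the original module); the
    # path below is a placeholder. from_stockholm() also accepts an open
    # file handle or a raw "# STOCKHOLM ..." string:
    #
    #   aln = Align.from_stockholm('family.sto')
    #   print(aln.count_sequences, aln.aln_positions)
    # ------------------------------------------------------------------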
def read_sequences_from_fasta(self, fasta_io):
fasta_io, fasta_filename = __class__._get_io_from_file_or_string(fasta_io)
re_seqstr = re.compile(r'^[a-zA-Z.\-]+$')
seq_added = 0
current_hdr = None
current_seq = ''
line_count = 0
for line in fasta_io:
line_count += 1
line = line.rstrip()
if line == "":
break
if line[0] == '>':
if current_seq:
seq = Sequence(current_hdr, current_seq)
self.add_sequence(seq)
current_seq = ''
seq_added += 1
current_hdr = line[1:]
else:
if not re_seqstr.match(line):
raise err.SeqIOError(
('encountered an error parsing FASTA: '
'string "{}" does not look like a sequence ({}:{})').format(
line, fasta_filename, line_count))
if not current_hdr:
raise err.SeqIOError(
('encountered an error parsing FASTA: '
'found sequence "{}" without a header ({}:{})').format(
line, fasta_filename, line_count))
current_seq += str(line)
fasta_io.close()
if current_seq:
seq = Sequence(current_hdr, current_seq)
self.add_sequence(seq)
seq_added += 1
return seq_added
def read_sequences_from_pir(self, pir_io):
pir_io, pir_filename = __class__._get_io_from_file_or_string(pir_io)
re_seqstr = re.compile(r'^[a-zA-Z.\-]+\*?$')
seq_added = 0
current_hdr = None
current_desc = None
current_seq = ''
line_count = 0
for line in pir_io:
line_count += 1
line = line.rstrip()
if line == "":
continue
if line[0] == '>':
# following line is description as free text
if current_seq:
current_seq = current_seq.replace("*", "")
seq = Sequence(current_hdr, current_seq, description=current_desc)
self.add_sequence(seq)
current_seq = ''
seq_added += 1
seq_type, current_hdr = line[1:].split(';')
line = next(pir_io).rstrip()
current_desc = line
else:
if not re_seqstr.match(line):
raise err.SeqIOError(
('encountered an error parsing PIR: '
'string "{}" does not look like a sequence ({}:{})').format(
line, pir_filename, line_count))
if not current_hdr:
raise err.SeqIOError(
('encountered an error parsing PIR: '
'found sequence "{}" without a header ({}:{})').format(
line, pir_filename, line_count))
current_seq += str(line)
pir_io.close()
if current_seq:
current_seq = current_seq.replace("*", "")
seq = Sequence(current_hdr, current_seq, description=current_desc)
self.add_sequence(seq)
seq_added += 1
return seq_added
def _reindex_seq_ids(self):
self.__seq_ids = set()
for seq in self.seqs:
self.__seq_ids.add(seq.uid)
def add_sequence(self, seq:Sequence, *, offset:int=None):
if not offset:
offset = len(self.sequences)
if seq.uid in self.__seq_ids:
raise err.SeqIOError((
"Error: cannot add a sequence with id {}, "
"since this alignment already has a sequence with that id. [{}]").format(
seq.uid, ",".join(self.__seq_ids)))
if self.aln_positions:
if self.aln_positions != seq.length():
raise err.SeqIOError((
"Error: cannot add a sequence (id:{}) "
"with {} positions to an alignment with {} positions.").format(
seq.uid, seq.length(), self.aln_positions))
else:
self.__aln_positions = seq.length()
self.seqs.insert(offset, seq)
self.__seq_ids.add(seq.uid)
return seq
def subset(self, ids, *, collapse_gaps=True):
seqs = [self.find_seq_by_id(i) for i in ids]
new_align = Align(seqs=seqs)
if collapse_gaps:
new_align = new_align.remove_alignment_gaps()
return new_align
def remove_sequence_by_id(self, seq_id: str):
for idx, seq in enumerate(self.seqs):
if seq.uid == seq_id:
LOG.info("Removing sequence with '{}' from alignment".format(seq_id))
del self.seqs[idx]
return seq
raise err.NoMatchesError('failed to find sequence with id {}'.format(seq_id))
def remove_alignment_gaps(self):
seqs = self.seqs
seq_length = seqs[0].length()
new_seq_strings = ["" for s in range(len(seqs))]
for aln_offset in range(seq_length):
total_gaps = 0
for seq in seqs:
if seq.seq[aln_offset] == '-' or seq.seq[aln_offset] == '.':
total_gaps += 1
if total_gaps < len(seqs):
for seq_pos in range(len(seqs)):
res = seqs[seq_pos].seq[aln_offset]
# print( "seq[{}:{}] pos:{} res:{}".format(
# aln_offset, seqs[seq_pos].uid, seq_pos, res) )
new_seq_strings[seq_pos] += res
else:
LOG.debug("Removing complete gap from alignment offset: %s", aln_offset)
new_aln = Align()
for seq_pos in range(len(new_seq_strings)):
hdr = seqs[seq_pos]._hdr
seq_str = new_seq_strings[seq_pos]
seq = Sequence(hdr, seq_str)
new_aln.add_sequence(seq)
return new_aln
def insert_gap_at_offset(self, offset, gap_char='-'):
self.__aln_positions += 1
for s in self.seqs:
s.insert_gap_at_offset(offset, gap_char)
def set_gap_char_at_offset(self, offset, gap_char):
for s in self.seqs:
s.set_gap_char_at_offset(offset, gap_char)
def lower_case_at_offset(self, start, stop=None):
for s in self.seqs:
s.lower_case_at_offset(start, stop)
def slice_seqs(self, start, stop=None):
return [Sequence(s._hdr, s.slice_seq(start, stop)) for s in self.seqs]
def merge_alignment(self, merge_aln, ref_seq_acc: str,
ref_correspondence: Correspondence = None,
*, cluster_label=None, merge_ref_id=False, self_ref_id=False):
merge_aln = merge_aln.copy()
if not cluster_label:
cluster_label = self._next_merge_id()
for seq in merge_aln.seqs:
seq.set_cluster_id(cluster_label)
ref_seq_in_ref = self.find_seq_by_accession(ref_seq_acc)
ref_seq_in_ref.set_cluster_id(cluster_label)
ref_seq_in_merge = merge_aln.find_seq_by_accession(ref_seq_acc)
if self_ref_id:
ref_seq_in_ref.set_uid(self_ref_id)
# if the merge_ref_id has been specified, or there is not a 1:1 correspondence
# between reference sequence in the alignments, then the merged ref sequence
# will be included in the final alignment. Otherwise it will be removed.
if merge_ref_id:
ref_seq_in_merge.set_uid(merge_ref_id)
else:
ref_seq_in_merge.accession += '_merge'
ref_id = ref_seq_in_merge.accession_and_seginfo
ref_seq_in_merge.set_uid(ref_id)
del ref_id
        if ref_seq_in_ref.uid == ref_seq_in_merge.uid:
raise err.DuplicateSequenceError((
'sequence in ref alignment [{}] cannot have the same id as '
'sequence in merge alignment [{}] (consider specifying self_ref_id'
'or merge_ref_id)').format(ref_seq_in_ref.uid, ref_seq_in_merge.uid))
self._reindex_seq_ids()
if ref_correspondence or merge_ref_id:
merge_id_to_remove = None
else:
merge_id_to_remove = ref_seq_in_merge.uid
if ref_correspondence is None:
# fake a 1:1 correspondence for internal use
# ignore any residue that does not have a seq_num (ie gap)
residues = [res for res in ref_seq_in_ref.get_residues() if res.seq_num]
for r in residues:
r.set_pdb_label(str(r.seq_num))
# LOG.debug("fake correspondence: residue={}".format(repr(r)))
ref_correspondence = Correspondence(ref_seq_acc, residues=residues)
# check: ref sequence (in self) must match the ATOM sequence in Correspondence
ref_no_gaps = ref_seq_in_ref.seq_no_gaps
corr_no_gaps = ref_correspondence.atom_sequence.seq_no_gaps
if ref_no_gaps != corr_no_gaps:
raise err.MergeCorrespondenceError(
seq_id=ref_seq_acc, aln_type='current', seq_type='ATOM',
ref_no_gaps=ref_no_gaps, corr_no_gaps=corr_no_gaps)
# check: ref sequence (in merge) must match the SEQRES sequence in Correspondence
ref_no_gaps = ref_seq_in_merge.seq_no_gaps
corr_no_gaps = ref_correspondence.seqres_sequence.seq_no_gaps
if ref_no_gaps != corr_no_gaps:
raise err.MergeCorrespondenceError(
seq_id=ref_seq_acc, aln_type='merge', seq_type='SEQRES',
ref_no_gaps=ref_no_gaps, corr_no_gaps=corr_no_gaps)
# clean up
del ref_no_gaps
del corr_no_gaps
ref_aln_pos = 0
ref_corr_pos = 0
merge_aln_pos = 0
correspondence_length = ref_correspondence.seqres_length
LOG.debug("ref_alignment.positions: {}".format(self.aln_positions))
LOG.debug("merge_alignment.positions: {}".format(merge_aln.aln_positions))
LOG.debug("ref_seq_in_ref: {}".format(str(ref_seq_in_ref)))
LOG.debug("ref_seq_in_merge: {}".format(str(ref_seq_in_merge)))
while True:
if merge_aln_pos >= merge_aln.aln_positions \
and ref_aln_pos >= self.aln_positions \
and ref_corr_pos >= correspondence_length:
break
LOG.debug("REF %s/%s; CORRESPONDENCE %s/%s; MERGE %s/%s",
ref_aln_pos, self.aln_positions, ref_corr_pos,
correspondence_length, merge_aln_pos, merge_aln.aln_positions)
# sort the gaps in the reference alignment
if ref_aln_pos < self.aln_positions:
for seq in self.slice_seqs(0, ref_aln_pos):
LOG.debug( "{:<10} {}".format("REF", str(seq)) )
ref_res_in_ref = ref_seq_in_ref.get_res_at_offset(ref_aln_pos)
LOG.debug("REF_POSITION {:>3} of {:>3} => '{}'".format(
ref_aln_pos, self.aln_positions, ref_res_in_ref))
# insert all the gaps in the reference alignment into the merge sequences
# keep doing this until we don't have any more gaps
if Sequence.is_gap(ref_res_in_ref):
LOG.debug(("GAP '{}' in ref sequence in REF alignment [{}], "
"inserting gap '{}' at position [{}] in all merge sequences").format(
ref_res_in_ref, ref_aln_pos, ref_res_in_ref, merge_aln_pos))
merge_aln.insert_gap_at_offset(merge_aln_pos, gap_char=ref_res_in_ref)
ref_aln_pos += 1
merge_aln_pos += 1
continue
if merge_aln_pos < merge_aln.aln_positions:
ref_res_in_merge = ref_seq_in_merge.get_res_at_offset(merge_aln_pos)
LOG.debug("MERGE_POSITION {:>3} of {:>3} => '{}'".format(
ref_aln_pos, self.aln_positions, ref_res_in_ref))
if Sequence.is_gap(ref_res_in_merge):
LOG.debug(("GAP '{}' in ref sequence in MERGE alignment [{}], "
"inserting gap '{}' at position [{}] in all ref sequences").format(
ref_res_in_merge, merge_aln_pos, Align.MERGE_GAP_CHAR, merge_aln_pos))
self.insert_gap_at_offset(ref_aln_pos, gap_char=Align.MERGE_GAP_CHAR)
merge_aln.lower_case_at_offset(merge_aln_pos)
merge_aln.set_gap_char_at_offset(merge_aln_pos, '.')
#ref_corr_pos += 1
ref_aln_pos += 1
merge_aln_pos += 1
continue
# if there are gaps in the correspondence then we add gaps to the ref sequence here
if ref_corr_pos < correspondence_length:
for seq in ref_correspondence.to_sequences():
seq = seq.slice_seq(0, ref_corr_pos)
LOG.debug( "{:<10} {}".format("CORR", str(seq)) )
ref_res_in_corr = ref_correspondence.get_res_at_offset(ref_corr_pos)
if ref_res_in_corr.pdb_label is None:
LOG.debug(("GAP '{}' in ATOM records of correspondence [{}], "
"inserting gap '{}' at position [{}] in ref sequences").format(
'*', ref_corr_pos, Align.MERGE_GAP_CHAR, ref_aln_pos))
#merge_aln.insert_gap_at_offset(merge_aln_pos, gap_char=Align.MERGE_GAP_CHAR)
self.insert_gap_at_offset(ref_aln_pos, gap_char=Align.MERGE_GAP_CHAR)
merge_aln.lower_case_at_offset(merge_aln_pos)
merge_aln.set_gap_char_at_offset(merge_aln_pos, '.')
# IMPORTANT: do not increment merge_aln_pos
ref_corr_pos += 1
ref_aln_pos += 1
merge_aln_pos += 1
continue
ref_corr_pos += 1
ref_aln_pos += 1
merge_aln_pos += 1
LOG.info("FINISHED MERGE")
# for seq in ref_correspondence.to_sequences():
# seq = seq.slice_seq(0, ref_corr_pos)
# LOG.debug( "{:<10} {}".format("CORR", str(seq)) )
# for seq in self.seqs:
# LOG.debug( "{:<10} {}".format("REF", str(seq)) )
# for seq in merge_aln.seqs:
# LOG.debug( "{:<10} {}".format("MERGE", str(seq)) )
# add the merged sequences into this alignment
for seq in merge_aln.seqs:
self.add_sequence(seq)
# for seq in self.seqs:
# LOG.debug( "{:<10} {}".format("MERGED", str(seq)) )
# test the final, merged alignment
# 1. get sequences that correspond to the input aln
# 2. remove alignment positions where there's a gap in the reference sequence
LOG.debug("Checking merge results for %s (%s) ...",
ref_seq_acc, repr(ref_seq_in_merge._hdr))
for original_seq in merge_aln.seqs:
if original_seq.is_cath_domain:
seq = self.find_seq_by_accession(original_seq.accession)
else:
seq = self.find_seq_by_id(original_seq.uid)
ref_merge_residues = ref_seq_in_merge.get_residues()
ref_merge_seqnum_to_seqpos = {}
for seq_pos, res in enumerate([res for res in ref_merge_residues if res.seq_num], 1):
ref_merge_seqnum_to_seqpos[res.seq_num] = seq_pos
if not seq:
raise err.SeqIOError("failed to find sequence with id '{}' in merge aln".format(seq.uid))
for aln_offset in range(self.aln_positions):
ref_res = ref_seq_in_ref.get_res_at_offset(aln_offset)
merged_res_at_aln_offset = seq.get_res_at_offset(aln_offset)
if ref_res == self.MERGE_GAP_CHAR:
assert merged_res_at_aln_offset == '.' or re.match(r'[a-z]', merged_res_at_aln_offset)
elif ref_res == self.REF_GAP_CHAR:
assert merged_res_at_aln_offset == '-' or re.match(r'[A-Z]', merged_res_at_aln_offset)
else:
ref_seq_pos_in_ref = ref_seq_in_ref.get_seq_position_at_offset(aln_offset)
ref_corr_res = ref_correspondence.get_res_by_atom_pos(ref_seq_pos_in_ref)
ref_seq_num_in_merge = ref_corr_res.seq_num
if ref_seq_num_in_merge is None:
raise err.GeneralError(('weird... found a residue without a seq_num in the correspondence record '
' ref_seq_pos_in_ref: {}, res: {}, corr: {}').format(
ref_seq_pos_in_ref, repr(ref_corr_res), repr(ref_correspondence)))
if ref_seq_num_in_merge not in ref_merge_seqnum_to_seqpos:
raise err.OutOfBoundsError(('failed to find seq_num {} ({}) in seqnum/seqpos '
'lookup: {}\ncorrespondence (length: {})').format(
ref_seq_num_in_merge, repr(ref_corr_res), ref_merge_seqnum_to_seqpos,
ref_correspondence.seqres_length, ))
ref_seq_pos_in_merge = ref_merge_seqnum_to_seqpos[ref_seq_num_in_merge]
ref_merge_offset = ref_seq_in_merge.get_offset_at_seq_position(ref_seq_pos_in_merge)
original_res = original_seq.get_res_at_offset(ref_merge_offset)
if merged_res_at_aln_offset != original_res:
raise err.MergeCheckError(("Expected the merged residue '{}' to "
"match the original residue '{}' at alignment "
"offset {} (sequence: '{}')\n\n"
"CORR_ATOM: {}\n"
"CORR_SEQRES: {}\n"
"\n\n"
"REF_SEQ_IN_REF: {}\n"
"REF_SEQ_IN_MERGE: {}\n"
"ORIGINAL_SEQ: {}\n"
" {aln_pointer:>{merge_pos}}\n"
"MERGED_SEQ: {}\n"
" {aln_pointer:>{aln_pos}}\n"
"(aln_offset={}, seq_pos(ref)={}, seq_num(merge)={}, seq_pos(merge)={}, ref_merge_offset={})"
).format(
merged_res_at_aln_offset, original_res, aln_offset, seq.uid,
ref_correspondence.atom_sequence,
ref_correspondence.seqres_sequence,
ref_seq_in_ref.seq,
ref_seq_in_merge.seq,
original_seq.seq,
seq.seq,
aln_offset, ref_seq_pos_in_ref, ref_seq_num_in_merge, ref_seq_pos_in_merge, ref_merge_offset,
aln_pointer='^', aln_pos=(aln_offset+1), merge_pos=(ref_merge_offset+1)
))
LOG.info("Finshed checking merge for {} ({})".format(ref_seq_acc, repr(ref_seq_in_merge._hdr)))
        # remove the copy of the reference sequence that came from the merge
        # alignment (it is redundant when there is a 1:1 mapping)
if merge_id_to_remove:
LOG.info("Removing reference sequence '%s' from alignment (because 'merge_ref_id' or 'ref_correspondence' is not set)",
merge_id_to_remove)
self.remove_sequence_by_id(merge_id_to_remove)
seqs_by_cluster_id = {}
for seq in self.seqs:
if seq.cluster_id not in seqs_by_cluster_id:
seqs_by_cluster_id[seq.cluster_id] = []
seqs_by_cluster_id[seq.cluster_id].extend([seq])
for cluster_id in seqs_by_cluster_id:
seq_ids = ', '.join([s.uid for s in seqs_by_cluster_id[cluster_id]])
LOG.debug("Cluster %s: %s", cluster_id, seq_ids)
return merge_aln.seqs
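    # ------------------------------------------------------------------
    # Illustrative usage sketch (not part of the original module); the
    # file names and accession below are placeholders:
    #
    #   ref_aln = Align.from_stockholm('reference.sto')
    #   new_aln = Align.from_fasta('cluster_members.fasta')
    #   # with ref_correspondence=None a 1:1 correspondence is assumed
    #   ref_aln.merge_alignment(new_aln, '1abcA01')
    # ------------------------------------------------------------------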
def copy(self):
new_aln = Align()
new_seqs = [s.copy() for s in self.seqs]
new_aln.seqs = new_seqs
new_aln.aln_positions = new_aln.seqs[0].length()
return new_aln
def to_fasta(self, wrap_width=80):
fasta_str = ''
for seq in self.seqs:
fasta_str += seq.to_fasta(wrap_width=wrap_width)
return fasta_str
def to_pir(self, wrap_width=80):
pir_str = ''
for seq in self.seqs:
pir_str += seq.to_pir(wrap_width=wrap_width)
return pir_str
def write_fasta(self, fasta_file, wrap_width=80):
with open(fasta_file, 'w') as f:
for seq in self.seqs:
f.write(seq.to_fasta(wrap_width=wrap_width))
def write_pir(self, pir_file, wrap_width=80, *, use_accession=False):
with open(pir_file, 'w') as f:
for seq in self.seqs:
f.write(seq.to_pir(wrap_width=wrap_width, use_accession=use_accession))
def add_scorecons(self):
from cathpy.core.util import ScoreconsRunner
scons = ScoreconsRunner()
LOG.info("Calculating scorecons / DOPS ...")
# output alignment to tmp fasta file
scons_result = scons.run_alignment(self)
self.dops_score = scons_result.dops
self.seq_meta['scorecons'] = scons_result.to_string
def add_groupsim(self):
from cathpy.core.util import GroupsimRunner
gs = GroupsimRunner()
LOG.info("Calculating GroupSim ...")
# output alignment to tmp fasta file
gs_result = gs.run_alignment(self)
self.seq_meta['groupsim'] = gs_result.to_string
def write_sto(self, sto_file, *, meta=None):
# putting these here to separate the data from the formatting
sto_format = '1.0'
# allow meta keys to be provided in args, otherwise fill with the
# appropriate alignment attributes
aln_meta = {}
if meta:
for key, attr in self.STO_META_TO_ATTR:
aln_meta[key] = meta.get(key, None)
comment_pad = 0
for seq in self.seqs:
comment_pad = max(comment_pad, len(seq.uid) + 1)
seq_pad = comment_pad + 8
gc_pad = seq_pad - 5
# single data point about the file
def _GF(f, key, val):
            f.write('#=GF {} {}\n'.format(key, val))
# single data point about each sequence
def _GS(f, seq_id, key, val):
if key.startswith('DR_'):
val = "{}; {}".format(key[3:], val)
key = 'DR'
            f.write('#=GS {:<{comment_pad}} {} {}\n'.format(seq_id, key, val, comment_pad=comment_pad))
# positional data about the file
def _GC(f, key, per_pos_str):
            f.write('#=GC {:<{gc_pad}} {}\n'.format(key, per_pos_str,
                                                    gc_pad=gc_pad))
# positional data about each sequence
def _GR(f, seq_id, key, per_pos_str):
            f.write('#=GR {:<{comment_pad}} {} {}\n'.format(seq_id, key, per_pos_str, comment_pad=comment_pad))
def _SEQ(f, seq):
f.write('{:<{seq_pad}} {}\n'.format(seq.uid, seq.seq, seq_pad=seq_pad))
def _START(f):
            f.write('# STOCKHOLM {}\n'.format(sto_format))
def _END(f):
f.write('//\n')
with open(sto_file, 'w') as f:
_START(f)
_GF(f, 'ID', aln_meta.get('ID', self.uid))
_GF(f, 'DE', aln_meta.get('DE', self.description))
_GF(f, 'AC', aln_meta.get('AC', self.accession))
_GF(f, 'TP', aln_meta.get('TP', self.aln_type))
if self.cath_version:
_GF(f, 'DR', 'CATH: ' + self.cath_version)
if self.dops_score:
_GF(f, 'DR', 'DOPS: {:.3f}'.format(float(self.dops_score)))
for key, val in sorted(self.meta.items()):
_GF(f, key, val)
for seq in self.seqs:
for key, val in seq.meta.items():
_GS(f, seq.uid, key, val)
if self.min_bitscore:
_GF(f, 'TC', self.min_bitscore)
_GF(f, 'SQ', self.count_sequences)
for seq in self.seqs:
_SEQ(f, seq)
for key, val in sorted(self.seq_meta.items()):
_GC(f, key, val)
_END(f)
def get_meta_summary(self):
uniq_go_counts = {}
uniq_ec_counts = {}
cath_domain_count = 0
nodes_by_id = {}
tree = dendropy.Tree()
nodes_by_id['ROOT'] = tree.seed_node
all_taxon_terms = set()
for seq in self.seqs:
go_terms = []
ec_terms = []
org_terms = []
if seq.is_cath_domain:
cath_domain_count += 1
if 'DR_GO' in seq.meta:
go_terms = list(filter(None,
[s.strip() for s in seq.meta['DR_GO'].split(';')]))
if 'DR_EC' in seq.meta:
ec_terms = list(filter(None,
[s.strip() for s in seq.meta['DR_EC'].split(';')]))
if 'DR_ORG' in seq.meta:
org_terms = list(filter(None,
[s.strip() for s in seq.meta['DR_ORG'].split(';')]))
for go_term in go_terms:
if go_term not in uniq_go_counts:
uniq_go_counts[go_term] = 0
uniq_go_counts[go_term] += 1
for ec_term in ec_terms:
if ec_term not in uniq_ec_counts:
uniq_ec_counts[ec_term] = 0
uniq_ec_counts[ec_term] += 1
for org_term in org_terms:
all_taxon_terms.add(org_term)
for idx in range(len(org_terms)-1, 0, -1):
org_term = org_terms[idx]
parent_org_term = org_terms[idx-1] if idx > 1 else 'ROOT'
node_id = '/'.join(org_terms[:idx])
if node_id not in nodes_by_id:
nodes_by_id[node_id] = dendropy.Node(label=org_term)
node = nodes_by_id[node_id]
parent_node_id = '/'.join(org_terms[:idx-1]) if idx > 1 else 'ROOT'
if parent_node_id not in nodes_by_id:
nodes_by_id[parent_node_id] = dendropy.Node(label=parent_org_term)
parent_node = nodes_by_id[parent_node_id]
parent_node.add_child(node)
if not hasattr(node, 'sequence_count'):
setattr(node, 'sequence_count', 0)
if not hasattr(parent_node, 'sequence_count'):
setattr(parent_node, 'sequence_count', 0)
node.sequence_count += 1
taxon_namespace = dendropy.TaxonNamespace(all_taxon_terms)
tree.taxon_namespace = taxon_namespace
for node_id, node in nodes_by_id.items():
taxon_id = node_id.split('/')[-1]
node.taxon = taxon_namespace.get_taxon(taxon_id)
node.label = "{} ({})".format(node.label, node.sequence_count)
tree.seed_node.label = "ROOT ({})".format(self.count_sequences)
# LOG.info("tree:\n{}".format(tree.as_ascii_plot(show_internal_node_labels=True)))
# LOG.info("newick: {}".format(tree.as_string(schema="newick")))
organism_newick = tree.as_string(schema="newick").strip()
uniq_ec_counts = uniq_ec_counts if uniq_ec_counts else None
uniq_go_counts = uniq_go_counts if uniq_go_counts else None
return AlignMetaSummary(
ec_term_counts=uniq_ec_counts,
go_term_counts=uniq_go_counts,
cath_domain_count=cath_domain_count,
seq_count=self.count_sequences,
dops_score=float(self.dops_score),
organism_newick=organism_newick,
)
def __str__(self):
return "\n".join([str(seq) for seq in self.seqs])
| true
| true
|
f7183a9bcfc54494fe82a755b44a74fff0114292
| 869
|
py
|
Python
|
examples/pyGriddata/manufactureGAP_patches.py
|
tmiesse/PolyADCIRC
|
a4a31dda2c2dac4cd696c0f3827dbbcea7feab33
|
[
"BSD-3-Clause"
] | 5
|
2016-03-04T19:42:32.000Z
|
2022-01-20T15:39:25.000Z
|
examples/pyGriddata/manufactureGAP_patches.py
|
tmiesse/PolyADCIRC
|
a4a31dda2c2dac4cd696c0f3827dbbcea7feab33
|
[
"BSD-3-Clause"
] | 5
|
2015-04-28T05:14:28.000Z
|
2017-01-19T12:54:59.000Z
|
examples/pyGriddata/manufactureGAP_patches.py
|
UT-CHG/PolyADCIRC
|
a4a31dda2c2dac4cd696c0f3827dbbcea7feab33
|
[
"BSD-3-Clause"
] | 5
|
2016-01-20T00:34:47.000Z
|
2022-01-02T11:00:56.000Z
|
import polyadcirc.run_framework.domain as dom
import polyadcirc.pyGriddata.manufacture_gap as manu
grid_dir = '.'
domain = dom.domain(grid_dir)
domain.read_spatial_grid()
x_values = [n.x for n in domain.node.values()]
y_values = [n.y for n in domain.node.values()]
xr = max(x_values)
xl = min(x_values)
yu = max(y_values)
yl = min(y_values)
p = [[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0.7, 0.2, 0.1, 0],
[0.1, 0.1, 0.8, 0],
[0.8, 0.2, 0, 0],
[0.2, 0.4, 0.4, 0],
[0.1, 0.2, 0.7, 0],
[0.2, 0.4, 0.4, 0],
[0.7, 0.3, 0, 0]]
x_points = (xl, 750, xr)
y_points = (yl, -1225, -750, 100, 500, 1150, 1300, yu)
rand_rect = manu.random_patches(x_points, y_points, [1, 2, 3, 4], p_sections=p)
manu.write_gapfile(rand_rect, xl, yl, 'band_sections.asc')
| 24.828571
| 79
| 0.558113
|
import polyadcirc.run_framework.domain as dom
import polyadcirc.pyGriddata.manufacture_gap as manu
grid_dir = '.'
domain = dom.domain(grid_dir)
domain.read_spatial_grid()
x_values = [n.x for n in domain.node.values()]
y_values = [n.y for n in domain.node.values()]
xr = max(x_values)
xl = min(x_values)
yu = max(y_values)
yl = min(y_values)
p = [[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0.7, 0.2, 0.1, 0],
[0.1, 0.1, 0.8, 0],
[0.8, 0.2, 0, 0],
[0.2, 0.4, 0.4, 0],
[0.1, 0.2, 0.7, 0],
[0.2, 0.4, 0.4, 0],
[0.7, 0.3, 0, 0]]
x_points = (xl, 750, xr)
y_points = (yl, -1225, -750, 100, 500, 1150, 1300, yu)
rand_rect = manu.random_patches(x_points, y_points, [1, 2, 3, 4], p_sections=p)
manu.write_gapfile(rand_rect, xl, yl, 'band_sections.asc')
| true
| true
|
f7183abbb18c9023d0fc21e6873bb8ac7147a1c6
| 3,095
|
py
|
Python
|
networkx/algorithms/__init__.py
|
youssefmahmoud89/networkx
|
cbf88aaff39ae9247eec426d4a416e759667a15b
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/algorithms/__init__.py
|
youssefmahmoud89/networkx
|
cbf88aaff39ae9247eec426d4a416e759667a15b
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/algorithms/__init__.py
|
youssefmahmoud89/networkx
|
cbf88aaff39ae9247eec426d4a416e759667a15b
|
[
"BSD-3-Clause"
] | null | null | null |
from networkx.algorithms.assortativity import *
from networkx.algorithms.block import *
from networkx.algorithms.boundary import *
from networkx.algorithms.centrality import *
from networkx.algorithms.cluster import *
from networkx.algorithms.clique import *
from networkx.algorithms.community import *
from networkx.algorithms.components import *
from networkx.algorithms.coloring import *
from networkx.algorithms.core import *
from networkx.algorithms.cycles import *
from networkx.algorithms.dag import *
from networkx.algorithms.distance_measures import *
from networkx.algorithms.dominance import *
from networkx.algorithms.dominating import *
from networkx.algorithms.hierarchy import *
from networkx.algorithms.hybrid import *
from networkx.algorithms.matching import *
from networkx.algorithms.minors import *
from networkx.algorithms.mis import *
from networkx.algorithms.mst import *
from networkx.algorithms.link_analysis import *
from networkx.algorithms.link_prediction import *
from networkx.algorithms.operators import *
from networkx.algorithms.shortest_paths import *
from networkx.algorithms.smetric import *
from networkx.algorithms.triads import *
from networkx.algorithms.traversal import *
from networkx.algorithms.isolate import *
from networkx.algorithms.euler import *
from networkx.algorithms.vitality import *
from networkx.algorithms.chordal import *
from networkx.algorithms.richclub import *
from networkx.algorithms.distance_regular import *
from networkx.algorithms.swap import *
from networkx.algorithms.graphical import *
from networkx.algorithms.simple_paths import *
import networkx.algorithms.assortativity
import networkx.algorithms.bipartite
import networkx.algorithms.centrality
import networkx.algorithms.cluster
import networkx.algorithms.clique
import networkx.algorithms.components
import networkx.algorithms.connectivity
import networkx.algorithms.coloring
import networkx.algorithms.flow
import networkx.algorithms.isomorphism
import networkx.algorithms.link_analysis
import networkx.algorithms.shortest_paths
import networkx.algorithms.traversal
import networkx.algorithms.chordal
import networkx.algorithms.operators
import networkx.algorithms.tree
# bipartite
from networkx.algorithms.bipartite import (projected_graph, project, is_bipartite,
complete_bipartite_graph)
# connectivity
from networkx.algorithms.connectivity import (minimum_edge_cut, minimum_node_cut,
average_node_connectivity, edge_connectivity, node_connectivity,
stoer_wagner, all_pairs_node_connectivity, all_node_cuts)
# isomorphism
from networkx.algorithms.isomorphism import (is_isomorphic, could_be_isomorphic,
fast_could_be_isomorphic, faster_could_be_isomorphic)
# flow
from networkx.algorithms.flow import (maximum_flow, maximum_flow_value,
minimum_cut, minimum_cut_value, capacity_scaling, network_simplex,
min_cost_flow_cost, max_flow_min_cost, min_cost_flow, cost_of_flow)
from .tree.recognition import *
from .tree.branchings import (
maximum_branching, minimum_branching,
maximum_spanning_arborescence, minimum_spanning_arborescence
)
| 40.723684
| 82
| 0.850404
|
from networkx.algorithms.assortativity import *
from networkx.algorithms.block import *
from networkx.algorithms.boundary import *
from networkx.algorithms.centrality import *
from networkx.algorithms.cluster import *
from networkx.algorithms.clique import *
from networkx.algorithms.community import *
from networkx.algorithms.components import *
from networkx.algorithms.coloring import *
from networkx.algorithms.core import *
from networkx.algorithms.cycles import *
from networkx.algorithms.dag import *
from networkx.algorithms.distance_measures import *
from networkx.algorithms.dominance import *
from networkx.algorithms.dominating import *
from networkx.algorithms.hierarchy import *
from networkx.algorithms.hybrid import *
from networkx.algorithms.matching import *
from networkx.algorithms.minors import *
from networkx.algorithms.mis import *
from networkx.algorithms.mst import *
from networkx.algorithms.link_analysis import *
from networkx.algorithms.link_prediction import *
from networkx.algorithms.operators import *
from networkx.algorithms.shortest_paths import *
from networkx.algorithms.smetric import *
from networkx.algorithms.triads import *
from networkx.algorithms.traversal import *
from networkx.algorithms.isolate import *
from networkx.algorithms.euler import *
from networkx.algorithms.vitality import *
from networkx.algorithms.chordal import *
from networkx.algorithms.richclub import *
from networkx.algorithms.distance_regular import *
from networkx.algorithms.swap import *
from networkx.algorithms.graphical import *
from networkx.algorithms.simple_paths import *
import networkx.algorithms.assortativity
import networkx.algorithms.bipartite
import networkx.algorithms.centrality
import networkx.algorithms.cluster
import networkx.algorithms.clique
import networkx.algorithms.components
import networkx.algorithms.connectivity
import networkx.algorithms.coloring
import networkx.algorithms.flow
import networkx.algorithms.isomorphism
import networkx.algorithms.link_analysis
import networkx.algorithms.shortest_paths
import networkx.algorithms.traversal
import networkx.algorithms.chordal
import networkx.algorithms.operators
import networkx.algorithms.tree
from networkx.algorithms.bipartite import (projected_graph, project, is_bipartite,
complete_bipartite_graph)
from networkx.algorithms.connectivity import (minimum_edge_cut, minimum_node_cut,
average_node_connectivity, edge_connectivity, node_connectivity,
stoer_wagner, all_pairs_node_connectivity, all_node_cuts)
from networkx.algorithms.isomorphism import (is_isomorphic, could_be_isomorphic,
fast_could_be_isomorphic, faster_could_be_isomorphic)
from networkx.algorithms.flow import (maximum_flow, maximum_flow_value,
minimum_cut, minimum_cut_value, capacity_scaling, network_simplex,
min_cost_flow_cost, max_flow_min_cost, min_cost_flow, cost_of_flow)
from .tree.recognition import *
from .tree.branchings import (
maximum_branching, minimum_branching,
maximum_spanning_arborescence, minimum_spanning_arborescence
)
| true
| true
|
f7183b94cf580f5a6e9ee4b7d2af068f678ffd8b
| 5,971
|
py
|
Python
|
customSDK/servicefabric/models/partition_reconfiguration_completed_event.py
|
leikong/service-fabric-cli
|
6ec1b1c8445b7cc5a889f3b172b47a6017c8888c
|
[
"MIT"
] | 1
|
2020-06-16T22:32:27.000Z
|
2020-06-16T22:32:27.000Z
|
customSDK/servicefabric/models/partition_reconfiguration_completed_event.py
|
leikong/service-fabric-cli
|
6ec1b1c8445b7cc5a889f3b172b47a6017c8888c
|
[
"MIT"
] | null | null | null |
customSDK/servicefabric/models/partition_reconfiguration_completed_event.py
|
leikong/service-fabric-cli
|
6ec1b1c8445b7cc5a889f3b172b47a6017c8888c
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .partition_event import PartitionEvent
class PartitionReconfigurationCompletedEvent(PartitionEvent):
"""Partition Reconfiguration Completed event.
:param event_instance_id: The identifier for the FabricEvent instance.
:type event_instance_id: str
:param time_stamp: The time event was logged.
:type time_stamp: datetime
    :param has_correlated_events: Shows whether there are existing related
     events available.
:type has_correlated_events: bool
:param kind: Constant filled by server.
:type kind: str
:param partition_id: An internal ID used by Service Fabric to uniquely
identify a partition. This is a randomly generated GUID when the service
was created. The partition ID is unique and does not change for the
lifetime of the service. If the same service was deleted and recreated the
IDs of its partitions would be different.
:type partition_id: str
:param node_name: The name of a Service Fabric node.
:type node_name: str
:param node_instance_id: Id of Node instance.
:type node_instance_id: str
:param service_type: Type of Service.
:type service_type: str
:param cc_epoch_data_loss_version: CcEpochDataLoss version.
:type cc_epoch_data_loss_version: long
:param cc_epoch_config_version: CcEpochConfig version.
:type cc_epoch_config_version: long
:param reconfig_type: Type of reconfiguration.
:type reconfig_type: str
:param result: Describes reconfiguration result.
:type result: str
:param phase0_duration_ms: Duration of Phase0 in milli-seconds.
:type phase0_duration_ms: float
:param phase1_duration_ms: Duration of Phase1 in milli-seconds.
:type phase1_duration_ms: float
:param phase2_duration_ms: Duration of Phase2 in milli-seconds.
:type phase2_duration_ms: float
:param phase3_duration_ms: Duration of Phase3 in milli-seconds.
:type phase3_duration_ms: float
:param phase4_duration_ms: Duration of Phase4 in milli-seconds.
:type phase4_duration_ms: float
:param total_duration_ms: Total duration in milli-seconds.
:type total_duration_ms: float
"""
_validation = {
'event_instance_id': {'required': True},
'time_stamp': {'required': True},
'kind': {'required': True},
'partition_id': {'required': True},
'node_name': {'required': True},
'node_instance_id': {'required': True},
'service_type': {'required': True},
'cc_epoch_data_loss_version': {'required': True},
'cc_epoch_config_version': {'required': True},
'reconfig_type': {'required': True},
'result': {'required': True},
'phase0_duration_ms': {'required': True},
'phase1_duration_ms': {'required': True},
'phase2_duration_ms': {'required': True},
'phase3_duration_ms': {'required': True},
'phase4_duration_ms': {'required': True},
'total_duration_ms': {'required': True},
}
_attribute_map = {
'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
'kind': {'key': 'Kind', 'type': 'str'},
'partition_id': {'key': 'PartitionId', 'type': 'str'},
'node_name': {'key': 'NodeName', 'type': 'str'},
'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'},
'service_type': {'key': 'ServiceType', 'type': 'str'},
'cc_epoch_data_loss_version': {'key': 'CcEpochDataLossVersion', 'type': 'long'},
'cc_epoch_config_version': {'key': 'CcEpochConfigVersion', 'type': 'long'},
'reconfig_type': {'key': 'ReconfigType', 'type': 'str'},
'result': {'key': 'Result', 'type': 'str'},
'phase0_duration_ms': {'key': 'Phase0DurationMs', 'type': 'float'},
'phase1_duration_ms': {'key': 'Phase1DurationMs', 'type': 'float'},
'phase2_duration_ms': {'key': 'Phase2DurationMs', 'type': 'float'},
'phase3_duration_ms': {'key': 'Phase3DurationMs', 'type': 'float'},
'phase4_duration_ms': {'key': 'Phase4DurationMs', 'type': 'float'},
'total_duration_ms': {'key': 'TotalDurationMs', 'type': 'float'},
}
def __init__(self, event_instance_id, time_stamp, partition_id, node_name, node_instance_id, service_type, cc_epoch_data_loss_version, cc_epoch_config_version, reconfig_type, result, phase0_duration_ms, phase1_duration_ms, phase2_duration_ms, phase3_duration_ms, phase4_duration_ms, total_duration_ms, has_correlated_events=None):
super(PartitionReconfigurationCompletedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id)
self.node_name = node_name
self.node_instance_id = node_instance_id
self.service_type = service_type
self.cc_epoch_data_loss_version = cc_epoch_data_loss_version
self.cc_epoch_config_version = cc_epoch_config_version
self.reconfig_type = reconfig_type
self.result = result
self.phase0_duration_ms = phase0_duration_ms
self.phase1_duration_ms = phase1_duration_ms
self.phase2_duration_ms = phase2_duration_ms
self.phase3_duration_ms = phase3_duration_ms
self.phase4_duration_ms = phase4_duration_ms
self.total_duration_ms = total_duration_ms
self.kind = 'PartitionReconfigurationCompleted'
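

# Illustrative usage sketch (not part of the generated client code); every
# value below is a placeholder chosen only to show the constructor shape.
def _example_partition_reconfiguration_completed_event():
    import datetime
    return PartitionReconfigurationCompletedEvent(
        event_instance_id='00000000-0000-0000-0000-000000000000',
        time_stamp=datetime.datetime(2018, 1, 1, 0, 0, 0),
        partition_id='00000000-0000-0000-0000-000000000000',
        node_name='Node_0',
        node_instance_id='1',
        service_type='ExampleServiceType',
        cc_epoch_data_loss_version=0,
        cc_epoch_config_version=0,
        reconfig_type='SwapPrimary',
        result='Completed',
        phase0_duration_ms=0.0,
        phase1_duration_ms=0.0,
        phase2_duration_ms=0.0,
        phase3_duration_ms=0.0,
        phase4_duration_ms=0.0,
        total_duration_ms=100.0,
        has_correlated_events=False)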
| 50.601695
| 334
| 0.679116
|
from .partition_event import PartitionEvent
class PartitionReconfigurationCompletedEvent(PartitionEvent):
_validation = {
'event_instance_id': {'required': True},
'time_stamp': {'required': True},
'kind': {'required': True},
'partition_id': {'required': True},
'node_name': {'required': True},
'node_instance_id': {'required': True},
'service_type': {'required': True},
'cc_epoch_data_loss_version': {'required': True},
'cc_epoch_config_version': {'required': True},
'reconfig_type': {'required': True},
'result': {'required': True},
'phase0_duration_ms': {'required': True},
'phase1_duration_ms': {'required': True},
'phase2_duration_ms': {'required': True},
'phase3_duration_ms': {'required': True},
'phase4_duration_ms': {'required': True},
'total_duration_ms': {'required': True},
}
_attribute_map = {
'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
'kind': {'key': 'Kind', 'type': 'str'},
'partition_id': {'key': 'PartitionId', 'type': 'str'},
'node_name': {'key': 'NodeName', 'type': 'str'},
'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'},
'service_type': {'key': 'ServiceType', 'type': 'str'},
'cc_epoch_data_loss_version': {'key': 'CcEpochDataLossVersion', 'type': 'long'},
'cc_epoch_config_version': {'key': 'CcEpochConfigVersion', 'type': 'long'},
'reconfig_type': {'key': 'ReconfigType', 'type': 'str'},
'result': {'key': 'Result', 'type': 'str'},
'phase0_duration_ms': {'key': 'Phase0DurationMs', 'type': 'float'},
'phase1_duration_ms': {'key': 'Phase1DurationMs', 'type': 'float'},
'phase2_duration_ms': {'key': 'Phase2DurationMs', 'type': 'float'},
'phase3_duration_ms': {'key': 'Phase3DurationMs', 'type': 'float'},
'phase4_duration_ms': {'key': 'Phase4DurationMs', 'type': 'float'},
'total_duration_ms': {'key': 'TotalDurationMs', 'type': 'float'},
}
def __init__(self, event_instance_id, time_stamp, partition_id, node_name, node_instance_id, service_type, cc_epoch_data_loss_version, cc_epoch_config_version, reconfig_type, result, phase0_duration_ms, phase1_duration_ms, phase2_duration_ms, phase3_duration_ms, phase4_duration_ms, total_duration_ms, has_correlated_events=None):
super(PartitionReconfigurationCompletedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id)
self.node_name = node_name
self.node_instance_id = node_instance_id
self.service_type = service_type
self.cc_epoch_data_loss_version = cc_epoch_data_loss_version
self.cc_epoch_config_version = cc_epoch_config_version
self.reconfig_type = reconfig_type
self.result = result
self.phase0_duration_ms = phase0_duration_ms
self.phase1_duration_ms = phase1_duration_ms
self.phase2_duration_ms = phase2_duration_ms
self.phase3_duration_ms = phase3_duration_ms
self.phase4_duration_ms = phase4_duration_ms
self.total_duration_ms = total_duration_ms
self.kind = 'PartitionReconfigurationCompleted'
| true
| true
|
f7183cbd31d35f4bfaba280136946eb69c968a7d
| 25,408
|
py
|
Python
|
z3/z3_utils_hakank.py
|
Wikunia/hakank
|
030bc928d2efe8dcbc5118bda3f8ae9575d0fd13
|
[
"MIT"
] | 279
|
2015-01-10T09:55:35.000Z
|
2022-03-28T02:34:03.000Z
|
z3/z3_utils_hakank.py
|
Wikunia/hakank
|
030bc928d2efe8dcbc5118bda3f8ae9575d0fd13
|
[
"MIT"
] | 10
|
2017-10-05T15:48:50.000Z
|
2021-09-20T12:06:52.000Z
|
z3/z3_utils_hakank.py
|
Wikunia/hakank
|
030bc928d2efe8dcbc5118bda3f8ae9575d0fd13
|
[
"MIT"
] | 83
|
2015-01-20T03:44:00.000Z
|
2022-03-13T23:53:06.000Z
|
#!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Utilities and (decompositions of) global constraints in Z3.
#
# Here I have collected some useful (or perhaps not that useful) methods for z3py.
# These were added mostly to simplify the porting of my "traditional"
# constraint programming models.
#
#
#####################################################################
# Convenience wrappers for creating variables and their domains etc #
#####################################################################
#
# - makeIntVar(sol,name,min_val, max_val)
# - makeIntVarVals(sol,name,vals)
# - makeIntVars(sol,name,size, min_val, max_val)
# - makeIntVector(sol,name,min_val, max_val)
# - makeIntVectorMatrix(sol,name,rows,cols,min_value,max_value)
# - makeIntArray(sol,name,min_val, max_val)
# - makeIntArrayVector(sol,name,min_val, max_val)
#
# - makeRealVar(sol,name,min_val, max_val)
# - makeRealVars(sol,name,size, min_val, max_val)
# - makeRealVector(sol,name,min_val, max_val)
#
# - getDifferentSolution(sol,mod,*params)
# - getLessSolution(sol,mod,z)
# - getGreaterSolution(sol,mod,z)
#
# - evalArray(mod,a)
# - print_grid(sol,mod,x,num_rows,num_cols)
# - copyArray(sol,a1,name, min_val, max_val)
# - copyArrayMatrix(sol,a1,name, rows, cols, min_val, max_val)
#
#
#############################################
# Global constraints (decompositions) in Z3 #
#############################################
#
# - all_different(sol,x)
# - all_different_except_0(sol,x)
# - element(sol,ix,x,v,n)
# - element_matrix(sol,ix,jx,x,v,rows,cols)
# - increasing(sol,x)
# - decreasing(sol,x)
# - count(sol, value, x, n)
# - global_cardinality_count(sol, values, x, gcc)
# - at_most(sol, v,x,max)
# - at_least(sol, v,x,min)
# - scalar_product(sol, a,x,product)
# - product == scalar_product2(sol, a,x)
# - circuit(sol,z,path,n) See circuit.py
# - inverse(sol,f,invf,n)
# - maximum(sol,max,x)
# - v == maximum2(sol,x)
# - minimum(sol,min,x)
# - v == minimum2(sol,x)
# - abs(x)
# - toNum
# - subset_sum(sol, values, total)
# - allowed_assignments(sol,t,allowed) aka table, table_in
# - member_of(sol, e, v)
# - no_overlap(sol, s1, d1, s2, d2)
# - sliding_sum(sol, low, up, seq, x)
# - bin_packing(sol,capacity, bins, weights)
# - cumulative(sol, s, d, r, b,times_min,times_max1)
# - global_contiguity(sol, x,start,end)
# - regular(sol, x, Q, S, d, q0, F, x_len)
# - all_different_modulo(sol, x, m)
# - among(sol,m,x,v)
# - nvalue(sol, m, x, min_val,max_val)
# - clique(sol, g, clique, card)
# - all_min_dist(sol,min_dist, x, n)
# - all_different_cst(sol, xs, cst)
# - all_different_on_intersection(sol, x, y)
# - all_different_pairs(sol, a, s)
# - increasing_pairs(sol,a, s)
# - decreasing_pairs(sol,a, s)
# - pairs(sol, a, s)
# - all_differ_from_at_least_k_pos(sol, k, x)
# - all_differ_from_exact_k_pos(sol, k, vectors)
# - all_differ_from_at_most_k_pos(sol, k, x)
# - all_equal(sol,x)
# - arith(sol, x, relop, val)
# - arith_relop(sol, a, t, b)
#
#
# TODO
# lex_(le|lt|ge|gt)(sol,x,y) : array x is lexicographic (equal or) less/greater than array y
# diffn?
# subcircuit???
#
# This Z3 model was written by Hakan Kjellerstrand (hakank@gmail.com)
# See also my Z3 page: http://hakank.org/z3/
#
#
from __future__ import print_function
from z3 import *
import uuid
import time
def getNewId():
return uuid.uuid4().int
#
# Utils to create Int, IntVector, Array etc
# as well as the fiddling of evaluation and ensuring new solutions.
#
# creates Int() with a domain
def makeIntVar(sol,name,min_val, max_val):
v = Int(name)
sol.add(v >= min_val, v <= max_val)
return v
def makeIntVarVals(sol,name,vals):
v = Int(name)
sol.add(Or([v == i for i in vals]))
return v
# creates [ Int() for i in range(size)] with a domains
def makeIntVars(sol,name,size, min_val, max_val):
a = [Int("%s_%i" % (name,i)) for i in range(size)]
[sol.add(a[i] >= min_val, a[i] <= max_val) for i in range(size)]
return a
# creates an IntVector with a domain
def makeIntVector(sol,name, size, min_val, max_val):
v = IntVector(name,size)
[sol.add(v[i] >= min_val, v[i] <= max_val) for i in range(size)]
return v
def makeIntVectorMatrix(sol,name,rows,cols,min_value,max_value):
x = {}
for i in range(rows):
for j in range(cols):
x[(i,j)] = makeIntVar(sol,name + "%i_%i"%(i,j),min_value,max_value)
return x
# creates an Array with a domain
def makeIntArray(sol,name, size, min_val, max_val):
a = Array(name,IntSort(),IntSort())
[sol.add(a[i] >= min_val, a[i] <= max_val) for i in range(size)]
return a
# creates an Array with a domain, and returns an array
def makeIntArrayVector(sol,name, size, min_val, max_val):
a = Array(name,IntSort(),IntSort())
[ sol.add(a[i] >= min_val, a[i] <= max_val) for i in range(size)]
return [a[i] for i in range(size)]
def makeRealVar(sol,name,min_val, max_val):
v = Real(name)
sol.add(v >= min_val, v <= max_val)
return v
# creates [ Real() for i in range(size)] with a domains
def makeRealVars(sol,name,size, min_val, max_val):
a = [Real("%s_%i" % (name,i)) for i in range(size)]
[sol.add(a[i] >= min_val, a[i] <= max_val) for i in range(size)]
return a
# creates an IntVector with a domain
def makeRealVector(sol,name, size, min_val, max_val):
v = RealVector(name,size)
[sol.add(v[i] >= min_val, v[i] <= max_val) for i in range(size)]
return v
# creates an Array with a domain
def makeRealArray(sol,name, size, min_val, max_val):
a = Array(name,RealSort(),RealSort())
[sol.add(a[i] >= min_val, a[i] <= max_val) for i in range(size)]
return a
#
# When using
# while sol.check() == sat:
# one must add some differences to get new solutions.
#
# Usage:
#   getDifferentSolution(sol,mod,x,y,z,...)
# where x,y,z,.. are arrays.
#
# Note: For the optimization problems, one should use either
#   getLessSolution(sol,mod,z)
# for minimization problems
# or
#   getGreaterSolution(sol,mod,z)
# for maximization problems.
#
def getDifferentSolution(sol,mod, *params):
for t in params:
sol.add(Or([t[i] != mod.eval(t[i]) for i in range(len(t))]))
# special case for a matrix; requires number of rows and columns
def getDifferentSolutionMatrix(sol,mod, x, rows, cols):
sol.add(Or([x[i,j] != mod.eval(x[i,j]) for i in range(rows) for j in range(cols)]))
# ensure that we get a solution with a less value of z
def getLessSolution(sol,mod, z):
sol.add(z < mod.eval(z))
# ensure that we get a solution with a greater value of z
def getGreaterSolution(sol,mod, z):
sol.add(z > mod.eval(z))
# evalArray(mod,a)
# return an evaluated array
def evalArray(mod,a):
return [mod.eval(a[i]) for i in range(len(a))]
# print_grid(mod,x,rows,cols)
# prints an (unformatted) grid/matrix
def print_grid(mod,x,rows,cols):
for i in range(rows):
for j in range(cols):
print(mod.eval(x[(i,j)]), end=' ')
print()
print()
#
# Copy the (integer) array into an Array()
#
def copyArray(sol,a1,name, min_val, max_val):
n = len(a1)
a = makeIntArray(sol,name,n,min_val,max_val)
for i in range(n):
sol.add(a[i] == a1[i])
return a
#
# Copy the (integer) array into an Array()
#
def copyRealArray(sol,a1,name, min_val, max_val):
n = len(a1)
a = makeRealArray(sol,name,n,min_val,max_val)
for i in range(n):
sol.add(a[i] == a1[i])
return a
#
# Copy the (integer) matrix into an Array()
#
def copyArrayMatrix(sol,a1,name, rows, cols, min_val, max_val):
a = makeIntArray(sol,name,rows*cols,min_val,max_val)
for i in range(rows):
for j in range(cols):
sol.add(a[i*cols+j] == a1[i][j])
return a
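# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original collection): a tiny
# model showing how the helpers above are typically combined; all names are
# local to this example.
# ---------------------------------------------------------------------------
def _example_basic_usage():
    sol = Solver()
    x = makeIntVars(sol, "ex_x", 3, 1, 3)  # three Ints, each with domain 1..3
    sol.add(Distinct(x))
    solutions = []
    while sol.check() == sat:
        mod = sol.model()
        solutions.append(evalArray(mod, x))
        getDifferentSolution(sol, mod, x)  # exclude this assignment, then re-solve
    return solutions  # the 6 permutations of [1, 2, 3]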
#
# Decompositions of global constraints
#
# all_different_except_0/2
def all_different_except_0(sol,x):
for i in range(len(x)):
for j in range(i):
sol.add( Implies(Or(x[i] != 0, x[j] != 0), x[j] != x[i] ))
# all_different/2
# (but one should probably use Distinct/1 instead...)
def all_different(sol,x):
for i in range(len(x)):
for j in range(i):
sol.add( x[i] != x[j])
#
# element(sol,ix,x,v,n)
# v = x[ix]
# n = length of x
#
# Experimental!
#
def element(sol,ix,x,v,n):
for i in range(n):
sol.add(Implies(i==ix, v == x[i]))
#
# element_matrix(sol,ix,jx,x,v,rows,cols)
# v = x[(ix,jx)]
# where x is an matrix of rows x cols
#
# Experimental!
#
def element_matrix(sol,ix,jx,x,v,rows,cols):
for i in range(rows):
for j in range(cols):
sol.add(Implies(And(i == ix, j == jx), v == x[(i,j)]))
# increasing_strict/2
def increasing_strict(sol,x):
for i in range(len(x)-1):
sol.add(x[i] < x[i+1])
# increasing/2
def increasing(sol,x):
for i in range(len(x)-1):
sol.add(x[i] <= x[i+1])
# decreasing_strict/2
def decreasing_strict(sol,x):
for i in range(len(x)-1):
sol.add(x[i] > x[i+1])
# decreasing/2
def decreasing(sol,x):
for i in range(len(x)-1):
sol.add(x[i] >= x[i+1])
# count/4:
# * if n is Int(): count the number of occurrences of value in x
# * if n is fixed: ensure that the number of occurrences of value in the x array is exactly n
# * if both value and n are Int()'s: count one/all value(s)
def count(sol,value,x,n):
sol.add(n == Sum([If(x[i] == value, 1,0) for i in range(len(x))]))
# count/3
# same as count/4 but returns the sum value
def count2(sol,value,x):
return Sum([If(x[i] == value, 1,0) for i in range(len(x))])
# global_cardinality_count/4
# * gcc[v] contains the occurrences of the value of values[v] in array x
# (it's a generalization of count/4)
def global_cardinality_count(sol,values,x,gcc):
for v in range(len(values)):
count(sol,values[v],x,gcc[v])
# at_most/4
# * there are at most max occurrences of value v in x
def at_most(sol,v,x,max):
c = Int("c")
sol.add(c>=0, c <= len(x))
count(sol,v,x,c)
sol.add(c <= max)
# at_least/4
# * there are at least min occurrences of value v in x
def at_least(sol,v,x,min):
c = Int("c")
sol.add(c>=0, c <= len(x))
count(sol,v,x,c)
sol.add(c >= min)
# scalar_product(sol,a,x,product)
# ensures that a[*]*x[*] == product
def scalar_product(sol,a,x,product):
sol.add(product == Sum([a[i]*x[i] for i in range(len(x))]))
# product == scalar_product2(sol,a,x)
# ensures that Sum([a[i]*x[i] ... ] == product
def scalar_product2(sol,a,x):
return Sum([a[i]*x[i] for i in range(len(x))])
#
# constraint(sol,x,path,n)
# find a (Hamiltonian) circuit of x and its path path
# n is the size of x and path
def circuit(sol, x, z, n):
# z = Array('z',IntSort(), IntSort())
# for i in range(n):
# sol.add(z[i] >= 1, z[i] <= n)
#
# The main constraint is that Z[I] must not be 1
# until I = N, and for I = N it must be 1.
#
sol.add(Distinct([x[i] for i in range(n)])),
sol.add(Distinct([z[i] for i in range(n)])),
# first element of x[0] == z[0]
sol.add(x[0] == z[0])
# The last element in z must be 1 (back to original spot)
sol.add(z[n-1] == 1)
# Get the orbit for Z.
for i in range(1,n):
# I'm very happy that this element works! Z3 is cool. :-)
sol.add(x[z[i-1]] == z[i])
# inverse(..f, invf, ..)
# ensures that each value in f is the position in invf, and vice versa
# Note that we are 0-based so the domain of both arrays are 0..n-1!
#
# See inverse.py
#
def inverse(sol, f, invf, n):
for i in range(n):
for j in range(n):
sol.add((j == f[i]) == (i == invf[j]))
# v is the maximum value of x
def maximum(sol, v, x):
sol.add(Or([v == x[i] for i in range(len(x))])) # max is an element in x)
for i in range(len(x)):
sol.add(v >= x[i]) # and it's the greatest
# v == maximum2(sol,x): v is the maximum value of x
def maximum2(sol, x):
v = Int("v_%i"% uuid.uuid4().int)
sol.add(Or([v == x[i] for i in range(len(x))])) # v is an element in x)
for i in range(len(x)):
sol.add(v >= x[i]) # and it's the greatest
return v
# min is the minimum value of x
def minimum(sol, v, x):
sol.add(Or([v == x[i] for i in range(len(x))])) # v is an element in x)
for i in range(len(x)):
sol.add(v <= x[i]) # and it's the smallest
# v == minimum2(sol,x): v is the minimum value of x
def minimum2(sol, x):
v = Int("v_%i"% uuid.uuid4().int)
sol.add(Or([v == x[i] for i in range(len(x))])) # min is an element in x)
for i in range(len(x)):
sol.add(v <= x[i]) # and it's the smallest
return v
# absolute value of x
def Abs(x):
return If(x >= 0,x,-x)
# converts a number (s) <-> an array of integers (t) in the specific base.
# See toNum.py
def toNum(sol, t, s, base):
tlen = len(t)
sol.add(s == Sum([(base ** (tlen - i - 1)) * t[i] for i in range(tlen)]))
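# Illustrative usage sketch (not part of the original collection): tie the
# three base-10 digits of a number to its value via toNum.
def _example_to_num():
    sol = Solver()
    digits = makeIntVars(sol, "ex_digit", 3, 0, 9)
    num = makeIntVar(sol, "ex_num", 0, 999)
    toNum(sol, digits, num, 10)
    sol.add(num == 123)
    if sol.check() == sat:
        return evalArray(sol.model(), digits)  # expected [1, 2, 3]
    return None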
# subset_sum(sol,values,total)
# total is the sum of the values of the select elements in values
# returns array of the selected entries and the sum of the selected values
def subset_sum(sol, values, total):
n = len(values)
x = [makeIntVar(sol,"x_%i"%i,0,n) for i in range(n)]
ss = makeIntVar(sol,"ss", 0, n)
sol.add(ss == Sum([x[i] for i in range(n)]))
sol.add(total == scalar_product2(sol,x, values))
return x, ss
# allowed_assignments(sol,t,allowed):
# a.k.a. table, table_in etc
# ensure that the tuple (list) t is in the list allowed
# (of allowed assignments)
def allowed_assignments(sol,t,allowed):
len_allowed = len(allowed)
t_len = len(t)
sol.add(
Or([ And([t[a] == allowed[k][a] for a in range(t_len)])
for k in range(len_allowed)]
))
# ensure that element e is one of v
def member_of(sol, e, v):
sol.add(Or([e == i for i in v]))
# No overlapping of tasks s1 and s2
def no_overlap(sol, s1, d1, s2, d2):
sol.add(Or(s1 + d1 <= s2, s2 + d2 <= s1))
#
# sliding_sum(sol,low,up,seq,x)
# ensures that the sum of all subsequences in x of length seq
# are between low and up
# low, up, and seq must be fixed integers
#
def sliding_sum(sol, low, up, seq, x):
vlen = len(x)
for i in range(vlen-seq+1):
s = makeIntVar(sol, "s_%i"%i,low,up)
sol.add(s == Sum([x[j] for j in range(i,i+seq)]))
# bin_packing
#
# Note: capacity (and bins) might be IntVar but weights must be an int vector
#
def bin_packing(sol,capacity, bins, weights):
n = len(bins)
for b in range(n):
sol.add(Sum([ weights[j]*If(bins[j] == b,1,0) for j in range(n)] ) <= capacity)
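# Illustrative usage sketch (not part of the original collection): assign four
# weighted items to bins 0..3 so that no bin exceeds a capacity of 5.
def _example_bin_packing():
    sol = Solver()
    weights = [4, 3, 2, 1]
    bins = makeIntVars(sol, "ex_bin", len(weights), 0, len(weights) - 1)
    bin_packing(sol, 5, bins, weights)
    if sol.check() == sat:
        return evalArray(sol.model(), bins)
    return None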
#
# Decompositon of cumulative.
#
# Inspired by the MiniZinc implementation:
# http://www.g12.csse.unimelb.edu.au/wiki/doku.php?id=g12:zinc:lib:minizinc:std:cumulative.mzn&s[]=cumulative
# The MiniZinc decomposition is discussed in the paper:
# A. Schutt, T. Feydy, P.J. Stuckey, and M. G. Wallace.
# 'Why cumulative decomposition is not as bad as it sounds.'
# Download:
# http://www.cs.mu.oz.au/%7Epjs/rcpsp/papers/cp09-cu.pdf
# http://www.cs.mu.oz.au/%7Epjs/rcpsp/cumu_lazyfd.pdf
#
#
# Parameters:
#
# s: start_times assumption: array of IntVar
# d: durations assumption: array of int
# r: resources assumption: array of int
# b: resource limit assumption: IntVar or int
#
# Note: since I don't know how to extract the bounds of the
# domains, both times_min and times_max1 are required
# which is the lower/upper limits of s (the start_times).
# Which makes it slower...
#
def cumulative(sol, s, d, r, b,times_min,times_max1):
tasks = [i for i in range(len(s)) if r[i] > 0 and d[i] > 0]
# how do I get the upper/lower value of a decision variable?
# times_min = min([s[i].Min() for i in tasks])
# times_max = max([s[i].Max() + max(d) for i in tasks])
times_max = times_max1 + max(d)
for t in range(times_min, times_max + 1):
for i in tasks:
sol.add(Sum([(If(s[i] <= t,1,0) * If(t < s[i] + d[i],1,0))*r[i] for i in tasks]) <= b)
# Somewhat experimental:
    # This constraint is needed to constrain the upper limit of b.
if not isinstance(b, int):
sol.add(b <= sum(r))
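# Illustrative usage sketch (not part of the original collection): three tasks
# with fixed durations and resource demands, scheduled in 0..10 with a
# resource limit of 2.
def _example_cumulative():
    sol = Solver()
    durations = [3, 2, 4]
    demands = [1, 1, 2]
    starts = makeIntVars(sol, "ex_start", 3, 0, 10)
    cumulative(sol, starts, durations, demands, 2, 0, 10)
    if sol.check() == sat:
        return evalArray(sol.model(), starts)
    return None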
#
# Global_contiguity:
# Enforce that all 1s must be in a contiguous group.
# Assumption: There must be at least one 1.
#
def global_contiguity(sol, x,start,end):
n = len(x)
sol.add(start<=end)
for i in range(n):
        sol.add(And(i >= start, i <= end) == (x[i] == 1))
#
# Global constraint regular
#
# This is a translation of MiniZinc's regular constraint (defined in
# lib/zinc/globals.mzn), via the Comet code refered above.
# All comments are from the MiniZinc code.
# '''
# The sequence of values in array 'x' (which must all be in the range 1..S)
# is accepted by the DFA of 'Q' states with input 1..S and transition
# function 'd' (which maps (1..Q, 1..S) -> 0..Q)) and initial state 'q0'
# (which must be in 1..Q) and accepting states 'F' (which all must be in
# 1..Q). We reserve state 0 to be an always failing state.
# '''
#
# x : IntVar array
# Q : number of states
# S : input_max
# d : transition matrix
# q0: initial state
# F : accepting states
# x_len: length of x [when using Array we cannot extract the length]
#
def regular(sol, x, Q, S, d, q0, F, x_len):
assert Q > 0, 'regular: "Q" must be greater than zero'
assert S > 0, 'regular: "S" must be greater than zero'
# d2 is the same as d, except we add one extra transition for
# each possible input; each extra transition is from state zero
# to state zero. This allows us to continue even if we hit a
# non-accepted input.
# Comet: int d2[0..Q, 1..S]
d2 = []
for i in range(Q + 1):
row = []
for j in range(S):
if i == 0:
row.append(0)
else:
row.append(d[i - 1][j])
d2.append(row)
d2_flatten = [d2[i][j] for i in range(Q + 1) for j in range(S)]
d2_flatten_a = makeIntArray(sol,"d2_flatten_a_%i"%uuid.uuid4().int,len(d2_flatten),min(d2_flatten),max(d2_flatten))
for i in range(len(d2_flatten)):
sol.add(d2_flatten[i] == d2_flatten_a[i])
# If x has index set m..n, then a[m-1] holds the initial state
# (q0), and a[i+1] holds the state we're in after processing
# x[i]. If a[n] is in F, then we succeed (ie. accept the
# string).
x_range = list(range(0, x_len))
m = 0
# n = len(x)
n = x_len
a = [makeIntVar(sol,'a[%i]_%i' % (i,uuid.uuid4().int), 0, Q + 1) for i in range(m, n + 1)]
# Check that the final state is in F
member_of(sol,a[-1],F)
# First state is q0
sol.add(a[m] == q0)
for i in x_range:
sol.add(x[i] >= 1)
sol.add(x[i] <= S)
# Determine a[i+1]: a[i+1] == d2[a[i], x[i]]
sol.add(a[i + 1] == d2_flatten_a[(a[i] * S) + (x[i] - 1)])
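#
# Usage sketch for regular (added illustration). The automaton below is an
# assumption made for this example: over the alphabet {1,2} it rejects any
# sequence containing two consecutive 2s.
#
def demo_regular():
  sol = Solver()
  n = 4
  x = makeIntVector(sol, "regular_x", n, 1, 2)
  Q = 2  # states 1..2 (state 0 is the failing state)
  S = 2  # inputs 1..2
  d = [[1, 2],   # from state 1: on input 1 -> state 1, on input 2 -> state 2
       [1, 0]]   # from state 2: on input 1 -> state 1, on input 2 -> fail
  regular(sol, x, Q, S, d, 1, [1, 2], n)
  if sol.check() == sat:
    mod = sol.model()
    print("regular demo:", [mod.eval(x[i]) for i in range(n)])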
#
# all_different_modulo(sol, x, m)
#
# Ensure that all elements in x (modulo m) are distinct
#
def all_different_modulo(sol, x, m):
n = len(x)
mods = makeIntVector(sol,"mods",n, 0,m-1)
for i in range(n):
sol.add(mods[i] == x[i] % m)
sol.add(Distinct(mods))
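#
# Usage sketch for all_different_modulo (added illustration): four variables
# in 1..20 whose values must all be distinct modulo 4.
#
def demo_all_different_modulo():
  sol = Solver()
  x = makeIntVector(sol, "adm_x", 4, 1, 20)
  all_different_modulo(sol, x, 4)
  if sol.check() == sat:
    mod = sol.model()
    print("all_different_modulo demo:", [mod.eval(v) for v in x])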
# among(sol,m,x,v)
#
# Requires exactly m variables in x to take one of the values in v.
#
def among(sol,m,x,v):
sol.add(m == Sum([If(x[i] == j,1,0) for i in range(len(x)) for j in v]))
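#
# Usage sketch for among (added illustration): exactly 3 of 5 variables must
# take a value from {1, 5, 8}.
#
def demo_among():
  sol = Solver()
  x = makeIntVector(sol, "among_x", 5, 1, 8)
  among(sol, 3, x, [1, 5, 8])
  if sol.check() == sat:
    mod = sol.model()
    print("among demo:", [mod.eval(v) for v in x])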
# nvalue(sol, m, x, min_val,max_val)
#
# Requires that there are exactly m distinct values in x
# (min_val and max_val are the minimum and maximum value
# in x, respectively)
#
def nvalue(sol, m, x, min_val,max_val):
n = len(x)
sol.add(m == Sum([ If(Sum([ If(x[j] == i,1,0) for j in range(n)]) > 0,1,0) for i in range(min_val, max_val+1)]))
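#
# Usage sketch for nvalue (added illustration): force exactly two distinct
# values among five variables in 1..4.
#
def demo_nvalue():
  sol = Solver()
  x = makeIntVector(sol, "nvalue_x", 5, 1, 4)
  m = makeIntVar(sol, "nvalue_m", 1, 4)
  nvalue(sol, m, x, 1, 4)
  sol.add(m == 2)
  if sol.check() == sat:
    mod = sol.model()
    print("nvalue demo:", [mod.eval(v) for v in x], "distinct:", mod.eval(m))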
#
# clique(sol, g, clique, card)
#
# Ensure that the boolean array "clique" (of Integer Array type)
# represents a clique in the graph g with the cardinality card.
#
# Note: This is stated the other way around, but it amounts to the same thing:
# if nodes I and J (I != J) are both in the clique, then there must be an
# edge between I and J in G. If there is no such edge, then c1 and c2
# cannot both be in the clique.
#
def clique(sol, g, clique, card):
n = len(g)
sol.add(card == Sum([clique[i] for i in range(n)]))
for (c1,i) in zip(clique, range(n)):
for (c2,j) in zip(clique, range(n)):
sol.add(Implies(And(i != j, g[i][j] == 0), Or(c1 == 0, c2 == 0)))
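#
# Usage sketch for clique (added illustration). The adjacency matrix is a
# made-up 4-node graph; we ask for a clique of cardinality 3 (nodes 0,1,2).
#
def demo_clique():
  sol = Solver()
  g = [[0, 1, 1, 0],
       [1, 0, 1, 0],
       [1, 1, 0, 1],
       [0, 0, 1, 0]]
  n = len(g)
  c = makeIntVector(sol, "clique_c", n, 0, 1)
  card = makeIntVar(sol, "clique_card", 0, n)
  clique(sol, g, c, card)
  sol.add(card == 3)
  if sol.check() == sat:
    mod = sol.model()
    print("clique demo:", [mod.eval(v) for v in c])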
#
# all_min_dist(sol,min_dist, x, n)
#
# Ensures that the differences of all pairs (i != j) are
# >= min_dist.
#
def all_min_dist(sol,min_dist, x, n):
for i in range(n):
for j in range(i):
sol.add(Abs(x[i]-x[j]) >= min_dist)
#
# Ensure that all elements in xs + cst are distinct
#
def all_different_cst(sol, xs, cst):
sol.add(Distinct([(x + c) for (x,c) in zip(xs,cst)]))
#
# Ensure that the values that are common in x and y are distinct (in each array)
#
def all_different_on_intersection(sol, x, y):
_count_a_in_b(sol,x,y)
_count_a_in_b(sol,y,x)
# helper for all_different_on_intersection
def _count_a_in_b(sol,ass,bss):
for a in ass:
sol.add(Sum([If(a == b,1,0) for b in bss]) <= 1)
# all pairs must be different
def all_different_pairs(sol, a, s):
sol.add(Distinct([p for p in pairs(sol,a,s)]))
# the pairs are in increasing order
def increasing_pairs(sol,a, s):
increasing(sol,pairs(sol,a,s))
# the pairs are in decreasing order
def decreasing_pairs(sol,a, s):
decreasing(sol,pairs(sol,a,s))
# return the pairs of a in the "integer representation": a[k,0]*(n-1) + a[k,1]
# s is the maximum value the pair components can take (used in the encoding)
def pairs(sol, a, s):
n = len(a)//2
return [ a[(k,0)]*(s-1) + a[(k,1)] for k in range(n)]
#
# all_differ_from_at_least_k_pos(sol, k, x)
#
# Ensure that every pair of vectors differs in at least k positions
#
def all_differ_from_at_least_k_pos(sol, k, vectors):
n = len(vectors)
m = len(vectors[0])
for i in range(n):
for j in range(i+1,n):
sol.add(Sum([If(vectors[i][kk] != vectors[j][kk],1,0) for kk in range(m)]) >= k)
#
# all_differ_from_exact_k_pos(sol, k, vectors)
#
# Ensure that every pair of vectors differs in exactly k positions
#
def all_differ_from_exact_k_pos(sol, k, vectors):
n = len(vectors)
m = len(vectors[0])
for i in range(n):
for j in range(i+1,n):
sol.add(Sum([If(vectors[i][kk] != vectors[j][kk],1,0) for kk in range(m)]) == k)
#
# all_differ_from_at_most_k_pos(sol, k, x)
#
# Ensure that every pair of vectors differs in at most k positions
#
def all_differ_from_at_most_k_pos(sol, k, vectors):
n = len(vectors)
m = len(vectors[0])
for i in range(n):
for j in range(i+1,n):
sol.add(Sum([If(vectors[i][kk] != vectors[j][kk],1,0) for kk in range(m)]) <= k)
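#
# Usage sketch for the all_differ_from_*_k_pos constraints above (added
# illustration): three 0/1 vectors of length 4 with pairwise Hamming
# distance of at least 2.
#
def demo_all_differ_from_at_least_k_pos():
  sol = Solver()
  num_vectors, length = 3, 4
  vectors = [[makeIntVar(sol, "vec_%i_%i" % (i, j), 0, 1) for j in range(length)]
             for i in range(num_vectors)]
  all_differ_from_at_least_k_pos(sol, 2, vectors)
  if sol.check() == sat:
    mod = sol.model()
    for row in vectors:
      print("all_differ demo:", [mod.eval(v) for v in row])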
#
# all values in x must be equal
#
def all_equal(sol,x):
sol.add(And([x[i] == x[i-1] for i in range(len(x))]))
#
# Ensure that all elements in x are <relop> than val.
#
def arith(sol, x, relop, val):
for i in range(len(x)):
arith_relop(sol,x[i],relop, val)
#
# This is (arguably) a hack.
# Represents each relational operator as an integer 0..5.
#
def arith_relop(sol, a, t, b):
sol.add(Implies(t == 0,a < b))
sol.add(Implies(t == 1,a <= b))
sol.add(Implies(t == 2,a == b))
sol.add(Implies(t == 3,a >= b))
sol.add(Implies(t == 4,a > b))
sol.add(Implies(t == 5,a != b))
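#
# Usage sketch for arith/arith_relop (added illustration): relop code 3
# means ">=", so every element of x is forced to be at least 5.
#
def demo_arith():
  sol = Solver()
  x = makeIntVector(sol, "arith_x", 4, 0, 9)
  arith(sol, x, 3, 5)
  if sol.check() == sat:
    mod = sol.model()
    print("arith demo:", [mod.eval(v) for v in x])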
# Some experiments
if __name__ == "__main__":
sol = Solver()
n = 5
# x = IntVector("x",n)
# for i in range(n):
# sol.add(x[i]>=0, x[i] <= n)
x = makeIntVector(sol,"x",n,0,n)
# sol.add(Distinct(x))
# all_different_except_0(sol,x)
# all_different(sol,x)
increasing(sol,x)
# increasing_strict(sol,x)
# decreasing(sol,x)
# decreasing_strict(sol,x)
  # exactly two 0s
# count(sol,0,x,2)
# count the number of 0's
c = Int("c")
sol.add(c >= 0, c <= n)
count(sol,0,x,c) # simple example
# Here we also let the value free (i.e. not just checking 0)
# So we count the number of all values 1..n
# v = Int(v)
# sol.add(v >= 0, v <= n)
# count(sol,v,x,c)
gcc = IntVector("gcc",n+1)
for i in range(n):
sol.add(gcc[i] >= 0, gcc[i] <= n+1)
# for i in [i for i in range(n)]:
# nn = Int("nn")
# # sol.add(nn>=0, nn<=n+1)
# count(sol,i,x,gcc[i])
# # sol.add(gcc[i] == nn)
global_cardinality_count(sol,[i for i in range(0,n+1)], x, gcc)
  # enforce that we should have two 0s
# sol.add(gcc[0] == 1)
at_most(sol,2,x,2)
at_least(sol,2,x,2)
num_solutions = 0
print(sol.check())
while sol.check() == sat:
num_solutions = num_solutions + 1
mod = sol.model()
ss = [mod.eval(x[i]) for i in range(n)]
cc = mod.eval(c)
# vv = m.eval(v)
gccs = ([mod.eval(gcc[i]) for i in range(n)])
# print(ss, " #0s: ", mod.eval(cc), " v:", m.eval(v))
print(ss, " #0s: ", mod.eval(cc), " gcc:", gccs)
sol.add(
Or(
Or([x[i] != ss[i] for i in range(n)]),
cc != c
, Or([gcc[i] != gccs[i] for i in range(n)]),
#, vv != v
)
)
print("num_solutions:", num_solutions)
#
# diffn ported from MiniZinc's fzn_diffn:
#
# predicate fzn_diffn(array[int] of var int: x,
# array[int] of var int: y,
# array[int] of var int: dx,
# array[int] of var int: dy) =
# forall(i,j in index_set(x) where i < j)(
# x[i] + dx[i] <= x[j] \/ y[i] + dy[i] <= y[j] \/
# x[j] + dx[j] <= x[i] \/ y[j] + dy[j] <= y[i]
# );
#
def diffn(sol,x,y,dx,dy):
n = len(x)
for i in range(n):
for j in range(i+1,n):
sol.add(
Or([x[i] + dx[i] <= x[j],
y[i] + dy[i] <= y[j],
x[j] + dx[j] <= x[i],
y[j] + dy[j] <= y[i]]
)
)
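#
# Usage sketch for diffn (added illustration): place three rectangles of
# sizes 2x2, 3x1 and 1x3 on a 4x4 board without overlap. The board size is
# an assumption made for this example.
#
def demo_diffn():
  sol = Solver()
  dx = [2, 3, 1]
  dy = [2, 1, 3]
  n = len(dx)
  x = [makeIntVar(sol, "diffn_x_%i" % i, 0, 4 - dx[i]) for i in range(n)]
  y = [makeIntVar(sol, "diffn_y_%i" % i, 0, 4 - dy[i]) for i in range(n)]
  diffn(sol, x, y, dx, dy)
  if sol.check() == sat:
    mod = sol.model()
    print("diffn demo:", [(mod.eval(x[i]), mod.eval(y[i])) for i in range(n)])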
| 28.64487
| 117
| 0.609572
|
sol.add(s == Sum([(base ** (tlen - i - 1)) * t[i] for i in range(tlen)]))
# subset_sum(sol,values,total)
# total is the sum of the values of the select elements in values
# returns array of the selected entries and the sum of the selected values
def subset_sum(sol, values, total):
n = len(values)
x = [makeIntVar(sol,"x_%i"%i,0,n) for i in range(n)]
ss = makeIntVar(sol,"ss", 0, n)
sol.add(ss == Sum([x[i] for i in range(n)]))
sol.add(total == scalar_product2(sol,x, values))
return x, ss
# allowed_assignments(sol,t,allowed):
# a.k.a. table, table_in etc
# ensure that the tuple (list) t is in the list allowed
# (of allowed assignments)
def allowed_assignments(sol,t,allowed):
len_allowed = len(allowed)
t_len = len(t)
sol.add(
Or([ And([t[a] == allowed[k][a] for a in range(t_len)])
for k in range(len_allowed)]
))
# ensure that element e is one of v
def member_of(sol, e, v):
sol.add(Or([e == i for i in v]))
# No overlapping of tasks s1 and s2
def no_overlap(sol, s1, d1, s2, d2):
sol.add(Or(s1 + d1 <= s2, s2 + d2 <= s1))
#
# sliding_sum(sol,low,up,seq,x)
# ensures that the sum of all subsequences in x of length seq
# are between low and up
# low, up, and seq must be fixed integers
#
def sliding_sum(sol, low, up, seq, x):
vlen = len(x)
for i in range(vlen-seq+1):
s = makeIntVar(sol, "s_%i"%i,low,up)
sol.add(s == Sum([x[j] for j in range(i,i+seq)]))
# bin_packing
#
# Note: capacity (and bins) might be IntVar but weights must be an int vector
#
def bin_packing(sol,capacity, bins, weights):
n = len(bins)
for b in range(n):
sol.add(Sum([ weights[j]*If(bins[j] == b,1,0) for j in range(n)] ) <= capacity)
#
# Decomposition of cumulative.
#
# Inspired by the MiniZinc implementation:
# http://www.g12.csse.unimelb.edu.au/wiki/doku.php?id=g12:zinc:lib:minizinc:std:cumulative.mzn&s[]=cumulative
# The MiniZinc decomposition is discussed in the paper:
# A. Schutt, T. Feydy, P.J. Stuckey, and M. G. Wallace.
# 'Why cumulative decomposition is not as bad as it sounds.'
# Download:
# http://www.cs.mu.oz.au/%7Epjs/rcpsp/papers/cp09-cu.pdf
# http://www.cs.mu.oz.au/%7Epjs/rcpsp/cumu_lazyfd.pdf
#
#
# Parameters:
#
# s: start_times assumption: array of IntVar
# d: durations assumption: array of int
# r: resources assumption: array of int
# b: resource limit assumption: IntVar or int
#
# Note: since I don't know how to extract the bounds of the
#       domains, both times_min and times_max1 are required.
def cumulative(sol, s, d, r, b,times_min,times_max1):
tasks = [i for i in range(len(s)) if r[i] > 0 and d[i] > 0]
times_max = times_max1 + max(d)
for t in range(times_min, times_max + 1):
for i in tasks:
sol.add(Sum([(If(s[i] <= t,1,0) * If(t < s[i] + d[i],1,0))*r[i] for i in tasks]) <= b)
if not isinstance(b, int):
sol.add(b <= sum(r))
def global_contiguity(sol, x,start,end):
n = len(x)
sol.add(start<=end)
for i in range(n):
    sol.add(And(i >= start, i <= end) == (x[i] == 1))
# lib/zinc/globals.mzn), via the Comet code referred above.
# All comments are from the MiniZinc code.
# '''
# The sequence of values in array 'x' (which must all be in the range 1..S)
# is accepted by the DFA of 'Q' states with input 1..S and transition
# function 'd' (which maps (1..Q, 1..S) -> 0..Q)) and initial state 'q0'
# (which must be in 1..Q) and accepting states 'F' (which all must be in
# 1..Q). We reserve state 0 to be an always failing state.
# '''
#
# x : IntVar array
# Q : number of states
# S : input_max
# d : transition matrix
# q0: initial state
# F : accepting states
# x_len: length of x [when using Array we cannot extract the length]
#
def regular(sol, x, Q, S, d, q0, F, x_len):
assert Q > 0, 'regular: "Q" must be greater than zero'
assert S > 0, 'regular: "S" must be greater than zero'
# d2 is the same as d, except we add one extra transition for
# each possible input; each extra transition is from state zero
# to state zero. This allows us to continue even if we hit a
# non-accepted input.
# Comet: int d2[0..Q, 1..S]
d2 = []
for i in range(Q + 1):
row = []
for j in range(S):
if i == 0:
row.append(0)
else:
row.append(d[i - 1][j])
d2.append(row)
d2_flatten = [d2[i][j] for i in range(Q + 1) for j in range(S)]
d2_flatten_a = makeIntArray(sol,"d2_flatten_a_%i"%uuid.uuid4().int,len(d2_flatten),min(d2_flatten),max(d2_flatten))
for i in range(len(d2_flatten)):
sol.add(d2_flatten[i] == d2_flatten_a[i])
# If x has index set m..n, then a[m-1] holds the initial state
# (q0), and a[i+1] holds the state we're in after processing
x_range = list(range(0, x_len))
m = 0
n = x_len
a = [makeIntVar(sol,'a[%i]_%i' % (i,uuid.uuid4().int), 0, Q + 1) for i in range(m, n + 1)]
member_of(sol,a[-1],F)
sol.add(a[m] == q0)
for i in x_range:
sol.add(x[i] >= 1)
sol.add(x[i] <= S)
sol.add(a[i + 1] == d2_flatten_a[(a[i] * S) + (x[i] - 1)])
def all_different_modulo(sol, x, m):
n = len(x)
mods = makeIntVector(sol,"mods",n, 0,m-1)
for i in range(n):
sol.add(mods[i] == x[i] % m)
sol.add(Distinct(mods))
def among(sol,m,x,v):
sol.add(m == Sum([If(x[i] == j,1,0) for i in range(len(x)) for j in v]))
def nvalue(sol, m, x, min_val,max_val):
n = len(x)
sol.add(m == Sum([ If(Sum([ If(x[j] == i,1,0) for j in range(n)]) > 0,1,0) for i in range(min_val, max_val+1)]))
# If there is no edge between two nodes, then c1 and c2 cannot both be in the clique.
#
def clique(sol, g, clique, card):
n = len(g)
sol.add(card == Sum([clique[i] for i in range(n)]))
for (c1,i) in zip(clique, range(n)):
for (c2,j) in zip(clique, range(n)):
sol.add(Implies(And(i != j, g[i][j] == 0), Or(c1 == 0, c2 == 0)))
#
# all_min_dist(sol,min_dist, x, n)
#
# Ensures that the differences of all pairs (i != j) are
# >= min_dist.
#
def all_min_dist(sol,min_dist, x, n):
for i in range(n):
for j in range(i):
sol.add(Abs(x[i]-x[j]) >= min_dist)
#
# Ensure that all elements in xs + cst are distinct
#
def all_different_cst(sol, xs, cst):
sol.add(Distinct([(x + c) for (x,c) in zip(xs,cst)]))
#
# Ensure that the values that are common in x and y are distinct (in each array)
#
def all_different_on_intersection(sol, x, y):
_count_a_in_b(sol,x,y)
_count_a_in_b(sol,y,x)
# helper for all_different_on_intersection
def _count_a_in_b(sol,ass,bss):
for a in ass:
sol.add(Sum([If(a == b,1,0) for b in bss]) <= 1)
# all pairs must be different
def all_different_pairs(sol, a, s):
sol.add(Distinct([p for p in pairs(sol,a,s)]))
# the pairs are in increasing order
def increasing_pairs(sol,a, s):
increasing(sol,pairs(sol,a,s))
# the pairs are in decreasing order
def decreasing_pairs(sol,a, s):
decreasing(sol,pairs(sol,a,s))
# return the pairs of a in the "integer representation": a[k,0]*(n-1) + a[k,1]
# s is the size of max value of n
def pairs(sol, a, s):
n = len(a)//2
return [ a[(k,0)]*(s-1) + a[(k,1)] for k in range(n)]
#
# all_differ_from_at_least_k_pos(sol, k, x)
#
# Ensure that every pair of vectors differs in at least k positions
#
def all_differ_from_at_least_k_pos(sol, k, vectors):
n = len(vectors)
m = len(vectors[0])
for i in range(n):
for j in range(i+1,n):
sol.add(Sum([If(vectors[i][kk] != vectors[j][kk],1,0) for kk in range(m)]) >= k)
#
# all_differ_from_exact_k_pos(sol, k, vectors)
#
# Ensure that every pair of vectors differs in exactly k positions
#
def all_differ_from_exact_k_pos(sol, k, vectors):
n = len(vectors)
m = len(vectors[0])
for i in range(n):
for j in range(i+1,n):
sol.add(Sum([If(vectors[i][kk] != vectors[j][kk],1,0) for kk in range(m)]) == k)
#
# all_differ_from_at_most_k_pos(sol, k, x)
#
# Ensure that every pair of vectors differs in at most k positions
#
def all_differ_from_at_most_k_pos(sol, k, vectors):
n = len(vectors)
m = len(vectors[0])
for i in range(n):
for j in range(i+1,n):
sol.add(Sum([If(vectors[i][kk] != vectors[j][kk],1,0) for kk in range(m)]) <= k)
#
# all values in x must be equal
#
def all_equal(sol,x):
sol.add(And([x[i] == x[i-1] for i in range(len(x))]))
#
# Ensure that all elements in x are <relop> than val.
#
def arith(sol, x, relop, val):
for i in range(len(x)):
arith_relop(sol,x[i],relop, val)
#
# This is (arguably) a hack.
# Represents each relational operator as an integer 0..5.
#
def arith_relop(sol, a, t, b):
sol.add(Implies(t == 0,a < b))
sol.add(Implies(t == 1,a <= b))
sol.add(Implies(t == 2,a == b))
sol.add(Implies(t == 3,a >= b))
sol.add(Implies(t == 4,a > b))
sol.add(Implies(t == 5,a != b))
# Some experiments
if __name__ == "__main__":
sol = Solver()
n = 5
# x = IntVector("x",n)
# for i in range(n):
# sol.add(x[i]>=0, x[i] <= n)
x = makeIntVector(sol,"x",n,0,n)
# sol.add(Distinct(x))
# all_different_except_0(sol,x)
# all_different(sol,x)
increasing(sol,x)
# increasing_strict(sol,x)
# decreasing(sol,x)
# decreasing_strict(sol,x)
  # exactly two 0s
# count(sol,0,x,2)
# count the number of 0's
c = Int("c")
sol.add(c >= 0, c <= n)
count(sol,0,x,c)
gcc = IntVector("gcc",n+1)
for i in range(n):
sol.add(gcc[i] >= 0, gcc[i] <= n+1)
  global_cardinality_count(sol,[i for i in range(0,n+1)], x, gcc)
at_most(sol,2,x,2)
at_least(sol,2,x,2)
num_solutions = 0
print(sol.check())
while sol.check() == sat:
num_solutions = num_solutions + 1
mod = sol.model()
ss = [mod.eval(x[i]) for i in range(n)]
cc = mod.eval(c)
gccs = ([mod.eval(gcc[i]) for i in range(n)])
print(ss, " #0s: ", mod.eval(cc), " gcc:", gccs)
sol.add(
Or(
Or([x[i] != ss[i] for i in range(n)]),
cc != c
, Or([gcc[i] != gccs[i] for i in range(n)]),
)
)
print("num_solutions:", num_solutions)
#
# predicate fzn_diffn(array[int] of var int: x,
# array[int] of var int: y,
# array[int] of var int: dx,
# array[int] of var int: dy) =
# forall(i,j in index_set(x) where i < j)(
# x[i] + dx[i] <= x[j] \/ y[i] + dy[i] <= y[j] \/
# x[j] + dx[j] <= x[i] \/ y[j] + dy[j] <= y[i]
# );
#
def diffn(sol,x,y,dx,dy):
n = len(x)
for i in range(n):
for j in range(i+1,n):
sol.add(
Or([x[i] + dx[i] <= x[j],
y[i] + dy[i] <= y[j],
x[j] + dx[j] <= x[i],
y[j] + dy[j] <= y[i]]
)
)
| true
| true
|
f7183dfa463649652124a5c44236ae03377d0d36
| 23,967
|
py
|
Python
|
code/scripts/2020/04/11_12_fine_tune_palminized.py
|
lucgiffon/psm-nets
|
dec43c26281febf6e5c8b8f42bfb78098ae7101d
|
[
"MIT"
] | 1
|
2021-07-15T07:05:18.000Z
|
2021-07-15T07:05:18.000Z
|
code/scripts/2020/04/11_12_fine_tune_palminized.py
|
lucgiffon/psm-nets
|
dec43c26281febf6e5c8b8f42bfb78098ae7101d
|
[
"MIT"
] | 2
|
2021-07-15T06:12:47.000Z
|
2021-07-16T10:05:36.000Z
|
code/scripts/2020/04/11_12_fine_tune_palminized.py
|
lucgiffon/psm-nets
|
dec43c26281febf6e5c8b8f42bfb78098ae7101d
|
[
"MIT"
] | null | null | null |
"""
This script finds a palminized model with the given arguments and then finetunes it.
Usage:
script.py --input-dir path [-h] [-v|-vv] [--seed int] [--train-val-split float] [--keep-last-layer] [--lr float] [--use-clr policy] [--min-lr float --max-lr float] [--epoch-step-size int] [--nb-epoch int] [--only-mask] [--tb] (--mnist|--svhn|--cifar10|--cifar100|--test-data) [--cifar100-resnet50|--cifar100-resnet20|--mnist-500|--mnist-lenet|--test-model|--cifar10-vgg19|--cifar100-vgg19|--svhn-vgg19] --sparsity-factor=int [--nb-iteration-palm=int] [--delta-threshold=float] [--hierarchical] [--nb-factor=int]
Options:
-h --help Show this screen.
-vv Set verbosity to debug.
-v Set verbosity to info.
--seed int The seed for the experiments
--input-dir path Path to input directory where to find previously generated results.
--tb Tell if tensorboard should be printed.
  --lr float                            Flat lr to be used (Overridable)
  --min-lr float                        Tells the min reasonable lr (Overrides everything else).
  --max-lr float                        Tells the max reasonable lr (Overrides everything else).
  --nb-epoch int                        Number of epochs of training (Overrides everything else).
--epoch-step-size int Number of epochs for an half cycle of CLR.
--use-clr policy Tell to use clr. Policy can be "triangular" or "triangular2" (see Cyclical learning rate)
--keep-last-layer Do not compress classification layer.
--train-val-split float Tells the proportion of validation data. If not specified, validation data is test data.
Dataset:
--mnist Use Mnist dataset.
--svhn Use svhn dataset.
--cifar10 Use cifar10 dataset.
--cifar100 Use cifar100 dataset.
  --test-data                           Use test dataset (that is actually mnist).
Model:
--mnist-lenet Use model lenet pretrained for mnist.
--test-model Use test, small, model.
--cifar10-vgg19 Use model vgg19 pretrained on cifar10.
--cifar100-vgg19 Use model vgg19 pretrained on cifar100.
--svhn-vgg19 Use model vgg19 pretrained on svhn.
--mnist-500 Use model fc 500 hidden units pretrained on mnist.
--cifar100-resnet50 Use model resnet50 pretrained on cifar100.
--cifar100-resnet20 Use model resnet20 pretrained on cifar100.
Palm-Specific options:
--sparsity-factor=int Integer coefficient from which is computed the number of value in each factor.
--nb-iteration-palm=int Number of iterations in the inner palm4msa calls. [default: 300]
--delta-threshold=float Threshold value before stopping palm iterations. [default: 1e-6]
  --hierarchical                        Tells if palm should use the hierarchical heuristic or not. Much longer but better approximation results.
--nb-factor=int Tells the number of sparse factor for palm
--only-mask Use only sparsity mask given by palm but re-initialize weights.
"""
import logging
import os
import pickle
import pandas as pd
import sys
from collections import defaultdict
from sklearn.model_selection import train_test_split
import time
from copy import deepcopy
import keras
from keras.engine import Model, InputLayer
import signal
import docopt
from scipy.sparse import coo_matrix
from palmnet.utils import CyclicLR
from palmnet.core.palminizer import Palminizer
from palmnet.core.palminizable import Palminizable
from palmnet.data import Mnist, Test, Svhn, Cifar100, Cifar10
# from palmnet.layers.sparse_tensor import SparseFactorisationDense#, SparseFactorisationConv2DDensify
from palmnet.layers.sparse_facto_conv2D_masked import SparseFactorisationConv2D
from palmnet.layers.sparse_facto_dense_masked import SparseFactorisationDense
from palmnet.utils import get_sparsity_pattern, insert_layer_nonseq, timeout_signal_handler, get_lr_metric, CSVLoggerByBatch
from palmnet.experiments.utils import ParameterManagerPalminize, ParameterManagerPalminizeFinetune, ResultPrinter
from skluc.utils import logger, log_memory_usage
from keras.layers import Dense, Conv2D
import numpy as np
import keras.backend as K
from palmnet.core import palminizable
from palmnet.core.palminizer import Palminizer
palminizable.Palminizer = Palminizer
import sys
sys.modules["palmnet.core.palminize"] = palminizable
lst_results_header = [
"test_accuracy_finetuned_model"
]
def get_idx_last_dense_layer(model):
idx_last_dense_layer = -1
for i, layer in enumerate(model.layers):
if isinstance(layer, Dense):
idx_last_dense_layer = i
if idx_last_dense_layer == -1:
logger.warning("No dense layer found")
return idx_last_dense_layer
def replace_layers_with_sparse_facto(model, dct_name_facto):
new_model = deepcopy(model)
log_memory_usage("After copy model")
lst_tpl_str_bool_new_model_layers = []
dct_new_layer_attr = defaultdict(lambda: {})
idx_last_dense_layer = get_idx_last_dense_layer(new_model) if paraman["--keep-last-layer"] else -1
for i, layer in enumerate(new_model.layers):
layer_name = layer.name
sparse_factorization = dct_name_facto[layer_name]
logger.info('Prepare layer {}'.format(layer.name))
# if sparse_factorization != (None, None) and (i != idx_last_dense_layer and paraman["--keep-last-layer"]):
if sparse_factorization != (None, None) and not (i == idx_last_dense_layer and paraman["--keep-last-layer"]):
# scaling = 1.
if paraman["--only-mask"]:
scaling = []
else:
scaling = [np.array(sparse_factorization[0])[None]]
# factors_sparse = [coo_matrix(fac.toarray()) for fac in sparse_factorization[1].get_list_of_factors()]
factors = [fac.toarray() for fac in sparse_factorization[1].get_list_of_factors()]
# sparsity_patterns = [get_sparsity_pattern(w.toarray()) for w in factors]
sparsity_patterns = [get_sparsity_pattern(w) for w in factors]
nb_val_sparse_factors = np.sum([np.sum(fac) for fac in sparsity_patterns])
# factor_data_sparse = [f.data for f in factors_sparse]
factor_data = factors
reconstructed_matrix = np.linalg.multi_dot(factors) * scaling[0]
nb_val_full_matrix = np.prod(reconstructed_matrix.shape)
if nb_val_full_matrix <= nb_val_sparse_factors:
logger.info("Less values in full matrix than factorization. Keep full matrix. {} <= {}".format(nb_val_full_matrix, nb_val_sparse_factors))
dct_new_layer_attr[layer_name]["modified"] = False
lst_tpl_str_bool_new_model_layers.append((layer_name, False))
dct_new_layer_attr[layer_name]["layer_obj"] = layer
continue
base_palminized_matrix = np.reshape(layer.get_weights()[0], reconstructed_matrix.shape)
diff = np.linalg.norm(base_palminized_matrix - reconstructed_matrix) / np.linalg.norm(base_palminized_matrix)
# assert np.allclose(diff, 0, atol=1e-5), "Reconstructed is different than base"
# create new layer
if isinstance(layer, Dense):
logger.debug("Dense layer treatment")
hidden_layer_dim = layer.units
activation = layer.activation
regularizer = layer.kernel_regularizer
replacing_layer = SparseFactorisationDense(use_scaling=not paraman["--only-mask"], units=hidden_layer_dim, sparsity_patterns=sparsity_patterns, use_bias=layer.use_bias, activation=activation, kernel_regularizer=regularizer)
replacing_weights = scaling + factor_data + ([layer.get_weights()[-1]] if layer.use_bias else [])
# new_model = insert_layer_nonseq(new_model, layer_name, lambda: replacing_layer, position="replace")
# replacing_layer.set_weights(replacing_weights)
elif isinstance(layer, Conv2D):
logger.debug("Conv2D layer treatment")
nb_filters = layer.filters
strides = layer.strides
kernel_size = layer.kernel_size
activation = layer.activation
padding = layer.padding
regularizer = layer.kernel_regularizer
replacing_layer = SparseFactorisationConv2D(use_scaling=not paraman["--only-mask"], strides=strides, filters=nb_filters, kernel_size=kernel_size, sparsity_patterns=sparsity_patterns, use_bias=layer.use_bias, activation=activation, padding=padding, kernel_regularizer=regularizer)
replacing_weights = scaling + factor_data + ([layer.get_weights()[-1]] if layer.use_bias else [])
# new_model = insert_layer_nonseq(new_model, layer_name, lambda: replacing_layer, position="replace")
# replacing_layer.set_weights(replacing_weights)
else:
raise ValueError("unknown layer class")
dct_new_layer_attr[layer_name]["layer_weights"] = replacing_weights
dct_new_layer_attr[layer_name]["sparsity_pattern"] = sparsity_patterns
dct_new_layer_attr[layer_name]["layer_obj"] = replacing_layer
dct_new_layer_attr[layer_name]["modified"] = True
lst_tpl_str_bool_new_model_layers.append((layer_name, True))
else:
dct_new_layer_attr[layer_name]["modified"] = False
lst_tpl_str_bool_new_model_layers.append((layer_name, False))
dct_new_layer_attr[layer_name]["layer_obj"] = layer
log_memory_usage("After prepare all sparse layers ")
network_dict = {'input_layers_of': defaultdict(lambda: []), 'new_output_tensor_of': defaultdict(lambda: [])}
if not isinstance(new_model.layers[0], InputLayer):
new_model = Model(input=new_model.input, output=new_model.output)
# Set the input layers of each layer
for layer in new_model.layers:
# each layer is set as `input` layer of all its outbound layers
for node in layer._outbound_nodes:
outbound_layer_name = node.outbound_layer.name
# if outbound_layer_name not in network_dict
# network_dict['input_layers_of'].update({outbound_layer_name: [layer.name]})
network_dict['input_layers_of'][outbound_layer_name].append(layer.name)
# Set the output tensor of the input layer
network_dict['new_output_tensor_of'].update(
{new_model.layers[0].name: new_model.input})
for layer in new_model.layers[1:]:
log_memory_usage("Before layer {}".format(layer.name))
layer_name = layer.name
layer_input = [network_dict['new_output_tensor_of'][layer_aux] for layer_aux in network_dict['input_layers_of'][layer.name]]
if len(layer_input) == 1:
layer_input = layer_input[0]
proxy_new_layer_attr = dct_new_layer_attr[layer_name]
if proxy_new_layer_attr["modified"]:
x = layer_input
new_layer = proxy_new_layer_attr["layer_obj"] # type: keras.layers.Layer
new_layer.name = '{}_{}'.format(layer.name,
new_layer.name)
x = new_layer(x)
if not paraman["--only-mask"]:
if layer.use_bias:
reconstructed_matrix = np.linalg.multi_dot(proxy_new_layer_attr["layer_weights"][1:-1]) * proxy_new_layer_attr["layer_weights"][0]
else:
reconstructed_matrix = np.linalg.multi_dot(proxy_new_layer_attr["layer_weights"][1:]) * proxy_new_layer_attr["layer_weights"][0]
base_palminized_matrix = np.reshape(layer.get_weights()[0], reconstructed_matrix.shape)
diff = np.linalg.norm(base_palminized_matrix - reconstructed_matrix) / np.linalg.norm(base_palminized_matrix)
# assert np.allclose(diff, 0, atol=1e-5), "Reconstructed is different than base"
del base_palminized_matrix
new_layer.set_weights(proxy_new_layer_attr["layer_weights"])
else:
masked_weights = []
i = 0
for w in new_layer.get_weights():
if len(w.shape) > 1:
new_weight = w * proxy_new_layer_attr["sparsity_pattern"][i]
i += 1
else:
new_weight = w
masked_weights.append(new_weight)
new_layer.set_weights(masked_weights)
logger.info('Layer {} modified into {}'.format(layer.name, new_layer.name))
else:
x = layer(layer_input)
logger.info('Layer {} unmodified'.format(layer.name))
network_dict['new_output_tensor_of'].update({layer.name: x})
del dct_new_layer_attr[layer_name]
new_model = Model(inputs=new_model.inputs, outputs=x)
return new_model
def main():
if paraman["--mnist-lenet"]:
param_train_dataset = Mnist.get_model_param_training()
elif paraman["--mnist-500"]:
param_train_dataset = Mnist.get_model_param_training("mnist_500")
elif paraman["--cifar10-vgg19"]:
param_train_dataset = Cifar10.get_model_param_training()
elif paraman["--cifar100-vgg19"]:
param_train_dataset = Cifar100.get_model_param_training()
elif paraman["--cifar100-resnet20"] or paraman["--cifar100-resnet50"]:
param_train_dataset = Cifar100.get_model_param_training("cifar100_resnet")
elif paraman["--svhn-vgg19"]:
param_train_dataset = Svhn.get_model_param_training()
elif paraman["--test-model"]:
param_train_dataset = Test.get_model_param_training()
else:
raise NotImplementedError("No dataset specified.")
(x_train, y_train), (x_test, y_test) = paraman.get_dataset().load_data()
if paraman["--mnist-500"]:
x_test = np.reshape(x_test, (-1, 784))
x_train = np.reshape(x_train, (-1, 784))
if paraman["--train-val-split"] is not None:
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=paraman["--train-val-split"], random_state=paraman["--seed"])
else:
x_val, y_val = x_test, y_test
# noinspection PyUnreachableCode
if os.path.exists(paraman["output_file_notfinishedprinter"]):
df = pd.read_csv(paraman["output_file_resprinter"])
init_nb_epoch = pd.read_csv(paraman["output_file_csvcbprinter"])["epoch"].max() -1
logger.debug("Loaded results " + str(df))
base_score = float(df["base_score"])
before_finetuned_score = float(df["before_finetuned_score"])
palminized_score = float(df["palminized_score"])
actual_learning_rate = float(df["actual-lr"])
fine_tuned_model = keras.models.load_model(paraman["output_file_modelprinter"],custom_objects={'SparseFactorisationConv2D':SparseFactorisationConv2D,
"SparseFactorisationDense": SparseFactorisationDense})
else:
init_nb_epoch = 0
mypalminizedmodel = pickle.load(open(paraman["input_model_path"], "rb"))
log_memory_usage("After load mypalminized model")
base_model = mypalminizedmodel.base_model
dct_name_facto = mypalminizedmodel.sparsely_factorized_layers
base_score = base_model.evaluate(x_test, y_test, verbose=0)[1]
print(base_score)
palminized_model = mypalminizedmodel.compressed_model
palminized_score = palminized_model.evaluate(x_test, y_test, verbose=1)[1]
print(palminized_score)
fine_tuned_model = replace_layers_with_sparse_facto(palminized_model, dct_name_facto)
log_memory_usage("After get_finetuned_model")
# fine_tuned_model = palminized_model
input_by_shape = {(32,32,3): x_test[:3]}
# for i, layer in enumerate(palminized_model.layers[1:]):
# i = i+1
# print("Start with layer {}".format(layer.name))
# dense_palm_layer = layer
# sparsefacto_palm_layer = fine_tuned_model.layers[i]
#
# dense_layer_output_function = K.function([dense_palm_layer.input],
# [dense_palm_layer.output])
#
# sparsefacto_layer_outut_function = K.function([sparsefacto_palm_layer.get_input_at(-1)],
# [sparsefacto_palm_layer.get_output_at(-1)])
#
# necessary_input_shapes = [tuple(inpt.shape.as_list()[1:]) for inpt in dense_layer_output_function.inputs]
# input_data_layer = [input_by_shape[shap] for shap in necessary_input_shapes]
#
# dense_layer_output = dense_layer_output_function(input_data_layer)[0]
# sparsefacto_layer_output = sparsefacto_layer_outut_function(input_data_layer)[0]
#
# # try:
# assert np.allclose(np.linalg.norm(dense_layer_output - sparsefacto_layer_output) / np.linalg.norm(dense_layer_output), 0, atol=1e-5)
# # except:
# # print("error")
# input_by_shape[dense_layer_output.shape[1:]] = dense_layer_output
params_optimizer = param_train_dataset.params_optimizer
params_optimizer["lr"] = paraman["--lr"] if paraman["--lr"] is not None else params_optimizer["lr"]
fine_tuned_model.compile(loss=param_train_dataset.loss,
optimizer=param_train_dataset.optimizer(**params_optimizer),
metrics=['categorical_accuracy'])
# metrics=['categorical_accuracy', get_lr_metric(param_train_dataset.optimizer)])
before_finetuned_score = fine_tuned_model.evaluate(x_test, y_test, verbose=1)[1]
print(before_finetuned_score)
actual_learning_rate = K.eval(fine_tuned_model.optimizer.lr)
# results must be already printed once in case process is killed afterward
dct_results = {
"actual-lr": actual_learning_rate,
"finetuned_score": None,
"before_finetuned_score": before_finetuned_score,
"base_score": base_score,
"palminized_score": palminized_score,
}
resprinter.add(dct_results)
resprinter.print()
# if paraman["--hierarchical"]:
# if not paraman["--only-mask"]:
# assert before_finetuned_score == palminized_score, \
# "the reconstructed model with sparse facto should equal in perf to the reconstructed model with dense product. {} != {}".format(before_finetuned_score, palminized_score)
# else: # small fix for a bug where when I wasn't using hierarchical palm returned a matrix that wasn't multiplied by lambda
# # this should pass until results are generated without bug..
# assert before_finetuned_score != palminized_score, \
# "the reconstructed model with sparse facto should equal in perf to the reconstructed model with dense product. {} != {}".format(before_finetuned_score, palminized_score)
fine_tuned_model.summary()
call_backs = []
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(str(paraman["output_file_modelprinter"]),
monitor='val_loss',
verbose=0, save_best_only=False,
save_weights_only=False, mode='auto', period=1)
call_backs.append(model_checkpoint_callback)
if paraman["--tb"]:
tbCallBack = keras.callbacks.TensorBoard(log_dir=str(paraman["output_file_tensorboardprinter"]), histogram_freq=20, write_graph=False, write_images=False, batch_size=param_train_dataset.batch_size, write_grads=True, update_freq="epoch")
call_backs.append(tbCallBack)
actual_min_lr = param_train_dataset.min_lr if paraman["--min-lr"] is None else paraman["--min-lr"]
actual_max_lr = param_train_dataset.max_lr if paraman["--max-lr"] is None else paraman["--max-lr"]
if paraman["--use-clr"] is not None:
clr_cb = CyclicLR(base_lr=actual_min_lr,
max_lr=actual_max_lr,
step_size=(paraman["--epoch-step-size"]*(x_train.shape[0] // param_train_dataset.batch_size)),
logrange=True,
mode=paraman["--use-clr"])
call_backs.append(clr_cb)
csvcallback = CSVLoggerByBatch(str(paraman["output_file_csvcbprinter"]), n_batch_between_display=100, separator=',', append=True)
call_backs.append(csvcallback)
finetuned_score = None
open(paraman["output_file_notfinishedprinter"], 'w').close()
actual_number_of_epochs = (param_train_dataset.epochs if paraman["--nb-epoch"] is None else paraman["--nb-epoch"])
actual_batch_size = param_train_dataset.batch_size
history = fine_tuned_model.fit(param_train_dataset.image_data_generator.flow(x_train, y_train, batch_size=param_train_dataset.batch_size),
epochs= actual_number_of_epochs - init_nb_epoch,
# epochs=2 - init_nb_epoch,
verbose=2,
validation_data=(x_val, y_val),
callbacks=param_train_dataset.callbacks + call_backs)
finetuned_score = fine_tuned_model.evaluate(x_test, y_test, verbose=1)[1]
print(finetuned_score)
if os.path.exists(paraman["output_file_notfinishedprinter"]):
os.remove(paraman["output_file_notfinishedprinter"])
dct_results = {
"actual-batch-size": actual_batch_size,
"actual-nb-epochs": actual_number_of_epochs,
"actual-min-lr":actual_min_lr,
"actual-max-lr":actual_max_lr,
"actual-lr": actual_learning_rate,
"finetuned_score": finetuned_score,
"before_finetuned_score": before_finetuned_score,
"base_score": base_score,
"palminized_score": palminized_score,
}
fine_tuned_model.save(str(paraman["output_file_modelprinter"]))
resprinter.add(dct_results)
if __name__ == "__main__":
logger.info("Command line: " + " ".join(sys.argv))
log_memory_usage("Memory at startup")
arguments = docopt.docopt(__doc__)
paraman = ParameterManagerPalminizeFinetune(arguments)
initialized_results = dict((v, None) for v in lst_results_header)
resprinter = ResultPrinter(output_file=paraman["output_file_resprinter"])
resprinter.add(initialized_results)
resprinter.add(paraman)
if paraman["-v"] >= 2:
logger.setLevel(level=logging.DEBUG)
elif paraman["-v"] >= 1:
logger.setLevel(level=logging.INFO)
else:
logger.setLevel(level=logging.WARNING)
logger.warning("Verbosity set to warning")
logger.info("Verbosity set to info")
logger.debug("Verbosity set to debug")
if not os.path.exists(paraman["output_file_notfinishedprinter"]) and \
os.path.exists(paraman["output_file_resprinter"]) and \
os.path.exists(paraman["output_file_modelprinter"]):
sys.exit("Expe {} already executed. Exit".format(paraman["hash"]))
has_failed = False
try:
main()
except Exception as e:
has_failed = True
raise e
finally:
failure_dict = {
"failure": has_failed
}
resprinter.add(failure_dict)
resprinter.print()
| 51.653017
| 515
| 0.652147
|
import logging
import os
import pickle
import pandas as pd
import sys
from collections import defaultdict
from sklearn.model_selection import train_test_split
import time
from copy import deepcopy
import keras
from keras.engine import Model, InputLayer
import signal
import docopt
from scipy.sparse import coo_matrix
from palmnet.utils import CyclicLR
from palmnet.core.palminizer import Palminizer
from palmnet.core.palminizable import Palminizable
from palmnet.data import Mnist, Test, Svhn, Cifar100, Cifar10
from palmnet.layers.sparse_facto_conv2D_masked import SparseFactorisationConv2D
from palmnet.layers.sparse_facto_dense_masked import SparseFactorisationDense
from palmnet.utils import get_sparsity_pattern, insert_layer_nonseq, timeout_signal_handler, get_lr_metric, CSVLoggerByBatch
from palmnet.experiments.utils import ParameterManagerPalminize, ParameterManagerPalminizeFinetune, ResultPrinter
from skluc.utils import logger, log_memory_usage
from keras.layers import Dense, Conv2D
import numpy as np
import keras.backend as K
from palmnet.core import palminizable
from palmnet.core.palminizer import Palminizer
palminizable.Palminizer = Palminizer
import sys
sys.modules["palmnet.core.palminize"] = palminizable
lst_results_header = [
"test_accuracy_finetuned_model"
]
def get_idx_last_dense_layer(model):
idx_last_dense_layer = -1
for i, layer in enumerate(model.layers):
if isinstance(layer, Dense):
idx_last_dense_layer = i
if idx_last_dense_layer == -1:
logger.warning("No dense layer found")
return idx_last_dense_layer
def replace_layers_with_sparse_facto(model, dct_name_facto):
new_model = deepcopy(model)
log_memory_usage("After copy model")
lst_tpl_str_bool_new_model_layers = []
dct_new_layer_attr = defaultdict(lambda: {})
idx_last_dense_layer = get_idx_last_dense_layer(new_model) if paraman["--keep-last-layer"] else -1
for i, layer in enumerate(new_model.layers):
layer_name = layer.name
sparse_factorization = dct_name_facto[layer_name]
logger.info('Prepare layer {}'.format(layer.name))
if sparse_factorization != (None, None) and not (i == idx_last_dense_layer and paraman["--keep-last-layer"]):
if paraman["--only-mask"]:
scaling = []
else:
scaling = [np.array(sparse_factorization[0])[None]]
factors = [fac.toarray() for fac in sparse_factorization[1].get_list_of_factors()]
sparsity_patterns = [get_sparsity_pattern(w) for w in factors]
nb_val_sparse_factors = np.sum([np.sum(fac) for fac in sparsity_patterns])
factor_data = factors
reconstructed_matrix = np.linalg.multi_dot(factors) * scaling[0]
nb_val_full_matrix = np.prod(reconstructed_matrix.shape)
if nb_val_full_matrix <= nb_val_sparse_factors:
logger.info("Less values in full matrix than factorization. Keep full matrix. {} <= {}".format(nb_val_full_matrix, nb_val_sparse_factors))
dct_new_layer_attr[layer_name]["modified"] = False
lst_tpl_str_bool_new_model_layers.append((layer_name, False))
dct_new_layer_attr[layer_name]["layer_obj"] = layer
continue
base_palminized_matrix = np.reshape(layer.get_weights()[0], reconstructed_matrix.shape)
diff = np.linalg.norm(base_palminized_matrix - reconstructed_matrix) / np.linalg.norm(base_palminized_matrix)
if isinstance(layer, Dense):
logger.debug("Dense layer treatment")
hidden_layer_dim = layer.units
activation = layer.activation
regularizer = layer.kernel_regularizer
replacing_layer = SparseFactorisationDense(use_scaling=not paraman["--only-mask"], units=hidden_layer_dim, sparsity_patterns=sparsity_patterns, use_bias=layer.use_bias, activation=activation, kernel_regularizer=regularizer)
replacing_weights = scaling + factor_data + ([layer.get_weights()[-1]] if layer.use_bias else [])
elif isinstance(layer, Conv2D):
logger.debug("Conv2D layer treatment")
nb_filters = layer.filters
strides = layer.strides
kernel_size = layer.kernel_size
activation = layer.activation
padding = layer.padding
regularizer = layer.kernel_regularizer
replacing_layer = SparseFactorisationConv2D(use_scaling=not paraman["--only-mask"], strides=strides, filters=nb_filters, kernel_size=kernel_size, sparsity_patterns=sparsity_patterns, use_bias=layer.use_bias, activation=activation, padding=padding, kernel_regularizer=regularizer)
replacing_weights = scaling + factor_data + ([layer.get_weights()[-1]] if layer.use_bias else [])
else:
raise ValueError("unknown layer class")
dct_new_layer_attr[layer_name]["layer_weights"] = replacing_weights
dct_new_layer_attr[layer_name]["sparsity_pattern"] = sparsity_patterns
dct_new_layer_attr[layer_name]["layer_obj"] = replacing_layer
dct_new_layer_attr[layer_name]["modified"] = True
lst_tpl_str_bool_new_model_layers.append((layer_name, True))
else:
dct_new_layer_attr[layer_name]["modified"] = False
lst_tpl_str_bool_new_model_layers.append((layer_name, False))
dct_new_layer_attr[layer_name]["layer_obj"] = layer
log_memory_usage("After prepare all sparse layers ")
network_dict = {'input_layers_of': defaultdict(lambda: []), 'new_output_tensor_of': defaultdict(lambda: [])}
if not isinstance(new_model.layers[0], InputLayer):
new_model = Model(input=new_model.input, output=new_model.output)
for layer in new_model.layers:
for node in layer._outbound_nodes:
outbound_layer_name = node.outbound_layer.name
network_dict['input_layers_of'][outbound_layer_name].append(layer.name)
network_dict['new_output_tensor_of'].update(
{new_model.layers[0].name: new_model.input})
for layer in new_model.layers[1:]:
log_memory_usage("Before layer {}".format(layer.name))
layer_name = layer.name
layer_input = [network_dict['new_output_tensor_of'][layer_aux] for layer_aux in network_dict['input_layers_of'][layer.name]]
if len(layer_input) == 1:
layer_input = layer_input[0]
proxy_new_layer_attr = dct_new_layer_attr[layer_name]
if proxy_new_layer_attr["modified"]:
x = layer_input
new_layer = proxy_new_layer_attr["layer_obj"]
new_layer.name = '{}_{}'.format(layer.name,
new_layer.name)
x = new_layer(x)
if not paraman["--only-mask"]:
if layer.use_bias:
reconstructed_matrix = np.linalg.multi_dot(proxy_new_layer_attr["layer_weights"][1:-1]) * proxy_new_layer_attr["layer_weights"][0]
else:
reconstructed_matrix = np.linalg.multi_dot(proxy_new_layer_attr["layer_weights"][1:]) * proxy_new_layer_attr["layer_weights"][0]
base_palminized_matrix = np.reshape(layer.get_weights()[0], reconstructed_matrix.shape)
diff = np.linalg.norm(base_palminized_matrix - reconstructed_matrix) / np.linalg.norm(base_palminized_matrix)
del base_palminized_matrix
new_layer.set_weights(proxy_new_layer_attr["layer_weights"])
else:
masked_weights = []
i = 0
for w in new_layer.get_weights():
if len(w.shape) > 1:
new_weight = w * proxy_new_layer_attr["sparsity_pattern"][i]
i += 1
else:
new_weight = w
masked_weights.append(new_weight)
new_layer.set_weights(masked_weights)
logger.info('Layer {} modified into {}'.format(layer.name, new_layer.name))
else:
x = layer(layer_input)
logger.info('Layer {} unmodified'.format(layer.name))
network_dict['new_output_tensor_of'].update({layer.name: x})
del dct_new_layer_attr[layer_name]
new_model = Model(inputs=new_model.inputs, outputs=x)
return new_model
def main():
if paraman["--mnist-lenet"]:
param_train_dataset = Mnist.get_model_param_training()
elif paraman["--mnist-500"]:
param_train_dataset = Mnist.get_model_param_training("mnist_500")
elif paraman["--cifar10-vgg19"]:
param_train_dataset = Cifar10.get_model_param_training()
elif paraman["--cifar100-vgg19"]:
param_train_dataset = Cifar100.get_model_param_training()
elif paraman["--cifar100-resnet20"] or paraman["--cifar100-resnet50"]:
param_train_dataset = Cifar100.get_model_param_training("cifar100_resnet")
elif paraman["--svhn-vgg19"]:
param_train_dataset = Svhn.get_model_param_training()
elif paraman["--test-model"]:
param_train_dataset = Test.get_model_param_training()
else:
raise NotImplementedError("No dataset specified.")
(x_train, y_train), (x_test, y_test) = paraman.get_dataset().load_data()
if paraman["--mnist-500"]:
x_test = np.reshape(x_test, (-1, 784))
x_train = np.reshape(x_train, (-1, 784))
if paraman["--train-val-split"] is not None:
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=paraman["--train-val-split"], random_state=paraman["--seed"])
else:
x_val, y_val = x_test, y_test
if os.path.exists(paraman["output_file_notfinishedprinter"]):
df = pd.read_csv(paraman["output_file_resprinter"])
init_nb_epoch = pd.read_csv(paraman["output_file_csvcbprinter"])["epoch"].max() -1
logger.debug("Loaded results " + str(df))
base_score = float(df["base_score"])
before_finetuned_score = float(df["before_finetuned_score"])
palminized_score = float(df["palminized_score"])
actual_learning_rate = float(df["actual-lr"])
fine_tuned_model = keras.models.load_model(paraman["output_file_modelprinter"],custom_objects={'SparseFactorisationConv2D':SparseFactorisationConv2D,
"SparseFactorisationDense": SparseFactorisationDense})
else:
init_nb_epoch = 0
mypalminizedmodel = pickle.load(open(paraman["input_model_path"], "rb"))
log_memory_usage("After load mypalminized model")
base_model = mypalminizedmodel.base_model
dct_name_facto = mypalminizedmodel.sparsely_factorized_layers
base_score = base_model.evaluate(x_test, y_test, verbose=0)[1]
print(base_score)
palminized_model = mypalminizedmodel.compressed_model
palminized_score = palminized_model.evaluate(x_test, y_test, verbose=1)[1]
print(palminized_score)
fine_tuned_model = replace_layers_with_sparse_facto(palminized_model, dct_name_facto)
log_memory_usage("After get_finetuned_model")
input_by_shape = {(32,32,3): x_test[:3]}
params_optimizer = param_train_dataset.params_optimizer
params_optimizer["lr"] = paraman["--lr"] if paraman["--lr"] is not None else params_optimizer["lr"]
fine_tuned_model.compile(loss=param_train_dataset.loss,
optimizer=param_train_dataset.optimizer(**params_optimizer),
metrics=['categorical_accuracy'])
before_finetuned_score = fine_tuned_model.evaluate(x_test, y_test, verbose=1)[1]
print(before_finetuned_score)
actual_learning_rate = K.eval(fine_tuned_model.optimizer.lr)
dct_results = {
"actual-lr": actual_learning_rate,
"finetuned_score": None,
"before_finetuned_score": before_finetuned_score,
"base_score": base_score,
"palminized_score": palminized_score,
}
resprinter.add(dct_results)
resprinter.print()
fine_tuned_model.summary()
call_backs = []
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(str(paraman["output_file_modelprinter"]),
                                                             monitor='val_loss',
verbose=0, save_best_only=False,
save_weights_only=False, mode='auto', period=1)
call_backs.append(model_checkpoint_callback)
if paraman["--tb"]:
tbCallBack = keras.callbacks.TensorBoard(log_dir=str(paraman["output_file_tensorboardprinter"]), histogram_freq=20, write_graph=False, write_images=False, batch_size=param_train_dataset.batch_size, write_grads=True, update_freq="epoch")
call_backs.append(tbCallBack)
actual_min_lr = param_train_dataset.min_lr if paraman["--min-lr"] is None else paraman["--min-lr"]
actual_max_lr = param_train_dataset.max_lr if paraman["--max-lr"] is None else paraman["--max-lr"]
if paraman["--use-clr"] is not None:
clr_cb = CyclicLR(base_lr=actual_min_lr,
max_lr=actual_max_lr,
step_size=(paraman["--epoch-step-size"]*(x_train.shape[0] // param_train_dataset.batch_size)),
logrange=True,
mode=paraman["--use-clr"])
call_backs.append(clr_cb)
csvcallback = CSVLoggerByBatch(str(paraman["output_file_csvcbprinter"]), n_batch_between_display=100, separator=',', append=True)
call_backs.append(csvcallback)
finetuned_score = None
open(paraman["output_file_notfinishedprinter"], 'w').close()
actual_number_of_epochs = (param_train_dataset.epochs if paraman["--nb-epoch"] is None else paraman["--nb-epoch"])
actual_batch_size = param_train_dataset.batch_size
history = fine_tuned_model.fit(param_train_dataset.image_data_generator.flow(x_train, y_train, batch_size=param_train_dataset.batch_size),
epochs= actual_number_of_epochs - init_nb_epoch,
verbose=2,
validation_data=(x_val, y_val),
callbacks=param_train_dataset.callbacks + call_backs)
finetuned_score = fine_tuned_model.evaluate(x_test, y_test, verbose=1)[1]
print(finetuned_score)
if os.path.exists(paraman["output_file_notfinishedprinter"]):
os.remove(paraman["output_file_notfinishedprinter"])
dct_results = {
"actual-batch-size": actual_batch_size,
"actual-nb-epochs": actual_number_of_epochs,
"actual-min-lr":actual_min_lr,
"actual-max-lr":actual_max_lr,
"actual-lr": actual_learning_rate,
"finetuned_score": finetuned_score,
"before_finetuned_score": before_finetuned_score,
"base_score": base_score,
"palminized_score": palminized_score,
}
fine_tuned_model.save(str(paraman["output_file_modelprinter"]))
resprinter.add(dct_results)
if __name__ == "__main__":
logger.info("Command line: " + " ".join(sys.argv))
log_memory_usage("Memory at startup")
arguments = docopt.docopt(__doc__)
paraman = ParameterManagerPalminizeFinetune(arguments)
initialized_results = dict((v, None) for v in lst_results_header)
resprinter = ResultPrinter(output_file=paraman["output_file_resprinter"])
resprinter.add(initialized_results)
resprinter.add(paraman)
if paraman["-v"] >= 2:
logger.setLevel(level=logging.DEBUG)
elif paraman["-v"] >= 1:
logger.setLevel(level=logging.INFO)
else:
logger.setLevel(level=logging.WARNING)
logger.warning("Verbosity set to warning")
logger.info("Verbosity set to info")
logger.debug("Verbosity set to debug")
if not os.path.exists(paraman["output_file_notfinishedprinter"]) and \
os.path.exists(paraman["output_file_resprinter"]) and \
os.path.exists(paraman["output_file_modelprinter"]):
sys.exit("Expe {} already executed. Exit".format(paraman["hash"]))
has_failed = False
try:
main()
except Exception as e:
has_failed = True
raise e
finally:
failure_dict = {
"failure": has_failed
}
resprinter.add(failure_dict)
resprinter.print()
| true
| true
|
f7183fd3085594df64eada3d76e8fe9e7ca83d8a
| 1,056
|
py
|
Python
|
picymcsortpy/exif_tool.py
|
patrjon/PicyMcSortpy
|
922cd169464afdc6c0ec7e64f14696147f26d595
|
[
"MIT"
] | null | null | null |
picymcsortpy/exif_tool.py
|
patrjon/PicyMcSortpy
|
922cd169464afdc6c0ec7e64f14696147f26d595
|
[
"MIT"
] | null | null | null |
picymcsortpy/exif_tool.py
|
patrjon/PicyMcSortpy
|
922cd169464afdc6c0ec7e64f14696147f26d595
|
[
"MIT"
] | null | null | null |
import subprocess
import json
import os
class ExifTool:
sentinel = "{ready}\n"
def __init__(self, executable="/usr/bin/exiftool"):
self.executable = executable
def __enter__(self):
self.process = subprocess.Popen(
[self.executable, "-stay_open", "True", "-@", "-"],
universal_newlines=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.process.stdin.write("-stay_open\nFalse\n")
self.process.stdin.flush()
def execute(self, *args):
args = args + ("-execute\n",)
self.process.stdin.write(str.join("\n", args))
self.process.stdin.flush()
output = ""
fd = self.process.stdout.fileno()
while not output.endswith(self.sentinel):
output += os.read(fd, 4096).decode('utf-8')
return output[:-len(self.sentinel)]
def get_metadata(self, *filenames):
return json.loads(self.execute("-G", "-j", "-n", *filenames))
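# Usage sketch (added illustration): "photo.jpg" is a placeholder path and the
# metadata keys depend on the file; assumes the exiftool binary is available
# at the default location.
if __name__ == "__main__":
    with ExifTool() as et:
        for meta in et.get_metadata("photo.jpg"):
            print(meta.get("SourceFile"), meta.get("File:FileSize"))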
| 30.171429
| 69
| 0.60322
|
import subprocess
import json
import os
class ExifTool:
sentinel = "{ready}\n"
def __init__(self, executable="/usr/bin/exiftool"):
self.executable = executable
def __enter__(self):
self.process = subprocess.Popen(
[self.executable, "-stay_open", "True", "-@", "-"],
universal_newlines=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.process.stdin.write("-stay_open\nFalse\n")
self.process.stdin.flush()
def execute(self, *args):
args = args + ("-execute\n",)
self.process.stdin.write(str.join("\n", args))
self.process.stdin.flush()
output = ""
fd = self.process.stdout.fileno()
while not output.endswith(self.sentinel):
output += os.read(fd, 4096).decode('utf-8')
return output[:-len(self.sentinel)]
def get_metadata(self, *filenames):
return json.loads(self.execute("-G", "-j", "-n", *filenames))
| true
| true
|
f7183ff8a33a0e9f78c1b4442d34b7537864c2e0
| 557
|
py
|
Python
|
src/analytics/migrations/0012_auto_20150408_1024.py
|
paveu/srvup_rest
|
97491df4106d5e8b951c6117770fe74072612e49
|
[
"MIT"
] | 1
|
2015-10-10T16:49:30.000Z
|
2015-10-10T16:49:30.000Z
|
src/analytics/migrations/0012_auto_20150408_1024.py
|
paveu/srvup_rest
|
97491df4106d5e8b951c6117770fe74072612e49
|
[
"MIT"
] | null | null | null |
src/analytics/migrations/0012_auto_20150408_1024.py
|
paveu/srvup_rest
|
97491df4106d5e8b951c6117770fe74072612e49
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('analytics', '0011_auto_20150404_1556'),
]
operations = [
migrations.AlterField(
model_name='pageview',
name='timestamp',
field=models.DateTimeField(default=datetime.datetime(2015, 4, 8, 10, 24, 6, 433946, tzinfo=utc)),
preserve_default=True,
),
]
| 24.217391
| 109
| 0.640934
|
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('analytics', '0011_auto_20150404_1556'),
]
operations = [
migrations.AlterField(
model_name='pageview',
name='timestamp',
field=models.DateTimeField(default=datetime.datetime(2015, 4, 8, 10, 24, 6, 433946, tzinfo=utc)),
preserve_default=True,
),
]
| true
| true
|
f718404b8071703ea14bb0ff06af2600f0bf9dff
| 482
|
py
|
Python
|
custom/icds_reports/migrations/0105_aww_incentive_report_monthly.py
|
tobiasmcnulty/commcare-hq
|
234aa1fba98a96de1b625bbd70b2066fc877eed1
|
[
"BSD-3-Clause"
] | 1
|
2020-07-14T13:00:23.000Z
|
2020-07-14T13:00:23.000Z
|
custom/icds_reports/migrations/0105_aww_incentive_report_monthly.py
|
tobiasmcnulty/commcare-hq
|
234aa1fba98a96de1b625bbd70b2066fc877eed1
|
[
"BSD-3-Clause"
] | null | null | null |
custom/icds_reports/migrations/0105_aww_incentive_report_monthly.py
|
tobiasmcnulty/commcare-hq
|
234aa1fba98a96de1b625bbd70b2066fc877eed1
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 1.11.16 on 2019-03-12 10:52
from corehq.sql_db.operations import RawSQLMigration
from django.db import migrations
from custom.icds_reports.const import SQL_TEMPLATES_ROOT
migrator = RawSQLMigration((SQL_TEMPLATES_ROOT, 'database_views'))
class Migration(migrations.Migration):
dependencies = [
('icds_reports', '0104_agg_ls_monthly_ls_name'),
]
operations = [
migrator.get_migration('aww_incentive_report_monthly.sql'),
]
| 25.368421
| 67
| 0.753112
|
from corehq.sql_db.operations import RawSQLMigration
from django.db import migrations
from custom.icds_reports.const import SQL_TEMPLATES_ROOT
migrator = RawSQLMigration((SQL_TEMPLATES_ROOT, 'database_views'))
class Migration(migrations.Migration):
dependencies = [
('icds_reports', '0104_agg_ls_monthly_ls_name'),
]
operations = [
migrator.get_migration('aww_incentive_report_monthly.sql'),
]
| true
| true
|
f71840f3a7a1fdd44593af674c086cc6379e7e61
| 13,265
|
py
|
Python
|
mne/forward/tests/test_forward.py
|
dgwakeman/mne-python
|
3cc7a3f8456d78c828355f1860dd7e0297e59c73
|
[
"BSD-3-Clause"
] | 1
|
2020-12-15T03:07:38.000Z
|
2020-12-15T03:07:38.000Z
|
mne/forward/tests/test_forward.py
|
dgwakeman/mne-python
|
3cc7a3f8456d78c828355f1860dd7e0297e59c73
|
[
"BSD-3-Clause"
] | null | null | null |
mne/forward/tests/test_forward.py
|
dgwakeman/mne-python
|
3cc7a3f8456d78c828355f1860dd7e0297e59c73
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import os.path as op
import warnings
import gc
from nose.tools import assert_true, assert_raises
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
from mne.datasets import testing
from mne.io import Raw
from mne import (read_forward_solution, apply_forward, apply_forward_raw,
average_forward_solutions, write_forward_solution,
convert_forward_solution)
from mne import SourceEstimate, pick_types_forward, read_evokeds
from mne.label import read_label
from mne.utils import (requires_mne, run_subprocess, _TempDir,
run_tests_if_main, slow_test)
from mne.forward import (restrict_forward_to_stc, restrict_forward_to_label,
Forward)
data_path = testing.data_path(download=False)
fname_meeg = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_meeg_grad = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-2-grad-fwd.fif')
fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
'test_raw.fif')
fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-ave.fif')
fname_mri = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
subjects_dir = os.path.join(data_path, 'subjects')
fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')
def compare_forwards(f1, f2):
"""Helper to compare two potentially converted forward solutions"""
assert_allclose(f1['sol']['data'], f2['sol']['data'])
assert_equal(f1['sol']['ncol'], f2['sol']['ncol'])
assert_allclose(f1['source_nn'], f2['source_nn'])
if f1['sol_grad'] is not None:
assert_true(f2['sol_grad'] is not None)
assert_allclose(f1['sol_grad']['data'], f2['sol_grad']['data'])
assert_equal(f1['sol_grad']['ncol'], f2['sol_grad']['ncol'])
else:
assert_true(f2['sol_grad'] is None)
assert_equal(f1['source_ori'], f2['source_ori'])
assert_equal(f1['surf_ori'], f2['surf_ori'])
@testing.requires_testing_data
def test_convert_forward():
"""Test converting forward solution between different representations
"""
fwd = read_forward_solution(fname_meeg_grad)
assert_true(repr(fwd))
assert_true(isinstance(fwd, Forward))
# look at surface orientation
fwd_surf = convert_forward_solution(fwd, surf_ori=True)
fwd_surf_io = read_forward_solution(fname_meeg_grad, surf_ori=True)
compare_forwards(fwd_surf, fwd_surf_io)
del fwd_surf_io
gc.collect()
# go back
fwd_new = convert_forward_solution(fwd_surf, surf_ori=False)
assert_true(repr(fwd_new))
assert_true(isinstance(fwd_new, Forward))
compare_forwards(fwd, fwd_new)
# now go to fixed
fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=False,
force_fixed=True)
del fwd_surf
gc.collect()
assert_true(repr(fwd_fixed))
assert_true(isinstance(fwd_fixed, Forward))
fwd_fixed_io = read_forward_solution(fname_meeg_grad, surf_ori=False,
force_fixed=True)
compare_forwards(fwd_fixed, fwd_fixed_io)
del fwd_fixed_io
gc.collect()
# now go back to cartesian (original condition)
fwd_new = convert_forward_solution(fwd_fixed)
assert_true(repr(fwd_new))
assert_true(isinstance(fwd_new, Forward))
compare_forwards(fwd, fwd_new)
del fwd, fwd_new, fwd_fixed
gc.collect()
@slow_test
@testing.requires_testing_data
def test_io_forward():
"""Test IO for forward solutions
"""
temp_dir = _TempDir()
# do extensive tests with MEEG + grad
n_channels, n_src = 366, 108
fwd = read_forward_solution(fname_meeg_grad)
assert_true(isinstance(fwd, Forward))
fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
leadfield = fwd['sol']['data']
assert_equal(leadfield.shape, (n_channels, n_src))
assert_equal(len(fwd['sol']['row_names']), n_channels)
fname_temp = op.join(temp_dir, 'test-fwd.fif')
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
fwd_read = read_forward_solution(fname_temp, surf_ori=True)
leadfield = fwd_read['sol']['data']
assert_equal(leadfield.shape, (n_channels, n_src))
assert_equal(len(fwd_read['sol']['row_names']), n_channels)
assert_equal(len(fwd_read['info']['chs']), n_channels)
assert_true('dev_head_t' in fwd_read['info'])
assert_true('mri_head_t' in fwd_read)
assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data'])
fwd = read_forward_solution(fname_meeg_grad, force_fixed=True)
leadfield = fwd['sol']['data']
assert_equal(leadfield.shape, (n_channels, n_src / 3))
assert_equal(len(fwd['sol']['row_names']), n_channels)
assert_equal(len(fwd['info']['chs']), n_channels)
assert_true('dev_head_t' in fwd['info'])
assert_true('mri_head_t' in fwd)
assert_true(fwd['surf_ori'])
# test warnings on bad filenames
fwd = read_forward_solution(fname_meeg_grad)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
fwd_badname = op.join(temp_dir, 'test-bad-name.fif.gz')
write_forward_solution(fwd_badname, fwd)
read_forward_solution(fwd_badname)
assert_true(len(w) == 2)
fwd = read_forward_solution(fname_meeg)
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd_read = read_forward_solution(fname_temp)
compare_forwards(fwd, fwd_read)
@testing.requires_testing_data
def test_apply_forward():
"""Test projection of source space data to sensor space
"""
start = 0
stop = 5
n_times = stop - start - 1
sfreq = 10.0
t_start = 0.123
fwd = read_forward_solution(fname_meeg, force_fixed=True)
fwd = pick_types_forward(fwd, meg=True)
assert_true(isinstance(fwd, Forward))
vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
gain_sum = np.sum(fwd['sol']['data'], axis=1)
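    # With unit-amplitude sources, each channel's data summed over time should equal n_times * gain_sum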
# Evoked
with warnings.catch_warnings(record=True) as w:
evoked = read_evokeds(fname_evoked, condition=0)
evoked = apply_forward(fwd, stc, evoked, start=start, stop=stop)
assert_equal(len(w), 2)
data = evoked.data
times = evoked.times
# do some tests
assert_array_almost_equal(evoked.info['sfreq'], sfreq)
assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
assert_array_almost_equal(times[0], t_start)
assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)
# Raw
raw = Raw(fname_raw)
raw_proj = apply_forward_raw(fwd, stc, raw, start=start, stop=stop)
data, times = raw_proj[:, :]
# do some tests
assert_array_almost_equal(raw_proj.info['sfreq'], sfreq)
assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
atol = 1. / sfreq
assert_allclose(raw_proj.first_samp / sfreq, t_start, atol=atol)
assert_allclose(raw_proj.last_samp / sfreq,
t_start + (n_times - 1) / sfreq, atol=atol)
@testing.requires_testing_data
def test_restrict_forward_to_stc():
"""Test restriction of source space to source SourceEstimate
"""
start = 0
stop = 5
n_times = stop - start - 1
sfreq = 10.0
t_start = 0.123
fwd = read_forward_solution(fname_meeg, force_fixed=True)
fwd = pick_types_forward(fwd, meg=True)
vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
fwd_out = restrict_forward_to_stc(fwd, stc)
assert_true(isinstance(fwd_out, Forward))
assert_equal(fwd_out['sol']['ncol'], 20)
assert_equal(fwd_out['src'][0]['nuse'], 15)
assert_equal(fwd_out['src'][1]['nuse'], 5)
assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
fwd = read_forward_solution(fname_meeg, force_fixed=False)
fwd = pick_types_forward(fwd, meg=True)
vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
fwd_out = restrict_forward_to_stc(fwd, stc)
assert_equal(fwd_out['sol']['ncol'], 60)
assert_equal(fwd_out['src'][0]['nuse'], 15)
assert_equal(fwd_out['src'][1]['nuse'], 5)
assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
@testing.requires_testing_data
def test_restrict_forward_to_label():
"""Test restriction of source space to label
"""
fwd = read_forward_solution(fname_meeg, force_fixed=True)
fwd = pick_types_forward(fwd, meg=True)
label_path = op.join(data_path, 'MEG', 'sample', 'labels')
labels = ['Aud-lh', 'Vis-rh']
label_lh = read_label(op.join(label_path, labels[0] + '.label'))
label_rh = read_label(op.join(label_path, labels[1] + '.label'))
fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +
len(fwd['src'][0]['vertno']))
assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh))
assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)
assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
fwd = read_forward_solution(fname_meeg, force_fixed=False)
fwd = pick_types_forward(fwd, meg=True)
label_path = op.join(data_path, 'MEG', 'sample', 'labels')
labels = ['Aud-lh', 'Vis-rh']
label_lh = read_label(op.join(label_path, labels[0] + '.label'))
label_rh = read_label(op.join(label_path, labels[1] + '.label'))
fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +
len(fwd['src'][0]['vertno']))
assert_equal(fwd_out['sol']['ncol'],
3 * (len(src_sel_lh) + len(src_sel_rh)))
assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)
assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
@testing.requires_testing_data
@requires_mne
def test_average_forward_solution():
"""Test averaging forward solutions
"""
temp_dir = _TempDir()
fwd = read_forward_solution(fname_meeg)
# input not a list
assert_raises(TypeError, average_forward_solutions, 1)
# list is too short
assert_raises(ValueError, average_forward_solutions, [])
# negative weights
assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [-1, 0])
# all zero weights
assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0])
# weights not same length
assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0, 0])
# list does not only have all dict()
assert_raises(TypeError, average_forward_solutions, [1, fwd])
# try an easy case
fwd_copy = average_forward_solutions([fwd])
assert_true(isinstance(fwd_copy, Forward))
assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data'])
# modify a fwd solution, save it, use MNE to average with old one
fwd_copy['sol']['data'] *= 0.5
fname_copy = op.join(temp_dir, 'copy-fwd.fif')
write_forward_solution(fname_copy, fwd_copy, overwrite=True)
cmd = ('mne_average_forward_solutions', '--fwd', fname_meeg, '--fwd',
fname_copy, '--out', fname_copy)
run_subprocess(cmd)
# now let's actually do it, with one filename and one fwd
fwd_ave = average_forward_solutions([fwd, fwd_copy])
assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data'])
# fwd_ave_mne = read_forward_solution(fname_copy)
# assert_array_equal(fwd_ave_mne['sol']['data'], fwd_ave['sol']['data'])
# with gradient
fwd = read_forward_solution(fname_meeg_grad)
fwd_ave = average_forward_solutions([fwd, fwd])
compare_forwards(fwd, fwd_ave)
run_tests_if_main()
| 39.954819
| 79
| 0.672597
|
import os
import os.path as op
import warnings
import gc
from nose.tools import assert_true, assert_raises
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
from mne.datasets import testing
from mne.io import Raw
from mne import (read_forward_solution, apply_forward, apply_forward_raw,
average_forward_solutions, write_forward_solution,
convert_forward_solution)
from mne import SourceEstimate, pick_types_forward, read_evokeds
from mne.label import read_label
from mne.utils import (requires_mne, run_subprocess, _TempDir,
run_tests_if_main, slow_test)
from mne.forward import (restrict_forward_to_stc, restrict_forward_to_label,
Forward)
data_path = testing.data_path(download=False)
fname_meeg = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_meeg_grad = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-2-grad-fwd.fif')
fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
'test_raw.fif')
fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-ave.fif')
fname_mri = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
subjects_dir = os.path.join(data_path, 'subjects')
fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')
def compare_forwards(f1, f2):
assert_allclose(f1['sol']['data'], f2['sol']['data'])
assert_equal(f1['sol']['ncol'], f2['sol']['ncol'])
assert_allclose(f1['source_nn'], f2['source_nn'])
if f1['sol_grad'] is not None:
assert_true(f2['sol_grad'] is not None)
assert_allclose(f1['sol_grad']['data'], f2['sol_grad']['data'])
assert_equal(f1['sol_grad']['ncol'], f2['sol_grad']['ncol'])
else:
assert_true(f2['sol_grad'] is None)
assert_equal(f1['source_ori'], f2['source_ori'])
assert_equal(f1['surf_ori'], f2['surf_ori'])
@testing.requires_testing_data
def test_convert_forward():
fwd = read_forward_solution(fname_meeg_grad)
assert_true(repr(fwd))
assert_true(isinstance(fwd, Forward))
fwd_surf = convert_forward_solution(fwd, surf_ori=True)
fwd_surf_io = read_forward_solution(fname_meeg_grad, surf_ori=True)
compare_forwards(fwd_surf, fwd_surf_io)
del fwd_surf_io
gc.collect()
fwd_new = convert_forward_solution(fwd_surf, surf_ori=False)
assert_true(repr(fwd_new))
assert_true(isinstance(fwd_new, Forward))
compare_forwards(fwd, fwd_new)
fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=False,
force_fixed=True)
del fwd_surf
gc.collect()
assert_true(repr(fwd_fixed))
assert_true(isinstance(fwd_fixed, Forward))
fwd_fixed_io = read_forward_solution(fname_meeg_grad, surf_ori=False,
force_fixed=True)
compare_forwards(fwd_fixed, fwd_fixed_io)
del fwd_fixed_io
gc.collect()
fwd_new = convert_forward_solution(fwd_fixed)
assert_true(repr(fwd_new))
assert_true(isinstance(fwd_new, Forward))
compare_forwards(fwd, fwd_new)
del fwd, fwd_new, fwd_fixed
gc.collect()
@slow_test
@testing.requires_testing_data
def test_io_forward():
temp_dir = _TempDir()
n_channels, n_src = 366, 108
fwd = read_forward_solution(fname_meeg_grad)
assert_true(isinstance(fwd, Forward))
fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
leadfield = fwd['sol']['data']
assert_equal(leadfield.shape, (n_channels, n_src))
assert_equal(len(fwd['sol']['row_names']), n_channels)
fname_temp = op.join(temp_dir, 'test-fwd.fif')
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
fwd_read = read_forward_solution(fname_temp, surf_ori=True)
leadfield = fwd_read['sol']['data']
assert_equal(leadfield.shape, (n_channels, n_src))
assert_equal(len(fwd_read['sol']['row_names']), n_channels)
assert_equal(len(fwd_read['info']['chs']), n_channels)
assert_true('dev_head_t' in fwd_read['info'])
assert_true('mri_head_t' in fwd_read)
assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data'])
fwd = read_forward_solution(fname_meeg_grad, force_fixed=True)
leadfield = fwd['sol']['data']
assert_equal(leadfield.shape, (n_channels, n_src / 3))
assert_equal(len(fwd['sol']['row_names']), n_channels)
assert_equal(len(fwd['info']['chs']), n_channels)
assert_true('dev_head_t' in fwd['info'])
assert_true('mri_head_t' in fwd)
assert_true(fwd['surf_ori'])
fwd = read_forward_solution(fname_meeg_grad)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
fwd_badname = op.join(temp_dir, 'test-bad-name.fif.gz')
write_forward_solution(fwd_badname, fwd)
read_forward_solution(fwd_badname)
assert_true(len(w) == 2)
fwd = read_forward_solution(fname_meeg)
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd_read = read_forward_solution(fname_temp)
compare_forwards(fwd, fwd_read)
@testing.requires_testing_data
def test_apply_forward():
start = 0
stop = 5
n_times = stop - start - 1
sfreq = 10.0
t_start = 0.123
fwd = read_forward_solution(fname_meeg, force_fixed=True)
fwd = pick_types_forward(fwd, meg=True)
assert_true(isinstance(fwd, Forward))
vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
gain_sum = np.sum(fwd['sol']['data'], axis=1)
with warnings.catch_warnings(record=True) as w:
evoked = read_evokeds(fname_evoked, condition=0)
evoked = apply_forward(fwd, stc, evoked, start=start, stop=stop)
assert_equal(len(w), 2)
data = evoked.data
times = evoked.times
assert_array_almost_equal(evoked.info['sfreq'], sfreq)
assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
assert_array_almost_equal(times[0], t_start)
assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)
raw = Raw(fname_raw)
raw_proj = apply_forward_raw(fwd, stc, raw, start=start, stop=stop)
data, times = raw_proj[:, :]
assert_array_almost_equal(raw_proj.info['sfreq'], sfreq)
assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
atol = 1. / sfreq
assert_allclose(raw_proj.first_samp / sfreq, t_start, atol=atol)
assert_allclose(raw_proj.last_samp / sfreq,
t_start + (n_times - 1) / sfreq, atol=atol)
@testing.requires_testing_data
def test_restrict_forward_to_stc():
start = 0
stop = 5
n_times = stop - start - 1
sfreq = 10.0
t_start = 0.123
fwd = read_forward_solution(fname_meeg, force_fixed=True)
fwd = pick_types_forward(fwd, meg=True)
vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
fwd_out = restrict_forward_to_stc(fwd, stc)
assert_true(isinstance(fwd_out, Forward))
assert_equal(fwd_out['sol']['ncol'], 20)
assert_equal(fwd_out['src'][0]['nuse'], 15)
assert_equal(fwd_out['src'][1]['nuse'], 5)
assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
fwd = read_forward_solution(fname_meeg, force_fixed=False)
fwd = pick_types_forward(fwd, meg=True)
vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
fwd_out = restrict_forward_to_stc(fwd, stc)
assert_equal(fwd_out['sol']['ncol'], 60)
assert_equal(fwd_out['src'][0]['nuse'], 15)
assert_equal(fwd_out['src'][1]['nuse'], 5)
assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
@testing.requires_testing_data
def test_restrict_forward_to_label():
fwd = read_forward_solution(fname_meeg, force_fixed=True)
fwd = pick_types_forward(fwd, meg=True)
label_path = op.join(data_path, 'MEG', 'sample', 'labels')
labels = ['Aud-lh', 'Vis-rh']
label_lh = read_label(op.join(label_path, labels[0] + '.label'))
label_rh = read_label(op.join(label_path, labels[1] + '.label'))
fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +
len(fwd['src'][0]['vertno']))
assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh))
assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)
assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
fwd = read_forward_solution(fname_meeg, force_fixed=False)
fwd = pick_types_forward(fwd, meg=True)
label_path = op.join(data_path, 'MEG', 'sample', 'labels')
labels = ['Aud-lh', 'Vis-rh']
label_lh = read_label(op.join(label_path, labels[0] + '.label'))
label_rh = read_label(op.join(label_path, labels[1] + '.label'))
fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +
len(fwd['src'][0]['vertno']))
assert_equal(fwd_out['sol']['ncol'],
3 * (len(src_sel_lh) + len(src_sel_rh)))
assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)
assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
@testing.requires_testing_data
@requires_mne
def test_average_forward_solution():
temp_dir = _TempDir()
fwd = read_forward_solution(fname_meeg)
assert_raises(TypeError, average_forward_solutions, 1)
assert_raises(ValueError, average_forward_solutions, [])
assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [-1, 0])
assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0])
assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0, 0])
assert_raises(TypeError, average_forward_solutions, [1, fwd])
fwd_copy = average_forward_solutions([fwd])
assert_true(isinstance(fwd_copy, Forward))
assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data'])
fwd_copy['sol']['data'] *= 0.5
fname_copy = op.join(temp_dir, 'copy-fwd.fif')
write_forward_solution(fname_copy, fwd_copy, overwrite=True)
cmd = ('mne_average_forward_solutions', '--fwd', fname_meeg, '--fwd',
fname_copy, '--out', fname_copy)
run_subprocess(cmd)
fwd_ave = average_forward_solutions([fwd, fwd_copy])
assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data'])
# fwd_ave_mne = read_forward_solution(fname_copy)
# assert_array_equal(fwd_ave_mne['sol']['data'], fwd_ave['sol']['data'])
# with gradient
fwd = read_forward_solution(fname_meeg_grad)
fwd_ave = average_forward_solutions([fwd, fwd])
compare_forwards(fwd, fwd_ave)
run_tests_if_main()
| true
| true
|
f71841007efb94c107588e1a059e02b58a6e4403
| 4,624
|
py
|
Python
|
models/wide_resnet.py
|
christophbrgr/ood_detection_framework
|
c3b7e3064ed8ee4aeb112cd2ab946ee41636f79f
|
[
"MIT"
] | 7
|
2021-07-26T14:28:51.000Z
|
2021-11-18T13:20:00.000Z
|
models/wide_resnet.py
|
christophbrgr/ood_detection_framework
|
c3b7e3064ed8ee4aeb112cd2ab946ee41636f79f
|
[
"MIT"
] | null | null | null |
models/wide_resnet.py
|
christophbrgr/ood_detection_framework
|
c3b7e3064ed8ee4aeb112cd2ab946ee41636f79f
|
[
"MIT"
] | null | null | null |
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
def conv_init(m):
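    # Xavier-initialise Conv weights (gain sqrt(2)) and zero their biases; set BatchNorm weight to 1 and bias to 0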
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.xavier_uniform_(m.weight, gain=np.sqrt(2))
init.constant_(m.bias, 0)
elif classname.find('BatchNorm') != -1:
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
class wide_basic(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1):
super(wide_basic, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, padding=1, bias=True)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=stride, padding=1, bias=True)
self.shortcut = nn.Sequential()
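        # Project the input with a 1x1 convolution when the stride or channel count changes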
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1,
stride=stride, bias=True),
)
def forward(self, x):
out = self.dropout(self.conv1(F.relu(self.bn1(x))))
out = self.conv2(F.relu(self.bn2(out)))
out += self.shortcut(x)
return out
class Wide_ResNet(nn.Module):
def __init__(self, depth, widen_factor, dropout_rate, num_classes):
super(Wide_ResNet, self).__init__()
self.in_planes = 16
assert ((depth-4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
n = (depth-4)/6
k = widen_factor
print('Wide-Resnet %dx%d' % (depth, k))
nStages = [16, 16*k, 32*k, 64*k]
self.conv1 = conv3x3(3, nStages[0])
self.layer1 = self._wide_layer(
wide_basic, nStages[1], n, dropout_rate, stride=1)
self.layer2 = self._wide_layer(
wide_basic, nStages[2], n, dropout_rate, stride=2)
self.layer3 = self._wide_layer(
wide_basic, nStages[3], n, dropout_rate, stride=2)
self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
self.linear = nn.Linear(nStages[3], num_classes)
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
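        # Only the first block of the group applies the requested stride; the remaining blocks use stride 1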
strides = [stride] + [1]*(int(num_blocks)-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, dropout_rate, stride))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
# print(f'Shape before avg pooling: {out.shape}')
out = F.avg_pool2d(out, int(out.shape[3]))
# print(f'Shape after avg pooling: {out.shape}')
out = out.view(out.size(0), -1)
penultimate = out
out = self.linear(out)
return out, penultimate
# feature extraction for Mahalanobis
def feature_list(self, x):
out_list = []
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
# print shape
# print(f'Shape: {out.shape}')
# out2 = F.max_pool3d(out, (4,4,4))
out2 = F.max_pool2d(out, (8,8))
out_list.append(out2)
print(f'Shape: {out2.shape}')
out = F.avg_pool2d(out, int(out.shape[3]))
out = out.view(out.size(0), -1)
return self.linear(out), out_list
def intermediate_forward(self, x, layer_index):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
return F.max_pool2d(out, (8,8))# F.max_pool3d(out, (4,4,4))
# function to extract the penultimate features
def penultimate_forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
penultimate = F.relu(self.bn1(out))
penultimate = F.max_pool2d(penultimate, (8,8))
# penultimate = F.max_pool3d(penultimate, (4,4,4))
out = F.avg_pool2d(penultimate, int(out.shape[3]))
out = out.view(out.size(0), -1)
return self.linear(out), penultimate
| 33.507246
| 95
| 0.592777
|
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
def conv_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.xavier_uniform_(m.weight, gain=np.sqrt(2))
init.constant_(m.bias, 0)
elif classname.find('BatchNorm') != -1:
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
class wide_basic(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1):
super(wide_basic, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, padding=1, bias=True)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=stride, padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1,
stride=stride, bias=True),
)
def forward(self, x):
out = self.dropout(self.conv1(F.relu(self.bn1(x))))
out = self.conv2(F.relu(self.bn2(out)))
out += self.shortcut(x)
return out
class Wide_ResNet(nn.Module):
def __init__(self, depth, widen_factor, dropout_rate, num_classes):
super(Wide_ResNet, self).__init__()
self.in_planes = 16
assert ((depth-4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
n = (depth-4)/6
k = widen_factor
print('Wide-Resnet %dx%d' % (depth, k))
nStages = [16, 16*k, 32*k, 64*k]
self.conv1 = conv3x3(3, nStages[0])
self.layer1 = self._wide_layer(
wide_basic, nStages[1], n, dropout_rate, stride=1)
self.layer2 = self._wide_layer(
wide_basic, nStages[2], n, dropout_rate, stride=2)
self.layer3 = self._wide_layer(
wide_basic, nStages[3], n, dropout_rate, stride=2)
self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
self.linear = nn.Linear(nStages[3], num_classes)
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
strides = [stride] + [1]*(int(num_blocks)-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, dropout_rate, stride))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
out = F.avg_pool2d(out, int(out.shape[3]))
out = out.view(out.size(0), -1)
penultimate = out
out = self.linear(out)
return out, penultimate
def feature_list(self, x):
out_list = []
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
out2 = F.max_pool2d(out, (8,8))
out_list.append(out2)
print(f'Shape: {out2.shape}')
out = F.avg_pool2d(out, int(out.shape[3]))
out = out.view(out.size(0), -1)
return self.linear(out), out_list
def intermediate_forward(self, x, layer_index):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
return F.max_pool2d(out, (8,8))
def penultimate_forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
penultimate = F.relu(self.bn1(out))
penultimate = F.max_pool2d(penultimate, (8,8))
out = F.avg_pool2d(penultimate, int(out.shape[3]))
out = out.view(out.size(0), -1)
return self.linear(out), penultimate
| true
| true
|
f71841789466e4e20df0d21ef95d9c6b8ff31374
| 638
|
py
|
Python
|
examples/jobs.py
|
0x9fff00/flask-apscheduler
|
cc52c39e1948c4e8de5da0d01db45f1779f61997
|
[
"Apache-2.0"
] | 1
|
2021-02-08T06:53:31.000Z
|
2021-02-08T06:53:31.000Z
|
examples/jobs.py
|
0x9fff00/flask-apscheduler
|
cc52c39e1948c4e8de5da0d01db45f1779f61997
|
[
"Apache-2.0"
] | null | null | null |
examples/jobs.py
|
0x9fff00/flask-apscheduler
|
cc52c39e1948c4e8de5da0d01db45f1779f61997
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask
from flask_apscheduler import APScheduler
class Config(object):
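    # Schedule job1 to run every 10 seconds with arguments (1, 2)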
JOBS = [
{
'id': 'job1',
'func': 'jobs:job1',
'args': (1, 2),
'trigger': 'interval',
'seconds': 10
}
]
SCHEDULER_API_ENABLED = True
def job1(a, b):
print(str(a) + ' ' + str(b))
if __name__ == '__main__':
app = Flask(__name__)
app.config.from_object(Config())
scheduler = APScheduler()
# it is also possible to enable the API directly
# scheduler.api_enabled = True
scheduler.init_app(app)
scheduler.start()
app.run()
| 18.764706
| 52
| 0.557994
|
from flask import Flask
from flask_apscheduler import APScheduler
class Config(object):
JOBS = [
{
'id': 'job1',
'func': 'jobs:job1',
'args': (1, 2),
'trigger': 'interval',
'seconds': 10
}
]
SCHEDULER_API_ENABLED = True
def job1(a, b):
print(str(a) + ' ' + str(b))
if __name__ == '__main__':
app = Flask(__name__)
app.config.from_object(Config())
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()
app.run()
| true
| true
|
f71841cab0f8915fee87ecc76cec17035187d190
| 3,469
|
py
|
Python
|
sdk/python/pulumi_gcp/compute/__init__.py
|
23doors/pulumi-gcp
|
ded01b199f95b164884266ea3e6f8206c8231270
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/compute/__init__.py
|
23doors/pulumi-gcp
|
ded01b199f95b164884266ea3e6f8206c8231270
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/compute/__init__.py
|
23doors/pulumi-gcp
|
ded01b199f95b164884266ea3e6f8206c8231270
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .address import *
from .attached_disk import *
from .autoscalar import *
from .backend_bucket import *
from .backend_bucket_signed_url_key import *
from .backend_service import *
from .backend_service_signed_url_key import *
from .disk import *
from .disk_resource_policy_attachment import *
from .external_vpn_gateway import *
from .firewall import *
from .forwarding_rule import *
from .global_address import *
from .global_forwarding_rule import *
from .ha_vpn_gateway import *
from .health_check import *
from .http_health_check import *
from .https_health_check import *
from .image import *
from .instance import *
from .instance_from_template import *
from .instance_group import *
from .instance_group_manager import *
from .instance_iam_binding import *
from .instance_iam_member import *
from .instance_iam_policy import *
from .instance_template import *
from .interconnect_attachment import *
from .managed_ssl_certificate import *
from .manged_ssl_certificate import *
from .network import *
from .network_endpoint import *
from .network_endpoint_group import *
from .network_peering import *
from .network_peering_routes_config import *
from .node_group import *
from .node_template import *
from .packet_mirroring import *
from .project_default_network_tier import *
from .project_metadata import *
from .project_metadata_item import *
from .region_autoscaler import *
from .region_backend_service import *
from .region_disk import *
from .region_health_check import *
from .region_instance_group_manager import *
from .region_ssl_certificate import *
from .region_target_http_proxy import *
from .region_target_https_proxy import *
from .region_url_map import *
from .reservation import *
from .resource_policy import *
from .route import *
from .router import *
from .router_interface import *
from .router_nat import *
from .router_peer import *
from .security_policy import *
from .shared_vpc_host_project import *
from .shared_vpc_service_project import *
from .snapshot import *
from .ssl_certificate import *
from .ssl_policy import *
from .subnetwork import *
from .subnetwork_iam_binding import *
from .subnetwork_iam_member import *
from .subnetwork_iam_policy import *
from .target_http_proxy import *
from .target_https_proxy import *
from .target_instance import *
from .target_pool import *
from .target_ssl_proxy import *
from .target_tcp_proxy import *
from .url_map import *
from .vpn_gateway import *
from .vpn_tunnel import *
from .security_scan_config import *
from .get_address import *
from .get_backend_bucket import *
from .get_backend_service import *
from .get_default_service_account import *
from .get_forwarding_rule import *
from .get_global_address import *
from .get_image import *
from .get_instance import *
from .get_instance_group import *
from .get_lbip_ranges import *
from .get_network import *
from .get_network_endpoint_group import *
from .get_node_types import *
from .get_region_instance_group import *
from .get_regions import *
from .get_resource_policy import *
from .get_router import *
from .get_certificate import *
from .get_ssl_policy import *
from .get_subnetwork import *
from .get_vpn_gateway import *
from .get_zones import *
from .get_netblock_ip_ranges import *
| 32.726415
| 87
| 0.807437
|
# Export this package's modules as members:
from .address import *
from .attached_disk import *
from .autoscalar import *
from .backend_bucket import *
from .backend_bucket_signed_url_key import *
from .backend_service import *
from .backend_service_signed_url_key import *
from .disk import *
from .disk_resource_policy_attachment import *
from .external_vpn_gateway import *
from .firewall import *
from .forwarding_rule import *
from .global_address import *
from .global_forwarding_rule import *
from .ha_vpn_gateway import *
from .health_check import *
from .http_health_check import *
from .https_health_check import *
from .image import *
from .instance import *
from .instance_from_template import *
from .instance_group import *
from .instance_group_manager import *
from .instance_iam_binding import *
from .instance_iam_member import *
from .instance_iam_policy import *
from .instance_template import *
from .interconnect_attachment import *
from .managed_ssl_certificate import *
from .manged_ssl_certificate import *
from .network import *
from .network_endpoint import *
from .network_endpoint_group import *
from .network_peering import *
from .network_peering_routes_config import *
from .node_group import *
from .node_template import *
from .packet_mirroring import *
from .project_default_network_tier import *
from .project_metadata import *
from .project_metadata_item import *
from .region_autoscaler import *
from .region_backend_service import *
from .region_disk import *
from .region_health_check import *
from .region_instance_group_manager import *
from .region_ssl_certificate import *
from .region_target_http_proxy import *
from .region_target_https_proxy import *
from .region_url_map import *
from .reservation import *
from .resource_policy import *
from .route import *
from .router import *
from .router_interface import *
from .router_nat import *
from .router_peer import *
from .security_policy import *
from .shared_vpc_host_project import *
from .shared_vpc_service_project import *
from .snapshot import *
from .ssl_certificate import *
from .ssl_policy import *
from .subnetwork import *
from .subnetwork_iam_binding import *
from .subnetwork_iam_member import *
from .subnetwork_iam_policy import *
from .target_http_proxy import *
from .target_https_proxy import *
from .target_instance import *
from .target_pool import *
from .target_ssl_proxy import *
from .target_tcp_proxy import *
from .url_map import *
from .vpn_gateway import *
from .vpn_tunnel import *
from .security_scan_config import *
from .get_address import *
from .get_backend_bucket import *
from .get_backend_service import *
from .get_default_service_account import *
from .get_forwarding_rule import *
from .get_global_address import *
from .get_image import *
from .get_instance import *
from .get_instance_group import *
from .get_lbip_ranges import *
from .get_network import *
from .get_network_endpoint_group import *
from .get_node_types import *
from .get_region_instance_group import *
from .get_regions import *
from .get_resource_policy import *
from .get_router import *
from .get_certificate import *
from .get_ssl_policy import *
from .get_subnetwork import *
from .get_vpn_gateway import *
from .get_zones import *
from .get_netblock_ip_ranges import *
| true
| true
|
f71844d3d33365dc18ad7dca788bcce625f77326
| 8,255
|
py
|
Python
|
taiga/projects/milestones/models.py
|
threefoldtech/Threefold-Circles
|
cbc433796b25cf7af9a295af65d665a4a279e2d6
|
[
"Apache-2.0"
] | null | null | null |
taiga/projects/milestones/models.py
|
threefoldtech/Threefold-Circles
|
cbc433796b25cf7af9a295af65d665a4a279e2d6
|
[
"Apache-2.0"
] | 12
|
2019-11-25T14:08:32.000Z
|
2021-06-24T10:35:51.000Z
|
taiga/projects/milestones/models.py
|
threefoldtech/Threefold-Circles
|
cbc433796b25cf7af9a295af65d665a4a279e2d6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.db.models import Count
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.core.exceptions import ValidationError
from django.utils.functional import cached_property
from taiga.base.utils.slug import slugify_uniquely
from taiga.base.utils.dicts import dict_sum
from taiga.projects.notifications.mixins import WatchedModelMixin
import itertools
import datetime
class Milestone(WatchedModelMixin, models.Model):
name = models.CharField(max_length=200, db_index=True, null=False, blank=False,
verbose_name=_("name"))
# TODO: Change the unique restriction to a unique together with the project id
slug = models.SlugField(max_length=250, db_index=True, null=False, blank=True,
verbose_name=_("slug"))
owner = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True,
related_name="owned_milestones", verbose_name=_("owner"))
project = models.ForeignKey("projects.Project", null=False, blank=False,
related_name="milestones", verbose_name=_("project"))
estimated_start = models.DateField(verbose_name=_("estimated start date"))
estimated_finish = models.DateField(verbose_name=_("estimated finish date"))
created_date = models.DateTimeField(null=False, blank=False,
verbose_name=_("created date"),
default=timezone.now)
modified_date = models.DateTimeField(null=False, blank=False,
verbose_name=_("modified date"))
closed = models.BooleanField(default=False, null=False, blank=True,
verbose_name=_("is closed"))
disponibility = models.FloatField(default=0.0, null=True, blank=True,
verbose_name=_("disponibility"))
order = models.PositiveSmallIntegerField(default=1, null=False, blank=False,
verbose_name=_("order"))
_importing = None
_total_closed_points_by_date = None
class Meta:
verbose_name = "milestone"
verbose_name_plural = "milestones"
ordering = ["project", "created_date"]
unique_together = [("name", "project"), ("slug", "project")]
permissions = (
("view_milestone", "Can view milestone"),
)
def __str__(self):
return self.name
def __repr__(self):
return "<Milestone {0}>".format(self.id)
def clean(self):
        # The estimated start date must not be later than the estimated finish date.
if self.estimated_start and self.estimated_finish and self.estimated_start > self.estimated_finish:
raise ValidationError(_('The estimated start must be previous to the estimated finish.'))
def save(self, *args, **kwargs):
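        # Refresh modified_date on a normal save; when importing, keep an existing timestamp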
if not self._importing or not self.modified_date:
self.modified_date = timezone.now()
if not self.slug:
self.slug = slugify_uniquely(self.name, self.__class__)
super().save(*args, **kwargs)
@cached_property
def cached_user_stories(self):
return (self.user_stories.prefetch_related("role_points", "role_points__points")
.annotate(num_tasks=Count("tasks")))
def _get_user_stories_points(self, user_stories):
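        # Sum the points of every user story per role, counting unset point values as 0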
role_points = [us.role_points.all() for us in user_stories]
flat_role_points = itertools.chain(*role_points)
flat_role_dicts = map(lambda x: {x.role_id: x.points.value if x.points.value else 0}, flat_role_points)
return dict_sum(*flat_role_dicts)
@property
def total_points(self):
return self._get_user_stories_points(
[us for us in self.cached_user_stories]
)
@property
def closed_points(self):
return self._get_user_stories_points(
[us for us in self.cached_user_stories if us.is_closed]
)
def total_closed_points_by_date(self, date):
# Milestone instance will keep a cache of the total closed points by date
if self._total_closed_points_by_date is None:
self._total_closed_points_by_date = {}
# We need to keep the milestone user stories indexed by id in a dict
user_stories = {}
for us in self.cached_user_stories:
us._total_us_points = sum(self._get_user_stories_points([us]).values())
user_stories[us.id] = us
tasks = self.tasks.\
select_related("user_story").\
exclude(finished_date__isnull=True).\
exclude(user_story__isnull=True)
        # For each finished task we try to know the proportional part of points
        # it represents from the user story and add it to the closed points
        # for that date.
        # This calculation is the total user story points divided by its number of tasks.
for task in tasks:
user_story = user_stories.get(task.user_story.id, None)
if user_story is None:
total_us_points = 0
us_tasks_counter = 0
else:
total_us_points = user_story._total_us_points
us_tasks_counter = user_story.num_tasks
            # If the task was finished before the sprint started, count it
            # on the sprint's first day
finished_date = task.finished_date.date()
if finished_date < self.estimated_start:
finished_date = self.estimated_start
points_by_date = self._total_closed_points_by_date.get(finished_date, 0)
if us_tasks_counter != 0:
points_by_date += total_us_points / us_tasks_counter
self._total_closed_points_by_date[finished_date] = points_by_date
for us in self.cached_user_stories:
if us.num_tasks > 0 or us.finish_date is None:
continue
finished_date = us.finish_date.date()
if finished_date < self.estimated_start:
finished_date = self.estimated_start
points_by_date = self._total_closed_points_by_date.get(finished_date, 0)
points_by_date += us._total_us_points
self._total_closed_points_by_date[finished_date] = points_by_date
        # At this point self._total_closed_points_by_date keeps a dict where the
        # finished date of the task is the key and the value is the increment of points.
        # We are transforming this dict of increments into an accumulation that covers
        # all the dates of the sprint.
acumulated_date_points = 0
current_date = self.estimated_start
while current_date <= self.estimated_finish:
acumulated_date_points += self._total_closed_points_by_date.get(current_date, 0)
self._total_closed_points_by_date[current_date] = acumulated_date_points
current_date = current_date + datetime.timedelta(days=1)
return self._total_closed_points_by_date.get(date, 0)
| 46.903409
| 111
| 0.650151
|
from django.db import models
from django.db.models import Count
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.core.exceptions import ValidationError
from django.utils.functional import cached_property
from taiga.base.utils.slug import slugify_uniquely
from taiga.base.utils.dicts import dict_sum
from taiga.projects.notifications.mixins import WatchedModelMixin
import itertools
import datetime
class Milestone(WatchedModelMixin, models.Model):
name = models.CharField(max_length=200, db_index=True, null=False, blank=False,
verbose_name=_("name"))
slug = models.SlugField(max_length=250, db_index=True, null=False, blank=True,
verbose_name=_("slug"))
owner = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True,
related_name="owned_milestones", verbose_name=_("owner"))
project = models.ForeignKey("projects.Project", null=False, blank=False,
related_name="milestones", verbose_name=_("project"))
estimated_start = models.DateField(verbose_name=_("estimated start date"))
estimated_finish = models.DateField(verbose_name=_("estimated finish date"))
created_date = models.DateTimeField(null=False, blank=False,
verbose_name=_("created date"),
default=timezone.now)
modified_date = models.DateTimeField(null=False, blank=False,
verbose_name=_("modified date"))
closed = models.BooleanField(default=False, null=False, blank=True,
verbose_name=_("is closed"))
disponibility = models.FloatField(default=0.0, null=True, blank=True,
verbose_name=_("disponibility"))
order = models.PositiveSmallIntegerField(default=1, null=False, blank=False,
verbose_name=_("order"))
_importing = None
_total_closed_points_by_date = None
class Meta:
verbose_name = "milestone"
verbose_name_plural = "milestones"
ordering = ["project", "created_date"]
unique_together = [("name", "project"), ("slug", "project")]
permissions = (
("view_milestone", "Can view milestone"),
)
def __str__(self):
return self.name
def __repr__(self):
return "<Milestone {0}>".format(self.id)
def clean(self):
if self.estimated_start and self.estimated_finish and self.estimated_start > self.estimated_finish:
raise ValidationError(_('The estimated start must be previous to the estimated finish.'))
def save(self, *args, **kwargs):
if not self._importing or not self.modified_date:
self.modified_date = timezone.now()
if not self.slug:
self.slug = slugify_uniquely(self.name, self.__class__)
super().save(*args, **kwargs)
@cached_property
def cached_user_stories(self):
return (self.user_stories.prefetch_related("role_points", "role_points__points")
.annotate(num_tasks=Count("tasks")))
def _get_user_stories_points(self, user_stories):
role_points = [us.role_points.all() for us in user_stories]
flat_role_points = itertools.chain(*role_points)
flat_role_dicts = map(lambda x: {x.role_id: x.points.value if x.points.value else 0}, flat_role_points)
return dict_sum(*flat_role_dicts)
@property
def total_points(self):
return self._get_user_stories_points(
[us for us in self.cached_user_stories]
)
@property
def closed_points(self):
return self._get_user_stories_points(
[us for us in self.cached_user_stories if us.is_closed]
)
def total_closed_points_by_date(self, date):
# Milestone instance will keep a cache of the total closed points by date
if self._total_closed_points_by_date is None:
self._total_closed_points_by_date = {}
# We need to keep the milestone user stories indexed by id in a dict
user_stories = {}
for us in self.cached_user_stories:
us._total_us_points = sum(self._get_user_stories_points([us]).values())
user_stories[us.id] = us
tasks = self.tasks.\
select_related("user_story").\
exclude(finished_date__isnull=True).\
exclude(user_story__isnull=True)
        # For each finished task we try to know the proportional part of points
        # it represents from the user story and add it to the closed points
        # for that date.
        # This calculation is the total user story points divided by its number of tasks.
for task in tasks:
user_story = user_stories.get(task.user_story.id, None)
if user_story is None:
total_us_points = 0
us_tasks_counter = 0
else:
total_us_points = user_story._total_us_points
us_tasks_counter = user_story.num_tasks
            # If the task was finished before the sprint started, count it
            # on the sprint's first day
finished_date = task.finished_date.date()
if finished_date < self.estimated_start:
finished_date = self.estimated_start
points_by_date = self._total_closed_points_by_date.get(finished_date, 0)
if us_tasks_counter != 0:
points_by_date += total_us_points / us_tasks_counter
self._total_closed_points_by_date[finished_date] = points_by_date
for us in self.cached_user_stories:
if us.num_tasks > 0 or us.finish_date is None:
continue
finished_date = us.finish_date.date()
if finished_date < self.estimated_start:
finished_date = self.estimated_start
points_by_date = self._total_closed_points_by_date.get(finished_date, 0)
points_by_date += us._total_us_points
self._total_closed_points_by_date[finished_date] = points_by_date
        # At this point self._total_closed_points_by_date keeps a dict where the
        # finished date of the task is the key and the value is the increment of points.
        # We are transforming this dict of increments into an accumulation that covers
        # all the dates of the sprint.
acumulated_date_points = 0
current_date = self.estimated_start
while current_date <= self.estimated_finish:
acumulated_date_points += self._total_closed_points_by_date.get(current_date, 0)
self._total_closed_points_by_date[current_date] = acumulated_date_points
current_date = current_date + datetime.timedelta(days=1)
return self._total_closed_points_by_date.get(date, 0)
| true
| true
|
f718455a325df87d3e545c2788fcccb4ad2bfd8c
| 334
|
py
|
Python
|
setup.py
|
anthonyshook/healthcare-scraper
|
16c9fd4791e89f597f4e5066fbaa8bc02a55f63b
|
[
"MIT"
] | null | null | null |
setup.py
|
anthonyshook/healthcare-scraper
|
16c9fd4791e89f597f4e5066fbaa8bc02a55f63b
|
[
"MIT"
] | null | null | null |
setup.py
|
anthonyshook/healthcare-scraper
|
16c9fd4791e89f597f4e5066fbaa8bc02a55f63b
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
setup(
name='HealthcareScraper',
version='1.0',
packages=['HealthcareScraper'],
url='',
license='MIT',
author='anthonyshook',
author_email='',
description='Code for Fetching data from the Healthcare.gov website.',
long_description_content_type = 'text/markdown'
)
| 23.857143
| 74
| 0.685629
|
from distutils.core import setup
setup(
name='HealthcareScraper',
version='1.0',
packages=['HealthcareScraper'],
url='',
license='MIT',
author='anthonyshook',
author_email='',
description='Code for Fetching data from the Healthcare.gov website.',
long_description_content_type = 'text/markdown'
)
| true
| true
|
f71845e192e5ae9af44017bb236e7de213806278
| 177
|
py
|
Python
|
exercicio_05.py
|
marcusxyyz/Python---Geek-University
|
57ec0a5a45d3713bb74ffdae13d778c0708a4749
|
[
"Apache-2.0"
] | null | null | null |
exercicio_05.py
|
marcusxyyz/Python---Geek-University
|
57ec0a5a45d3713bb74ffdae13d778c0708a4749
|
[
"Apache-2.0"
] | null | null | null |
exercicio_05.py
|
marcusxyyz/Python---Geek-University
|
57ec0a5a45d3713bb74ffdae13d778c0708a4749
|
[
"Apache-2.0"
] | null | null | null |
"""
Read a real number and print one fifth of it.
"""
num = float(input('Digite um número real: '))
qui = num / 5
print(f'A quinta parte de {num} é {qui}')
| 22.125
| 59
| 0.632768
|
num = float(input('Digite um número real: '))
qui = num / 5
print(f'A quinta parte de {num} é {qui}')
| true
| true
|
f71846b951f60a4224bbcba15029808462901768
| 377
|
py
|
Python
|
Chapter04/c4_09_python_fv.py
|
andrewjcoxon/Hands-On-Data-Science-with-Anaconda
|
82504a059ecd284b3599fa9af2b3eb6bbd6e28f3
|
[
"MIT"
] | 25
|
2018-06-25T16:21:09.000Z
|
2022-02-08T09:28:29.000Z
|
Hands-On-Data-Science-with-Anaconda-master/Hands-On-Data-Science-with-Anaconda-master/Chapter04/c4_09_python_fv.py
|
manual123/Nacho-Jupyter-Notebooks
|
e75523434b1a90313a6b44e32b056f63de8a7135
|
[
"MIT"
] | null | null | null |
Hands-On-Data-Science-with-Anaconda-master/Hands-On-Data-Science-with-Anaconda-master/Chapter04/c4_09_python_fv.py
|
manual123/Nacho-Jupyter-Notebooks
|
e75523434b1a90313a6b44e32b056f63de8a7135
|
[
"MIT"
] | 17
|
2018-06-15T02:55:30.000Z
|
2022-03-09T15:24:42.000Z
|
"
Name : c4_09_python_fv.py
Book : Hands-on Data Science with Anaconda )
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan and James Yan
Date : 1/25/2018
email : yany@canisius.edu
paulyxy@hotmail.com
"
import numpy as np
import matplotlib.pyplot as mlt
n=np.linspace(0,10,10)
pv=100
R=0.1
fv=pv*(1+R)**n
mlt.plot(n,fv)
mlt.show()
| 20.944444
| 50
| 0.65252
|
"
Name : c4_09_python_fv.py
Book : Hands-on Data Science with Anaconda )
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan and James Yan
Date : 1/25/2018
email : yany@canisius.edu
paulyxy@hotmail.com
"
import numpy as np
import matplotlib.pyplot as mlt
n=np.linspace(0,10,10)
pv=100
R=0.1
fv=pv*(1+R)**n
mlt.plot(n,fv)
mlt.show()
| false
| true
|
f7184824078e9439bc4fe364829673d0d03fea2d
| 2,608
|
py
|
Python
|
python_bitbankcc/public_api.py
|
bitbankinc/python-bitbankcc
|
c1dfddaf39e69499301b6461fa73793f91ee6a76
|
[
"MIT"
] | 56
|
2017-08-25T07:39:49.000Z
|
2022-03-23T15:04:18.000Z
|
python_bitbankcc/public_api.py
|
bitbankinc/python-bitbankcc
|
c1dfddaf39e69499301b6461fa73793f91ee6a76
|
[
"MIT"
] | 7
|
2017-10-10T02:10:01.000Z
|
2022-01-12T00:57:50.000Z
|
python_bitbankcc/public_api.py
|
bitbankinc/python-bitbankcc
|
c1dfddaf39e69499301b6461fa73793f91ee6a76
|
[
"MIT"
] | 33
|
2017-10-09T17:48:26.000Z
|
2022-01-28T18:36:32.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2017 bitbank, inc. (ビットバンク株式会社)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
from .utils import error_parser, try_json_parse
from logging import getLogger
import requests, contextlib
logger = getLogger(__name__)
class bitbankcc_public(object):
def __init__(self, end_point='https://public.bitbank.cc'):
self.end_point = end_point
def _query(self, query_url):
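        # Issue the GET request, make sure the connection is closed, and unwrap the JSON body (raising on API errors)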
with contextlib.closing(requests.get(query_url)) as response:
response.raise_for_status()
return error_parser(try_json_parse(response, logger))
def get_ticker(self, pair):
path = '/' + pair + '/ticker'
return self._query(self.end_point + path)
def get_tickers(self):
path = '/tickers'
return self._query(self.end_point + path)
def get_tickers_jpy(self):
path = '/tickers_jpy'
return self._query(self.end_point + path)
def get_depth(self, pair):
path = '/' + pair + '/depth'
return self._query(self.end_point + path)
def get_transactions(self, pair, yyyymmdd=None):
path = '/' + pair + '/transactions'
if yyyymmdd: path += '/' + yyyymmdd
return self._query(self.end_point + path)
def get_candlestick(self, pair, candle_type, yyyymmdd):
path = '/' + pair + '/candlestick/' + candle_type + '/' + yyyymmdd
return self._query(self.end_point + path)
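# --- Illustrative sketch (not part of the original module): one way the
# client above might be used. It issues a live HTTP request, so it assumes
# network access and that 'btc_jpy' is a pair currently listed by the
# exchange; the value returned is the parsed JSON response after error
# checking by error_parser.
if __name__ == '__main__':
    client = bitbankcc_public()
    print(client.get_ticker('btc_jpy'))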
| 37.257143
| 82
| 0.695552
|
from __future__ import absolute_import, division, print_function, unicode_literals
from .utils import error_parser, try_json_parse
from logging import getLogger
import requests, contextlib
logger = getLogger(__name__)
class bitbankcc_public(object):
def __init__(self, end_point='https://public.bitbank.cc'):
self.end_point = end_point
def _query(self, query_url):
with contextlib.closing(requests.get(query_url)) as response:
response.raise_for_status()
return error_parser(try_json_parse(response, logger))
def get_ticker(self, pair):
path = '/' + pair + '/ticker'
return self._query(self.end_point + path)
def get_tickers(self):
path = '/tickers'
return self._query(self.end_point + path)
def get_tickers_jpy(self):
path = '/tickers_jpy'
return self._query(self.end_point + path)
def get_depth(self, pair):
path = '/' + pair + '/depth'
return self._query(self.end_point + path)
def get_transactions(self, pair, yyyymmdd=None):
path = '/' + pair + '/transactions'
if yyyymmdd: path += '/' + yyyymmdd
return self._query(self.end_point + path)
def get_candlestick(self, pair, candle_type, yyyymmdd):
path = '/' + pair + '/candlestick/' + candle_type + '/' + yyyymmdd
return self._query(self.end_point + path)
| true
| true
|
f718490b281d027fc767b61480f567c5c0d98b9b
| 2,696
|
py
|
Python
|
Functions/libsvm-3.23/tools/easy.py
|
klop670/TwitterBotDetectMLClass
|
88a22807a5d07378935d02fbca4cd6cc36a68d24
|
[
"MIT"
] | 5
|
2021-05-31T07:03:36.000Z
|
2022-01-31T11:51:05.000Z
|
Functions/libsvm-3.23/tools/easy.py
|
klop670/TwitterBotDetectMLClass
|
88a22807a5d07378935d02fbca4cd6cc36a68d24
|
[
"MIT"
] | 2
|
2021-09-27T12:24:42.000Z
|
2021-12-02T10:02:31.000Z
|
Functions/libsvm-3.23/tools/easy.py
|
klop670/TwitterBotDetectMLClass
|
88a22807a5d07378935d02fbca4cd6cc36a68d24
|
[
"MIT"
] | 2
|
2020-09-15T12:34:16.000Z
|
2021-07-19T00:57:43.000Z
|
#!/usr/bin/env python
import sys
import os
from subprocess import *
if len(sys.argv) <= 1:
print('Usage: {0} training_file [testing_file]'.format(sys.argv[0]))
raise SystemExit
# svm, grid, and gnuplot executable files
is_win32 = (sys.platform == 'win32')
if not is_win32:
svmscale_exe = "../svm-scale"
svmtrain_exe = "../svm-train"
svmpredict_exe = "../svm-predict"
grid_py = "./grid.py"
gnuplot_exe = "/usr/bin/gnuplot"
else:
# example for windows
svmscale_exe = r"..\windows\svm-scale.exe"
svmtrain_exe = r"..\windows\svm-train.exe"
svmpredict_exe = r"..\windows\svm-predict.exe"
gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe"
grid_py = r".\grid.py"
assert os.path.exists(svmscale_exe),"svm-scale executable not found"
assert os.path.exists(svmtrain_exe),"svm-train executable not found"
assert os.path.exists(svmpredict_exe),"svm-predict executable not found"
assert os.path.exists(gnuplot_exe),"gnuplot executable not found"
assert os.path.exists(grid_py),"grid.py not found"
train_pathname = sys.argv[1]
assert os.path.exists(train_pathname),"training file not found"
file_name = os.path.split(train_pathname)[1]
scaled_file = file_name + ".scale"
model_file = file_name + ".model"
range_file = file_name + ".range"
if len(sys.argv) > 2:
test_pathname = sys.argv[2]
file_name = os.path.split(test_pathname)[1]
assert os.path.exists(test_pathname),"testing file not found"
scaled_test_file = file_name + ".scale"
predict_test_file = file_name + ".predict"
cmd = '{0} -s "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, train_pathname, scaled_file)
print('Scaling training data...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
cmd = '{0} -svmtrain "{1}" -gnuplot "{2}" "{3}"'.format(grid_py, svmtrain_exe, gnuplot_exe, scaled_file)
print('Cross validation...')
f = Popen(cmd, shell = True, stdout = PIPE).stdout
line = ''
while True:
last_line = line
line = f.readline()
if not line: break
c,g,rate = map(float,last_line.split())
print('Best c={0}, g={1} CV rate={2}'.format(c,g,rate))
cmd = '{0} -c {1} -g {2} "{3}" "{4}"'.format(svmtrain_exe,c,g,scaled_file,model_file)
print('Training...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
print('Output model: {0}'.format(model_file))
if len(sys.argv) > 2:
cmd = '{0} -r "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, test_pathname, scaled_test_file)
print('Scaling testing data...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
cmd = '{0} "{1}" "{2}" "{3}"'.format(svmpredict_exe, scaled_test_file, model_file, predict_test_file)
print('Testing...')
Popen(cmd, shell = True).communicate()
print('Output prediction: {0}'.format(predict_test_file))
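# --- Illustrative usage note (not part of the original script): a typical
# invocation, assuming the LIBSVM binaries have been built in the parent
# directory, gnuplot is installed, and 'heart_scale' is a data file in
# LIBSVM format (the file name is only an example):
#
#   python easy.py heart_scale
#   python easy.py heart_scale heart_scale.t
#
# The script scales the data, runs grid.py to cross-validate (c, g),
# retrains with the best pair, and, when a test file is given, writes the
# predictions to '<test file name>.predict'.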
| 33.7
| 104
| 0.700297
|
import sys
import os
from subprocess import *
if len(sys.argv) <= 1:
print('Usage: {0} training_file [testing_file]'.format(sys.argv[0]))
raise SystemExit
is_win32 = (sys.platform == 'win32')
if not is_win32:
svmscale_exe = "../svm-scale"
svmtrain_exe = "../svm-train"
svmpredict_exe = "../svm-predict"
grid_py = "./grid.py"
gnuplot_exe = "/usr/bin/gnuplot"
else:
svmscale_exe = r"..\windows\svm-scale.exe"
svmtrain_exe = r"..\windows\svm-train.exe"
svmpredict_exe = r"..\windows\svm-predict.exe"
gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe"
grid_py = r".\grid.py"
assert os.path.exists(svmscale_exe),"svm-scale executable not found"
assert os.path.exists(svmtrain_exe),"svm-train executable not found"
assert os.path.exists(svmpredict_exe),"svm-predict executable not found"
assert os.path.exists(gnuplot_exe),"gnuplot executable not found"
assert os.path.exists(grid_py),"grid.py not found"
train_pathname = sys.argv[1]
assert os.path.exists(train_pathname),"training file not found"
file_name = os.path.split(train_pathname)[1]
scaled_file = file_name + ".scale"
model_file = file_name + ".model"
range_file = file_name + ".range"
if len(sys.argv) > 2:
test_pathname = sys.argv[2]
file_name = os.path.split(test_pathname)[1]
assert os.path.exists(test_pathname),"testing file not found"
scaled_test_file = file_name + ".scale"
predict_test_file = file_name + ".predict"
cmd = '{0} -s "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, train_pathname, scaled_file)
print('Scaling training data...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
cmd = '{0} -svmtrain "{1}" -gnuplot "{2}" "{3}"'.format(grid_py, svmtrain_exe, gnuplot_exe, scaled_file)
print('Cross validation...')
f = Popen(cmd, shell = True, stdout = PIPE).stdout
line = ''
while True:
last_line = line
line = f.readline()
if not line: break
c,g,rate = map(float,last_line.split())
print('Best c={0}, g={1} CV rate={2}'.format(c,g,rate))
cmd = '{0} -c {1} -g {2} "{3}" "{4}"'.format(svmtrain_exe,c,g,scaled_file,model_file)
print('Training...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
print('Output model: {0}'.format(model_file))
if len(sys.argv) > 2:
cmd = '{0} -r "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, test_pathname, scaled_test_file)
print('Scaling testing data...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
cmd = '{0} "{1}" "{2}" "{3}"'.format(svmpredict_exe, scaled_test_file, model_file, predict_test_file)
print('Testing...')
Popen(cmd, shell = True).communicate()
print('Output prediction: {0}'.format(predict_test_file))
| true
| true
|
f7184a5a9982e3dd5b398457d2310a43a37432e0
| 78,547
|
py
|
Python
|
python/cudf/cudf/tests/test_binops.py
|
esoha-nvidia/cudf
|
663457b186bbf27ea2926e08438b8c01b5c7633e
|
[
"Apache-2.0"
] | 1
|
2021-05-02T11:27:22.000Z
|
2021-05-02T11:27:22.000Z
|
python/cudf/cudf/tests/test_binops.py
|
esoha-nvidia/cudf
|
663457b186bbf27ea2926e08438b8c01b5c7633e
|
[
"Apache-2.0"
] | null | null | null |
python/cudf/cudf/tests/test_binops.py
|
esoha-nvidia/cudf
|
663457b186bbf27ea2926e08438b8c01b5c7633e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
from __future__ import division
import decimal
import operator
import random
from itertools import product
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core import Series
from cudf.core.index import as_index
from cudf.tests import utils
from cudf.utils.dtypes import (
BOOL_TYPES,
DATETIME_TYPES,
FLOAT_TYPES,
INTEGER_TYPES,
NUMERIC_TYPES,
TIMEDELTA_TYPES,
)
STRING_TYPES = {"str"}
_binops = [
operator.add,
operator.sub,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("binop", _binops)
def test_series_binop(binop, obj_class):
nelem = 1000
arr1 = utils.gen_rand("float64", nelem) * 10000
# Keeping a low value because CUDA 'pow' has 2 full range error
arr2 = utils.gen_rand("float64", nelem) * 10
sr1 = Series(arr1)
sr2 = Series(arr2)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result = binop(sr1, sr2)
expect = binop(pd.Series(arr1), pd.Series(arr2))
if obj_class == "Index":
result = Series(result)
utils.assert_eq(result, expect)
@pytest.mark.parametrize("binop", _binops)
def test_series_binop_concurrent(binop):
def func(index):
arr = np.random.random(100) * 10
sr = Series(arr)
result = binop(sr.astype("int32"), sr)
expect = binop(arr.astype("int32"), arr)
np.testing.assert_almost_equal(result.to_array(), expect, decimal=5)
from concurrent.futures import ThreadPoolExecutor
indices = range(10)
    with ThreadPoolExecutor(4) as e:  # four worker threads
list(e.map(func, indices))
@pytest.mark.parametrize("use_cudf_scalar", [False, True])
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("nelem,binop", list(product([1, 2, 100], _binops)))
def test_series_binop_scalar(nelem, binop, obj_class, use_cudf_scalar):
arr = np.random.random(nelem)
rhs = random.choice(arr).item()
sr = Series(arr)
if obj_class == "Index":
sr = as_index(sr)
if use_cudf_scalar:
result = binop(sr, rhs)
else:
result = binop(sr, cudf.Scalar(rhs))
if obj_class == "Index":
result = Series(result)
np.testing.assert_almost_equal(result.to_array(), binop(arr, rhs))
_bitwise_binops = [operator.and_, operator.or_, operator.xor]
_int_types = [
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("binop", _bitwise_binops)
@pytest.mark.parametrize(
"lhs_dtype,rhs_dtype", list(product(_int_types, _int_types))
)
def test_series_bitwise_binop(binop, obj_class, lhs_dtype, rhs_dtype):
arr1 = (np.random.random(100) * 100).astype(lhs_dtype)
sr1 = Series(arr1)
arr2 = (np.random.random(100) * 100).astype(rhs_dtype)
sr2 = Series(arr2)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result = binop(sr1, sr2)
if obj_class == "Index":
result = Series(result)
np.testing.assert_almost_equal(result.to_array(), binop(arr1, arr2))
_logical_binops = [
(operator.and_, operator.and_),
(operator.or_, operator.or_),
(np.logical_and, cudf.logical_and),
(np.logical_or, cudf.logical_or),
]
@pytest.mark.parametrize("lhstype", _int_types + [np.bool_])
@pytest.mark.parametrize("rhstype", _int_types + [np.bool_])
@pytest.mark.parametrize("binop,cubinop", _logical_binops)
def test_series_logical_binop(lhstype, rhstype, binop, cubinop):
arr1 = pd.Series(np.random.choice([True, False], 10))
if lhstype is not np.bool_:
arr1 = arr1 * (np.random.random(10) * 100).astype(lhstype)
sr1 = Series(arr1)
arr2 = pd.Series(np.random.choice([True, False], 10))
if rhstype is not np.bool_:
arr2 = arr2 * (np.random.random(10) * 100).astype(rhstype)
sr2 = Series(arr2)
result = cubinop(sr1, sr2)
expect = binop(arr1, arr2)
utils.assert_eq(result, expect)
_cmpops = [
operator.lt,
operator.gt,
operator.le,
operator.ge,
operator.eq,
operator.ne,
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("cmpop", _cmpops)
@pytest.mark.parametrize(
"dtype", ["int8", "int32", "int64", "float32", "float64", "datetime64[ms]"]
)
def test_series_compare(cmpop, obj_class, dtype):
arr1 = np.random.randint(0, 100, 100).astype(dtype)
arr2 = np.random.randint(0, 100, 100).astype(dtype)
sr1 = Series(arr1)
sr2 = Series(arr2)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result1 = cmpop(sr1, sr1)
result2 = cmpop(sr2, sr2)
result3 = cmpop(sr1, sr2)
if obj_class == "Index":
result1 = Series(result1)
result2 = Series(result2)
result3 = Series(result3)
np.testing.assert_equal(result1.to_array(), cmpop(arr1, arr1))
np.testing.assert_equal(result2.to_array(), cmpop(arr2, arr2))
np.testing.assert_equal(result3.to_array(), cmpop(arr1, arr2))
def _series_compare_nulls_typegen():
tests = []
tests += list(product(DATETIME_TYPES, DATETIME_TYPES))
tests += list(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))
tests += list(product(NUMERIC_TYPES, NUMERIC_TYPES))
tests += list(product(STRING_TYPES, STRING_TYPES))
return tests
@pytest.mark.parametrize("cmpop", _cmpops)
@pytest.mark.parametrize("dtypes", _series_compare_nulls_typegen())
def test_series_compare_nulls(cmpop, dtypes):
ltype, rtype = dtypes
ldata = [1, 2, None, None, 5]
rdata = [2, 1, None, 4, None]
lser = Series(ldata, dtype=ltype)
rser = Series(rdata, dtype=rtype)
lmask = ~lser.isnull()
rmask = ~rser.isnull()
expect_mask = np.logical_and(lmask, rmask)
expect = cudf.Series([None] * 5, dtype="bool")
expect[expect_mask] = cmpop(lser[expect_mask], rser[expect_mask])
got = cmpop(lser, rser)
utils.assert_eq(expect, got)
@pytest.mark.parametrize(
"obj", [pd.Series(["a", "b", None, "d", "e", None], dtype="string"), "a"]
)
@pytest.mark.parametrize("cmpop", _cmpops)
@pytest.mark.parametrize(
"cmp_obj",
[pd.Series(["b", "a", None, "d", "f", None], dtype="string"), "a"],
)
def test_string_series_compare(obj, cmpop, cmp_obj):
g_obj = obj
if isinstance(g_obj, pd.Series):
g_obj = Series.from_pandas(g_obj)
g_cmp_obj = cmp_obj
if isinstance(g_cmp_obj, pd.Series):
g_cmp_obj = Series.from_pandas(g_cmp_obj)
got = cmpop(g_obj, g_cmp_obj)
expected = cmpop(obj, cmp_obj)
if isinstance(expected, pd.Series):
expected = cudf.from_pandas(expected)
utils.assert_eq(expected, got)
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("nelem", [1, 2, 100])
@pytest.mark.parametrize("cmpop", _cmpops)
@pytest.mark.parametrize("dtype", utils.NUMERIC_TYPES + ["datetime64[ms]"])
@pytest.mark.parametrize("use_cudf_scalar", [True, False])
def test_series_compare_scalar(
nelem, cmpop, obj_class, dtype, use_cudf_scalar
):
arr1 = np.random.randint(0, 100, 100).astype(dtype)
sr1 = Series(arr1)
rhs = random.choice(arr1).item()
if use_cudf_scalar:
rhs = cudf.Scalar(rhs)
if obj_class == "Index":
sr1 = as_index(sr1)
result1 = cmpop(sr1, rhs)
result2 = cmpop(rhs, sr1)
if obj_class == "Index":
result1 = Series(result1)
result2 = Series(result2)
np.testing.assert_equal(result1.to_array(), cmpop(arr1, rhs))
np.testing.assert_equal(result2.to_array(), cmpop(rhs, arr1))
_nulls = ["none", "some"]
@pytest.mark.parametrize("nelem", [1, 7, 8, 9, 32, 64, 128])
@pytest.mark.parametrize("lhs_nulls,rhs_nulls", list(product(_nulls, _nulls)))
def test_validity_add(nelem, lhs_nulls, rhs_nulls):
np.random.seed(0)
# LHS
lhs_data = np.random.random(nelem)
if lhs_nulls == "some":
lhs_mask = utils.random_bitmask(nelem)
lhs_bitmask = utils.expand_bits_to_bytes(lhs_mask)[:nelem]
lhs_null_count = utils.count_zero(lhs_bitmask)
assert lhs_null_count >= 0
lhs = Series.from_masked_array(lhs_data, lhs_mask)
assert lhs.null_count == lhs_null_count
else:
lhs = Series(lhs_data)
# RHS
rhs_data = np.random.random(nelem)
if rhs_nulls == "some":
rhs_mask = utils.random_bitmask(nelem)
rhs_bitmask = utils.expand_bits_to_bytes(rhs_mask)[:nelem]
rhs_null_count = utils.count_zero(rhs_bitmask)
assert rhs_null_count >= 0
rhs = Series.from_masked_array(rhs_data, rhs_mask)
assert rhs.null_count == rhs_null_count
else:
rhs = Series(rhs_data)
# Result
res = lhs + rhs
if lhs_nulls == "some" and rhs_nulls == "some":
res_mask = np.asarray(
utils.expand_bits_to_bytes(lhs_mask & rhs_mask), dtype=np.bool_
)[:nelem]
if lhs_nulls == "some" and rhs_nulls == "none":
res_mask = np.asarray(
utils.expand_bits_to_bytes(lhs_mask), dtype=np.bool_
)[:nelem]
if lhs_nulls == "none" and rhs_nulls == "some":
res_mask = np.asarray(
utils.expand_bits_to_bytes(rhs_mask), dtype=np.bool_
)[:nelem]
# Fill NA values
na_value = -10000
got = res.fillna(na_value).to_array()
expect = lhs_data + rhs_data
if lhs_nulls == "some" or rhs_nulls == "some":
expect[~res_mask] = na_value
np.testing.assert_array_equal(expect, got)
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize(
"binop,lhs_dtype,rhs_dtype",
list(
product(
[operator.add, operator.mul],
utils.NUMERIC_TYPES,
utils.NUMERIC_TYPES,
)
),
)
def test_series_binop_mixed_dtype(binop, lhs_dtype, rhs_dtype, obj_class):
nelem = 10
lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype)
rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype)
sr1 = Series(lhs)
sr2 = Series(rhs)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result = binop(Series(sr1), Series(sr2))
if obj_class == "Index":
result = Series(result)
np.testing.assert_almost_equal(result.to_array(), binop(lhs, rhs))
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize(
"cmpop,lhs_dtype,rhs_dtype",
list(product(_cmpops, utils.NUMERIC_TYPES, utils.NUMERIC_TYPES)),
)
def test_series_cmpop_mixed_dtype(cmpop, lhs_dtype, rhs_dtype, obj_class):
nelem = 5
lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype)
rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype)
sr1 = Series(lhs)
sr2 = Series(rhs)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result = cmpop(Series(sr1), Series(sr2))
if obj_class == "Index":
result = Series(result)
np.testing.assert_array_equal(result.to_array(), cmpop(lhs, rhs))
_reflected_ops = [
lambda x: 1 + x,
lambda x: 2 * x,
lambda x: 2 - x,
lambda x: 2 // x,
lambda x: 2 / x,
lambda x: 3 + x,
lambda x: 3 * x,
lambda x: 3 - x,
lambda x: 3 // x,
lambda x: 3 / x,
lambda x: 3 % x,
lambda x: -1 + x,
lambda x: -2 * x,
lambda x: -2 - x,
lambda x: -2 // x,
lambda x: -2 / x,
lambda x: -3 + x,
lambda x: -3 * x,
lambda x: -3 - x,
lambda x: -3 // x,
lambda x: -3 / x,
lambda x: -3 % x,
lambda x: 0 + x,
lambda x: 0 * x,
lambda x: 0 - x,
lambda x: 0 // x,
lambda x: 0 / x,
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize(
"func, dtype", list(product(_reflected_ops, utils.NUMERIC_TYPES))
)
def test_reflected_ops_scalar(func, dtype, obj_class):
# create random series
np.random.seed(12)
random_series = utils.gen_rand(dtype, 100, low=10)
# gpu series
gs = Series(random_series)
# class typing
if obj_class == "Index":
gs = as_index(gs)
gs_result = func(gs)
# class typing
if obj_class == "Index":
gs = Series(gs)
# pandas
ps_result = func(random_series)
# verify
np.testing.assert_allclose(ps_result, gs_result.to_array())
_cudf_scalar_reflected_ops = [
lambda x: cudf.Scalar(1) + x,
lambda x: cudf.Scalar(2) * x,
lambda x: cudf.Scalar(2) - x,
lambda x: cudf.Scalar(2) // x,
lambda x: cudf.Scalar(2) / x,
lambda x: cudf.Scalar(3) + x,
lambda x: cudf.Scalar(3) * x,
lambda x: cudf.Scalar(3) - x,
lambda x: cudf.Scalar(3) // x,
lambda x: cudf.Scalar(3) / x,
lambda x: cudf.Scalar(3) % x,
lambda x: cudf.Scalar(-1) + x,
lambda x: cudf.Scalar(-2) * x,
lambda x: cudf.Scalar(-2) - x,
lambda x: cudf.Scalar(-2) // x,
lambda x: cudf.Scalar(-2) / x,
lambda x: cudf.Scalar(-3) + x,
lambda x: cudf.Scalar(-3) * x,
lambda x: cudf.Scalar(-3) - x,
lambda x: cudf.Scalar(-3) // x,
lambda x: cudf.Scalar(-3) / x,
lambda x: cudf.Scalar(-3) % x,
lambda x: cudf.Scalar(0) + x,
lambda x: cudf.Scalar(0) * x,
lambda x: cudf.Scalar(0) - x,
lambda x: cudf.Scalar(0) // x,
lambda x: cudf.Scalar(0) / x,
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize(
"funcs, dtype",
list(
product(
list(zip(_reflected_ops, _cudf_scalar_reflected_ops)),
utils.NUMERIC_TYPES,
)
),
)
def test_reflected_ops_cudf_scalar(funcs, dtype, obj_class):
cpu_func, gpu_func = funcs
# create random series
np.random.seed(12)
random_series = utils.gen_rand(dtype, 100, low=10)
# gpu series
gs = Series(random_series)
# class typing
if obj_class == "Index":
gs = as_index(gs)
gs_result = gpu_func(gs)
# class typing
if obj_class == "Index":
gs = Series(gs)
# pandas
ps_result = cpu_func(random_series)
# verify
np.testing.assert_allclose(ps_result, gs_result.to_array())
@pytest.mark.parametrize("binop", _binops)
def test_different_shapes_and_columns(binop):
# TODO: support `pow()` on NaN values. Particularly, the cases:
# `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`
if binop is operator.pow:
return
# Empty frame on the right side
pd_frame = binop(pd.DataFrame({"x": [1, 2]}), pd.DataFrame({}))
cd_frame = binop(cudf.DataFrame({"x": [1, 2]}), cudf.DataFrame({}))
utils.assert_eq(cd_frame, pd_frame)
# Empty frame on the left side
pd_frame = pd.DataFrame({}) + pd.DataFrame({"x": [1, 2]})
cd_frame = cudf.DataFrame({}) + cudf.DataFrame({"x": [1, 2]})
utils.assert_eq(cd_frame, pd_frame)
    # Note: the cases below rely on a discrepancy between cudf and pandas.
    # While pandas inserts columns in alphabetical order, cudf inserts them
    # in the order in which each column first appears. So the following code
    # will not work if the column names are reversed, i.e. ('y', 'x') != ('x', 'y')
# More rows on the left side
pd_frame = pd.DataFrame({"x": [1, 2, 3]}) + pd.DataFrame({"y": [1, 2]})
cd_frame = cudf.DataFrame({"x": [1, 2, 3]}) + cudf.DataFrame({"y": [1, 2]})
utils.assert_eq(cd_frame, pd_frame)
# More rows on the right side
pd_frame = pd.DataFrame({"x": [1, 2]}) + pd.DataFrame({"y": [1, 2, 3]})
cd_frame = cudf.DataFrame({"x": [1, 2]}) + cudf.DataFrame({"y": [1, 2, 3]})
utils.assert_eq(cd_frame, pd_frame)
@pytest.mark.parametrize("binop", _binops)
def test_different_shapes_and_same_columns(binop):
# TODO: support `pow()` on NaN values. Particularly, the cases:
# `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`
if binop is operator.pow:
return
pd_frame = binop(
pd.DataFrame({"x": [1, 2]}), pd.DataFrame({"x": [1, 2, 3]})
)
cd_frame = binop(
cudf.DataFrame({"x": [1, 2]}), cudf.DataFrame({"x": [1, 2, 3]})
)
# cast x as float64 so it matches pandas dtype
cd_frame["x"] = cd_frame["x"].astype(np.float64)
utils.assert_eq(cd_frame, pd_frame)
@pytest.mark.parametrize("binop", _binops)
def test_different_shapes_and_columns_with_unaligned_indices(binop):
# TODO: support `pow()` on NaN values. Particularly, the cases:
# `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`
if binop is operator.pow:
return
# Test with a RangeIndex
pdf1 = pd.DataFrame({"x": [4, 3, 2, 1], "y": [7, 3, 8, 6]})
# Test with a GenericIndex
pdf2 = pd.DataFrame(
{"x": [1, 2, 3, 7], "y": [4, 5, 6, 7]}, index=[0, 1, 3, 4]
)
# Test with a GenericIndex in a different order
pdf3 = pd.DataFrame(
{"x": [4, 5, 6, 7], "y": [1, 2, 3, 7], "z": [0, 5, 3, 7]},
index=[0, 3, 5, 3],
)
gdf1 = cudf.DataFrame.from_pandas(pdf1)
gdf2 = cudf.DataFrame.from_pandas(pdf2)
gdf3 = cudf.DataFrame.from_pandas(pdf3)
pd_frame = binop(binop(pdf1, pdf2), pdf3)
cd_frame = binop(binop(gdf1, gdf2), gdf3)
# cast x and y as float64 so it matches pandas dtype
cd_frame["x"] = cd_frame["x"].astype(np.float64)
cd_frame["y"] = cd_frame["y"].astype(np.float64)
utils.assert_eq(cd_frame, pd_frame)
@pytest.mark.parametrize(
"df2",
[
cudf.DataFrame({"a": [3, 2, 1]}, index=[3, 2, 1]),
cudf.DataFrame([3, 2]),
],
)
@pytest.mark.parametrize("binop", [operator.eq, operator.ne])
def test_df_different_index_shape(df2, binop):
df1 = cudf.DataFrame([1, 2, 3], index=[1, 2, 3])
pdf1 = df1.to_pandas()
pdf2 = df2.to_pandas()
utils.assert_exceptions_equal(
lfunc=binop,
rfunc=binop,
lfunc_args_and_kwargs=([pdf1, pdf2],),
rfunc_args_and_kwargs=([df1, df2],),
)
@pytest.mark.parametrize("op", [operator.eq, operator.ne])
def test_boolean_scalar_binop(op):
psr = pd.Series(np.random.choice([True, False], 10))
gsr = cudf.from_pandas(psr)
utils.assert_eq(op(psr, True), op(gsr, True))
utils.assert_eq(op(psr, False), op(gsr, False))
# cuDF scalar
utils.assert_eq(op(psr, True), op(gsr, cudf.Scalar(True)))
utils.assert_eq(op(psr, False), op(gsr, cudf.Scalar(False)))
_operators_arithmetic = [
"add",
"radd",
"sub",
"rsub",
"mul",
"rmul",
"mod",
"rmod",
"pow",
"rpow",
"floordiv",
"rfloordiv",
"truediv",
"rtruediv",
]
_operators_comparison = ["eq", "ne", "lt", "le", "gt", "ge"]
@pytest.mark.parametrize("func", _operators_arithmetic)
@pytest.mark.parametrize("has_nulls", [True, False])
@pytest.mark.parametrize("fill_value", [None, 27])
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_operator_func_between_series(dtype, func, has_nulls, fill_value):
count = 1000
gdf_series_a = utils.gen_rand_series(
dtype, count, has_nulls=has_nulls, stride=10000
)
gdf_series_b = utils.gen_rand_series(
dtype, count, has_nulls=has_nulls, stride=100
)
pdf_series_a = gdf_series_a.to_pandas()
pdf_series_b = gdf_series_b.to_pandas()
gdf_result = getattr(gdf_series_a, func)(
gdf_series_b, fill_value=fill_value
)
pdf_result = getattr(pdf_series_a, func)(
pdf_series_b, fill_value=fill_value
)
utils.assert_eq(pdf_result, gdf_result)
@pytest.mark.parametrize("func", _operators_arithmetic)
@pytest.mark.parametrize("has_nulls", [True, False])
@pytest.mark.parametrize("fill_value", [None, 27])
@pytest.mark.parametrize("dtype", ["float32", "float64"])
@pytest.mark.parametrize("use_cudf_scalar", [False, True])
def test_operator_func_series_and_scalar(
dtype, func, has_nulls, fill_value, use_cudf_scalar
):
count = 1000
scalar = 59
gdf_series = utils.gen_rand_series(
dtype, count, has_nulls=has_nulls, stride=10000
)
pdf_series = gdf_series.to_pandas()
gdf_series_result = getattr(gdf_series, func)(
cudf.Scalar(scalar) if use_cudf_scalar else scalar,
fill_value=fill_value,
)
pdf_series_result = getattr(pdf_series, func)(
scalar, fill_value=fill_value
)
utils.assert_eq(pdf_series_result, gdf_series_result)
_permu_values = [0, 1, None, np.nan]
@pytest.mark.parametrize("fill_value", _permu_values)
@pytest.mark.parametrize("scalar_a", _permu_values)
@pytest.mark.parametrize("scalar_b", _permu_values)
@pytest.mark.parametrize("func", _operators_comparison)
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_operator_func_between_series_logical(
dtype, func, scalar_a, scalar_b, fill_value
):
gdf_series_a = Series([scalar_a], nan_as_null=False).astype(dtype)
gdf_series_b = Series([scalar_b], nan_as_null=False).astype(dtype)
pdf_series_a = gdf_series_a.to_pandas(nullable=True)
pdf_series_b = gdf_series_b.to_pandas(nullable=True)
gdf_series_result = getattr(gdf_series_a, func)(
gdf_series_b, fill_value=fill_value
)
pdf_series_result = getattr(pdf_series_a, func)(
pdf_series_b, fill_value=fill_value
)
expect = pdf_series_result
got = gdf_series_result.to_pandas(nullable=True)
# If fill_value is np.nan, things break down a bit,
# because setting a NaN into a pandas nullable float
# array still gets transformed to <NA>. As such,
# pd_series_with_nulls.fillna(np.nan) has no effect.
if (
(pdf_series_a.isnull().sum() != pdf_series_b.isnull().sum())
and np.isscalar(fill_value)
and np.isnan(fill_value)
):
with pytest.raises(AssertionError):
utils.assert_eq(expect, got)
return
utils.assert_eq(expect, got)
@pytest.mark.parametrize("dtype", ["float32", "float64"])
@pytest.mark.parametrize("func", _operators_comparison)
@pytest.mark.parametrize("has_nulls", [True, False])
@pytest.mark.parametrize("scalar", [-59.0, np.nan, 0, 59.0])
@pytest.mark.parametrize("fill_value", [None, True, False, 1.0])
@pytest.mark.parametrize("use_cudf_scalar", [False, True])
def test_operator_func_series_and_scalar_logical(
dtype, func, has_nulls, scalar, fill_value, use_cudf_scalar
):
gdf_series = utils.gen_rand_series(
dtype, 1000, has_nulls=has_nulls, stride=10000
)
pdf_series = gdf_series.to_pandas(nullable=True)
gdf_series_result = getattr(gdf_series, func)(
cudf.Scalar(scalar) if use_cudf_scalar else scalar,
fill_value=fill_value,
)
pdf_series_result = getattr(pdf_series, func)(
scalar, fill_value=fill_value
)
expect = pdf_series_result
got = gdf_series_result.to_pandas(nullable=True)
utils.assert_eq(expect, got)
@pytest.mark.parametrize("func", _operators_arithmetic)
@pytest.mark.parametrize("nulls", _nulls)
@pytest.mark.parametrize("fill_value", [None, 27])
@pytest.mark.parametrize("other", ["df", "scalar"])
def test_operator_func_dataframe(func, nulls, fill_value, other):
num_rows = 100
num_cols = 3
def gen_df():
pdf = pd.DataFrame()
from string import ascii_lowercase
cols = np.random.choice(num_cols + 5, num_cols, replace=False)
for i in range(num_cols):
colname = ascii_lowercase[cols[i]]
data = utils.gen_rand("float64", num_rows) * 10000
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
pdf[colname] = data
return pdf
pdf1 = gen_df()
pdf2 = gen_df() if other == "df" else 59.0
gdf1 = cudf.DataFrame.from_pandas(pdf1)
gdf2 = cudf.DataFrame.from_pandas(pdf2) if other == "df" else 59.0
got = getattr(gdf1, func)(gdf2, fill_value=fill_value)
expect = getattr(pdf1, func)(pdf2, fill_value=fill_value)[list(got._data)]
utils.assert_eq(expect, got)
@pytest.mark.parametrize("func", _operators_arithmetic + _operators_comparison)
@pytest.mark.parametrize("rhs", [0, 1, 2, 128])
def test_binop_bool_uint(func, rhs):
# TODO: remove this once issue #2172 is resolved
if func == "rmod" or func == "rfloordiv":
return
psr = pd.Series([True, False, False])
gsr = cudf.from_pandas(psr)
utils.assert_eq(
getattr(psr, func)(rhs), getattr(gsr, func)(rhs), check_dtype=False
)
def test_series_misc_binop():
pds = pd.Series([1, 2, 4], name="abc xyz")
gds = cudf.Series([1, 2, 4], name="abc xyz")
utils.assert_eq(pds + 1, gds + 1)
utils.assert_eq(1 + pds, 1 + gds)
utils.assert_eq(pds + pds, gds + gds)
pds1 = pd.Series([1, 2, 4], name="hello world")
gds1 = cudf.Series([1, 2, 4], name="hello world")
utils.assert_eq(pds + pds1, gds + gds1)
utils.assert_eq(pds1 + pds, gds1 + gds)
utils.assert_eq(pds1 + pds + 5, gds1 + gds + 5)
def test_int8_float16_binop():
a = cudf.Series([1], dtype="int8")
b = np.float16(2)
expect = cudf.Series([0.5])
got = a / b
utils.assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize("dtype", ["int64", "float64", "str"])
def test_vector_to_none_binops(dtype):
data = Series([1, 2, 3, None], dtype=dtype)
expect = Series([None] * 4).astype(dtype)
got = data + None
utils.assert_eq(expect, got)
@pytest.mark.parametrize(
"lhs",
[
1,
3,
4,
pd.Series([5, 6, 2]),
pd.Series([0, 10, 20, 30, 3, 4, 5, 6, 2]),
6,
],
)
@pytest.mark.parametrize("rhs", [1, 3, 4, pd.Series([5, 6, 2])])
@pytest.mark.parametrize(
"ops",
[
(np.remainder, cudf.remainder),
(np.floor_divide, cudf.floor_divide),
(np.subtract, cudf.subtract),
(np.add, cudf.add),
(np.true_divide, cudf.true_divide),
(np.multiply, cudf.multiply),
],
)
def test_ufunc_ops(lhs, rhs, ops):
np_op, cu_op = ops
if isinstance(lhs, pd.Series):
culhs = cudf.from_pandas(lhs)
else:
culhs = lhs
if isinstance(rhs, pd.Series):
curhs = cudf.from_pandas(rhs)
else:
curhs = rhs
expect = np_op(lhs, rhs)
got = cu_op(culhs, curhs)
if np.isscalar(expect):
assert got == expect
else:
utils.assert_eq(
expect, got,
)
def dtype_scalar(val, dtype):
if dtype == "str":
return str(val)
dtype = np.dtype(dtype)
if dtype.type in {np.datetime64, np.timedelta64}:
res, _ = np.datetime_data(dtype)
return dtype.type(val, res)
else:
return dtype.type(val)
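# --- Illustrative note (not part of the original test module): what the
# dtype_scalar helper above produces for a few representative dtypes.
#   dtype_scalar(1, "float32")        -> np.float32(1.0)
#   dtype_scalar(1, "datetime64[ms]") -> np.datetime64(1, "ms")
#   dtype_scalar(1, "str")            -> "1"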
def make_valid_scalar_add_data():
valid = set()
    # to any int, we may add any kind of
    # other int, float, datetime, timedelta, or bool
valid |= set(
product(
INTEGER_TYPES,
FLOAT_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,
)
)
# to any float, we may add any int, float, or bool
valid |= set(
product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)
)
# to any datetime, we may add any int, timedelta, or bool
valid |= set(
product(DATETIME_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES | BOOL_TYPES)
)
# to any timedelta, we may add any int, datetime, other timedelta, or bool
valid |= set(
product(TIMEDELTA_TYPES, INTEGER_TYPES | DATETIME_TYPES | BOOL_TYPES)
)
# to any bool, we may add any int, float, datetime, timedelta, or bool
valid |= set(
product(
BOOL_TYPES,
INTEGER_TYPES
| FLOAT_TYPES
| DATETIME_TYPES
| TIMEDELTA_TYPES
| BOOL_TYPES,
)
)
# to any string, we may add any other string
valid |= {("str", "str")}
return sorted(list(valid))
def make_invalid_scalar_add_data():
invalid = set()
# we can not add a datetime to a float
invalid |= set(product(FLOAT_TYPES, DATETIME_TYPES))
# We can not add a timedelta to a float
invalid |= set(product(FLOAT_TYPES, TIMEDELTA_TYPES))
# we can not add a float to any datetime
invalid |= set(product(DATETIME_TYPES, FLOAT_TYPES))
    # we can not add a datetime to a datetime
invalid |= set(product(DATETIME_TYPES, DATETIME_TYPES))
# can not add a timedelta to a float
invalid |= set(product(FLOAT_TYPES, TIMEDELTA_TYPES))
return sorted(list(invalid))
@pytest.mark.parametrize("dtype_l,dtype_r", make_valid_scalar_add_data())
def test_scalar_add(dtype_l, dtype_r):
test_value = 1
lval_host = dtype_scalar(test_value, dtype=dtype_l)
rval_host = dtype_scalar(test_value, dtype=dtype_r)
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
# expect = np.add(lval_host, rval_host)
expect = lval_host + rval_host
got = lval_gpu + rval_gpu
assert expect == got.value
if not dtype_l == dtype_r == "str":
assert expect.dtype == got.dtype
@pytest.mark.parametrize("dtype_l,dtype_r", make_invalid_scalar_add_data())
def test_scalar_add_invalid(dtype_l, dtype_r):
test_value = 1
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
with pytest.raises(TypeError):
lval_gpu + rval_gpu
def make_scalar_difference_data():
valid = set()
# from an int, we may subtract any int, float, timedelta,
# or boolean value
valid |= set(
product(
INTEGER_TYPES,
INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,
)
)
# from any float, we may subtract any int, float, or bool
valid |= set(
product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)
)
# from any datetime we may subtract any int, datetime, timedelta, or bool
valid |= set(
product(
DATETIME_TYPES,
INTEGER_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,
)
)
# from any timedelta we may subtract any int, timedelta, or bool
valid |= set(
product(TIMEDELTA_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES | BOOL_TYPES)
)
# from any bool we may subtract any int, float or timedelta
valid |= set(
product(BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES)
)
return sorted(list(valid))
def make_scalar_difference_data_invalid():
invalid = set()
# we can't subtract a datetime from an int
invalid |= set(product(INTEGER_TYPES, DATETIME_TYPES))
# we can't subtract a datetime or timedelta from a float
invalid |= set(product(FLOAT_TYPES, DATETIME_TYPES | TIMEDELTA_TYPES))
# we can't subtract a float from a datetime or timedelta
invalid |= set(product(DATETIME_TYPES | TIMEDELTA_TYPES, FLOAT_TYPES))
# We can't subtract a datetime from a timedelta
invalid |= set(product(TIMEDELTA_TYPES, DATETIME_TYPES))
# we can't subtract a datetime or bool from a bool
invalid |= set(product(BOOL_TYPES, BOOL_TYPES | DATETIME_TYPES))
return sorted(list(invalid))
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_difference_data())
def test_scalar_difference(dtype_l, dtype_r):
test_value = 1
lval_host = dtype_scalar(test_value, dtype=dtype_l)
rval_host = dtype_scalar(test_value, dtype=dtype_r)
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
expect = lval_host - rval_host
got = lval_gpu - rval_gpu
assert expect == got.value
assert expect.dtype == got.dtype
@pytest.mark.parametrize(
"dtype_l,dtype_r", make_scalar_difference_data_invalid()
)
def test_scalar_difference_invalid(dtype_l, dtype_r):
test_value = 1
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
with pytest.raises(TypeError):
lval_gpu - rval_gpu
def make_scalar_product_data():
valid = set()
# we can multiply an int, or bool by any int, float, timedelta, or bool
valid |= set(
product(
INTEGER_TYPES | BOOL_TYPES,
INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,
)
)
    # we can multiply any timedelta by any int or bool
valid |= set(product(TIMEDELTA_TYPES, INTEGER_TYPES | BOOL_TYPES))
# we can multiply a float by any int, float, or bool
valid |= set(
product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)
)
return sorted(list(valid))
def make_scalar_product_data_invalid():
invalid = set()
    # can't multiply ints, floats, datetimes, timedeltas,
    # or bools by datetimes
invalid |= set(
product(
INTEGER_TYPES
| FLOAT_TYPES
| DATETIME_TYPES
| TIMEDELTA_TYPES
| BOOL_TYPES,
DATETIME_TYPES,
)
)
# can't multiply datetimes with anything really
invalid |= set(
product(
DATETIME_TYPES,
INTEGER_TYPES
| FLOAT_TYPES
| DATETIME_TYPES
| TIMEDELTA_TYPES
| BOOL_TYPES,
)
)
# can't multiply timedeltas by timedeltas
invalid |= set(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))
return sorted(list(invalid))
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_product_data())
def test_scalar_product(dtype_l, dtype_r):
test_value = 1
lval_host = dtype_scalar(test_value, dtype=dtype_l)
rval_host = dtype_scalar(test_value, dtype=dtype_r)
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
expect = lval_host * rval_host
got = lval_gpu * rval_gpu
assert expect == got.value
assert expect.dtype == got.dtype
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_product_data_invalid())
def test_scalar_product_invalid(dtype_l, dtype_r):
test_value = 1
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
with pytest.raises(TypeError):
lval_gpu * rval_gpu
def make_scalar_floordiv_data():
valid = set()
# we can divide ints and floats by other ints, floats, or bools
valid |= set(
product(
INTEGER_TYPES | FLOAT_TYPES,
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
)
)
# we can divide timedeltas by ints, floats or other timedeltas
valid |= set(
product(TIMEDELTA_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES)
)
# we can divide bools by ints, floats or bools
valid |= set(product(BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES))
return sorted(list(valid))
def make_scalar_floordiv_data_invalid():
invalid = set()
    # we can't divide numeric types into datelike types
invalid |= set(
product(
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
DATETIME_TYPES | TIMEDELTA_TYPES,
)
)
# we can't divide datetime types into anything
invalid |= set(
product(
DATETIME_TYPES,
INTEGER_TYPES
| FLOAT_TYPES
| DATETIME_TYPES
| TIMEDELTA_TYPES
| BOOL_TYPES,
)
)
# we can't divide timedeltas into bools, or datetimes
invalid |= set(product(TIMEDELTA_TYPES, BOOL_TYPES | DATETIME_TYPES))
return sorted(list(invalid))
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_floordiv_data())
def test_scalar_floordiv(dtype_l, dtype_r):
test_value = 1
lval_host = dtype_scalar(test_value, dtype=dtype_l)
rval_host = dtype_scalar(test_value, dtype=dtype_r)
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
expect = lval_host // rval_host
got = lval_gpu // rval_gpu
assert expect == got.value
assert expect.dtype == got.dtype
@pytest.mark.parametrize(
"dtype_l,dtype_r", make_scalar_floordiv_data_invalid()
)
def test_scalar_floordiv_invalid(dtype_l, dtype_r):
test_value = 1
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
with pytest.raises(TypeError):
lval_gpu // rval_gpu
def make_scalar_truediv_data():
valid = set()
# we can true divide ints, floats, or bools by other
# ints, floats or bools
valid |= set(
product(
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
)
)
    # we can true divide timedeltas by ints or other timedeltas
valid |= set(product(TIMEDELTA_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES))
return sorted(list(valid))
def make_scalar_truediv_data_invalid():
invalid = set()
# we can't divide ints, floats or bools by datetimes
# or timedeltas
invalid |= set(
product(
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
DATETIME_TYPES | TIMEDELTA_TYPES,
)
)
    # we can't true divide datetime types by anything
invalid |= set(
product(
DATETIME_TYPES,
INTEGER_TYPES
| FLOAT_TYPES
| DATETIME_TYPES
| TIMEDELTA_TYPES
| BOOL_TYPES,
)
)
    # we can't true divide timedeltas by datetimes, bools or floats
invalid |= set(
product(TIMEDELTA_TYPES, DATETIME_TYPES | BOOL_TYPES | FLOAT_TYPES)
)
return sorted(list(invalid))
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_truediv_data())
def test_scalar_truediv(dtype_l, dtype_r):
test_value = 1
lval_host = dtype_scalar(test_value, dtype=dtype_l)
rval_host = dtype_scalar(test_value, dtype=dtype_r)
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
expect = np.true_divide(lval_host, rval_host)
got = lval_gpu / rval_gpu
assert expect == got.value
# numpy bug
if np.dtype(dtype_l).itemsize <= 2 and np.dtype(dtype_r).itemsize <= 2:
assert expect.dtype == "float64" and got.dtype == "float32"
else:
assert expect.dtype == got.dtype
# assert expect.dtype == got.dtype
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_truediv_data_invalid())
def test_scalar_truediv_invalid(dtype_l, dtype_r):
test_value = 1
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
with pytest.raises(TypeError):
lval_gpu / rval_gpu
def make_scalar_remainder_data():
valid = set()
# can mod numeric types with each other
valid |= set(
product(
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
)
)
# can mod timedeltas by other timedeltas
valid |= set(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))
return sorted(list(valid))
def make_scalar_remainder_data_invalid():
invalid = set()
    # numeric types can't be modded against timedeltas
    # or datetimes. Also, datetimes can't be modded
    # against datetimes or timedeltas
invalid |= set(
product(
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES | DATETIME_TYPES,
DATETIME_TYPES | TIMEDELTA_TYPES,
)
)
    # datetime and timedelta types can't be modded against
    # any numeric types
invalid |= set(
product(
DATETIME_TYPES | TIMEDELTA_TYPES,
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
)
)
    # timedeltas can't be modded by datetimes
invalid |= set(product(TIMEDELTA_TYPES, DATETIME_TYPES))
return sorted(list(invalid))
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_remainder_data())
def test_scalar_remainder(dtype_l, dtype_r):
test_value = 1
lval_host = dtype_scalar(test_value, dtype=dtype_l)
rval_host = dtype_scalar(test_value, dtype=dtype_r)
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
expect = lval_host % rval_host
got = lval_gpu % rval_gpu
assert expect == got.value
assert expect.dtype == got.dtype
@pytest.mark.parametrize(
"dtype_l,dtype_r", make_scalar_remainder_data_invalid()
)
def test_scalar_remainder_invalid(dtype_l, dtype_r):
test_value = 1
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
with pytest.raises(TypeError):
lval_gpu % rval_gpu
def make_scalar_power_data():
# only numeric values form valid operands for power
return sorted(
product(
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
)
)
def make_scalar_power_data_invalid():
invalid = set()
    # datetimes and timedeltas can't go in exponents
invalid |= set(
product(
INTEGER_TYPES
| FLOAT_TYPES
| TIMEDELTA_TYPES
| DATETIME_TYPES
| BOOL_TYPES,
DATETIME_TYPES | TIMEDELTA_TYPES,
)
)
# datetimes and timedeltas may not be raised to
# any exponent of any dtype
invalid |= set(
product(
DATETIME_TYPES | TIMEDELTA_TYPES,
DATETIME_TYPES
| TIMEDELTA_TYPES
| INTEGER_TYPES
| FLOAT_TYPES
| BOOL_TYPES,
)
)
return sorted(list(invalid))
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_power_data())
def test_scalar_power(dtype_l, dtype_r):
test_value = 1
lval_host = dtype_scalar(test_value, dtype=dtype_l)
rval_host = dtype_scalar(test_value, dtype=dtype_r)
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
expect = lval_host ** rval_host
got = lval_gpu ** rval_gpu
assert expect == got.value
assert expect.dtype == got.dtype
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_power_data_invalid())
def test_scalar_power_invalid(dtype_l, dtype_r):
test_value = 1
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
with pytest.raises(TypeError):
lval_gpu ** rval_gpu
@pytest.mark.parametrize(
"date_col",
[
[
"2000-01-01 00:00:00.012345678",
"2000-01-31 00:00:00.012345678",
"2000-02-29 00:00:00.012345678",
]
],
)
@pytest.mark.parametrize("n_periods", [0, 1, -1, 12, -12])
@pytest.mark.parametrize(
"frequency",
[
"months",
"years",
"days",
"hours",
"minutes",
"seconds",
"microseconds",
pytest.param(
"nanoseconds",
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36589"
),
),
],
)
@pytest.mark.parametrize(
"dtype",
["datetime64[ns]", "datetime64[us]", "datetime64[ms]", "datetime64[s]"],
)
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_datetime_dateoffset_binaryop(
date_col, n_periods, frequency, dtype, op
):
gsr = cudf.Series(date_col, dtype=dtype)
psr = gsr.to_pandas() # converts to nanos
kwargs = {frequency: n_periods}
goffset = cudf.DateOffset(**kwargs)
poffset = pd.DateOffset(**kwargs)
expect = op(psr, poffset)
got = op(gsr, goffset)
utils.assert_eq(expect, got)
expect = op(psr, -poffset)
got = op(gsr, -goffset)
utils.assert_eq(expect, got)
@pytest.mark.parametrize(
"date_col",
[
[
"2000-01-01 00:00:00.012345678",
"2000-01-31 00:00:00.012345678",
"2000-02-29 00:00:00.012345678",
]
],
)
@pytest.mark.parametrize(
"kwargs",
[
{"months": 2, "years": 5},
{"microseconds": 1, "seconds": 1},
{"months": 2, "years": 5, "seconds": 923, "microseconds": 481},
pytest.param(
{"milliseconds": 4},
marks=pytest.mark.xfail(
reason="Pandas gets the wrong answer for milliseconds"
),
),
pytest.param(
{"milliseconds": 4, "years": 2},
marks=pytest.mark.xfail(
reason="Pandas construction fails with these keywords"
),
),
pytest.param(
{"nanoseconds": 12},
marks=pytest.mark.xfail(
reason="Pandas gets the wrong answer for nanoseconds"
),
),
],
)
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_datetime_dateoffset_binaryop_multiple(date_col, kwargs, op):
gsr = cudf.Series(date_col, dtype="datetime64[ns]")
psr = gsr.to_pandas()
poffset = pd.DateOffset(**kwargs)
goffset = cudf.DateOffset(**kwargs)
expect = op(psr, poffset)
got = op(gsr, goffset)
utils.assert_eq(expect, got)
@pytest.mark.parametrize(
"date_col",
[
[
"2000-01-01 00:00:00.012345678",
"2000-01-31 00:00:00.012345678",
"2000-02-29 00:00:00.012345678",
]
],
)
@pytest.mark.parametrize("n_periods", [0, 1, -1, 12, -12])
@pytest.mark.parametrize(
"frequency",
[
"months",
"years",
"days",
"hours",
"minutes",
"seconds",
"microseconds",
pytest.param(
"nanoseconds",
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36589"
),
),
],
)
@pytest.mark.parametrize(
"dtype",
["datetime64[ns]", "datetime64[us]", "datetime64[ms]", "datetime64[s]"],
)
def test_datetime_dateoffset_binaryop_reflected(
date_col, n_periods, frequency, dtype
):
gsr = cudf.Series(date_col, dtype=dtype)
psr = gsr.to_pandas() # converts to nanos
kwargs = {frequency: n_periods}
goffset = cudf.DateOffset(**kwargs)
poffset = pd.DateOffset(**kwargs)
expect = poffset + psr
got = goffset + gsr
utils.assert_eq(expect, got)
with pytest.raises(TypeError):
poffset - psr
with pytest.raises(TypeError):
goffset - gsr
@pytest.mark.parametrize("frame", [cudf.Series, cudf.Index, cudf.DataFrame])
@pytest.mark.parametrize(
"dtype", ["int", "str", "datetime64[s]", "timedelta64[s]", "category"]
)
def test_binops_with_lhs_numpy_scalar(frame, dtype):
data = [1, 2, 3, 4, 5]
data = (
frame({"a": data}, dtype=dtype)
if isinstance(frame, cudf.DataFrame)
else frame(data, dtype=dtype)
)
if dtype == "datetime64[s]":
val = np.dtype(dtype).type(4, "s")
elif dtype == "timedelta64[s]":
val = np.dtype(dtype).type(4, "s")
elif dtype == "category":
val = np.int64(4)
else:
val = np.dtype(dtype).type(4)
expected = val == data.to_pandas()
got = val == data
# In case of index, expected would be a numpy array
if isinstance(data, cudf.Index):
expected = pd.Index(expected)
utils.assert_eq(expected, got)
@pytest.mark.parametrize(
"dtype",
[
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float32",
"float64",
"datetime64[ns]",
"datetime64[us]",
"datetime64[ms]",
"datetime64[s]",
"timedelta64[ns]",
"timedelta64[us]",
"timedelta64[ms]",
"timedelta64[s]",
],
)
@pytest.mark.parametrize("op", _operators_comparison)
def test_binops_with_NA_consistent(dtype, op):
data = [1, 2, 3]
sr = cudf.Series(data, dtype=dtype)
result = getattr(sr, op)(cudf.NA)
if dtype in NUMERIC_TYPES:
if op == "ne":
expect_all = True
else:
expect_all = False
assert (result == expect_all).all()
elif dtype in DATETIME_TYPES & TIMEDELTA_TYPES:
assert result._column.null_count == len(data)
def _decimal_series(input, dtype):
return cudf.Series(
[x if x is None else decimal.Decimal(x) for x in input], dtype=dtype,
)
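# --- Illustrative sketch (not part of the original test module): what the
# _decimal_series helper above constructs. It is wrapped in a function so it
# does not run at import time; calling it assumes a working cudf/GPU setup.
def _decimal_series_example():
    return _decimal_series(
        ["1.5", None, "2.0"], cudf.Decimal64Dtype(scale=2, precision=3)
    )  # a cudf.Series of decimal.Decimal values with one null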
@pytest.mark.parametrize(
"args",
[
(
operator.add,
["1.5", "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["1.5", "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["3.0", "4.0"],
cudf.Decimal64Dtype(scale=2, precision=3),
),
(
operator.add,
["1.5", "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["2.25", "1.005"],
cudf.Decimal64Dtype(scale=3, precision=4),
["3.75", "3.005"],
cudf.Decimal64Dtype(scale=3, precision=5),
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["0.1", "0.2"],
cudf.Decimal64Dtype(scale=3, precision=4),
["100.1", "200.2"],
cudf.Decimal64Dtype(scale=3, precision=9),
),
(
operator.sub,
["1.5", "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["2.25", "1.005"],
cudf.Decimal64Dtype(scale=3, precision=4),
["-0.75", "0.995"],
cudf.Decimal64Dtype(scale=3, precision=5),
),
(
operator.sub,
["1.5", "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["2.25", "1.005"],
cudf.Decimal64Dtype(scale=3, precision=4),
["-0.75", "0.995"],
cudf.Decimal64Dtype(scale=3, precision=5),
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["0.1", "0.2"],
cudf.Decimal64Dtype(scale=3, precision=4),
["99.9", "199.8"],
cudf.Decimal64Dtype(scale=3, precision=9),
),
(
operator.mul,
["1.5", "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["1.5", "3.0"],
cudf.Decimal64Dtype(scale=3, precision=4),
["2.25", "6.0"],
cudf.Decimal64Dtype(scale=5, precision=7),
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["0.1", "0.2"],
cudf.Decimal64Dtype(scale=3, precision=4),
["10.0", "40.0"],
cudf.Decimal64Dtype(scale=1, precision=8),
),
(
operator.mul,
["1000", "2000"],
cudf.Decimal64Dtype(scale=-3, precision=4),
["0.343", "0.500"],
cudf.Decimal64Dtype(scale=3, precision=3),
["343.0", "1000.0"],
cudf.Decimal64Dtype(scale=0, precision=8),
),
(
operator.add,
["1.5", None, "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["1.5", None, "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["3.0", None, "4.0"],
cudf.Decimal64Dtype(scale=2, precision=3),
),
(
operator.add,
["1.5", None],
cudf.Decimal64Dtype(scale=2, precision=2),
["2.25", "1.005"],
cudf.Decimal64Dtype(scale=3, precision=4),
["3.75", None],
cudf.Decimal64Dtype(scale=3, precision=5),
),
(
operator.sub,
["1.5", None],
cudf.Decimal64Dtype(scale=2, precision=2),
["2.25", None],
cudf.Decimal64Dtype(scale=3, precision=4),
["-0.75", None],
cudf.Decimal64Dtype(scale=3, precision=5),
),
(
operator.sub,
["1.5", "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["2.25", None],
cudf.Decimal64Dtype(scale=3, precision=4),
["-0.75", None],
cudf.Decimal64Dtype(scale=3, precision=5),
),
(
operator.mul,
["1.5", None],
cudf.Decimal64Dtype(scale=2, precision=2),
["1.5", None],
cudf.Decimal64Dtype(scale=3, precision=4),
["2.25", None],
cudf.Decimal64Dtype(scale=5, precision=7),
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["0.1", None],
cudf.Decimal64Dtype(scale=3, precision=4),
["10.0", None],
cudf.Decimal64Dtype(scale=1, precision=8),
),
(
operator.eq,
["0.18", "0.42"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.18", "0.21"],
cudf.Decimal64Dtype(scale=2, precision=3),
[True, False],
bool,
),
(
operator.eq,
["0.18", "0.42"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.1800", "0.2100"],
cudf.Decimal64Dtype(scale=4, precision=5),
[True, False],
bool,
),
(
operator.eq,
["100", None],
cudf.Decimal64Dtype(scale=-2, precision=3),
["100", "200"],
cudf.Decimal64Dtype(scale=-1, precision=4),
[True, None],
bool,
),
(
operator.lt,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.10", "0.87", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
[False, True, False],
bool,
),
(
operator.lt,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.1000", "0.8700", "1.0000"],
cudf.Decimal64Dtype(scale=4, precision=5),
[False, True, False],
bool,
),
(
operator.lt,
["200", None, "100"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["100", "200", "100"],
cudf.Decimal64Dtype(scale=-1, precision=4),
[False, None, False],
bool,
),
(
operator.gt,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.10", "0.87", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
[True, False, False],
bool,
),
(
operator.gt,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.1000", "0.8700", "1.0000"],
cudf.Decimal64Dtype(scale=4, precision=5),
[True, False, False],
bool,
),
(
operator.gt,
["300", None, "100"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["100", "200", "100"],
cudf.Decimal64Dtype(scale=-1, precision=4),
[True, None, False],
bool,
),
(
operator.le,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.10", "0.87", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
[False, True, True],
bool,
),
(
operator.le,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.1000", "0.8700", "1.0000"],
cudf.Decimal64Dtype(scale=4, precision=5),
[False, True, True],
bool,
),
(
operator.le,
["300", None, "100"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["100", "200", "100"],
cudf.Decimal64Dtype(scale=-1, precision=4),
[False, None, True],
bool,
),
(
operator.ge,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.10", "0.87", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
[True, False, True],
bool,
),
(
operator.ge,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.1000", "0.8700", "1.0000"],
cudf.Decimal64Dtype(scale=4, precision=5),
[True, False, True],
bool,
),
(
operator.ge,
["300", None, "100"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["100", "200", "100"],
cudf.Decimal64Dtype(scale=-1, precision=4),
[True, None, True],
bool,
),
],
)
def test_binops_decimal(args):
op, lhs, l_dtype, rhs, r_dtype, expect, expect_dtype = args
a = _decimal_series(lhs, l_dtype)
b = _decimal_series(rhs, r_dtype)
expect = (
_decimal_series(expect, expect_dtype)
if isinstance(expect_dtype, cudf.Decimal64Dtype)
else cudf.Series(expect, dtype=expect_dtype)
)
got = op(a, b)
assert expect.dtype == got.dtype
utils.assert_eq(expect, got)
@pytest.mark.parametrize(
"args",
[
(
operator.eq,
["100", "41", None],
cudf.Decimal64Dtype(scale=0, precision=5),
[100, 42, 12],
cudf.Series([True, False, None], dtype=bool),
cudf.Series([True, False, None], dtype=bool),
),
(
operator.eq,
["100.000", "42.001", None],
cudf.Decimal64Dtype(scale=3, precision=6),
[100, 42, 12],
cudf.Series([True, False, None], dtype=bool),
cudf.Series([True, False, None], dtype=bool),
),
(
operator.eq,
["100", "40", None],
cudf.Decimal64Dtype(scale=-1, precision=3),
[100, 42, 12],
cudf.Series([True, False, None], dtype=bool),
cudf.Series([True, False, None], dtype=bool),
),
(
operator.lt,
["100", "40", "28", None],
cudf.Decimal64Dtype(scale=0, precision=3),
[100, 42, 24, 12],
cudf.Series([False, True, False, None], dtype=bool),
cudf.Series([False, False, True, None], dtype=bool),
),
(
operator.lt,
["100.000", "42.002", "23.999", None],
cudf.Decimal64Dtype(scale=3, precision=6),
[100, 42, 24, 12],
cudf.Series([False, False, True, None], dtype=bool),
cudf.Series([False, True, False, None], dtype=bool),
),
(
operator.lt,
["100", "40", "10", None],
cudf.Decimal64Dtype(scale=-1, precision=3),
[100, 42, 8, 12],
cudf.Series([False, True, False, None], dtype=bool),
cudf.Series([False, False, True, None], dtype=bool),
),
(
operator.gt,
["100", "42", "20", None],
cudf.Decimal64Dtype(scale=0, precision=3),
[100, 40, 24, 12],
cudf.Series([False, True, False, None], dtype=bool),
cudf.Series([False, False, True, None], dtype=bool),
),
(
operator.gt,
["100.000", "42.002", "23.999", None],
cudf.Decimal64Dtype(scale=3, precision=6),
[100, 42, 24, 12],
cudf.Series([False, True, False, None], dtype=bool),
cudf.Series([False, False, True, None], dtype=bool),
),
(
operator.gt,
["100", "40", "10", None],
cudf.Decimal64Dtype(scale=-1, precision=3),
[100, 42, 8, 12],
cudf.Series([False, False, True, None], dtype=bool),
cudf.Series([False, True, False, None], dtype=bool),
),
(
operator.le,
["100", "40", "28", None],
cudf.Decimal64Dtype(scale=0, precision=3),
[100, 42, 24, 12],
cudf.Series([True, True, False, None], dtype=bool),
cudf.Series([True, False, True, None], dtype=bool),
),
(
operator.le,
["100.000", "42.002", "23.999", None],
cudf.Decimal64Dtype(scale=3, precision=6),
[100, 42, 24, 12],
cudf.Series([True, False, True, None], dtype=bool),
cudf.Series([True, True, False, None], dtype=bool),
),
(
operator.le,
["100", "40", "10", None],
cudf.Decimal64Dtype(scale=-1, precision=3),
[100, 42, 8, 12],
cudf.Series([True, True, False, None], dtype=bool),
cudf.Series([True, False, True, None], dtype=bool),
),
(
operator.ge,
["100", "42", "20", None],
cudf.Decimal64Dtype(scale=0, precision=3),
[100, 40, 24, 12],
cudf.Series([True, True, False, None], dtype=bool),
cudf.Series([True, False, True, None], dtype=bool),
),
(
operator.ge,
["100.000", "42.002", "23.999", None],
cudf.Decimal64Dtype(scale=3, precision=6),
[100, 42, 24, 12],
cudf.Series([True, True, False, None], dtype=bool),
cudf.Series([True, False, True, None], dtype=bool),
),
(
operator.ge,
["100", "40", "10", None],
cudf.Decimal64Dtype(scale=-1, precision=3),
[100, 42, 8, 12],
cudf.Series([True, False, True, None], dtype=bool),
cudf.Series([True, True, False, None], dtype=bool),
),
],
)
@pytest.mark.parametrize("integer_dtype", cudf.tests.utils.INTEGER_TYPES)
@pytest.mark.parametrize("reflected", [True, False])
def test_binops_decimal_comp_mixed_integer(args, integer_dtype, reflected):
"""
Tested compare operations:
eq, lt, gt, le, ge
Each operation has 3 decimal data setups, with scale from {==0, >0, <0}.
Decimal precisions are sufficient to hold the digits.
    For each decimal data setup, there is at least one row that leads to one
    of the following compare results: {True, False, None}. A minimal
    hand-worked sketch follows this function.
"""
if not reflected:
op, ldata, ldtype, rdata, expected, _ = args
else:
op, ldata, ldtype, rdata, _, expected = args
lhs = _decimal_series(ldata, ldtype)
rhs = cudf.Series(rdata, dtype=integer_dtype)
if reflected:
rhs, lhs = lhs, rhs
actual = op(lhs, rhs)
utils.assert_eq(expected, actual)
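# Illustrative sketch referenced in the docstring above (hypothetical
# helper, not collected by pytest): one hand-worked decimal-vs-integer
# comparison with a null, mirroring the first parametrized case.
def _example_decimal_int_compare():
    lhs = _decimal_series(
        ["100", "41", None], cudf.Decimal64Dtype(scale=0, precision=5)
    )
    rhs = cudf.Series([100, 42, 12], dtype="int64")
    # expected elementwise: 100 == 100 -> True, 41 == 42 -> False,
    # <NA> == 12 -> <NA> (nulls propagate through comparisons)
    return operator.eq(lhs, rhs)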
@pytest.mark.parametrize(
"args",
[
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal(1),
["101", "201"],
cudf.Decimal64Dtype(scale=0, precision=6),
False,
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
1,
["101", "201"],
cudf.Decimal64Dtype(scale=0, precision=6),
False,
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal("1.5"),
["101.5", "201.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
False,
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
cudf.Scalar(decimal.Decimal("1.5")),
["101.5", "201.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
False,
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal(1),
["101", "201"],
cudf.Decimal64Dtype(scale=0, precision=6),
True,
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
1,
["101", "201"],
cudf.Decimal64Dtype(scale=0, precision=6),
True,
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal("1.5"),
["101.5", "201.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
True,
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
cudf.Scalar(decimal.Decimal("1.5")),
["101.5", "201.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
True,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
1,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=5),
False,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal(2),
["200", "400"],
cudf.Decimal64Dtype(scale=-2, precision=5),
False,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal("1.5"),
["150", "300"],
cudf.Decimal64Dtype(scale=-1, precision=6),
False,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
cudf.Scalar(decimal.Decimal("1.5")),
["150", "300"],
cudf.Decimal64Dtype(scale=-1, precision=6),
False,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
1,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=5),
True,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal(2),
["200", "400"],
cudf.Decimal64Dtype(scale=-2, precision=5),
True,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal("1.5"),
["150", "300"],
cudf.Decimal64Dtype(scale=-1, precision=6),
True,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
cudf.Scalar(decimal.Decimal("1.5")),
["150", "300"],
cudf.Decimal64Dtype(scale=-1, precision=6),
True,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal(2),
["98", "198"],
cudf.Decimal64Dtype(scale=0, precision=6),
False,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal("2.5"),
["97.5", "197.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
False,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
4,
["96", "196"],
cudf.Decimal64Dtype(scale=0, precision=6),
False,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
cudf.Scalar(decimal.Decimal("2.5")),
["97.5", "197.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
False,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal(2),
["-98", "-198"],
cudf.Decimal64Dtype(scale=0, precision=6),
True,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
4,
["-96", "-196"],
cudf.Decimal64Dtype(scale=0, precision=6),
True,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal("2.5"),
["-97.5", "-197.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
True,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
cudf.Scalar(decimal.Decimal("2.5")),
["-97.5", "-197.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
True,
),
],
)
def test_binops_decimal_scalar(args):
op, lhs, l_dtype, rhs, expect, expect_dtype, reflect = args
def decimal_series(input, dtype):
return cudf.Series(
[x if x is None else decimal.Decimal(x) for x in input],
dtype=dtype,
)
lhs = decimal_series(lhs, l_dtype)
expect = decimal_series(expect, expect_dtype)
if reflect:
lhs, rhs = rhs, lhs
got = op(lhs, rhs)
assert expect.dtype == got.dtype
utils.assert_eq(expect, got)
@pytest.mark.parametrize(
"args",
[
(
operator.eq,
["100.00", "41", None],
cudf.Decimal64Dtype(scale=0, precision=5),
100,
cudf.Series([True, False, None], dtype=bool),
cudf.Series([True, False, None], dtype=bool),
),
(
operator.eq,
["100.123", "41", None],
cudf.Decimal64Dtype(scale=3, precision=6),
decimal.Decimal("100.123"),
cudf.Series([True, False, None], dtype=bool),
cudf.Series([True, False, None], dtype=bool),
),
(
operator.eq,
["100.123", "41", None],
cudf.Decimal64Dtype(scale=3, precision=6),
cudf.Scalar(decimal.Decimal("100.123")),
cudf.Series([True, False, None], dtype=bool),
cudf.Series([True, False, None], dtype=bool),
),
(
operator.gt,
["100.00", "41", "120.21", None],
cudf.Decimal64Dtype(scale=2, precision=5),
100,
cudf.Series([False, False, True, None], dtype=bool),
cudf.Series([False, True, False, None], dtype=bool),
),
(
operator.gt,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
decimal.Decimal("100.123"),
cudf.Series([False, False, True, None], dtype=bool),
cudf.Series([False, True, False, None], dtype=bool),
),
(
operator.gt,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
cudf.Scalar(decimal.Decimal("100.123")),
cudf.Series([False, False, True, None], dtype=bool),
cudf.Series([False, True, False, None], dtype=bool),
),
(
operator.ge,
["100.00", "41", "120.21", None],
cudf.Decimal64Dtype(scale=2, precision=5),
100,
cudf.Series([True, False, True, None], dtype=bool),
cudf.Series([True, True, False, None], dtype=bool),
),
(
operator.ge,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
decimal.Decimal("100.123"),
cudf.Series([True, False, True, None], dtype=bool),
cudf.Series([True, True, False, None], dtype=bool),
),
(
operator.ge,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
cudf.Scalar(decimal.Decimal("100.123")),
cudf.Series([True, False, True, None], dtype=bool),
cudf.Series([True, True, False, None], dtype=bool),
),
(
operator.lt,
["100.00", "41", "120.21", None],
cudf.Decimal64Dtype(scale=2, precision=5),
100,
cudf.Series([False, True, False, None], dtype=bool),
cudf.Series([False, False, True, None], dtype=bool),
),
(
operator.lt,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
decimal.Decimal("100.123"),
cudf.Series([False, True, False, None], dtype=bool),
cudf.Series([False, False, True, None], dtype=bool),
),
(
operator.lt,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
cudf.Scalar(decimal.Decimal("100.123")),
cudf.Series([False, True, False, None], dtype=bool),
cudf.Series([False, False, True, None], dtype=bool),
),
(
operator.le,
["100.00", "41", "120.21", None],
cudf.Decimal64Dtype(scale=2, precision=5),
100,
cudf.Series([True, True, False, None], dtype=bool),
cudf.Series([True, False, True, None], dtype=bool),
),
(
operator.le,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
decimal.Decimal("100.123"),
cudf.Series([True, True, False, None], dtype=bool),
cudf.Series([True, False, True, None], dtype=bool),
),
(
operator.le,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
cudf.Scalar(decimal.Decimal("100.123")),
cudf.Series([True, True, False, None], dtype=bool),
cudf.Series([True, False, True, None], dtype=bool),
),
],
)
@pytest.mark.parametrize("reflected", [True, False])
def test_binops_decimal_scalar_compare(args, reflected):
"""
Tested compare operations:
eq, lt, gt, le, ge
Each operation has 3 data setups: pyints, Decimal, and
decimal cudf.Scalar
    For each data setup, there is at least one row that leads to one of the
    following compare results: {True, False, None}. A minimal hand-worked
    sketch follows this function.
"""
if not reflected:
op, ldata, ldtype, rdata, expected, _ = args
else:
op, ldata, ldtype, rdata, _, expected = args
lhs = _decimal_series(ldata, ldtype)
rhs = rdata
if reflected:
rhs, lhs = lhs, rhs
actual = op(lhs, rhs)
utils.assert_eq(expected, actual)
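# Illustrative sketch referenced in the docstring above (hypothetical
# helper, not collected by pytest): a decimal column compared against a
# host-side decimal.Decimal scalar, mirroring one parametrized case.
def _example_decimal_scalar_compare():
    lhs = _decimal_series(
        ["100.123", "41", None], cudf.Decimal64Dtype(scale=3, precision=6)
    )
    # expected: 100.123 == 100.123 -> True, 41 == 100.123 -> False,
    # <NA> -> <NA>
    return operator.eq(lhs, decimal.Decimal("100.123"))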
@pytest.mark.parametrize(
"dtype",
[
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
"float32",
"float64",
"str",
"datetime64[ns]",
"datetime64[us]",
"datetime64[ms]",
"datetime64[s]",
"timedelta64[ns]",
"timedelta64[us]",
"timedelta64[ms]",
"timedelta64[s]",
],
)
@pytest.mark.parametrize("null_scalar", [None, cudf.NA, np.datetime64("NaT")])
@pytest.mark.parametrize("cmpop", _cmpops)
def test_column_null_scalar_comparison(dtype, null_scalar, cmpop):
# This test is meant to validate that comparing
# a series of any dtype with a null scalar produces
# a new series where all the elements are <NA>.
if isinstance(null_scalar, np.datetime64):
if np.dtype(dtype).kind not in "mM":
pytest.skip()
null_scalar = null_scalar.astype(dtype)
dtype = np.dtype(dtype)
data = [1, 2, 3, 4, 5]
sr = cudf.Series(data, dtype=dtype)
result = cmpop(sr, null_scalar)
assert result.isnull().all()
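# Illustrative sketch (hypothetical helper, not collected by pytest) of the
# behaviour checked above, on a single concrete dtype: comparing any series
# against a null scalar is expected to produce an all-null boolean result.
def _example_null_scalar_compare():
    sr = cudf.Series([1, 2, 3], dtype="int64")
    result = sr == cudf.NA
    return result.isnull().all()  # expected: True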
@pytest.mark.parametrize("fn", ["eq", "ne", "lt", "gt", "le", "ge"])
def test_equality_ops_index_mismatch(fn):
a = cudf.Series(
[1, 2, 3, None, None, 4], index=["a", "b", "c", "d", "e", "f"]
)
b = cudf.Series(
[-5, 4, 3, 2, 1, 0, 19, 11],
index=["aa", "b", "c", "d", "e", "f", "y", "z"],
)
pa = a.to_pandas(nullable=True)
pb = b.to_pandas(nullable=True)
expected = getattr(pa, fn)(pb)
actual = getattr(a, fn)(b).to_pandas(nullable=True)
utils.assert_eq(expected, actual)
def generate_test_null_equals_columnops_data():
    # Generate tuples of:
    #   (left_data, right_data, compare_bool, case)
    # where compare_bool is whether the two columns should compare as
    # null-equal. A minimal sketch follows this function.
def set_null_cases(column_l, column_r, case):
if case == "neither":
return column_l, column_r
elif case == "left":
column_l[1] = None
elif case == "right":
column_r[1] = None
elif case == "both":
column_l[1] = None
column_r[1] = None
else:
raise ValueError("Unknown null case")
return column_l, column_r
null_cases = ["neither", "left", "right", "both"]
data = [1, 2, 3]
results = []
# TODO: Numeric types can be cross compared as null equal
for dtype in (
list(NUMERIC_TYPES)
+ list(DATETIME_TYPES)
+ list(TIMEDELTA_TYPES)
+ list(STRING_TYPES)
+ ["category"]
):
for case in null_cases:
left = cudf.Series(data, dtype=dtype)
right = cudf.Series(data, dtype=dtype)
if case in {"left", "right"}:
answer = False
else:
answer = True
left, right = set_null_cases(left, right, case)
results.append((left._column, right._column, answer, case))
return results
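# Illustrative sketch (hypothetical helper, not collected by pytest) of the
# null-equals rule the generator above encodes: columns compare as
# null-equal only when nulls occupy the same positions (the "neither" and
# "both" cases); a null on just one side makes the answer False.
def _example_null_equals():
    left = cudf.Series([1, None, 3])._column
    right = cudf.Series([1, None, 3])._column
    return left._null_equals(right).all()  # expected: True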
@pytest.mark.parametrize(
"lcol,rcol,ans,case", generate_test_null_equals_columnops_data()
)
def test_null_equals_columnops(lcol, rcol, ans, case):
assert lcol._null_equals(rcol).all() == ans
| 29.10226
| 79
| 0.571429
|
from __future__ import division
import decimal
import operator
import random
from itertools import product
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core import Series
from cudf.core.index import as_index
from cudf.tests import utils
from cudf.utils.dtypes import (
BOOL_TYPES,
DATETIME_TYPES,
FLOAT_TYPES,
INTEGER_TYPES,
NUMERIC_TYPES,
TIMEDELTA_TYPES,
)
STRING_TYPES = {"str"}
_binops = [
operator.add,
operator.sub,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("binop", _binops)
def test_series_binop(binop, obj_class):
nelem = 1000
arr1 = utils.gen_rand("float64", nelem) * 10000
arr2 = utils.gen_rand("float64", nelem) * 10
sr1 = Series(arr1)
sr2 = Series(arr2)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result = binop(sr1, sr2)
expect = binop(pd.Series(arr1), pd.Series(arr2))
if obj_class == "Index":
result = Series(result)
utils.assert_eq(result, expect)
@pytest.mark.parametrize("binop", _binops)
def test_series_binop_concurrent(binop):
def func(index):
arr = np.random.random(100) * 10
sr = Series(arr)
result = binop(sr.astype("int32"), sr)
expect = binop(arr.astype("int32"), arr)
np.testing.assert_almost_equal(result.to_array(), expect, decimal=5)
from concurrent.futures import ThreadPoolExecutor
indices = range(10)
with ThreadPoolExecutor(4) as e:
list(e.map(func, indices))
@pytest.mark.parametrize("use_cudf_scalar", [False, True])
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("nelem,binop", list(product([1, 2, 100], _binops)))
def test_series_binop_scalar(nelem, binop, obj_class, use_cudf_scalar):
arr = np.random.random(nelem)
rhs = random.choice(arr).item()
sr = Series(arr)
if obj_class == "Index":
sr = as_index(sr)
    if use_cudf_scalar:
        result = binop(sr, cudf.Scalar(rhs))
    else:
        result = binop(sr, rhs)
if obj_class == "Index":
result = Series(result)
np.testing.assert_almost_equal(result.to_array(), binop(arr, rhs))
_bitwise_binops = [operator.and_, operator.or_, operator.xor]
_int_types = [
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("binop", _bitwise_binops)
@pytest.mark.parametrize(
"lhs_dtype,rhs_dtype", list(product(_int_types, _int_types))
)
def test_series_bitwise_binop(binop, obj_class, lhs_dtype, rhs_dtype):
arr1 = (np.random.random(100) * 100).astype(lhs_dtype)
sr1 = Series(arr1)
arr2 = (np.random.random(100) * 100).astype(rhs_dtype)
sr2 = Series(arr2)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result = binop(sr1, sr2)
if obj_class == "Index":
result = Series(result)
np.testing.assert_almost_equal(result.to_array(), binop(arr1, arr2))
_logical_binops = [
(operator.and_, operator.and_),
(operator.or_, operator.or_),
(np.logical_and, cudf.logical_and),
(np.logical_or, cudf.logical_or),
]
@pytest.mark.parametrize("lhstype", _int_types + [np.bool_])
@pytest.mark.parametrize("rhstype", _int_types + [np.bool_])
@pytest.mark.parametrize("binop,cubinop", _logical_binops)
def test_series_logical_binop(lhstype, rhstype, binop, cubinop):
arr1 = pd.Series(np.random.choice([True, False], 10))
if lhstype is not np.bool_:
arr1 = arr1 * (np.random.random(10) * 100).astype(lhstype)
sr1 = Series(arr1)
arr2 = pd.Series(np.random.choice([True, False], 10))
if rhstype is not np.bool_:
arr2 = arr2 * (np.random.random(10) * 100).astype(rhstype)
sr2 = Series(arr2)
result = cubinop(sr1, sr2)
expect = binop(arr1, arr2)
utils.assert_eq(result, expect)
_cmpops = [
operator.lt,
operator.gt,
operator.le,
operator.ge,
operator.eq,
operator.ne,
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("cmpop", _cmpops)
@pytest.mark.parametrize(
"dtype", ["int8", "int32", "int64", "float32", "float64", "datetime64[ms]"]
)
def test_series_compare(cmpop, obj_class, dtype):
arr1 = np.random.randint(0, 100, 100).astype(dtype)
arr2 = np.random.randint(0, 100, 100).astype(dtype)
sr1 = Series(arr1)
sr2 = Series(arr2)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result1 = cmpop(sr1, sr1)
result2 = cmpop(sr2, sr2)
result3 = cmpop(sr1, sr2)
if obj_class == "Index":
result1 = Series(result1)
result2 = Series(result2)
result3 = Series(result3)
np.testing.assert_equal(result1.to_array(), cmpop(arr1, arr1))
np.testing.assert_equal(result2.to_array(), cmpop(arr2, arr2))
np.testing.assert_equal(result3.to_array(), cmpop(arr1, arr2))
def _series_compare_nulls_typegen():
tests = []
tests += list(product(DATETIME_TYPES, DATETIME_TYPES))
tests += list(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))
tests += list(product(NUMERIC_TYPES, NUMERIC_TYPES))
tests += list(product(STRING_TYPES, STRING_TYPES))
return tests
@pytest.mark.parametrize("cmpop", _cmpops)
@pytest.mark.parametrize("dtypes", _series_compare_nulls_typegen())
def test_series_compare_nulls(cmpop, dtypes):
ltype, rtype = dtypes
ldata = [1, 2, None, None, 5]
rdata = [2, 1, None, 4, None]
lser = Series(ldata, dtype=ltype)
rser = Series(rdata, dtype=rtype)
lmask = ~lser.isnull()
rmask = ~rser.isnull()
expect_mask = np.logical_and(lmask, rmask)
expect = cudf.Series([None] * 5, dtype="bool")
expect[expect_mask] = cmpop(lser[expect_mask], rser[expect_mask])
got = cmpop(lser, rser)
utils.assert_eq(expect, got)
@pytest.mark.parametrize(
"obj", [pd.Series(["a", "b", None, "d", "e", None], dtype="string"), "a"]
)
@pytest.mark.parametrize("cmpop", _cmpops)
@pytest.mark.parametrize(
"cmp_obj",
[pd.Series(["b", "a", None, "d", "f", None], dtype="string"), "a"],
)
def test_string_series_compare(obj, cmpop, cmp_obj):
g_obj = obj
if isinstance(g_obj, pd.Series):
g_obj = Series.from_pandas(g_obj)
g_cmp_obj = cmp_obj
if isinstance(g_cmp_obj, pd.Series):
g_cmp_obj = Series.from_pandas(g_cmp_obj)
got = cmpop(g_obj, g_cmp_obj)
expected = cmpop(obj, cmp_obj)
if isinstance(expected, pd.Series):
expected = cudf.from_pandas(expected)
utils.assert_eq(expected, got)
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("nelem", [1, 2, 100])
@pytest.mark.parametrize("cmpop", _cmpops)
@pytest.mark.parametrize("dtype", utils.NUMERIC_TYPES + ["datetime64[ms]"])
@pytest.mark.parametrize("use_cudf_scalar", [True, False])
def test_series_compare_scalar(
nelem, cmpop, obj_class, dtype, use_cudf_scalar
):
arr1 = np.random.randint(0, 100, 100).astype(dtype)
sr1 = Series(arr1)
rhs = random.choice(arr1).item()
if use_cudf_scalar:
rhs = cudf.Scalar(rhs)
if obj_class == "Index":
sr1 = as_index(sr1)
result1 = cmpop(sr1, rhs)
result2 = cmpop(rhs, sr1)
if obj_class == "Index":
result1 = Series(result1)
result2 = Series(result2)
np.testing.assert_equal(result1.to_array(), cmpop(arr1, rhs))
np.testing.assert_equal(result2.to_array(), cmpop(rhs, arr1))
_nulls = ["none", "some"]
@pytest.mark.parametrize("nelem", [1, 7, 8, 9, 32, 64, 128])
@pytest.mark.parametrize("lhs_nulls,rhs_nulls", list(product(_nulls, _nulls)))
def test_validity_add(nelem, lhs_nulls, rhs_nulls):
np.random.seed(0)
lhs_data = np.random.random(nelem)
if lhs_nulls == "some":
lhs_mask = utils.random_bitmask(nelem)
lhs_bitmask = utils.expand_bits_to_bytes(lhs_mask)[:nelem]
lhs_null_count = utils.count_zero(lhs_bitmask)
assert lhs_null_count >= 0
lhs = Series.from_masked_array(lhs_data, lhs_mask)
assert lhs.null_count == lhs_null_count
else:
lhs = Series(lhs_data)
rhs_data = np.random.random(nelem)
if rhs_nulls == "some":
rhs_mask = utils.random_bitmask(nelem)
rhs_bitmask = utils.expand_bits_to_bytes(rhs_mask)[:nelem]
rhs_null_count = utils.count_zero(rhs_bitmask)
assert rhs_null_count >= 0
rhs = Series.from_masked_array(rhs_data, rhs_mask)
assert rhs.null_count == rhs_null_count
else:
rhs = Series(rhs_data)
res = lhs + rhs
if lhs_nulls == "some" and rhs_nulls == "some":
res_mask = np.asarray(
utils.expand_bits_to_bytes(lhs_mask & rhs_mask), dtype=np.bool_
)[:nelem]
if lhs_nulls == "some" and rhs_nulls == "none":
res_mask = np.asarray(
utils.expand_bits_to_bytes(lhs_mask), dtype=np.bool_
)[:nelem]
if lhs_nulls == "none" and rhs_nulls == "some":
res_mask = np.asarray(
utils.expand_bits_to_bytes(rhs_mask), dtype=np.bool_
)[:nelem]
na_value = -10000
got = res.fillna(na_value).to_array()
expect = lhs_data + rhs_data
if lhs_nulls == "some" or rhs_nulls == "some":
expect[~res_mask] = na_value
np.testing.assert_array_equal(expect, got)
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize(
"binop,lhs_dtype,rhs_dtype",
list(
product(
[operator.add, operator.mul],
utils.NUMERIC_TYPES,
utils.NUMERIC_TYPES,
)
),
)
def test_series_binop_mixed_dtype(binop, lhs_dtype, rhs_dtype, obj_class):
nelem = 10
lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype)
rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype)
sr1 = Series(lhs)
sr2 = Series(rhs)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result = binop(Series(sr1), Series(sr2))
if obj_class == "Index":
result = Series(result)
np.testing.assert_almost_equal(result.to_array(), binop(lhs, rhs))
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize(
"cmpop,lhs_dtype,rhs_dtype",
list(product(_cmpops, utils.NUMERIC_TYPES, utils.NUMERIC_TYPES)),
)
def test_series_cmpop_mixed_dtype(cmpop, lhs_dtype, rhs_dtype, obj_class):
nelem = 5
lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype)
rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype)
sr1 = Series(lhs)
sr2 = Series(rhs)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result = cmpop(Series(sr1), Series(sr2))
if obj_class == "Index":
result = Series(result)
np.testing.assert_array_equal(result.to_array(), cmpop(lhs, rhs))
_reflected_ops = [
lambda x: 1 + x,
lambda x: 2 * x,
lambda x: 2 - x,
lambda x: 2 // x,
lambda x: 2 / x,
lambda x: 3 + x,
lambda x: 3 * x,
lambda x: 3 - x,
lambda x: 3 // x,
lambda x: 3 / x,
lambda x: 3 % x,
lambda x: -1 + x,
lambda x: -2 * x,
lambda x: -2 - x,
lambda x: -2 // x,
lambda x: -2 / x,
lambda x: -3 + x,
lambda x: -3 * x,
lambda x: -3 - x,
lambda x: -3 // x,
lambda x: -3 / x,
lambda x: -3 % x,
lambda x: 0 + x,
lambda x: 0 * x,
lambda x: 0 - x,
lambda x: 0 // x,
lambda x: 0 / x,
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize(
"func, dtype", list(product(_reflected_ops, utils.NUMERIC_TYPES))
)
def test_reflected_ops_scalar(func, dtype, obj_class):
np.random.seed(12)
random_series = utils.gen_rand(dtype, 100, low=10)
gs = Series(random_series)
if obj_class == "Index":
gs = as_index(gs)
gs_result = func(gs)
if obj_class == "Index":
gs = Series(gs)
ps_result = func(random_series)
np.testing.assert_allclose(ps_result, gs_result.to_array())
_cudf_scalar_reflected_ops = [
lambda x: cudf.Scalar(1) + x,
lambda x: cudf.Scalar(2) * x,
lambda x: cudf.Scalar(2) - x,
lambda x: cudf.Scalar(2) // x,
lambda x: cudf.Scalar(2) / x,
lambda x: cudf.Scalar(3) + x,
lambda x: cudf.Scalar(3) * x,
lambda x: cudf.Scalar(3) - x,
lambda x: cudf.Scalar(3) // x,
lambda x: cudf.Scalar(3) / x,
lambda x: cudf.Scalar(3) % x,
lambda x: cudf.Scalar(-1) + x,
lambda x: cudf.Scalar(-2) * x,
lambda x: cudf.Scalar(-2) - x,
lambda x: cudf.Scalar(-2) // x,
lambda x: cudf.Scalar(-2) / x,
lambda x: cudf.Scalar(-3) + x,
lambda x: cudf.Scalar(-3) * x,
lambda x: cudf.Scalar(-3) - x,
lambda x: cudf.Scalar(-3) // x,
lambda x: cudf.Scalar(-3) / x,
lambda x: cudf.Scalar(-3) % x,
lambda x: cudf.Scalar(0) + x,
lambda x: cudf.Scalar(0) * x,
lambda x: cudf.Scalar(0) - x,
lambda x: cudf.Scalar(0) // x,
lambda x: cudf.Scalar(0) / x,
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize(
"funcs, dtype",
list(
product(
list(zip(_reflected_ops, _cudf_scalar_reflected_ops)),
utils.NUMERIC_TYPES,
)
),
)
def test_reflected_ops_cudf_scalar(funcs, dtype, obj_class):
cpu_func, gpu_func = funcs
np.random.seed(12)
random_series = utils.gen_rand(dtype, 100, low=10)
gs = Series(random_series)
if obj_class == "Index":
gs = as_index(gs)
gs_result = gpu_func(gs)
if obj_class == "Index":
gs = Series(gs)
ps_result = cpu_func(random_series)
np.testing.assert_allclose(ps_result, gs_result.to_array())
@pytest.mark.parametrize("binop", _binops)
def test_different_shapes_and_columns(binop):
if binop is operator.pow:
return
pd_frame = binop(pd.DataFrame({"x": [1, 2]}), pd.DataFrame({}))
cd_frame = binop(cudf.DataFrame({"x": [1, 2]}), cudf.DataFrame({}))
utils.assert_eq(cd_frame, pd_frame)
pd_frame = pd.DataFrame({}) + pd.DataFrame({"x": [1, 2]})
cd_frame = cudf.DataFrame({}) + cudf.DataFrame({"x": [1, 2]})
utils.assert_eq(cd_frame, pd_frame)
pd_frame = pd.DataFrame({"x": [1, 2, 3]}) + pd.DataFrame({"y": [1, 2]})
cd_frame = cudf.DataFrame({"x": [1, 2, 3]}) + cudf.DataFrame({"y": [1, 2]})
utils.assert_eq(cd_frame, pd_frame)
pd_frame = pd.DataFrame({"x": [1, 2]}) + pd.DataFrame({"y": [1, 2, 3]})
cd_frame = cudf.DataFrame({"x": [1, 2]}) + cudf.DataFrame({"y": [1, 2, 3]})
utils.assert_eq(cd_frame, pd_frame)
@pytest.mark.parametrize("binop", _binops)
def test_different_shapes_and_same_columns(binop):
if binop is operator.pow:
return
pd_frame = binop(
pd.DataFrame({"x": [1, 2]}), pd.DataFrame({"x": [1, 2, 3]})
)
cd_frame = binop(
cudf.DataFrame({"x": [1, 2]}), cudf.DataFrame({"x": [1, 2, 3]})
)
cd_frame["x"] = cd_frame["x"].astype(np.float64)
utils.assert_eq(cd_frame, pd_frame)
@pytest.mark.parametrize("binop", _binops)
def test_different_shapes_and_columns_with_unaligned_indices(binop):
if binop is operator.pow:
return
pdf1 = pd.DataFrame({"x": [4, 3, 2, 1], "y": [7, 3, 8, 6]})
pdf2 = pd.DataFrame(
{"x": [1, 2, 3, 7], "y": [4, 5, 6, 7]}, index=[0, 1, 3, 4]
)
pdf3 = pd.DataFrame(
{"x": [4, 5, 6, 7], "y": [1, 2, 3, 7], "z": [0, 5, 3, 7]},
index=[0, 3, 5, 3],
)
gdf1 = cudf.DataFrame.from_pandas(pdf1)
gdf2 = cudf.DataFrame.from_pandas(pdf2)
gdf3 = cudf.DataFrame.from_pandas(pdf3)
pd_frame = binop(binop(pdf1, pdf2), pdf3)
cd_frame = binop(binop(gdf1, gdf2), gdf3)
cd_frame["x"] = cd_frame["x"].astype(np.float64)
cd_frame["y"] = cd_frame["y"].astype(np.float64)
utils.assert_eq(cd_frame, pd_frame)
@pytest.mark.parametrize(
"df2",
[
cudf.DataFrame({"a": [3, 2, 1]}, index=[3, 2, 1]),
cudf.DataFrame([3, 2]),
],
)
@pytest.mark.parametrize("binop", [operator.eq, operator.ne])
def test_df_different_index_shape(df2, binop):
df1 = cudf.DataFrame([1, 2, 3], index=[1, 2, 3])
pdf1 = df1.to_pandas()
pdf2 = df2.to_pandas()
utils.assert_exceptions_equal(
lfunc=binop,
rfunc=binop,
lfunc_args_and_kwargs=([pdf1, pdf2],),
rfunc_args_and_kwargs=([df1, df2],),
)
@pytest.mark.parametrize("op", [operator.eq, operator.ne])
def test_boolean_scalar_binop(op):
psr = pd.Series(np.random.choice([True, False], 10))
gsr = cudf.from_pandas(psr)
utils.assert_eq(op(psr, True), op(gsr, True))
utils.assert_eq(op(psr, False), op(gsr, False))
utils.assert_eq(op(psr, True), op(gsr, cudf.Scalar(True)))
utils.assert_eq(op(psr, False), op(gsr, cudf.Scalar(False)))
_operators_arithmetic = [
"add",
"radd",
"sub",
"rsub",
"mul",
"rmul",
"mod",
"rmod",
"pow",
"rpow",
"floordiv",
"rfloordiv",
"truediv",
"rtruediv",
]
_operators_comparison = ["eq", "ne", "lt", "le", "gt", "ge"]
@pytest.mark.parametrize("func", _operators_arithmetic)
@pytest.mark.parametrize("has_nulls", [True, False])
@pytest.mark.parametrize("fill_value", [None, 27])
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_operator_func_between_series(dtype, func, has_nulls, fill_value):
count = 1000
gdf_series_a = utils.gen_rand_series(
dtype, count, has_nulls=has_nulls, stride=10000
)
gdf_series_b = utils.gen_rand_series(
dtype, count, has_nulls=has_nulls, stride=100
)
pdf_series_a = gdf_series_a.to_pandas()
pdf_series_b = gdf_series_b.to_pandas()
gdf_result = getattr(gdf_series_a, func)(
gdf_series_b, fill_value=fill_value
)
pdf_result = getattr(pdf_series_a, func)(
pdf_series_b, fill_value=fill_value
)
utils.assert_eq(pdf_result, gdf_result)
@pytest.mark.parametrize("func", _operators_arithmetic)
@pytest.mark.parametrize("has_nulls", [True, False])
@pytest.mark.parametrize("fill_value", [None, 27])
@pytest.mark.parametrize("dtype", ["float32", "float64"])
@pytest.mark.parametrize("use_cudf_scalar", [False, True])
def test_operator_func_series_and_scalar(
dtype, func, has_nulls, fill_value, use_cudf_scalar
):
count = 1000
scalar = 59
gdf_series = utils.gen_rand_series(
dtype, count, has_nulls=has_nulls, stride=10000
)
pdf_series = gdf_series.to_pandas()
gdf_series_result = getattr(gdf_series, func)(
cudf.Scalar(scalar) if use_cudf_scalar else scalar,
fill_value=fill_value,
)
pdf_series_result = getattr(pdf_series, func)(
scalar, fill_value=fill_value
)
utils.assert_eq(pdf_series_result, gdf_series_result)
_permu_values = [0, 1, None, np.nan]
@pytest.mark.parametrize("fill_value", _permu_values)
@pytest.mark.parametrize("scalar_a", _permu_values)
@pytest.mark.parametrize("scalar_b", _permu_values)
@pytest.mark.parametrize("func", _operators_comparison)
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_operator_func_between_series_logical(
dtype, func, scalar_a, scalar_b, fill_value
):
gdf_series_a = Series([scalar_a], nan_as_null=False).astype(dtype)
gdf_series_b = Series([scalar_b], nan_as_null=False).astype(dtype)
pdf_series_a = gdf_series_a.to_pandas(nullable=True)
pdf_series_b = gdf_series_b.to_pandas(nullable=True)
gdf_series_result = getattr(gdf_series_a, func)(
gdf_series_b, fill_value=fill_value
)
pdf_series_result = getattr(pdf_series_a, func)(
pdf_series_b, fill_value=fill_value
)
expect = pdf_series_result
got = gdf_series_result.to_pandas(nullable=True)
if (
(pdf_series_a.isnull().sum() != pdf_series_b.isnull().sum())
and np.isscalar(fill_value)
and np.isnan(fill_value)
):
with pytest.raises(AssertionError):
utils.assert_eq(expect, got)
return
utils.assert_eq(expect, got)
@pytest.mark.parametrize("dtype", ["float32", "float64"])
@pytest.mark.parametrize("func", _operators_comparison)
@pytest.mark.parametrize("has_nulls", [True, False])
@pytest.mark.parametrize("scalar", [-59.0, np.nan, 0, 59.0])
@pytest.mark.parametrize("fill_value", [None, True, False, 1.0])
@pytest.mark.parametrize("use_cudf_scalar", [False, True])
def test_operator_func_series_and_scalar_logical(
dtype, func, has_nulls, scalar, fill_value, use_cudf_scalar
):
gdf_series = utils.gen_rand_series(
dtype, 1000, has_nulls=has_nulls, stride=10000
)
pdf_series = gdf_series.to_pandas(nullable=True)
gdf_series_result = getattr(gdf_series, func)(
cudf.Scalar(scalar) if use_cudf_scalar else scalar,
fill_value=fill_value,
)
pdf_series_result = getattr(pdf_series, func)(
scalar, fill_value=fill_value
)
expect = pdf_series_result
got = gdf_series_result.to_pandas(nullable=True)
utils.assert_eq(expect, got)
@pytest.mark.parametrize("func", _operators_arithmetic)
@pytest.mark.parametrize("nulls", _nulls)
@pytest.mark.parametrize("fill_value", [None, 27])
@pytest.mark.parametrize("other", ["df", "scalar"])
def test_operator_func_dataframe(func, nulls, fill_value, other):
num_rows = 100
num_cols = 3
def gen_df():
pdf = pd.DataFrame()
from string import ascii_lowercase
cols = np.random.choice(num_cols + 5, num_cols, replace=False)
for i in range(num_cols):
colname = ascii_lowercase[cols[i]]
data = utils.gen_rand("float64", num_rows) * 10000
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
pdf[colname] = data
return pdf
pdf1 = gen_df()
pdf2 = gen_df() if other == "df" else 59.0
gdf1 = cudf.DataFrame.from_pandas(pdf1)
gdf2 = cudf.DataFrame.from_pandas(pdf2) if other == "df" else 59.0
got = getattr(gdf1, func)(gdf2, fill_value=fill_value)
expect = getattr(pdf1, func)(pdf2, fill_value=fill_value)[list(got._data)]
utils.assert_eq(expect, got)
@pytest.mark.parametrize("func", _operators_arithmetic + _operators_comparison)
@pytest.mark.parametrize("rhs", [0, 1, 2, 128])
def test_binop_bool_uint(func, rhs):
rmod" or func == "rfloordiv":
return
psr = pd.Series([True, False, False])
gsr = cudf.from_pandas(psr)
utils.assert_eq(
getattr(psr, func)(rhs), getattr(gsr, func)(rhs), check_dtype=False
)
def test_series_misc_binop():
pds = pd.Series([1, 2, 4], name="abc xyz")
gds = cudf.Series([1, 2, 4], name="abc xyz")
utils.assert_eq(pds + 1, gds + 1)
utils.assert_eq(1 + pds, 1 + gds)
utils.assert_eq(pds + pds, gds + gds)
pds1 = pd.Series([1, 2, 4], name="hello world")
gds1 = cudf.Series([1, 2, 4], name="hello world")
utils.assert_eq(pds + pds1, gds + gds1)
utils.assert_eq(pds1 + pds, gds1 + gds)
utils.assert_eq(pds1 + pds + 5, gds1 + gds + 5)
def test_int8_float16_binop():
a = cudf.Series([1], dtype="int8")
b = np.float16(2)
expect = cudf.Series([0.5])
got = a / b
utils.assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize("dtype", ["int64", "float64", "str"])
def test_vector_to_none_binops(dtype):
data = Series([1, 2, 3, None], dtype=dtype)
expect = Series([None] * 4).astype(dtype)
got = data + None
utils.assert_eq(expect, got)
@pytest.mark.parametrize(
"lhs",
[
1,
3,
4,
pd.Series([5, 6, 2]),
pd.Series([0, 10, 20, 30, 3, 4, 5, 6, 2]),
6,
],
)
@pytest.mark.parametrize("rhs", [1, 3, 4, pd.Series([5, 6, 2])])
@pytest.mark.parametrize(
"ops",
[
(np.remainder, cudf.remainder),
(np.floor_divide, cudf.floor_divide),
(np.subtract, cudf.subtract),
(np.add, cudf.add),
(np.true_divide, cudf.true_divide),
(np.multiply, cudf.multiply),
],
)
def test_ufunc_ops(lhs, rhs, ops):
np_op, cu_op = ops
if isinstance(lhs, pd.Series):
culhs = cudf.from_pandas(lhs)
else:
culhs = lhs
if isinstance(rhs, pd.Series):
curhs = cudf.from_pandas(rhs)
else:
curhs = rhs
expect = np_op(lhs, rhs)
got = cu_op(culhs, curhs)
if np.isscalar(expect):
assert got == expect
else:
utils.assert_eq(
expect, got,
)
def dtype_scalar(val, dtype):
if dtype == "str":
return str(val)
dtype = np.dtype(dtype)
if dtype.type in {np.datetime64, np.timedelta64}:
res, _ = np.datetime_data(dtype)
return dtype.type(val, res)
else:
return dtype.type(val)
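# Usage sketch for dtype_scalar (values shown are the expected host
# scalars): dtype_scalar(1, "float32") -> np.float32(1.0);
# dtype_scalar(1, "datetime64[ms]") -> np.datetime64(1, "ms");
# dtype_scalar(1, "str") -> "1".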
def make_valid_scalar_add_data():
valid = set()
valid |= set(
product(
INTEGER_TYPES,
FLOAT_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,
)
)
valid |= set(
product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)
)
valid |= set(
product(DATETIME_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES | BOOL_TYPES)
)
valid |= set(
product(TIMEDELTA_TYPES, INTEGER_TYPES | DATETIME_TYPES | BOOL_TYPES)
)
valid |= set(
product(
BOOL_TYPES,
INTEGER_TYPES
| FLOAT_TYPES
| DATETIME_TYPES
| TIMEDELTA_TYPES
| BOOL_TYPES,
)
)
valid |= {("str", "str")}
return sorted(list(valid))
def make_invalid_scalar_add_data():
invalid = set()
invalid |= set(product(FLOAT_TYPES, DATETIME_TYPES))
invalid |= set(product(FLOAT_TYPES, TIMEDELTA_TYPES))
invalid |= set(product(DATETIME_TYPES, FLOAT_TYPES))
invalid |= set(product(DATETIME_TYPES, DATETIME_TYPES))
invalid |= set(product(FLOAT_TYPES, TIMEDELTA_TYPES))
return sorted(list(invalid))
@pytest.mark.parametrize("dtype_l,dtype_r", make_valid_scalar_add_data())
def test_scalar_add(dtype_l, dtype_r):
test_value = 1
lval_host = dtype_scalar(test_value, dtype=dtype_l)
rval_host = dtype_scalar(test_value, dtype=dtype_r)
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
expect = lval_host + rval_host
got = lval_gpu + rval_gpu
assert expect == got.value
if not dtype_l == dtype_r == "str":
assert expect.dtype == got.dtype
@pytest.mark.parametrize("dtype_l,dtype_r", make_invalid_scalar_add_data())
def test_scalar_add_invalid(dtype_l, dtype_r):
test_value = 1
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
with pytest.raises(TypeError):
lval_gpu + rval_gpu
def make_scalar_difference_data():
valid = set()
valid |= set(
product(
INTEGER_TYPES,
INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,
)
)
valid |= set(
product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)
)
valid |= set(
product(
DATETIME_TYPES,
INTEGER_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,
)
)
valid |= set(
product(TIMEDELTA_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES | BOOL_TYPES)
)
valid |= set(
product(BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES)
)
return sorted(list(valid))
def make_scalar_difference_data_invalid():
invalid = set()
invalid |= set(product(INTEGER_TYPES, DATETIME_TYPES))
# we can't subtract a datetime or timedelta from a float
invalid |= set(product(FLOAT_TYPES, DATETIME_TYPES | TIMEDELTA_TYPES))
invalid |= set(product(DATETIME_TYPES | TIMEDELTA_TYPES, FLOAT_TYPES))
# We can't subtract a datetime from a timedelta
invalid |= set(product(TIMEDELTA_TYPES, DATETIME_TYPES))
invalid |= set(product(BOOL_TYPES, BOOL_TYPES | DATETIME_TYPES))
return sorted(list(invalid))
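# Illustrative sketch (hypothetical helper, not collected by pytest) of the
# subtraction rules encoded above, assuming cudf.Scalar mirrors the host
# (NumPy) behaviour asserted by the tests below: datetime - timedelta stays
# a datetime, while float - datetime is rejected with TypeError.
def _example_scalar_difference_rules():
    dt = cudf.Scalar(10, dtype="datetime64[s]")
    td = cudf.Scalar(3, dtype="timedelta64[s]")
    ok = dt - td  # expected: still a datetime64[s] scalar
    try:
        cudf.Scalar(1.0, dtype="float64") - dt
    except TypeError:
        return ok  # float minus datetime is rejected, as encoded above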
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_difference_data())
def test_scalar_difference(dtype_l, dtype_r):
test_value = 1
lval_host = dtype_scalar(test_value, dtype=dtype_l)
rval_host = dtype_scalar(test_value, dtype=dtype_r)
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
expect = lval_host - rval_host
got = lval_gpu - rval_gpu
assert expect == got.value
assert expect.dtype == got.dtype
@pytest.mark.parametrize(
"dtype_l,dtype_r", make_scalar_difference_data_invalid()
)
def test_scalar_difference_invalid(dtype_l, dtype_r):
test_value = 1
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
with pytest.raises(TypeError):
lval_gpu - rval_gpu
def make_scalar_product_data():
valid = set()
    # we can multiply an int or bool by any int, float, timedelta, or bool
valid |= set(
product(
INTEGER_TYPES | BOOL_TYPES,
INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,
)
)
    # we can multiply any timedelta by any int or bool
valid |= set(product(TIMEDELTA_TYPES, INTEGER_TYPES | BOOL_TYPES))
# we can multiply a float by any int, float, or bool
valid |= set(
product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)
)
return sorted(list(valid))
def make_scalar_product_data_invalid():
invalid = set()
    # can't multiply ints, floats, datetimes, timedeltas, or bools by
    # datetimes, nor datetimes by anything
invalid |= set(
product(
INTEGER_TYPES
| FLOAT_TYPES
| DATETIME_TYPES
| TIMEDELTA_TYPES
| BOOL_TYPES,
DATETIME_TYPES,
)
)
invalid |= set(
product(
DATETIME_TYPES,
INTEGER_TYPES
| FLOAT_TYPES
| DATETIME_TYPES
| TIMEDELTA_TYPES
| BOOL_TYPES,
)
)
# can't multiply timedeltas by timedeltas
invalid |= set(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))
return sorted(list(invalid))
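# Illustrative sketch (hypothetical helper, not collected by pytest) of the
# multiplication rules encoded above: int * timedelta is a valid pairing,
# while datetime * int is expected to raise TypeError.
def _example_scalar_product_rules():
    ok = cudf.Scalar(2, dtype="int32") * cudf.Scalar(3, dtype="timedelta64[s]")
    try:
        cudf.Scalar(1, dtype="datetime64[s]") * cudf.Scalar(2, dtype="int32")
    except TypeError:
        return ok  # datetime * int is rejected, as encoded above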
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_product_data())
def test_scalar_product(dtype_l, dtype_r):
test_value = 1
lval_host = dtype_scalar(test_value, dtype=dtype_l)
rval_host = dtype_scalar(test_value, dtype=dtype_r)
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
expect = lval_host * rval_host
got = lval_gpu * rval_gpu
assert expect == got.value
assert expect.dtype == got.dtype
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_product_data_invalid())
def test_scalar_product_invalid(dtype_l, dtype_r):
test_value = 1
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
with pytest.raises(TypeError):
lval_gpu * rval_gpu
def make_scalar_floordiv_data():
valid = set()
valid |= set(
product(
INTEGER_TYPES | FLOAT_TYPES,
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
)
)
valid |= set(
product(TIMEDELTA_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES)
)
valid |= set(product(BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES))
return sorted(list(valid))
def make_scalar_floordiv_data_invalid():
invalid = set()
invalid |= set(
product(
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
DATETIME_TYPES | TIMEDELTA_TYPES,
)
)
    # we can't floor-divide datetimes by anything
invalid |= set(
product(
DATETIME_TYPES,
INTEGER_TYPES
| FLOAT_TYPES
| DATETIME_TYPES
| TIMEDELTA_TYPES
| BOOL_TYPES,
)
)
invalid |= set(product(TIMEDELTA_TYPES, BOOL_TYPES | DATETIME_TYPES))
return sorted(list(invalid))
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_floordiv_data())
def test_scalar_floordiv(dtype_l, dtype_r):
test_value = 1
lval_host = dtype_scalar(test_value, dtype=dtype_l)
rval_host = dtype_scalar(test_value, dtype=dtype_r)
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
expect = lval_host // rval_host
got = lval_gpu // rval_gpu
assert expect == got.value
assert expect.dtype == got.dtype
@pytest.mark.parametrize(
"dtype_l,dtype_r", make_scalar_floordiv_data_invalid()
)
def test_scalar_floordiv_invalid(dtype_l, dtype_r):
test_value = 1
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
with pytest.raises(TypeError):
lval_gpu // rval_gpu
def make_scalar_truediv_data():
valid = set()
# we can true divide ints, floats, or bools by other
# ints, floats or bools
valid |= set(
product(
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
)
)
    # we can true divide timedeltas by ints or timedeltas
valid |= set(product(TIMEDELTA_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES))
return sorted(list(valid))
def make_scalar_truediv_data_invalid():
invalid = set()
    # we can't true divide ints, floats, or bools by datetimes or timedeltas
invalid |= set(
product(
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
DATETIME_TYPES | TIMEDELTA_TYPES,
)
)
invalid |= set(
product(
DATETIME_TYPES,
INTEGER_TYPES
| FLOAT_TYPES
| DATETIME_TYPES
| TIMEDELTA_TYPES
| BOOL_TYPES,
)
)
invalid |= set(
product(TIMEDELTA_TYPES, DATETIME_TYPES | BOOL_TYPES | FLOAT_TYPES)
)
return sorted(list(invalid))
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_truediv_data())
def test_scalar_truediv(dtype_l, dtype_r):
test_value = 1
lval_host = dtype_scalar(test_value, dtype=dtype_l)
rval_host = dtype_scalar(test_value, dtype=dtype_r)
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
expect = np.true_divide(lval_host, rval_host)
got = lval_gpu / rval_gpu
assert expect == got.value
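    # NumPy's true_divide promotes small integer inputs to float64 while
    # the cudf scalar result here is float32, so the dtype check below is
    # split on operand itemsize.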
if np.dtype(dtype_l).itemsize <= 2 and np.dtype(dtype_r).itemsize <= 2:
assert expect.dtype == "float64" and got.dtype == "float32"
else:
assert expect.dtype == got.dtype
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_truediv_data_invalid())
def test_scalar_truediv_invalid(dtype_l, dtype_r):
test_value = 1
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
with pytest.raises(TypeError):
lval_gpu / rval_gpu
def make_scalar_remainder_data():
valid = set()
valid |= set(
product(
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
)
)
valid |= set(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))
return sorted(list(valid))
def make_scalar_remainder_data_invalid():
invalid = set()
    # ints, floats, bools, and datetimes can't be modded against
    # datetimes or timedeltas
invalid |= set(
product(
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES | DATETIME_TYPES,
DATETIME_TYPES | TIMEDELTA_TYPES,
)
)
    # datetime and timedelta types can't be modded against
# any numeric types
invalid |= set(
product(
DATETIME_TYPES | TIMEDELTA_TYPES,
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
)
)
    # timedeltas can't be modded by datetimes
invalid |= set(product(TIMEDELTA_TYPES, DATETIME_TYPES))
return sorted(list(invalid))
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_remainder_data())
def test_scalar_remainder(dtype_l, dtype_r):
test_value = 1
lval_host = dtype_scalar(test_value, dtype=dtype_l)
rval_host = dtype_scalar(test_value, dtype=dtype_r)
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
expect = lval_host % rval_host
got = lval_gpu % rval_gpu
assert expect == got.value
assert expect.dtype == got.dtype
@pytest.mark.parametrize(
"dtype_l,dtype_r", make_scalar_remainder_data_invalid()
)
def test_scalar_remainder_invalid(dtype_l, dtype_r):
test_value = 1
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
with pytest.raises(TypeError):
lval_gpu % rval_gpu
def make_scalar_power_data():
# only numeric values form valid operands for power
return sorted(
product(
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,
)
)
def make_scalar_power_data_invalid():
invalid = set()
    # datetimes and timedeltas can't go in exponents
invalid |= set(
product(
INTEGER_TYPES
| FLOAT_TYPES
| TIMEDELTA_TYPES
| DATETIME_TYPES
| BOOL_TYPES,
DATETIME_TYPES | TIMEDELTA_TYPES,
)
)
# datetimes and timedeltas may not be raised to
# any exponent of any dtype
invalid |= set(
product(
DATETIME_TYPES | TIMEDELTA_TYPES,
DATETIME_TYPES
| TIMEDELTA_TYPES
| INTEGER_TYPES
| FLOAT_TYPES
| BOOL_TYPES,
)
)
return sorted(list(invalid))
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_power_data())
def test_scalar_power(dtype_l, dtype_r):
test_value = 1
lval_host = dtype_scalar(test_value, dtype=dtype_l)
rval_host = dtype_scalar(test_value, dtype=dtype_r)
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
expect = lval_host ** rval_host
got = lval_gpu ** rval_gpu
assert expect == got.value
assert expect.dtype == got.dtype
@pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_power_data_invalid())
def test_scalar_power_invalid(dtype_l, dtype_r):
test_value = 1
lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)
rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)
with pytest.raises(TypeError):
lval_gpu ** rval_gpu
@pytest.mark.parametrize(
"date_col",
[
[
"2000-01-01 00:00:00.012345678",
"2000-01-31 00:00:00.012345678",
"2000-02-29 00:00:00.012345678",
]
],
)
@pytest.mark.parametrize("n_periods", [0, 1, -1, 12, -12])
@pytest.mark.parametrize(
"frequency",
[
"months",
"years",
"days",
"hours",
"minutes",
"seconds",
"microseconds",
pytest.param(
"nanoseconds",
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36589"
),
),
],
)
@pytest.mark.parametrize(
"dtype",
["datetime64[ns]", "datetime64[us]", "datetime64[ms]", "datetime64[s]"],
)
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_datetime_dateoffset_binaryop(
date_col, n_periods, frequency, dtype, op
):
gsr = cudf.Series(date_col, dtype=dtype)
psr = gsr.to_pandas() # converts to nanos
kwargs = {frequency: n_periods}
goffset = cudf.DateOffset(**kwargs)
poffset = pd.DateOffset(**kwargs)
expect = op(psr, poffset)
got = op(gsr, goffset)
utils.assert_eq(expect, got)
expect = op(psr, -poffset)
got = op(gsr, -goffset)
utils.assert_eq(expect, got)
@pytest.mark.parametrize(
"date_col",
[
[
"2000-01-01 00:00:00.012345678",
"2000-01-31 00:00:00.012345678",
"2000-02-29 00:00:00.012345678",
]
],
)
@pytest.mark.parametrize(
"kwargs",
[
{"months": 2, "years": 5},
{"microseconds": 1, "seconds": 1},
{"months": 2, "years": 5, "seconds": 923, "microseconds": 481},
pytest.param(
{"milliseconds": 4},
marks=pytest.mark.xfail(
reason="Pandas gets the wrong answer for milliseconds"
),
),
pytest.param(
{"milliseconds": 4, "years": 2},
marks=pytest.mark.xfail(
reason="Pandas construction fails with these keywords"
),
),
pytest.param(
{"nanoseconds": 12},
marks=pytest.mark.xfail(
reason="Pandas gets the wrong answer for nanoseconds"
),
),
],
)
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_datetime_dateoffset_binaryop_multiple(date_col, kwargs, op):
gsr = cudf.Series(date_col, dtype="datetime64[ns]")
psr = gsr.to_pandas()
poffset = pd.DateOffset(**kwargs)
goffset = cudf.DateOffset(**kwargs)
expect = op(psr, poffset)
got = op(gsr, goffset)
utils.assert_eq(expect, got)
@pytest.mark.parametrize(
"date_col",
[
[
"2000-01-01 00:00:00.012345678",
"2000-01-31 00:00:00.012345678",
"2000-02-29 00:00:00.012345678",
]
],
)
@pytest.mark.parametrize("n_periods", [0, 1, -1, 12, -12])
@pytest.mark.parametrize(
"frequency",
[
"months",
"years",
"days",
"hours",
"minutes",
"seconds",
"microseconds",
pytest.param(
"nanoseconds",
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36589"
),
),
],
)
@pytest.mark.parametrize(
"dtype",
["datetime64[ns]", "datetime64[us]", "datetime64[ms]", "datetime64[s]"],
)
def test_datetime_dateoffset_binaryop_reflected(
date_col, n_periods, frequency, dtype
):
gsr = cudf.Series(date_col, dtype=dtype)
psr = gsr.to_pandas() # converts to nanos
kwargs = {frequency: n_periods}
goffset = cudf.DateOffset(**kwargs)
poffset = pd.DateOffset(**kwargs)
expect = poffset + psr
got = goffset + gsr
utils.assert_eq(expect, got)
with pytest.raises(TypeError):
poffset - psr
with pytest.raises(TypeError):
goffset - gsr
@pytest.mark.parametrize("frame", [cudf.Series, cudf.Index, cudf.DataFrame])
@pytest.mark.parametrize(
"dtype", ["int", "str", "datetime64[s]", "timedelta64[s]", "category"]
)
def test_binops_with_lhs_numpy_scalar(frame, dtype):
data = [1, 2, 3, 4, 5]
data = (
frame({"a": data}, dtype=dtype)
if isinstance(frame, cudf.DataFrame)
else frame(data, dtype=dtype)
)
if dtype == "datetime64[s]":
val = np.dtype(dtype).type(4, "s")
elif dtype == "timedelta64[s]":
val = np.dtype(dtype).type(4, "s")
elif dtype == "category":
val = np.int64(4)
else:
val = np.dtype(dtype).type(4)
expected = val == data.to_pandas()
got = val == data
# In case of index, expected would be a numpy array
if isinstance(data, cudf.Index):
expected = pd.Index(expected)
utils.assert_eq(expected, got)
@pytest.mark.parametrize(
"dtype",
[
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float32",
"float64",
"datetime64[ns]",
"datetime64[us]",
"datetime64[ms]",
"datetime64[s]",
"timedelta64[ns]",
"timedelta64[us]",
"timedelta64[ms]",
"timedelta64[s]",
],
)
@pytest.mark.parametrize("op", _operators_comparison)
def test_binops_with_NA_consistent(dtype, op):
data = [1, 2, 3]
sr = cudf.Series(data, dtype=dtype)
result = getattr(sr, op)(cudf.NA)
if dtype in NUMERIC_TYPES:
if op == "ne":
expect_all = True
else:
expect_all = False
assert (result == expect_all).all()
    elif dtype in DATETIME_TYPES | TIMEDELTA_TYPES:
assert result._column.null_count == len(data)
def _decimal_series(input, dtype):
return cudf.Series(
[x if x is None else decimal.Decimal(x) for x in input], dtype=dtype,
)
@pytest.mark.parametrize(
"args",
[
(
operator.add,
["1.5", "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["1.5", "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["3.0", "4.0"],
cudf.Decimal64Dtype(scale=2, precision=3),
),
(
operator.add,
["1.5", "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["2.25", "1.005"],
cudf.Decimal64Dtype(scale=3, precision=4),
["3.75", "3.005"],
cudf.Decimal64Dtype(scale=3, precision=5),
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["0.1", "0.2"],
cudf.Decimal64Dtype(scale=3, precision=4),
["100.1", "200.2"],
cudf.Decimal64Dtype(scale=3, precision=9),
),
(
operator.sub,
["1.5", "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["2.25", "1.005"],
cudf.Decimal64Dtype(scale=3, precision=4),
["-0.75", "0.995"],
cudf.Decimal64Dtype(scale=3, precision=5),
),
(
operator.sub,
["1.5", "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["2.25", "1.005"],
cudf.Decimal64Dtype(scale=3, precision=4),
["-0.75", "0.995"],
cudf.Decimal64Dtype(scale=3, precision=5),
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["0.1", "0.2"],
cudf.Decimal64Dtype(scale=3, precision=4),
["99.9", "199.8"],
cudf.Decimal64Dtype(scale=3, precision=9),
),
(
operator.mul,
["1.5", "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["1.5", "3.0"],
cudf.Decimal64Dtype(scale=3, precision=4),
["2.25", "6.0"],
cudf.Decimal64Dtype(scale=5, precision=7),
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["0.1", "0.2"],
cudf.Decimal64Dtype(scale=3, precision=4),
["10.0", "40.0"],
cudf.Decimal64Dtype(scale=1, precision=8),
),
(
operator.mul,
["1000", "2000"],
cudf.Decimal64Dtype(scale=-3, precision=4),
["0.343", "0.500"],
cudf.Decimal64Dtype(scale=3, precision=3),
["343.0", "1000.0"],
cudf.Decimal64Dtype(scale=0, precision=8),
),
(
operator.add,
["1.5", None, "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["1.5", None, "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["3.0", None, "4.0"],
cudf.Decimal64Dtype(scale=2, precision=3),
),
(
operator.add,
["1.5", None],
cudf.Decimal64Dtype(scale=2, precision=2),
["2.25", "1.005"],
cudf.Decimal64Dtype(scale=3, precision=4),
["3.75", None],
cudf.Decimal64Dtype(scale=3, precision=5),
),
(
operator.sub,
["1.5", None],
cudf.Decimal64Dtype(scale=2, precision=2),
["2.25", None],
cudf.Decimal64Dtype(scale=3, precision=4),
["-0.75", None],
cudf.Decimal64Dtype(scale=3, precision=5),
),
(
operator.sub,
["1.5", "2.0"],
cudf.Decimal64Dtype(scale=2, precision=2),
["2.25", None],
cudf.Decimal64Dtype(scale=3, precision=4),
["-0.75", None],
cudf.Decimal64Dtype(scale=3, precision=5),
),
(
operator.mul,
["1.5", None],
cudf.Decimal64Dtype(scale=2, precision=2),
["1.5", None],
cudf.Decimal64Dtype(scale=3, precision=4),
["2.25", None],
cudf.Decimal64Dtype(scale=5, precision=7),
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["0.1", None],
cudf.Decimal64Dtype(scale=3, precision=4),
["10.0", None],
cudf.Decimal64Dtype(scale=1, precision=8),
),
(
operator.eq,
["0.18", "0.42"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.18", "0.21"],
cudf.Decimal64Dtype(scale=2, precision=3),
[True, False],
bool,
),
(
operator.eq,
["0.18", "0.42"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.1800", "0.2100"],
cudf.Decimal64Dtype(scale=4, precision=5),
[True, False],
bool,
),
(
operator.eq,
["100", None],
cudf.Decimal64Dtype(scale=-2, precision=3),
["100", "200"],
cudf.Decimal64Dtype(scale=-1, precision=4),
[True, None],
bool,
),
(
operator.lt,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.10", "0.87", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
[False, True, False],
bool,
),
(
operator.lt,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.1000", "0.8700", "1.0000"],
cudf.Decimal64Dtype(scale=4, precision=5),
[False, True, False],
bool,
),
(
operator.lt,
["200", None, "100"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["100", "200", "100"],
cudf.Decimal64Dtype(scale=-1, precision=4),
[False, None, False],
bool,
),
(
operator.gt,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.10", "0.87", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
[True, False, False],
bool,
),
(
operator.gt,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.1000", "0.8700", "1.0000"],
cudf.Decimal64Dtype(scale=4, precision=5),
[True, False, False],
bool,
),
(
operator.gt,
["300", None, "100"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["100", "200", "100"],
cudf.Decimal64Dtype(scale=-1, precision=4),
[True, None, False],
bool,
),
(
operator.le,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.10", "0.87", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
[False, True, True],
bool,
),
(
operator.le,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.1000", "0.8700", "1.0000"],
cudf.Decimal64Dtype(scale=4, precision=5),
[False, True, True],
bool,
),
(
operator.le,
["300", None, "100"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["100", "200", "100"],
cudf.Decimal64Dtype(scale=-1, precision=4),
[False, None, True],
bool,
),
(
operator.ge,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.10", "0.87", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
[True, False, True],
bool,
),
(
operator.ge,
["0.18", "0.42", "1.00"],
cudf.Decimal64Dtype(scale=2, precision=3),
["0.1000", "0.8700", "1.0000"],
cudf.Decimal64Dtype(scale=4, precision=5),
[True, False, True],
bool,
),
(
operator.ge,
["300", None, "100"],
cudf.Decimal64Dtype(scale=-2, precision=3),
["100", "200", "100"],
cudf.Decimal64Dtype(scale=-1, precision=4),
[True, None, True],
bool,
),
],
)
def test_binops_decimal(args):
op, lhs, l_dtype, rhs, r_dtype, expect, expect_dtype = args
a = _decimal_series(lhs, l_dtype)
b = _decimal_series(rhs, r_dtype)
expect = (
_decimal_series(expect, expect_dtype)
if isinstance(expect_dtype, cudf.Decimal64Dtype)
else cudf.Series(expect, dtype=expect_dtype)
)
got = op(a, b)
assert expect.dtype == got.dtype
utils.assert_eq(expect, got)
@pytest.mark.parametrize(
"args",
[
(
operator.eq,
["100", "41", None],
cudf.Decimal64Dtype(scale=0, precision=5),
[100, 42, 12],
cudf.Series([True, False, None], dtype=bool),
cudf.Series([True, False, None], dtype=bool),
),
(
operator.eq,
["100.000", "42.001", None],
cudf.Decimal64Dtype(scale=3, precision=6),
[100, 42, 12],
cudf.Series([True, False, None], dtype=bool),
cudf.Series([True, False, None], dtype=bool),
),
(
operator.eq,
["100", "40", None],
cudf.Decimal64Dtype(scale=-1, precision=3),
[100, 42, 12],
cudf.Series([True, False, None], dtype=bool),
cudf.Series([True, False, None], dtype=bool),
),
(
operator.lt,
["100", "40", "28", None],
cudf.Decimal64Dtype(scale=0, precision=3),
[100, 42, 24, 12],
cudf.Series([False, True, False, None], dtype=bool),
cudf.Series([False, False, True, None], dtype=bool),
),
(
operator.lt,
["100.000", "42.002", "23.999", None],
cudf.Decimal64Dtype(scale=3, precision=6),
[100, 42, 24, 12],
cudf.Series([False, False, True, None], dtype=bool),
cudf.Series([False, True, False, None], dtype=bool),
),
(
operator.lt,
["100", "40", "10", None],
cudf.Decimal64Dtype(scale=-1, precision=3),
[100, 42, 8, 12],
cudf.Series([False, True, False, None], dtype=bool),
cudf.Series([False, False, True, None], dtype=bool),
),
(
operator.gt,
["100", "42", "20", None],
cudf.Decimal64Dtype(scale=0, precision=3),
[100, 40, 24, 12],
cudf.Series([False, True, False, None], dtype=bool),
cudf.Series([False, False, True, None], dtype=bool),
),
(
operator.gt,
["100.000", "42.002", "23.999", None],
cudf.Decimal64Dtype(scale=3, precision=6),
[100, 42, 24, 12],
cudf.Series([False, True, False, None], dtype=bool),
cudf.Series([False, False, True, None], dtype=bool),
),
(
operator.gt,
["100", "40", "10", None],
cudf.Decimal64Dtype(scale=-1, precision=3),
[100, 42, 8, 12],
cudf.Series([False, False, True, None], dtype=bool),
cudf.Series([False, True, False, None], dtype=bool),
),
(
operator.le,
["100", "40", "28", None],
cudf.Decimal64Dtype(scale=0, precision=3),
[100, 42, 24, 12],
cudf.Series([True, True, False, None], dtype=bool),
cudf.Series([True, False, True, None], dtype=bool),
),
(
operator.le,
["100.000", "42.002", "23.999", None],
cudf.Decimal64Dtype(scale=3, precision=6),
[100, 42, 24, 12],
cudf.Series([True, False, True, None], dtype=bool),
cudf.Series([True, True, False, None], dtype=bool),
),
(
operator.le,
["100", "40", "10", None],
cudf.Decimal64Dtype(scale=-1, precision=3),
[100, 42, 8, 12],
cudf.Series([True, True, False, None], dtype=bool),
cudf.Series([True, False, True, None], dtype=bool),
),
(
operator.ge,
["100", "42", "20", None],
cudf.Decimal64Dtype(scale=0, precision=3),
[100, 40, 24, 12],
cudf.Series([True, True, False, None], dtype=bool),
cudf.Series([True, False, True, None], dtype=bool),
),
(
operator.ge,
["100.000", "42.002", "23.999", None],
cudf.Decimal64Dtype(scale=3, precision=6),
[100, 42, 24, 12],
cudf.Series([True, True, False, None], dtype=bool),
cudf.Series([True, False, True, None], dtype=bool),
),
(
operator.ge,
["100", "40", "10", None],
cudf.Decimal64Dtype(scale=-1, precision=3),
[100, 42, 8, 12],
cudf.Series([True, False, True, None], dtype=bool),
cudf.Series([True, True, False, None], dtype=bool),
),
],
)
@pytest.mark.parametrize("integer_dtype", cudf.tests.utils.INTEGER_TYPES)
@pytest.mark.parametrize("reflected", [True, False])
def test_binops_decimal_comp_mixed_integer(args, integer_dtype, reflected):
if not reflected:
op, ldata, ldtype, rdata, expected, _ = args
else:
op, ldata, ldtype, rdata, _, expected = args
lhs = _decimal_series(ldata, ldtype)
rhs = cudf.Series(rdata, dtype=integer_dtype)
if reflected:
rhs, lhs = lhs, rhs
actual = op(lhs, rhs)
utils.assert_eq(expected, actual)
@pytest.mark.parametrize(
"args",
[
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal(1),
["101", "201"],
cudf.Decimal64Dtype(scale=0, precision=6),
False,
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
1,
["101", "201"],
cudf.Decimal64Dtype(scale=0, precision=6),
False,
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal("1.5"),
["101.5", "201.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
False,
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
cudf.Scalar(decimal.Decimal("1.5")),
["101.5", "201.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
False,
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal(1),
["101", "201"],
cudf.Decimal64Dtype(scale=0, precision=6),
True,
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
1,
["101", "201"],
cudf.Decimal64Dtype(scale=0, precision=6),
True,
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal("1.5"),
["101.5", "201.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
True,
),
(
operator.add,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
cudf.Scalar(decimal.Decimal("1.5")),
["101.5", "201.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
True,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
1,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=5),
False,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal(2),
["200", "400"],
cudf.Decimal64Dtype(scale=-2, precision=5),
False,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal("1.5"),
["150", "300"],
cudf.Decimal64Dtype(scale=-1, precision=6),
False,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
cudf.Scalar(decimal.Decimal("1.5")),
["150", "300"],
cudf.Decimal64Dtype(scale=-1, precision=6),
False,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
1,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=5),
True,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal(2),
["200", "400"],
cudf.Decimal64Dtype(scale=-2, precision=5),
True,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal("1.5"),
["150", "300"],
cudf.Decimal64Dtype(scale=-1, precision=6),
True,
),
(
operator.mul,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
cudf.Scalar(decimal.Decimal("1.5")),
["150", "300"],
cudf.Decimal64Dtype(scale=-1, precision=6),
True,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal(2),
["98", "198"],
cudf.Decimal64Dtype(scale=0, precision=6),
False,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal("2.5"),
["97.5", "197.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
False,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
4,
["96", "196"],
cudf.Decimal64Dtype(scale=0, precision=6),
False,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
cudf.Scalar(decimal.Decimal("2.5")),
["97.5", "197.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
False,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal(2),
["-98", "-198"],
cudf.Decimal64Dtype(scale=0, precision=6),
True,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
4,
["-96", "-196"],
cudf.Decimal64Dtype(scale=0, precision=6),
True,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
decimal.Decimal("2.5"),
["-97.5", "-197.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
True,
),
(
operator.sub,
["100", "200"],
cudf.Decimal64Dtype(scale=-2, precision=3),
cudf.Scalar(decimal.Decimal("2.5")),
["-97.5", "-197.5"],
cudf.Decimal64Dtype(scale=1, precision=7),
True,
),
],
)
def test_binops_decimal_scalar(args):
op, lhs, l_dtype, rhs, expect, expect_dtype, reflect = args
def decimal_series(input, dtype):
return cudf.Series(
[x if x is None else decimal.Decimal(x) for x in input],
dtype=dtype,
)
lhs = decimal_series(lhs, l_dtype)
expect = decimal_series(expect, expect_dtype)
if reflect:
lhs, rhs = rhs, lhs
got = op(lhs, rhs)
assert expect.dtype == got.dtype
utils.assert_eq(expect, got)
@pytest.mark.parametrize(
"args",
[
(
operator.eq,
["100.00", "41", None],
cudf.Decimal64Dtype(scale=0, precision=5),
100,
cudf.Series([True, False, None], dtype=bool),
cudf.Series([True, False, None], dtype=bool),
),
(
operator.eq,
["100.123", "41", None],
cudf.Decimal64Dtype(scale=3, precision=6),
decimal.Decimal("100.123"),
cudf.Series([True, False, None], dtype=bool),
cudf.Series([True, False, None], dtype=bool),
),
(
operator.eq,
["100.123", "41", None],
cudf.Decimal64Dtype(scale=3, precision=6),
cudf.Scalar(decimal.Decimal("100.123")),
cudf.Series([True, False, None], dtype=bool),
cudf.Series([True, False, None], dtype=bool),
),
(
operator.gt,
["100.00", "41", "120.21", None],
cudf.Decimal64Dtype(scale=2, precision=5),
100,
cudf.Series([False, False, True, None], dtype=bool),
cudf.Series([False, True, False, None], dtype=bool),
),
(
operator.gt,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
decimal.Decimal("100.123"),
cudf.Series([False, False, True, None], dtype=bool),
cudf.Series([False, True, False, None], dtype=bool),
),
(
operator.gt,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
cudf.Scalar(decimal.Decimal("100.123")),
cudf.Series([False, False, True, None], dtype=bool),
cudf.Series([False, True, False, None], dtype=bool),
),
(
operator.ge,
["100.00", "41", "120.21", None],
cudf.Decimal64Dtype(scale=2, precision=5),
100,
cudf.Series([True, False, True, None], dtype=bool),
cudf.Series([True, True, False, None], dtype=bool),
),
(
operator.ge,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
decimal.Decimal("100.123"),
cudf.Series([True, False, True, None], dtype=bool),
cudf.Series([True, True, False, None], dtype=bool),
),
(
operator.ge,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
cudf.Scalar(decimal.Decimal("100.123")),
cudf.Series([True, False, True, None], dtype=bool),
cudf.Series([True, True, False, None], dtype=bool),
),
(
operator.lt,
["100.00", "41", "120.21", None],
cudf.Decimal64Dtype(scale=2, precision=5),
100,
cudf.Series([False, True, False, None], dtype=bool),
cudf.Series([False, False, True, None], dtype=bool),
),
(
operator.lt,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
decimal.Decimal("100.123"),
cudf.Series([False, True, False, None], dtype=bool),
cudf.Series([False, False, True, None], dtype=bool),
),
(
operator.lt,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
cudf.Scalar(decimal.Decimal("100.123")),
cudf.Series([False, True, False, None], dtype=bool),
cudf.Series([False, False, True, None], dtype=bool),
),
(
operator.le,
["100.00", "41", "120.21", None],
cudf.Decimal64Dtype(scale=2, precision=5),
100,
cudf.Series([True, True, False, None], dtype=bool),
cudf.Series([True, False, True, None], dtype=bool),
),
(
operator.le,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
decimal.Decimal("100.123"),
cudf.Series([True, True, False, None], dtype=bool),
cudf.Series([True, False, True, None], dtype=bool),
),
(
operator.le,
["100.123", "41", "120.21", None],
cudf.Decimal64Dtype(scale=3, precision=6),
cudf.Scalar(decimal.Decimal("100.123")),
cudf.Series([True, True, False, None], dtype=bool),
cudf.Series([True, False, True, None], dtype=bool),
),
],
)
@pytest.mark.parametrize("reflected", [True, False])
def test_binops_decimal_scalar_compare(args, reflected):
if not reflected:
op, ldata, ldtype, rdata, expected, _ = args
else:
op, ldata, ldtype, rdata, _, expected = args
lhs = _decimal_series(ldata, ldtype)
rhs = rdata
if reflected:
rhs, lhs = lhs, rhs
actual = op(lhs, rhs)
utils.assert_eq(expected, actual)
@pytest.mark.parametrize(
"dtype",
[
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
"float32",
"float64",
"str",
"datetime64[ns]",
"datetime64[us]",
"datetime64[ms]",
"datetime64[s]",
"timedelta64[ns]",
"timedelta64[us]",
"timedelta64[ms]",
"timedelta64[s]",
],
)
@pytest.mark.parametrize("null_scalar", [None, cudf.NA, np.datetime64("NaT")])
@pytest.mark.parametrize("cmpop", _cmpops)
def test_column_null_scalar_comparison(dtype, null_scalar, cmpop):
# This test is meant to validate that comparing
# a series of any dtype with a null scalar produces
# a new series where all the elements are <NA>.
if isinstance(null_scalar, np.datetime64):
if np.dtype(dtype).kind not in "mM":
pytest.skip()
null_scalar = null_scalar.astype(dtype)
dtype = np.dtype(dtype)
data = [1, 2, 3, 4, 5]
sr = cudf.Series(data, dtype=dtype)
result = cmpop(sr, null_scalar)
assert result.isnull().all()
@pytest.mark.parametrize("fn", ["eq", "ne", "lt", "gt", "le", "ge"])
def test_equality_ops_index_mismatch(fn):
a = cudf.Series(
[1, 2, 3, None, None, 4], index=["a", "b", "c", "d", "e", "f"]
)
b = cudf.Series(
[-5, 4, 3, 2, 1, 0, 19, 11],
index=["aa", "b", "c", "d", "e", "f", "y", "z"],
)
pa = a.to_pandas(nullable=True)
pb = b.to_pandas(nullable=True)
expected = getattr(pa, fn)(pb)
actual = getattr(a, fn)(b).to_pandas(nullable=True)
utils.assert_eq(expected, actual)
def generate_test_null_equals_columnops_data():
    # Generate tuples of:
    # (left_data, right_data, compare_bool)
    # where compare_bool is the expected answer to whether
    # the columns should compare as null-equal
def set_null_cases(column_l, column_r, case):
if case == "neither":
return column_l, column_r
elif case == "left":
column_l[1] = None
elif case == "right":
column_r[1] = None
elif case == "both":
column_l[1] = None
column_r[1] = None
else:
raise ValueError("Unknown null case")
return column_l, column_r
null_cases = ["neither", "left", "right", "both"]
data = [1, 2, 3]
results = []
# TODO: Numeric types can be cross compared as null equal
for dtype in (
list(NUMERIC_TYPES)
+ list(DATETIME_TYPES)
+ list(TIMEDELTA_TYPES)
+ list(STRING_TYPES)
+ ["category"]
):
for case in null_cases:
left = cudf.Series(data, dtype=dtype)
right = cudf.Series(data, dtype=dtype)
if case in {"left", "right"}:
answer = False
else:
answer = True
left, right = set_null_cases(left, right, case)
results.append((left._column, right._column, answer, case))
return results
@pytest.mark.parametrize(
"lcol,rcol,ans,case", generate_test_null_equals_columnops_data()
)
def test_null_equals_columnops(lcol, rcol, ans, case):
assert lcol._null_equals(rcol).all() == ans
| true
| true
|
f7184ad597b6deed89e33ce74cbeaad1898b35eb
| 13,475
|
py
|
Python
|
catkin_ws/devel_isolated/velodyne_gazebo_plugins/_setup_util.py
|
LiuXiang199x/DRL_Navigation
|
336e847bde8261d429fd2de8111b3d24c0ab4bae
|
[
"MIT"
] | null | null | null |
catkin_ws/devel_isolated/velodyne_gazebo_plugins/_setup_util.py
|
LiuXiang199x/DRL_Navigation
|
336e847bde8261d429fd2de8111b3d24c0ab4bae
|
[
"MIT"
] | null | null | null |
catkin_ws/devel_isolated/velodyne_gazebo_plugins/_setup_util.py
|
LiuXiang199x/DRL_Navigation
|
336e847bde8261d429fd2de8111b3d24c0ab4bae
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This file generates shell code for the setup.SHELL scripts to set environment variables."""
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
PATH_TO_ADD_SUFFIX = ['bin']
if IS_WINDOWS:
# while catkin recommends putting dll's into bin, 3rd party packages often put dll's into lib
# since Windows finds dll's via the PATH variable, prepend it with path to lib
PATH_TO_ADD_SUFFIX.extend([['lib', os.path.join('lib', 'x86_64-linux-gnu')]])
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': PATH_TO_ADD_SUFFIX,
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python3/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
"""
    Generate shell code to reset environment variables
    by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
"""
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
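# For illustration, assuming a Linux machine where a single workspace had
# previously been sourced, the lines returned above would look roughly like:
#   # reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH
#   export CMAKE_PREFIX_PATH="/opt/ros/noetic"
#   export LD_LIBRARY_PATH="/opt/ros/noetic/lib"
# (hypothetical paths; the real values depend on the caller's environment).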
def _rollback_env_variable(environ, name, subfolders):
"""
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
    :param subfolders: list of str; each entry is '' or a subfolder name that may start with '/'
:returns: the updated value of the environment variable.
"""
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
"""
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
    :param include_fuerte: Flag indicating whether paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
"""
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
"""Generate shell code to prepend environment variables for the all workspaces."""
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted(key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH'):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
"""
    Return the prefix to prepend to the environment variable NAME,
    adding any path in NEW_PATHS_STR without creating duplicate or empty items.
"""
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
"""Generate shell code with found environment hooks for the all workspaces."""
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
parser.add_argument('--local', action='store_true', help='Only consider this prefix path and ignore other prefix path in the environment')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
if not args.local:
# environment at generation time
CMAKE_PREFIX_PATH = r'/home/agent/ROS/DRL-robot-navigation/catkin_ws/devel_isolated/velodyne_description;/home/agent/ROS/DRL-robot-navigation/catkin_ws/devel_isolated/multi_robot_scenario;/opt/ros/noetic'.split(';')
else:
# don't consider any other prefix path than this one
CMAKE_PREFIX_PATH = []
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
# CMAKE_PREFIX_PATH uses forward slash on all platforms, but __file__ is platform dependent
# base_path on Windows contains backward slashes, need to be converted to forward slashes before comparison
if os.path.sep != '/':
base_path = base_path.replace(os.path.sep, '/')
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
| 44.180328
| 227
| 0.684453
|
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
PATH_TO_ADD_SUFFIX = ['bin']
if IS_WINDOWS:
PATH_TO_ADD_SUFFIX.extend([['lib', os.path.join('lib', 'x86_64-linux-gnu')]])
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': PATH_TO_ADD_SUFFIX,
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python3/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolders):
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted(key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH'):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
parser.add_argument('--local', action='store_true', help='Only consider this prefix path and ignore other prefix path in the environment')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
if not args.local:
# environment at generation time
CMAKE_PREFIX_PATH = r'/home/agent/ROS/DRL-robot-navigation/catkin_ws/devel_isolated/velodyne_description;/home/agent/ROS/DRL-robot-navigation/catkin_ws/devel_isolated/multi_robot_scenario;/opt/ros/noetic'.split(';')
else:
# don't consider any other prefix path than this one
CMAKE_PREFIX_PATH = []
base_path = os.path.dirname(__file__)
if os.path.sep != '/':
base_path = base_path.replace(os.path.sep, '/')
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
sys.stdout.flush()
except IOError as e:
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
| true
| true
|
f7184b0f00971a6741fe7e16776ceadd5f5405f9
| 6,542
|
py
|
Python
|
test_tables.py
|
FredHappyface/MiniEncoding
|
e53341fd493c072267fc86d96f4e4a5970fa5116
|
[
"MIT"
] | null | null | null |
test_tables.py
|
FredHappyface/MiniEncoding
|
e53341fd493c072267fc86d96f4e4a5970fa5116
|
[
"MIT"
] | 1
|
2020-08-23T20:00:02.000Z
|
2020-08-23T20:00:02.000Z
|
test_tables.py
|
FredHappyface/MiniEncoding
|
e53341fd493c072267fc86d96f4e4a5970fa5116
|
[
"MIT"
] | null | null | null |
"""Test the miniencoding lib
A decent testing approach is to test the round trip with a random, valid
string of bytes. by taking this approach, the same error/ bug would have to be
present in both the 'from' and 'to' functions which whilst possible is unlikely
"""
# pylint: disable=invalid-name
import random
import string
from miniencoding.tables import *
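# The docstring above recommends round-tripping a random, valid string. A
# minimal sketch of such a check, assuming only what test_ASCII7 below already
# demonstrates (ASCII7 encodes every 7-bit character, so any string drawn from
# string.printable is valid input):
def test_ASCII7_random_roundtrip():
    """ Test ASCII7 round trip with a random 7-bit string (sketch) """
    testString = "".join(random.choice(string.printable) for _ in range(64))
    assert toUnicode(ASCII7, toCharset(ASCII7, testString)) == testString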
def test_CDC1604_MAGTAPE_len():
""" Test CDC1604_MAGTAPE length """
assert len(CDC1604_MAGTAPE) == 64
def test_CDC1604_MAGTAPE():
""" Test CDC1604_MAGTAPE round trip """
testString = "?1234567890#@??? /STUVWXYZ?,%???-JKLMNOPQR0$*???&ABCDEFGHI0.¤???"
assert toUnicode(CDC1604_MAGTAPE, toCharset(CDC1604_MAGTAPE,
testString)) == testString
def test_CDC1604_PUNCHCARD_len():
""" Test CDC1604_PUNCHCARD length """
assert len(CDC1604_PUNCHCARD) == 64
def test_CDC1604_PUNCHCARD():
""" Test CDC1604_PUNCHCARD round trip """
testString = "?1234567890=-??? /STUVWXYZ?,(???—JKLMNOPQR0$*???+ABCDEFGHI0.)???"
assert toUnicode(CDC1604_PUNCHCARD, toCharset(CDC1604_PUNCHCARD,
testString)) == testString
def test_CDC1612_len():
""" Test CDC1612 length """
assert len(CDC1612) == 64
def test_CDC1612():
""" Test CDC1612 round trip """
testString = ":1234567890=≠≤![ /STUVWXYZ],(→≡~—JKLMNOPQR%$*↑↓>+ABCDEFGHI<.)≥?;"
assert toUnicode(CDC1612, toCharset(CDC1612, testString)) == testString
def test_DEC_SIXBIT_len():
""" Test DEC_SIXBIT length """
assert len(DEC_SIXBIT) == 64
def test_DEC_SIXBIT():
""" Test DEC_SIXBIT round trip """
testString = " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_"
assert toUnicode(DEC_SIXBIT, toCharset(DEC_SIXBIT, testString)) == testString
def test_EMCA1_len():
""" Test EMCA1 length """
assert len(EMCA1) == 64
def test_EMCA1():
""" Test EMCA1 round trip """
testString = " \t\n\v\f\r\x0e\x0f()*+,-./0123456789:;<=>?\x00ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]\x1b\x7f"
assert toUnicode(EMCA1, toCharset(EMCA1, testString)) == testString
def test_ICL_len():
""" Test ICL length """
assert len(ICL) == 64
def test_ICL():
""" Test ICL round trip """
testString = "0123456789:;<=>? !\"#£%&'()*+,-./@ABCDEFGHIJKLMNOPQRSTUVWXYZ[$]↑←"
assert toUnicode(ICL, toCharset(ICL, testString)) == testString
def test_SIXBIT_len():
""" Test SIXBIT length """
assert len(SIXBIT) == 64
def test_SIXBIT():
""" Test SIXBIT round trip """
testString = "@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_ !\"#$%&'()*+,-./0123456789:;<=>?"
assert toUnicode(SIXBIT, toCharset(SIXBIT, testString)) == testString
def test_GOST_len():
""" Test GOST length """
assert len(GOST) == 64
def test_GOST():
""" Test GOST round trip """
testString = "0123456789+-/,. ⏨↑()×=;[]*‘’≠<>:АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЫЬЭЮЯ\x7f"
assert toUnicode(GOST, toCharset(GOST, testString)) == testString
def test_GSM7_len():
""" Test GSM7 length """
assert len(GSM7) == 128
def test_GSM7():
""" Test GSM7 round trip """
testString = "@£$¥èéùìòÇ\nØø\rÅåΔ_ΦΓΛΩΠΨΣΘΞ\x07ÆæßÉ !\"#¤%&'()*+,-./0123456789:;<=>?¡ABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÑܧ¿abcdefghijklmnopqrstuvwxyzäöñüà"
assert toUnicode(GSM7, toCharset(GSM7, testString)) == testString
def test_ASCII7_len():
""" Test ASCII7 length """
assert len(ASCII7) == 128
def test_ASCII7():
""" Test ASCII7 round trip """
testString = bytes(range(0, 128)).decode("utf-8")
assert toUnicode(ASCII7, toCharset(ASCII7, testString)) == testString
def test_IBM48_len():
""" Test IBM48 length """
assert len(IBM48) == 64
def test_IBM48():
""" Test IBM48 round trip """
testString = " 1234567890#@????/STUVWXYZ?,%???-JKLMNOPQR?$*???&ABCDEFGHI?.⌑???"
assert toUnicode(IBM48, toCharset(IBM48, testString)) == testString
def test_IBM704_len():
""" Test IBM704 length """
assert len(IBM704) == 64
def test_IBM704():
""" Test IBM704 round trip """
testString = "0123456789?#@???&ABCDEFGHI?.⌑???-JKLMNOPQR?$*??? /STUVWXYZ?,%???"
assert toUnicode(IBM704, toCharset(IBM704, testString)) == testString
def test_IBM7090_len():
""" Test IBM7090 length """
assert len(IBM7090) == 64
def test_IBM7090():
""" Test IBM7090 round trip """
testString = "0123456789?=\"???&ABCDEFGHI0.)???-JKLMNOPQR0$*??? /STUVWXYZ±,(???"
assert toUnicode(IBM7090, toCharset(IBM7090, testString)) == testString
def test_IBM1401_len():
""" Test IBM1401 length """
assert len(IBM1401) == 64
def test_IBM1401():
""" Test IBM1401 round trip """
testString = " 1234567890#@:>√¢/STUVWXYZ‡,%='\"-JKLMNOPQR!$*);Δ&ABCDEFGHI?.⌑(<⯒"
assert toUnicode(IBM1401, toCharset(IBM1401, testString)) == testString
def test_GBCD_len():
""" Test GBCD length """
assert len(GBCD) == 64
def test_GBCD():
""" Test GBCD round trip """
testString = "0123456789[#@:>? ABCDEFGHI&.](<\\^JKLMNOPQR-$*);'+/STUVWXYZ_,%=\"!"
assert toUnicode(GBCD, toCharset(GBCD, testString)) == testString
def test_BURROUGHS_B5500_len():
""" Test BURROUGHS_B5500 length """
assert len(BURROUGHS_B5500) == 64
def test_BURROUGHS_B5500():
""" Test BURROUGHS_B5500 round trip """
testString = "0123456789#@?:>≥+ABCDEFGHI.[&(<←×JKLMNOPQR$*-);≤ /STUVWXYZ,%≠=]\""
assert toUnicode(BURROUGHS_B5500, toCharset(BURROUGHS_B5500, testString)) == testString
def test_CP353_len():
""" Test CP353 length """
assert len(CP353) == 64
def test_CP353():
""" Test CP353 round trip """
testString = " 1234567890#@:>√␢/STUVWXYZ‡,%γ\\⧻-JKLMNOPQR!#*];Δ&ABCDEFGHI?.⌑[<⯒"
assert toUnicode(CP353, toCharset(CP353, testString)) == testString
def test_CP355_len():
""" Test CP355 length """
assert len(CP355) == 64
def test_CP355():
""" Test CP355 round trip """
testString = " 1234567890#????@/STUVWXYZ‡,?γ??-JKLMNOPQR<$????&ABCDEFGHI).????"
assert toUnicode(CP355, toCharset(CP355, testString)) == testString
def test_CP357_len():
""" Test CP357 length """
assert len(CP357) == 64
def test_CP357():
""" Test CP357 round trip """
testString = " 1234567890=????'/STUVWXYZ‡,????-JKLMNOPQR!$????+ABCDEFGHI?.????"
assert toUnicode(CP357, toCharset(CP357, testString)) == testString
def test_CP358_len():
""" Test CP358 length """
assert len(CP358) == 64
def test_CP358():
""" Test CP358 round trip """
testString = " 1234567890'????!/STUVWXYZ‡,????-JKLMNOPQR<;????=ABCDEFGHI>.????"
assert toUnicode(CP358, toCharset(CP358, testString)) == testString
def test_CP359_len():
""" Test CP359 length """
assert len(CP359) == 64
def test_CP359():
""" Test CP359 round trip """
testString = " 1234567890#????@/STUVWXYZ?,????-JKLMNOPQR?$????&ABCDEFGHI?.????"
assert toUnicode(CP359, toCharset(CP359, testString)) == testString
| 26.811475
| 150
| 0.67594
|
import random
import string
from miniencoding.tables import *
def test_CDC1604_MAGTAPE_len():
assert len(CDC1604_MAGTAPE) == 64
def test_CDC1604_MAGTAPE():
testString = "?1234567890#@??? /STUVWXYZ?,%???-JKLMNOPQR0$*???&ABCDEFGHI0.¤???"
assert toUnicode(CDC1604_MAGTAPE, toCharset(CDC1604_MAGTAPE,
testString)) == testString
def test_CDC1604_PUNCHCARD_len():
assert len(CDC1604_PUNCHCARD) == 64
def test_CDC1604_PUNCHCARD():
testString = "?1234567890=-??? /STUVWXYZ?,(???—JKLMNOPQR0$*???+ABCDEFGHI0.)???"
assert toUnicode(CDC1604_PUNCHCARD, toCharset(CDC1604_PUNCHCARD,
testString)) == testString
def test_CDC1612_len():
assert len(CDC1612) == 64
def test_CDC1612():
testString = ":1234567890=≠≤![ /STUVWXYZ],(→≡~—JKLMNOPQR%$*↑↓>+ABCDEFGHI<.)≥?;"
assert toUnicode(CDC1612, toCharset(CDC1612, testString)) == testString
def test_DEC_SIXBIT_len():
assert len(DEC_SIXBIT) == 64
def test_DEC_SIXBIT():
testString = " !\"
assert toUnicode(DEC_SIXBIT, toCharset(DEC_SIXBIT, testString)) == testString
def test_EMCA1_len():
assert len(EMCA1) == 64
def test_EMCA1():
testString = " \t\n\v\f\r\x0e\x0f()*+,-./0123456789:;<=>?\x00ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]\x1b\x7f"
assert toUnicode(EMCA1, toCharset(EMCA1, testString)) == testString
def test_ICL_len():
assert len(ICL) == 64
def test_ICL():
testString = "0123456789:;<=>? !\"#£%&'()*+,-./@ABCDEFGHIJKLMNOPQRSTUVWXYZ[$]↑←"
assert toUnicode(ICL, toCharset(ICL, testString)) == testString
def test_SIXBIT_len():
assert len(SIXBIT) == 64
def test_SIXBIT():
testString = "@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_ !\"
assert toUnicode(SIXBIT, toCharset(SIXBIT, testString)) == testString
def test_GOST_len():
assert len(GOST) == 64
def test_GOST():
testString = "0123456789+-/,. ⏨↑()×=;[]*‘’≠<>:АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЫЬЭЮЯ\x7f"
assert toUnicode(GOST, toCharset(GOST, testString)) == testString
def test_GSM7_len():
assert len(GSM7) == 128
def test_GSM7():
testString = "@£$¥èéùìòÇ\nØø\rÅåΔ_ΦΓΛΩΠΨΣΘΞ\x07ÆæßÉ !\"#¤%&'()*+,-./0123456789:;<=>?¡ABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÑܧ¿abcdefghijklmnopqrstuvwxyzäöñüà"
assert toUnicode(GSM7, toCharset(GSM7, testString)) == testString
def test_ASCII7_len():
assert len(ASCII7) == 128
def test_ASCII7():
testString = bytes(range(0, 128)).decode("utf-8")
assert toUnicode(ASCII7, toCharset(ASCII7, testString)) == testString
def test_IBM48_len():
assert len(IBM48) == 64
def test_IBM48():
testString = " 1234567890#@????/STUVWXYZ?,%???-JKLMNOPQR?$*???&ABCDEFGHI?.⌑???"
assert toUnicode(IBM48, toCharset(IBM48, testString)) == testString
def test_IBM704_len():
assert len(IBM704) == 64
def test_IBM704():
testString = "0123456789?#@???&ABCDEFGHI?.⌑???-JKLMNOPQR?$*??? /STUVWXYZ?,%???"
assert toUnicode(IBM704, toCharset(IBM704, testString)) == testString
def test_IBM7090_len():
assert len(IBM7090) == 64
def test_IBM7090():
testString = "0123456789?=\"???&ABCDEFGHI0.)???-JKLMNOPQR0$*??? /STUVWXYZ±,(???"
assert toUnicode(IBM7090, toCharset(IBM7090, testString)) == testString
def test_IBM1401_len():
assert len(IBM1401) == 64
def test_IBM1401():
testString = " 1234567890
assert toUnicode(IBM1401, toCharset(IBM1401, testString)) == testString
def test_GBCD_len():
assert len(GBCD) == 64
def test_GBCD():
testString = "0123456789[#@:>? ABCDEFGHI&.](<\\^JKLMNOPQR-$*);'+/STUVWXYZ_,%=\"!"
assert toUnicode(GBCD, toCharset(GBCD, testString)) == testString
def test_BURROUGHS_B5500_len():
assert len(BURROUGHS_B5500) == 64
def test_BURROUGHS_B5500():
testString = "0123456789
assert toUnicode(BURROUGHS_B5500, toCharset(BURROUGHS_B5500, testString)) == testString
def test_CP353_len():
assert len(CP353) == 64
def test_CP353():
testString = " 1234567890#@:>√␢/STUVWXYZ‡,%γ\\⧻-JKLMNOPQR!#*];Δ&ABCDEFGHI?.⌑[<⯒"
assert toUnicode(CP353, toCharset(CP353, testString)) == testString
def test_CP355_len():
assert len(CP355) == 64
def test_CP355():
testString = " 1234567890#????@/STUVWXYZ‡,?γ??-JKLMNOPQR<$????&ABCDEFGHI).????"
assert toUnicode(CP355, toCharset(CP355, testString)) == testString
def test_CP357_len():
assert len(CP357) == 64
def test_CP357():
testString = " 1234567890=????'/STUVWXYZ‡,????-JKLMNOPQR!$????+ABCDEFGHI?.????"
assert toUnicode(CP357, toCharset(CP357, testString)) == testString
def test_CP358_len():
assert len(CP358) == 64
def test_CP358():
testString = " 1234567890'????!/STUVWXYZ‡,????-JKLMNOPQR<;????=ABCDEFGHI>.????"
assert toUnicode(CP358, toCharset(CP358, testString)) == testString
def test_CP359_len():
assert len(CP359) == 64
def test_CP359():
testString = " 1234567890#????@/STUVWXYZ?,????-JKLMNOPQR?$????&ABCDEFGHI?.????"
assert toUnicode(CP359, toCharset(CP359, testString)) == testString
| true
| true
|
f7184b32e7e21c95e651877c5d44be93c8cd7ddd
| 4,487
|
py
|
Python
|
GhClimHub/app/views.py
|
Techyiad/Climate-Mitigant
|
3fdbd01d4e2230fa95fc184682351cce389ec87a
|
[
"MIT"
] | null | null | null |
GhClimHub/app/views.py
|
Techyiad/Climate-Mitigant
|
3fdbd01d4e2230fa95fc184682351cce389ec87a
|
[
"MIT"
] | null | null | null |
GhClimHub/app/views.py
|
Techyiad/Climate-Mitigant
|
3fdbd01d4e2230fa95fc184682351cce389ec87a
|
[
"MIT"
] | null | null | null |
"""
Definition of views.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import config
from django.shortcuts import render, HttpResponse
from django.http import HttpRequest, JsonResponse
from django.template import RequestContext
import ee
import datetime as dt
import types
import os
import json
###############################################################################
# Initialization.
###############################################################################
ee.Initialize(config.credentials)
from app.dataset_processor import _Getcollection, chart_it, _ReadOptions, palletedata
from app.drought import indices
from app.series import Options, timelapse_data
def home(request):
"""Renders the home page."""
assert isinstance(request, HttpRequest)
return render(
request,
'app/index.html'
)
def mail(request):
"""Renders the contact page."""
assert isinstance(request, HttpRequest)
return render(
request,
'app/mail.html'
)
global palettechoice
def about(request):
"""Renders the about page."""
assert isinstance(request, HttpRequest)
return render( request, 'app/about.html' )
def calcdata(request):
global data,palettedecide
if(request.method=="POST"):
options= _ReadOptions(request)
print(options)
if options["dataset"]=='NDVI':
palettedecide='NDVI'
elif options["dataset"]=='EVI':
palettedecide='EVI'
elif options["dataset"]=='NDWI':
palettedecide='NDWI'
palete=palletedata(palettedecide,None)
data=_Getcollection(options,palete)
return JsonResponse(data)
def indices_compute(request):
global data
if(request.method=="POST"):
options= _ReadOptions(request)
data=indices(options)
return JsonResponse(data)
def indices_download(request):
global data
if(request.method=="POST"):
options= _ReadOptions(request)
data=indices(options)
return JsonResponse(data)
def timeseries(request):
global data
if(request.method=="POST"):
try:
options= Options(request)
print(options)
data=timelapse_data(options)
except ee.EEException as ex:
data={'error':'Failed to Compute Time Series . Error Stated::, '+str(ex)}
pass
return JsonResponse(data)
def map1(request):
global data
if(request.method=="POST"):
options= _ReadOptions(request)
data=indices(options)
return JsonResponse(data)
def map2(request):
global data
if(request.method=="POST"):
options= _ReadOptions(request)
data=indices(options)
return JsonResponse(data)
def map3(request):
global data
if(request.method=="POST"):
options= _ReadOptions(request)
data=indices(options)
return JsonResponse(data)
def map4(request):
global data
if(request.method=="POST"):
options= _ReadOptions(request)
data=indices(options)
return JsonResponse(data)
def download_data(request):
try:
global data,palettedecide
if(request.method=="POST"):
options= _ReadOptions(request)
if options["dataset"]=='NDVI':
palettedecide='NDVI'
elif options["dataset"]=='EVI':
palettedecide='EVI'
elif options["dataset"]=='NDWI':
palettedecide='NDWI'
palete=palletedata(palettedecide,None)
data= _Getcollection(options,palete)
except ee.EEException as e:
data={'error':'Failed to Download Data . Error Stated::, '+str(e)}
return JsonResponse(data)
def chart_data(request):
try:
global data,palettedecide
if(request.method=="POST"):
options= _ReadOptions(request)
if options["dataset"]=='NDVI':
palettedecide='NDVI'
elif options["dataset"]=='EVI':
palettedecide='EVI'
elif options["dataset"]=='NDWI':
palettedecide='NDWI'
palete=palletedata(palettedecide,None)
data= chart_it(options,palete)
except ee.EEException as e:
data={'error':'Failed to Compute Data . Error Stated::, '+str(e)}
return JsonResponse(data)
def cal_drought(request):
global data , options , year , month
if(request.method=="POST"):
year=request.POST.get('useryear')
month=request.POST.get('usermonth')
data = dodrought(request,year,month)
return JsonResponse(data)
def dataset(request):
"""Renders the about page."""
assert isinstance(request, HttpRequest)
return render(
request,
'app/dataset.html')
def compare(request):
"""Renders the about page."""
assert isinstance(request, HttpRequest)
return render( request, 'app/compare.html')
| 20.869767
| 85
| 0.679964
|
from __future__ import absolute_import, division, print_function, unicode_literals
import config
from django.shortcuts import render, HttpResponse
from django.http import HttpRequest, JsonResponse
from django.template import RequestContext
import ee
import datetime as dt
import types
import os
import json
| true
| true
|
f7184bd44950c87f615ff9713fe4e93a8fe0689c
| 1,467
|
py
|
Python
|
prjxray/bitfilter.py
|
marzoul/prjxray
|
7d22a986a22ce21bff8a2a265805d998be9984ed
|
[
"0BSD"
] | 11
|
2022-02-24T10:36:35.000Z
|
2022-03-23T17:44:21.000Z
|
prjxray/bitfilter.py
|
marzoul/prjxray
|
7d22a986a22ce21bff8a2a265805d998be9984ed
|
[
"0BSD"
] | 24
|
2022-02-21T14:39:14.000Z
|
2022-03-26T13:12:27.000Z
|
prjxray/bitfilter.py
|
marzoul/prjxray
|
7d22a986a22ce21bff8a2a265805d998be9984ed
|
[
"0BSD"
] | 4
|
2022-02-24T04:09:49.000Z
|
2022-03-28T14:09:34.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
from prjxray.util import OpenSafeFile
class Bitfilter(object):
def __init__(
self, frames_to_include=None, frames_to_exclude=[],
bits_to_exclude=[]):
self.frames_to_include = frames_to_include
self.frames_to_exclude = frames_to_exclude
self.bits_to_exclude = bits_to_exclude
def filter(self, frame, bit):
if self.frames_to_include is not None:
if frame in self.frames_to_include:
return True
if frame in self.frames_to_exclude:
return False
if (frame, bit) in self.bits_to_exclude:
return False
return True
BITFILTERS = {
('artix7', 'INT'):
Bitfilter(
frames_to_exclude=[
30,
31,
],
bits_to_exclude=[
#
(0, 36)
]),
}
def get_bitfilter(part, tile):
""" Returns bitfilter for specified part and tile.
    Either returns the bitfilter registered for the specified part and tile type,
    or None, in which case no filtering is applied and all bits are included.
"""
key = (part, tile)
if key in BITFILTERS:
return BITFILTERS[key].filter
else:
return None
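# A minimal usage sketch, assuming the caller applies the filter per
# (frame, bit) pair and treats a None return as "keep every bit":
#
#     flt = get_bitfilter('artix7', 'INT')
#     keep = flt is None or flt(frame, bit)
#
# With the table above, flt(30, 0) is False (frame 30 is excluded) and
# flt(0, 36) is False (that bit is excluded explicitly), while flt(1, 0) is True.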
| 24.04918
| 76
| 0.61486
|
from prjxray.util import OpenSafeFile
class Bitfilter(object):
def __init__(
self, frames_to_include=None, frames_to_exclude=[],
bits_to_exclude=[]):
self.frames_to_include = frames_to_include
self.frames_to_exclude = frames_to_exclude
self.bits_to_exclude = bits_to_exclude
def filter(self, frame, bit):
if self.frames_to_include is not None:
if frame in self.frames_to_include:
return True
if frame in self.frames_to_exclude:
return False
if (frame, bit) in self.bits_to_exclude:
return False
return True
BITFILTERS = {
('artix7', 'INT'):
Bitfilter(
frames_to_exclude=[
30,
31,
],
bits_to_exclude=[
(0, 36)
]),
}
def get_bitfilter(part, tile):
key = (part, tile)
if key in BITFILTERS:
return BITFILTERS[key].filter
else:
return None
| true
| true
|
f7184c3aef6158e81045f4622232b9d88401dd8f
| 740
|
py
|
Python
|
Trabalho 03 - Tutorial Flask/r. Flask Sijax (with Examples)/server.py
|
andressagomes26/tecWeb_UFC
|
5796a73295e799ef1dd33037edc041d4c08ede31
|
[
"MIT"
] | null | null | null |
Trabalho 03 - Tutorial Flask/r. Flask Sijax (with Examples)/server.py
|
andressagomes26/tecWeb_UFC
|
5796a73295e799ef1dd33037edc041d4c08ede31
|
[
"MIT"
] | null | null | null |
Trabalho 03 - Tutorial Flask/r. Flask Sijax (with Examples)/server.py
|
andressagomes26/tecWeb_UFC
|
5796a73295e799ef1dd33037edc041d4c08ede31
|
[
"MIT"
] | null | null | null |
import os
from flask import Flask, g, render_template
import flask_sijax
path = os.path.join('.', os.path.dirname(__file__), 'static/js/sijax/')
app = Flask(__name__)
app.config['SIJAX_STATIC_PATH'] = path
app.config['SIJAX_JSON_URI'] = '/static/js/sijax/json2.js'
flask_sijax.Sijax(app)
@app.route('/')
def index():
return 'Index'
@flask_sijax.route(app, '/hello')
def hello():
def say_hi(obj_response):
obj_response.alert('Hi there!')
if g.sijax.is_sijax_request:
# Sijax request detected - let Sijax handle it
g.sijax.register_callback('say_hi', say_hi)
return g.sijax.process_request()
    return render_template('sijaxexample.html')
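# The rendered template is assumed to load the Sijax JS assets and invoke
# something like Sijax.request('say_hi') in the browser; that call is what
# makes g.sijax.is_sijax_request true on the follow-up request to /hello.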
if __name__ == '__main__':
app.run(debug=True)
| 23.870968
| 71
| 0.686486
|
import os
from flask import Flask, g, render_template
import flask_sijax
path = os.path.join('.', os.path.dirname(__file__), 'static/js/sijax/')
app = Flask(__name__)
app.config['SIJAX_STATIC_PATH'] = path
app.config['SIJAX_JSON_URI'] = '/static/js/sijax/json2.js'
flask_sijax.Sijax(app)
@app.route('/')
def index():
return 'Index'
@flask_sijax.route(app, '/hello')
def hello():
def say_hi(obj_response):
obj_response.alert('Hi there!')
if g.sijax.is_sijax_request:
g.sijax.register_callback('say_hi', say_hi)
return g.sijax.process_request()
    return render_template('sijaxexample.html')
if __name__ == '__main__':
app.run(debug=True)
| true
| true
|
f7184ccd6b6803c7798939baac5536c41c8729ec
| 351
|
py
|
Python
|
docs/conf.py
|
nestauk/AFS_analysis_childcare_providers
|
be2def68aca3c334a0c42c2bb1390e0dcbf2324e
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
nestauk/AFS_analysis_childcare_providers
|
be2def68aca3c334a0c42c2bb1390e0dcbf2324e
|
[
"MIT"
] | 3
|
2021-07-01T14:47:33.000Z
|
2021-07-12T09:15:11.000Z
|
docs/conf.py
|
nestauk/AFS_analysis_childcare_providers
|
be2def68aca3c334a0c42c2bb1390e0dcbf2324e
|
[
"MIT"
] | null | null | null |
"""Sphinx configuration."""
from datetime import datetime
project = "AFS_analysis_childcare_providers"
author = "Nesta"
copyright = f"{datetime.now().year}, {author}"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
# "sphinx_click",
"sphinx_rtd_theme",
]
autodoc_typehints = "description"
html_theme = "sphinx_rtd_theme"
| 21.9375
| 46
| 0.7151
|
from datetime import datetime
project = "AFS_analysis_childcare_providers"
author = "Nesta"
copyright = f"{datetime.now().year}, {author}"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx_rtd_theme",
]
autodoc_typehints = "description"
html_theme = "sphinx_rtd_theme"
| true
| true
|
f7184d25a5cbe14001067f661eafef38d03c4323
| 3,457
|
py
|
Python
|
patient/views.py
|
evantoh/patient-management-system
|
6637eb1344775633759165260ed99843581c0e72
|
[
"Unlicense"
] | 1
|
2018-03-22T17:50:24.000Z
|
2018-03-22T17:50:24.000Z
|
patient/views.py
|
evantoh/patient-management-system
|
6637eb1344775633759165260ed99843581c0e72
|
[
"Unlicense"
] | null | null | null |
patient/views.py
|
evantoh/patient-management-system
|
6637eb1344775633759165260ed99843581c0e72
|
[
"Unlicense"
] | null | null | null |
from django.shortcuts import render,redirect
from .forms import UpdateDocForm,addPatientForm,TreatmentForm,NewNextOfKinForm,NewMedicineForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from .models import Doctor,Medicine,NextOfKin,Patient
# Create your views here.
@login_required(login_url='/accounts/login')
def profile(request):
current_user = request.user
doctor = Doctor.objects.get(id='1')
return render(request, 'profile.html', {'doctor':doctor,'current_user':current_user})
@login_required(login_url='/accounts/login')
def welcome(request):
return render(request,'welcome.html')
@login_required(login_url='/accounts/login')
def update_profile(request, username):
current_user = request.user
username = current_user.username
doctor = Doctor.objects.get(id='1')
if request.method == 'POST':
form = UpdateDocForm(request.POST, request.FILES)
if form.is_valid():
doctor = form.save()
return redirect('updatedoc')
else:
form = UpdateDocForm()
return render(request, 'doctor/profile.html', {'form':form, 'doctor':doctor})
@login_required(login_url='/accounts/login')
def addpatient(request):
current_user = request.user
# doctor = Patient.objects.get(id='1')
if request.method == 'POST':
nextkinform = NewNextOfKinForm(request.POST, request.FILES)
addpatform = addPatientForm(request.POST, request.FILES)
newmedform = NewMedicineForm(request.POST, request.FILES)
if nextkinform.is_valid() and addpatform.is_valid() and newmedform.is_valid():
next_of_kin = nextkinform.save()
medicine = newmedform.save()
patient = addpatform.save()
next_of_kin.save()
medicine.save()
patient.save()
return redirect('/')
else:
addpatform = addPatientForm()
nextkinform = NewNextOfKinForm()
newmedform = NewMedicineForm()
return render(request, 'patient/profile.html', { 'addpatform':addpatform,'nextkinform':nextkinform,'newmedform':newmedform})
@login_required(login_url='/accounts/login')
def treatment(request):
# current_user =request.user
if request.method == 'POST':
form = TreatmentForm(request.POST, request.FILES)
if form.is_valid():
treatment = form.save(commit=False)
treatment.save()
return redirect('/')
else:
form = TreatmentForm()
return render(request, 'treatment/treatment.html', { 'form':form})
@login_required(login_url='/accounts/login')
def allpatient(request):
patients=Patient.objects.all()
return render(request,'patient/all-patients.html',{'patients':patients})
@login_required(login_url='/accounts/login')
def search_results(request):
if 'patient' in request.GET and request.GET["patient"]:
search_term = request.GET.get("patient")
searched_patients = Patient.search_by_first_name(search_term)
message = f"{search_term}"
return render(request, 'search.html',{"message":message,"patients": searched_patients})
else:
message = "You haven't searched for any term"
return render(request, 'search.html',{"message":message})
# def single_patient(request,profile_photo_id):
# patient=Patient.objects.get(id=profile_photo_id)
# return render(request,"single_patient.html",{"patient":patient})
| 40.197674
| 128
| 0.692219
|
from django.shortcuts import render,redirect
from .forms import UpdateDocForm,addPatientForm,TreatmentForm,NewNextOfKinForm,NewMedicineForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from .models import Doctor,Medicine,NextOfKin,Patient
@login_required(login_url='/accounts/login')
def profile(request):
current_user = request.user
doctor = Doctor.objects.get(id='1')
return render(request, 'profile.html', {'doctor':doctor,'current_user':current_user})
@login_required(login_url='/accounts/login')
def welcome(request):
return render(request,'welcome.html')
@login_required(login_url='/accounts/login')
def update_profile(request, username):
current_user = request.user
username = current_user.username
doctor = Doctor.objects.get(id='1')
if request.method == 'POST':
form = UpdateDocForm(request.POST, request.FILES)
if form.is_valid():
doctor = form.save()
return redirect('updatedoc')
else:
form = UpdateDocForm()
return render(request, 'doctor/profile.html', {'form':form, 'doctor':doctor})
@login_required(login_url='/accounts/login')
def addpatient(request):
current_user = request.user
if request.method == 'POST':
nextkinform = NewNextOfKinForm(request.POST, request.FILES)
addpatform = addPatientForm(request.POST, request.FILES)
newmedform = NewMedicineForm(request.POST, request.FILES)
if nextkinform.is_valid() and addpatform.is_valid() and newmedform.is_valid():
next_of_kin = nextkinform.save()
medicine = newmedform.save()
patient = addpatform.save()
next_of_kin.save()
medicine.save()
patient.save()
return redirect('/')
else:
addpatform = addPatientForm()
nextkinform = NewNextOfKinForm()
newmedform = NewMedicineForm()
return render(request, 'patient/profile.html', { 'addpatform':addpatform,'nextkinform':nextkinform,'newmedform':newmedform})
@login_required(login_url='/accounts/login')
def treatment(request):
if request.method == 'POST':
form = TreatmentForm(request.POST, request.FILES)
if form.is_valid():
treatment = form.save(commit=False)
treatment.save()
return redirect('/')
else:
form = TreatmentForm()
return render(request, 'treatment/treatment.html', { 'form':form})
@login_required(login_url='/accounts/login')
def allpatient(request):
patients=Patient.objects.all()
return render(request,'patient/all-patients.html',{'patients':patients})
@login_required(login_url='/accounts/login')
def search_results(request):
if 'patient' in request.GET and request.GET["patient"]:
search_term = request.GET.get("patient")
searched_patients = Patient.search_by_first_name(search_term)
message = f"{search_term}"
return render(request, 'search.html',{"message":message,"patients": searched_patients})
else:
message = "You haven't searched for any term"
return render(request, 'search.html',{"message":message})
# def single_patient(request,profile_photo_id):
# patient=Patient.objects.get(id=profile_photo_id)
# return render(request,"single_patient.html",{"patient":patient})
| true
| true
|
f7184db8be5aae4710f320641fc22c1c336bb606
| 2,049
|
py
|
Python
|
lib/ansiblelint/rules/UseHandlerRatherThanWhenChangedRule.py
|
senyoltw/ansible-lint
|
0e53d73c97601351bbac8a6d2eb092efb29609b8
|
[
"MIT"
] | null | null | null |
lib/ansiblelint/rules/UseHandlerRatherThanWhenChangedRule.py
|
senyoltw/ansible-lint
|
0e53d73c97601351bbac8a6d2eb092efb29609b8
|
[
"MIT"
] | 48
|
2021-03-08T21:13:17.000Z
|
2022-02-13T12:05:41.000Z
|
lib/ansiblelint/rules/UseHandlerRatherThanWhenChangedRule.py
|
xlab-steampunk/ansible-lint
|
443b2dcad2b9fd7bea63c8d9378f3fea13b57e7d
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2016 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ansiblelint.rules import AnsibleLintRule
def _changed_in_when(item):
if not isinstance(item, str):
return False
return any(changed in item for changed in
['.changed', '|changed', '["changed"]', "['changed']"])
class UseHandlerRatherThanWhenChangedRule(AnsibleLintRule):
id = '503'
shortdesc = 'Tasks that run when changed should likely be handlers'
description = (
'If a task has a ``when: result.changed`` setting, it is effectively '
'acting as a handler'
)
severity = 'MEDIUM'
tags = ['task', 'behaviour']
version_added = 'historic'
def matchtask(self, file, task):
if task["__ansible_action_type__"] != 'task':
return False
when = task.get('when')
if isinstance(when, list):
for item in when:
return _changed_in_when(item)
else:
return _changed_in_when(when)
| 38.660377
| 79
| 0.701806
|
from ansiblelint.rules import AnsibleLintRule
def _changed_in_when(item):
if not isinstance(item, str):
return False
return any(changed in item for changed in
['.changed', '|changed', '["changed"]', "['changed']"])
class UseHandlerRatherThanWhenChangedRule(AnsibleLintRule):
id = '503'
shortdesc = 'Tasks that run when changed should likely be handlers'
description = (
'If a task has a ``when: result.changed`` setting, it is effectively '
'acting as a handler'
)
severity = 'MEDIUM'
tags = ['task', 'behaviour']
version_added = 'historic'
def matchtask(self, file, task):
if task["__ansible_action_type__"] != 'task':
return False
when = task.get('when')
if isinstance(when, list):
for item in when:
return _changed_in_when(item)
else:
return _changed_in_when(when)
| true
| true
|
f7184e5c4fd91fba665031f6ca129fc77bd5348b
| 37,578
|
py
|
Python
|
kubernetes_tests/test_kubernetes_pod_operator.py
|
iadi7ya/airflow
|
00ffedb8c402eb5638782628eb706a5f28215eac
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-03-12T20:05:38.000Z
|
2021-03-12T20:05:38.000Z
|
kubernetes_tests/test_kubernetes_pod_operator.py
|
iadi7ya/airflow
|
00ffedb8c402eb5638782628eb706a5f28215eac
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 14
|
2019-12-03T02:54:42.000Z
|
2020-02-27T16:08:10.000Z
|
kubernetes_tests/test_kubernetes_pod_operator.py
|
iadi7ya/airflow
|
00ffedb8c402eb5638782628eb706a5f28215eac
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-07-02T04:23:18.000Z
|
2021-07-02T04:23:18.000Z
|
# pylint: disable=unused-argument
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
import os
import random
import shutil
import sys
import textwrap
import unittest
from unittest import mock
from unittest.mock import ANY
import pendulum
from kubernetes.client import models as k8s
from kubernetes.client.api_client import ApiClient
from kubernetes.client.rest import ApiException
from airflow.exceptions import AirflowException
from airflow.kubernetes import kube_client
from airflow.kubernetes.pod_generator import PodDefaults
from airflow.kubernetes.pod_launcher import PodLauncher
from airflow.kubernetes.secret import Secret
from airflow.models import DAG, TaskInstance
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator
from airflow.utils import timezone
from airflow.version import version as airflow_version
def create_context(task):
dag = DAG(dag_id="dag")
tzinfo = pendulum.timezone("Europe/Amsterdam")
execution_date = timezone.datetime(2016, 1, 1, 1, 0, 0, tzinfo=tzinfo)
task_instance = TaskInstance(task=task,
execution_date=execution_date)
return {
"dag": dag,
"ts": execution_date.isoformat(),
"task": task,
"ti": task_instance,
}
class TestKubernetesPodOperatorSystem(unittest.TestCase):
def get_current_task_name(self):
# reverse test name to make pod name unique (it has limited length)
return "_" + unittest.TestCase.id(self).replace(".", "_")[::-1]
def setUp(self):
self.maxDiff = None # pylint: disable=invalid-name
self.api_client = ApiClient()
self.expected_pod = {
'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {
'namespace': 'default',
'name': ANY,
'annotations': {},
'labels': {
'foo': 'bar', 'kubernetes_pod_operator': 'True',
'airflow_version': airflow_version.replace('+', '-'),
'execution_date': '2016-01-01T0100000100-a2f50a31f',
'dag_id': 'dag',
'task_id': ANY,
'try_number': '1'},
},
'spec': {
'affinity': {},
'containers': [{
'image': 'ubuntu:16.04',
'args': ["echo 10"],
'command': ["bash", "-cx"],
'env': [],
'envFrom': [],
'resources': {},
'name': 'base',
'ports': [],
'volumeMounts': [],
}],
'hostNetwork': False,
'imagePullSecrets': [],
'initContainers': [],
'nodeSelector': {},
'restartPolicy': 'Never',
'securityContext': {},
'serviceAccountName': 'default',
'tolerations': [],
'volumes': [],
}
}
def tearDown(self) -> None:
client = kube_client.get_kube_client(in_cluster=False)
client.delete_collection_namespaced_pod(namespace="default")
import time
time.sleep(1)
def test_do_xcom_push_defaults_false(self):
new_config_path = '/tmp/kube_config'
old_config_path = os.path.expanduser('~/.kube/config')
shutil.copy(old_config_path, new_config_path)
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
config_file=new_config_path,
)
self.assertFalse(k.do_xcom_push)
def test_config_path_move(self):
new_config_path = '/tmp/kube_config'
old_config_path = os.path.expanduser('~/.kube/config')
shutil.copy(old_config_path, new_config_path)
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test1",
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
config_file=new_config_path,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.assertEqual(self.expected_pod, actual_pod)
def test_working_pod(self):
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.assertEqual(self.expected_pod['spec'], actual_pod['spec'])
self.assertEqual(self.expected_pod['metadata']['labels'], actual_pod['metadata']['labels'])
def test_delete_operator_pod(self):
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
is_delete_operator_pod=True,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.assertEqual(self.expected_pod['spec'], actual_pod['spec'])
self.assertEqual(self.expected_pod['metadata']['labels'], actual_pod['metadata']['labels'])
def test_pod_hostnetwork(self):
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
hostnetwork=True,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['hostNetwork'] = True
self.assertEqual(self.expected_pod['spec'], actual_pod['spec'])
self.assertEqual(self.expected_pod['metadata']['labels'], actual_pod['metadata']['labels'])
def test_pod_dnspolicy(self):
dns_policy = "ClusterFirstWithHostNet"
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
hostnetwork=True,
dnspolicy=dns_policy
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['hostNetwork'] = True
self.expected_pod['spec']['dnsPolicy'] = dns_policy
self.assertEqual(self.expected_pod['spec'], actual_pod['spec'])
self.assertEqual(self.expected_pod['metadata']['labels'], actual_pod['metadata']['labels'])
def test_pod_schedulername(self):
scheduler_name = "default-scheduler"
k = KubernetesPodOperator(
namespace="default",
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
schedulername=scheduler_name
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['schedulerName'] = scheduler_name
self.assertEqual(self.expected_pod, actual_pod)
def test_pod_node_selectors(self):
node_selectors = {
'beta.kubernetes.io/os': 'linux'
}
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
node_selectors=node_selectors,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['nodeSelector'] = node_selectors
self.assertEqual(self.expected_pod, actual_pod)
def test_pod_resources(self):
resources = k8s.V1ResourceRequirements(
requests={
'memory': '64Mi',
'cpu': '250m',
'ephemeral-storage': '1Gi'
},
limits={
'memory': '64Mi',
'cpu': 0.25,
'nvidia.com/gpu': None,
'ephemeral-storage': '2Gi'
}
)
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
resources=resources,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['containers'][0]['resources'] = {
'requests': {
'memory': '64Mi',
'cpu': '250m',
'ephemeral-storage': '1Gi'
},
'limits': {
'memory': '64Mi',
'cpu': 0.25,
'nvidia.com/gpu': None,
'ephemeral-storage': '2Gi'
}
}
self.assertEqual(self.expected_pod, actual_pod)
def test_pod_affinity(self):
affinity = {
'nodeAffinity': {
'requiredDuringSchedulingIgnoredDuringExecution': {
'nodeSelectorTerms': [
{
'matchExpressions': [
{
'key': 'beta.kubernetes.io/os',
'operator': 'In',
'values': ['linux']
}
]
}
]
}
}
}
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
affinity=affinity,
)
context = create_context(k)
k.execute(context=context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['affinity'] = affinity
self.assertEqual(self.expected_pod, actual_pod)
def test_port(self):
port = k8s.V1ContainerPort(
name='http',
container_port=80,
)
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
ports=[port],
)
context = create_context(k)
k.execute(context=context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['containers'][0]['ports'] = [{
'name': 'http',
'containerPort': 80
}]
self.assertEqual(self.expected_pod, actual_pod)
def test_volume_mount(self):
with mock.patch.object(PodLauncher, 'log') as mock_logger:
volume_mount = k8s.V1VolumeMount(
name='test-volume',
mount_path='/tmp/test_volume',
sub_path=None,
read_only=False
)
volume = k8s.V1Volume(
name='test-volume',
persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(
claim_name='test-volume'
)
)
args = ["echo \"retrieved from mount\" > /tmp/test_volume/test.txt "
"&& cat /tmp/test_volume/test.txt"]
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=args,
labels={"foo": "bar"},
volume_mounts=[volume_mount],
volumes=[volume],
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
)
context = create_context(k)
k.execute(context=context)
mock_logger.info.assert_any_call(b"retrieved from mount\n")
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['containers'][0]['args'] = args
self.expected_pod['spec']['containers'][0]['volumeMounts'] = [{
'name': 'test-volume',
'mountPath': '/tmp/test_volume',
'readOnly': False
}]
self.expected_pod['spec']['volumes'] = [{
'name': 'test-volume',
'persistentVolumeClaim': {
'claimName': 'test-volume'
}
}]
self.assertEqual(self.expected_pod, actual_pod)
def test_run_as_user_root(self):
security_context = {
'securityContext': {
'runAsUser': 0,
}
}
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
security_context=security_context,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['securityContext'] = security_context
self.assertEqual(self.expected_pod, actual_pod)
def test_run_as_user_non_root(self):
security_context = {
'securityContext': {
'runAsUser': 1000,
}
}
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
security_context=security_context,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['securityContext'] = security_context
self.assertEqual(self.expected_pod, actual_pod)
def test_fs_group(self):
security_context = {
'securityContext': {
'fsGroup': 1000,
}
}
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-fs-group",
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
security_context=security_context,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['securityContext'] = security_context
self.assertEqual(self.expected_pod, actual_pod)
def test_faulty_image(self):
bad_image_name = "foobar"
k = KubernetesPodOperator(
namespace='default',
image=bad_image_name,
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
startup_timeout_seconds=5,
)
with self.assertRaises(AirflowException):
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['containers'][0]['image'] = bad_image_name
self.assertEqual(self.expected_pod, actual_pod)
def test_faulty_service_account(self):
bad_service_account_name = "foobar"
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
startup_timeout_seconds=5,
service_account_name=bad_service_account_name,
)
with self.assertRaises(ApiException):
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['serviceAccountName'] = bad_service_account_name
self.assertEqual(self.expected_pod, actual_pod)
def test_pod_failure(self):
"""
Tests that the task fails when a pod reports a failure
"""
bad_internal_command = ["foobar 10 "]
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=bad_internal_command,
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
)
with self.assertRaises(AirflowException):
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['containers'][0]['args'] = bad_internal_command
self.assertEqual(self.expected_pod, actual_pod)
def test_xcom_push(self):
return_value = '{"foo": "bar"\n, "buzz": 2}'
args = ['echo \'{}\' > /airflow/xcom/return.json'.format(return_value)]
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=args,
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=True,
)
context = create_context(k)
self.assertEqual(k.execute(context), json.loads(return_value))
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
volume = self.api_client.sanitize_for_serialization(PodDefaults.VOLUME)
volume_mount = self.api_client.sanitize_for_serialization(PodDefaults.VOLUME_MOUNT)
container = self.api_client.sanitize_for_serialization(PodDefaults.SIDECAR_CONTAINER)
self.expected_pod['spec']['containers'][0]['args'] = args
self.expected_pod['spec']['containers'][0]['volumeMounts'].insert(0, volume_mount) # noqa
self.expected_pod['spec']['volumes'].insert(0, volume)
self.expected_pod['spec']['containers'].append(container)
self.assertEqual(self.expected_pod, actual_pod)
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@mock.patch("airflow.kubernetes.kube_client.get_kube_client")
def test_envs_from_configmaps(self, mock_client, mock_monitor, mock_start):
# GIVEN
from airflow.utils.state import State
configmap_name = "test-config-map"
env_from = [k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(
name=configmap_name
))]
# WHEN
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
env_from=env_from
)
# THEN
mock_monitor.return_value = (State.SUCCESS, None)
context = create_context(k)
k.execute(context)
self.assertEqual(
mock_start.call_args[0][0].spec.containers[0].env_from, env_from
)
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@mock.patch("airflow.kubernetes.kube_client.get_kube_client")
def test_envs_from_secrets(self, mock_client, monitor_mock, start_mock):
# GIVEN
from airflow.utils.state import State
secret_ref = 'secret_name'
secrets = [Secret('env', None, secret_ref)]
# WHEN
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
secrets=secrets,
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
)
# THEN
monitor_mock.return_value = (State.SUCCESS, None)
context = create_context(k)
k.execute(context)
self.assertEqual(
start_mock.call_args[0][0].spec.containers[0].env_from,
[k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(
name=secret_ref
))]
)
def test_env_vars(self):
# WHEN
env_vars = [
k8s.V1EnvVar(
name="ENV1",
value="val1"
),
k8s.V1EnvVar(
name="ENV2",
value="val2"
),
k8s.V1EnvVar(
name="ENV3",
value_from=k8s.V1EnvVarSource(
field_ref=k8s.V1ObjectFieldSelector(
field_path="status.podIP"
)
)
),
]
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
env_vars=env_vars,
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
)
context = create_context(k)
k.execute(context)
# THEN
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['containers'][0]['env'] = [
{'name': 'ENV1', 'value': 'val1'},
{'name': 'ENV2', 'value': 'val2'},
{
'name': 'ENV3',
'valueFrom': {
'fieldRef': {
'fieldPath': 'status.podIP'
}
}
}
]
self.assertEqual(self.expected_pod, actual_pod)
def test_pod_template_file_system(self):
fixture = sys.path[0] + '/tests/kubernetes/basic_pod.yaml'
k = KubernetesPodOperator(
task_id="task" + self.get_current_task_name(),
in_cluster=False,
pod_template_file=fixture,
do_xcom_push=True
)
context = create_context(k)
result = k.execute(context)
self.assertIsNotNone(result)
self.assertDictEqual(result, {"hello": "world"})
def test_pod_template_file_with_overrides_system(self):
fixture = sys.path[0] + '/tests/kubernetes/basic_pod.yaml'
k = KubernetesPodOperator(
task_id="task" + self.get_current_task_name(),
labels={"foo": "bar", "fizz": "buzz"},
env_vars=[k8s.V1EnvVar(name="env_name", value="value")],
in_cluster=False,
pod_template_file=fixture,
do_xcom_push=True
)
context = create_context(k)
result = k.execute(context)
self.assertIsNotNone(result)
self.assertEqual(k.pod.metadata.labels, {'fizz': 'buzz', 'foo': 'bar'})
self.assertEqual(k.pod.spec.containers[0].env, [k8s.V1EnvVar(name="env_name", value="value")])
self.assertDictEqual(result, {"hello": "world"})
def test_init_container(self):
# GIVEN
volume_mounts = [k8s.V1VolumeMount(
mount_path='/etc/foo',
name='test-volume',
sub_path=None,
read_only=True
)]
init_environments = [k8s.V1EnvVar(
name='key1',
value='value1'
), k8s.V1EnvVar(
name='key2',
value='value2'
)]
init_container = k8s.V1Container(
name="init-container",
image="ubuntu:16.04",
env=init_environments,
volume_mounts=volume_mounts,
command=["bash", "-cx"],
args=["echo 10"]
)
volume = k8s.V1Volume(
name='test-volume',
persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(
claim_name='test-volume'
)
)
expected_init_container = {
'name': 'init-container',
'image': 'ubuntu:16.04',
'command': ['bash', '-cx'],
'args': ['echo 10'],
'env': [{
'name': 'key1',
'value': 'value1'
}, {
'name': 'key2',
'value': 'value2'
}],
'volumeMounts': [{
'mountPath': '/etc/foo',
'name': 'test-volume',
'readOnly': True
}],
}
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
volumes=[volume],
init_containers=[init_container],
in_cluster=False,
do_xcom_push=False,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['initContainers'] = [expected_init_container]
self.expected_pod['spec']['volumes'] = [{
'name': 'test-volume',
'persistentVolumeClaim': {
'claimName': 'test-volume'
}
}]
self.assertEqual(self.expected_pod, actual_pod)
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@mock.patch("airflow.kubernetes.kube_client.get_kube_client")
def test_pod_template_file(
self,
mock_client,
monitor_mock,
start_mock # pylint: disable=unused-argument
):
from airflow.utils.state import State
path = sys.path[0] + '/tests/kubernetes/pod.yaml'
k = KubernetesPodOperator(
task_id="task" + self.get_current_task_name(),
pod_template_file=path,
do_xcom_push=True
)
monitor_mock.return_value = (State.SUCCESS, None)
context = create_context(k)
with self.assertLogs(k.log, level=logging.DEBUG) as cm:
k.execute(context)
expected_line = textwrap.dedent("""\
DEBUG:airflow.task.operators:Starting pod:
api_version: v1
kind: Pod
metadata:
annotations: {}
cluster_name: null
creation_timestamp: null
deletion_grace_period_seconds: null\
""").strip()
self.assertTrue(any(line.startswith(expected_line) for line in cm.output))
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
expected_dict = {'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {'annotations': {},
'labels': {},
'name': 'memory-demo',
'namespace': 'mem-example'},
'spec': {'affinity': {},
'containers': [{'args': ['--vm',
'1',
'--vm-bytes',
'150M',
'--vm-hang',
'1'],
'command': ['stress'],
'env': [],
'envFrom': [],
'image': 'apache/airflow:stress-2020.07.10-1.0.4',
'name': 'base',
'ports': [],
'resources': {'limits': {'memory': '200Mi'},
'requests': {'memory': '100Mi'}},
'volumeMounts': [{'mountPath': '/airflow/xcom',
'name': 'xcom'}]},
{'command': ['sh',
'-c',
'trap "exit 0" INT; while true; do sleep '
'30; done;'],
'image': 'alpine',
'name': 'airflow-xcom-sidecar',
'resources': {'requests': {'cpu': '1m'}},
'volumeMounts': [{'mountPath': '/airflow/xcom',
'name': 'xcom'}]}],
'hostNetwork': False,
'imagePullSecrets': [],
'initContainers': [],
'nodeSelector': {},
'restartPolicy': 'Never',
'securityContext': {},
'serviceAccountName': 'default',
'tolerations': [],
'volumes': [{'emptyDir': {}, 'name': 'xcom'}]}}
self.assertEqual(expected_dict, actual_pod)
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@mock.patch("airflow.kubernetes.kube_client.get_kube_client")
def test_pod_priority_class_name(
self,
mock_client,
monitor_mock,
start_mock # pylint: disable=unused-argument
):
"""Test ability to assign priorityClassName to pod
"""
from airflow.utils.state import State
priority_class_name = "medium-test"
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
priority_class_name=priority_class_name,
)
monitor_mock.return_value = (State.SUCCESS, None)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['priorityClassName'] = priority_class_name
self.assertEqual(self.expected_pod, actual_pod)
def test_pod_name(self):
pod_name_too_long = "a" * 221
with self.assertRaises(AirflowException):
KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name=pod_name_too_long,
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
)
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
def test_on_kill(self,
monitor_mock): # pylint: disable=unused-argument
from airflow.utils.state import State
client = kube_client.get_kube_client(in_cluster=False)
name = "test"
namespace = "default"
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["sleep 1000"],
labels={"foo": "bar"},
name="test",
task_id=name,
in_cluster=False,
do_xcom_push=False,
termination_grace_period=0,
)
context = create_context(k)
monitor_mock.return_value = (State.SUCCESS, None)
k.execute(context)
name = k.pod.metadata.name
pod = client.read_namespaced_pod(name=name, namespace=namespace)
self.assertEqual(pod.status.phase, "Running")
k.on_kill()
with self.assertRaises(ApiException):
pod = client.read_namespaced_pod(name=name, namespace=namespace)
# pylint: enable=unused-argument
| 38.30581
| 104
| 0.526159
|
import json
import logging
import os
import random
import shutil
import sys
import textwrap
import unittest
from unittest import mock
from unittest.mock import ANY
import pendulum
from kubernetes.client import models as k8s
from kubernetes.client.api_client import ApiClient
from kubernetes.client.rest import ApiException
from airflow.exceptions import AirflowException
from airflow.kubernetes import kube_client
from airflow.kubernetes.pod_generator import PodDefaults
from airflow.kubernetes.pod_launcher import PodLauncher
from airflow.kubernetes.secret import Secret
from airflow.models import DAG, TaskInstance
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator
from airflow.utils import timezone
from airflow.version import version as airflow_version
def create_context(task):
dag = DAG(dag_id="dag")
tzinfo = pendulum.timezone("Europe/Amsterdam")
execution_date = timezone.datetime(2016, 1, 1, 1, 0, 0, tzinfo=tzinfo)
task_instance = TaskInstance(task=task,
execution_date=execution_date)
return {
"dag": dag,
"ts": execution_date.isoformat(),
"task": task,
"ti": task_instance,
}
class TestKubernetesPodOperatorSystem(unittest.TestCase):
def get_current_task_name(self):
return "_" + unittest.TestCase.id(self).replace(".", "_")[::-1]
def setUp(self):
self.maxDiff = None
self.api_client = ApiClient()
self.expected_pod = {
'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {
'namespace': 'default',
'name': ANY,
'annotations': {},
'labels': {
'foo': 'bar', 'kubernetes_pod_operator': 'True',
'airflow_version': airflow_version.replace('+', '-'),
'execution_date': '2016-01-01T0100000100-a2f50a31f',
'dag_id': 'dag',
'task_id': ANY,
'try_number': '1'},
},
'spec': {
'affinity': {},
'containers': [{
'image': 'ubuntu:16.04',
'args': ["echo 10"],
'command': ["bash", "-cx"],
'env': [],
'envFrom': [],
'resources': {},
'name': 'base',
'ports': [],
'volumeMounts': [],
}],
'hostNetwork': False,
'imagePullSecrets': [],
'initContainers': [],
'nodeSelector': {},
'restartPolicy': 'Never',
'securityContext': {},
'serviceAccountName': 'default',
'tolerations': [],
'volumes': [],
}
}
def tearDown(self) -> None:
client = kube_client.get_kube_client(in_cluster=False)
client.delete_collection_namespaced_pod(namespace="default")
import time
time.sleep(1)
def test_do_xcom_push_defaults_false(self):
new_config_path = '/tmp/kube_config'
old_config_path = os.path.expanduser('~/.kube/config')
shutil.copy(old_config_path, new_config_path)
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
config_file=new_config_path,
)
self.assertFalse(k.do_xcom_push)
def test_config_path_move(self):
new_config_path = '/tmp/kube_config'
old_config_path = os.path.expanduser('~/.kube/config')
shutil.copy(old_config_path, new_config_path)
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test1",
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
config_file=new_config_path,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.assertEqual(self.expected_pod, actual_pod)
def test_working_pod(self):
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.assertEqual(self.expected_pod['spec'], actual_pod['spec'])
self.assertEqual(self.expected_pod['metadata']['labels'], actual_pod['metadata']['labels'])
def test_delete_operator_pod(self):
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
is_delete_operator_pod=True,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.assertEqual(self.expected_pod['spec'], actual_pod['spec'])
self.assertEqual(self.expected_pod['metadata']['labels'], actual_pod['metadata']['labels'])
def test_pod_hostnetwork(self):
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
hostnetwork=True,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['hostNetwork'] = True
self.assertEqual(self.expected_pod['spec'], actual_pod['spec'])
self.assertEqual(self.expected_pod['metadata']['labels'], actual_pod['metadata']['labels'])
def test_pod_dnspolicy(self):
dns_policy = "ClusterFirstWithHostNet"
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
hostnetwork=True,
dnspolicy=dns_policy
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['hostNetwork'] = True
self.expected_pod['spec']['dnsPolicy'] = dns_policy
self.assertEqual(self.expected_pod['spec'], actual_pod['spec'])
self.assertEqual(self.expected_pod['metadata']['labels'], actual_pod['metadata']['labels'])
def test_pod_schedulername(self):
scheduler_name = "default-scheduler"
k = KubernetesPodOperator(
namespace="default",
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
schedulername=scheduler_name
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['schedulerName'] = scheduler_name
self.assertEqual(self.expected_pod, actual_pod)
def test_pod_node_selectors(self):
node_selectors = {
'beta.kubernetes.io/os': 'linux'
}
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
node_selectors=node_selectors,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['nodeSelector'] = node_selectors
self.assertEqual(self.expected_pod, actual_pod)
def test_pod_resources(self):
resources = k8s.V1ResourceRequirements(
requests={
'memory': '64Mi',
'cpu': '250m',
'ephemeral-storage': '1Gi'
},
limits={
'memory': '64Mi',
'cpu': 0.25,
'nvidia.com/gpu': None,
'ephemeral-storage': '2Gi'
}
)
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
resources=resources,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['containers'][0]['resources'] = {
'requests': {
'memory': '64Mi',
'cpu': '250m',
'ephemeral-storage': '1Gi'
},
'limits': {
'memory': '64Mi',
'cpu': 0.25,
'nvidia.com/gpu': None,
'ephemeral-storage': '2Gi'
}
}
self.assertEqual(self.expected_pod, actual_pod)
def test_pod_affinity(self):
affinity = {
'nodeAffinity': {
'requiredDuringSchedulingIgnoredDuringExecution': {
'nodeSelectorTerms': [
{
'matchExpressions': [
{
'key': 'beta.kubernetes.io/os',
'operator': 'In',
'values': ['linux']
}
]
}
]
}
}
}
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
affinity=affinity,
)
context = create_context(k)
k.execute(context=context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['affinity'] = affinity
self.assertEqual(self.expected_pod, actual_pod)
def test_port(self):
port = k8s.V1ContainerPort(
name='http',
container_port=80,
)
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
ports=[port],
)
context = create_context(k)
k.execute(context=context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['containers'][0]['ports'] = [{
'name': 'http',
'containerPort': 80
}]
self.assertEqual(self.expected_pod, actual_pod)
def test_volume_mount(self):
with mock.patch.object(PodLauncher, 'log') as mock_logger:
volume_mount = k8s.V1VolumeMount(
name='test-volume',
mount_path='/tmp/test_volume',
sub_path=None,
read_only=False
)
volume = k8s.V1Volume(
name='test-volume',
persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(
claim_name='test-volume'
)
)
args = ["echo \"retrieved from mount\" > /tmp/test_volume/test.txt "
"&& cat /tmp/test_volume/test.txt"]
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=args,
labels={"foo": "bar"},
volume_mounts=[volume_mount],
volumes=[volume],
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
)
context = create_context(k)
k.execute(context=context)
mock_logger.info.assert_any_call(b"retrieved from mount\n")
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['containers'][0]['args'] = args
self.expected_pod['spec']['containers'][0]['volumeMounts'] = [{
'name': 'test-volume',
'mountPath': '/tmp/test_volume',
'readOnly': False
}]
self.expected_pod['spec']['volumes'] = [{
'name': 'test-volume',
'persistentVolumeClaim': {
'claimName': 'test-volume'
}
}]
self.assertEqual(self.expected_pod, actual_pod)
def test_run_as_user_root(self):
security_context = {
'securityContext': {
'runAsUser': 0,
}
}
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
security_context=security_context,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['securityContext'] = security_context
self.assertEqual(self.expected_pod, actual_pod)
def test_run_as_user_non_root(self):
security_context = {
'securityContext': {
'runAsUser': 1000,
}
}
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
security_context=security_context,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['securityContext'] = security_context
self.assertEqual(self.expected_pod, actual_pod)
def test_fs_group(self):
security_context = {
'securityContext': {
'fsGroup': 1000,
}
}
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-fs-group",
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
security_context=security_context,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['securityContext'] = security_context
self.assertEqual(self.expected_pod, actual_pod)
def test_faulty_image(self):
bad_image_name = "foobar"
k = KubernetesPodOperator(
namespace='default',
image=bad_image_name,
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
startup_timeout_seconds=5,
)
with self.assertRaises(AirflowException):
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['containers'][0]['image'] = bad_image_name
self.assertEqual(self.expected_pod, actual_pod)
def test_faulty_service_account(self):
bad_service_account_name = "foobar"
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
startup_timeout_seconds=5,
service_account_name=bad_service_account_name,
)
with self.assertRaises(ApiException):
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['serviceAccountName'] = bad_service_account_name
self.assertEqual(self.expected_pod, actual_pod)
def test_pod_failure(self):
bad_internal_command = ["foobar 10 "]
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=bad_internal_command,
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
)
with self.assertRaises(AirflowException):
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['containers'][0]['args'] = bad_internal_command
self.assertEqual(self.expected_pod, actual_pod)
def test_xcom_push(self):
return_value = '{"foo": "bar"\n, "buzz": 2}'
args = ['echo \'{}\' > /airflow/xcom/return.json'.format(return_value)]
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=args,
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=True,
)
context = create_context(k)
self.assertEqual(k.execute(context), json.loads(return_value))
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
volume = self.api_client.sanitize_for_serialization(PodDefaults.VOLUME)
volume_mount = self.api_client.sanitize_for_serialization(PodDefaults.VOLUME_MOUNT)
container = self.api_client.sanitize_for_serialization(PodDefaults.SIDECAR_CONTAINER)
self.expected_pod['spec']['containers'][0]['args'] = args
self.expected_pod['spec']['containers'][0]['volumeMounts'].insert(0, volume_mount)
self.expected_pod['spec']['volumes'].insert(0, volume)
self.expected_pod['spec']['containers'].append(container)
self.assertEqual(self.expected_pod, actual_pod)
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@mock.patch("airflow.kubernetes.kube_client.get_kube_client")
def test_envs_from_configmaps(self, mock_client, mock_monitor, mock_start):
from airflow.utils.state import State
configmap_name = "test-config-map"
env_from = [k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(
name=configmap_name
))]
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
env_from=env_from
)
mock_monitor.return_value = (State.SUCCESS, None)
context = create_context(k)
k.execute(context)
self.assertEqual(
mock_start.call_args[0][0].spec.containers[0].env_from, env_from
)
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@mock.patch("airflow.kubernetes.kube_client.get_kube_client")
def test_envs_from_secrets(self, mock_client, monitor_mock, start_mock):
from airflow.utils.state import State
secret_ref = 'secret_name'
secrets = [Secret('env', None, secret_ref)]
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
secrets=secrets,
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
)
monitor_mock.return_value = (State.SUCCESS, None)
context = create_context(k)
k.execute(context)
self.assertEqual(
start_mock.call_args[0][0].spec.containers[0].env_from,
[k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(
name=secret_ref
))]
)
def test_env_vars(self):
env_vars = [
k8s.V1EnvVar(
name="ENV1",
value="val1"
),
k8s.V1EnvVar(
name="ENV2",
value="val2"
),
k8s.V1EnvVar(
name="ENV3",
value_from=k8s.V1EnvVarSource(
field_ref=k8s.V1ObjectFieldSelector(
field_path="status.podIP"
)
)
),
]
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
env_vars=env_vars,
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['containers'][0]['env'] = [
{'name': 'ENV1', 'value': 'val1'},
{'name': 'ENV2', 'value': 'val2'},
{
'name': 'ENV3',
'valueFrom': {
'fieldRef': {
'fieldPath': 'status.podIP'
}
}
}
]
self.assertEqual(self.expected_pod, actual_pod)
def test_pod_template_file_system(self):
fixture = sys.path[0] + '/tests/kubernetes/basic_pod.yaml'
k = KubernetesPodOperator(
task_id="task" + self.get_current_task_name(),
in_cluster=False,
pod_template_file=fixture,
do_xcom_push=True
)
context = create_context(k)
result = k.execute(context)
self.assertIsNotNone(result)
self.assertDictEqual(result, {"hello": "world"})
def test_pod_template_file_with_overrides_system(self):
fixture = sys.path[0] + '/tests/kubernetes/basic_pod.yaml'
k = KubernetesPodOperator(
task_id="task" + self.get_current_task_name(),
labels={"foo": "bar", "fizz": "buzz"},
env_vars=[k8s.V1EnvVar(name="env_name", value="value")],
in_cluster=False,
pod_template_file=fixture,
do_xcom_push=True
)
context = create_context(k)
result = k.execute(context)
self.assertIsNotNone(result)
self.assertEqual(k.pod.metadata.labels, {'fizz': 'buzz', 'foo': 'bar'})
self.assertEqual(k.pod.spec.containers[0].env, [k8s.V1EnvVar(name="env_name", value="value")])
self.assertDictEqual(result, {"hello": "world"})
def test_init_container(self):
volume_mounts = [k8s.V1VolumeMount(
mount_path='/etc/foo',
name='test-volume',
sub_path=None,
read_only=True
)]
init_environments = [k8s.V1EnvVar(
name='key1',
value='value1'
), k8s.V1EnvVar(
name='key2',
value='value2'
)]
init_container = k8s.V1Container(
name="init-container",
image="ubuntu:16.04",
env=init_environments,
volume_mounts=volume_mounts,
command=["bash", "-cx"],
args=["echo 10"]
)
volume = k8s.V1Volume(
name='test-volume',
persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(
claim_name='test-volume'
)
)
expected_init_container = {
'name': 'init-container',
'image': 'ubuntu:16.04',
'command': ['bash', '-cx'],
'args': ['echo 10'],
'env': [{
'name': 'key1',
'value': 'value1'
}, {
'name': 'key2',
'value': 'value2'
}],
'volumeMounts': [{
'mountPath': '/etc/foo',
'name': 'test-volume',
'readOnly': True
}],
}
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
volumes=[volume],
init_containers=[init_container],
in_cluster=False,
do_xcom_push=False,
)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['initContainers'] = [expected_init_container]
self.expected_pod['spec']['volumes'] = [{
'name': 'test-volume',
'persistentVolumeClaim': {
'claimName': 'test-volume'
}
}]
self.assertEqual(self.expected_pod, actual_pod)
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@mock.patch("airflow.kubernetes.kube_client.get_kube_client")
def test_pod_template_file(
self,
mock_client,
monitor_mock,
start_mock
):
from airflow.utils.state import State
path = sys.path[0] + '/tests/kubernetes/pod.yaml'
k = KubernetesPodOperator(
task_id="task" + self.get_current_task_name(),
pod_template_file=path,
do_xcom_push=True
)
monitor_mock.return_value = (State.SUCCESS, None)
context = create_context(k)
with self.assertLogs(k.log, level=logging.DEBUG) as cm:
k.execute(context)
expected_line = textwrap.dedent("""\
DEBUG:airflow.task.operators:Starting pod:
api_version: v1
kind: Pod
metadata:
annotations: {}
cluster_name: null
creation_timestamp: null
deletion_grace_period_seconds: null\
""").strip()
self.assertTrue(any(line.startswith(expected_line) for line in cm.output))
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
expected_dict = {'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {'annotations': {},
'labels': {},
'name': 'memory-demo',
'namespace': 'mem-example'},
'spec': {'affinity': {},
'containers': [{'args': ['--vm',
'1',
'--vm-bytes',
'150M',
'--vm-hang',
'1'],
'command': ['stress'],
'env': [],
'envFrom': [],
'image': 'apache/airflow:stress-2020.07.10-1.0.4',
'name': 'base',
'ports': [],
'resources': {'limits': {'memory': '200Mi'},
'requests': {'memory': '100Mi'}},
'volumeMounts': [{'mountPath': '/airflow/xcom',
'name': 'xcom'}]},
{'command': ['sh',
'-c',
'trap "exit 0" INT; while true; do sleep '
'30; done;'],
'image': 'alpine',
'name': 'airflow-xcom-sidecar',
'resources': {'requests': {'cpu': '1m'}},
'volumeMounts': [{'mountPath': '/airflow/xcom',
'name': 'xcom'}]}],
'hostNetwork': False,
'imagePullSecrets': [],
'initContainers': [],
'nodeSelector': {},
'restartPolicy': 'Never',
'securityContext': {},
'serviceAccountName': 'default',
'tolerations': [],
'volumes': [{'emptyDir': {}, 'name': 'xcom'}]}}
self.assertEqual(expected_dict, actual_pod)
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@mock.patch("airflow.kubernetes.kube_client.get_kube_client")
def test_pod_priority_class_name(
self,
mock_client,
monitor_mock,
start_mock
):
from airflow.utils.state import State
priority_class_name = "medium-test"
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name="test-" + str(random.randint(0, 1000000)),
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
priority_class_name=priority_class_name,
)
monitor_mock.return_value = (State.SUCCESS, None)
context = create_context(k)
k.execute(context)
actual_pod = self.api_client.sanitize_for_serialization(k.pod)
self.expected_pod['spec']['priorityClassName'] = priority_class_name
self.assertEqual(self.expected_pod, actual_pod)
def test_pod_name(self):
pod_name_too_long = "a" * 221
with self.assertRaises(AirflowException):
KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name=pod_name_too_long,
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
)
@mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
def test_on_kill(self,
monitor_mock):
from airflow.utils.state import State
client = kube_client.get_kube_client(in_cluster=False)
name = "test"
namespace = "default"
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["sleep 1000"],
labels={"foo": "bar"},
name="test",
task_id=name,
in_cluster=False,
do_xcom_push=False,
termination_grace_period=0,
)
context = create_context(k)
monitor_mock.return_value = (State.SUCCESS, None)
k.execute(context)
name = k.pod.metadata.name
pod = client.read_namespaced_pod(name=name, namespace=namespace)
self.assertEqual(pod.status.phase, "Running")
k.on_kill()
with self.assertRaises(ApiException):
pod = client.read_namespaced_pod(name=name, namespace=namespace)
| true
| true
|
f7184fb7953e6e5d92cdd9eb99f985d3e77150c6
| 17,204
|
py
|
Python
|
megatron/tokenizer/bert_tokenization_jp.py
|
Xianchao-Wu/megatron2
|
f793c37223b32051cb61d3b1d5661dddd57634bf
|
[
"MIT"
] | 1
|
2022-03-24T11:13:41.000Z
|
2022-03-24T11:13:41.000Z
|
megatron/tokenizer/bert_tokenization_jp.py
|
Xianchao-Wu/megatron2
|
f793c37223b32051cb61d3b1d5661dddd57634bf
|
[
"MIT"
] | null | null | null |
megatron/tokenizer/bert_tokenization_jp.py
|
Xianchao-Wu/megatron2
|
f793c37223b32051cb61d3b1d5661dddd57634bf
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
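# A minimal, hedged usage sketch of the check above. The checkpoint path is an
# illustrative assumption; only the path string is inspected, no file is read.
def _example_validate_case_matches_checkpoint():
    # An uncased checkpoint name combined with do_lower_case=True passes silently.
    validate_case_matches_checkpoint(
        do_lower_case=True,
        init_checkpoint="uncased_L-12_H-768_A-12/bert_model.ckpt")
    # The same checkpoint name with do_lower_case=False raises ValueError.
    try:
        validate_case_matches_checkpoint(
            do_lower_case=False,
            init_checkpoint="uncased_L-12_H-768_A-12/bert_model.ckpt")
    except ValueError:
        pass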
def convert_to_unicode(text):  # Japanese input is handled fine here
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    if six.PY3:  # six.PY3 is a boolean indicating whether we are running on Python 3
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
            # errors='ignore' selects the decode error handling; with 'strict', a decoding error raises UnicodeError.
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
                break  # readline() returns '' only at EOF, so breaking here is correct
token = token.strip()
vocab[token] = index
index += 1
return vocab
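# A small sketch of the vocab layout load_vocab() expects: one token per line, the
# id being the line number. The tokens below are illustrative assumptions.
def _example_vocab_layout():
    lines = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "the", "##ing"]
    vocab = collections.OrderedDict((token, index) for index, token in enumerate(lines))
    assert vocab["[UNK]"] == 1 and vocab["##ing"] == 5
    return vocab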
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
        output.append(vocab[item])  # note: vocab maps token -> id; an item missing from the vocab raises KeyError
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
    return tokens  # the input text is split on whitespace to build this list of tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
# refer to https://github.com/cl-tohoku/bert-japanese/blob/master/tokenization.py
def __init__(self, vocab_file, do_lower_case=True, mecab_dict_path=None):
self.vocab = load_vocab(vocab_file)
# str:id vocab_file 'C:\\Users\\user\\source\\repos\\megatron\\megatron\\pretrained\\bert-large-cased-vocab.txt' str
        self.inv_vocab = {v: k for k, v in self.vocab.items()}  # id -> token, the inverse of the vocabulary
self.basic_tokenizer = MecabBasicTokenizer(do_lower_case=do_lower_case, mecab_dict_path=mecab_dict_path)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) # keep using existing method (no change)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
def vocab_size(self):
return len(self.vocab)
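# A hedged end-to-end usage sketch: `vocab_path` is a hypothetical wordpiece vocab
# file, and MeCab/ipadic must be installed, since FullTokenizer chains
# MecabBasicTokenizer (morphological split) with WordpieceTokenizer (subword split).
def _example_full_tokenizer(vocab_path):
    tokenizer = FullTokenizer(vocab_file=vocab_path, do_lower_case=True)
    tokens = tokenizer.tokenize("吾輩は猫である")
    ids = tokenizer.convert_tokens_to_ids(tokens)
    return tokens, ids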
class MecabBasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, never_split=None, mecab_dict_path=None,
preserve_spaces=False):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
if never_split is None:
never_split = []
self.never_split = never_split
self.preserve_spaces = preserve_spaces
import MeCab # TODO
import ipadic
CHASEN_ARGS = r' -F "%m\t%f[7]\t%f[6]\t%F-[0,1,2,3]\t%f[4]\t%f[5]\n"'
CHASEN_ARGS += r' -U "%m\t%m\t%m\t%F-[0,1,2,3]\t\t\n"'
#tagger = MeCab.Tagger(ipadic.MECAB_ARGS + CHASEN_ARGS)
#import MeCab
if mecab_dict_path is not None:
self.mecab = MeCab.Tagger(ipadic.MECAB_ARGS + CHASEN_ARGS + ' -d {}'.format(mecab_dict_path))
else:
self.mecab = MeCab.Tagger(ipadic.MECAB_ARGS + CHASEN_ARGS)
def tokenize(self, text, never_split=None, with_info=False): #, never_split=None, with_info=False, **kwargs):
"""Tokenizes a piece of text."""
never_split = self.never_split + (never_split if never_split is not None else [])
text = unicodedata.normalize('NFKC', text)
tokens = []
token_infos = []
cursor = 0
for line in self.mecab.parse(text).split('\n'):
if line == 'EOS':
if self.preserve_spaces and len(text[cursor:]) > 0:
tokens.append(text[cursor:])
token_infos.append(None)
break
#print('mecab output line={}, eles={}'.format(line, len(line.split('\t'))))
#token, token_info = line.split('\t')
eles = line.split('\t')
token = eles[0]
token_info = '\t'.join(eles[1:])
token_start = text.index(token, cursor)
token_end = token_start + len(token)
if self.preserve_spaces and cursor < token_start:
tokens.append(text[cursor:token_start])
token_infos.append(None)
if self.do_lower_case and token not in never_split:
token = token.lower()
tokens.append(token)
token_infos.append(token_info)
cursor = token_end
assert len(tokens) == len(token_infos)
if with_info:
return tokens, token_infos
else:
return tokens
    def tokenize_old(self, text):  # legacy method from the English BERT tokenizer, not used for Japanese
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
# 类似于从"Montréal, über, 12.89, Mère, Françoise, noël, 889"
# 到:Montreal, uber, 12.89, Mere, Francoise, noel, 889
text = unicodedata.normalize("NFD", text) # 'Montréal, über, 12.89, Mère, Françoise, noël, 889'
# -》 'Montréal, über, 12.89, Mère, Françoise, noël, 889' 分离开了字母和逻辑重音
# e ̀
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
                output.append(char)  # every CJK character ends up as its own token, which is not ideal! TODO (not used now)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
        self.vocab = vocab  # OrderedDict mapping token -> id
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
            already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
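# A self-contained sketch of the greedy longest-match-first behaviour documented
# above, using a toy vocabulary (an illustrative assumption, not a real BERT vocab).
def _example_wordpiece():
    toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
    tokenizer = WordpieceTokenizer(vocab=toy_vocab)
    assert tokenizer.tokenize("unaffable") == ["un", "##aff", "##able"]
    # A word that cannot be fully covered by the vocabulary collapses to [UNK].
    assert tokenizer.tokenize("xyz") == ["[UNK]"]
    return tokenizer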
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs": # [Zs] Separator, Space
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"): # [Cc] Other, Control; [Cf] Other, Format
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
# [Pc] Punctuation, Connector
# [Pd] Punctuation, Dash
# [Pe] Punctuation, Close
# [Pf] Punctuation, Final quote (may behave like Ps or Pe depending on usage)
# [Pi] Punctuation, Initial quote (may behave like Ps or Pe depending on usage)
# [Po] Punctuation, Other
# [Ps] Punctuation, Open
return True
return False
| 36.142857
| 125
| 0.599337
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
def convert_to_unicode(text):  # Japanese input is handled fine here
    if six.PY3:  # six.PY3 is a boolean indicating whether we are running on Python 3
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
            # errors='ignore' selects the decode error handling; with 'strict', a decoding error raises UnicodeError.
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
                break  # readline() returns '' only at EOF, so breaking here is correct
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
output = []
for item in items:
        output.append(vocab[item])  # note: vocab maps token -> id; an item missing from the vocab raises KeyError
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
text = text.strip()
if not text:
return []
tokens = text.split()
    return tokens  # the input text is split on whitespace to build this list of tokens
class FullTokenizer(object):
# refer to https://github.com/cl-tohoku/bert-japanese/blob/master/tokenization.py
def __init__(self, vocab_file, do_lower_case=True, mecab_dict_path=None):
self.vocab = load_vocab(vocab_file)
# str:id vocab_file 'C:\\Users\\user\\source\\repos\\megatron\\megatron\\pretrained\\bert-large-cased-vocab.txt' str
        self.inv_vocab = {v: k for k, v in self.vocab.items()}  # id -> token, the inverse of the vocabulary
self.basic_tokenizer = MecabBasicTokenizer(do_lower_case=do_lower_case, mecab_dict_path=mecab_dict_path)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) # keep using existing method (no change)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
def vocab_size(self):
return len(self.vocab)
class MecabBasicTokenizer(object):
def __init__(self, do_lower_case=True, never_split=None, mecab_dict_path=None,
preserve_spaces=False):
self.do_lower_case = do_lower_case
if never_split is None:
never_split = []
self.never_split = never_split
self.preserve_spaces = preserve_spaces
import MeCab # TODO
import ipadic
CHASEN_ARGS = r' -F "%m\t%f[7]\t%f[6]\t%F-[0,1,2,3]\t%f[4]\t%f[5]\n"'
CHASEN_ARGS += r' -U "%m\t%m\t%m\t%F-[0,1,2,3]\t\t\n"'
#tagger = MeCab.Tagger(ipadic.MECAB_ARGS + CHASEN_ARGS)
#import MeCab
if mecab_dict_path is not None:
self.mecab = MeCab.Tagger(ipadic.MECAB_ARGS + CHASEN_ARGS + ' -d {}'.format(mecab_dict_path))
else:
self.mecab = MeCab.Tagger(ipadic.MECAB_ARGS + CHASEN_ARGS)
def tokenize(self, text, never_split=None, with_info=False): #, never_split=None, with_info=False, **kwargs):
never_split = self.never_split + (never_split if never_split is not None else [])
text = unicodedata.normalize('NFKC', text)
tokens = []
token_infos = []
cursor = 0
for line in self.mecab.parse(text).split('\n'):
if line == 'EOS':
if self.preserve_spaces and len(text[cursor:]) > 0:
tokens.append(text[cursor:])
token_infos.append(None)
break
#print('mecab output line={}, eles={}'.format(line, len(line.split('\t'))))
#token, token_info = line.split('\t')
eles = line.split('\t')
token = eles[0]
token_info = '\t'.join(eles[1:])
token_start = text.index(token, cursor)
token_end = token_start + len(token)
if self.preserve_spaces and cursor < token_start:
tokens.append(text[cursor:token_start])
token_infos.append(None)
if self.do_lower_case and token not in never_split:
token = token.lower()
tokens.append(token)
token_infos.append(token_info)
cursor = token_end
assert len(tokens) == len(token_infos)
if with_info:
return tokens, token_infos
else:
return tokens
    def tokenize_old(self, text):  # legacy method from the English BERT tokenizer, not used for Japanese
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
# 类似于从"Montréal, über, 12.89, Mère, Françoise, noël, 889"
# 到:Montreal, uber, 12.89, Mere, Francoise, noel, 889
text = unicodedata.normalize("NFD", text) # 'Montréal, über, 12.89, Mère, Françoise, noël, 889'
# -》 'Montréal, über, 12.89, Mère, Françoise, noël, 889' 分离开了字母和逻辑重音
# e ̀
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
                output.append(char)  # every CJK character ends up as its own token, which is not ideal! TODO (not used now)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
        self.vocab = vocab  # OrderedDict mapping token -> id
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs": # [Zs] Separator, Space
return True
return False
def _is_control(char):
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"): # [Cc] Other, Control; [Cf] Other, Format
return True
return False
def _is_punctuation(char):
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
# [Pc] Punctuation, Connector
# [Pd] Punctuation, Dash
# [Pe] Punctuation, Close
# [Pf] Punctuation, Final quote (may behave like Ps or Pe depending on usage)
# [Pi] Punctuation, Initial quote (may behave like Ps or Pe depending on usage)
# [Po] Punctuation, Other
# [Ps] Punctuation, Open
return True
return False
| true
| true
|
f7184fbe3298b23a055b69dc325807e7b96a0395
| 9,213
|
py
|
Python
|
app/organisation/rest.py
|
alphagov/notify-notifications-api
|
e604385e0cf4c2ab8c6451b7120ceb196cce21b5
|
[
"MIT"
] | null | null | null |
app/organisation/rest.py
|
alphagov/notify-notifications-api
|
e604385e0cf4c2ab8c6451b7120ceb196cce21b5
|
[
"MIT"
] | null | null | null |
app/organisation/rest.py
|
alphagov/notify-notifications-api
|
e604385e0cf4c2ab8c6451b7120ceb196cce21b5
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, abort, current_app, jsonify, request
from sqlalchemy.exc import IntegrityError
from app.config import QueueNames
from app.dao.annual_billing_dao import set_default_free_allowance_for_service
from app.dao.dao_utils import transaction
from app.dao.fact_billing_dao import fetch_usage_year_for_organisation
from app.dao.organisation_dao import (
dao_add_service_to_organisation,
dao_add_user_to_organisation,
dao_create_organisation,
dao_get_organisation_by_email_address,
dao_get_organisation_by_id,
dao_get_organisation_services,
dao_get_organisations,
dao_get_users_for_organisation,
dao_remove_user_from_organisation,
dao_update_organisation,
)
from app.dao.services_dao import dao_fetch_service_by_id
from app.dao.templates_dao import dao_get_template_by_id
from app.dao.users_dao import get_user_by_id
from app.errors import InvalidRequest, register_errors
from app.models import KEY_TYPE_NORMAL, NHS_ORGANISATION_TYPES, Organisation
from app.notifications.process_notifications import (
persist_notification,
send_notification_to_queue,
)
from app.organisation.organisation_schema import (
post_create_organisation_schema,
post_link_service_to_organisation_schema,
post_update_organisation_schema,
)
from app.schema_validation import validate
organisation_blueprint = Blueprint('organisation', __name__)
register_errors(organisation_blueprint)
@organisation_blueprint.errorhandler(IntegrityError)
def handle_integrity_error(exc):
"""
Handle integrity errors caused by the unique constraint on ix_organisation_name
"""
if 'ix_organisation_name' in str(exc):
return jsonify(result="error",
message="Organisation name already exists"), 400
if 'duplicate key value violates unique constraint "domain_pkey"' in str(exc):
return jsonify(result='error',
message='Domain already exists'), 400
current_app.logger.exception(exc)
return jsonify(result='error', message="Internal server error"), 500
@organisation_blueprint.route('', methods=['GET'])
def get_organisations():
organisations = [
org.serialize_for_list() for org in dao_get_organisations()
]
return jsonify(organisations)
@organisation_blueprint.route('/<uuid:organisation_id>', methods=['GET'])
def get_organisation_by_id(organisation_id):
organisation = dao_get_organisation_by_id(organisation_id)
return jsonify(organisation.serialize())
@organisation_blueprint.route('/by-domain', methods=['GET'])
def get_organisation_by_domain():
domain = request.args.get('domain')
if not domain or '@' in domain:
abort(400)
organisation = dao_get_organisation_by_email_address(
'example@{}'.format(request.args.get('domain'))
)
if not organisation:
abort(404)
return jsonify(organisation.serialize())
@organisation_blueprint.route('', methods=['POST'])
def create_organisation():
data = request.get_json()
validate(data, post_create_organisation_schema)
if data["organisation_type"] in NHS_ORGANISATION_TYPES:
data["email_branding_id"] = current_app.config['NHS_EMAIL_BRANDING_ID']
organisation = Organisation(**data)
dao_create_organisation(organisation)
return jsonify(organisation.serialize()), 201
@organisation_blueprint.route('/<uuid:organisation_id>', methods=['POST'])
def update_organisation(organisation_id):
data = request.get_json()
validate(data, post_update_organisation_schema)
organisation = dao_get_organisation_by_id(organisation_id)
if data.get('organisation_type') in NHS_ORGANISATION_TYPES and not organisation.email_branding_id:
data["email_branding_id"] = current_app.config['NHS_EMAIL_BRANDING_ID']
result = dao_update_organisation(organisation_id, **data)
if data.get('agreement_signed') is True:
# if a platform admin has manually adjusted the organisation, don't tell people
if data.get('agreement_signed_by_id'):
send_notifications_on_mou_signed(organisation_id)
if result:
return '', 204
else:
raise InvalidRequest("Organisation not found", 404)
@organisation_blueprint.route('/<uuid:organisation_id>/service', methods=['POST'])
def link_service_to_organisation(organisation_id):
data = request.get_json()
validate(data, post_link_service_to_organisation_schema)
service = dao_fetch_service_by_id(data['service_id'])
service.organisation = None
with transaction():
dao_add_service_to_organisation(service, organisation_id)
set_default_free_allowance_for_service(service, year_start=None)
return '', 204
@organisation_blueprint.route('/<uuid:organisation_id>/services', methods=['GET'])
def get_organisation_services(organisation_id):
services = dao_get_organisation_services(organisation_id)
sorted_services = sorted(services, key=lambda s: (-s.active, s.name))
return jsonify([s.serialize_for_org_dashboard() for s in sorted_services])
@organisation_blueprint.route('/<uuid:organisation_id>/services-with-usage', methods=['GET'])
def get_organisation_services_usage(organisation_id):
try:
year = int(request.args.get('year', 'none'))
except ValueError:
return jsonify(result='error', message='No valid year provided'), 400
services = fetch_usage_year_for_organisation(organisation_id, year)
list_services = services.values()
sorted_services = sorted(list_services, key=lambda s: (-s['active'], s['service_name'].lower()))
return jsonify(services=sorted_services)
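# A tiny self-contained sketch of the sort key used above: active services first,
# then case-insensitive service name. The sample dicts are illustrative assumptions.
def _example_services_sort_order():
    services = [
        {'active': False, 'service_name': 'Archived letters'},
        {'active': True, 'service_name': 'beta service'},
        {'active': True, 'service_name': 'Alpha service'},
    ]
    ordered = sorted(services, key=lambda s: (-s['active'], s['service_name'].lower()))
    assert [s['service_name'] for s in ordered] == [
        'Alpha service', 'beta service', 'Archived letters']
    return ordered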
@organisation_blueprint.route('/<uuid:organisation_id>/users/<uuid:user_id>', methods=['POST'])
def add_user_to_organisation(organisation_id, user_id):
new_org_user = dao_add_user_to_organisation(organisation_id, user_id)
return jsonify(data=new_org_user.serialize())
@organisation_blueprint.route('/<uuid:organisation_id>/users/<uuid:user_id>', methods=['DELETE'])
def remove_user_from_organisation(organisation_id, user_id):
organisation = dao_get_organisation_by_id(organisation_id)
user = get_user_by_id(user_id=user_id)
if user not in organisation.users:
error = 'User not found'
raise InvalidRequest(error, status_code=404)
dao_remove_user_from_organisation(organisation, user)
return {}, 204
@organisation_blueprint.route('/<uuid:organisation_id>/users', methods=['GET'])
def get_organisation_users(organisation_id):
org_users = dao_get_users_for_organisation(organisation_id)
return jsonify(data=[x.serialize() for x in org_users])
def check_request_args(request):
org_id = request.args.get('org_id')
name = request.args.get('name', None)
errors = []
if not org_id:
errors.append({'org_id': ["Can't be empty"]})
if not name:
errors.append({'name': ["Can't be empty"]})
if errors:
raise InvalidRequest(errors, status_code=400)
return org_id, name
def send_notifications_on_mou_signed(organisation_id):
organisation = dao_get_organisation_by_id(organisation_id)
notify_service = dao_fetch_service_by_id(current_app.config['NOTIFY_SERVICE_ID'])
def _send_notification(template_id, recipient, personalisation):
template = dao_get_template_by_id(template_id)
saved_notification = persist_notification(
template_id=template.id,
template_version=template.version,
recipient=recipient,
service=notify_service,
personalisation=personalisation,
notification_type=template.template_type,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
reply_to_text=notify_service.get_default_reply_to_email_address()
)
send_notification_to_queue(saved_notification, research_mode=False, queue=QueueNames.NOTIFY)
personalisation = {
'mou_link': '{}/agreement/{}.pdf'.format(
current_app.config['ADMIN_BASE_URL'],
'crown' if organisation.crown else 'non-crown'
),
'org_name': organisation.name,
'org_dashboard_link': '{}/organisations/{}'.format(
current_app.config['ADMIN_BASE_URL'],
organisation.id
),
'signed_by_name': organisation.agreement_signed_by.name,
'on_behalf_of_name': organisation.agreement_signed_on_behalf_of_name
}
if not organisation.agreement_signed_on_behalf_of_email_address:
signer_template_id = 'MOU_SIGNER_RECEIPT_TEMPLATE_ID'
else:
signer_template_id = 'MOU_SIGNED_ON_BEHALF_SIGNER_RECEIPT_TEMPLATE_ID'
# let the person who has been signed on behalf of know.
_send_notification(
current_app.config['MOU_SIGNED_ON_BEHALF_ON_BEHALF_RECEIPT_TEMPLATE_ID'],
organisation.agreement_signed_on_behalf_of_email_address,
personalisation
)
# let the person who signed know - the template is different depending on if they signed on behalf of someone
_send_notification(
current_app.config[signer_template_id],
organisation.agreement_signed_by.email_address,
personalisation
)
| 36.705179
| 113
| 0.74308
|
from flask import Blueprint, abort, current_app, jsonify, request
from sqlalchemy.exc import IntegrityError
from app.config import QueueNames
from app.dao.annual_billing_dao import set_default_free_allowance_for_service
from app.dao.dao_utils import transaction
from app.dao.fact_billing_dao import fetch_usage_year_for_organisation
from app.dao.organisation_dao import (
dao_add_service_to_organisation,
dao_add_user_to_organisation,
dao_create_organisation,
dao_get_organisation_by_email_address,
dao_get_organisation_by_id,
dao_get_organisation_services,
dao_get_organisations,
dao_get_users_for_organisation,
dao_remove_user_from_organisation,
dao_update_organisation,
)
from app.dao.services_dao import dao_fetch_service_by_id
from app.dao.templates_dao import dao_get_template_by_id
from app.dao.users_dao import get_user_by_id
from app.errors import InvalidRequest, register_errors
from app.models import KEY_TYPE_NORMAL, NHS_ORGANISATION_TYPES, Organisation
from app.notifications.process_notifications import (
persist_notification,
send_notification_to_queue,
)
from app.organisation.organisation_schema import (
post_create_organisation_schema,
post_link_service_to_organisation_schema,
post_update_organisation_schema,
)
from app.schema_validation import validate
organisation_blueprint = Blueprint('organisation', __name__)
register_errors(organisation_blueprint)
@organisation_blueprint.errorhandler(IntegrityError)
def handle_integrity_error(exc):
if 'ix_organisation_name' in str(exc):
return jsonify(result="error",
message="Organisation name already exists"), 400
if 'duplicate key value violates unique constraint "domain_pkey"' in str(exc):
return jsonify(result='error',
message='Domain already exists'), 400
current_app.logger.exception(exc)
return jsonify(result='error', message="Internal server error"), 500
@organisation_blueprint.route('', methods=['GET'])
def get_organisations():
organisations = [
org.serialize_for_list() for org in dao_get_organisations()
]
return jsonify(organisations)
@organisation_blueprint.route('/<uuid:organisation_id>', methods=['GET'])
def get_organisation_by_id(organisation_id):
organisation = dao_get_organisation_by_id(organisation_id)
return jsonify(organisation.serialize())
@organisation_blueprint.route('/by-domain', methods=['GET'])
def get_organisation_by_domain():
domain = request.args.get('domain')
if not domain or '@' in domain:
abort(400)
organisation = dao_get_organisation_by_email_address(
'example@{}'.format(request.args.get('domain'))
)
if not organisation:
abort(404)
return jsonify(organisation.serialize())
@organisation_blueprint.route('', methods=['POST'])
def create_organisation():
data = request.get_json()
validate(data, post_create_organisation_schema)
if data["organisation_type"] in NHS_ORGANISATION_TYPES:
data["email_branding_id"] = current_app.config['NHS_EMAIL_BRANDING_ID']
organisation = Organisation(**data)
dao_create_organisation(organisation)
return jsonify(organisation.serialize()), 201
@organisation_blueprint.route('/<uuid:organisation_id>', methods=['POST'])
def update_organisation(organisation_id):
data = request.get_json()
validate(data, post_update_organisation_schema)
organisation = dao_get_organisation_by_id(organisation_id)
if data.get('organisation_type') in NHS_ORGANISATION_TYPES and not organisation.email_branding_id:
data["email_branding_id"] = current_app.config['NHS_EMAIL_BRANDING_ID']
result = dao_update_organisation(organisation_id, **data)
if data.get('agreement_signed') is True:
if data.get('agreement_signed_by_id'):
send_notifications_on_mou_signed(organisation_id)
if result:
return '', 204
else:
raise InvalidRequest("Organisation not found", 404)
@organisation_blueprint.route('/<uuid:organisation_id>/service', methods=['POST'])
def link_service_to_organisation(organisation_id):
data = request.get_json()
validate(data, post_link_service_to_organisation_schema)
service = dao_fetch_service_by_id(data['service_id'])
service.organisation = None
with transaction():
dao_add_service_to_organisation(service, organisation_id)
set_default_free_allowance_for_service(service, year_start=None)
return '', 204
@organisation_blueprint.route('/<uuid:organisation_id>/services', methods=['GET'])
def get_organisation_services(organisation_id):
services = dao_get_organisation_services(organisation_id)
sorted_services = sorted(services, key=lambda s: (-s.active, s.name))
return jsonify([s.serialize_for_org_dashboard() for s in sorted_services])
@organisation_blueprint.route('/<uuid:organisation_id>/services-with-usage', methods=['GET'])
def get_organisation_services_usage(organisation_id):
try:
year = int(request.args.get('year', 'none'))
except ValueError:
return jsonify(result='error', message='No valid year provided'), 400
services = fetch_usage_year_for_organisation(organisation_id, year)
list_services = services.values()
sorted_services = sorted(list_services, key=lambda s: (-s['active'], s['service_name'].lower()))
return jsonify(services=sorted_services)
@organisation_blueprint.route('/<uuid:organisation_id>/users/<uuid:user_id>', methods=['POST'])
def add_user_to_organisation(organisation_id, user_id):
new_org_user = dao_add_user_to_organisation(organisation_id, user_id)
return jsonify(data=new_org_user.serialize())
@organisation_blueprint.route('/<uuid:organisation_id>/users/<uuid:user_id>', methods=['DELETE'])
def remove_user_from_organisation(organisation_id, user_id):
organisation = dao_get_organisation_by_id(organisation_id)
user = get_user_by_id(user_id=user_id)
if user not in organisation.users:
error = 'User not found'
raise InvalidRequest(error, status_code=404)
dao_remove_user_from_organisation(organisation, user)
return {}, 204
@organisation_blueprint.route('/<uuid:organisation_id>/users', methods=['GET'])
def get_organisation_users(organisation_id):
org_users = dao_get_users_for_organisation(organisation_id)
return jsonify(data=[x.serialize() for x in org_users])
def check_request_args(request):
org_id = request.args.get('org_id')
name = request.args.get('name', None)
errors = []
if not org_id:
errors.append({'org_id': ["Can't be empty"]})
if not name:
errors.append({'name': ["Can't be empty"]})
if errors:
raise InvalidRequest(errors, status_code=400)
return org_id, name
def send_notifications_on_mou_signed(organisation_id):
organisation = dao_get_organisation_by_id(organisation_id)
notify_service = dao_fetch_service_by_id(current_app.config['NOTIFY_SERVICE_ID'])
def _send_notification(template_id, recipient, personalisation):
template = dao_get_template_by_id(template_id)
saved_notification = persist_notification(
template_id=template.id,
template_version=template.version,
recipient=recipient,
service=notify_service,
personalisation=personalisation,
notification_type=template.template_type,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
reply_to_text=notify_service.get_default_reply_to_email_address()
)
send_notification_to_queue(saved_notification, research_mode=False, queue=QueueNames.NOTIFY)
personalisation = {
'mou_link': '{}/agreement/{}.pdf'.format(
current_app.config['ADMIN_BASE_URL'],
'crown' if organisation.crown else 'non-crown'
),
'org_name': organisation.name,
'org_dashboard_link': '{}/organisations/{}'.format(
current_app.config['ADMIN_BASE_URL'],
organisation.id
),
'signed_by_name': organisation.agreement_signed_by.name,
'on_behalf_of_name': organisation.agreement_signed_on_behalf_of_name
}
if not organisation.agreement_signed_on_behalf_of_email_address:
signer_template_id = 'MOU_SIGNER_RECEIPT_TEMPLATE_ID'
else:
signer_template_id = 'MOU_SIGNED_ON_BEHALF_SIGNER_RECEIPT_TEMPLATE_ID'
# let the person who has been signed on behalf of know.
_send_notification(
current_app.config['MOU_SIGNED_ON_BEHALF_ON_BEHALF_RECEIPT_TEMPLATE_ID'],
organisation.agreement_signed_on_behalf_of_email_address,
personalisation
)
# let the person who signed know - the template is different depending on if they signed on behalf of someone
_send_notification(
current_app.config[signer_template_id],
organisation.agreement_signed_by.email_address,
personalisation
)
| true
| true
|
f7184fe1e35a9bafa86e0dff622a375b70bb8869
| 24,838
|
py
|
Python
|
src/python/pants/bsp/util_rules/targets.py
|
wonlay/pants
|
53c66503b6898e83c9c9596e56cde5ad9ed6a0d3
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/bsp/util_rules/targets.py
|
wonlay/pants
|
53c66503b6898e83c9c9596e56cde5ad9ed6a0d3
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/bsp/util_rules/targets.py
|
wonlay/pants
|
53c66503b6898e83c9c9596e56cde5ad9ed6a0d3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import itertools
import logging
from collections import defaultdict
from dataclasses import dataclass
from pathlib import Path
from typing import ClassVar, Generic, Sequence, Type, TypeVar
import toml
from typing_extensions import Protocol
from pants.base.build_root import BuildRoot
from pants.base.glob_match_error_behavior import GlobMatchErrorBehavior
from pants.base.specs import AddressSpecs, Specs
from pants.base.specs_parser import SpecsParser
from pants.bsp.goal import BSPGoal
from pants.bsp.protocol import BSPHandlerMapping
from pants.bsp.spec.base import (
BSPData,
BuildTarget,
BuildTargetCapabilities,
BuildTargetIdentifier,
StatusCode,
)
from pants.bsp.spec.targets import (
DependencyModule,
DependencyModulesItem,
DependencyModulesParams,
DependencyModulesResult,
DependencySourcesItem,
DependencySourcesParams,
DependencySourcesResult,
SourceItem,
SourceItemKind,
SourcesItem,
SourcesParams,
SourcesResult,
WorkspaceBuildTargetsParams,
WorkspaceBuildTargetsResult,
)
from pants.engine.fs import DigestContents, PathGlobs, Workspace
from pants.engine.internals.native_engine import EMPTY_DIGEST, Digest, MergeDigests
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.rules import _uncacheable_rule, collect_rules, rule
from pants.engine.target import (
FieldSet,
SourcesField,
SourcesPaths,
SourcesPathsRequest,
Target,
Targets,
)
from pants.engine.unions import UnionMembership, UnionRule, union
from pants.source.source_root import SourceRootsRequest, SourceRootsResult
from pants.util.frozendict import FrozenDict
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import bullet_list
_logger = logging.getLogger(__name__)
_FS = TypeVar("_FS", bound=FieldSet)
@union
@dataclass(frozen=True)
class BSPResolveFieldFactoryRequest(Generic[_FS]):
"""Requests an implementation of `BSPResolveFieldFactory` which can filter resolve fields.
TODO: This is to work around the fact that Field value defaulting cannot have arbitrary
subsystem requirements, and so `JvmResolveField` and `PythonResolveField` have methods
    which compute the true value of the field given a subsystem argument. Consumers need to
be type aware, and `@rules` cannot have dynamic requirements.
See https://github.com/pantsbuild/pants/issues/12934 about potentially allowing unions
(including Field registrations) to have `@rule_helper` methods, which would allow the
computation of an AsyncFields to directly require a subsystem.
"""
resolve_prefix: ClassVar[str]
# TODO: Workaround for https://github.com/python/mypy/issues/5485, because we cannot directly use
# a Callable.
class _ResolveFieldFactory(Protocol):
def __call__(self, target: Target) -> str | None:
pass
@dataclass(frozen=True)
class BSPResolveFieldFactoryResult:
"""Computes the resolve field value for a Target, if applicable."""
resolve_field_value: _ResolveFieldFactory
@union
@dataclass(frozen=True)
class BSPBuildTargetsMetadataRequest(Generic[_FS]):
"""Hook to allow language backends to provide metadata for BSP build targets."""
language_id: ClassVar[str]
can_merge_metadata_from: ClassVar[tuple[str, ...]]
field_set_type: ClassVar[Type[_FS]]
field_sets: tuple[_FS, ...]
@dataclass(frozen=True)
class BSPBuildTargetsMetadataResult:
"""Response type for a BSPBuildTargetsMetadataRequest."""
# Metadata for the `data` field of the final `BuildTarget`.
metadata: BSPData | None = None
# Output to write into `.pants.d/bsp` for access by IDE.
digest: Digest = EMPTY_DIGEST
@dataclass(frozen=True)
class BSPTargetDefinition:
display_name: str | None
base_directory: str | None
addresses: tuple[str, ...]
resolve_filter: str | None
@dataclass(frozen=True)
class BSPBuildTargetInternal:
name: str
specs: Specs
definition: BSPTargetDefinition
@property
def bsp_target_id(self) -> BuildTargetIdentifier:
return BuildTargetIdentifier(f"pants:{self.name}")
@dataclass(frozen=True)
class BSPBuildTargetSourcesInfo:
"""Source files and roots for a BSP build target.
It is a separate class so that it is computed lazily only when called for by an RPC call.
"""
source_files: frozenset[str]
source_roots: frozenset[str]
@dataclass(frozen=True)
class BSPBuildTargets:
targets_mapping: FrozenDict[str, BSPBuildTargetInternal]
@dataclass(frozen=True)
class _ParseOneBSPMappingRequest:
name: str
definition: BSPTargetDefinition
@rule
async def parse_one_bsp_mapping(request: _ParseOneBSPMappingRequest) -> BSPBuildTargetInternal:
specs_parser = SpecsParser()
specs = specs_parser.parse_specs(request.definition.addresses)
return BSPBuildTargetInternal(request.name, specs, request.definition)
@rule
async def materialize_bsp_build_targets(bsp_goal: BSPGoal) -> BSPBuildTargets:
definitions: dict[str, BSPTargetDefinition] = {}
for config_file in bsp_goal.groups_config_files:
config_contents = await Get(
DigestContents,
PathGlobs(
[config_file],
glob_match_error_behavior=GlobMatchErrorBehavior.error,
description_of_origin=f"BSP config file `{config_file}`",
),
)
if len(config_contents) == 0:
raise ValueError(f"BSP targets config file `{config_file}` does not exist.")
elif len(config_contents) > 1:
raise ValueError(
f"BSP targets config file specified as `{config_file}` matches multiple files. "
"Please do not use wildcards in config file paths."
)
config = toml.loads(config_contents[0].content.decode())
groups = config.get("groups")
if groups is None:
raise ValueError(
f"BSP targets config file `{config_file}` is missing the `groups` table."
)
if not isinstance(groups, dict):
raise ValueError(
f"BSP targets config file `{config_file}` contains a `groups` key that is not a TOML table."
)
for id, group in groups.items():
if not isinstance(group, dict):
raise ValueError(
f"BSP targets config file `{config_file}` contains an entry for "
"`groups` array that is not a dictionary (index={i})."
)
base_directory = group.get("base_directory")
display_name = group.get("display_name")
addresses = group.get("addresses", [])
if not addresses:
raise ValueError(
f"BSP targets config file `{config_file}` contains group ID `{id}` which has "
"no address specs defined via the `addresses` key. Please specify at least "
"one address spec."
)
resolve_filter = group.get("resolve")
definitions[id] = BSPTargetDefinition(
display_name=display_name,
base_directory=base_directory,
addresses=tuple(addresses),
resolve_filter=resolve_filter,
)
bsp_internal_targets = await MultiGet(
Get(BSPBuildTargetInternal, _ParseOneBSPMappingRequest(name, definition))
for name, definition in definitions.items()
)
target_mapping = {
key: bsp_internal_target
for key, bsp_internal_target in zip(definitions.keys(), bsp_internal_targets)
}
return BSPBuildTargets(FrozenDict(target_mapping))
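# A minimal sketch of the `groups` TOML layout consumed above. The group id, the
# addresses and the resolve value are illustrative assumptions, not real targets.
def _example_groups_config():
    sample = (
        '[groups.jvm-code]\n'
        'display_name = "JVM code"\n'
        'base_directory = "src/jvm"\n'
        'addresses = ["src/jvm::", "tests/jvm::"]\n'
        'resolve = "jvm:jvm-default"\n'
    )
    config = toml.loads(sample)
    group = config["groups"]["jvm-code"]
    assert group["addresses"] == ["src/jvm::", "tests/jvm::"]
    return group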
@rule
async def resolve_bsp_build_target_identifier(
bsp_target_id: BuildTargetIdentifier, bsp_build_targets: BSPBuildTargets
) -> BSPBuildTargetInternal:
scheme, _, target_name = bsp_target_id.uri.partition(":")
if scheme != "pants":
raise ValueError(f"Unknown BSP scheme `{scheme}` for BSP target ID `{bsp_target_id}.")
target_internal = bsp_build_targets.targets_mapping.get(target_name)
if not target_internal:
raise ValueError(f"Unknown BSP target name: {target_name}")
return target_internal
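# Illustrative sketch: BSP target ids use a `pants:` URI scheme, so resolution is just a
# prefix split; the group name below is hypothetical:
#     "pants:example/server".partition(":")  ->  ("pants", ":", "example/server")
# and the remainder is looked up in BSPBuildTargets.targets_mapping.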
@rule
async def resolve_bsp_build_target_addresses(
bsp_target: BSPBuildTargetInternal,
union_membership: UnionMembership,
) -> Targets:
targets = await Get(Targets, AddressSpecs, bsp_target.specs.address_specs)
if bsp_target.definition.resolve_filter is None:
return targets
resolve_filter = bsp_target.definition.resolve_filter
resolve_prefix, matched, resolve_value = resolve_filter.partition(":")
if not resolve_prefix or not matched:
raise ValueError(
f"The `resolve` filter for `{bsp_target}` must have a platform or language specific "
f"prefix like `$lang:$filter`, but the configured value: `{resolve_filter}` did not."
)
# TODO: See `BSPResolveFieldFactoryRequest` re: this awkwardness.
factories = await MultiGet(
Get(BSPResolveFieldFactoryResult, BSPResolveFieldFactoryRequest, request())
for request in union_membership.get(BSPResolveFieldFactoryRequest)
if request.resolve_prefix == resolve_prefix
)
return Targets(
t
for t in targets
if any((factory.resolve_field_value)(t) == resolve_value for factory in factories)
)
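# Illustrative sketch: the `resolve` filter handled above has the shape `$lang:$filter`;
# the concrete value below is hypothetical:
#     "jvm:jvm-default".partition(":")  ->  ("jvm", ":", "jvm-default")
# The prefix picks which BSPResolveFieldFactoryRequest implementations are consulted, and
# the remainder is compared against each target's resolve field value.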
@rule
async def resolve_bsp_build_target_source_roots(
bsp_target: BSPBuildTargetInternal,
) -> BSPBuildTargetSourcesInfo:
targets = await Get(Targets, BSPBuildTargetInternal, bsp_target)
targets_with_sources = [tgt for tgt in targets if tgt.has_field(SourcesField)]
sources_paths = await MultiGet(
Get(SourcesPaths, SourcesPathsRequest(tgt[SourcesField])) for tgt in targets_with_sources
)
merged_source_files: set[str] = set()
for sp in sources_paths:
merged_source_files.update(sp.files)
source_roots_result = await Get(
SourceRootsResult, SourceRootsRequest, SourceRootsRequest.for_files(merged_source_files)
)
source_root_paths = {x.path for x in source_roots_result.path_to_root.values()}
return BSPBuildTargetSourcesInfo(
source_files=frozenset(merged_source_files),
source_roots=frozenset(source_root_paths),
)
# -----------------------------------------------------------------------------------------------
# Workspace Build Targets Request
# See https://build-server-protocol.github.io/docs/specification.html#workspace-build-targets-request
# -----------------------------------------------------------------------------------------------
class WorkspaceBuildTargetsHandlerMapping(BSPHandlerMapping):
method_name = "workspace/buildTargets"
request_type = WorkspaceBuildTargetsParams
response_type = WorkspaceBuildTargetsResult
@dataclass(frozen=True)
class GenerateOneBSPBuildTargetRequest:
bsp_target: BSPBuildTargetInternal
@dataclass(frozen=True)
class GenerateOneBSPBuildTargetResult:
build_target: BuildTarget
digest: Digest = EMPTY_DIGEST
def merge_metadata(
metadata_results_by_request_type: Sequence[
tuple[type[BSPBuildTargetsMetadataRequest], BSPBuildTargetsMetadataResult]
],
) -> BSPData | None:
if not metadata_results_by_request_type:
return None
if len(metadata_results_by_request_type) == 1:
return metadata_results_by_request_type[0][1].metadata
    # Naive algorithm (we currently only support the Java and Scala backends): pick the
    # request type that can merge metadata from the other one, and use its metadata.
if len(metadata_results_by_request_type) != 2:
raise AssertionError(
"BSP core rules only support naive ordering of language-backend metadata. Contact Pants developers."
)
if not metadata_results_by_request_type[0][0].can_merge_metadata_from:
metadata_index = 1
elif not metadata_results_by_request_type[1][0].can_merge_metadata_from:
metadata_index = 0
else:
raise AssertionError(
"BSP core rules only support naive ordering of language-backend metadata. Contact Pants developers."
)
# Pretend to merge the metadata into a single piece of metadata, but really just choose the metadata
# from the selected provider.
return metadata_results_by_request_type[metadata_index][1].metadata
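# Illustrative sketch of the selection above with two hypothetical request types, where
# Scala declares it can merge metadata coming from Java:
#     metadata_results_by_request_type[0] -> (JavaMetadataRequest, ...),  can_merge_metadata_from == ()
#     metadata_results_by_request_type[1] -> (ScalaMetadataRequest, ...), can_merge_metadata_from == ("java",)
# Entry 0 cannot merge metadata from another backend, so metadata_index becomes 1 and the
# Scala result's metadata is returned.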
@rule
async def generate_one_bsp_build_target_request(
request: GenerateOneBSPBuildTargetRequest,
union_membership: UnionMembership,
build_root: BuildRoot,
) -> GenerateOneBSPBuildTargetResult:
# Find all Pants targets that are part of this BSP build target.
targets = await Get(Targets, BSPBuildTargetInternal, request.bsp_target)
# Determine whether the targets are compilable.
can_compile = any(
req_type.field_set_type.is_applicable(t) # type: ignore[misc]
for req_type in union_membership[BSPCompileRequest]
for t in targets
)
# Classify the targets by the language backends that claim to provide metadata for them.
field_sets_by_request_type: dict[
type[BSPBuildTargetsMetadataRequest], OrderedSet[FieldSet]
] = defaultdict(OrderedSet)
metadata_request_types: FrozenOrderedSet[
Type[BSPBuildTargetsMetadataRequest]
] = union_membership.get(BSPBuildTargetsMetadataRequest)
metadata_request_types_by_lang_id: dict[str, type[BSPBuildTargetsMetadataRequest]] = {}
for metadata_request_type in metadata_request_types:
previous = metadata_request_types_by_lang_id.get(metadata_request_type.language_id)
if previous:
raise ValueError(
f"Multiple implementations claim to support `{metadata_request_type.language_id}`:"
f"{bullet_list([previous.__name__, metadata_request_type.__name__])}"
"\n"
"Do you have conflicting language support backends enabled?"
)
metadata_request_types_by_lang_id[metadata_request_type.language_id] = metadata_request_type
for tgt in targets:
for metadata_request_type in metadata_request_types:
field_set_type: Type[FieldSet] = metadata_request_type.field_set_type
if field_set_type.is_applicable(tgt):
field_sets_by_request_type[metadata_request_type].add(field_set_type.create(tgt))
# Request each language backend to provide metadata for the BuildTarget, and then merge it.
metadata_results = await MultiGet(
Get(
BSPBuildTargetsMetadataResult,
BSPBuildTargetsMetadataRequest,
request_type(field_sets=tuple(field_sets)),
)
for request_type, field_sets in field_sets_by_request_type.items()
)
metadata = merge_metadata(list(zip(field_sets_by_request_type.keys(), metadata_results)))
digest = await Get(Digest, MergeDigests([r.digest for r in metadata_results]))
# Determine "base directory" for this build target using source roots.
# TODO: This actually has nothing to do with source roots. It should probably be computed as an ancestor
# directory or else be configurable by the user. It is used as a hint in IntelliJ for where to place the
# corresponding IntelliJ module.
source_info = await Get(BSPBuildTargetSourcesInfo, BSPBuildTargetInternal, request.bsp_target)
if source_info.source_roots:
roots = [build_root.pathlib_path.joinpath(p) for p in source_info.source_roots]
else:
roots = []
base_directory: Path | None = None
if request.bsp_target.definition.base_directory:
base_directory = build_root.pathlib_path.joinpath(
request.bsp_target.definition.base_directory
)
elif roots:
base_directory = roots[0]
return GenerateOneBSPBuildTargetResult(
build_target=BuildTarget(
id=BuildTargetIdentifier(f"pants:{request.bsp_target.name}"),
display_name=request.bsp_target.name,
base_directory=base_directory.as_uri() if base_directory else None,
tags=(),
capabilities=BuildTargetCapabilities(
can_compile=can_compile,
can_debug=False,
# TODO: See https://github.com/pantsbuild/pants/issues/15050.
can_run=False,
can_test=False,
),
language_ids=tuple(sorted(req.language_id for req in field_sets_by_request_type)),
dependencies=(),
data=metadata,
),
digest=digest,
)
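# Illustrative sketch of the base-directory selection above, in order of precedence
# (paths are hypothetical):
#   1. the group's configured `base_directory`, e.g. "src/jvm/server", joined to the
#      build root and reported as a file:// URI;
#   2. otherwise the first detected source root, if any;
#   3. otherwise no baseDirectory is reported to the BSP client.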
@_uncacheable_rule
async def bsp_workspace_build_targets(
_: WorkspaceBuildTargetsParams,
bsp_build_targets: BSPBuildTargets,
workspace: Workspace,
) -> WorkspaceBuildTargetsResult:
bsp_target_results = await MultiGet(
Get(GenerateOneBSPBuildTargetResult, GenerateOneBSPBuildTargetRequest(target_internal))
for target_internal in bsp_build_targets.targets_mapping.values()
)
digest = await Get(Digest, MergeDigests([r.digest for r in bsp_target_results]))
if digest != EMPTY_DIGEST:
workspace.write_digest(digest, path_prefix=".pants.d/bsp")
return WorkspaceBuildTargetsResult(
targets=tuple(r.build_target for r in bsp_target_results),
)
# -----------------------------------------------------------------------------------------------
# Build Target Sources Request
# See https://build-server-protocol.github.io/docs/specification.html#build-target-sources-request
# -----------------------------------------------------------------------------------------------
class BuildTargetSourcesHandlerMapping(BSPHandlerMapping):
method_name = "buildTarget/sources"
request_type = SourcesParams
response_type = SourcesResult
@dataclass(frozen=True)
class MaterializeBuildTargetSourcesRequest:
bsp_target_id: BuildTargetIdentifier
@dataclass(frozen=True)
class MaterializeBuildTargetSourcesResult:
sources_item: SourcesItem
@rule
async def materialize_bsp_build_target_sources(
request: MaterializeBuildTargetSourcesRequest,
build_root: BuildRoot,
) -> MaterializeBuildTargetSourcesResult:
bsp_target = await Get(BSPBuildTargetInternal, BuildTargetIdentifier, request.bsp_target_id)
source_info = await Get(BSPBuildTargetSourcesInfo, BSPBuildTargetInternal, bsp_target)
if source_info.source_roots:
roots = [build_root.pathlib_path.joinpath(p) for p in source_info.source_roots]
else:
roots = [build_root.pathlib_path]
sources_item = SourcesItem(
target=request.bsp_target_id,
sources=tuple(
SourceItem(
uri=build_root.pathlib_path.joinpath(filename).as_uri(),
kind=SourceItemKind.FILE,
generated=False,
)
for filename in sorted(source_info.source_files)
),
roots=tuple(r.as_uri() for r in roots),
)
return MaterializeBuildTargetSourcesResult(sources_item)
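# Illustrative sketch: each source file is reported as an absolute file:// URI rooted at
# the build root; the paths below are hypothetical:
#     Path("/repo").joinpath("src/py/app.py").as_uri()  ->  "file:///repo/src/py/app.py"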
@rule
async def bsp_build_target_sources(request: SourcesParams) -> SourcesResult:
sources_items = await MultiGet(
Get(MaterializeBuildTargetSourcesResult, MaterializeBuildTargetSourcesRequest(btgt))
for btgt in request.targets
)
return SourcesResult(items=tuple(si.sources_item for si in sources_items))
# -----------------------------------------------------------------------------------------------
# Dependency Sources Request
# See https://build-server-protocol.github.io/docs/specification.html#dependency-sources-request
# -----------------------------------------------------------------------------------------------
class DependencySourcesHandlerMapping(BSPHandlerMapping):
method_name = "buildTarget/dependencySources"
request_type = DependencySourcesParams
response_type = DependencySourcesResult
@rule
async def bsp_dependency_sources(request: DependencySourcesParams) -> DependencySourcesResult:
# TODO: This is a stub.
return DependencySourcesResult(
tuple(DependencySourcesItem(target=tgt, sources=()) for tgt in request.targets)
)
# -----------------------------------------------------------------------------------------------
# Dependency Modules Request
# See https://build-server-protocol.github.io/docs/specification.html#dependency-modules-request
# -----------------------------------------------------------------------------------------------
@union
@dataclass(frozen=True)
class BSPDependencyModulesRequest(Generic[_FS]):
"""Hook to allow language backends to provide dependency modules."""
field_set_type: ClassVar[Type[_FS]]
field_sets: tuple[_FS, ...]
@dataclass(frozen=True)
class BSPDependencyModulesResult:
modules: tuple[DependencyModule, ...]
digest: Digest = EMPTY_DIGEST
class DependencyModulesHandlerMapping(BSPHandlerMapping):
method_name = "buildTarget/dependencyModules"
request_type = DependencyModulesParams
response_type = DependencyModulesResult
@dataclass(frozen=True)
class ResolveOneDependencyModuleRequest:
bsp_target_id: BuildTargetIdentifier
@dataclass(frozen=True)
class ResolveOneDependencyModuleResult:
bsp_target_id: BuildTargetIdentifier
modules: tuple[DependencyModule, ...] = ()
digest: Digest = EMPTY_DIGEST
@rule
async def resolve_one_dependency_module(
request: ResolveOneDependencyModuleRequest,
union_membership: UnionMembership,
) -> ResolveOneDependencyModuleResult:
targets = await Get(Targets, BuildTargetIdentifier, request.bsp_target_id)
field_sets_by_request_type: dict[
Type[BSPDependencyModulesRequest], list[FieldSet]
] = defaultdict(list)
dep_module_request_types: FrozenOrderedSet[
Type[BSPDependencyModulesRequest]
] = union_membership.get(BSPDependencyModulesRequest)
for tgt in targets:
for dep_module_request_type in dep_module_request_types:
field_set_type = dep_module_request_type.field_set_type
if field_set_type.is_applicable(tgt):
field_set = field_set_type.create(tgt)
field_sets_by_request_type[dep_module_request_type].append(field_set)
if not field_sets_by_request_type:
return ResolveOneDependencyModuleResult(bsp_target_id=request.bsp_target_id)
responses = await MultiGet(
Get(
BSPDependencyModulesResult,
BSPDependencyModulesRequest,
dep_module_request_type(field_sets=tuple(field_sets)),
)
for dep_module_request_type, field_sets in field_sets_by_request_type.items()
)
modules = set(itertools.chain.from_iterable([r.modules for r in responses]))
digest = await Get(Digest, MergeDigests([r.digest for r in responses]))
return ResolveOneDependencyModuleResult(
bsp_target_id=request.bsp_target_id,
modules=tuple(modules),
digest=digest,
)
# Note: VSCode expects this endpoint to exist even if the capability bit for it is set `false`.
@_uncacheable_rule
async def bsp_dependency_modules(
request: DependencyModulesParams, workspace: Workspace
) -> DependencyModulesResult:
responses = await MultiGet(
Get(ResolveOneDependencyModuleResult, ResolveOneDependencyModuleRequest(btgt))
for btgt in request.targets
)
output_digest = await Get(Digest, MergeDigests([r.digest for r in responses]))
workspace.write_digest(output_digest, path_prefix=".pants.d/bsp")
return DependencyModulesResult(
tuple(DependencyModulesItem(target=r.bsp_target_id, modules=r.modules) for r in responses)
)
# -----------------------------------------------------------------------------------------------
# Compile request.
# See https://build-server-protocol.github.io/docs/specification.html#compile-request
# -----------------------------------------------------------------------------------------------
@union
@dataclass(frozen=True)
class BSPCompileRequest(Generic[_FS]):
"""Hook to allow language backends to compile targets."""
field_set_type: ClassVar[Type[_FS]]
bsp_target: BSPBuildTargetInternal
field_sets: tuple[_FS, ...]
@dataclass(frozen=True)
class BSPCompileResult:
"""Result of compilation of a target capable of target compilation."""
status: StatusCode
output_digest: Digest
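# Illustrative sketch of how a language backend might plug into the @union hooks above.
# `ExampleFieldSet` and `ExampleBSPCompileRequest` are hypothetical names; a real backend
# would also provide a @rule turning its request into a BSPCompileResult and register
# `UnionRule(BSPCompileRequest, ExampleBSPCompileRequest)` in its rules().
#
#   @dataclass(frozen=True)
#   class ExampleFieldSet(FieldSet):
#       required_fields = (SourcesField,)
#       sources: SourcesField
#
#   @dataclass(frozen=True)
#   class ExampleBSPCompileRequest(BSPCompileRequest):
#       field_set_type = ExampleFieldSet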
def rules():
return (
*collect_rules(),
UnionRule(BSPHandlerMapping, WorkspaceBuildTargetsHandlerMapping),
UnionRule(BSPHandlerMapping, BuildTargetSourcesHandlerMapping),
UnionRule(BSPHandlerMapping, DependencySourcesHandlerMapping),
UnionRule(BSPHandlerMapping, DependencyModulesHandlerMapping),
)
| 36.366032
| 113
| 0.697882
|
from __future__ import annotations
import itertools
import logging
from collections import defaultdict
from dataclasses import dataclass
from pathlib import Path
from typing import ClassVar, Generic, Sequence, Type, TypeVar
import toml
from typing_extensions import Protocol
from pants.base.build_root import BuildRoot
from pants.base.glob_match_error_behavior import GlobMatchErrorBehavior
from pants.base.specs import AddressSpecs, Specs
from pants.base.specs_parser import SpecsParser
from pants.bsp.goal import BSPGoal
from pants.bsp.protocol import BSPHandlerMapping
from pants.bsp.spec.base import (
BSPData,
BuildTarget,
BuildTargetCapabilities,
BuildTargetIdentifier,
StatusCode,
)
from pants.bsp.spec.targets import (
DependencyModule,
DependencyModulesItem,
DependencyModulesParams,
DependencyModulesResult,
DependencySourcesItem,
DependencySourcesParams,
DependencySourcesResult,
SourceItem,
SourceItemKind,
SourcesItem,
SourcesParams,
SourcesResult,
WorkspaceBuildTargetsParams,
WorkspaceBuildTargetsResult,
)
from pants.engine.fs import DigestContents, PathGlobs, Workspace
from pants.engine.internals.native_engine import EMPTY_DIGEST, Digest, MergeDigests
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.rules import _uncacheable_rule, collect_rules, rule
from pants.engine.target import (
FieldSet,
SourcesField,
SourcesPaths,
SourcesPathsRequest,
Target,
Targets,
)
from pants.engine.unions import UnionMembership, UnionRule, union
from pants.source.source_root import SourceRootsRequest, SourceRootsResult
from pants.util.frozendict import FrozenDict
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import bullet_list
_logger = logging.getLogger(__name__)
_FS = TypeVar("_FS", bound=FieldSet)
@union
@dataclass(frozen=True)
class BSPResolveFieldFactoryRequest(Generic[_FS]):
resolve_prefix: ClassVar[str]
class _ResolveFieldFactory(Protocol):
def __call__(self, target: Target) -> str | None:
pass
@dataclass(frozen=True)
class BSPResolveFieldFactoryResult:
resolve_field_value: _ResolveFieldFactory
@union
@dataclass(frozen=True)
class BSPBuildTargetsMetadataRequest(Generic[_FS]):
language_id: ClassVar[str]
can_merge_metadata_from: ClassVar[tuple[str, ...]]
field_set_type: ClassVar[Type[_FS]]
field_sets: tuple[_FS, ...]
@dataclass(frozen=True)
class BSPBuildTargetsMetadataResult:
metadata: BSPData | None = None
digest: Digest = EMPTY_DIGEST
@dataclass(frozen=True)
class BSPTargetDefinition:
display_name: str | None
base_directory: str | None
addresses: tuple[str, ...]
resolve_filter: str | None
@dataclass(frozen=True)
class BSPBuildTargetInternal:
name: str
specs: Specs
definition: BSPTargetDefinition
@property
def bsp_target_id(self) -> BuildTargetIdentifier:
return BuildTargetIdentifier(f"pants:{self.name}")
@dataclass(frozen=True)
class BSPBuildTargetSourcesInfo:
source_files: frozenset[str]
source_roots: frozenset[str]
@dataclass(frozen=True)
class BSPBuildTargets:
targets_mapping: FrozenDict[str, BSPBuildTargetInternal]
@dataclass(frozen=True)
class _ParseOneBSPMappingRequest:
name: str
definition: BSPTargetDefinition
@rule
async def parse_one_bsp_mapping(request: _ParseOneBSPMappingRequest) -> BSPBuildTargetInternal:
specs_parser = SpecsParser()
specs = specs_parser.parse_specs(request.definition.addresses)
return BSPBuildTargetInternal(request.name, specs, request.definition)
@rule
async def materialize_bsp_build_targets(bsp_goal: BSPGoal) -> BSPBuildTargets:
definitions: dict[str, BSPTargetDefinition] = {}
for config_file in bsp_goal.groups_config_files:
config_contents = await Get(
DigestContents,
PathGlobs(
[config_file],
glob_match_error_behavior=GlobMatchErrorBehavior.error,
description_of_origin=f"BSP config file `{config_file}`",
),
)
if len(config_contents) == 0:
raise ValueError(f"BSP targets config file `{config_file}` does not exist.")
elif len(config_contents) > 1:
raise ValueError(
f"BSP targets config file specified as `{config_file}` matches multiple files. "
"Please do not use wildcards in config file paths."
)
config = toml.loads(config_contents[0].content.decode())
groups = config.get("groups")
if groups is None:
raise ValueError(
f"BSP targets config file `{config_file}` is missing the `groups` table."
)
if not isinstance(groups, dict):
raise ValueError(
f"BSP targets config file `{config_file}` contains a `groups` key that is not a TOML table."
)
for id, group in groups.items():
if not isinstance(group, dict):
                raise ValueError(
                    f"BSP targets config file `{config_file}` contains an entry under "
                    f"`groups` that is not a TOML table (id=`{id}`)."
)
base_directory = group.get("base_directory")
display_name = group.get("display_name")
addresses = group.get("addresses", [])
if not addresses:
raise ValueError(
f"BSP targets config file `{config_file}` contains group ID `{id}` which has "
"no address specs defined via the `addresses` key. Please specify at least "
"one address spec."
)
resolve_filter = group.get("resolve")
definitions[id] = BSPTargetDefinition(
display_name=display_name,
base_directory=base_directory,
addresses=tuple(addresses),
resolve_filter=resolve_filter,
)
bsp_internal_targets = await MultiGet(
Get(BSPBuildTargetInternal, _ParseOneBSPMappingRequest(name, definition))
for name, definition in definitions.items()
)
target_mapping = {
key: bsp_internal_target
for key, bsp_internal_target in zip(definitions.keys(), bsp_internal_targets)
}
return BSPBuildTargets(FrozenDict(target_mapping))
@rule
async def resolve_bsp_build_target_identifier(
bsp_target_id: BuildTargetIdentifier, bsp_build_targets: BSPBuildTargets
) -> BSPBuildTargetInternal:
scheme, _, target_name = bsp_target_id.uri.partition(":")
if scheme != "pants":
        raise ValueError(f"Unknown BSP scheme `{scheme}` for BSP target ID `{bsp_target_id}`.")
target_internal = bsp_build_targets.targets_mapping.get(target_name)
if not target_internal:
raise ValueError(f"Unknown BSP target name: {target_name}")
return target_internal
@rule
async def resolve_bsp_build_target_addresses(
bsp_target: BSPBuildTargetInternal,
union_membership: UnionMembership,
) -> Targets:
targets = await Get(Targets, AddressSpecs, bsp_target.specs.address_specs)
if bsp_target.definition.resolve_filter is None:
return targets
resolve_filter = bsp_target.definition.resolve_filter
resolve_prefix, matched, resolve_value = resolve_filter.partition(":")
if not resolve_prefix or not matched:
raise ValueError(
f"The `resolve` filter for `{bsp_target}` must have a platform or language specific "
f"prefix like `$lang:$filter`, but the configured value: `{resolve_filter}` did not."
)
factories = await MultiGet(
Get(BSPResolveFieldFactoryResult, BSPResolveFieldFactoryRequest, request())
for request in union_membership.get(BSPResolveFieldFactoryRequest)
if request.resolve_prefix == resolve_prefix
)
return Targets(
t
for t in targets
if any((factory.resolve_field_value)(t) == resolve_value for factory in factories)
)
@rule
async def resolve_bsp_build_target_source_roots(
bsp_target: BSPBuildTargetInternal,
) -> BSPBuildTargetSourcesInfo:
targets = await Get(Targets, BSPBuildTargetInternal, bsp_target)
targets_with_sources = [tgt for tgt in targets if tgt.has_field(SourcesField)]
sources_paths = await MultiGet(
Get(SourcesPaths, SourcesPathsRequest(tgt[SourcesField])) for tgt in targets_with_sources
)
merged_source_files: set[str] = set()
for sp in sources_paths:
merged_source_files.update(sp.files)
source_roots_result = await Get(
SourceRootsResult, SourceRootsRequest, SourceRootsRequest.for_files(merged_source_files)
)
source_root_paths = {x.path for x in source_roots_result.path_to_root.values()}
return BSPBuildTargetSourcesInfo(
source_files=frozenset(merged_source_files),
source_roots=frozenset(source_root_paths),
)
class WorkspaceBuildTargetsHandlerMapping(BSPHandlerMapping):
method_name = "workspace/buildTargets"
request_type = WorkspaceBuildTargetsParams
response_type = WorkspaceBuildTargetsResult
@dataclass(frozen=True)
class GenerateOneBSPBuildTargetRequest:
bsp_target: BSPBuildTargetInternal
@dataclass(frozen=True)
class GenerateOneBSPBuildTargetResult:
build_target: BuildTarget
digest: Digest = EMPTY_DIGEST
def merge_metadata(
metadata_results_by_request_type: Sequence[
tuple[type[BSPBuildTargetsMetadataRequest], BSPBuildTargetsMetadataResult]
],
) -> BSPData | None:
if not metadata_results_by_request_type:
return None
if len(metadata_results_by_request_type) == 1:
return metadata_results_by_request_type[0][1].metadata
if len(metadata_results_by_request_type) != 2:
raise AssertionError(
"BSP core rules only support naive ordering of language-backend metadata. Contact Pants developers."
)
if not metadata_results_by_request_type[0][0].can_merge_metadata_from:
metadata_index = 1
elif not metadata_results_by_request_type[1][0].can_merge_metadata_from:
metadata_index = 0
else:
raise AssertionError(
"BSP core rules only support naive ordering of language-backend metadata. Contact Pants developers."
)
return metadata_results_by_request_type[metadata_index][1].metadata
@rule
async def generate_one_bsp_build_target_request(
request: GenerateOneBSPBuildTargetRequest,
union_membership: UnionMembership,
build_root: BuildRoot,
) -> GenerateOneBSPBuildTargetResult:
targets = await Get(Targets, BSPBuildTargetInternal, request.bsp_target)
can_compile = any(
req_type.field_set_type.is_applicable(t)
for req_type in union_membership[BSPCompileRequest]
for t in targets
)
field_sets_by_request_type: dict[
type[BSPBuildTargetsMetadataRequest], OrderedSet[FieldSet]
] = defaultdict(OrderedSet)
metadata_request_types: FrozenOrderedSet[
Type[BSPBuildTargetsMetadataRequest]
] = union_membership.get(BSPBuildTargetsMetadataRequest)
metadata_request_types_by_lang_id: dict[str, type[BSPBuildTargetsMetadataRequest]] = {}
for metadata_request_type in metadata_request_types:
previous = metadata_request_types_by_lang_id.get(metadata_request_type.language_id)
if previous:
raise ValueError(
f"Multiple implementations claim to support `{metadata_request_type.language_id}`:"
f"{bullet_list([previous.__name__, metadata_request_type.__name__])}"
"\n"
"Do you have conflicting language support backends enabled?"
)
metadata_request_types_by_lang_id[metadata_request_type.language_id] = metadata_request_type
for tgt in targets:
for metadata_request_type in metadata_request_types:
field_set_type: Type[FieldSet] = metadata_request_type.field_set_type
if field_set_type.is_applicable(tgt):
field_sets_by_request_type[metadata_request_type].add(field_set_type.create(tgt))
metadata_results = await MultiGet(
Get(
BSPBuildTargetsMetadataResult,
BSPBuildTargetsMetadataRequest,
request_type(field_sets=tuple(field_sets)),
)
for request_type, field_sets in field_sets_by_request_type.items()
)
metadata = merge_metadata(list(zip(field_sets_by_request_type.keys(), metadata_results)))
digest = await Get(Digest, MergeDigests([r.digest for r in metadata_results]))
source_info = await Get(BSPBuildTargetSourcesInfo, BSPBuildTargetInternal, request.bsp_target)
if source_info.source_roots:
roots = [build_root.pathlib_path.joinpath(p) for p in source_info.source_roots]
else:
roots = []
base_directory: Path | None = None
if request.bsp_target.definition.base_directory:
base_directory = build_root.pathlib_path.joinpath(
request.bsp_target.definition.base_directory
)
elif roots:
base_directory = roots[0]
return GenerateOneBSPBuildTargetResult(
build_target=BuildTarget(
id=BuildTargetIdentifier(f"pants:{request.bsp_target.name}"),
display_name=request.bsp_target.name,
base_directory=base_directory.as_uri() if base_directory else None,
tags=(),
capabilities=BuildTargetCapabilities(
can_compile=can_compile,
can_debug=False,
can_run=False,
can_test=False,
),
language_ids=tuple(sorted(req.language_id for req in field_sets_by_request_type)),
dependencies=(),
data=metadata,
),
digest=digest,
)
@_uncacheable_rule
async def bsp_workspace_build_targets(
_: WorkspaceBuildTargetsParams,
bsp_build_targets: BSPBuildTargets,
workspace: Workspace,
) -> WorkspaceBuildTargetsResult:
bsp_target_results = await MultiGet(
Get(GenerateOneBSPBuildTargetResult, GenerateOneBSPBuildTargetRequest(target_internal))
for target_internal in bsp_build_targets.targets_mapping.values()
)
digest = await Get(Digest, MergeDigests([r.digest for r in bsp_target_results]))
if digest != EMPTY_DIGEST:
workspace.write_digest(digest, path_prefix=".pants.d/bsp")
return WorkspaceBuildTargetsResult(
targets=tuple(r.build_target for r in bsp_target_results),
)
class BuildTargetSourcesHandlerMapping(BSPHandlerMapping):
method_name = "buildTarget/sources"
request_type = SourcesParams
response_type = SourcesResult
@dataclass(frozen=True)
class MaterializeBuildTargetSourcesRequest:
bsp_target_id: BuildTargetIdentifier
@dataclass(frozen=True)
class MaterializeBuildTargetSourcesResult:
sources_item: SourcesItem
@rule
async def materialize_bsp_build_target_sources(
request: MaterializeBuildTargetSourcesRequest,
build_root: BuildRoot,
) -> MaterializeBuildTargetSourcesResult:
bsp_target = await Get(BSPBuildTargetInternal, BuildTargetIdentifier, request.bsp_target_id)
source_info = await Get(BSPBuildTargetSourcesInfo, BSPBuildTargetInternal, bsp_target)
if source_info.source_roots:
roots = [build_root.pathlib_path.joinpath(p) for p in source_info.source_roots]
else:
roots = [build_root.pathlib_path]
sources_item = SourcesItem(
target=request.bsp_target_id,
sources=tuple(
SourceItem(
uri=build_root.pathlib_path.joinpath(filename).as_uri(),
kind=SourceItemKind.FILE,
generated=False,
)
for filename in sorted(source_info.source_files)
),
roots=tuple(r.as_uri() for r in roots),
)
return MaterializeBuildTargetSourcesResult(sources_item)
@rule
async def bsp_build_target_sources(request: SourcesParams) -> SourcesResult:
sources_items = await MultiGet(
Get(MaterializeBuildTargetSourcesResult, MaterializeBuildTargetSourcesRequest(btgt))
for btgt in request.targets
)
return SourcesResult(items=tuple(si.sources_item for si in sources_items))
class DependencySourcesHandlerMapping(BSPHandlerMapping):
method_name = "buildTarget/dependencySources"
request_type = DependencySourcesParams
response_type = DependencySourcesResult
@rule
async def bsp_dependency_sources(request: DependencySourcesParams) -> DependencySourcesResult:
return DependencySourcesResult(
tuple(DependencySourcesItem(target=tgt, sources=()) for tgt in request.targets)
)
@union
@dataclass(frozen=True)
class BSPDependencyModulesRequest(Generic[_FS]):
field_set_type: ClassVar[Type[_FS]]
field_sets: tuple[_FS, ...]
@dataclass(frozen=True)
class BSPDependencyModulesResult:
modules: tuple[DependencyModule, ...]
digest: Digest = EMPTY_DIGEST
class DependencyModulesHandlerMapping(BSPHandlerMapping):
method_name = "buildTarget/dependencyModules"
request_type = DependencyModulesParams
response_type = DependencyModulesResult
@dataclass(frozen=True)
class ResolveOneDependencyModuleRequest:
bsp_target_id: BuildTargetIdentifier
@dataclass(frozen=True)
class ResolveOneDependencyModuleResult:
bsp_target_id: BuildTargetIdentifier
modules: tuple[DependencyModule, ...] = ()
digest: Digest = EMPTY_DIGEST
@rule
async def resolve_one_dependency_module(
request: ResolveOneDependencyModuleRequest,
union_membership: UnionMembership,
) -> ResolveOneDependencyModuleResult:
targets = await Get(Targets, BuildTargetIdentifier, request.bsp_target_id)
field_sets_by_request_type: dict[
Type[BSPDependencyModulesRequest], list[FieldSet]
] = defaultdict(list)
dep_module_request_types: FrozenOrderedSet[
Type[BSPDependencyModulesRequest]
] = union_membership.get(BSPDependencyModulesRequest)
for tgt in targets:
for dep_module_request_type in dep_module_request_types:
field_set_type = dep_module_request_type.field_set_type
if field_set_type.is_applicable(tgt):
field_set = field_set_type.create(tgt)
field_sets_by_request_type[dep_module_request_type].append(field_set)
if not field_sets_by_request_type:
return ResolveOneDependencyModuleResult(bsp_target_id=request.bsp_target_id)
responses = await MultiGet(
Get(
BSPDependencyModulesResult,
BSPDependencyModulesRequest,
dep_module_request_type(field_sets=tuple(field_sets)),
)
for dep_module_request_type, field_sets in field_sets_by_request_type.items()
)
modules = set(itertools.chain.from_iterable([r.modules for r in responses]))
digest = await Get(Digest, MergeDigests([r.digest for r in responses]))
return ResolveOneDependencyModuleResult(
bsp_target_id=request.bsp_target_id,
modules=tuple(modules),
digest=digest,
)
@_uncacheable_rule
async def bsp_dependency_modules(
request: DependencyModulesParams, workspace: Workspace
) -> DependencyModulesResult:
responses = await MultiGet(
Get(ResolveOneDependencyModuleResult, ResolveOneDependencyModuleRequest(btgt))
for btgt in request.targets
)
output_digest = await Get(Digest, MergeDigests([r.digest for r in responses]))
workspace.write_digest(output_digest, path_prefix=".pants.d/bsp")
return DependencyModulesResult(
tuple(DependencyModulesItem(target=r.bsp_target_id, modules=r.modules) for r in responses)
)
@union
@dataclass(frozen=True)
class BSPCompileRequest(Generic[_FS]):
field_set_type: ClassVar[Type[_FS]]
bsp_target: BSPBuildTargetInternal
field_sets: tuple[_FS, ...]
@dataclass(frozen=True)
class BSPCompileResult:
status: StatusCode
output_digest: Digest
def rules():
return (
*collect_rules(),
UnionRule(BSPHandlerMapping, WorkspaceBuildTargetsHandlerMapping),
UnionRule(BSPHandlerMapping, BuildTargetSourcesHandlerMapping),
UnionRule(BSPHandlerMapping, DependencySourcesHandlerMapping),
UnionRule(BSPHandlerMapping, DependencyModulesHandlerMapping),
)
| true
| true
|
f71850126d2abeba717e7db8f36f67882ab8adf7
| 2,917
|
py
|
Python
|
ws/src/lab/src/case_suite/case_bringup/launch/opc_r3.launch.py
|
Cobots-Kandidatarbete/cobots
|
8186910e6d30569f95ed6ebe4645ba05ecc53864
|
[
"MIT"
] | 2
|
2022-02-22T13:36:41.000Z
|
2022-02-22T13:39:41.000Z
|
ws/src/lab/src/case_suite/case_bringup/launch/opc_r3.launch.py
|
Cobots-Kandidatarbete/cobots
|
8186910e6d30569f95ed6ebe4645ba05ecc53864
|
[
"MIT"
] | null | null | null |
ws/src/lab/src/case_suite/case_bringup/launch/opc_r3.launch.py
|
Cobots-Kandidatarbete/cobots
|
8186910e6d30569f95ed6ebe4645ba05ecc53864
|
[
"MIT"
] | null | null | null |
import sys
from launch import LaunchDescription, LaunchService
from launch_ros.actions import Node
def generate_launch_description():
opcua_parameters = {
"server_address": "opc.tcp://192.168.100.30:4840/",
"node_ids": ["ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_from_plc_1",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_from_plc_2",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_from_plc_3",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_from_plc_4",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_from_plc_5",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_from_plc_1",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_from_plc_2",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_from_plc_3",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_from_plc_4",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_from_plc_5",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_to_plc_1",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_to_plc_2",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_to_plc_3",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_to_plc_4",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_to_plc_5",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_to_plc_1",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_to_plc_2",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_to_plc_3",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_to_plc_4",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_to_plc_5"
]
}
opc_node = Node(
package="opcua_ros2_bridge",
executable="opcua_ros2_bridge",
namespace="",
output="screen",
arguments=["-d"],
parameters=[opcua_parameters],
remappings=[("/tf", "tf"), ("/tf_static", "tf_static")],
emulate_tty=True,
)
nodes_to_start = [
opc_node
]
return LaunchDescription(nodes_to_start)
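# Illustrative sketch (not part of the original launch file): the twenty node ids above
# differ only in type (bool/int), direction (from/to) and index, so an equivalent list
# could be generated; the prefix string is copied from the parameters above.
def _example_generate_node_ids():
    prefix = "ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO"
    return [
        f"{prefix}.{kind}_{direction}_plc_{i}"
        for direction in ("from", "to")
        for kind in ("bool", "int")
        for i in range(1, 6)
    ]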
if __name__ == "__main__":
ls = LaunchService(argv=sys.argv[1:])
ls.include_launch_description(generate_launch_description())
sys.exit(ls.run())
| 54.018519
| 106
| 0.624957
|
import sys
from launch import LaunchDescription, LaunchService
from launch_ros.actions import Node
def generate_launch_description():
opcua_parameters = {
"server_address": "opc.tcp://192.168.100.30:4840/",
"node_ids": ["ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_from_plc_1",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_from_plc_2",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_from_plc_3",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_from_plc_4",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_from_plc_5",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_from_plc_1",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_from_plc_2",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_from_plc_3",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_from_plc_4",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_from_plc_5",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_to_plc_1",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_to_plc_2",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_to_plc_3",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_to_plc_4",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.bool_to_plc_5",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_to_plc_1",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_to_plc_2",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_to_plc_3",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_to_plc_4",
"ns=4;s=|var|CODESYS CONTROL FOR Raspberry Pi MC SL.Application.IO.int_to_plc_5"
]
}
opc_node = Node(
package="opcua_ros2_bridge",
executable="opcua_ros2_bridge",
namespace="",
output="screen",
arguments=["-d"],
parameters=[opcua_parameters],
remappings=[("/tf", "tf"), ("/tf_static", "tf_static")],
emulate_tty=True,
)
nodes_to_start = [
opc_node
]
return LaunchDescription(nodes_to_start)
if __name__ == "__main__":
ls = LaunchService(argv=sys.argv[1:])
ls.include_launch_description(generate_launch_description())
sys.exit(ls.run())
| true
| true
|
f718504c718d307f86f64b15bbb585f07e359260
| 1,681
|
py
|
Python
|
source/sam_spot_bot_function/app.py
|
liangfu/spot-tagging-bot-for-digital-assets
|
81b2a960a87da988904250b1f605e052e7e2c7a8
|
[
"Apache-2.0"
] | 19
|
2020-08-26T02:29:55.000Z
|
2022-01-21T15:26:31.000Z
|
source/sam_spot_bot_function/app.py
|
liangfu/spot-tagging-bot-for-digital-assets
|
81b2a960a87da988904250b1f605e052e7e2c7a8
|
[
"Apache-2.0"
] | 2
|
2020-09-02T07:22:26.000Z
|
2020-11-17T06:41:20.000Z
|
source/sam_spot_bot_function/app.py
|
liangfu/spot-tagging-bot-for-digital-assets
|
81b2a960a87da988904250b1f605e052e7e2c7a8
|
[
"Apache-2.0"
] | 6
|
2020-09-14T06:56:59.000Z
|
2021-10-20T14:46:36.000Z
|
import boto3
import json
import os
from sam_spot_bot_create_job.bot_dao import BotDao
# Global variables are reused across execution contexts (if available)
session = boto3.Session()
def lambda_handler(event, context):
"""
Sample json in API request body -
{
"name": name,
"file_types": file_types,
"bot_image": bot_image,
"bot_image_cmd": bot_image_cmd,
"endpoint_name": endpoint_name,
"endpoint_ecr_image_path": endpoint_ecr_image_path,
"instance_type": instance_type,
"model_s3_path": model_s3_path,
"create_date": create_date,
"update_date": update_date
}
"""
print("Received event: " + json.dumps(event, indent=2))
print("All ENV " + str(os.environ))
method = event["httpMethod"]
request_body = json.loads(event["body"])
botDao = BotDao()
    if method == "POST":
botDao.create_one_bot(**request_body)
return {
"statusCode": 201,
"body": "Created"
}
    elif method == "PUT":
botDao.update_bot_by_name(**request_body)
return {
"statusCode": 205,
"body": "Reset Content"
}
    elif method == "DELETE":
botDao.delete_bot_by_name(request_body["name"])
return {
"statusCode": 202,
"body": "Accepted"
}
    elif method == "GET":
bot = botDao.get_bot_def(request_body["name"])
return {
"statusCode": 200,
"body": json.dumps(bot)
}
return {
"statusCode": 405,
"body": "Method not allowed."
}
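# Illustrative sketch: the handler above assumes an API Gateway Lambda proxy event. A
# minimal hypothetical example of that shape (the bot name is made up):
_EXAMPLE_EVENT = {
    "httpMethod": "GET",
    "body": json.dumps({"name": "my-bot"}),
}
# lambda_handler(_EXAMPLE_EVENT, None) would then look the bot up through BotDao and
# return a 200 response; it is not invoked here because it needs the DynamoDB backend.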
| 27.112903
| 70
| 0.56395
|
import boto3
import json
import os
from sam_spot_bot_create_job.bot_dao import BotDao
session = boto3.Session()
def lambda_handler(event, context):
print("Received event: " + json.dumps(event, indent=2))
print("All ENV " + str(os.environ))
method = event["httpMethod"]
request_body = json.loads(event["body"])
botDao = BotDao()
    if method == "POST":
botDao.create_one_bot(**request_body)
return {
"statusCode": 201,
"body": "Created"
}
    elif method == "PUT":
botDao.update_bot_by_name(**request_body)
return {
"statusCode": 205,
"body": "Reset Content"
}
    elif method == "DELETE":
botDao.delete_bot_by_name(request_body["name"])
return {
"statusCode": 202,
"body": "Accepted"
}
    elif method == "GET":
bot = botDao.get_bot_def(request_body["name"])
return {
"statusCode": 200,
"body": json.dumps(bot)
}
return {
"statusCode": 405,
"body": "Method not allowed."
}
| true
| true
|
f718505155a29c9ef2efeb5cf94702dd1819b526
| 9,900
|
py
|
Python
|
arsdk-xml/ARSDKBuildUtils/Utils/Python/commandLine.py
|
2016-Capstone/PythonController
|
d8b241a4e7efdeb82ddd04830e3e8470eeeb8e34
|
[
"BSD-3-Clause"
] | 114
|
2015-05-20T09:04:18.000Z
|
2021-09-07T22:01:47.000Z
|
arsdk-xml/ARSDKBuildUtils/Utils/Python/commandLine.py
|
2016-Capstone/PythonController
|
d8b241a4e7efdeb82ddd04830e3e8470eeeb8e34
|
[
"BSD-3-Clause"
] | 40
|
2015-01-04T10:30:24.000Z
|
2015-05-18T15:33:50.000Z
|
arsdk-xml/ARSDKBuildUtils/Utils/Python/commandLine.py
|
2016-Capstone/PythonController
|
d8b241a4e7efdeb82ddd04830e3e8470eeeb8e34
|
[
"BSD-3-Clause"
] | 64
|
2015-05-20T04:44:31.000Z
|
2021-06-02T17:32:47.000Z
|
'''
Copyright (C) 2014 Parrot SA
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Parrot nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
import os
import argparse
from ARFuncs import *
defaultBaseRepoUrl = 'https://github.com/Parrot-Developers/'
class CommandLineParser:
"Command line options parser for ARSDK 3 build script"
def __init__(self, targets, libraries, binaries):
self.availableTargets = targets
self.availableLibraries = libraries
self.availableBinaries = binaries
self.activeTargets = []
self.activeLibs = []
self.activeBins = []
self.isClean = False
self.isDebug = False
self.isInHouse = False
self.isForceClean = False
self.isForceCleanup = False
self.genDoc = False
self.installDoc = False
self.doNothing = False
self.noGit = False
self.noDeps = False
self.multiProcess = False
self.threads = -1
self.defaultBaseRepoUrl = defaultBaseRepoUrl
self.repoBaseUrl = defaultBaseRepoUrl
self.extraGitScripts = []
self.archs = []
self.parser = argparse.ArgumentParser()
self.init_parser()
def init_parser(self):
targetsNames = [ t.name for t in self.availableTargets.list ]
librariesNames = [ l.name for l in self.availableLibraries.list ]
binariesNames = [ b.name for b in self.availableBinaries.list ]
self.parser.add_argument('-t', '--target', action="append", choices=targetsNames, help="Target name (cumulative)")
self.parser.add_argument('-l', '--library', action="append", choices=librariesNames, help="Library name (cumulative)")
self.parser.add_argument('-b', '--binary', action="append", choices=binariesNames, help="Binary name (cumulative)")
self.parser.add_argument('-c', '--clean', action="store_true", help="Clean all selected lib/bin")
self.parser.add_argument('-d', '--debug', action="store_true", help="Build selected lib/bin in debug mode")
self.parser.add_argument('--inhouse', action="store_true", help="Build the SDK for inhouse distribution")
self.parser.add_argument('--force-clean', action="store_true", help="Wipe all targets (overrides any other setting)")
self.parser.add_argument('--all-cleanup', action="store_true", help="Implies `--force-clean` and run all cleanup scripts in internal repositories")
self.parser.add_argument('--doc', action="store_true", help="Generate documentation after building")
self.parser.add_argument('--install-doc', action="store_true", help="Implies `--doc` and copy the generated documentation to Docs repository")
self.parser.add_argument('--none', action="store_true", help="Do only GIT Checks, do not build / clean anything")
self.parser.add_argument('--nogit', action="store_true", help="Do not run GIT checks")
self.parser.add_argument('-j', type=int, help="The number of threads to use. Automatically set to the number of CPUs if not set")
self.parser.add_argument('--nodep', action="store_true", help="Do not build deps. Use at your own risks.")
self.parser.add_argument('--repo-base-url', action="store", help=("Use the following base URL instead of " + defaultBaseRepoUrl))
self.parser.add_argument('--extra-git-script', action="append", help="Path (relative to ARSDKBuildUtils directory) to an extra script which will be run before updating git repo (with path as its first argument)")
self.parser.add_argument('--arch', action="append", help="Architectures to be built. May be ignored depending of the target. May fail if an invalid arch name is provided. (Use only if you know what you're doing !)")
self.parser.add_argument('--mp', action="store_true", help="Run in multiprocess mode (experimental !)")
def parse(self, argv):
AL_FILE=ARPathFromHere('.alreadyLaunched')
if len(argv) == 1 and not os.path.exists(AL_FILE):
ARPrint('This is the first time you run this script without arguments.')
ARPrint('Running without arguments will build all available libraries/binaries for all available targets.')
ARPrint('If you want to select which targets/libraries/binaries you want to build, use the command line options.')
ARPrint('')
ARPrint('If you rerun this command again, this message will not be displayed again and the build will be done.')
ARPrint('')
ARPrint(' --> Running with --help to show the possible options')
tmp = open(AL_FILE, 'w')
tmp.close()
argv.append('--help')
args=self.parser.parse_args(argv[1:])
ARLog ('Args = ' + str(args))
# Parse OPTs
if args.force_clean:
self.isForceClean = True
if args.all_cleanup:
self.isForceClean = True
self.isForceCleanup = True
if args.doc:
self.genDoc = True
if args.install_doc:
self.genDoc = True
self.installDoc = True
if args.none:
self.doNothing = True
if args.nogit:
self.noGit = True
if args.nodep:
self.noDeps = True
if args.target:
for arg in args.target:
self.activeTargets.append(self.availableTargets.getTarget(arg))
if args.binary:
for arg in args.binary:
t_bin = self.availableBinaries.getBin(arg)
self.activeBins.append(t_bin)
if args.library:
for arg in args.library:
t_lib = self.availableLibraries.getLib(arg)
self.activeLibs.append(t_lib)
if args.inhouse:
self.isInHouse = True
if args.clean:
self.isClean = True
if args.debug:
self.isDebug = True
if args.j and int(args.j) >= 0:
self.threads = int(args.j)
if args.repo_base_url:
self.repoBaseUrl = args.repo_base_url
if args.extra_git_script:
self.extraGitScripts = args.extra_git_script[:]
if args.arch:
self.archs = args.arch[:]
if args.mp:
self.multiProcess = True
# Fill default values if needed
if not self.activeTargets:
for tar in self.availableTargets.list:
self.activeTargets.append(tar)
if not self.activeBins and not self.activeLibs:
for bin in self.availableBinaries.list:
self.activeBins.append(bin)
for lib in self.availableLibraries.list:
self.activeLibs.append(lib)
if self.threads == 0:
self.threads = 1
elif self.threads < 0:
self.threads = ARGetNumberOfCpus()
ARLog('Using automatic -j --> -j ' + str(self.threads))
# If in clean mode, reverse build order(clean deps after)
if self.isClean:
newLibs = []
for lib in reversed(self.activeLibs):
newLibs.append(lib)
self.activeLibs = newLibs
newBins = []
for bin in reversed(self.activeBins):
newBins.append(bin)
self.activeBins = newBins
def dump(self):
ARLog('Build script called with the following configuration:')
ARLog(' - FORCE CLEANUP = ' + str(self.isForceCleanup))
ARLog(' - FORCE CLEAN = ' + str(self.isForceClean))
ARLog(' - DEBUG = ' + str(self.isDebug))
ARLog(' - CLEAN = ' + str(self.isClean))
ARLog(' - GENERATE DOC = ' + str(self.genDoc))
ARLog(' - INSTALL DOC = ' + str(self.installDoc))
ARLog(' - DO NOTHING = ' + str(self.doNothing))
ARLog(' - NO GIT = ' + str(self.noGit))
ARLog(' - NO DEPS = ' + str(self.noDeps))
ARLog(' - NB THREADS = ' + str(self.threads))
ARLog(' - MULTIPROCESS = ' + str(self.multiProcess))
ARLog('Active targets : {')
for tar in self.activeTargets:
ARLog(' - %(tar)s' % locals())
ARLog('}')
ARLog('Active libraries : {')
for lib in self.activeLibs:
ARLog(' - %(lib)s' % locals())
ARLog('}')
ARLog('Active binaries : {')
for bin in self.activeBins:
ARLog(' - %(bin)s' % locals())
ARLog('}')
ARLog('')
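# Illustrative, self-contained sketch of the cumulative option pattern used by
# init_parser() above: `action="append"` lets options such as -t/-l/-b be repeated on
# the command line. The option values shown in the trailing comment are hypothetical.
def _example_cumulative_options(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--target', action="append", default=[])
    return parser.parse_args(argv).target
# _example_cumulative_options(['-t', 'Unix', '-t', 'Android'])  ->  ['Unix', 'Android']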
| 48.058252
| 223
| 0.629394
|
import os
import argparse
from ARFuncs import *
defaultBaseRepoUrl = 'https://github.com/Parrot-Developers/'
class CommandLineParser:
def __init__(self, targets, libraries, binaries):
self.availableTargets = targets
self.availableLibraries = libraries
self.availableBinaries = binaries
self.activeTargets = []
self.activeLibs = []
self.activeBins = []
self.isClean = False
self.isDebug = False
self.isInHouse = False
self.isForceClean = False
self.isForceCleanup = False
self.genDoc = False
self.installDoc = False
self.doNothing = False
self.noGit = False
self.noDeps = False
self.multiProcess = False
self.threads = -1
self.defaultBaseRepoUrl = defaultBaseRepoUrl
self.repoBaseUrl = defaultBaseRepoUrl
self.extraGitScripts = []
self.archs = []
self.parser = argparse.ArgumentParser()
self.init_parser()
def init_parser(self):
targetsNames = [ t.name for t in self.availableTargets.list ]
librariesNames = [ l.name for l in self.availableLibraries.list ]
binariesNames = [ b.name for b in self.availableBinaries.list ]
self.parser.add_argument('-t', '--target', action="append", choices=targetsNames, help="Target name (cumulative)")
self.parser.add_argument('-l', '--library', action="append", choices=librariesNames, help="Library name (cumulative)")
self.parser.add_argument('-b', '--binary', action="append", choices=binariesNames, help="Binary name (cumulative)")
self.parser.add_argument('-c', '--clean', action="store_true", help="Clean all selected lib/bin")
self.parser.add_argument('-d', '--debug', action="store_true", help="Build selected lib/bin in debug mode")
self.parser.add_argument('--inhouse', action="store_true", help="Build the SDK for inhouse distribution")
self.parser.add_argument('--force-clean', action="store_true", help="Wipe all targets (overrides any other setting)")
self.parser.add_argument('--all-cleanup', action="store_true", help="Implies `--force-clean` and run all cleanup scripts in internal repositories")
self.parser.add_argument('--doc', action="store_true", help="Generate documentation after building")
self.parser.add_argument('--install-doc', action="store_true", help="Implies `--doc` and copy the generated documentation to Docs repository")
self.parser.add_argument('--none', action="store_true", help="Do only GIT Checks, do not build / clean anything")
self.parser.add_argument('--nogit', action="store_true", help="Do not run GIT checks")
self.parser.add_argument('-j', type=int, help="The number of threads to use. Automatically set to the number of CPUs if not set")
self.parser.add_argument('--nodep', action="store_true", help="Do not build deps. Use at your own risks.")
self.parser.add_argument('--repo-base-url', action="store", help=("Use the following base URL instead of " + defaultBaseRepoUrl))
self.parser.add_argument('--extra-git-script', action="append", help="Path (relative to ARSDKBuildUtils directory) to an extra script which will be run before updating git repo (with path as its first argument)")
self.parser.add_argument('--arch', action="append", help="Architectures to be built. May be ignored depending of the target. May fail if an invalid arch name is provided. (Use only if you know what you're doing !)")
self.parser.add_argument('--mp', action="store_true", help="Run in multiprocess mode (experimental !)")
def parse(self, argv):
AL_FILE=ARPathFromHere('.alreadyLaunched')
if len(argv) == 1 and not os.path.exists(AL_FILE):
ARPrint('This is the first time you run this script without arguments.')
ARPrint('Running without arguments will build all available libraries/binaries for all available targets.')
ARPrint('If you want to select which targets/libraries/binaries you want to build, use the command line options.')
ARPrint('')
ARPrint('If you rerun this command again, this message will not be displayed again and the build will be done.')
ARPrint('')
ARPrint(' --> Running with --help to show the possible options')
tmp = open(AL_FILE, 'w')
tmp.close()
argv.append('--help')
args=self.parser.parse_args(argv[1:])
ARLog ('Args = ' + str(args))
# Parse OPTs
if args.force_clean:
self.isForceClean = True
if args.all_cleanup:
self.isForceClean = True
self.isForceCleanup = True
if args.doc:
self.genDoc = True
if args.install_doc:
self.genDoc = True
self.installDoc = True
if args.none:
self.doNothing = True
if args.nogit:
self.noGit = True
if args.nodep:
self.noDeps = True
if args.target:
for arg in args.target:
self.activeTargets.append(self.availableTargets.getTarget(arg))
if args.binary:
for arg in args.binary:
t_bin = self.availableBinaries.getBin(arg)
self.activeBins.append(t_bin)
if args.library:
for arg in args.library:
t_lib = self.availableLibraries.getLib(arg)
self.activeLibs.append(t_lib)
if args.inhouse:
self.isInHouse = True
if args.clean:
self.isClean = True
if args.debug:
self.isDebug = True
if args.j and int(args.j) >= 0:
self.threads = int(args.j)
if args.repo_base_url:
self.repoBaseUrl = args.repo_base_url
if args.extra_git_script:
self.extraGitScripts = args.extra_git_script[:]
if args.arch:
self.archs = args.arch[:]
if args.mp:
self.multiProcess = True
# Fill default values if needed
if not self.activeTargets:
for tar in self.availableTargets.list:
self.activeTargets.append(tar)
if not self.activeBins and not self.activeLibs:
for bin in self.availableBinaries.list:
self.activeBins.append(bin)
for lib in self.availableLibraries.list:
self.activeLibs.append(lib)
if self.threads == 0:
self.threads = 1
elif self.threads < 0:
self.threads = ARGetNumberOfCpus()
ARLog('Using automatic -j --> -j ' + str(self.threads))
# If in clean mode, reverse build order(clean deps after)
if self.isClean:
newLibs = []
for lib in reversed(self.activeLibs):
newLibs.append(lib)
self.activeLibs = newLibs
newBins = []
for bin in reversed(self.activeBins):
newBins.append(bin)
self.activeBins = newBins
def dump(self):
ARLog('Build script called with the following configuration:')
ARLog(' - FORCE CLEANUP = ' + str(self.isForceCleanup))
ARLog(' - FORCE CLEAN = ' + str(self.isForceClean))
ARLog(' - DEBUG = ' + str(self.isDebug))
ARLog(' - CLEAN = ' + str(self.isClean))
ARLog(' - GENERATE DOC = ' + str(self.genDoc))
ARLog(' - INSTALL DOC = ' + str(self.installDoc))
ARLog(' - DO NOTHING = ' + str(self.doNothing))
ARLog(' - NO GIT = ' + str(self.noGit))
ARLog(' - NO DEPS = ' + str(self.noDeps))
ARLog(' - NB THREADS = ' + str(self.threads))
ARLog(' - MULTIPROCESS = ' + str(self.multiProcess))
ARLog('Active targets : {')
for tar in self.activeTargets:
ARLog(' - %(tar)s' % locals())
ARLog('}')
ARLog('Active libraries : {')
for lib in self.activeLibs:
ARLog(' - %(lib)s' % locals())
ARLog('}')
ARLog('Active binaries : {')
for bin in self.activeBins:
ARLog(' - %(bin)s' % locals())
ARLog('}')
ARLog('')
| true
| true
|
f71851d73851a92028fa4c056721e8e576126e24
| 3,458
|
py
|
Python
|
source/scheduler/cdk/aws_solutions/scheduler/cdk/aws_lambda/update_scheduled_task.py
|
aws-solutions/maintaining-personalized-experiences-with-machine-learning
|
3f6f1b0069df4828eae9b0835b717500189e4f71
|
[
"Apache-2.0"
] | 6
|
2021-09-23T16:33:24.000Z
|
2022-03-31T11:45:13.000Z
|
source/scheduler/cdk/aws_solutions/scheduler/cdk/aws_lambda/update_scheduled_task.py
|
aws-solutions/maintaining-personalized-experiences-with-machine-learning
|
3f6f1b0069df4828eae9b0835b717500189e4f71
|
[
"Apache-2.0"
] | 4
|
2021-09-24T21:34:14.000Z
|
2022-01-27T22:11:08.000Z
|
source/scheduler/cdk/aws_solutions/scheduler/cdk/aws_lambda/update_scheduled_task.py
|
aws-solutions/maintaining-personalized-experiences-with-machine-learning
|
3f6f1b0069df4828eae9b0835b717500189e4f71
|
[
"Apache-2.0"
] | 9
|
2021-09-23T23:24:46.000Z
|
2022-02-12T04:53:16.000Z
|
# ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# ######################################################################################################################
from pathlib import Path
from typing import Optional
import aws_cdk.aws_iam as iam
from aws_cdk.aws_dynamodb import ITable
from aws_cdk.aws_stepfunctions import IChainable
from constructs import Construct
from aws_solutions.cdk.stepfunctions.solutionstep import SolutionStep
class UpdateScheduledTask(SolutionStep):
def __init__(
self, # NOSONAR (python:S107) - allow large number of method parameters
scope: Construct,
id: str,
layers=None,
failure_state: Optional[IChainable] = None,
scheduler_table: ITable = None,
state_machine_arn: str = None,
state_machine_executions_arn: str = None,
):
self.scheduler_table = scheduler_table
self.state_machine_arn = state_machine_arn
self.state_machine_executions_arn = state_machine_executions_arn
super().__init__(
scope,
id,
layers=layers,
failure_state=failure_state,
function="update_schedule",
entrypoint=Path(__file__).parents[1].resolve()
/ "aws_lambda"
/ "scheduler"
/ "handler.py",
)
def _set_permissions(self):
self.function.add_environment(
"DDB_SCHEDULER_STEPFUNCTION", self.state_machine_arn
)
self.function.add_to_role_policy(
iam.PolicyStatement(
actions=[
"states:StartExecution",
"states:ListExecutions",
"states:StopExecution",
"states:DescribeExecution",
],
effect=iam.Effect.ALLOW,
resources=[
self.state_machine_arn,
self.state_machine_executions_arn,
],
)
)
self.scheduler_table.grant_read_write_data(self.function)
self.function.add_environment(
"DDB_SCHEDULES_TABLE", self.scheduler_table.table_name
)
| 45.5
| 120
| 0.486119
| true
| true
|
|
f718524ed9b3ed02ed271f8ab5bcf8dab7659d7c
| 1,620
|
py
|
Python
|
October/Week1/Combination Sum.py
|
vinaykumar7686/Leetcode-August_Challenge
|
fe1928d8b10a63d7aa561118a70eeaec2f3a2f36
|
[
"MIT"
] | 1
|
2020-08-02T13:41:38.000Z
|
2020-08-02T13:41:38.000Z
|
October/Week1/Combination Sum.py
|
vinaykumar7686/Leetcode-August_Challenge
|
fe1928d8b10a63d7aa561118a70eeaec2f3a2f36
|
[
"MIT"
] | null | null | null |
October/Week1/Combination Sum.py
|
vinaykumar7686/Leetcode-August_Challenge
|
fe1928d8b10a63d7aa561118a70eeaec2f3a2f36
|
[
"MIT"
] | null | null | null |
# Combination Sum
'''
Given an array of distinct integers candidates and a target integer target, return a list of all unique combinations of candidates where the chosen numbers sum to target. You may return the combinations in any order.
The same number may be chosen from candidates an unlimited number of times. Two combinations are unique if the frequency of at least one of the chosen numbers is different.
Example 1:
Input: candidates = [2,3,6,7], target = 7
Output: [[2,2,3],[7]]
Explanation:
2 and 3 are candidates, and 2 + 2 + 3 = 7. Note that 2 can be used multiple times.
7 is a candidate, and 7 = 7.
These are the only two combinations.
Example 2:
Input: candidates = [2,3,5], target = 8
Output: [[2,2,2,2],[2,3,3],[3,5]]
Example 3:
Input: candidates = [2], target = 1
Output: []
Example 4:
Input: candidates = [1], target = 1
Output: [[1]]
Example 5:
Input: candidates = [1], target = 2
Output: [[1,1]]
Constraints:
1 <= candidates.length <= 30
1 <= candidates[i] <= 200
All elements of candidates are distinct.
1 <= target <= 500
'''
from typing import List  # required for the List type hints used below
class Solution:
def combinationSum(self, nums: List[int], target: int) -> List[List[int]]:
ans = []
def backtrack(val, arr):
if val == target:
arr.sort()
if arr not in ans:
ans.append(arr)
return
if val>target:
return
for num in nums:
if (val+num)<=target:
backtrack(val+num, arr+[num])
backtrack(0, [])
print(ans)
return ans
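# A minimal usage sketch (illustrative only, not part of the original LeetCode
# submission). It assumes the file is run outside the LeetCode harness, using
# the typing import at the top of the file:
#
#   sol = Solution()
#   print(sol.combinationSum([2, 3, 6, 7], 7))   # expected [[2, 2, 3], [7]] (order may vary)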
| 25.714286
| 216
| 0.598148
|
from typing import List
class Solution:
def combinationSum(self, nums: List[int], target: int) -> List[List[int]]:
ans = []
def backtrack(val, arr):
if val == target:
arr.sort()
if arr not in ans:
ans.append(arr)
return
if val>target:
return
for num in nums:
if (val+num)<=target:
backtrack(val+num, arr+[num])
backtrack(0, [])
print(ans)
return ans
| true
| true
|
f718528de2b098c3b1736d5dfd5dd63528268733
| 3,613
|
py
|
Python
|
utils.py
|
pedbrgs/anomaly-detection-tool
|
1b5d89eb1287eb13849d87851a8c3c4cc708a93e
|
[
"MIT"
] | null | null | null |
utils.py
|
pedbrgs/anomaly-detection-tool
|
1b5d89eb1287eb13849d87851a8c3c4cc708a93e
|
[
"MIT"
] | null | null | null |
utils.py
|
pedbrgs/anomaly-detection-tool
|
1b5d89eb1287eb13849d87851a8c3c4cc708a93e
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
import torch
import torchvision.models as models
from torch.autograd import Variable
import torchvision.transforms as transforms
def plot_image(image, figsize):
""" Display an image """
fig = plt.figure(figsize = figsize)
plt.imshow(image, cmap = 'gray')
plt.title(''), plt.xticks([]), plt.yticks([])
plt.show()
def pattern_detection(img, figsize):
""" Performs object segmentation by morphological filtering """
# BGR to grayscale
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_backup = img.copy()
# Get image size
height, width, _ = np.array(img).shape
# Erosion morphological filter
kernel = np.ones((3,3), np.uint8)
erosion = cv2.erode(imgGray, kernel, iterations = 2)
th = cv2.threshold(erosion, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
# Image binarization
th = erosion.mean()
imBin = erosion > th
# Finding contours
ret, thresh = cv2.threshold(erosion, 127, 255, 0)
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Compute contour areas for noise filtering
areas = [cv2.contourArea(cnt) for cnt in contours]
patterns, objects = [], []
# Drawing bounding boxes around the contours
for cnt in contours:
# Filtering large and small bounding boxes
if (cv2.contourArea(cnt) > 50 and cv2.contourArea(cnt) < np.max(areas)):
# Get bounding box coordinates
x, y, w, h = cv2.boundingRect(cnt)
patterns.append([x, y, w, h])
objects.append(cv2.cvtColor(img_backup[y:(y + h), x:(x+w)], cv2.COLOR_BGR2RGB))
# Draw bounding box
img_backup = cv2.rectangle(img_backup, (x, y),(x+w, y+h),(255, 0, 0), 1)
return patterns, objects
def image_loader(image):
""" Load image and returns pytorch tensor """
imsize = 256
loader = transforms.Compose([transforms.Resize(imsize), transforms.ToTensor()])
image = Image.fromarray(image)
image = loader(image).float()
image = Variable(image, requires_grad = True)
image = image.unsqueeze(0)
# .cuda() assumes that you are using GPU
return image
def build_model():
""" Build feature extractor based on ResNet-34 """
# If True, returns a model pre-trained on ImageNet
convnet = models.resnet34(pretrained = True)
convnet = list(convnet.children())[:-2]
convnet = torch.nn.Sequential(*convnet, torch.nn.AdaptiveAvgPool2d(output_size = (4, 4)))
return convnet
def feature_extraction(model, objects, patterns):
""" Feature extraction from all detected patterns """
feature_vectors = []
for i in range(len(patterns)):
x_min, y_min, width, height = patterns[i][0], patterns[i][1], patterns[i][2], patterns[i][3]
image = image_loader(objects[i])
# Forward pass in each pattern
features = model.forward(image)
features = features.flatten().detach().numpy()
feature_vectors.append(features)
return feature_vectors
def pairwise_matrix(feature_vectors):
""" Compute cosine similarity between feature vectors """
    # NxN matrix, where N is the number of detected patterns (feature vectors)
    cosine_similarity = np.ones((len(feature_vectors), len(feature_vectors)))
    for i in range(len(feature_vectors)):
        for j in range(len(feature_vectors)):
            cosine_similarity[i,j] = np.dot(feature_vectors[i], feature_vectors[j]) / (np.linalg.norm(feature_vectors[i]) * np.linalg.norm(feature_vectors[j]))
return cosine_similarity
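# A minimal end-to-end usage sketch (illustrative only, not part of the
# original module). 'sample.png' and the variable names are assumptions:
#
#   img = cv2.imread('sample.png')
#   patterns, objects = pattern_detection(img, figsize=(8, 8))
#   model = build_model()
#   feats = feature_extraction(model, objects, patterns)
#   sims = pairwise_matrix(feats)   # NxN cosine-similarity matrix over detected patterns
#   plot_image(img, figsize=(8, 8))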
| 31.417391
| 159
| 0.66067
|
import cv2
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
import torch
import torchvision.models as models
from torch.autograd import Variable
import torchvision.transforms as transforms
def plot_image(image, figsize):
fig = plt.figure(figsize = figsize)
plt.imshow(image, cmap = 'gray')
plt.title(''), plt.xticks([]), plt.yticks([])
plt.show()
def pattern_detection(img, figsize):
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_backup = img.copy()
height, width, _ = np.array(img).shape
kernel = np.ones((3,3), np.uint8)
erosion = cv2.erode(imgGray, kernel, iterations = 2)
th = cv2.threshold(erosion, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
th = erosion.mean()
imBin = erosion > th
ret, thresh = cv2.threshold(erosion, 127, 255, 0)
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(cnt) for cnt in contours]
patterns, objects = [], []
for cnt in contours:
if (cv2.contourArea(cnt) > 50 and cv2.contourArea(cnt) < np.max(areas)):
x, y, w, h = cv2.boundingRect(cnt)
patterns.append([x, y, w, h])
objects.append(cv2.cvtColor(img_backup[y:(y + h), x:(x+w)], cv2.COLOR_BGR2RGB))
img_backup = cv2.rectangle(img_backup, (x, y),(x+w, y+h),(255, 0, 0), 1)
return patterns, objects
def image_loader(image):
imsize = 256
loader = transforms.Compose([transforms.Resize(imsize), transforms.ToTensor()])
image = Image.fromarray(image)
image = loader(image).float()
image = Variable(image, requires_grad = True)
image = image.unsqueeze(0)
return image
def build_model():
convnet = models.resnet34(pretrained = True)
convnet = list(convnet.children())[:-2]
convnet = torch.nn.Sequential(*convnet, torch.nn.AdaptiveAvgPool2d(output_size = (4, 4)))
return convnet
def feature_extraction(model, objects, patterns):
feature_vectors = []
for i in range(len(patterns)):
x_min, y_min, width, height = patterns[i][0], patterns[i][1], patterns[i][2], patterns[i][3]
image = image_loader(objects[i])
features = model.forward(image)
features = features.flatten().detach().numpy()
feature_vectors.append(features)
return feature_vectors
def pairwise_matrix(feature_vectors):
    cosine_similarity = np.ones((len(feature_vectors), len(feature_vectors)))
    for i in range(len(feature_vectors)):
        for j in range(len(feature_vectors)):
            cosine_similarity[i,j] = np.dot(feature_vectors[i], feature_vectors[j]) / (np.linalg.norm(feature_vectors[i]) * np.linalg.norm(feature_vectors[j]))
return cosine_similarity
| true
| true
|
f718543ecc3c5723ef58047300881c34e670147d
| 13,253
|
py
|
Python
|
TNT.py
|
cjh3020889729/Regular-season-Palm-pathological-myopia-prediction-May-10th-program
|
325867c0966c803f5b50c8758c1a83dcc6f6ed2c
|
[
"Apache-2.0"
] | null | null | null |
TNT.py
|
cjh3020889729/Regular-season-Palm-pathological-myopia-prediction-May-10th-program
|
325867c0966c803f5b50c8758c1a83dcc6f6ed2c
|
[
"Apache-2.0"
] | null | null | null |
TNT.py
|
cjh3020889729/Regular-season-Palm-pathological-myopia-prediction-May-10th-program
|
325867c0966c803f5b50c8758c1a83dcc6f6ed2c
|
[
"Apache-2.0"
] | null | null | null |
import paddle
from paddle import nn
import math
import numpy as np
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 2, 'input_size': (3, 600, 600), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'pixel_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
'tnt_s_patch16_224': _cfg(
url='',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
),
'tnt_b_patch16_224': _cfg(
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
),
}
class Identity(nn.Layer):
r"""A placeholder identity operator that is argument-insensitive.
Args:
args: any argument (unused)
kwargs: any keyword argument (unused)
Examples::
>>> m = nn.Identity(54, unused_argument1=0.1, unused_argument2=False)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 20])
"""
def __init__(self, *args, **kwargs):
super(Identity, self).__init__()
def forward(self, inputs):
return inputs
def drop_path(x, drop_prob: float = 0., training: bool = False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + paddle.rand(shape=shape, dtype=x.dtype)  # paddle.rand takes no device argument
    random_tensor = paddle.floor(random_tensor)  # binarize
    output = x / keep_prob * random_tensor
return output
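# Behaviour note (illustrative, not from the original file): with drop_prob=0.2
# and training=True, roughly 20% of the samples in a batch have their residual
# branch zeroed, while the surviving samples are scaled by 1/0.8 so the
# expected value of the output is unchanged.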
class DropPath(nn.Layer):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Attention(nn.Layer):
'''
    Attention module (multi-head self-attention)
'''
def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super(Attention, self).__init__()
self.hidden_dim = hidden_dim
self.num_heads = num_heads
head_dim = hidden_dim // num_heads
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.qk = nn.Linear(dim, hidden_dim * 2, bias_attr=qkv_bias)
self.v = nn.Linear(dim, dim, bias_attr=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop) # no inplace
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, inputs):
x = inputs
B, N, C = x.shape
qk = self.qk(x).reshape((B, N, 2, self.num_heads, self.head_dim)).transpose((2, 0, 3, 1, 4))
q, k = qk[0], qk[1]
v = self.v(x).reshape((B, N, self.num_heads, -1)).transpose((0, 2, 1, 3))
attn = paddle.matmul(q, k.transpose((0, 1, 3, 2))) * self.scale
attn = paddle.nn.functional.softmax(attn, axis=-1)
attn = self.attn_drop(attn)
x = paddle.matmul(attn, v).transpose((0, 2, 1, 3)).reshape((B, N, -1))
x = self.proj(x)
x = self.proj_drop(x)
return x
class Mlp(nn.Layer):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super(Mlp, self).__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Block(nn.Layer):
""" TNT Block
"""
def __init__(self, dim, in_dim, num_pixel, num_heads=12, in_num_head=4, mlp_ratio=4.,
qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super(Block, self).__init__()
# Inner transformer
self.norm_in = norm_layer(in_dim)
self.attn_in = Attention(
in_dim, in_dim, num_heads=in_num_head, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.norm_mlp_in = norm_layer(in_dim)
self.mlp_in = Mlp(in_features=in_dim, hidden_features=int(in_dim * 4),
out_features=in_dim, act_layer=act_layer, drop=drop)
self.norm1_proj = norm_layer(in_dim)
self.proj = nn.Linear(in_dim * num_pixel, dim, bias_attr=True)
# Outer transformer
self.norm_out = norm_layer(dim)
self.attn_out = Attention(
dim, dim, num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity()
self.norm_mlp = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
out_features=dim, act_layer=act_layer, drop=drop)
def forward(self, pixel_embed, patch_embed):
# inner
pixel_embed = pixel_embed + self.drop_path(self.attn_in(self.norm_in(pixel_embed)))
pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed)))
# outer
B, N, C = patch_embed.shape
patch_embed[:, 1:] = patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape((B, N - 1, -1)))
patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed)))
patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed)))
return pixel_embed, patch_embed
class PixelEmbed(nn.Layer):
""" Image to Pixel Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4):
super(PixelEmbed, self).__init__()
num_patches = (img_size // patch_size) ** 2
self.img_size = img_size
self.num_patches = num_patches
self.in_dim = in_dim
new_patch_size = math.ceil(patch_size / stride)
self.new_patch_size = new_patch_size
self.proj = nn.Conv2D(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride)
def forward(self, x, pixel_pos):
B, C, H, W = x.shape
assert H == self.img_size and W == self.img_size, \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size}*{self.img_size})."
x = self.proj(x)
x = nn.functional.unfold(x=x, kernel_sizes=self.new_patch_size, strides=self.new_patch_size)
x = x.transpose((0, 2, 1)).reshape((B * self.num_patches, self.in_dim, self.new_patch_size, self.new_patch_size))
x = x + pixel_pos
x = x.reshape((B * self.num_patches, self.in_dim, -1)).transpose((0, 2, 1))
return x
class TNT(nn.Layer):
""" Transformer in Transformer - https://arxiv.org/abs/2103.00112
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, in_dim=48, depth=12,
num_heads=12, in_num_head=4, mlp_ratio=4., qkv_bias=False, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, first_stride=4):
super(TNT, self).__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.pixel_embed = PixelEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, in_dim=in_dim, stride=first_stride)
num_patches = self.pixel_embed.num_patches
self.num_patches = num_patches
new_patch_size = self.pixel_embed.new_patch_size
num_pixel = new_patch_size ** 2
self.norm1_proj = norm_layer(num_pixel * in_dim)
self.proj = nn.Linear(num_pixel * in_dim, embed_dim)
self.norm2_proj = norm_layer(embed_dim)
        # Create learnable parameters (class token and positional embeddings)
self.cls_token = paddle.create_parameter((1, 1, embed_dim), 'float32', attr=nn.initializer.Assign(paddle.zeros((1, 1, embed_dim))))
self.patch_pos = paddle.create_parameter((1, num_patches + 1, embed_dim), 'float32', attr=nn.initializer.Assign(paddle.zeros((1, num_patches + 1, embed_dim))))
self.pixel_pos = paddle.create_parameter((1, in_dim, new_patch_size, new_patch_size), 'float32', attr=nn.initializer.Assign(paddle.zeros((1, in_dim, new_patch_size, new_patch_size))))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x for x in paddle.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
blocks = []
for i in range(depth):
blocks.append(Block(
dim=embed_dim, in_dim=in_dim, num_pixel=num_pixel, num_heads=num_heads, in_num_head=in_num_head,
mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[i], norm_layer=norm_layer))
self.blocks = nn.LayerList(blocks)
self.norm = norm_layer(embed_dim)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
with paddle.no_grad():
self.cls_token = paddle.create_parameter(self.cls_token.shape, 'float32', attr=nn.initializer.Assign(paddle.normal(self.cls_token, std=.02)))
self.patch_pos = paddle.create_parameter(self.patch_pos.shape, 'float32', attr=nn.initializer.Assign(paddle.normal(self.patch_pos, std=.02)))
self.pixel_pos = paddle.create_parameter(self.pixel_pos.shape, 'float32', attr=nn.initializer.Assign(paddle.normal(self.pixel_pos, std=.02)))
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
with paddle.no_grad():
m.weight = paddle.create_parameter(m.weight.shape, 'float32', attr=nn.initializer.Assign(paddle.normal(m.weight, std=.02)))
if isinstance(m, nn.Linear) and m.bias is not None:
m.bias = paddle.create_parameter(m.bias.shape, 'float32', attr=nn.initializer.Constant(value=0.))
elif isinstance(m, nn.LayerNorm):
m.bias = paddle.create_parameter(m.bias.shape, 'float32', attr=nn.initializer.Constant(value=0.))
m.weight = paddle.create_parameter(m.weight.shape, 'float32', attr=nn.initializer.Constant(value=1.))
def no_weight_decay(self):
return {'patch_pos', 'pixel_pos', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
pixel_embed = self.pixel_embed(x, self.pixel_pos)
patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape((B, self.num_patches, -1)))))
patch_embed = paddle.concat((self.cls_token.expand([B, self.cls_token.shape[1],self.cls_token.shape[2]]), patch_embed), axis=1) # expand
patch_embed = patch_embed + self.patch_pos
patch_embed = self.pos_drop(patch_embed)
for blk in self.blocks:
pixel_embed, patch_embed = blk(pixel_embed, patch_embed)
patch_embed = self.norm(patch_embed)
return patch_embed[:, 0]
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def tnt_s_patch16_224(pretrained=False, **kwargs):
model = TNT(patch_size=16, embed_dim=384, in_dim=24, depth=12, num_heads=6, in_num_head=4,
qkv_bias=False, **kwargs)
model.default_cfg = default_cfgs['tnt_s_patch16_224']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
def tnt_b_patch16_224(pretrained=False, **kwargs):
model = TNT(patch_size=16, embed_dim=640, in_dim=40, depth=12, num_heads=10, in_num_head=4,
qkv_bias=False, **kwargs)
model.default_cfg = default_cfgs['tnt_b_patch16_224']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
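# A minimal usage sketch (illustrative only, not part of the original file).
# Shapes assume the default img_size=224 of the TNT constructor:
#
#   model = tnt_s_patch16_224(num_classes=2)
#   x = paddle.randn([1, 3, 224, 224])
#   logits = model(x)   # shape [1, 2]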
| 43.169381
| 192
| 0.628688
|
import paddle
from paddle import nn
import math
import numpy as np
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 2, 'input_size': (3, 600, 600), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'pixel_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
'tnt_s_patch16_224': _cfg(
url='',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
),
'tnt_b_patch16_224': _cfg(
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
),
}
class Identity(nn.Layer):
def __init__(self, *args, **kwargs):
super(Identity, self).__init__()
def forward(self, inputs):
return inputs
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + paddle.rand(shape=shape, dtype=x.dtype)
    random_tensor = paddle.floor(random_tensor)
    output = x / keep_prob * random_tensor
return output
class DropPath(nn.Layer):
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Attention(nn.Layer):
def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super(Attention, self).__init__()
self.hidden_dim = hidden_dim
self.num_heads = num_heads
head_dim = hidden_dim // num_heads
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.qk = nn.Linear(dim, hidden_dim * 2, bias_attr=qkv_bias)
self.v = nn.Linear(dim, dim, bias_attr=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, inputs):
x = inputs
B, N, C = x.shape
qk = self.qk(x).reshape((B, N, 2, self.num_heads, self.head_dim)).transpose((2, 0, 3, 1, 4))
q, k = qk[0], qk[1]
v = self.v(x).reshape((B, N, self.num_heads, -1)).transpose((0, 2, 1, 3))
attn = paddle.matmul(q, k.transpose((0, 1, 3, 2))) * self.scale
attn = paddle.nn.functional.softmax(attn, axis=-1)
attn = self.attn_drop(attn)
x = paddle.matmul(attn, v).transpose((0, 2, 1, 3)).reshape((B, N, -1))
x = self.proj(x)
x = self.proj_drop(x)
return x
class Mlp(nn.Layer):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super(Mlp, self).__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Block(nn.Layer):
def __init__(self, dim, in_dim, num_pixel, num_heads=12, in_num_head=4, mlp_ratio=4.,
qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super(Block, self).__init__()
self.norm_in = norm_layer(in_dim)
self.attn_in = Attention(
in_dim, in_dim, num_heads=in_num_head, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.norm_mlp_in = norm_layer(in_dim)
self.mlp_in = Mlp(in_features=in_dim, hidden_features=int(in_dim * 4),
out_features=in_dim, act_layer=act_layer, drop=drop)
self.norm1_proj = norm_layer(in_dim)
self.proj = nn.Linear(in_dim * num_pixel, dim, bias_attr=True)
self.norm_out = norm_layer(dim)
self.attn_out = Attention(
dim, dim, num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity()
self.norm_mlp = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
out_features=dim, act_layer=act_layer, drop=drop)
def forward(self, pixel_embed, patch_embed):
pixel_embed = pixel_embed + self.drop_path(self.attn_in(self.norm_in(pixel_embed)))
pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed)))
B, N, C = patch_embed.shape
patch_embed[:, 1:] = patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape((B, N - 1, -1)))
patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed)))
patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed)))
return pixel_embed, patch_embed
class PixelEmbed(nn.Layer):
def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4):
super(PixelEmbed, self).__init__()
num_patches = (img_size // patch_size) ** 2
self.img_size = img_size
self.num_patches = num_patches
self.in_dim = in_dim
new_patch_size = math.ceil(patch_size / stride)
self.new_patch_size = new_patch_size
self.proj = nn.Conv2D(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride)
def forward(self, x, pixel_pos):
B, C, H, W = x.shape
assert H == self.img_size and W == self.img_size, \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size}*{self.img_size})."
x = self.proj(x)
x = nn.functional.unfold(x=x, kernel_sizes=self.new_patch_size, strides=self.new_patch_size)
x = x.transpose((0, 2, 1)).reshape((B * self.num_patches, self.in_dim, self.new_patch_size, self.new_patch_size))
x = x + pixel_pos
x = x.reshape((B * self.num_patches, self.in_dim, -1)).transpose((0, 2, 1))
return x
class TNT(nn.Layer):
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, in_dim=48, depth=12,
num_heads=12, in_num_head=4, mlp_ratio=4., qkv_bias=False, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, first_stride=4):
super(TNT, self).__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.pixel_embed = PixelEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, in_dim=in_dim, stride=first_stride)
num_patches = self.pixel_embed.num_patches
self.num_patches = num_patches
new_patch_size = self.pixel_embed.new_patch_size
num_pixel = new_patch_size ** 2
self.norm1_proj = norm_layer(num_pixel * in_dim)
self.proj = nn.Linear(num_pixel * in_dim, embed_dim)
self.norm2_proj = norm_layer(embed_dim)
        # Create learnable parameters (class token and positional embeddings)
self.cls_token = paddle.create_parameter((1, 1, embed_dim), 'float32', attr=nn.initializer.Assign(paddle.zeros((1, 1, embed_dim))))
self.patch_pos = paddle.create_parameter((1, num_patches + 1, embed_dim), 'float32', attr=nn.initializer.Assign(paddle.zeros((1, num_patches + 1, embed_dim))))
self.pixel_pos = paddle.create_parameter((1, in_dim, new_patch_size, new_patch_size), 'float32', attr=nn.initializer.Assign(paddle.zeros((1, in_dim, new_patch_size, new_patch_size))))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x for x in paddle.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
blocks = []
for i in range(depth):
blocks.append(Block(
dim=embed_dim, in_dim=in_dim, num_pixel=num_pixel, num_heads=num_heads, in_num_head=in_num_head,
mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[i], norm_layer=norm_layer))
self.blocks = nn.LayerList(blocks)
self.norm = norm_layer(embed_dim)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
with paddle.no_grad():
self.cls_token = paddle.create_parameter(self.cls_token.shape, 'float32', attr=nn.initializer.Assign(paddle.normal(self.cls_token, std=.02)))
self.patch_pos = paddle.create_parameter(self.patch_pos.shape, 'float32', attr=nn.initializer.Assign(paddle.normal(self.patch_pos, std=.02)))
self.pixel_pos = paddle.create_parameter(self.pixel_pos.shape, 'float32', attr=nn.initializer.Assign(paddle.normal(self.pixel_pos, std=.02)))
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
with paddle.no_grad():
m.weight = paddle.create_parameter(m.weight.shape, 'float32', attr=nn.initializer.Assign(paddle.normal(m.weight, std=.02)))
if isinstance(m, nn.Linear) and m.bias is not None:
m.bias = paddle.create_parameter(m.bias.shape, 'float32', attr=nn.initializer.Constant(value=0.))
elif isinstance(m, nn.LayerNorm):
m.bias = paddle.create_parameter(m.bias.shape, 'float32', attr=nn.initializer.Constant(value=0.))
m.weight = paddle.create_parameter(m.weight.shape, 'float32', attr=nn.initializer.Constant(value=1.))
def no_weight_decay(self):
return {'patch_pos', 'pixel_pos', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
pixel_embed = self.pixel_embed(x, self.pixel_pos)
patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape((B, self.num_patches, -1)))))
patch_embed = paddle.concat((self.cls_token.expand([B, self.cls_token.shape[1],self.cls_token.shape[2]]), patch_embed), axis=1) # expand
patch_embed = patch_embed + self.patch_pos
patch_embed = self.pos_drop(patch_embed)
for blk in self.blocks:
pixel_embed, patch_embed = blk(pixel_embed, patch_embed)
patch_embed = self.norm(patch_embed)
return patch_embed[:, 0]
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def tnt_s_patch16_224(pretrained=False, **kwargs):
model = TNT(patch_size=16, embed_dim=384, in_dim=24, depth=12, num_heads=6, in_num_head=4,
qkv_bias=False, **kwargs)
model.default_cfg = default_cfgs['tnt_s_patch16_224']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
def tnt_b_patch16_224(pretrained=False, **kwargs):
model = TNT(patch_size=16, embed_dim=640, in_dim=40, depth=12, num_heads=10, in_num_head=4,
qkv_bias=False, **kwargs)
model.default_cfg = default_cfgs['tnt_b_patch16_224']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
| true
| true
|
f71854cf216fd9c15655470c36650db459061d05
| 28,170
|
py
|
Python
|
KSFD/ksfdtimeseries.py
|
leonavery/KSFD
|
090e388df13a2674676cbaa53171f2a87291ba9b
|
[
"MIT"
] | null | null | null |
KSFD/ksfdtimeseries.py
|
leonavery/KSFD
|
090e388df13a2674676cbaa53171f2a87291ba9b
|
[
"MIT"
] | null | null | null |
KSFD/ksfdtimeseries.py
|
leonavery/KSFD
|
090e388df13a2674676cbaa53171f2a87291ba9b
|
[
"MIT"
] | null | null | null |
"""
MPI-aware read and write PETSc Vec to HDF5
The goal of this module is to save snapshots of a PETSc Vec to HDF5
files, and obviously to read them again later. The obvious way to do
this is parallel HDF5. Unfortunately, distributions of HDF5 and h5py
may be built without support for parallel operation. (In particular,
the conda-forge version doesn't have it.) This is accomplished through
the following kludge:
When a KSFD.TimeSeries is created with name tsname and argument mpiok
True, the runtime envirnoment is checked to find out if parallel HDF5
is enabled (using h5py.getconfig().mpi). If so, the data are stored in
an HDF5 file named
'{name}MPI.h5'.format(name=tsname).
Note: there is a serious problem with parallel HDF5: variable length
records can't be written. If you try, you get this exception:
OSError: Can't write data (Parallel IO does not support writing VL
datatypes yet)
Since that makes parallel HDF5 a nonstarter for my purposes, mpiok
defaults to False. You won't get parallel MPI unless you specifically
ask for it, and then dealing with the lack of VL records is your
problem.
If not, each process stores the data it owns in a file named
'{name}s{size}r{rank}.h5'.format(name=tsname, size=comm.size, rank=comm.rank)
where comm is the MPI communicator. If run sequentially the data will
all be stored in a file called '{name}s1r0.h5'. It is intended that
the *MPI.h5 file created using parallele HDF5 and the *s1r0.h5 file
created when running sequentially and parallel HDF5 is not available
will be the same.
The same procedure is used for finding the filename when opening in
read/write mode ('r+' or 'a').
When opening a TimeSeries for read (mode 'r') TimeSeries checks (in
order) for the *s<size>r<rank>.h5 file, then the *MPI.h5 file ,and
finally a *s1r0.h5 file, and opens the first it finds. In this case
the retrieve methods will only return the components of the vector
owned by the local process.
Finally, I will write a simple script to merge all the files of
*s<size>r<rank>.h5 series into a single *MPI.h5 file. In this way an
MPi process group of any size will be able to retrieve data written by
a process group of any size.
"""
import h5py, os, re, gc, time
import traceback as tb
import numpy as np
import petsc4py
from mpi4py import MPI
#
# These imports are placed inside a try/except so that this script can
# be executed standalone to check for syntax errors.
#
try:
from .ksfddebug import log
from .ksfdgrid import Grid
except ImportError:
from ksfddebug import log
from ksfdgrid import Grid
def logSERIES(*args, **kwargs):
log(*args, system='SERIES', **kwargs)
class KSFDTimeSeries:
"""
Base class for TimeSeries
KSFDTimeSeries is intended as an abstract base class for reading and
writing time series from KSFD solutions to HDF5 files. It is not
formally defined as an ABC: you can instantiate it if you really
wish, but it is not designed to make that a useful thing to do.
"""
def __init__(
self,
basename,
size=1,
rank=0,
mpiok=False,
mode='r+',
retries=0,
retry_interval=60
):
"""
Required parameter:
basename: the prefix of the filename.
Optional keyword parameters:
size=1: Number of MPI processes. This typically corresponds to
comm.size for an MPI communicator comm.
rank=0: Number of the MPI process that created this
file. Typically comm.rank.
mpiok=True: Whether parallel HDF5 should be used to store to
store all the data from all MPI processes in a single
file.
mode='r+': The file mode for opening the h5py.File.
        retries=0: If nonzero, retry failed opens this many times.
        retry_interval=60: time (in seconds) between successive
retries. Note: the open will block while waiting for a
successful retry.
size, rank, and mpiok are used mostly to figure out what
filename to use. They need not correspond to the actual
        current MPI configuration. For instance, they may correspond
to the config when the time series was created.
"""
self.get_filename(basename, size, rank, mpiok, mode)
self.retries = retries
self.retry_interval = retry_interval
self._size = size
self._rank = rank
self._mode = mode
self._tsf = self.open_with_retry()
_ = self.info # make sure '/info' exists
self.try_to_set('size', self.size)
self.try_to_set('rank', self.rank)
if 'times' in self.tsf:
self.ts = np.array(self.tsf['times'][()])
try:
self.ks = np.array(self.tsf['ks'][()])
except KeyError:
self.ks = np.arange(len(self.ts))
self.order = np.array(self.tsf['order'][()])
else:
self.ts = np.array([], dtype=float)
self.ks = np.array([], dtype=int)
self.order = np.array([], dtype=int)
self.lastk = self.ks.size - 1
self.sorted = False
self.tsf.flush()
def parse_filename(filename):
"""
filename is a name like 'bases2r1.h5'. parse_filename returns
(basename, size, rank, mpi) (('base', 2, 1, False) for the
        example). For a filename like 'baseMPI.h5', it returns
        ('base', 1, 0, True).
"""
mpipat = '(.*)MPI\.h5'
nompi_pat = '(.*)s(\d+)r(\d+)\.h5'
res = re.fullmatch(mpipat, filename)
if res:
return (res[1], 1, 0, True)
res = re.fullmatch(nompi_pat, filename)
if res:
return (res[1], res[2], res[3], False)
raise ValueError(
"Couldn't parse filename {fname}".format(fname=filename)
)
def set_grid(self, grid):
self._grid = grid
self._dim = grid.dim
self._dof = grid.dof
if self.rank_owns_file:
self._ranges = grid.ranges
# if (
# 'ranges' in self.tsf and
# not np.all(self.tsf['ranges'][()] == self.ranges)
# ):
# raise ValueError(
# "data ranges {filerange} in {file} doesn't " +
# "match grid range {gridrange}".format(
# filerange=str(self.tsf['ranges'][()]),
# file=self.filename,
# gridrange=str(grid.ranges)
# )
# )
self.myslice = (slice(0, None),)*(self.dim + 1)
else:
self._ranges = tuple((0, np) for np in grid.nps)
#
# Slice of the global array belonging to this process:
self.myslice = (slice(0, None),) + tuple(
slice(*r) for r in grid.ranges
)
self.try_to_set('ranges', self.ranges)
def get_filename(self, basename, size=1, rank=0, mpiok=True,
mode='r+'):
"""
Get name of file to be opened by this process
self.filename is set to the name of the HDF5 file to be
opened. This is also returned as the function value. In
addition, the following flags are set:
self.creating: True if creating a new file.
self.rank_owns_file: True if the file will be exclusively
owned by this process.
"""
self.usempi = mpiok and h5py.get_config().mpi
name_nompi = '{name}s{size}r{rank}.h5'.format(
name=basename,
size=size,
rank=rank
)
name_mpi = '{name}MPI.h5'.format(name=basename)
name_seq = '{name}s1r0.h5'.format(name=basename)
self.driver = None
if self.usempi and os.path.isfile(name_mpi):
self.creating = mode[0] == 'w' or mode[0] == 'x'
self.rank_owns_file = size == 1
self.filename = name_mpi
elif self.usempi and (mode[0] == 'w' or mode[0] == 'x'):
self.creating = True
self.rank_owns_file = size == 1
self.filename = name_mpi
elif os.path.isfile(name_nompi):
self.creating = mode[0] == 'w' or mode[0] == 'x'
self.rank_owns_file = True
self.filename = name_nompi
elif (mode == 'r' or mode == 'a') and os.path.isfile(name_seq):
self.creating = False
self.rank_owns_file = size == 1
self.filename = name_seq
        # Allow reading from MPI file even if we're not using MPI:
elif (mode == 'r' or mode == 'a') and os.path.isfile(name_mpi):
self.creating = False
self.rank_owns_file = size == 1
self.filename = name_mpi
else:
self.creating = mode != 'r'
self.rank_owns_file = not self.usempi
self.filename = name_mpi if self.usempi else name_nompi
if self.creating and not self.rank_owns_file and self.usempi:
self.driver = 'mpio'
if self.creating:
os.makedirs(os.path.dirname(self.filename), exist_ok=True)
logSERIES('self.filename', self.filename)
logSERIES('self.creating', self.creating)
logSERIES('self.rank_owns_file', self.rank_owns_file)
logSERIES('self.driver', self.driver)
logSERIES('self.usempi', self.usempi)
return self.filename
def open(self, filename, usempi, mode):
if mode in ['w', 'w-', 'x', 'a']:
dirname = os.path.dirname(os.path.abspath(filename))
try:
os.makedirs(dirname, exist_ok=True)
except FileExistsError:
pass
def grid_save(self):
grid = self.grid
attrs = ['dim', 'dof', 'nps', 'bounds', 'spacing', 'order',
'stencil_width', 'stencil_type', 'boundary_type',
'globalSshape', 'globalVshape', 'globalCshape', 'Slshape',
'Vlshape', 'ranges', 'Clshape', 'Cashape',
'coordsNoGhosts', 'coordsWithGhosts',
]
for a in attrs:
self.try_to_set('/grid/' + a, getattr(grid, a))
def grid_read(self):
"""Reads grid params from open file, returns dict"""
ggroup = self.tsf['grid']
gd = {}
attrs = ['dim', 'dof', 'nps', 'bounds', 'spacing', 'order',
'stencil_width', 'stencil_type', 'boundary_type',
'globalSshape', 'globalVshape', 'globalCshape', 'Slshape',
'Vlshape', 'ranges', 'Clshape', 'Cashape',
'coordsNoGhosts', 'coordsWithGhosts',
]
for a in attrs:
try:
val = ggroup[a][()]
if a.endswith('shape'):
gd[a] = tuple(val)
elif np.isscalar(val):
gd[a] = val.item()
else:
gd[a] = val
except KeyError:
gd[a] = None
gd['width'] = gd['bounds'][0]
gd['height'] = gd['bounds'][1] if gd['dim'] > 1 else 1.0
gd['depth'] = gd['bounds'][2] if gd['dim'] > 2 else 1.0
gd['nx'] = gd['nps'][0]
gd['ny'] = gd['nps'][1] if gd['dim'] > 1 else 8
gd['nz'] = gd['nps'][2] if gd['dim'] > 2 else 8
return gd
def grid_load(self, gd=None):
"""Reads grid params from open file and creates new Grid."""
if gd is None:
gd = self.grid_read()
grid = Grid(
dim=gd['dim'],
width=gd['width'],
height=gd['height'],
depth=gd['depth'],
nx=gd['nx'],
ny=gd['ny'],
nz=gd['nz'],
dof=gd['dof'],
order=gd['order'],
stencil_width=gd['stencil_width'],
stencil_type=gd['stencil_type'],
boundary_type=gd['boundary_type']
)
self.set_grid(grid)
#
# info is a place for caller to store stuff
@property
def info(self):
"""Place for caller to store extra stuff"""
if not hasattr(self, '_info') or not self._info:
self._info = self.tsf.require_group('/info')
return self._info
@property
def tsFile(self):
"""The open h5File object"""
return self._tsf
@property
def tsf(self):
return self._tsf
@property
def size(self):
return self._size
@property
def rank(self):
return self._rank
@property
def mode(self):
return self._mode
@property
def ranges(self):
return self._ranges
@property
def comm(self):
return self._comm
@property
def grid(self):
return self._grid
@property
def dim(self):
return self._dim
@property
def dof(self):
return self._dof
def try_to_set(self, key, val):
"""Try to set self.tsf[key] to val, but ignore exceptions"""
if (self.mode == 'r'): return
try:
del self.tsf[key]
except KeyError:
pass
try:
self.tsf[key] = val
except ValueError:
pass
def _sort(self):
if getattr(self, 'sorted', False): return
ts = getattr(self, 'ts', np.array([]))
self.try_to_set('times', ts)
self.order = ts.argsort()
self.try_to_set('order', self.order)
self.sts = ts
self.sts.sort()
ks = getattr(self, 'ks', [])
lastk = getattr(self, 'lastk', -1)
self.try_to_set('ks', ks)
self.try_to_set('lastk', lastk)
self.sorted = True
def flush(self):
self._sort()
self.tsf.flush()
def temp_close(self):
"""
temp_close closes the HDF5 file in which the TimeSeries is
stored without destroying associated information. The file
can be reopened with little loss of time. temp_close and
reopen are intended for use during long solutions. If there is
a crash during solution, a temp-closed TimeSeries will be left
in a valid state for later use.
"""
self._sort()
self.tsf.close()
def open_with_retry(
self,
fname=None,
mode=None,
driver=None,
comm=None
):
if fname is None:
fname = self.filename
if mode is None:
mode = self.mode
if driver is None:
driver = self.driver
if comm is None:
comm = self.comm
if isinstance(comm, petsc4py.PETSc.Comm):
comm = comm.tompi4py()
logSERIES('fname, mode, driver, comm', fname, mode, driver, comm)
try:
if driver == 'mpio':
logSERIES('trying 4-argument open')
comm.Barrier()
logSERIES('comm.rank, comm.size', comm.rank, comm.size)
tsf = h5py.File(fname, mode=mode,
driver=driver, comm=comm)
else:
logSERIES('trying 3-argument open')
tsf = h5py.File(fname, mode=mode,
driver=driver)
except OSError:
retries_left = self.retries
if retries_left <= 0:
logSERIES('open failed: re-raising exception')
raise
while retries_left > 0:
logSERIES('reopen failed with OSError: {n} retries left'.format(
n=retries_left
))
logSERIES('tb.format_exc()', tb.format_exc())
time.sleep(self.retry_interval)
try:
if driver == 'mpio':
logSERIES('trying 4-argument open')
comm.Barrier()
logSERIES('comm.rank, comm.size', comm.rank, comm.size)
tsf = h5py.File(fname, mode=mode,
driver=driver, comm=comm)
else:
logSERIES('trying 3-argument open')
tsf = h5py.File(fname, mode=mode,
driver=driver)
failed = False
except OSError:
failed = True
if retries_left <= 1:
raise
if not failed:
break
retries_left -= 1
return tsf
def reopen(self):
"""
Reopen a temp_closed TimeSeries
"""
mode = self.mode if self.mode == 'r' else 'r+'
self._tsf = self.open_with_retry(mode=mode)
def close(self):
if not hasattr(self, '_tsf') or not self._tsf:
self.reopen()
self._sort()
self.tsf.close()
del self._tsf
gc.collect()
# def __del__(self):
# self.close()
def store(self, data, t, k=None):
if isinstance(data, petsc4py.PETSc.Vec):
vals = data.array.reshape(self.grid.Vlshape, order='F')
else:
vals = data.reshape(self.grid.Vlshape, order='F')
logSERIES('k, t', k, t)
if k is None:
k = self.lastk + 1
self.lastk = k
self.ks = np.append(self.ks, k)
self.ts = np.append(self.ts, t)
key = 'data' + str(k)
try:
dset = self.tsf.create_dataset(key, self.grid.Vlshape,
dtype=vals.dtype)
except OSError:
dset = self.tsf[key] # dset already exists
Cvals = vals.copy(order='C') # h5py requires C order
if self.rank_owns_file:
dset.write_direct(Cvals)
else:
dset[self.myslice] = Cvals
dset.attrs['k'] = k
dset.attrs['t'] = t
self.sorted = False
self.tsf.flush()
def store_slice(self, ranges, data, t, tol=1e-7):
shape = (self.grid.dof,) + tuple(
r[1] - r[0] for r in ranges
)
slc = (slice(0, None),) + tuple(
slice(*r) for r in ranges
)
vals = data.reshape(shape, order='F')
na, nb, ta, tb = self.find_time(t)
logSERIES('na, nb, ta, tb', na, nb, ta, tb)
if abs(t-ta) <= abs(tb-t):
n, tn = na, ta
else:
n, tn = nb, tb
if (
(not (t == 0.0 and tn == 0.0)) and
((self.sts.size <= n) or
(abs(t-tn)/max(abs(t), abs(tn)) > tol))
):
#
# New time point: append it to the lists
#
k = self.lastk + 1
self.lastk = k
self.ks = np.append(self.ks, k)
self.ts = np.append(self.ts, t)
key = 'data' + str(k)
dset = self.tsf.create_dataset(key, self.grid.Vlshape,
dtype=vals.dtype)
logSERIES('k, t', k, t)
dset.attrs['k'] = k
dset.attrs['t'] = t
self.sorted = False
else:
k = n
key = 'data' + str(k)
dset = self.tsf[key]
dset[slc] = vals
self.tsf.flush()
def times(self):
self._sort()
return self.ts
def steps(self):
self._sort()
return self.ks
def sorted_times(self):
self._sort()
return self.sts
def sorted_steps(self):
self._sort()
return self.order
def retrieve_by_number(self, k):
key = 'data' + str(k)
dset = self.tsf[key]
if self.rank_owns_file:
return np.array(dset)
else:
return np.array(dset)[self.myslice]
def find_time(self, t):
"""
Find the time points closest to t
Returns tuple (a, b, ta, tb)
a and b are the numbers (ints) of the points flanking t. ta
and tb (floats) are the corresponding times. If there is a
        time point exactly matching t, then a == b, ta == tb == t.
"""
self._sort()
if self.sts.size == 0:
return (0, 0, t - 1.0, t - 1.0)
if (t <= self.sts[0]):
a = 0
return (self.ks[a], self.ks[a], self.sts[a], self.sts[a])
elif (t >= self.sts[-1]):
a = len(self.sts) - 1
return (self.ks[a], self.ks[a], self.sts[a], self.sts[a])
else:
b = self.sts.searchsorted(t)
nb = self.order[b]
tb = self.sts[b]
if (b >= len(self.order) - 1):
return(b, b, self.sts[b], self.sts[b])
elif tb == t:
return(b, b, tb, tb)
a = b - 1
na = self.order[a]
ta = self.sts[a]
return (a, b, ta, tb)
def retrieve_by_time(self, t):
"""
Retrieve a time point.
Arguments:
t: the time to be retrieved.
"""
na, nb, ta, tb = self.find_time(t)
adata = self.retrieve_by_number(na)
if na == nb:
return adata
bdata = self.retrieve_by_number(nb)
data = ((t-ta)*bdata + (tb-t)*adata)/(tb-ta)
return(data)
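# Worked example of the interpolation in retrieve_by_time (illustrative only;
# the numbers are made up): if the stored times flanking t=2.5 are ta=2.0 and
# tb=3.0, the returned data is
#   ((2.5-2.0)*bdata + (3.0-2.5)*adata)/(3.0-2.0) = 0.5*adata + 0.5*bdata,
# i.e. a linear interpolation between the two stored snapshots.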
class TimeSeries(KSFDTimeSeries):
def __init__(
self,
basename,
grid=None,
comm=None,
mpiok=False,
mode='r+',
retries=0,
retry_interval=60
):
"""
Open a KSFD.TimeSeries
Required parameters:
basename: the name of the TimeSeries. (This is a prefix of the
names of the HDF5 files in which data are stored.)
Optional parameters:
grid: The KSFD.Grid on which the PETSc Vecs to be saved are
defined. This must be supplied when creating a new
            TimeSeries. When opening an existing series, it will be
read from the file if not supplied.
comm: the MPI communicator. (If not supplied, grid.comm is
used.)
mpiok=False: whether it is Ok to use parallel HDF5.
        mode: the file mode (see h5py.File).
        retries=0: If nonzero, retry failed opens this many times.
        retry_interval=60: time (in seconds) between successive
retries. Note: the open will block while waiting for a
successful retry.
"""
if comm:
self._comm = comm
elif grid:
self._comm = grid.comm
else:
self._comm = MPI.COMM_SELF
self._mode = mode
self._size = self.comm.size
self._rank = self.comm.rank
self.mpiok = mpiok
super().__init__(basename, size=self.size, rank=self.rank,
mpiok=mpiok, mode=mode, retries=retries,
retry_interval=retry_interval)
if (grid):
self.set_grid(grid)
self.grid_save()
else:
self.grid_load()
class Gatherer(KSFDTimeSeries):
"""
Gatherer is a special-purpose iterator to allow a single
sequential process to read the separate files written by a
TimeSeries run under MPI. For instance, to reconstruct the global
vector at the last time (assuming it fits in memory in a single
process):
gather = Gatherer(basename='base', size=4)
grid = gather.grid
lastk = gather.sorted_steps()[-1]
vec = grid.Vdmda.createGlobalVec()
vecarray = vec.array.reshape(grid.globalVshape, order='F')
for series in gather:
rank = series.rank
vecarray[series.slice] = series.retrieve_by_number(lastk)
<do something with vec...>
This gatherer would iterate through files bases4r0.h5,
bases4r1.h5, bases4r2.h5, and bases4r3.h5. Note that with every
iteration it closes the last file and opens the next. Thus, if you
want to iterate over all times, it is more efficient to nest the
loops like this:
for series in gather:
for t in series.times():
<do something for this file at this time)
than the other way. (The other way would be more intuitive, but my
expectation is that this class will be used mostly to gather all
TimeSeries files into a single file, which then can be processed
efficiently as a TimeSeries.)
"""
def __init__(
self,
basename,
size=None,
retries=0,
retry_interval=60
):
"""
Required positional parameter
basename: the prefix of the filenames for the TimeSeries being
read. As a convenience, this can be a special filename
            that matches the regular expression '(.+)s(\d+)@.*' (the
            '@' is literal). The basename is then the (.+) and the
            size is the (\d+) following the 's' and preceding the
            '@'. For example, 'bases4@' or 'bases4@.h5' would both
serve for a series with basename 'base' and size 4.
Optional keyword parameter:
size=None: This argument can be omitted only if the basename
has the special @ filename format. Otherwise, it must be
supplied.
Gatherer is read-only (mode 'r').
"""
self._comm = MPI.COMM_SELF
self.retries = retries
self.retry_interval = retry_interval
gatherre = '(.+)s(\d+)@.*'
fname_match = re.fullmatch(gatherre, basename)
if fname_match:
base = fname_match[1]
size = int(fname_match[2])
else:
base = basename
size = size
self.basename = base
if not isinstance(size, int) or size <= 0:
raise ValueError(
'size {size} is not a positive int'
)
#
# This opens the first file. We have to do that so as to read
# and initialize things like grid, times, etc.
#
super().__init__(
basename=base,
size=size,
rank=0,
mpiok=False,
mode='r',
retries=retries,
retry_interval=retry_interval
)
self.set_ranges()
#
        # Since we have to open the rank 0 file before starting
# iteration, the following flag is used to determine whether
# to open a new file when __iter__ is called
#
self.iter_started = False
self.iter_stopped = False
def set_ranges(self):
self.rank_owns_file = True
gd = self.grid_read()
self.grid_load(gd)
self._ranges = gd['ranges']
self._shape = (self.dof,) + tuple(
r[1] - r[0] for r in self.ranges
)
self._slice = (slice(0, None),) + tuple(
slice(*r) for r in self.ranges
)
@property
def slice(self):
return self._slice
@property
def shape(self):
return self._shape
def __iter__(self):
return self
def __next__(self):
if self.iter_stopped:
#
# We previously exhausted the iteration. Restart it
#
self.tsf.close()
self.__init__(self.basename,
self.size,
retries=self.retries,
retry_interval=self.retry_interval
)
elif self.iter_started:
#
# We're not just starting: move on to next file
#
self.tsf.close()
self._rank = self.rank + 1
if self.rank >= self.size:
self.iter_stopped = True
raise StopIteration
super().__init__(
basename=self.basename,
size=self.size,
rank=self.rank,
mpiok=False,
mode='r',
retries=self.retries,
retry_interval=self.retry_interval
)
self.set_ranges()
self.iter_started = True
self.iter_stopped = False
return self
| 33.939759
| 80
| 0.540469
|
import h5py, os, re, gc, time
import traceback as tb
import numpy as np
import petsc4py
from mpi4py import MPI
try:
from .ksfddebug import log
from .ksfdgrid import Grid
except ImportError:
from ksfddebug import log
from ksfdgrid import Grid
def logSERIES(*args, **kwargs):
log(*args, system='SERIES', **kwargs)
class KSFDTimeSeries:
def __init__(
self,
basename,
size=1,
rank=0,
mpiok=False,
mode='r+',
retries=0,
retry_interval=60
):
self.get_filename(basename, size, rank, mpiok, mode)
self.retries = retries
self.retry_interval = retry_interval
self._size = size
self._rank = rank
self._mode = mode
self._tsf = self.open_with_retry()
_ = self.info
self.try_to_set('size', self.size)
self.try_to_set('rank', self.rank)
if 'times' in self.tsf:
self.ts = np.array(self.tsf['times'][()])
try:
self.ks = np.array(self.tsf['ks'][()])
except KeyError:
self.ks = np.arange(len(self.ts))
self.order = np.array(self.tsf['order'][()])
else:
self.ts = np.array([], dtype=float)
self.ks = np.array([], dtype=int)
self.order = np.array([], dtype=int)
self.lastk = self.ks.size - 1
self.sorted = False
self.tsf.flush()
def parse_filename(filename):
mpipat = '(.*)MPI\.h5'
nompi_pat = '(.*)s(\d+)r(\d+)\.h5'
res = re.fullmatch(mpipat, filename)
if res:
return (res[1], 1, 0, True)
res = re.fullmatch(nompi_pat, filename)
if res:
return (res[1], res[2], res[3], False)
raise ValueError(
"Couldn't parse filename {fname}".format(fname=filename)
)
def set_grid(self, grid):
self._grid = grid
self._dim = grid.dim
self._dof = grid.dof
if self.rank_owns_file:
self._ranges = grid.ranges
# if (
# 'ranges' in self.tsf and
# not np.all(self.tsf['ranges'][()] == self.ranges)
# ):
# raise ValueError(
# "data ranges {filerange} in {file} doesn't " +
self.myslice = (slice(0, None),)*(self.dim + 1)
else:
self._ranges = tuple((0, np) for np in grid.nps)
self.myslice = (slice(0, None),) + tuple(
slice(*r) for r in grid.ranges
)
self.try_to_set('ranges', self.ranges)
def get_filename(self, basename, size=1, rank=0, mpiok=True,
mode='r+'):
self.usempi = mpiok and h5py.get_config().mpi
name_nompi = '{name}s{size}r{rank}.h5'.format(
name=basename,
size=size,
rank=rank
)
name_mpi = '{name}MPI.h5'.format(name=basename)
name_seq = '{name}s1r0.h5'.format(name=basename)
self.driver = None
if self.usempi and os.path.isfile(name_mpi):
self.creating = mode[0] == 'w' or mode[0] == 'x'
self.rank_owns_file = size == 1
self.filename = name_mpi
elif self.usempi and (mode[0] == 'w' or mode[0] == 'x'):
self.creating = True
self.rank_owns_file = size == 1
self.filename = name_mpi
elif os.path.isfile(name_nompi):
self.creating = mode[0] == 'w' or mode[0] == 'x'
self.rank_owns_file = True
self.filename = name_nompi
elif (mode == 'r' or mode == 'a') and os.path.isfile(name_seq):
self.creating = False
self.rank_owns_file = size == 1
self.filename = name_seq
elif (mode == 'r' or mode == 'a') and os.path.isfile(name_mpi):
self.creating = False
self.rank_owns_file = size == 1
self.filename = name_mpi
else:
self.creating = mode != 'r'
self.rank_owns_file = not self.usempi
self.filename = name_mpi if self.usempi else name_nompi
if self.creating and not self.rank_owns_file and self.usempi:
self.driver = 'mpio'
if self.creating:
os.makedirs(os.path.dirname(self.filename), exist_ok=True)
logSERIES('self.filename', self.filename)
logSERIES('self.creating', self.creating)
logSERIES('self.rank_owns_file', self.rank_owns_file)
logSERIES('self.driver', self.driver)
logSERIES('self.usempi', self.usempi)
return self.filename
def open(self, filename, usempi, mode):
if mode in ['w', 'w-', 'x', 'a']:
dirname = os.path.dirname(os.path.abspath(filename))
try:
os.makedirs(dirname, exist_ok=True)
except FileExistsError:
pass
def grid_save(self):
grid = self.grid
attrs = ['dim', 'dof', 'nps', 'bounds', 'spacing', 'order',
'stencil_width', 'stencil_type', 'boundary_type',
'globalSshape', 'globalVshape', 'globalCshape', 'Slshape',
'Vlshape', 'ranges', 'Clshape', 'Cashape',
'coordsNoGhosts', 'coordsWithGhosts',
]
for a in attrs:
self.try_to_set('/grid/' + a, getattr(grid, a))
def grid_read(self):
ggroup = self.tsf['grid']
gd = {}
attrs = ['dim', 'dof', 'nps', 'bounds', 'spacing', 'order',
'stencil_width', 'stencil_type', 'boundary_type',
'globalSshape', 'globalVshape', 'globalCshape', 'Slshape',
'Vlshape', 'ranges', 'Clshape', 'Cashape',
'coordsNoGhosts', 'coordsWithGhosts',
]
for a in attrs:
try:
val = ggroup[a][()]
if a.endswith('shape'):
gd[a] = tuple(val)
elif np.isscalar(val):
gd[a] = val.item()
else:
gd[a] = val
except KeyError:
gd[a] = None
gd['width'] = gd['bounds'][0]
gd['height'] = gd['bounds'][1] if gd['dim'] > 1 else 1.0
gd['depth'] = gd['bounds'][2] if gd['dim'] > 2 else 1.0
gd['nx'] = gd['nps'][0]
gd['ny'] = gd['nps'][1] if gd['dim'] > 1 else 8
gd['nz'] = gd['nps'][2] if gd['dim'] > 2 else 8
return gd
def grid_load(self, gd=None):
if gd is None:
gd = self.grid_read()
grid = Grid(
dim=gd['dim'],
width=gd['width'],
height=gd['height'],
depth=gd['depth'],
nx=gd['nx'],
ny=gd['ny'],
nz=gd['nz'],
dof=gd['dof'],
order=gd['order'],
stencil_width=gd['stencil_width'],
stencil_type=gd['stencil_type'],
boundary_type=gd['boundary_type']
)
self.set_grid(grid)
#
# info is a place for caller to store stuff
@property
def info(self):
if not hasattr(self, '_info') or not self._info:
self._info = self.tsf.require_group('/info')
return self._info
@property
def tsFile(self):
return self._tsf
@property
def tsf(self):
return self._tsf
@property
def size(self):
return self._size
@property
def rank(self):
return self._rank
@property
def mode(self):
return self._mode
@property
def ranges(self):
return self._ranges
@property
def comm(self):
return self._comm
@property
def grid(self):
return self._grid
@property
def dim(self):
return self._dim
@property
def dof(self):
return self._dof
def try_to_set(self, key, val):
if (self.mode == 'r'): return
try:
del self.tsf[key]
except KeyError:
pass
try:
self.tsf[key] = val
except ValueError:
pass
def _sort(self):
if getattr(self, 'sorted', False): return
ts = getattr(self, 'ts', np.array([]))
self.try_to_set('times', ts)
self.order = ts.argsort()
self.try_to_set('order', self.order)
self.sts = ts
self.sts.sort()
ks = getattr(self, 'ks', [])
lastk = getattr(self, 'lastk', -1)
self.try_to_set('ks', ks)
self.try_to_set('lastk', lastk)
self.sorted = True
def flush(self):
self._sort()
self.tsf.flush()
def temp_close(self):
self._sort()
self.tsf.close()
def open_with_retry(
self,
fname=None,
mode=None,
driver=None,
comm=None
):
if fname is None:
fname = self.filename
if mode is None:
mode = self.mode
if driver is None:
driver = self.driver
if comm is None:
comm = self.comm
if isinstance(comm, petsc4py.PETSc.Comm):
comm = comm.tompi4py()
logSERIES('fname, mode, driver, comm', fname, mode, driver, comm)
try:
if driver == 'mpio':
logSERIES('trying 4-argument open')
comm.Barrier()
logSERIES('comm.rank, comm.size', comm.rank, comm.size)
tsf = h5py.File(fname, mode=mode,
driver=driver, comm=comm)
else:
logSERIES('trying 3-argument open')
tsf = h5py.File(fname, mode=mode,
driver=driver)
except OSError:
retries_left = self.retries
if retries_left <= 0:
logSERIES('open failed: re-raising exception')
raise
while retries_left > 0:
logSERIES('reopen failed with OSError: {n} retries left'.format(
n=retries_left
))
logSERIES('tb.format_exc()', tb.format_exc())
time.sleep(self.retry_interval)
try:
if driver == 'mpio':
logSERIES('trying 4-argument open')
comm.Barrier()
logSERIES('comm.rank, comm.size', comm.rank, comm.size)
tsf = h5py.File(fname, mode=mode,
driver=driver, comm=comm)
else:
logSERIES('trying 3-argument open')
tsf = h5py.File(fname, mode=mode,
driver=driver)
failed = False
except OSError:
failed = True
if retries_left <= 1:
raise
if not failed:
break
retries_left -= 1
return tsf
def reopen(self):
mode = self.mode if self.mode == 'r' else 'r+'
self._tsf = self.open_with_retry(mode=mode)
def close(self):
if not hasattr(self, '_tsf') or not self._tsf:
self.reopen()
self._sort()
self.tsf.close()
del self._tsf
gc.collect()
# def __del__(self):
# self.close()
def store(self, data, t, k=None):
if isinstance(data, petsc4py.PETSc.Vec):
vals = data.array.reshape(self.grid.Vlshape, order='F')
else:
vals = data.reshape(self.grid.Vlshape, order='F')
logSERIES('k, t', k, t)
if k is None:
k = self.lastk + 1
self.lastk = k
self.ks = np.append(self.ks, k)
self.ts = np.append(self.ts, t)
key = 'data' + str(k)
try:
dset = self.tsf.create_dataset(key, self.grid.Vlshape,
dtype=vals.dtype)
except OSError:
dset = self.tsf[key] # dset already exists
Cvals = vals.copy(order='C') # h5py requires C order
if self.rank_owns_file:
dset.write_direct(Cvals)
else:
dset[self.myslice] = Cvals
dset.attrs['k'] = k
dset.attrs['t'] = t
self.sorted = False
self.tsf.flush()
def store_slice(self, ranges, data, t, tol=1e-7):
shape = (self.grid.dof,) + tuple(
r[1] - r[0] for r in ranges
)
slc = (slice(0, None),) + tuple(
slice(*r) for r in ranges
)
vals = data.reshape(shape, order='F')
na, nb, ta, tb = self.find_time(t)
logSERIES('na, nb, ta, tb', na, nb, ta, tb)
if abs(t-ta) <= abs(tb-t):
n, tn = na, ta
else:
n, tn = nb, tb
if (
(not (t == 0.0 and tn == 0.0)) and
((self.sts.size <= n) or
(abs(t-tn)/max(abs(t), abs(tn)) > tol))
):
#
# New time point: append it to the lists
#
k = self.lastk + 1
self.lastk = k
self.ks = np.append(self.ks, k)
self.ts = np.append(self.ts, t)
key = 'data' + str(k)
dset = self.tsf.create_dataset(key, self.grid.Vlshape,
dtype=vals.dtype)
logSERIES('k, t', k, t)
dset.attrs['k'] = k
dset.attrs['t'] = t
self.sorted = False
else:
k = n
key = 'data' + str(k)
dset = self.tsf[key]
dset[slc] = vals
self.tsf.flush()
def times(self):
self._sort()
return self.ts
def steps(self):
self._sort()
return self.ks
def sorted_times(self):
self._sort()
return self.sts
def sorted_steps(self):
self._sort()
return self.order
def retrieve_by_number(self, k):
key = 'data' + str(k)
dset = self.tsf[key]
if self.rank_owns_file:
return np.array(dset)
else:
return np.array(dset)[self.myslice]
def find_time(self, t):
self._sort()
if self.sts.size == 0:
return (0, 0, t - 1.0, t - 1.0)
if (t <= self.sts[0]):
a = 0
return (self.ks[a], self.ks[a], self.sts[a], self.sts[a])
elif (t >= self.sts[-1]):
a = len(self.sts) - 1
return (self.ks[a], self.ks[a], self.sts[a], self.sts[a])
else:
b = self.sts.searchsorted(t)
nb = self.order[b]
tb = self.sts[b]
if (b >= len(self.order) - 1):
return(b, b, self.sts[b], self.sts[b])
elif tb == t:
return(b, b, tb, tb)
a = b - 1
na = self.order[a]
ta = self.sts[a]
return (a, b, ta, tb)
def retrieve_by_time(self, t):
na, nb, ta, tb = self.find_time(t)
adata = self.retrieve_by_number(na)
if na == nb:
return adata
bdata = self.retrieve_by_number(nb)
data = ((t-ta)*bdata + (tb-t)*adata)/(tb-ta)
return(data)
class TimeSeries(KSFDTimeSeries):
def __init__(
self,
basename,
grid=None,
comm=None,
mpiok=False,
mode='r+',
retries=0,
retry_interval=60
):
if comm:
self._comm = comm
elif grid:
self._comm = grid.comm
else:
self._comm = MPI.COMM_SELF
self._mode = mode
self._size = self.comm.size
self._rank = self.comm.rank
self.mpiok = mpiok
super().__init__(basename, size=self.size, rank=self.rank,
mpiok=mpiok, mode=mode, retries=retries,
retry_interval=retry_interval)
if (grid):
self.set_grid(grid)
self.grid_save()
else:
self.grid_load()
class Gatherer(KSFDTimeSeries):
def __init__(
self,
basename,
size=None,
retries=0,
retry_interval=60
):
self._comm = MPI.COMM_SELF
self.retries = retries
self.retry_interval = retry_interval
        gatherre = r'(.+)s(\d+)@.*'
fname_match = re.fullmatch(gatherre, basename)
if fname_match:
base = fname_match[1]
size = int(fname_match[2])
else:
base = basename
size = size
self.basename = base
if not isinstance(size, int) or size <= 0:
raise ValueError(
                'size {size} is not a positive int'.format(size=size)
)
#
# This opens the first file. We have to do that so as to read
# and initialize things like grid, times, etc.
#
super().__init__(
basename=base,
size=size,
rank=0,
mpiok=False,
mode='r',
retries=retries,
retry_interval=retry_interval
)
self.set_ranges()
#
        # Since we have to open the rank 0 file before starting
# iteration, the following flag is used to determine whether
# to open a new file when __iter__ is called
#
self.iter_started = False
self.iter_stopped = False
def set_ranges(self):
self.rank_owns_file = True
gd = self.grid_read()
self.grid_load(gd)
self._ranges = gd['ranges']
self._shape = (self.dof,) + tuple(
r[1] - r[0] for r in self.ranges
)
self._slice = (slice(0, None),) + tuple(
slice(*r) for r in self.ranges
)
@property
def slice(self):
return self._slice
@property
def shape(self):
return self._shape
def __iter__(self):
return self
def __next__(self):
if self.iter_stopped:
#
# We previously exhausted the iteration. Restart it
#
self.tsf.close()
self.__init__(self.basename,
self.size,
retries=self.retries,
retry_interval=self.retry_interval
)
elif self.iter_started:
#
# We're not just starting: move on to next file
self.tsf.close()
self._rank = self.rank + 1
if self.rank >= self.size:
self.iter_stopped = True
raise StopIteration
super().__init__(
basename=self.basename,
size=self.size,
rank=self.rank,
mpiok=False,
mode='r',
retries=self.retries,
retry_interval=self.retry_interval
)
self.set_ranges()
self.iter_started = True
self.iter_stopped = False
return self
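# --- Usage sketch (editorial addition, not part of the original module). ---
# It assumes a previous run produced per-rank HDF5 files such as
# 'out/solns4r0.h5' ... 'out/solns4r3.h5' (the '<base>s<size>r<rank>.h5'
# naming handled by get_filename above); the basename and size below are
# placeholders, not values taken from the source.
if __name__ == '__main__':
    gatherer = Gatherer(basename='out/soln', size=4)
    for piece in gatherer:
        # each pass repositions the Gatherer on the next rank's file;
        # slice/shape describe that rank's portion of the global grid
        t_last = piece.sorted_times()[-1]
        print(piece.rank, piece.shape, piece.retrieve_by_time(t_last).shape)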
| true
| true
|
f71855f208ab8ced26bd5c8f92d1e3a2a33c6e63
| 3,436
|
py
|
Python
|
infra/src/custom_constructs/construct_sagemaker_role.py
|
elangovana/pubmed-bpe-tokeniser
|
d5268280c11403a5fe4e740bd1b1953ed1fb5792
|
[
"Apache-2.0"
] | 1
|
2020-10-25T11:25:05.000Z
|
2020-10-25T11:25:05.000Z
|
infra/src/custom_constructs/construct_sagemaker_role.py
|
elangovana/pubmed-bpe-tokeniser
|
d5268280c11403a5fe4e740bd1b1953ed1fb5792
|
[
"Apache-2.0"
] | null | null | null |
infra/src/custom_constructs/construct_sagemaker_role.py
|
elangovana/pubmed-bpe-tokeniser
|
d5268280c11403a5fe4e740bd1b1953ed1fb5792
|
[
"Apache-2.0"
] | null | null | null |
# *****************************************************************************
# * Copyright 2020 Amazon.com, Inc. and its affiliates. All Rights Reserved. *
# *
# Licensed under the Amazon Software License (the "License"). *
# You may not use this file except in compliance with the License. *
# A copy of the License is located at *
# *
# http://aws.amazon.com/asl/ *
# *
# or in the "license" file accompanying this file. This file is distributed *
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either *
# express or implied. See the License for the specific language governing *
# permissions and limitations under the License. *
# *****************************************************************************
from aws_cdk import aws_iam, core
from aws_cdk.aws_iam import IManagedPolicy, ServicePrincipal
class ConstructSageMakerRole(aws_iam.Role):
"""
Custom SageMaker role construct , with minimum permissions required to run the preprocessor
"""
def __init__(self, scope: core.Construct, id: str, managed_policy: IManagedPolicy, role_name: str = None):
# S3 Bucket for SageMaker internal access
s3_sagemaker_bucket_access = aws_iam.PolicyDocument(
statements=[
# S3 SageMaker Internal access
aws_iam.PolicyStatement(actions=["s3:GetObject",
"s3:PutObject",
"s3:ListBucket"],
resources=["arn:aws:s3:::*sagemaker*"])
]
)
# SageMaker Cloud Watch Access
cloudwatch_access = aws_iam.PolicyDocument(
statements=[aws_iam.PolicyStatement(actions=["cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents"],
resources=["*"])
])
super().__init__(scope, id,
assumed_by=ServicePrincipal("sagemaker.amazonaws.com"),
description="The sagemaker role to access the data and ecr",
inline_policies={
"S3SageMakerBucketAccess": s3_sagemaker_bucket_access,
"CloudWatchAccess": cloudwatch_access
},
managed_policies=[managed_policy], role_name=role_name
)
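# --- Usage sketch (editorial addition): attaching the construct to a CDK stack.
# The stack name, construct id and managed policy below are placeholders chosen
# for illustration; only the constructor signature comes from the class above.
class ExampleStack(core.Stack):
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # grant image pulls in addition to the inline S3/CloudWatch policies
        ecr_read = aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonEC2ContainerRegistryReadOnly")
        self.sagemaker_role = ConstructSageMakerRole(
            self, "SageMakerPreprocessorRole", managed_policy=ecr_read)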
| 54.539683
| 110
| 0.425786
|
from aws_cdk import aws_iam, core
from aws_cdk.aws_iam import IManagedPolicy, ServicePrincipal
class ConstructSageMakerRole(aws_iam.Role):
def __init__(self, scope: core.Construct, id: str, managed_policy: IManagedPolicy, role_name: str = None):
s3_sagemaker_bucket_access = aws_iam.PolicyDocument(
statements=[
aws_iam.PolicyStatement(actions=["s3:GetObject",
"s3:PutObject",
"s3:ListBucket"],
resources=["arn:aws:s3:::*sagemaker*"])
]
)
cloudwatch_access = aws_iam.PolicyDocument(
statements=[aws_iam.PolicyStatement(actions=["cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents"],
resources=["*"])
])
super().__init__(scope, id,
assumed_by=ServicePrincipal("sagemaker.amazonaws.com"),
description="The sagemaker role to access the data and ecr",
inline_policies={
"S3SageMakerBucketAccess": s3_sagemaker_bucket_access,
"CloudWatchAccess": cloudwatch_access
},
managed_policies=[managed_policy], role_name=role_name
)
| true
| true
|
f71856627f584e51686016c94360ad7a2de56085
| 1,143
|
py
|
Python
|
aoc2021/day7.py
|
jonsth131/aoc
|
f5d82bdcdeb2eea13dec3135dd0590b4a3bf1ebd
|
[
"MIT"
] | null | null | null |
aoc2021/day7.py
|
jonsth131/aoc
|
f5d82bdcdeb2eea13dec3135dd0590b4a3bf1ebd
|
[
"MIT"
] | null | null | null |
aoc2021/day7.py
|
jonsth131/aoc
|
f5d82bdcdeb2eea13dec3135dd0590b4a3bf1ebd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import fileutils
def part1(lst):
positions = parse_positions(lst)
def calc(x, i):
return abs(x - i)
return get_min_fuel(positions, calc)
def part2(lst):
positions = parse_positions(lst)
def calc(x, i):
n = abs(x - i)
return n * (n + 1) / 2
return get_min_fuel(positions, calc)
def get_min_fuel(positions, calculation):
fuel = [None] * max(positions.keys())
for i in range(len(fuel)):
value = 0
for x in positions.keys():
value += calculation(x, i) * positions.get(x)
fuel[i] = int(value)
return min(fuel)
def parse_positions(data):
positions = {}
for value in [int(x) for x in data.split(',')]:
existing = positions.get(value)
if existing is None:
positions.update({value: 1})
else:
positions.update({value: existing + 1})
return positions
if __name__ == "__main__":
challenge_input = fileutils.read_lines("inputs/day7.txt")[0]
print("=== Day 7 ===")
print("Part 1:", part1(challenge_input))
print("Part 2:", part2(challenge_input))
| 22.411765
| 64
| 0.593176
|
import fileutils
def part1(lst):
positions = parse_positions(lst)
def calc(x, i):
return abs(x - i)
return get_min_fuel(positions, calc)
def part2(lst):
positions = parse_positions(lst)
def calc(x, i):
n = abs(x - i)
return n * (n + 1) / 2
return get_min_fuel(positions, calc)
def get_min_fuel(positions, calculation):
fuel = [None] * max(positions.keys())
for i in range(len(fuel)):
value = 0
for x in positions.keys():
value += calculation(x, i) * positions.get(x)
fuel[i] = int(value)
return min(fuel)
def parse_positions(data):
positions = {}
for value in [int(x) for x in data.split(',')]:
existing = positions.get(value)
if existing is None:
positions.update({value: 1})
else:
positions.update({value: existing + 1})
return positions
if __name__ == "__main__":
challenge_input = fileutils.read_lines("inputs/day7.txt")[0]
print("=== Day 7 ===")
print("Part 1:", part1(challenge_input))
print("Part 2:", part2(challenge_input))
| true
| true
|
f71857a3cbaddb52fc4da082f504fcbc5c405bd9
| 7,297
|
py
|
Python
|
tensorflow/python/kernel_tests/manip_ops_test.py
|
knightvishal/tensorflow
|
5d3dd19b7146d954fc1b4e9e44e9881e75d363c1
|
[
"Apache-2.0"
] | 52
|
2018-11-12T06:39:35.000Z
|
2022-03-08T05:31:27.000Z
|
tensorflow/python/kernel_tests/manip_ops_test.py
|
knightvishal/tensorflow
|
5d3dd19b7146d954fc1b4e9e44e9881e75d363c1
|
[
"Apache-2.0"
] | 2
|
2018-12-04T08:35:40.000Z
|
2020-10-22T16:17:39.000Z
|
tensorflow/python/kernel_tests/manip_ops_test.py
|
knightvishal/tensorflow
|
5d3dd19b7146d954fc1b4e9e44e9881e75d363c1
|
[
"Apache-2.0"
] | 17
|
2019-03-11T01:17:16.000Z
|
2022-02-21T00:44:47.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for manip_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import manip_ops
from tensorflow.python.platform import test as test_lib
# pylint: disable=g-import-not-at-top
try:
from distutils.version import StrictVersion as Version
# numpy.roll for multiple shifts was introduced in numpy version 1.12.0
NP_ROLL_CAN_MULTISHIFT = Version(np.version.version) >= Version("1.12.0")
except ImportError:
NP_ROLL_CAN_MULTISHIFT = False
# pylint: enable=g-import-not-at-top
class RollTest(test_util.TensorFlowTestCase):
def _testRoll(self, np_input, shift, axis):
expected_roll = np.roll(np_input, shift, axis)
with self.cached_session():
roll = manip_ops.roll(np_input, shift, axis)
self.assertAllEqual(roll.eval(), expected_roll)
def _testGradient(self, np_input, shift, axis):
with self.cached_session():
inx = constant_op.constant(np_input.tolist())
xs = list(np_input.shape)
y = manip_ops.roll(inx, shift, axis)
# Expected y's shape to be the same
ys = xs
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, y, ys, x_init_value=np_input)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testAll(self, np_input, shift, axis):
self._testRoll(np_input, shift, axis)
if np_input.dtype == np.float32:
self._testGradient(np_input, shift, axis)
def testIntTypes(self):
for t in [np.int32, np.int64]:
self._testAll(np.random.randint(-100, 100, (5)).astype(t), 3, 0)
if NP_ROLL_CAN_MULTISHIFT:
self._testAll(
np.random.randint(-100, 100, (4, 4, 3)).astype(t), [1, -2, 3],
[0, 1, 2])
self._testAll(
np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t), [0, 1, -2],
[1, 2, 3])
def testFloatTypes(self):
for t in [np.float32, np.float64]:
self._testAll(np.random.rand(5).astype(t), 2, 0)
if NP_ROLL_CAN_MULTISHIFT:
self._testAll(np.random.rand(3, 4).astype(t), [1, 2], [1, 0])
self._testAll(np.random.rand(1, 3, 4).astype(t), [1, 0, -3], [0, 1, 2])
def testComplexTypes(self):
for t in [np.complex64, np.complex128]:
x = np.random.rand(4, 4).astype(t)
self._testAll(x + 1j * x, 2, 0)
if NP_ROLL_CAN_MULTISHIFT:
x = np.random.rand(2, 5).astype(t)
self._testAll(x + 1j * x, [1, 2], [1, 0])
x = np.random.rand(3, 2, 1, 1).astype(t)
self._testAll(x + 1j * x, [2, 1, 1, 0], [0, 3, 1, 2])
def testNegativeAxis(self):
self._testAll(np.random.randint(-100, 100, (5)).astype(np.int32), 3, -1)
self._testAll(np.random.randint(-100, 100, (4, 4)).astype(np.int32), 3, -2)
# Make sure negative axis should be 0 <= axis + dims < dims
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of range"):
manip_ops.roll(np.random.randint(-100, 100, (4, 4)).astype(np.int32),
3, -10).eval()
def testInvalidInputShape(self):
# The input should be 1-D or higher, checked in shape function.
with self.assertRaisesRegexp(
ValueError, "Shape must be at least rank 1 but is rank 0"):
manip_ops.roll(7, 1, 0)
def testRollInputMustVectorHigherRaises(self):
# The input should be 1-D or higher, checked in kernel.
tensor = array_ops.placeholder(dtype=dtypes.int32)
shift = 1
axis = 0
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"input must be 1-D or higher"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={tensor: 7})
def testInvalidAxisShape(self):
# The axis should be a scalar or 1-D, checked in shape function.
with self.assertRaisesRegexp(
ValueError, "Shape must be at most rank 1 but is rank 2"):
manip_ops.roll([[1, 2], [3, 4]], 1, [[0, 1]])
def testRollAxisMustBeScalarOrVectorRaises(self):
# The axis should be a scalar or 1-D, checked in kernel.
tensor = [[1, 2], [3, 4]]
shift = 1
axis = array_ops.placeholder(dtype=dtypes.int32)
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"axis must be a scalar or a 1-D vector"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={axis: [[0, 1]]})
def testInvalidShiftShape(self):
# The shift should be a scalar or 1-D, checked in shape function.
with self.assertRaisesRegexp(
ValueError, "Shape must be at most rank 1 but is rank 2"):
manip_ops.roll([[1, 2], [3, 4]], [[0, 1]], 1)
def testRollShiftMustBeScalarOrVectorRaises(self):
# The shift should be a scalar or 1-D, checked in kernel.
tensor = [[1, 2], [3, 4]]
shift = array_ops.placeholder(dtype=dtypes.int32)
axis = 1
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"shift must be a scalar or a 1-D vector"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [[0, 1]]})
def testInvalidShiftAndAxisNotEqualShape(self):
# The shift and axis must be same size, checked in shape function.
with self.assertRaisesRegexp(ValueError, "both shapes must be equal"):
manip_ops.roll([[1, 2], [3, 4]], [1], [0, 1])
def testRollShiftAndAxisMustBeSameSizeRaises(self):
# The shift and axis must be same size, checked in kernel.
tensor = [[1, 2], [3, 4]]
shift = array_ops.placeholder(dtype=dtypes.int32)
axis = [0, 1]
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"shift and axis must have the same size"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [1]})
def testRollAxisOutOfRangeRaises(self):
tensor = [1, 2]
shift = 1
axis = 1
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of range"):
manip_ops.roll(tensor, shift, axis).eval()
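# --- Illustration (editorial addition): a pure-NumPy sketch of the roll
# semantics exercised above; manip_ops.roll is expected to match np.roll.
def _roll_reference_demo():
  """Editorial sketch of the np.roll behaviour the tests compare against."""
  x = np.array([1, 2, 3, 4, 5])
  # a positive shift moves elements toward higher indices, wrapping around
  assert np.array_equal(np.roll(x, shift=2, axis=0), [4, 5, 1, 2, 3])
  if NP_ROLL_CAN_MULTISHIFT:
    y = np.arange(6).reshape(2, 3)
    # multiple shift/axis pairs are applied independently per axis
    assert np.array_equal(
        np.roll(y, shift=[1, -1], axis=[0, 1]), [[4, 5, 3], [1, 2, 0]])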
if __name__ == "__main__":
test_lib.main()
| 40.994382
| 80
| 0.65383
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import manip_ops
from tensorflow.python.platform import test as test_lib
try:
from distutils.version import StrictVersion as Version
NP_ROLL_CAN_MULTISHIFT = Version(np.version.version) >= Version("1.12.0")
except ImportError:
NP_ROLL_CAN_MULTISHIFT = False
class RollTest(test_util.TensorFlowTestCase):
def _testRoll(self, np_input, shift, axis):
expected_roll = np.roll(np_input, shift, axis)
with self.cached_session():
roll = manip_ops.roll(np_input, shift, axis)
self.assertAllEqual(roll.eval(), expected_roll)
def _testGradient(self, np_input, shift, axis):
with self.cached_session():
inx = constant_op.constant(np_input.tolist())
xs = list(np_input.shape)
y = manip_ops.roll(inx, shift, axis)
ys = xs
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, y, ys, x_init_value=np_input)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testAll(self, np_input, shift, axis):
self._testRoll(np_input, shift, axis)
if np_input.dtype == np.float32:
self._testGradient(np_input, shift, axis)
def testIntTypes(self):
for t in [np.int32, np.int64]:
self._testAll(np.random.randint(-100, 100, (5)).astype(t), 3, 0)
if NP_ROLL_CAN_MULTISHIFT:
self._testAll(
np.random.randint(-100, 100, (4, 4, 3)).astype(t), [1, -2, 3],
[0, 1, 2])
self._testAll(
np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t), [0, 1, -2],
[1, 2, 3])
def testFloatTypes(self):
for t in [np.float32, np.float64]:
self._testAll(np.random.rand(5).astype(t), 2, 0)
if NP_ROLL_CAN_MULTISHIFT:
self._testAll(np.random.rand(3, 4).astype(t), [1, 2], [1, 0])
self._testAll(np.random.rand(1, 3, 4).astype(t), [1, 0, -3], [0, 1, 2])
def testComplexTypes(self):
for t in [np.complex64, np.complex128]:
x = np.random.rand(4, 4).astype(t)
self._testAll(x + 1j * x, 2, 0)
if NP_ROLL_CAN_MULTISHIFT:
x = np.random.rand(2, 5).astype(t)
self._testAll(x + 1j * x, [1, 2], [1, 0])
x = np.random.rand(3, 2, 1, 1).astype(t)
self._testAll(x + 1j * x, [2, 1, 1, 0], [0, 3, 1, 2])
def testNegativeAxis(self):
self._testAll(np.random.randint(-100, 100, (5)).astype(np.int32), 3, -1)
self._testAll(np.random.randint(-100, 100, (4, 4)).astype(np.int32), 3, -2)
# Make sure negative axis should be 0 <= axis + dims < dims
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of range"):
manip_ops.roll(np.random.randint(-100, 100, (4, 4)).astype(np.int32),
3, -10).eval()
def testInvalidInputShape(self):
# The input should be 1-D or higher, checked in shape function.
with self.assertRaisesRegexp(
ValueError, "Shape must be at least rank 1 but is rank 0"):
manip_ops.roll(7, 1, 0)
def testRollInputMustVectorHigherRaises(self):
# The input should be 1-D or higher, checked in kernel.
tensor = array_ops.placeholder(dtype=dtypes.int32)
shift = 1
axis = 0
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"input must be 1-D or higher"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={tensor: 7})
def testInvalidAxisShape(self):
# The axis should be a scalar or 1-D, checked in shape function.
with self.assertRaisesRegexp(
ValueError, "Shape must be at most rank 1 but is rank 2"):
manip_ops.roll([[1, 2], [3, 4]], 1, [[0, 1]])
def testRollAxisMustBeScalarOrVectorRaises(self):
# The axis should be a scalar or 1-D, checked in kernel.
tensor = [[1, 2], [3, 4]]
shift = 1
axis = array_ops.placeholder(dtype=dtypes.int32)
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"axis must be a scalar or a 1-D vector"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={axis: [[0, 1]]})
def testInvalidShiftShape(self):
# The shift should be a scalar or 1-D, checked in shape function.
with self.assertRaisesRegexp(
ValueError, "Shape must be at most rank 1 but is rank 2"):
manip_ops.roll([[1, 2], [3, 4]], [[0, 1]], 1)
def testRollShiftMustBeScalarOrVectorRaises(self):
# The shift should be a scalar or 1-D, checked in kernel.
tensor = [[1, 2], [3, 4]]
shift = array_ops.placeholder(dtype=dtypes.int32)
axis = 1
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"shift must be a scalar or a 1-D vector"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [[0, 1]]})
def testInvalidShiftAndAxisNotEqualShape(self):
# The shift and axis must be same size, checked in shape function.
with self.assertRaisesRegexp(ValueError, "both shapes must be equal"):
manip_ops.roll([[1, 2], [3, 4]], [1], [0, 1])
def testRollShiftAndAxisMustBeSameSizeRaises(self):
# The shift and axis must be same size, checked in kernel.
tensor = [[1, 2], [3, 4]]
shift = array_ops.placeholder(dtype=dtypes.int32)
axis = [0, 1]
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"shift and axis must have the same size"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [1]})
def testRollAxisOutOfRangeRaises(self):
tensor = [1, 2]
shift = 1
axis = 1
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of range"):
manip_ops.roll(tensor, shift, axis).eval()
if __name__ == "__main__":
test_lib.main()
| true
| true
|
f71857baefe7eed8b229f6b0364b386030558999
| 10,986
|
py
|
Python
|
Configs/explore_configs/S7_explore_Lifestyle_n_Activity.py
|
yochaiedlitz/T2DM_UKB_predictions
|
1e6b22e3d51d515eb065d7d5f46408f86f33d0b8
|
[
"MIT"
] | 1
|
2022-01-17T13:13:02.000Z
|
2022-01-17T13:13:02.000Z
|
Configs/explore_configs/S7_explore_Lifestyle_n_Activity.py
|
yochaiedlitz/T2DM_UKB_predictions
|
1e6b22e3d51d515eb065d7d5f46408f86f33d0b8
|
[
"MIT"
] | null | null | null |
Configs/explore_configs/S7_explore_Lifestyle_n_Activity.py
|
yochaiedlitz/T2DM_UKB_predictions
|
1e6b22e3d51d515eb065d7d5f46408f86f33d0b8
|
[
"MIT"
] | null | null | null |
import collections # Used for ordered dictionary
from PRS import PRS_sumstats
from UKBB_Functions import PROBA_FOLDER
import sys
Top_Gen_Dict = PRS_sumstats.Get_Top_Gen_Dict()
Hyp_Param_Dict_A = collections.OrderedDict()
Hyp_Param_Dict_R = collections.OrderedDict()
# TRAIN_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_train.csv'
# TEST_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_test.csv'
TRAIN_PATH=Imputed_TRAIN_TEST_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_train.csv'
TEST_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_test.csv'
# ['Diabetes_all','Age_and_Sex','Anthropometry','Blood_Tests','BP_and_HR',
# 'Diet','Early_Life_Factors','Family_and_Ethnicity','Lifestyle_and_physical_activity','Medication',
# 'Mental_health','Non_Diabetes_Diagnosis','Physical_health','Socio_demographics','HbA1c']
ALL_TEST_AS_VAL = True
BASIC_JOB_NAME = ['Lifestyle_and_physical_activity']#['Mental_health','Non_Diabetes_Diagnosis','Physical_health','Socio_demographics','HbA1c']
BASIC_PROB_BASED_JOB_NAME = ["Val_" + x for x in BASIC_JOB_NAME]
Sub_Class_array = ["All"] # "All",, "All"
Job_ID = ["2443-0.0"]
RET_FEAT_file_names = BASIC_JOB_NAME
feat_list_folder="Diabetes_Features_lists/For_article/" #Folder where the features lists located
FEAT_file_names = [
"Diabetes_Features_0705"] # Diabetes_Features.csv,Diabetes_Features_No_Baseline.csv,Baseline_Features.csv,Diabetes_Features_Lifestyle.csv,Diabetes_Features_No_Baseline.csv, Full_Diabetes_Features # "Diabetes_Features.csv","Diabetes_Features.csv","Diabetes_Features.csv",BMI_Features_Lifestyle.csv
# Features File name without ending
# Features File name without ending
FEAT_PATH = [feat_list_folder + x + ".csv" for x in FEAT_file_names]
RET_FEAT_PATH = [feat_list_folder + x + ".csv" for x in RET_FEAT_file_names]
#
# Data_Job_Names = {"6150-0.0": "Vascular", "2443-0.0": "Diabetes", "2453-0.0": "Cancer", "4041-0.0": "Gestational diabetes","21001-0.0":'BMI'}
CHARAC_SELECTED = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All"}
DISEASE_PROBA_DICT = {"Diabetes Probabilities": PROBA_FOLDER + "Diabetes_OnlyPROB.csv",
"CVD Probabilities": PROBA_FOLDER + "Vascular_OnlyPROB.csv",
"Cancer Probabilities": PROBA_FOLDER + "Cancer_OnlyPROB.csv"}
# PRS_COLS -Adding PRS -Only final score for each phenotype for each user
PRS_COLS = ['PRS_MAGIC_HbA1C', 'PRS_cigs_per_day', 'PRS_MAGIC_Scott_FG', 'PRS_ln_HOMA-IR', 'PRS_MAGIC_Scott_FI',
'PRS_height', 'PRS_Manning_FI', 'PRS_Leptin_BMI', 'PRS_cardio', 'PRS_triglycerides',
'PRS_Manning_FG', 'PRS_anorexia', 'PRS_Magic_2hrGlucose', 'PRS_Non_Diabetic_glucose2', 'PRS_ever_smoked',
'PRS_age_smoke', 'PRS_MAGIC_fastingProinsulin', 'PRS_Leptin_Unadjusted_BMI',
'PRS_MAGIC_Scott_FI_adjBMI', 'PRS_MAGIC_Scott_2hGlu', 'PRS_glucose_iris', 'PRS_ln_FastingInsulin',
'PRS_bmi', 'PRS_overweight', 'PRS_hba1c', 'PRS_alzheimer', 'PRS_whr', 'PRS_ln_HOMA-B',
'PRS_ldl', 'PRS_obesity_class2', 'PRS_obesity_class1', 'PRS_diabetes_BMI_Unadjusted',
'PRS_Manning_BMI_ADJ_FG', 'PRS_waist', 'PRS_ashtma', 'PRS_HBA1C_ISI', 'PRS_HbA1c_MANTRA',
'PRS_diabetes_BMI_Adjusted', 'PRS_Heart_Rate', 'PRS_Manning_BMI_ADJ_FI', 'PRS_cholesterol', 'PRS_hdl',
'PRS_FastingGlucose', 'PRS_hips']
# Select_Top_Traits_Gen_arr_names = ['HbA1c_MANTRA','t2d_mega_meta',"MAGIC_Scott_FG","triglycerides",'Magic_2hrGlucose','Manning_Fasting_Insulin'] #Keep empty if None
Select_Top_Traits_Gen_arr_names = ['HbA1c_MANTRA', 't2d_mega_meta', "MAGIC_Scott_FG", 'Magic_2hrGlucose',
'bmi', 'anorexia', 'cardio', 'hips', 'waist', "overweight", 'obesity_class1',
'obesity_class2',
"ever_smoked", "hdl", "ldl", 'triglycerides', 'cholesterol',
'diabetes_BMI_Unadjusted',
'diabetes_BMI_Adjusted', 'FastingGlucose', 'ln_HOMA-B', 'ln_HOMA-IR',
'ln_FastingInsulin',
'Leptin_BMI', 'Leptin_Unadjusted_BMI', 'Heart_Rate', 'MAGIC_fastingProinsulin',
'MAGIC_Scott_FI_adjBMI', 'MAGIC_Scott_FI', 'MAGIC_HbA1C', 'Manning_FG',
'Manning_BMI_ADJ_FG',
'Manning_Fasting_Insulin', 'Manning_BMI_ADJ_FI', 'HBA1C_ISI'] #
USE_FAKE_QUE = False
NROWS = None # 1-500000 or None
NROWS_RETURN = None # How many returning participants to load
Split = True # Whether or not to split data to train and test, should be false only for final testing
Use_imp_flag=True
Logistic_regression=False # "Should be LR for Linear regression or LGBM for trees"
DEBUG = False
USE_PROBA = True # Whether or not to either calculate probability if working on all participants or to use probabilities
# calculated if working with returning participants
USE_PRS = False # whether to use PRS results
Use_SNPs = False
NFOLD = 5
Choose_N_Fold = 3 # How many CV to make for the initial Cross validation when choosing the hyperparameters
Basic_HYP_PAR_ITER = 20
Prob_HYP_PAR_ITER = 100
MEM = '30G'
N_THREADS = 10
P_THREADS = 2
Calc_Base_Prob = False
CALC_SHAP = True # Whether or not to calculate the SHAP values for the basic probabilities
SORT = True # Used mostly for debugging to activate the SORT_AUC_APS function
# Refit_model - path to model to be refitted in the first visit
Refit_Model = None # '/net/mraid08/export/jafar/UKBioBank/Yochai/UKBB_Runs/Refit/Refit_BL2AF_Diabetes/Diabetes_Results/Diabetes_shap_model.txt'#None##Name of the model to be refitted or None
# /net/mraid08/export/jafar/Yochai/UKBB_Runs/AF_To_refit2_Diabetes/Diabetes_Results
Finalize_Only = False
Calc_Prob_Based_Prob = True
RE_USE_PROBA = False
Calc_Transfer_Learning = False # Used when we would like to refit several base models and not a specific model
REFIT_SERIAL_MODELS = False # Checking whether to refit a model folder just made in previous step, or use a predefined folder
# Refit_Return_Model_Path - path to model to be refitted in the first visit
Refit_Return_Model_Path = None # '/net/mraid08/export/jafar/Yochai/UKBB_Runs/mock_refit/Diabetes_Results/'#'/net/mraid08/export/jafar/UKBioBank/Yochai/UKBB_Runs/Refit/Refit_BL2AF_Diabetes/Diabetes_Results/'#None#
HowHow = "left" # "inner" - take only participants who has probabilities for other disease as well, "left" - take all
CALC_P_SHAP = True # Whether or not to calculate the SHAP values for the Preob based predictions
SORT_Prob = True
Finalize_Prob_Based_Only = False
if REFIT_SERIAL_MODELS or Refit_Return_Model_Path:
Refit_Returned = True
else:
Refit_Returned = False
VISITS = [0, 1, 2] # [0,1,2]
NUM_OF_DEP_PLOT = 10
Lite = False # Used for debug
Thresh_in_Column = 0.7
Thresh_in_Row = 0.7
# CHARAC_SELECTED = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
# "Type of special diet followed": "All"}
CHARAC_ID = {"Age at last visit": "21022-0.0", "Sex": "31-0.0", "Ethnic background": "21000-0.0",
"Type of special diet followed": "20086-0.0"}
ETHNIC_CODE = {-3: "Prefer not to answer", -1: "Do not know", 1: "White", 2: "Mixed", 3: "Asian",
4: "Black or Black British", 5: "Chinese", 6: "Other ethnic group", 1001: "British", 1002: "Irish",
1003: "Any other white background", 2001: "White and Black Caribbean",
2002: "White and Black African", 2003: "White and Asian", 2004: "Any other mixed background",
3001: "Indian", 3002: "Pakistani", 3003: "Bangladeshi", 3004: "Any other Asian background",
4001: "Caribbean", 4002: "African", 4003: "Any other Black background"}
SEX_CODE = {"Female": 0, "Male": 1}
DIET_CODE = {"Gluten-free": 8, "Lactose-free": 9, "Low calorie": 10, "Vegetarian": 11, "Vegan": 12, "Other": 13}
Job_name_dict = {"6150-0.0": "Vascular", "2443-0.0": "Diabetes", "2453-0.0": "Cancer",
"4041-0.0": "Gestational diabetes",
"21001-0.0": 'BMI'} # ,"Diabetes", "Cancer", "Gestational diabetes","Vascular"
No_symp_dict = {"6150-0.0": -7, "2443-0.0": 0, '2453-0.0': 0, '21001-0.0': "nan"}
# Hyp_Param_Dict_A['max_depth']=[2,4,8,16]
Hyp_Param_Dict_A['num_leaves'] = [4, 8, 16, 32, 64, 128, 256]
Hyp_Param_Dict_A['is_unbalance'] = [True]
Hyp_Param_Dict_A['objective'] = ['binary']
Hyp_Param_Dict_A['boosting_type'] = ['gbdt'] # ,'rf','dart','goss'
Hyp_Param_Dict_A['metric'] = ["auc"] # MAP, aliases: mean_average_precision,kldiv, Kullback-Leibler divergence, aliases: kullback_leibler
Hyp_Param_Dict_A['num_boost_round'] = [10, 50, 100, 250, 500, 1000] # ,1000, 2000, 4000, 8000
Hyp_Param_Dict_A['learning_rate'] = [0.005, 0.01, 0.05, 0.1]
Hyp_Param_Dict_A["min_child_samples"] = [10, 25, 50, 250, 500]
Hyp_Param_Dict_A["subsample"] = [0.1, 0.25, 0.5, 0.7, 0.9, 1]
Hyp_Param_Dict_A["colsample_bytree"] = [0.03, 0.1, 0.25, 0.5, 0.7, 1]
Hyp_Param_Dict_A["boost_from_average"] = [True]
Hyp_Param_Dict_A['num_threads'] = [N_THREADS]
Hyp_Param_Dict_A['lambda_l1'] = [0, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_A['lambda_l2'] = [0, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_A['bagging_freq'] = [0, 1, 5]
Hyp_Param_Dict_A['bagging_fraction'] = [0.25, 0.5, 0.75, 1]
# Hyp_Param_Dict_R['max_depth']=[2,4,8,16]
Hyp_Param_Dict_A['num_leaves'] = [2, 4, 8, 16, 32, 64, 128]
Hyp_Param_Dict_R['is_unbalance'] = [True]
Hyp_Param_Dict_R['objective'] = ['binary']
Hyp_Param_Dict_R['boosting_type'] = ['gbdt']
Hyp_Param_Dict_R['metric'] = [
"auc"] # MAP, aliases: mean_average_precision,kldiv, Kullback-Leibler divergence, aliases: kullback_leibler
Hyp_Param_Dict_R['num_boost_round'] = [50, 100, 250, 500, 1000] # ,,1000, 2000, 4000, 8000
Hyp_Param_Dict_R['verbose'] = [-1]
Hyp_Param_Dict_R['learning_rate'] = [0.005, 0.01, 0.05]
Hyp_Param_Dict_R["min_child_samples"] = [5, 10, 25, 50]
Hyp_Param_Dict_R["subsample"] = [0.5, 0.7, 0.9, 1]
Hyp_Param_Dict_R["colsample_bytree"] = [0.01, 0.05, 0.1, 0.25, 0.5, 0.7, 1]
Hyp_Param_Dict_R["boost_from_average"] = [True]
Hyp_Param_Dict_R['num_threads'] = [P_THREADS]
Hyp_Param_Dict_R['lambda_l1'] = [0, 0.25, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_R['lambda_l2'] = [0, 0.25, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_A['bagging_freq'] = [0, 1, 5]
Hyp_Param_Dict_A['bagging_fraction'] = [0.5, 0.75, 1]
Select_Traits_Gen = {}
for name in Select_Top_Traits_Gen_arr_names:
Select_Traits_Gen[name] = Top_Gen_Dict[name]
if (len(BASIC_JOB_NAME) != len(Sub_Class_array) or (len(BASIC_JOB_NAME) != len(Sub_Class_array)) or
(len(BASIC_JOB_NAME) != len(Job_ID))):
sys.exit("BASIC_JOB_NAME,Sub_Class_array and Job_ID should be same size")
| 60.032787
| 301
| 0.706354
|
import collections
from PRS import PRS_sumstats
from UKBB_Functions import PROBA_FOLDER
import sys
Top_Gen_Dict = PRS_sumstats.Get_Top_Gen_Dict()
Hyp_Param_Dict_A = collections.OrderedDict()
Hyp_Param_Dict_R = collections.OrderedDict()
TRAIN_PATH=Imputed_TRAIN_TEST_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_train.csv'
TEST_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_test.csv'
ALL_TEST_AS_VAL = True
BASIC_JOB_NAME = ['Lifestyle_and_physical_activity']
BASIC_PROB_BASED_JOB_NAME = ["Val_" + x for x in BASIC_JOB_NAME]
Sub_Class_array = ["All"]
Job_ID = ["2443-0.0"]
RET_FEAT_file_names = BASIC_JOB_NAME
feat_list_folder="Diabetes_Features_lists/For_article/"
FEAT_file_names = [
"Diabetes_Features_0705"] folder + x + ".csv" for x in RET_FEAT_file_names]
CHARAC_SELECTED = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All"}
DISEASE_PROBA_DICT = {"Diabetes Probabilities": PROBA_FOLDER + "Diabetes_OnlyPROB.csv",
"CVD Probabilities": PROBA_FOLDER + "Vascular_OnlyPROB.csv",
"Cancer Probabilities": PROBA_FOLDER + "Cancer_OnlyPROB.csv"}
PRS_COLS = ['PRS_MAGIC_HbA1C', 'PRS_cigs_per_day', 'PRS_MAGIC_Scott_FG', 'PRS_ln_HOMA-IR', 'PRS_MAGIC_Scott_FI',
'PRS_height', 'PRS_Manning_FI', 'PRS_Leptin_BMI', 'PRS_cardio', 'PRS_triglycerides',
'PRS_Manning_FG', 'PRS_anorexia', 'PRS_Magic_2hrGlucose', 'PRS_Non_Diabetic_glucose2', 'PRS_ever_smoked',
'PRS_age_smoke', 'PRS_MAGIC_fastingProinsulin', 'PRS_Leptin_Unadjusted_BMI',
'PRS_MAGIC_Scott_FI_adjBMI', 'PRS_MAGIC_Scott_2hGlu', 'PRS_glucose_iris', 'PRS_ln_FastingInsulin',
'PRS_bmi', 'PRS_overweight', 'PRS_hba1c', 'PRS_alzheimer', 'PRS_whr', 'PRS_ln_HOMA-B',
'PRS_ldl', 'PRS_obesity_class2', 'PRS_obesity_class1', 'PRS_diabetes_BMI_Unadjusted',
'PRS_Manning_BMI_ADJ_FG', 'PRS_waist', 'PRS_ashtma', 'PRS_HBA1C_ISI', 'PRS_HbA1c_MANTRA',
'PRS_diabetes_BMI_Adjusted', 'PRS_Heart_Rate', 'PRS_Manning_BMI_ADJ_FI', 'PRS_cholesterol', 'PRS_hdl',
'PRS_FastingGlucose', 'PRS_hips']
Select_Top_Traits_Gen_arr_names = ['HbA1c_MANTRA', 't2d_mega_meta', "MAGIC_Scott_FG", 'Magic_2hrGlucose',
'bmi', 'anorexia', 'cardio', 'hips', 'waist', "overweight", 'obesity_class1',
'obesity_class2',
"ever_smoked", "hdl", "ldl", 'triglycerides', 'cholesterol',
'diabetes_BMI_Unadjusted',
'diabetes_BMI_Adjusted', 'FastingGlucose', 'ln_HOMA-B', 'ln_HOMA-IR',
'ln_FastingInsulin',
'Leptin_BMI', 'Leptin_Unadjusted_BMI', 'Heart_Rate', 'MAGIC_fastingProinsulin',
'MAGIC_Scott_FI_adjBMI', 'MAGIC_Scott_FI', 'MAGIC_HbA1C', 'Manning_FG',
'Manning_BMI_ADJ_FG',
'Manning_Fasting_Insulin', 'Manning_BMI_ADJ_FI', 'HBA1C_ISI']
USE_FAKE_QUE = False
NROWS = None
NROWS_RETURN = None
Split = True
Use_imp_flag=True
Logistic_regression=False
DEBUG = False
USE_PROBA = True
USE_PRS = False
Use_SNPs = False
NFOLD = 5
Choose_N_Fold = 3
Basic_HYP_PAR_ITER = 20
Prob_HYP_PAR_ITER = 100
MEM = '30G'
N_THREADS = 10
P_THREADS = 2
Calc_Base_Prob = False
CALC_SHAP = True
SORT = True
Refit_Model = None
Finalize_Only = False
Calc_Prob_Based_Prob = True
RE_USE_PROBA = False
Calc_Transfer_Learning = False
REFIT_SERIAL_MODELS = False
Refit_Return_Model_Path = None
HowHow = "left"
CALC_P_SHAP = True
SORT_Prob = True
Finalize_Prob_Based_Only = False
if REFIT_SERIAL_MODELS or Refit_Return_Model_Path:
    Refit_Returned = True
else:
    Refit_Returned = False
VISITS = [0, 1, 2]
NUM_OF_DEP_PLOT = 10
Lite = False
Thresh_in_Column = 0.7
Thresh_in_Row = 0.7
CHARAC_ID = {"Age at last visit": "21022-0.0", "Sex": "31-0.0", "Ethnic background": "21000-0.0",
"Type of special diet followed": "20086-0.0"}
ETHNIC_CODE = {-3: "Prefer not to answer", -1: "Do not know", 1: "White", 2: "Mixed", 3: "Asian",
4: "Black or Black British", 5: "Chinese", 6: "Other ethnic group", 1001: "British", 1002: "Irish",
1003: "Any other white background", 2001: "White and Black Caribbean",
2002: "White and Black African", 2003: "White and Asian", 2004: "Any other mixed background",
3001: "Indian", 3002: "Pakistani", 3003: "Bangladeshi", 3004: "Any other Asian background",
4001: "Caribbean", 4002: "African", 4003: "Any other Black background"}
SEX_CODE = {"Female": 0, "Male": 1}
DIET_CODE = {"Gluten-free": 8, "Lactose-free": 9, "Low calorie": 10, "Vegetarian": 11, "Vegan": 12, "Other": 13}
Job_name_dict = {"6150-0.0": "Vascular", "2443-0.0": "Diabetes", "2453-0.0": "Cancer",
"4041-0.0": "Gestational diabetes",
"21001-0.0": 'BMI'}
No_symp_dict = {"6150-0.0": -7, "2443-0.0": 0, '2453-0.0': 0, '21001-0.0': "nan"}
Hyp_Param_Dict_A['num_leaves'] = [4, 8, 16, 32, 64, 128, 256]
Hyp_Param_Dict_A['is_unbalance'] = [True]
Hyp_Param_Dict_A['objective'] = ['binary']
Hyp_Param_Dict_A['boosting_type'] = ['gbdt']
Hyp_Param_Dict_A['metric'] = ["auc"]
Hyp_Param_Dict_A['num_boost_round'] = [10, 50, 100, 250, 500, 1000]
Hyp_Param_Dict_A['learning_rate'] = [0.005, 0.01, 0.05, 0.1]
Hyp_Param_Dict_A["min_child_samples"] = [10, 25, 50, 250, 500]
Hyp_Param_Dict_A["subsample"] = [0.1, 0.25, 0.5, 0.7, 0.9, 1]
Hyp_Param_Dict_A["colsample_bytree"] = [0.03, 0.1, 0.25, 0.5, 0.7, 1]
Hyp_Param_Dict_A["boost_from_average"] = [True]
Hyp_Param_Dict_A['num_threads'] = [N_THREADS]
Hyp_Param_Dict_A['lambda_l1'] = [0, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_A['lambda_l2'] = [0, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_A['bagging_freq'] = [0, 1, 5]
Hyp_Param_Dict_A['bagging_fraction'] = [0.25, 0.5, 0.75, 1]
Hyp_Param_Dict_A['num_leaves'] = [2, 4, 8, 16, 32, 64, 128]
Hyp_Param_Dict_R['is_unbalance'] = [True]
Hyp_Param_Dict_R['objective'] = ['binary']
Hyp_Param_Dict_R['boosting_type'] = ['gbdt']
Hyp_Param_Dict_R['metric'] = [
"auc"]
Hyp_Param_Dict_R['num_boost_round'] = [50, 100, 250, 500, 1000]
Hyp_Param_Dict_R['verbose'] = [-1]
Hyp_Param_Dict_R['learning_rate'] = [0.005, 0.01, 0.05]
Hyp_Param_Dict_R["min_child_samples"] = [5, 10, 25, 50]
Hyp_Param_Dict_R["subsample"] = [0.5, 0.7, 0.9, 1]
Hyp_Param_Dict_R["colsample_bytree"] = [0.01, 0.05, 0.1, 0.25, 0.5, 0.7, 1]
Hyp_Param_Dict_R["boost_from_average"] = [True]
Hyp_Param_Dict_R['num_threads'] = [P_THREADS]
Hyp_Param_Dict_R['lambda_l1'] = [0, 0.25, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_R['lambda_l2'] = [0, 0.25, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_A['bagging_freq'] = [0, 1, 5]
Hyp_Param_Dict_A['bagging_fraction'] = [0.5, 0.75, 1]
Select_Traits_Gen = {}
for name in Select_Top_Traits_Gen_arr_names:
Select_Traits_Gen[name] = Top_Gen_Dict[name]
if (len(BASIC_JOB_NAME) != len(Sub_Class_array) or (len(BASIC_JOB_NAME) != len(Sub_Class_array)) or
(len(BASIC_JOB_NAME) != len(Job_ID))):
sys.exit("BASIC_JOB_NAME,Sub_Class_array and Job_ID should be same size")
| true
| true
|
f718581e08eecfa5071a2fbf325d02e2ff15bedc
| 22
|
py
|
Python
|
vida/vida/__init__.py
|
smesdaghi/vida
|
271c897b332f0c24e00a23c1fe86f5172fb9dd30
|
[
"MIT"
] | 2
|
2016-01-09T15:45:46.000Z
|
2019-04-28T03:56:13.000Z
|
vida/vida/__init__.py
|
smesdaghi/vida
|
271c897b332f0c24e00a23c1fe86f5172fb9dd30
|
[
"MIT"
] | 3
|
2015-09-26T16:31:19.000Z
|
2015-10-07T13:03:00.000Z
|
vida/vida/__init__.py
|
smesdaghi/vida
|
271c897b332f0c24e00a23c1fe86f5172fb9dd30
|
[
"MIT"
] | 4
|
2016-01-20T13:06:31.000Z
|
2019-09-13T14:52:00.000Z
|
__author__ = 's30244'
| 11
| 21
| 0.727273
|
__author__ = 's30244'
| true
| true
|
f71858bb17f89fb6223fdd8d8e5d81ceec6d75a9
| 3,307
|
py
|
Python
|
tacker/nfvo/workflows/vim_monitor/vim_monitor_utils.py
|
h1r0mu/tacker
|
8c69dda51fcfe215c4878a86b82018d2b96e5561
|
[
"Apache-2.0"
] | 116
|
2015-10-18T02:57:08.000Z
|
2022-03-15T04:09:18.000Z
|
tacker/nfvo/workflows/vim_monitor/vim_monitor_utils.py
|
h1r0mu/tacker
|
8c69dda51fcfe215c4878a86b82018d2b96e5561
|
[
"Apache-2.0"
] | 6
|
2016-11-07T22:15:54.000Z
|
2021-05-09T06:13:08.000Z
|
tacker/nfvo/workflows/vim_monitor/vim_monitor_utils.py
|
h1r0mu/tacker
|
8c69dda51fcfe215c4878a86b82018d2b96e5561
|
[
"Apache-2.0"
] | 166
|
2015-10-20T15:31:52.000Z
|
2021-11-12T08:39:49.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from oslo_config import cfg
from oslo_log import log as logging
from tacker.common import rpc
from tacker.mistral.actionrpc import kill_action as killaction
from tacker.mistral import mistral_client
from tacker.nfvo.workflows.vim_monitor import workflow_generator
from tacker.vnfm import keystone
LOG = logging.getLogger(__name__)
def get_mistral_client(auth_dict):
return mistral_client.MistralClient(
keystone.Keystone().initialize_client(**auth_dict),
auth_dict['token']).get_client()
def prepare_and_create_workflow(mistral_client, vim_id, action,
kwargs):
wg = workflow_generator.WorkflowGenerator(vim_id, action)
wg.task(**kwargs)
yaml.SafeDumper.ignore_aliases = lambda self, data: True
definition_yaml = yaml.safe_dump(wg.definition, default_flow_style=False)
LOG.debug('vim monitor workflow: %s', definition_yaml)
workflow = mistral_client.workflows.create(definition_yaml)
return {'id': workflow[0].id, 'input': wg.get_input_dict()}
def execute_workflow(mistral_client, workflow):
return mistral_client.executions.create(
wf_identifier=workflow['id'],
workflow_input=workflow['input'],
wf_params={})
def delete_executions(mistral_client, vim_id):
executions = mistral_client.executions.list(
workflow_name='vim_id_' + vim_id)
for execution in executions:
mistral_client.executions.delete(execution.id, force=True)
def delete_workflow(mistral_client, vim_id):
return mistral_client.workflows.delete('vim_id_' + vim_id)
def monitor_vim(auth_dict, vim_obj):
mc = get_mistral_client(auth_dict)
auth_url = vim_obj["auth_url"]
vim_type = vim_obj['type']
if vim_type == 'openstack':
vim_ip = auth_url.split("//")[-1].split(":")[0].split("/")[0]
elif vim_type == 'kubernetes':
vim_ip = auth_url.split("//")[-1].split(":")[0]
workflow_input_dict = {
'vim_id': vim_obj['id'],
'count': cfg.CONF.vim_monitor.count,
'timeout': cfg.CONF.vim_monitor.timeout,
'interval': cfg.CONF.vim_monitor.interval,
'targetip': vim_ip}
workflow = prepare_and_create_workflow(
mc, vim_obj['id'], 'monitor',
workflow_input_dict)
execute_workflow(mc, workflow)
def kill_action(context, vim_obj):
target = killaction.MistralActionKillRPC.target
rpc_client = rpc.get_client(target)
cctxt = rpc_client.prepare(server=vim_obj['id'])
cctxt.cast(context, 'killAction')
def delete_vim_monitor(context, auth_dict, vim_obj):
mc = get_mistral_client(auth_dict)
delete_executions(mc, vim_obj['id'])
delete_workflow(mc, vim_obj['id'])
kill_action(context, vim_obj)
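# --- Illustration (editorial addition): how monitor_vim derives the ping target
# from a VIM record. The id and auth_url below are made-up placeholders; only
# the string handling mirrors the branches in monitor_vim above.
if __name__ == '__main__':
    _example_vim = {'id': 'vim-0000', 'type': 'openstack',
                    'auth_url': 'https://192.0.2.10:5000/v3'}
    _example_ip = _example_vim['auth_url'].split("//")[-1].split(":")[0].split("/")[0]
    assert _example_ip == '192.0.2.10'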
| 34.810526
| 77
| 0.713335
|
import yaml
from oslo_config import cfg
from oslo_log import log as logging
from tacker.common import rpc
from tacker.mistral.actionrpc import kill_action as killaction
from tacker.mistral import mistral_client
from tacker.nfvo.workflows.vim_monitor import workflow_generator
from tacker.vnfm import keystone
LOG = logging.getLogger(__name__)
def get_mistral_client(auth_dict):
return mistral_client.MistralClient(
keystone.Keystone().initialize_client(**auth_dict),
auth_dict['token']).get_client()
def prepare_and_create_workflow(mistral_client, vim_id, action,
kwargs):
wg = workflow_generator.WorkflowGenerator(vim_id, action)
wg.task(**kwargs)
yaml.SafeDumper.ignore_aliases = lambda self, data: True
definition_yaml = yaml.safe_dump(wg.definition, default_flow_style=False)
LOG.debug('vim monitor workflow: %s', definition_yaml)
workflow = mistral_client.workflows.create(definition_yaml)
return {'id': workflow[0].id, 'input': wg.get_input_dict()}
def execute_workflow(mistral_client, workflow):
return mistral_client.executions.create(
wf_identifier=workflow['id'],
workflow_input=workflow['input'],
wf_params={})
def delete_executions(mistral_client, vim_id):
executions = mistral_client.executions.list(
workflow_name='vim_id_' + vim_id)
for execution in executions:
mistral_client.executions.delete(execution.id, force=True)
def delete_workflow(mistral_client, vim_id):
return mistral_client.workflows.delete('vim_id_' + vim_id)
def monitor_vim(auth_dict, vim_obj):
mc = get_mistral_client(auth_dict)
auth_url = vim_obj["auth_url"]
vim_type = vim_obj['type']
if vim_type == 'openstack':
vim_ip = auth_url.split("//")[-1].split(":")[0].split("/")[0]
elif vim_type == 'kubernetes':
vim_ip = auth_url.split("//")[-1].split(":")[0]
workflow_input_dict = {
'vim_id': vim_obj['id'],
'count': cfg.CONF.vim_monitor.count,
'timeout': cfg.CONF.vim_monitor.timeout,
'interval': cfg.CONF.vim_monitor.interval,
'targetip': vim_ip}
workflow = prepare_and_create_workflow(
mc, vim_obj['id'], 'monitor',
workflow_input_dict)
execute_workflow(mc, workflow)
def kill_action(context, vim_obj):
target = killaction.MistralActionKillRPC.target
rpc_client = rpc.get_client(target)
cctxt = rpc_client.prepare(server=vim_obj['id'])
cctxt.cast(context, 'killAction')
def delete_vim_monitor(context, auth_dict, vim_obj):
mc = get_mistral_client(auth_dict)
delete_executions(mc, vim_obj['id'])
delete_workflow(mc, vim_obj['id'])
kill_action(context, vim_obj)
| true
| true
|
f71859abfa33f2df3bfb30d851d602fbf51a1f8a
| 525
|
py
|
Python
|
src/cmd_exec/util/SystemUtil.py
|
ahuyuktepe/cmd-exec
|
835037a4b7784d4901bf35db5eaa88a0757c5ce9
|
[
"MIT"
] | null | null | null |
src/cmd_exec/util/SystemUtil.py
|
ahuyuktepe/cmd-exec
|
835037a4b7784d4901bf35db5eaa88a0757c5ce9
|
[
"MIT"
] | 1
|
2021-06-07T21:25:52.000Z
|
2021-06-07T21:25:52.000Z
|
src/cmd_exec/util/SystemUtil.py
|
ahuyuktepe/cmd-exec
|
835037a4b7784d4901bf35db5eaa88a0757c5ce9
|
[
"MIT"
] | null | null | null |
import getpass
import os
import platform
class SystemUtil:
    __SYSTEM_NAMES_WINDOWS: set = {'Windows'}
@staticmethod
def isWindows() -> bool:
osName: str = platform.system()
return osName in SystemUtil.__SYSTEM_NAMES_WINDOWS
@staticmethod
def getCurrentUserName() -> str:
username = getpass.getuser()
return username
@staticmethod
def getCurrentUserGroups() -> list:
if not SystemUtil.isWindows():
return os.getgroups()
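# --- Usage sketch (editorial addition). On POSIX systems this prints the current
# user name and group ids; note getCurrentUserGroups() only returns a value on
# non-Windows platforms, mirroring the guard above.
if __name__ == '__main__':
    print(SystemUtil.getCurrentUserName())
    if not SystemUtil.isWindows():
        print(SystemUtil.getCurrentUserGroups())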
| 22.826087
| 59
| 0.630476
|
import getpass
import os
import platform
class SystemUtil:
    __SYSTEM_NAMES_WINDOWS: set = {'Windows'}
@staticmethod
def isWindows() -> bool:
osName: str = platform.system()
return osName in SystemUtil.__SYSTEM_NAMES_WINDOWS
@staticmethod
def getCurrentUserName() -> str:
username = getpass.getuser()
return username
@staticmethod
def getCurrentUserGroups() -> list:
if not SystemUtil.isWindows():
return os.getgroups()
| true
| true
|
f71859cec54c2858e6e96dfaa122fa325313a2ed
| 5,325
|
py
|
Python
|
artellapipe/core/assetfile.py
|
ArtellaPipe/artellapipe
|
3400f6a55f124f639143fe01c559059eaba23b22
|
[
"MIT"
] | 7
|
2019-10-28T05:18:30.000Z
|
2020-08-21T05:36:52.000Z
|
artellapipe/core/assetfile.py
|
tpoveda/artellapipe
|
3400f6a55f124f639143fe01c559059eaba23b22
|
[
"MIT"
] | 4
|
2020-01-22T02:41:54.000Z
|
2020-03-17T10:49:12.000Z
|
artellapipe/core/assetfile.py
|
tpoveda/artellapipe
|
3400f6a55f124f639143fe01c559059eaba23b22
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains implementations for asset files
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "tpovedatd@gmail.com"
import os
import logging
import tpDcc as tp
from tpDcc.libs.python import osplatform, path as path_utils
import artellapipe
from artellapipe.core import defines, file
LOGGER = logging.getLogger('artellapipe')
class ArtellaAssetFile(file.ArtellaFile, object):
def __init__(self, file_asset=None, file_path=None, file_name=None):
self._asset = file_asset
file_name = file_name or self._asset.get_name() if self._asset else None
super(ArtellaAssetFile, self).__init__(file_name=file_name, file_path=file_path)
@property
def asset(self):
"""
Returns asset linked to this file type
        :return: ArtellaAsset
"""
return self._asset
def has_valid_object(self):
"""
Implements base ArtellaFile has_valid_object function
Returns whether valid object is attached to this file
:return: bool
"""
return bool(self._asset)
def get_template_dict(self, **kwargs):
"""
Returns dictionary with the template data for this file
:param extension: str
:return: dict
"""
template_dict = {
'project_id': self._project.id,
'project_id_number': self._project.id_number,
'asset_name': self._asset.get_name(),
'asset_type': self._asset.get_category(),
'file_extension': kwargs.get('extension', self.FILE_EXTENSIONS[0])
}
return template_dict
def get_project(self):
"""
Implements base ArtellaFile get_project function
Returns project where this asset file belongs to
:return: ArtellaProject
"""
return self._asset.project
def get_file(
self, status=defines.ArtellaFileStatus.WORKING, extension=None, fix_path=False, version=None, **kwargs):
"""
Implements base ArtellaFile get_file function
Returns file of the attached object
:param file_type: str
:param status: str
:param extension: str
:param fix_path: bool
:param version: str
:return: str
"""
template_dict = self.get_template_dict()
return self._asset.get_file(
file_type=self.FILE_TYPE, status=status, extension=extension, fix_path=fix_path,
version=version, extra_dict=template_dict)
def get_path(self):
"""
Implements base ArtellaFile get_path function
Returns path of the attached object
:return: str
"""
return self._asset.get_path()
def get_name(self):
"""
Returns name of the attached object
:return: str
"""
return self._asset.get_name()
def get_extension(self):
"""
        Returns the extension of the asset file
:return: str
"""
return self.get_project().assets_library_file_types.get()
def get_latest_published_versions(self):
"""
        Implements base ArtellaFile get_latest_published_versions function
Returns latest published version of file
:return: str
"""
file_path = self.get_path()
return artellapipe.AssetsMgr().get_latest_published_versions(file_path, file_type=self.FILE_TYPE)
def get_file_paths(self, return_first=False, fix_path=True, **kwargs):
if self.FILE_TYPE not in self._asset.FILES:
LOGGER.warning(
'FileType "{}" is not a valid file for Assets of type "{}"'.format(
self.FILE_TYPE, self._asset.FILE_TYPE))
return list()
file_paths = super(
ArtellaAssetFile, self).get_file_paths(return_first=return_first, fix_path=fix_path, **kwargs)
if file_paths:
return file_paths
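        # nothing was resolved by the base implementation, so fall back to the local working/published path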
status = kwargs.get('status', defines.ArtellaFileStatus.PUBLISHED)
if status == defines.ArtellaFileStatus.WORKING:
file_path = self.get_working_path()
else:
file_path = self.get_latest_local_published_path()
if not file_path:
return None if return_first else file_paths
if fix_path:
file_path = artellapipe.FilesMgr().fix_path(file_path)
if return_first:
return file_path
else:
return [file_path]
def _open_file(self, file_path):
if file_path and os.path.isfile(file_path):
if path_utils.clean_path(tp.Dcc.scene_name()) == path_utils.clean_path(file_path):
return True
tp.Dcc.open_file(file_path)
return True
elif file_path and os.path.isdir(file_path):
osplatform.open_file(file_path)
return True
else:
if file_path:
folder_path = os.path.dirname(file_path)
if os.path.isdir(folder_path):
osplatform.open_file(folder_path)
return True
LOGGER.warning('Impossible to open file: "{}"'.format(file_path))
return False
| 29.583333
| 116
| 0.624977
|
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "tpovedatd@gmail.com"
import os
import logging
import tpDcc as tp
from tpDcc.libs.python import osplatform, path as path_utils
import artellapipe
from artellapipe.core import defines, file
LOGGER = logging.getLogger('artellapipe')
class ArtellaAssetFile(file.ArtellaFile, object):
def __init__(self, file_asset=None, file_path=None, file_name=None):
self._asset = file_asset
file_name = file_name or self._asset.get_name() if self._asset else None
super(ArtellaAssetFile, self).__init__(file_name=file_name, file_path=file_path)
@property
def asset(self):
return self._asset
def has_valid_object(self):
return bool(self._asset)
def get_template_dict(self, **kwargs):
template_dict = {
'project_id': self._project.id,
'project_id_number': self._project.id_number,
'asset_name': self._asset.get_name(),
'asset_type': self._asset.get_category(),
'file_extension': kwargs.get('extension', self.FILE_EXTENSIONS[0])
}
return template_dict
def get_project(self):
return self._asset.project
def get_file(
self, status=defines.ArtellaFileStatus.WORKING, extension=None, fix_path=False, version=None, **kwargs):
template_dict = self.get_template_dict()
return self._asset.get_file(
file_type=self.FILE_TYPE, status=status, extension=extension, fix_path=fix_path,
version=version, extra_dict=template_dict)
def get_path(self):
return self._asset.get_path()
def get_name(self):
return self._asset.get_name()
def get_extension(self):
return self.get_project().assets_library_file_types.get()
def get_latest_published_versions(self):
file_path = self.get_path()
return artellapipe.AssetsMgr().get_latest_published_versions(file_path, file_type=self.FILE_TYPE)
def get_file_paths(self, return_first=False, fix_path=True, **kwargs):
if self.FILE_TYPE not in self._asset.FILES:
LOGGER.warning(
'FileType "{}" is not a valid file for Assets of type "{}"'.format(
self.FILE_TYPE, self._asset.FILE_TYPE))
return list()
file_paths = super(
ArtellaAssetFile, self).get_file_paths(return_first=return_first, fix_path=fix_path, **kwargs)
if file_paths:
return file_paths
status = kwargs.get('status', defines.ArtellaFileStatus.PUBLISHED)
if status == defines.ArtellaFileStatus.WORKING:
file_path = self.get_working_path()
else:
file_path = self.get_latest_local_published_path()
if not file_path:
return None if return_first else file_paths
if fix_path:
file_path = artellapipe.FilesMgr().fix_path(file_path)
if return_first:
return file_path
else:
return [file_path]
def _open_file(self, file_path):
if file_path and os.path.isfile(file_path):
if path_utils.clean_path(tp.Dcc.scene_name()) == path_utils.clean_path(file_path):
return True
tp.Dcc.open_file(file_path)
return True
elif file_path and os.path.isdir(file_path):
osplatform.open_file(file_path)
return True
else:
if file_path:
folder_path = os.path.dirname(file_path)
if os.path.isdir(folder_path):
osplatform.open_file(folder_path)
return True
LOGGER.warning('Impossible to open file: "{}"'.format(file_path))
return False
| true
| true
|
f7185b30e364d852691b3186ed2a5799603f94f0
| 58,998
|
py
|
Python
|
python/ccxt/bitmex.py
|
myhlcb/ccxt
|
828a373821269d846f418c056f6e4c922d56d18c
|
[
"MIT"
] | 1
|
2021-01-21T23:29:27.000Z
|
2021-01-21T23:29:27.000Z
|
python/ccxt/bitmex.py
|
myhlcb/ccxt
|
828a373821269d846f418c056f6e4c922d56d18c
|
[
"MIT"
] | 1
|
2020-09-17T13:57:58.000Z
|
2020-09-17T13:57:58.000Z
|
python/ccxt/bitmex.py
|
myhlcb/ccxt
|
828a373821269d846f418c056f6e4c922d56d18c
|
[
"MIT"
] | 2
|
2020-06-17T14:28:46.000Z
|
2022-02-26T13:36:02.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
class bitmex(Exchange):
def describe(self):
return self.deep_extend(super(bitmex, self).describe(), {
'id': 'bitmex',
'name': 'BitMEX',
'countries': ['SC'], # Seychelles
'version': 'v1',
'userAgent': None,
'rateLimit': 2000,
'pro': True,
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTransactions': 'emulated',
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'1h': '1h',
'1d': '1d',
},
'urls': {
'test': {
'public': 'https://testnet.bitmex.com',
'private': 'https://testnet.bitmex.com',
},
'logo': 'https://user-images.githubusercontent.com/1294454/27766319-f653c6e6-5ed4-11e7-933d-f0bc3699ae8f.jpg',
'api': {
'public': 'https://www.bitmex.com',
'private': 'https://www.bitmex.com',
},
'www': 'https://www.bitmex.com',
'doc': [
'https://www.bitmex.com/app/apiOverview',
'https://github.com/BitMEX/api-connectors/tree/master/official-http',
],
'fees': 'https://www.bitmex.com/app/fees',
'referral': 'https://www.bitmex.com/register/upZpOX',
},
'api': {
'public': {
'get': [
'announcement',
'announcement/urgent',
'funding',
'instrument',
'instrument/active',
'instrument/activeAndIndices',
'instrument/activeIntervals',
'instrument/compositeIndex',
'instrument/indices',
'insurance',
'leaderboard',
'liquidation',
'orderBook',
'orderBook/L2',
'quote',
'quote/bucketed',
'schema',
'schema/websocketHelp',
'settlement',
'stats',
'stats/history',
'trade',
'trade/bucketed',
],
},
'private': {
'get': [
'apiKey',
'chat',
'chat/channels',
'chat/connected',
'execution',
'execution/tradeHistory',
'notification',
'order',
'position',
'user',
'user/affiliateStatus',
'user/checkReferralCode',
'user/commission',
'user/depositAddress',
'user/executionHistory',
'user/margin',
'user/minWithdrawalFee',
'user/wallet',
'user/walletHistory',
'user/walletSummary',
],
'post': [
'apiKey',
'apiKey/disable',
'apiKey/enable',
'chat',
'order',
'order/bulk',
'order/cancelAllAfter',
'order/closePosition',
'position/isolate',
'position/leverage',
'position/riskLimit',
'position/transferMargin',
'user/cancelWithdrawal',
'user/confirmEmail',
'user/confirmEnableTFA',
'user/confirmWithdrawal',
'user/disableTFA',
'user/logout',
'user/logoutAll',
'user/preferences',
'user/requestEnableTFA',
'user/requestWithdrawal',
],
'put': [
'order',
'order/bulk',
'user',
],
'delete': [
'apiKey',
'order',
'order/all',
],
},
},
'exceptions': {
'exact': {
'Invalid API Key.': AuthenticationError,
'This key is disabled.': PermissionDenied,
'Access Denied': PermissionDenied,
'Duplicate clOrdID': InvalidOrder,
'orderQty is invalid': InvalidOrder,
'Invalid price': InvalidOrder,
'Invalid stopPx for ordType': InvalidOrder,
},
'broad': {
'Signature not valid': AuthenticationError,
'overloaded': ExchangeNotAvailable,
'Account has insufficient Available Balance': InsufficientFunds,
'Service unavailable': ExchangeNotAvailable, # {"error":{"message":"Service unavailable","name":"HTTPError"}}
},
},
'precisionMode': TICK_SIZE,
'options': {
# https://blog.bitmex.com/api_announcement/deprecation-of-api-nonce-header/
# https://github.com/ccxt/ccxt/issues/4789
'api-expires': 5, # in seconds
'fetchOHLCVOpenTimestamp': True,
},
})
def fetch_markets(self, params={}):
response = self.publicGetInstrumentActiveAndIndices(params)
result = []
for i in range(0, len(response)):
market = response[i]
active = (market['state'] != 'Unlisted')
id = market['symbol']
baseId = market['underlying']
quoteId = market['quoteCurrency']
basequote = baseId + quoteId
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
swap = (id == basequote)
# 'positionCurrency' may be empty("", as Bitmex currently returns for ETHUSD)
# so let's take the quote currency first and then adjust if needed
positionId = self.safe_string_2(market, 'positionCurrency', 'quoteCurrency')
type = None
future = False
prediction = False
position = self.safe_currency_code(positionId)
symbol = id
if swap:
type = 'swap'
symbol = base + '/' + quote
elif id.find('B_') >= 0:
prediction = True
type = 'prediction'
else:
future = True
type = 'future'
precision = {
'amount': None,
'price': None,
}
lotSize = self.safe_float(market, 'lotSize')
tickSize = self.safe_float(market, 'tickSize')
if lotSize is not None:
precision['amount'] = lotSize
if tickSize is not None:
precision['price'] = tickSize
limits = {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': tickSize,
'max': self.safe_float(market, 'maxPrice'),
},
'cost': {
'min': None,
'max': None,
},
}
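            # when the position is denominated in the quote currency(e.g. XBTUSD), order quantities are
            # counted in quote units, so lotSize/maxOrderQty constrain 'cost' instead of 'amount'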
limitField = 'cost' if (position == quote) else 'amount'
limits[limitField] = {
'min': lotSize,
'max': self.safe_float(market, 'maxOrderQty'),
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'precision': precision,
'limits': limits,
'taker': self.safe_float(market, 'takerFee'),
'maker': self.safe_float(market, 'makerFee'),
'type': type,
'spot': False,
'swap': swap,
'future': future,
'prediction': prediction,
'info': market,
})
return result
def parse_balance_response(self, response):
#
# [
# {
# "account":1455728,
# "currency":"XBt",
# "riskLimit":1000000000000,
# "prevState":"",
# "state":"",
# "action":"",
# "amount":263542,
# "pendingCredit":0,
# "pendingDebit":0,
# "confirmedDebit":0,
# "prevRealisedPnl":0,
# "prevUnrealisedPnl":0,
# "grossComm":0,
# "grossOpenCost":0,
# "grossOpenPremium":0,
# "grossExecCost":0,
# "grossMarkValue":0,
# "riskValue":0,
# "taxableMargin":0,
# "initMargin":0,
# "maintMargin":0,
# "sessionMargin":0,
# "targetExcessMargin":0,
# "varMargin":0,
# "realisedPnl":0,
# "unrealisedPnl":0,
# "indicativeTax":0,
# "unrealisedProfit":0,
# "syntheticMargin":null,
# "walletBalance":263542,
# "marginBalance":263542,
# "marginBalancePcnt":1,
# "marginLeverage":0,
# "marginUsedPcnt":0,
# "excessMargin":263542,
# "excessMarginPcnt":1,
# "availableMargin":263542,
# "withdrawableMargin":263542,
# "timestamp":"2020-08-03T12:01:01.246Z",
# "grossLastValue":0,
# "commission":null
# }
# ]
#
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
free = self.safe_float(balance, 'availableMargin')
total = self.safe_float(balance, 'marginBalance')
if code == 'BTC':
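                # BitMEX reports XBt(satoshi) values, scale down by 1e8 to obtain BTC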
if free is not None:
free /= 100000000
if total is not None:
total /= 100000000
account['free'] = free
account['total'] = total
result[code] = account
return self.parse_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
request = {
'currency': 'all',
}
response = self.privateGetUserMargin(self.extend(request, params))
#
# [
# {
# "account":1455728,
# "currency":"XBt",
# "riskLimit":1000000000000,
# "prevState":"",
# "state":"",
# "action":"",
# "amount":263542,
# "pendingCredit":0,
# "pendingDebit":0,
# "confirmedDebit":0,
# "prevRealisedPnl":0,
# "prevUnrealisedPnl":0,
# "grossComm":0,
# "grossOpenCost":0,
# "grossOpenPremium":0,
# "grossExecCost":0,
# "grossMarkValue":0,
# "riskValue":0,
# "taxableMargin":0,
# "initMargin":0,
# "maintMargin":0,
# "sessionMargin":0,
# "targetExcessMargin":0,
# "varMargin":0,
# "realisedPnl":0,
# "unrealisedPnl":0,
# "indicativeTax":0,
# "unrealisedProfit":0,
# "syntheticMargin":null,
# "walletBalance":263542,
# "marginBalance":263542,
# "marginBalancePcnt":1,
# "marginLeverage":0,
# "marginUsedPcnt":0,
# "excessMargin":263542,
# "excessMarginPcnt":1,
# "availableMargin":263542,
# "withdrawableMargin":263542,
# "timestamp":"2020-08-03T12:01:01.246Z",
# "grossLastValue":0,
# "commission":null
# }
# ]
#
return self.parse_balance_response(response)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['depth'] = limit
response = self.publicGetOrderBookL2(self.extend(request, params))
result = {
'bids': [],
'asks': [],
'timestamp': None,
'datetime': None,
'nonce': None,
}
for i in range(0, len(response)):
order = response[i]
side = 'asks' if (order['side'] == 'Sell') else 'bids'
amount = self.safe_float(order, 'size')
price = self.safe_float(order, 'price')
# https://github.com/ccxt/ccxt/issues/4926
# https://github.com/ccxt/ccxt/issues/4927
# the exchange sometimes returns null price in the orderbook
if price is not None:
result[side].append([price, amount])
result['bids'] = self.sort_by(result['bids'], 0, True)
result['asks'] = self.sort_by(result['asks'], 0)
return result
def fetch_order(self, id, symbol=None, params={}):
filter = {
'filter': {
'orderID': id,
},
}
response = self.fetch_orders(symbol, None, None, self.deep_extend(filter, params))
numResults = len(response)
if numResults == 1:
return response[0]
raise OrderNotFound(self.id + ': The order ' + id + ' not found.')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startTime'] = self.iso8601(since)
if limit is not None:
request['count'] = limit
request = self.deep_extend(request, params)
# why the hassle? urlencode in python is kinda broken for nested dicts.
# E.g. self.urlencode({"filter": {"open": True}}) will return "filter={'open':+True}"
# Bitmex doesn't like that. Hence resorting to self hack.
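        # serializing the nested dict with self.json() produces proper JSON instead, e.g. '{"open": true}'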
if 'filter' in request:
request['filter'] = self.json(request['filter'])
response = self.privateGetOrder(request)
return self.parse_orders(response, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'filter': {
'open': True,
},
}
return self.fetch_orders(symbol, since, limit, self.deep_extend(request, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# Bitmex barfs if you set 'open': False in the filter...
orders = self.fetch_orders(symbol, since, limit, params)
return self.filter_by(orders, 'status', 'closed')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startTime'] = self.iso8601(since)
if limit is not None:
request['count'] = limit
request = self.deep_extend(request, params)
# why the hassle? urlencode in python is kinda broken for nested dicts.
# E.g. self.urlencode({"filter": {"open": True}}) will return "filter={'open':+True}"
# Bitmex doesn't like that. Hence resorting to self hack.
if 'filter' in request:
request['filter'] = self.json(request['filter'])
response = self.privateGetExecutionTradeHistory(request)
#
# [
# {
# "execID": "string",
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "lastQty": 0,
# "lastPx": 0,
# "underlyingLastPx": 0,
# "lastMkt": "string",
# "lastLiquidityInd": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "execType": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "commission": 0,
# "tradePublishIndicator": "string",
# "multiLegReportingType": "string",
# "text": "string",
# "trdMatchID": "string",
# "execCost": 0,
# "execComm": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "transactTime": "2019-03-05T12:47:02.762Z",
# "timestamp": "2019-03-05T12:47:02.762Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_ledger_entry_type(self, type):
types = {
'Withdrawal': 'transaction',
'RealisedPNL': 'margin',
'UnrealisedPNL': 'margin',
'Deposit': 'transaction',
'Transfer': 'transfer',
'AffiliatePayout': 'referral',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# {
# transactID: "69573da3-7744-5467-3207-89fd6efe7a47",
# account: 24321,
# currency: "XBt",
# transactType: "Withdrawal", # "AffiliatePayout", "Transfer", "Deposit", "RealisedPNL", ...
# amount: -1000000,
# fee: 300000,
# transactStatus: "Completed", # "Canceled", ...
# address: "1Ex4fkF4NhQaQdRWNoYpqiPbDBbq18Kdd9",
# tx: "3BMEX91ZhhKoWtsH9QRb5dNXnmnGpiEetA",
# text: "",
# transactTime: "2017-03-21T20:05:14.388Z",
# walletBalance: 0, # balance after
# marginBalance: null,
# timestamp: "2017-03-22T13:09:23.514Z"
# }
#
        # BitMEX returns the unrealized pnl from the wallet history endpoint.
# The unrealized pnl transaction has an empty timestamp.
        # It is not related to historical pnl; it has its status set to "Pending".
# Therefore it's not a part of the history at all.
# https://github.com/ccxt/ccxt/issues/6047
#
# {
# "transactID":"00000000-0000-0000-0000-000000000000",
# "account":121210,
# "currency":"XBt",
# "transactType":"UnrealisedPNL",
# "amount":-5508,
# "fee":0,
# "transactStatus":"Pending",
# "address":"XBTUSD",
# "tx":"",
# "text":"",
# "transactTime":null, # ←---------------------------- null
# "walletBalance":139198767,
# "marginBalance":139193259,
# "timestamp":null # ←---------------------------- null
# }
#
id = self.safe_string(item, 'transactID')
account = self.safe_string(item, 'account')
referenceId = self.safe_string(item, 'tx')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'transactType'))
currencyId = self.safe_string(item, 'currency')
code = self.safe_currency_code(currencyId, currency)
amount = self.safe_float(item, 'amount')
if amount is not None:
amount = amount / 100000000
timestamp = self.parse8601(self.safe_string(item, 'transactTime'))
if timestamp is None:
# https://github.com/ccxt/ccxt/issues/6047
# set the timestamp to zero, 1970 Jan 1 00:00:00
# for unrealized pnl and other transactions without a timestamp
timestamp = 0 # see comments above
feeCost = self.safe_float(item, 'fee', 0)
if feeCost is not None:
feeCost = feeCost / 100000000
fee = {
'cost': feeCost,
'currency': code,
}
after = self.safe_float(item, 'walletBalance')
if after is not None:
after = after / 100000000
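        # reconstruct the balance before the entry by removing the signed amount from the post-entry walletBalance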
before = self.sum(after, -amount)
direction = None
if amount < 0:
direction = 'out'
amount = abs(amount)
else:
direction = 'in'
status = self.parse_transaction_status(self.safe_string(item, 'transactStatus'))
return {
'id': id,
'info': item,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'direction': direction,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': status,
'fee': fee,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
# 'start': 123,
}
#
# if since is not None:
# # date-based pagination not supported
# }
#
if limit is not None:
request['count'] = limit
response = self.privateGetUserWalletHistory(self.extend(request, params))
#
# [
# {
# transactID: "69573da3-7744-5467-3207-89fd6efe7a47",
# account: 24321,
# currency: "XBt",
# transactType: "Withdrawal", # "AffiliatePayout", "Transfer", "Deposit", "RealisedPNL", ...
# amount: -1000000,
# fee: 300000,
# transactStatus: "Completed", # "Canceled", ...
# address: "1Ex4fkF4NhQaQdRWNoYpqiPbDBbq18Kdd9",
# tx: "3BMEX91ZhhKoWtsH9QRb5dNXnmnGpiEetA",
# text: "",
# transactTime: "2017-03-21T20:05:14.388Z",
# walletBalance: 0, # balance after
# marginBalance: null,
# timestamp: "2017-03-22T13:09:23.514Z"
# }
# ]
#
return self.parse_ledger(response, currency, since, limit)
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'start': 123,
}
#
# if since is not None:
# # date-based pagination not supported
# }
#
if limit is not None:
request['count'] = limit
response = self.privateGetUserWalletHistory(self.extend(request, params))
transactions = self.filter_by_array(response, 'transactType', ['Withdrawal', 'Deposit'], False)
currency = None
if code is not None:
currency = self.currency(code)
return self.parse_transactions(transactions, currency, since, limit)
def parse_transaction_status(self, status):
statuses = {
'Canceled': 'canceled',
'Completed': 'ok',
'Pending': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# {
# 'transactID': 'ffe699c2-95ee-4c13-91f9-0faf41daec25',
# 'account': 123456,
# 'currency': 'XBt',
# 'transactType': 'Withdrawal',
# 'amount': -100100000,
# 'fee': 100000,
# 'transactStatus': 'Completed',
# 'address': '385cR5DM96n1HvBDMzLHPYcw89fZAXULJP',
# 'tx': '3BMEXabcdefghijklmnopqrstuvwxyz123',
# 'text': '',
# 'transactTime': '2019-01-02T01:00:00.000Z',
# 'walletBalance': 99900000,
# 'marginBalance': None,
# 'timestamp': '2019-01-02T13:00:00.000Z'
# }
#
id = self.safe_string(transaction, 'transactID')
# For deposits, transactTime == timestamp
# For withdrawals, transactTime is submission, timestamp is processed
transactTime = self.parse8601(self.safe_string(transaction, 'transactTime'))
timestamp = self.parse8601(self.safe_string(transaction, 'timestamp'))
type = self.safe_string_lower(transaction, 'transactType')
# Deposits have no from address or to address, withdrawals have both
address = None
addressFrom = None
addressTo = None
if type == 'withdrawal':
address = self.safe_string(transaction, 'address')
addressFrom = self.safe_string(transaction, 'tx')
addressTo = address
amount = self.safe_integer(transaction, 'amount')
if amount is not None:
amount = abs(amount) / 10000000
feeCost = self.safe_integer(transaction, 'fee')
if feeCost is not None:
feeCost = feeCost / 10000000
fee = {
'cost': feeCost,
'currency': 'BTC',
}
status = self.safe_string(transaction, 'transactStatus')
if status is not None:
status = self.parse_transaction_status(status)
return {
'info': transaction,
'id': id,
'txid': None,
'timestamp': transactTime,
'datetime': self.iso8601(transactTime),
'addressFrom': addressFrom,
'address': address,
'addressTo': addressTo,
'tagFrom': None,
'tag': None,
'tagTo': None,
'type': type,
'amount': amount,
# BTC is the only currency on Bitmex
'currency': 'BTC',
'status': status,
'updated': timestamp,
'comment': None,
'fee': fee,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
if not market['active']:
raise ExchangeError(self.id + ': symbol ' + symbol + ' is delisted')
tickers = self.fetch_tickers([symbol], params)
ticker = self.safe_value(tickers, symbol)
if ticker is None:
raise ExchangeError(self.id + ' ticker symbol ' + symbol + ' not found')
return ticker
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetInstrumentActiveAndIndices(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = self.safe_string(ticker, 'symbol')
if symbol is not None:
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def parse_ticker(self, ticker, market=None):
#
# { symbol: "ETHH19",
# rootSymbol: "ETH",
# state: "Open",
# typ: "FFCCSX",
# listing: "2018-12-17T04:00:00.000Z",
# front: "2019-02-22T12:00:00.000Z",
# expiry: "2019-03-29T12:00:00.000Z",
# settle: "2019-03-29T12:00:00.000Z",
# relistInterval: null,
# inverseLeg: "",
# sellLeg: "",
# buyLeg: "",
# optionStrikePcnt: null,
# optionStrikeRound: null,
# optionStrikePrice: null,
# optionMultiplier: null,
# positionCurrency: "ETH",
# underlying: "ETH",
# quoteCurrency: "XBT",
# underlyingSymbol: "ETHXBT=",
# reference: "BMEX",
# referenceSymbol: ".BETHXBT30M",
# calcInterval: null,
# publishInterval: null,
# publishTime: null,
# maxOrderQty: 100000000,
# maxPrice: 10,
# lotSize: 1,
# tickSize: 0.00001,
# multiplier: 100000000,
# settlCurrency: "XBt",
# underlyingToPositionMultiplier: 1,
# underlyingToSettleMultiplier: null,
# quoteToSettleMultiplier: 100000000,
# isQuanto: False,
# isInverse: False,
# initMargin: 0.02,
# maintMargin: 0.01,
# riskLimit: 5000000000,
# riskStep: 5000000000,
# limit: null,
# capped: False,
# taxed: True,
# deleverage: True,
# makerFee: -0.0005,
# takerFee: 0.0025,
# settlementFee: 0,
# insuranceFee: 0,
# fundingBaseSymbol: "",
# fundingQuoteSymbol: "",
# fundingPremiumSymbol: "",
# fundingTimestamp: null,
# fundingInterval: null,
# fundingRate: null,
# indicativeFundingRate: null,
# rebalanceTimestamp: null,
# rebalanceInterval: null,
# openingTimestamp: "2019-02-13T08:00:00.000Z",
# closingTimestamp: "2019-02-13T09:00:00.000Z",
# sessionInterval: "2000-01-01T01:00:00.000Z",
# prevClosePrice: 0.03347,
# limitDownPrice: null,
# limitUpPrice: null,
# bankruptLimitDownPrice: null,
# bankruptLimitUpPrice: null,
# prevTotalVolume: 1386531,
# totalVolume: 1387062,
# volume: 531,
# volume24h: 17118,
# prevTotalTurnover: 4741294246000,
# totalTurnover: 4743103466000,
# turnover: 1809220000,
# turnover24h: 57919845000,
# homeNotional24h: 17118,
# foreignNotional24h: 579.19845,
# prevPrice24h: 0.03349,
# vwap: 0.03383564,
# highPrice: 0.03458,
# lowPrice: 0.03329,
# lastPrice: 0.03406,
# lastPriceProtected: 0.03406,
# lastTickDirection: "ZeroMinusTick",
# lastChangePcnt: 0.017,
# bidPrice: 0.03406,
# midPrice: 0.034065,
# askPrice: 0.03407,
# impactBidPrice: 0.03406,
# impactMidPrice: 0.034065,
# impactAskPrice: 0.03407,
# hasLiquidity: True,
# openInterest: 83679,
# openValue: 285010674000,
# fairMethod: "ImpactMidPrice",
# fairBasisRate: 0,
# fairBasis: 0,
# fairPrice: 0.03406,
# markMethod: "FairPrice",
# markPrice: 0.03406,
# indicativeTaxRate: 0,
# indicativeSettlePrice: 0.03406,
# optionUnderlyingPrice: null,
# settledPrice: null,
# timestamp: "2019-02-13T08:40:30.000Z",
# }
#
symbol = None
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_value(self.markets_by_id, marketId, market)
if market is not None:
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
open = self.safe_float(ticker, 'prevPrice24h')
last = self.safe_float(ticker, 'lastPrice')
change = None
percentage = None
if last is not None and open is not None:
change = last - open
if open > 0:
percentage = change / open * 100
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'highPrice'),
'low': self.safe_float(ticker, 'lowPrice'),
'bid': self.safe_float(ticker, 'bidPrice'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'askPrice'),
'askVolume': None,
'vwap': self.safe_float(ticker, 'vwap'),
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': self.sum(open, last) / 2,
'baseVolume': self.safe_float(ticker, 'homeNotional24h'),
'quoteVolume': self.safe_float(ticker, 'foreignNotional24h'),
'info': ticker,
}
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "timestamp":"2015-09-25T13:38:00.000Z",
# "symbol":"XBTUSD",
# "open":237.45,
# "high":237.45,
# "low":237.45,
# "close":237.45,
# "trades":0,
# "volume":0,
# "vwap":null,
# "lastSize":null,
# "turnover":0,
# "homeNotional":0,
# "foreignNotional":0
# }
#
return [
self.parse8601(self.safe_string(ohlcv, 'timestamp')),
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'high'),
self.safe_float(ohlcv, 'low'),
self.safe_float(ohlcv, 'close'),
self.safe_float(ohlcv, 'volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
# send JSON key/value pairs, such as {"key": "value"}
# filter by individual fields and do advanced queries on timestamps
# filter = {'key': 'value'}
# send a bare series(e.g. XBU) to nearest expiring contract in that series
# you can also send a timeframe, e.g. XBU:monthly
# timeframes: daily, weekly, monthly, quarterly, and biquarterly
market = self.market(symbol)
request = {
'symbol': market['id'],
'binSize': self.timeframes[timeframe],
'partial': True, # True == include yet-incomplete current bins
# 'filter': filter, # filter by individual fields and do advanced queries
# 'columns': [], # will return all columns if omitted
# 'start': 0, # starting point for results(wtf?)
# 'reverse': False, # True == newest first
# 'endTime': '', # ending date filter for results
}
if limit is not None:
request['count'] = limit # default 100, max 500
duration = self.parse_timeframe(timeframe) * 1000
fetchOHLCVOpenTimestamp = self.safe_value(self.options, 'fetchOHLCVOpenTimestamp', True)
# if since is not set, they will return candles starting from 2017-01-01
if since is not None:
timestamp = since
if fetchOHLCVOpenTimestamp:
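                # request one bar later, the close-to-open shift below moves the first candle back onto 'since'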
timestamp = self.sum(timestamp, duration)
ymdhms = self.ymdhms(timestamp)
request['startTime'] = ymdhms # starting date filter for results
else:
request['reverse'] = True
response = self.publicGetTradeBucketed(self.extend(request, params))
#
# [
# {"timestamp":"2015-09-25T13:38:00.000Z","symbol":"XBTUSD","open":237.45,"high":237.45,"low":237.45,"close":237.45,"trades":0,"volume":0,"vwap":null,"lastSize":null,"turnover":0,"homeNotional":0,"foreignNotional":0},
# {"timestamp":"2015-09-25T13:39:00.000Z","symbol":"XBTUSD","open":237.45,"high":237.45,"low":237.45,"close":237.45,"trades":0,"volume":0,"vwap":null,"lastSize":null,"turnover":0,"homeNotional":0,"foreignNotional":0},
# {"timestamp":"2015-09-25T13:40:00.000Z","symbol":"XBTUSD","open":237.45,"high":237.45,"low":237.45,"close":237.45,"trades":0,"volume":0,"vwap":null,"lastSize":null,"turnover":0,"homeNotional":0,"foreignNotional":0}
# ]
#
result = self.parse_ohlcvs(response, market, timeframe, since, limit)
if fetchOHLCVOpenTimestamp:
# bitmex returns the candle's close timestamp - https://github.com/ccxt/ccxt/issues/4446
# we can emulate the open timestamp by shifting all the timestamps one place
# so the previous close becomes the current open, and we drop the first candle
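            # e.g. with 1m candles a reported close timestamp of 12:01:00 becomes the 12:00:00 open timestamp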
for i in range(0, len(result)):
result[i][0] = result[i][0] - duration
return result
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# timestamp: '2018-08-28T00:00:02.735Z',
# symbol: 'XBTUSD',
# side: 'Buy',
# size: 2000,
# price: 6906.5,
# tickDirection: 'PlusTick',
# trdMatchID: 'b9a42432-0a46-6a2f-5ecc-c32e9ca4baf8',
# grossValue: 28958000,
# homeNotional: 0.28958,
# foreignNotional: 2000
# }
#
# fetchMyTrades(private)
#
# {
# "execID": "string",
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "lastQty": 0,
# "lastPx": 0,
# "underlyingLastPx": 0,
# "lastMkt": "string",
# "lastLiquidityInd": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "execType": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "commission": 0,
# "tradePublishIndicator": "string",
# "multiLegReportingType": "string",
# "text": "string",
# "trdMatchID": "string",
# "execCost": 0,
# "execComm": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "transactTime": "2019-03-05T12:47:02.762Z",
# "timestamp": "2019-03-05T12:47:02.762Z"
# }
#
timestamp = self.parse8601(self.safe_string(trade, 'timestamp'))
price = self.safe_float(trade, 'price')
amount = self.safe_float_2(trade, 'size', 'lastQty')
id = self.safe_string(trade, 'trdMatchID')
order = self.safe_string(trade, 'orderID')
side = self.safe_string_lower(trade, 'side')
# price * amount doesn't work for all symbols(e.g. XBT, ETH)
cost = self.safe_float(trade, 'execCost')
if cost is not None:
cost = abs(cost) / 100000000
fee = None
if 'execComm' in trade:
feeCost = self.safe_float(trade, 'execComm')
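            # execComm is denominated in the settlement currency's smallest unit(satoshi for XBt)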
feeCost = feeCost / 100000000
currencyId = self.safe_string(trade, 'settlCurrency')
feeCurrency = self.safe_currency_code(currencyId)
feeRate = self.safe_float(trade, 'commission')
fee = {
'cost': feeCost,
'currency': feeCurrency,
'rate': feeRate,
}
takerOrMaker = None
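        # BitMEX reports maker rebates as negative commission, so a negative fee cost implies a maker fill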
if fee is not None:
takerOrMaker = 'maker' if (fee['cost'] < 0) else 'taker'
symbol = None
marketId = self.safe_string(trade, 'symbol')
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = marketId
type = self.safe_string_lower(trade, 'ordType')
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': order,
'type': type,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'fee': fee,
}
def parse_order_status(self, status):
statuses = {
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'DoneForDay': 'open',
'Canceled': 'canceled',
'PendingCancel': 'open',
'PendingNew': 'open',
'Rejected': 'rejected',
'Expired': 'expired',
'Stopped': 'open',
'Untriggered': 'open',
'Triggered': 'open',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
status = self.parse_order_status(self.safe_string(order, 'ordStatus'))
symbol = None
if market is not None:
symbol = market['symbol']
else:
marketId = self.safe_string(order, 'symbol')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
lastTradeTimestamp = self.parse8601(self.safe_string(order, 'transactTime'))
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'orderQty')
filled = self.safe_float(order, 'cumQty', 0.0)
remaining = None
if amount is not None:
if filled is not None:
remaining = max(amount - filled, 0.0)
average = self.safe_float(order, 'avgPx')
cost = None
if filled is not None:
if average is not None:
cost = average * filled
elif price is not None:
cost = price * filled
id = self.safe_string(order, 'orderID')
type = self.safe_string_lower(order, 'ordType')
side = self.safe_string_lower(order, 'side')
clientOrderId = self.safe_string(order, 'clOrdID')
return {
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
'trades': None,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = self.iso8601(since)
else:
# by default reverse=false, i.e. trades are fetched since the time of market inception(year 2015 for XBTUSD)
request['reverse'] = True
if limit is not None:
request['count'] = limit
response = self.publicGetTrade(self.extend(request, params))
#
# [
# {
# timestamp: '2018-08-28T00:00:02.735Z',
# symbol: 'XBTUSD',
# side: 'Buy',
# size: 2000,
# price: 6906.5,
# tickDirection: 'PlusTick',
# trdMatchID: 'b9a42432-0a46-6a2f-5ecc-c32e9ca4baf8',
# grossValue: 28958000,
# homeNotional: 0.28958,
# foreignNotional: 2000
# },
# {
# timestamp: '2018-08-28T00:00:03.778Z',
# symbol: 'XBTUSD',
# side: 'Sell',
# size: 1000,
# price: 6906,
# tickDirection: 'MinusTick',
# trdMatchID: '0d4f1682-5270-a800-569b-4a0eb92db97c',
# grossValue: 14480000,
# homeNotional: 0.1448,
# foreignNotional: 1000
# },
# ]
#
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'side': self.capitalize(side),
'orderQty': amount,
'ordType': self.capitalize(type),
}
if price is not None:
request['price'] = price
clientOrderId = self.safe_string_2(params, 'clOrdID', 'clientOrderId')
if clientOrderId is not None:
request['clOrdID'] = clientOrderId
params = self.omit(params, ['clOrdID', 'clientOrderId'])
response = self.privatePostOrder(self.extend(request, params))
return self.parse_order(response, market)
def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
self.load_markets()
request = {}
origClOrdID = self.safe_string_2(params, 'origClOrdID', 'clientOrderId')
if origClOrdID is not None:
request['origClOrdID'] = origClOrdID
clientOrderId = self.safe_string(params, 'clOrdID', 'clientOrderId')
if clientOrderId is not None:
request['clOrdID'] = clientOrderId
params = self.omit(params, ['origClOrdID', 'clOrdID', 'clientOrderId'])
else:
request['orderID'] = id
if amount is not None:
request['orderQty'] = amount
if price is not None:
request['price'] = price
response = self.privatePutOrder(self.extend(request, params))
return self.parse_order(response)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
# https://github.com/ccxt/ccxt/issues/6507
clientOrderId = self.safe_string_2(params, 'clOrdID', 'clientOrderId')
request = {}
if clientOrderId is None:
request['orderID'] = id
else:
request['clOrdID'] = clientOrderId
params = self.omit(params, ['clOrdID', 'clientOrderId'])
response = self.privateDeleteOrder(self.extend(request, params))
order = self.safe_value(response, 0, {})
error = self.safe_string(order, 'error')
if error is not None:
if error.find('Unable to cancel order due to existing state') >= 0:
raise OrderNotFound(self.id + ' cancelOrder() failed: ' + error)
return self.parse_order(order)
def cancel_all_orders(self, symbol=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privateDeleteOrderAll(self.extend(request, params))
#
# [
# {
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "multiLegReportingType": "string",
# "text": "string",
# "transactTime": "2020-06-01T09:36:35.290Z",
# "timestamp": "2020-06-01T09:36:35.290Z"
# }
# ]
#
return self.parse_orders(response, market)
def is_fiat(self, currency):
if currency == 'EUR':
return True
if currency == 'PLN':
return True
return False
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
# currency = self.currency(code)
if code != 'BTC':
            raise ExchangeError(self.id + ' supports BTC withdrawals only, other currencies coming soon...')
request = {
'currency': 'XBt', # temporarily
'amount': amount,
'address': address,
            # 'otpToken': '123456',  # required if two-factor auth(OTP) is enabled
# 'fee': 0.001, # bitcoin network fee
}
response = self.privatePostUserRequestWithdrawal(self.extend(request, params))
return {
'info': response,
'id': self.safe_string(response, 'transactID'),
}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if code == 429:
raise DDoSProtection(self.id + ' ' + body)
if code >= 400:
error = self.safe_value(response, 'error', {})
message = self.safe_string(error, 'message')
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
if code == 400:
raise BadRequest(feedback)
raise ExchangeError(feedback) # unknown message
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = '/api/' + self.version + '/' + path
if method == 'GET':
if params:
query += '?' + self.urlencode(params)
else:
format = self.safe_string(params, '_format')
if format is not None:
query += '?' + self.urlencode({'_format': format})
params = self.omit(params, '_format')
url = self.urls['api'][api] + query
if self.apiKey and self.secret:
auth = method + query
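            # the signature is a hex HMAC-SHA256 of verb + path(+query) + expires + body, sent in 'api-signature'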
expires = self.safe_integer(self.options, 'api-expires')
headers = {
'Content-Type': 'application/json',
'api-key': self.apiKey,
}
expires = self.sum(self.seconds(), expires)
expires = str(expires)
auth += expires
headers['api-expires'] = expires
if method == 'POST' or method == 'PUT' or method == 'DELETE':
if params:
body = self.json(params)
auth += body
headers['api-signature'] = self.hmac(self.encode(auth), self.encode(self.secret))
return {'url': url, 'method': method, 'body': body, 'headers': headers}
| 41.402105
| 233
| 0.45927
|
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
class bitmex(Exchange):
def describe(self):
return self.deep_extend(super(bitmex, self).describe(), {
'id': 'bitmex',
'name': 'BitMEX',
'countries': ['SC'],
'version': 'v1',
'userAgent': None,
'rateLimit': 2000,
'pro': True,
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTransactions': 'emulated',
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'1h': '1h',
'1d': '1d',
},
'urls': {
'test': {
'public': 'https://testnet.bitmex.com',
'private': 'https://testnet.bitmex.com',
},
'logo': 'https://user-images.githubusercontent.com/1294454/27766319-f653c6e6-5ed4-11e7-933d-f0bc3699ae8f.jpg',
'api': {
'public': 'https://www.bitmex.com',
'private': 'https://www.bitmex.com',
},
'www': 'https://www.bitmex.com',
'doc': [
'https://www.bitmex.com/app/apiOverview',
'https://github.com/BitMEX/api-connectors/tree/master/official-http',
],
'fees': 'https://www.bitmex.com/app/fees',
'referral': 'https://www.bitmex.com/register/upZpOX',
},
'api': {
'public': {
'get': [
'announcement',
'announcement/urgent',
'funding',
'instrument',
'instrument/active',
'instrument/activeAndIndices',
'instrument/activeIntervals',
'instrument/compositeIndex',
'instrument/indices',
'insurance',
'leaderboard',
'liquidation',
'orderBook',
'orderBook/L2',
'quote',
'quote/bucketed',
'schema',
'schema/websocketHelp',
'settlement',
'stats',
'stats/history',
'trade',
'trade/bucketed',
],
},
'private': {
'get': [
'apiKey',
'chat',
'chat/channels',
'chat/connected',
'execution',
'execution/tradeHistory',
'notification',
'order',
'position',
'user',
'user/affiliateStatus',
'user/checkReferralCode',
'user/commission',
'user/depositAddress',
'user/executionHistory',
'user/margin',
'user/minWithdrawalFee',
'user/wallet',
'user/walletHistory',
'user/walletSummary',
],
'post': [
'apiKey',
'apiKey/disable',
'apiKey/enable',
'chat',
'order',
'order/bulk',
'order/cancelAllAfter',
'order/closePosition',
'position/isolate',
'position/leverage',
'position/riskLimit',
'position/transferMargin',
'user/cancelWithdrawal',
'user/confirmEmail',
'user/confirmEnableTFA',
'user/confirmWithdrawal',
'user/disableTFA',
'user/logout',
'user/logoutAll',
'user/preferences',
'user/requestEnableTFA',
'user/requestWithdrawal',
],
'put': [
'order',
'order/bulk',
'user',
],
'delete': [
'apiKey',
'order',
'order/all',
],
},
},
'exceptions': {
'exact': {
'Invalid API Key.': AuthenticationError,
'This key is disabled.': PermissionDenied,
'Access Denied': PermissionDenied,
'Duplicate clOrdID': InvalidOrder,
'orderQty is invalid': InvalidOrder,
'Invalid price': InvalidOrder,
'Invalid stopPx for ordType': InvalidOrder,
},
'broad': {
'Signature not valid': AuthenticationError,
'overloaded': ExchangeNotAvailable,
'Account has insufficient Available Balance': InsufficientFunds,
'Service unavailable': ExchangeNotAvailable,
},
},
'precisionMode': TICK_SIZE,
'options': {
'api-expires': 5,
'fetchOHLCVOpenTimestamp': True,
},
})
def fetch_markets(self, params={}):
response = self.publicGetInstrumentActiveAndIndices(params)
result = []
for i in range(0, len(response)):
market = response[i]
active = (market['state'] != 'Unlisted')
id = market['symbol']
baseId = market['underlying']
quoteId = market['quoteCurrency']
basequote = baseId + quoteId
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
swap = (id == basequote)
positionId = self.safe_string_2(market, 'positionCurrency', 'quoteCurrency')
type = None
future = False
prediction = False
position = self.safe_currency_code(positionId)
symbol = id
if swap:
type = 'swap'
symbol = base + '/' + quote
elif id.find('B_') >= 0:
prediction = True
type = 'prediction'
else:
future = True
type = 'future'
precision = {
'amount': None,
'price': None,
}
lotSize = self.safe_float(market, 'lotSize')
tickSize = self.safe_float(market, 'tickSize')
if lotSize is not None:
precision['amount'] = lotSize
if tickSize is not None:
precision['price'] = tickSize
limits = {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': tickSize,
'max': self.safe_float(market, 'maxPrice'),
},
'cost': {
'min': None,
'max': None,
},
}
limitField = 'cost' if (position == quote) else 'amount'
limits[limitField] = {
'min': lotSize,
'max': self.safe_float(market, 'maxOrderQty'),
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'precision': precision,
'limits': limits,
'taker': self.safe_float(market, 'takerFee'),
'maker': self.safe_float(market, 'makerFee'),
'type': type,
'spot': False,
'swap': swap,
'future': future,
'prediction': prediction,
'info': market,
})
return result
def parse_balance_response(self, response):
#
# [
# {
# "account":1455728,
# "currency":"XBt",
# "riskLimit":1000000000000,
# "prevState":"",
# "state":"",
# "action":"",
# "amount":263542,
# "pendingCredit":0,
# "pendingDebit":0,
# "confirmedDebit":0,
# "prevRealisedPnl":0,
# "prevUnrealisedPnl":0,
# "grossComm":0,
# "grossOpenCost":0,
# "grossOpenPremium":0,
# "grossExecCost":0,
# "grossMarkValue":0,
# "riskValue":0,
# "taxableMargin":0,
# "initMargin":0,
# "maintMargin":0,
# "sessionMargin":0,
# "targetExcessMargin":0,
# "varMargin":0,
# "realisedPnl":0,
# "unrealisedPnl":0,
# "indicativeTax":0,
# "unrealisedProfit":0,
# "syntheticMargin":null,
# "walletBalance":263542,
# "marginBalance":263542,
# "marginBalancePcnt":1,
# "marginLeverage":0,
# "marginUsedPcnt":0,
# "excessMargin":263542,
# "excessMarginPcnt":1,
# "availableMargin":263542,
# "withdrawableMargin":263542,
# "timestamp":"2020-08-03T12:01:01.246Z",
# "grossLastValue":0,
# "commission":null
# }
# ]
#
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
free = self.safe_float(balance, 'availableMargin')
total = self.safe_float(balance, 'marginBalance')
if code == 'BTC':
if free is not None:
free /= 100000000
if total is not None:
total /= 100000000
account['free'] = free
account['total'] = total
result[code] = account
return self.parse_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
request = {
'currency': 'all',
}
response = self.privateGetUserMargin(self.extend(request, params))
#
# [
# {
# "account":1455728,
# "currency":"XBt",
# "riskLimit":1000000000000,
# "prevState":"",
# "state":"",
# "action":"",
# "amount":263542,
# "pendingCredit":0,
# "pendingDebit":0,
# "confirmedDebit":0,
# "prevRealisedPnl":0,
# "prevUnrealisedPnl":0,
# "grossComm":0,
# "grossOpenCost":0,
# "grossOpenPremium":0,
# "grossExecCost":0,
# "grossMarkValue":0,
# "riskValue":0,
# "taxableMargin":0,
# "initMargin":0,
# "maintMargin":0,
# "sessionMargin":0,
# "targetExcessMargin":0,
# "varMargin":0,
# "realisedPnl":0,
# "unrealisedPnl":0,
# "indicativeTax":0,
# "unrealisedProfit":0,
# "syntheticMargin":null,
# "walletBalance":263542,
# "marginBalance":263542,
# "marginBalancePcnt":1,
# "marginLeverage":0,
# "marginUsedPcnt":0,
# "excessMargin":263542,
# "excessMarginPcnt":1,
# "availableMargin":263542,
# "withdrawableMargin":263542,
# "timestamp":"2020-08-03T12:01:01.246Z",
# "grossLastValue":0,
# "commission":null
# }
# ]
#
return self.parse_balance_response(response)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['depth'] = limit
response = self.publicGetOrderBookL2(self.extend(request, params))
result = {
'bids': [],
'asks': [],
'timestamp': None,
'datetime': None,
'nonce': None,
}
for i in range(0, len(response)):
order = response[i]
side = 'asks' if (order['side'] == 'Sell') else 'bids'
amount = self.safe_float(order, 'size')
price = self.safe_float(order, 'price')
# https://github.com/ccxt/ccxt/issues/4926
# https://github.com/ccxt/ccxt/issues/4927
# the exchange sometimes returns null price in the orderbook
if price is not None:
result[side].append([price, amount])
result['bids'] = self.sort_by(result['bids'], 0, True)
result['asks'] = self.sort_by(result['asks'], 0)
return result
def fetch_order(self, id, symbol=None, params={}):
filter = {
'filter': {
'orderID': id,
},
}
response = self.fetch_orders(symbol, None, None, self.deep_extend(filter, params))
numResults = len(response)
if numResults == 1:
return response[0]
raise OrderNotFound(self.id + ': The order ' + id + ' not found.')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startTime'] = self.iso8601(since)
if limit is not None:
request['count'] = limit
request = self.deep_extend(request, params)
# why the hassle? urlencode in python is kinda broken for nested dicts.
# E.g. self.urlencode({"filter": {"open": True}}) will return "filter={'open':+True}"
# Bitmex doesn't like that. Hence resorting to self hack.
if 'filter' in request:
request['filter'] = self.json(request['filter'])
response = self.privateGetOrder(request)
return self.parse_orders(response, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'filter': {
'open': True,
},
}
return self.fetch_orders(symbol, since, limit, self.deep_extend(request, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
orders = self.fetch_orders(symbol, since, limit, params)
return self.filter_by(orders, 'status', 'closed')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startTime'] = self.iso8601(since)
if limit is not None:
request['count'] = limit
request = self.deep_extend(request, params)
if 'filter' in request:
request['filter'] = self.json(request['filter'])
response = self.privateGetExecutionTradeHistory(request)
#
# [
# {
# "execID": "string",
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "lastQty": 0,
# "lastPx": 0,
# "underlyingLastPx": 0,
# "lastMkt": "string",
# "lastLiquidityInd": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "execType": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "commission": 0,
# "tradePublishIndicator": "string",
# "multiLegReportingType": "string",
# "text": "string",
# "trdMatchID": "string",
# "execCost": 0,
# "execComm": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "transactTime": "2019-03-05T12:47:02.762Z",
# "timestamp": "2019-03-05T12:47:02.762Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_ledger_entry_type(self, type):
types = {
'Withdrawal': 'transaction',
'RealisedPNL': 'margin',
'UnrealisedPNL': 'margin',
'Deposit': 'transaction',
'Transfer': 'transfer',
'AffiliatePayout': 'referral',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# {
# transactID: "69573da3-7744-5467-3207-89fd6efe7a47",
# account: 24321,
# currency: "XBt",
# transactType: "Withdrawal", # "AffiliatePayout", "Transfer", "Deposit", "RealisedPNL", ...
# amount: -1000000,
# fee: 300000,
# transactStatus: "Completed", # "Canceled", ...
# address: "1Ex4fkF4NhQaQdRWNoYpqiPbDBbq18Kdd9",
# tx: "3BMEX91ZhhKoWtsH9QRb5dNXnmnGpiEetA",
# text: "",
# transactTime: "2017-03-21T20:05:14.388Z",
# walletBalance: 0, # balance after
# marginBalance: null,
# timestamp: "2017-03-22T13:09:23.514Z"
# }
#
        # BitMEX returns the unrealized pnl from the wallet history endpoint.
        # The unrealized pnl transaction has an empty timestamp.
        # It is not related to historical pnl, and its status is set to "Pending",
        # therefore it's not a part of the history at all.
        id = self.safe_string(item, 'transactID')
account = self.safe_string(item, 'account')
referenceId = self.safe_string(item, 'tx')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'transactType'))
currencyId = self.safe_string(item, 'currency')
code = self.safe_currency_code(currencyId, currency)
amount = self.safe_float(item, 'amount')
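        # BitMEX wallet amounts are denominated in satoshi, hence the division by 100000000(1 BTC) below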
if amount is not None:
amount = amount / 100000000
timestamp = self.parse8601(self.safe_string(item, 'transactTime'))
if timestamp is None:
timestamp = 0
feeCost = self.safe_float(item, 'fee', 0)
if feeCost is not None:
feeCost = feeCost / 100000000
fee = {
'cost': feeCost,
'currency': code,
}
after = self.safe_float(item, 'walletBalance')
if after is not None:
after = after / 100000000
before = self.sum(after, -amount)
direction = None
if amount < 0:
direction = 'out'
amount = abs(amount)
else:
direction = 'in'
status = self.parse_transaction_status(self.safe_string(item, 'transactStatus'))
return {
'id': id,
'info': item,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'direction': direction,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': status,
'fee': fee,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
}
        if limit is not None:
request['count'] = limit
response = self.privateGetUserWalletHistory(self.extend(request, params))
return self.parse_ledger(response, currency, since, limit)
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
}
        if limit is not None:
request['count'] = limit
response = self.privateGetUserWalletHistory(self.extend(request, params))
transactions = self.filter_by_array(response, 'transactType', ['Withdrawal', 'Deposit'], False)
currency = None
if code is not None:
currency = self.currency(code)
return self.parse_transactions(transactions, currency, since, limit)
def parse_transaction_status(self, status):
statuses = {
'Canceled': 'canceled',
'Completed': 'ok',
'Pending': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
id = self.safe_string(transaction, 'transactID')
transactTime = self.parse8601(self.safe_string(transaction, 'transactTime'))
timestamp = self.parse8601(self.safe_string(transaction, 'timestamp'))
type = self.safe_string_lower(transaction, 'transactType')
address = None
addressFrom = None
addressTo = None
if type == 'withdrawal':
address = self.safe_string(transaction, 'address')
addressFrom = self.safe_string(transaction, 'tx')
addressTo = address
amount = self.safe_integer(transaction, 'amount')
if amount is not None:
            amount = abs(amount) / 100000000
feeCost = self.safe_integer(transaction, 'fee')
if feeCost is not None:
            feeCost = feeCost / 100000000
fee = {
'cost': feeCost,
'currency': 'BTC',
}
status = self.safe_string(transaction, 'transactStatus')
if status is not None:
status = self.parse_transaction_status(status)
return {
'info': transaction,
'id': id,
'txid': None,
'timestamp': transactTime,
'datetime': self.iso8601(transactTime),
'addressFrom': addressFrom,
'address': address,
'addressTo': addressTo,
'tagFrom': None,
'tag': None,
'tagTo': None,
'type': type,
'amount': amount,
'currency': 'BTC',
'status': status,
'updated': timestamp,
'comment': None,
'fee': fee,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
if not market['active']:
raise ExchangeError(self.id + ': symbol ' + symbol + ' is delisted')
tickers = self.fetch_tickers([symbol], params)
ticker = self.safe_value(tickers, symbol)
if ticker is None:
raise ExchangeError(self.id + ' ticker symbol ' + symbol + ' not found')
return ticker
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetInstrumentActiveAndIndices(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = self.safe_string(ticker, 'symbol')
if symbol is not None:
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def parse_ticker(self, ticker, market=None):
symbol = None
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_value(self.markets_by_id, marketId, market)
if market is not None:
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
open = self.safe_float(ticker, 'prevPrice24h')
last = self.safe_float(ticker, 'lastPrice')
change = None
percentage = None
if last is not None and open is not None:
change = last - open
if open > 0:
percentage = change / open * 100
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'highPrice'),
'low': self.safe_float(ticker, 'lowPrice'),
'bid': self.safe_float(ticker, 'bidPrice'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'askPrice'),
'askVolume': None,
'vwap': self.safe_float(ticker, 'vwap'),
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': self.sum(open, last) / 2,
'baseVolume': self.safe_float(ticker, 'homeNotional24h'),
'quoteVolume': self.safe_float(ticker, 'foreignNotional24h'),
'info': ticker,
}
def parse_ohlcv(self, ohlcv, market=None):
return [
self.parse8601(self.safe_string(ohlcv, 'timestamp')),
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'high'),
self.safe_float(ohlcv, 'low'),
self.safe_float(ohlcv, 'close'),
self.safe_float(ohlcv, 'volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'binSize': self.timeframes[timeframe],
'partial': True,
        }
        if limit is not None:
            request['count'] = limit
        duration = self.parse_timeframe(timeframe) * 1000
fetchOHLCVOpenTimestamp = self.safe_value(self.options, 'fetchOHLCVOpenTimestamp', True)
if since is not None:
timestamp = since
if fetchOHLCVOpenTimestamp:
timestamp = self.sum(timestamp, duration)
ymdhms = self.ymdhms(timestamp)
request['startTime'] = ymdhms
else:
request['reverse'] = True
response = self.publicGetTradeBucketed(self.extend(request, params))
result = self.parse_ohlcvs(response, market, timeframe, since, limit)
if fetchOHLCVOpenTimestamp:
# we can emulate the open timestamp by shifting all the timestamps one place
# so the previous close becomes the current open, and we drop the first candle
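            # e.g. for 1m candles duration == 60000 ms, so a candle reported with
            # close timestamp 1546300860000 is re-keyed to open timestamp 1546300800000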
for i in range(0, len(result)):
result[i][0] = result[i][0] - duration
return result
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# timestamp: '2018-08-28T00:00:02.735Z',
# symbol: 'XBTUSD',
# side: 'Buy',
# size: 2000,
# price: 6906.5,
# tickDirection: 'PlusTick',
# trdMatchID: 'b9a42432-0a46-6a2f-5ecc-c32e9ca4baf8',
# grossValue: 28958000,
# homeNotional: 0.28958,
# foreignNotional: 2000
# }
#
# fetchMyTrades(private)
#
# {
# "execID": "string",
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "lastQty": 0,
# "lastPx": 0,
# "underlyingLastPx": 0,
# "lastMkt": "string",
# "lastLiquidityInd": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "execType": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "commission": 0,
# "tradePublishIndicator": "string",
# "multiLegReportingType": "string",
# "text": "string",
# "trdMatchID": "string",
# "execCost": 0,
# "execComm": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "transactTime": "2019-03-05T12:47:02.762Z",
# "timestamp": "2019-03-05T12:47:02.762Z"
# }
#
timestamp = self.parse8601(self.safe_string(trade, 'timestamp'))
price = self.safe_float(trade, 'price')
amount = self.safe_float_2(trade, 'size', 'lastQty')
id = self.safe_string(trade, 'trdMatchID')
order = self.safe_string(trade, 'orderID')
side = self.safe_string_lower(trade, 'side')
# price * amount doesn't work for all symbols(e.g. XBT, ETH)
cost = self.safe_float(trade, 'execCost')
if cost is not None:
cost = abs(cost) / 100000000
fee = None
if 'execComm' in trade:
feeCost = self.safe_float(trade, 'execComm')
feeCost = feeCost / 100000000
currencyId = self.safe_string(trade, 'settlCurrency')
feeCurrency = self.safe_currency_code(currencyId)
feeRate = self.safe_float(trade, 'commission')
fee = {
'cost': feeCost,
'currency': feeCurrency,
'rate': feeRate,
}
takerOrMaker = None
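        # the sign of the fee is used to infer maker vs taker: a negative commission(a rebate) is treated as a maker fill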
if fee is not None:
takerOrMaker = 'maker' if (fee['cost'] < 0) else 'taker'
symbol = None
marketId = self.safe_string(trade, 'symbol')
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = marketId
type = self.safe_string_lower(trade, 'ordType')
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': order,
'type': type,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'fee': fee,
}
def parse_order_status(self, status):
statuses = {
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'DoneForDay': 'open',
'Canceled': 'canceled',
'PendingCancel': 'open',
'PendingNew': 'open',
'Rejected': 'rejected',
'Expired': 'expired',
'Stopped': 'open',
'Untriggered': 'open',
'Triggered': 'open',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
status = self.parse_order_status(self.safe_string(order, 'ordStatus'))
symbol = None
if market is not None:
symbol = market['symbol']
else:
marketId = self.safe_string(order, 'symbol')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
lastTradeTimestamp = self.parse8601(self.safe_string(order, 'transactTime'))
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'orderQty')
filled = self.safe_float(order, 'cumQty', 0.0)
remaining = None
if amount is not None:
if filled is not None:
remaining = max(amount - filled, 0.0)
average = self.safe_float(order, 'avgPx')
cost = None
if filled is not None:
if average is not None:
cost = average * filled
elif price is not None:
cost = price * filled
id = self.safe_string(order, 'orderID')
type = self.safe_string_lower(order, 'ordType')
side = self.safe_string_lower(order, 'side')
clientOrderId = self.safe_string(order, 'clOrdID')
return {
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
'trades': None,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = self.iso8601(since)
else:
request['reverse'] = True
if limit is not None:
request['count'] = limit
response = self.publicGetTrade(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'side': self.capitalize(side),
'orderQty': amount,
'ordType': self.capitalize(type),
}
if price is not None:
request['price'] = price
clientOrderId = self.safe_string_2(params, 'clOrdID', 'clientOrderId')
if clientOrderId is not None:
request['clOrdID'] = clientOrderId
params = self.omit(params, ['clOrdID', 'clientOrderId'])
response = self.privatePostOrder(self.extend(request, params))
return self.parse_order(response, market)
def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
self.load_markets()
request = {}
origClOrdID = self.safe_string_2(params, 'origClOrdID', 'clientOrderId')
if origClOrdID is not None:
request['origClOrdID'] = origClOrdID
clientOrderId = self.safe_string(params, 'clOrdID', 'clientOrderId')
if clientOrderId is not None:
request['clOrdID'] = clientOrderId
params = self.omit(params, ['origClOrdID', 'clOrdID', 'clientOrderId'])
else:
request['orderID'] = id
if amount is not None:
request['orderQty'] = amount
if price is not None:
request['price'] = price
response = self.privatePutOrder(self.extend(request, params))
return self.parse_order(response)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
clientOrderId = self.safe_string_2(params, 'clOrdID', 'clientOrderId')
request = {}
if clientOrderId is None:
request['orderID'] = id
else:
request['clOrdID'] = clientOrderId
params = self.omit(params, ['clOrdID', 'clientOrderId'])
response = self.privateDeleteOrder(self.extend(request, params))
order = self.safe_value(response, 0, {})
error = self.safe_string(order, 'error')
if error is not None:
if error.find('Unable to cancel order due to existing state') >= 0:
raise OrderNotFound(self.id + ' cancelOrder() failed: ' + error)
return self.parse_order(order)
def cancel_all_orders(self, symbol=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privateDeleteOrderAll(self.extend(request, params))
return self.parse_orders(response, market)
def is_fiat(self, currency):
if currency == 'EUR':
return True
if currency == 'PLN':
return True
return False
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
if code != 'BTC':
            raise ExchangeError(self.id + ' supports BTC withdrawals only, other currencies coming soon...')
request = {
'currency': 'XBt',
'amount': amount,
'address': address,
        }
        response = self.privatePostUserRequestWithdrawal(self.extend(request, params))
return {
'info': response,
'id': self.safe_string(response, 'transactID'),
}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if code == 429:
raise DDoSProtection(self.id + ' ' + body)
if code >= 400:
error = self.safe_value(response, 'error', {})
message = self.safe_string(error, 'message')
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
if code == 400:
raise BadRequest(feedback)
raise ExchangeError(feedback)
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = '/api/' + self.version + '/' + path
if method == 'GET':
if params:
query += '?' + self.urlencode(params)
else:
format = self.safe_string(params, '_format')
if format is not None:
query += '?' + self.urlencode({'_format': format})
params = self.omit(params, '_format')
url = self.urls['api'][api] + query
if self.apiKey and self.secret:
auth = method + query
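            # the request is signed below as HMAC-SHA256(secret, verb + path + expires + body), hex-encoded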
expires = self.safe_integer(self.options, 'api-expires')
headers = {
'Content-Type': 'application/json',
'api-key': self.apiKey,
}
expires = self.sum(self.seconds(), expires)
expires = str(expires)
auth += expires
headers['api-expires'] = expires
if method == 'POST' or method == 'PUT' or method == 'DELETE':
if params:
body = self.json(params)
auth += body
headers['api-signature'] = self.hmac(self.encode(auth), self.encode(self.secret))
return {'url': url, 'method': method, 'body': body, 'headers': headers}
| true
| true
|
f7185c26056ae7dced24a2c6d6d3e11cf667b77f
| 4,543
|
py
|
Python
|
utils/avg_checkpoints.py
|
didichuxing/delta
|
31dfebc8f20b7cb282b62f291ff25a87e403cc86
|
[
"Apache-2.0"
] | 1,442
|
2019-07-09T07:34:28.000Z
|
2020-11-15T09:52:09.000Z
|
utils/avg_checkpoints.py
|
didichuxing/delta
|
31dfebc8f20b7cb282b62f291ff25a87e403cc86
|
[
"Apache-2.0"
] | 93
|
2019-07-22T09:20:20.000Z
|
2020-11-13T01:59:30.000Z
|
utils/avg_checkpoints.py
|
didichuxing/delta
|
31dfebc8f20b7cb282b62f291ff25a87e403cc86
|
[
"Apache-2.0"
] | 296
|
2019-07-09T07:35:28.000Z
|
2020-11-16T02:27:51.000Z
|
#!/usr/bin/env python3
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to average values of variables in a list of checkpoint files."""
import os
import six
from absl import app
from absl import flags
from absl import logging
from six.moves import zip # pylint: disable=redefined-builtin
import numpy as np
import delta.compat as tf
FLAGS = flags.FLAGS
flags.DEFINE_string("checkpoints", "",
"Comma-separated list of checkpoints to average.")
flags.DEFINE_integer(
"num_last_checkpoints", 0, "Averages the last N saved checkpoints."
" If the checkpoints flag is set, this is ignored.")
flags.DEFINE_string("prefix", "",
"Prefix (e.g., directory) to append to each checkpoint.")
flags.DEFINE_string("output_path", "/tmp/averaged.ckpt",
"Path to output the averaged checkpoint to.")
def checkpoint_exists(path):
return (tf.io.gfile.exists(path) or tf.io.gfile.exists(path + ".meta") or
tf.io.gfile.exists(path + ".index"))
def main(_):
if FLAGS.checkpoints:
# Get the checkpoints list from flags and run some basic checks.
checkpoints = [c.strip() for c in FLAGS.checkpoints.split(",")]
checkpoints = [c for c in checkpoints if c]
if not checkpoints:
raise ValueError("No checkpoints provided for averaging.")
if FLAGS.prefix:
checkpoints = [FLAGS.prefix + c for c in checkpoints]
else:
assert FLAGS.num_last_checkpoints >= 1, "Must average at least one model"
assert FLAGS.prefix, ("Prefix must be provided when averaging last"
" N checkpoints")
checkpoint_state = tf.train.get_checkpoint_state(
os.path.dirname(FLAGS.prefix))
# Checkpoints are ordered from oldest to newest.
checkpoints = checkpoint_state.all_model_checkpoint_paths[
-FLAGS.num_last_checkpoints:]
checkpoints = [c for c in checkpoints if checkpoint_exists(c)]
if not checkpoints:
if FLAGS.checkpoints:
raise ValueError("None of the provided checkpoints exist. %s" %
FLAGS.checkpoints)
else:
raise ValueError("Could not find checkpoints at %s" %
os.path.dirname(FLAGS.prefix))
# Read variables from all checkpoints and average them.
logging.info("Reading variables and averaging checkpoints:")
for c in checkpoints:
logging.info("%s ", c)
var_list = tf.train.list_variables(checkpoints[0])
var_values, var_dtypes = {}, {}
for (name, shape) in var_list:
if not name.startswith("global_step"):
var_values[name] = np.zeros(shape)
for checkpoint in checkpoints:
reader = tf.train.load_checkpoint(checkpoint)
for name in var_values:
tensor = reader.get_tensor(name)
var_dtypes[name] = tensor.dtype
var_values[name] += tensor
logging.info("Read from checkpoint %s", checkpoint)
for name in var_values: # Average.
var_values[name] /= len(checkpoints)
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
tf_vars = [
tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v])
for v in var_values
]
placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]
assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]
global_step = tf.Variable(
0, name="global_step", trainable=False, dtype=tf.int64)
saver = tf.train.Saver(tf.all_variables())
# Build a model consisting only of variables, set them to the average values.
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for p, assign_op, (name, value) in zip(placeholders, assign_ops,
six.iteritems(var_values)):
sess.run(assign_op, {p: value})
# Use the built saver to save the averaged checkpoint.
saver.save(sess, FLAGS.output_path, global_step=global_step)
logging.info("Averaged checkpoints saved in %s", FLAGS.output_path)
if __name__ == "__main__":
app.run(main)
| 39.504348
| 79
| 0.693374
|
import os
import six
from absl import app
from absl import flags
from absl import logging
from six.moves import zip
import numpy as np
import delta.compat as tf
FLAGS = flags.FLAGS
flags.DEFINE_string("checkpoints", "",
"Comma-separated list of checkpoints to average.")
flags.DEFINE_integer(
"num_last_checkpoints", 0, "Averages the last N saved checkpoints."
" If the checkpoints flag is set, this is ignored.")
flags.DEFINE_string("prefix", "",
"Prefix (e.g., directory) to append to each checkpoint.")
flags.DEFINE_string("output_path", "/tmp/averaged.ckpt",
"Path to output the averaged checkpoint to.")
def checkpoint_exists(path):
return (tf.io.gfile.exists(path) or tf.io.gfile.exists(path + ".meta") or
tf.io.gfile.exists(path + ".index"))
def main(_):
if FLAGS.checkpoints:
checkpoints = [c.strip() for c in FLAGS.checkpoints.split(",")]
checkpoints = [c for c in checkpoints if c]
if not checkpoints:
raise ValueError("No checkpoints provided for averaging.")
if FLAGS.prefix:
checkpoints = [FLAGS.prefix + c for c in checkpoints]
else:
assert FLAGS.num_last_checkpoints >= 1, "Must average at least one model"
assert FLAGS.prefix, ("Prefix must be provided when averaging last"
" N checkpoints")
checkpoint_state = tf.train.get_checkpoint_state(
os.path.dirname(FLAGS.prefix))
checkpoints = checkpoint_state.all_model_checkpoint_paths[
-FLAGS.num_last_checkpoints:]
checkpoints = [c for c in checkpoints if checkpoint_exists(c)]
if not checkpoints:
if FLAGS.checkpoints:
raise ValueError("None of the provided checkpoints exist. %s" %
FLAGS.checkpoints)
else:
raise ValueError("Could not find checkpoints at %s" %
os.path.dirname(FLAGS.prefix))
logging.info("Reading variables and averaging checkpoints:")
for c in checkpoints:
logging.info("%s ", c)
var_list = tf.train.list_variables(checkpoints[0])
var_values, var_dtypes = {}, {}
for (name, shape) in var_list:
if not name.startswith("global_step"):
var_values[name] = np.zeros(shape)
for checkpoint in checkpoints:
reader = tf.train.load_checkpoint(checkpoint)
for name in var_values:
tensor = reader.get_tensor(name)
var_dtypes[name] = tensor.dtype
var_values[name] += tensor
logging.info("Read from checkpoint %s", checkpoint)
for name in var_values:
var_values[name] /= len(checkpoints)
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
tf_vars = [
tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v])
for v in var_values
]
placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]
assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]
global_step = tf.Variable(
0, name="global_step", trainable=False, dtype=tf.int64)
saver = tf.train.Saver(tf.all_variables())
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for p, assign_op, (name, value) in zip(placeholders, assign_ops,
six.iteritems(var_values)):
sess.run(assign_op, {p: value})
saver.save(sess, FLAGS.output_path, global_step=global_step)
logging.info("Averaged checkpoints saved in %s", FLAGS.output_path)
if __name__ == "__main__":
app.run(main)
| true
| true
|
f7185c76e2aade5c78a8e61bdc23ad067dcf6e03
| 1,742
|
py
|
Python
|
src/compas/data/coercion.py
|
Sam-Bouten/compas
|
011c7779ded9b69bb602568b470bb0443e336f62
|
[
"MIT"
] | null | null | null |
src/compas/data/coercion.py
|
Sam-Bouten/compas
|
011c7779ded9b69bb602568b470bb0443e336f62
|
[
"MIT"
] | null | null | null |
src/compas/data/coercion.py
|
Sam-Bouten/compas
|
011c7779ded9b69bb602568b470bb0443e336f62
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from .validators import is_item_iterable
def coerce_sequence_of_tuple(sequence):
"""Make sure all items of a sequence are of type tuple.
Parameters
----------
sequence : sequence
A sequence of items.
Returns
-------
list[tuple]
A list containing the items of the original sequence,
with each iterable item converted to a tuple,
and non-iterable items wrapped in a tuple.
Examples
--------
>>> items = coerce_sequence_of_tuple(['a', 1, (None, ), [2.0, 3.0]])
>>> is_sequence_of_tuple(items)
True
"""
items = []
for item in sequence:
if not isinstance(item, tuple):
if not is_item_iterable(item):
item = (item, )
else:
item = tuple(item)
items.append(item)
return items
def coerce_sequence_of_list(sequence):
"""Make sure all items of a sequence are of type list.
Parameters
----------
sequence : sequence
A sequence of items.
Returns
-------
list[list]
A list containing the items of the original sequence,
with each iterable item converted to a list,
and non-iterable items wrapped in a list.
Examples
--------
>>> items = coerce_sequence_of_list(['a', 1, (None, ), [2.0, 3.0]])
>>> is_sequence_of_list(items)
True
"""
items = []
for item in sequence:
if not isinstance(item, list):
if not is_item_iterable(item):
item = [item]
else:
item = list(item)
items.append(item)
return items
| 24.194444
| 72
| 0.584386
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from .validators import is_item_iterable
def coerce_sequence_of_tuple(sequence):
items = []
for item in sequence:
if not isinstance(item, tuple):
if not is_item_iterable(item):
item = (item, )
else:
item = tuple(item)
items.append(item)
return items
def coerce_sequence_of_list(sequence):
items = []
for item in sequence:
if not isinstance(item, list):
if not is_item_iterable(item):
item = [item]
else:
item = list(item)
items.append(item)
return items
| true
| true
|
f7185d6f12e9bb7cb5506952d7aa10df068def6e
| 1,777
|
py
|
Python
|
ansible/lib/ansible/compat/six/__init__.py
|
kiv-box/redis
|
966a0c3f0a51282cd173b42a6e249d23f4e89dec
|
[
"Apache-2.0"
] | null | null | null |
ansible/lib/ansible/compat/six/__init__.py
|
kiv-box/redis
|
966a0c3f0a51282cd173b42a6e249d23f4e89dec
|
[
"Apache-2.0"
] | null | null | null |
ansible/lib/ansible/compat/six/__init__.py
|
kiv-box/redis
|
966a0c3f0a51282cd173b42a6e249d23f4e89dec
|
[
"Apache-2.0"
] | null | null | null |
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat six library. RHEL7 has python-six 1.3.0 which is too old
'''
# The following makes it easier for us to script updates of the bundled code
_BUNDLED_METADATA = { "pypi_name": "six", "version": "1.10.0" }
import os.path
try:
import six as _system_six
except ImportError:
_system_six = None
if _system_six:
# If we need some things from even newer versions of six, then we need to
# use our bundled copy instead
if ( # Added in six-1.8.0
not hasattr(_system_six.moves, 'shlex_quote') or
# Added in six-1.4.0
not hasattr(_system_six, 'byte2int') or
not hasattr(_system_six, 'add_metaclass') or
not hasattr(_system_six.moves, 'urllib')
):
_system_six = False
if _system_six:
six = _system_six
else:
from . import _six as six
six_py_file = '{0}.py'.format(os.path.splitext(six.__file__)[0])
exec(open(six_py_file, 'rb').read())
| 32.309091
| 77
| 0.702307
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
_BUNDLED_METADATA = { "pypi_name": "six", "version": "1.10.0" }
import os.path
try:
import six as _system_six
except ImportError:
_system_six = None
if _system_six:
if (
not hasattr(_system_six.moves, 'shlex_quote') or
not hasattr(_system_six, 'byte2int') or
not hasattr(_system_six, 'add_metaclass') or
not hasattr(_system_six.moves, 'urllib')
):
_system_six = False
if _system_six:
six = _system_six
else:
from . import _six as six
six_py_file = '{0}.py'.format(os.path.splitext(six.__file__)[0])
exec(open(six_py_file, 'rb').read())
| true
| true
|
f7185df1c90f20a4b94d48d8d0269d3b6a165204
| 278
|
py
|
Python
|
run_trainer.py
|
yizhibaiwuya/LibFewShot
|
3ce44c2fe61ee5e4074789aa165be461282c240b
|
[
"MIT"
] | null | null | null |
run_trainer.py
|
yizhibaiwuya/LibFewShot
|
3ce44c2fe61ee5e4074789aa165be461282c240b
|
[
"MIT"
] | null | null | null |
run_trainer.py
|
yizhibaiwuya/LibFewShot
|
3ce44c2fe61ee5e4074789aa165be461282c240b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
sys.dont_write_bytecode = True
from core.config import Config
from core import Trainer
if __name__ == "__main__":
config = Config("./config/negative_margin.yaml").get_config_dict()
trainer = Trainer(config)
trainer.train_loop()
| 21.384615
| 70
| 0.715827
|
import sys
sys.dont_write_bytecode = True
from core.config import Config
from core import Trainer
if __name__ == "__main__":
config = Config("./config/negative_margin.yaml").get_config_dict()
trainer = Trainer(config)
trainer.train_loop()
| true
| true
|
f7185df3593f46979319b2f7f315cdd018a46131
| 6,853
|
py
|
Python
|
preprocessing/extra_preprocessing.py
|
acp19tag/skill-extraction-dataset
|
fd188bda8a3aa17fbdf56958b7a8ff9e84099ba7
|
[
"CC-BY-4.0"
] | null | null | null |
preprocessing/extra_preprocessing.py
|
acp19tag/skill-extraction-dataset
|
fd188bda8a3aa17fbdf56958b7a8ff9e84099ba7
|
[
"CC-BY-4.0"
] | null | null | null |
preprocessing/extra_preprocessing.py
|
acp19tag/skill-extraction-dataset
|
fd188bda8a3aa17fbdf56958b7a8ff9e84099ba7
|
[
"CC-BY-4.0"
] | null | null | null |
#! /usr/bin/python3
"""
contains extra preprocessing steps for raw data, including:
- using regular expression to capture misclassified Skills in Experience class
- separating terms with special characters (e.g. '/', ',')
"""
from preprocessing.src.utils import * # pylint: disable=all
import re
import inflect # pylint: disable=all
import pandas as pd # pylint: disable=all
from pandas.core.common import SettingWithCopyWarning
# import warnings filter
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=SettingWithCopyWarning)
def get_class_from_tag(full_tag):
""" strips the BIO prefix from the tag and returns the class """
if full_tag == 'O':
return full_tag
return full_tag.split('-')[1]
def get_BIO_from_tag(full_tag):
""" strips the class from the tag and returns the BIO prefix """
if full_tag == 'O':
return full_tag
return full_tag.split('-')[0]
def identify_misclassified_exp(text):
""" identifies whether a span classed as Exp is likely to be a misclassified Skill """
misclassified = True
# check if there is a valid number in number format (regex)
if bool(re.search('[0-9]', text)):
misclassified = False
# check if there is a valid number in text format (inflect)
inflect_engine = inflect.engine()
text_numbers = {inflect_engine.number_to_words(x) for x in range(100)}
for token in re.findall(r"[\w]+|[^\s\w]", text):
if token.lower() in text_numbers:
misclassified = False
# check if there is a valid experience time period (base python)
time_periods = {
"week", "month", "year"
}
for time_period in time_periods:
if bool(re.search(time_period, text.lower())):
misclassified = False
return misclassified
def update_misclassified_tags(input_data, output_data, iloc_span):
""" updates the output data with correct tags """
for i in range(iloc_span[0], iloc_span[1]+1):
original_tag = str(input_data['tag'].iloc[i])
# print(f"original tag:{original_tag}")
if get_BIO_from_tag(original_tag) == 'B':
new_tag = 'B-Skill'
output_data['tag'].iloc[i] = new_tag
elif get_BIO_from_tag(original_tag) == 'I':
new_tag = 'I-Skill'
output_data['tag'].iloc[i] = new_tag
# print(f"new tag: {new_tag}\n")
return output_data
def capture_misclassified_skills(input_data):
""" uses regex to reassign misclassified Skills in Experience class """
output_data = input_data.copy(deep=True)
# initialise start and stop index to identify span
iloc_span = [0,0]
capture = False
# iterate over rows in input data
for row in input_data.itertuples():
# if capture is off, and tag is B-Experience, set capture to True
if not capture and row.tag == "B-Experience":
capture = True
iloc_span[0] = row.Index
# if capture is on, and tag is not I-Experience:
elif capture and row.tag != "I-Experience":
capture = False
iloc_span[1] = row.Index - 1
# print(iloc_span)
# print(input_data['word'].iloc[iloc_span[0]])
# print(input_data['word'].iloc[iloc_span[1]])
text = " ".join(list(input_data['word'].iloc[iloc_span[0]:iloc_span[1]+1]))
# print(text)
# identify if misclassified
if identify_misclassified_exp(text):
# if misclassified, set tags in output_data with same index to B-Skill and I-Skill accordingly
output_data = update_misclassified_tags(input_data, output_data, iloc_span)
# if capture is on, check misclassification one more time (for final span)
if capture:
iloc_span[1] = len(input_data.index)
# identify if misclassified
if identify_misclassified_exp(text):
# if misclassified, set tags in output_data with same index to B-Skill and I-Skill accordingly
output_data = update_misclassified_tags(input_data, output_data, iloc_span)
return output_data
def split_spans_by_character(input_data, output_data, iloc_span, punctuation = {"/", "\\", ",", ".", ':', ';', '?', '!', '\/', '\,'}):
""" splits spans by spcecial characters and reclassifies accordingly """
try:
span_dict = {
x: input_data['word'].iloc[x] for x in range(iloc_span[0], iloc_span[1] + 1)
}
except:
span_dict = {
x: input_data['word'].iloc[x] for x in range(iloc_span[0], iloc_span[1])
}
special_character_indices = [
index for index, value in span_dict.items() if value in punctuation
]
# set tags of special characters to O
# set BIO prefix of subsequent token (if one exists) to B
for special_character_index in special_character_indices:
output_data['tag'].iloc[special_character_index] = 'O'
if special_character_index < iloc_span[1]:
tag = get_class_from_tag(input_data['tag'].iloc[special_character_index + 1])
if output_data['tag'].iloc[special_character_index + 1] != 'O':
output_data['tag'].iloc[special_character_index + 1] = 'B-' + tag
return output_data
def separate_terms(input_data):
""" separates terms with special characters """
output_data = input_data.copy(deep=True)
# initialise start and stop index to identify span
iloc_span = [0,0]
current_tag = None
capture = False
# iterate over rows in input data
for row in input_data.itertuples():
prefix = get_BIO_from_tag(row.tag)
tag = get_class_from_tag(row.tag)
# if capture is off, and tag begins 'B', set capture to True and current_tag to current
if not capture and prefix == 'B':
capture = True
current_tag = tag
iloc_span[0] = row.Index
# if capture is on, and tag is different to current_tag, close the span and capture
elif capture and tag != current_tag:
capture = False
iloc_span[1] = row.Index - 1
output_data = split_spans_by_character(input_data, output_data, iloc_span)
# if capture is on, check current span one last time
if capture:
iloc_span[1] = len(input_data.index)
output_data = split_spans_by_character(input_data, output_data, iloc_span)
return output_data
def extra_preprocessing(input_data):
""" combines above preprocessing into one function call """
output_data = input_data.copy(deep=True)
output_data = capture_misclassified_skills(output_data)
output_data = separate_terms(output_data)
return output_data
| 34.265
| 134
| 0.647454
|
from preprocessing.src.utils import *
import re
import inflect
import pandas as pd
from pandas.core.common import SettingWithCopyWarning
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=SettingWithCopyWarning)
def get_class_from_tag(full_tag):
if full_tag == 'O':
return full_tag
return full_tag.split('-')[1]
def get_BIO_from_tag(full_tag):
if full_tag == 'O':
return full_tag
return full_tag.split('-')[0]
def identify_misclassified_exp(text):
misclassified = True
if bool(re.search('[0-9]', text)):
misclassified = False
inflect_engine = inflect.engine()
text_numbers = {inflect_engine.number_to_words(x) for x in range(100)}
for token in re.findall(r"[\w]+|[^\s\w]", text):
if token.lower() in text_numbers:
misclassified = False
time_periods = {
"week", "month", "year"
}
for time_period in time_periods:
if bool(re.search(time_period, text.lower())):
misclassified = False
return misclassified
def update_misclassified_tags(input_data, output_data, iloc_span):
for i in range(iloc_span[0], iloc_span[1]+1):
original_tag = str(input_data['tag'].iloc[i])
if get_BIO_from_tag(original_tag) == 'B':
new_tag = 'B-Skill'
output_data['tag'].iloc[i] = new_tag
elif get_BIO_from_tag(original_tag) == 'I':
new_tag = 'I-Skill'
output_data['tag'].iloc[i] = new_tag
return output_data
def capture_misclassified_skills(input_data):
output_data = input_data.copy(deep=True)
iloc_span = [0,0]
capture = False
for row in input_data.itertuples():
if not capture and row.tag == "B-Experience":
capture = True
iloc_span[0] = row.Index
elif capture and row.tag != "I-Experience":
capture = False
iloc_span[1] = row.Index - 1
text = " ".join(list(input_data['word'].iloc[iloc_span[0]:iloc_span[1]+1]))
if identify_misclassified_exp(text):
output_data = update_misclassified_tags(input_data, output_data, iloc_span)
if capture:
iloc_span[1] = len(input_data.index)
if identify_misclassified_exp(text):
output_data = update_misclassified_tags(input_data, output_data, iloc_span)
return output_data
def split_spans_by_character(input_data, output_data, iloc_span, punctuation = {"/", "\\", ",", ".", ':', ';', '?', '!', '\/', '\,'}):
try:
span_dict = {
x: input_data['word'].iloc[x] for x in range(iloc_span[0], iloc_span[1] + 1)
}
except:
span_dict = {
x: input_data['word'].iloc[x] for x in range(iloc_span[0], iloc_span[1])
}
special_character_indices = [
index for index, value in span_dict.items() if value in punctuation
]
for special_character_index in special_character_indices:
output_data['tag'].iloc[special_character_index] = 'O'
if special_character_index < iloc_span[1]:
tag = get_class_from_tag(input_data['tag'].iloc[special_character_index + 1])
if output_data['tag'].iloc[special_character_index + 1] != 'O':
output_data['tag'].iloc[special_character_index + 1] = 'B-' + tag
return output_data
def separate_terms(input_data):
output_data = input_data.copy(deep=True)
iloc_span = [0,0]
current_tag = None
capture = False
for row in input_data.itertuples():
prefix = get_BIO_from_tag(row.tag)
tag = get_class_from_tag(row.tag)
if not capture and prefix == 'B':
capture = True
current_tag = tag
iloc_span[0] = row.Index
elif capture and tag != current_tag:
capture = False
iloc_span[1] = row.Index - 1
output_data = split_spans_by_character(input_data, output_data, iloc_span)
if capture:
iloc_span[1] = len(input_data.index)
output_data = split_spans_by_character(input_data, output_data, iloc_span)
return output_data
def extra_preprocessing(input_data):
output_data = input_data.copy(deep=True)
output_data = capture_misclassified_skills(output_data)
output_data = separate_terms(output_data)
return output_data
| true
| true
|
f7185df4c6e431a56ba7ff8190c50e11369902b6
| 3,027
|
py
|
Python
|
app/app_3rdtry.py
|
TemsyChen/Spotifinder
|
b069ffcd63bd7654e1afd51cde3288c9678d121a
|
[
"MIT"
] | null | null | null |
app/app_3rdtry.py
|
TemsyChen/Spotifinder
|
b069ffcd63bd7654e1afd51cde3288c9678d121a
|
[
"MIT"
] | null | null | null |
app/app_3rdtry.py
|
TemsyChen/Spotifinder
|
b069ffcd63bd7654e1afd51cde3288c9678d121a
|
[
"MIT"
] | null | null | null |
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
from dash.dependencies import Input, Output
import pandas as pd
import pickle
# from os.path import dirname
# DIR = dirname(__file__)
# MODELS_DIR = DIR + '/../models/'
# DATA_DIR = DIR + '/../data/'
# data_filename = DATA_DIR + 'NLP_songs_data.zip'
# model_filename = MODELS_DIR + 'nlp_model.pkl'
# dtm_filename = MODELS_DIR + 'nlp_dtm.pkl'
# df = None
# loaded_model = None
# dtm = None
# def load_files():
# global df, loaded_model, dtm
# df = pd.read_csv(data_filename)
# loaded_model = pickle.load(open(model_filename, 'rb'))
# dtm = pickle.load(open(dtm_filename, 'rb'))
# load_files()
data_filename = r'C:\Users\temsy\Documents\GitHub\Spotifinder\data\NLP_songs_data.zip'
df = pd.read_csv(data_filename)
loaded_model = pickle.load(open(r'C:\Users\temsy\Documents\GitHub\Spotifinder\models\nlp_model.pkl', 'rb'))
dtm = pickle.load(open(r'C:\Users\temsy\Documents\GitHub\Spotifinder\models\nlp_dtm.pkl', 'rb'))
#Plotly Dash
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets, requests_pathname_prefix = '/dash/')
app.layout = html.Div([
html.Label("Artist:", style={'fontSize':30, 'textAlign':'center'}),
dcc.Dropdown(
id='Artist',
options=[{
'label': c,
'value': c}
for c in df['track_artist']],
value = df['track_artist'][0]
),
html.Label("Songs:", style={'fontSize':30, 'textAlign':'center'}),
dcc.Dropdown(id='Songs',
multi=False),
html.Label("Recommendations:", style={'fontSize':30, 'textAlign':'center'}),
html.Div(id='Recommendations')
])
@app.callback(
Output('Songs', 'options'),
[Input('Artist', 'value')]
)
def set_options(artist):
dff = df[df.track_artist == artist]
dicosongs = [{'label': c, 'value': c} for c in sorted(dff.track_name.unique())]
return dicosongs
@app.callback(
    Output('Recommendations', 'children'),
[Input('Songs', 'value')],
[Input('Artist', 'value')]
)
def predict(song, artist):
# if dtm is None:
# load_files()
#translate artist, song into doc dtm.iloc[x].values
artist_songs = df.loc[df['track_artist'] == artist]
selected_song = artist_songs.loc[artist_songs['track_name'] == song]
x = selected_song.index
x = x[0]
x = x.item()
doc = dtm.loc[x].values
result = loaded_model.kneighbors([doc], n_neighbors=6)
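    # kneighbors returns a(distances, indices) pair; the code assumes result[1][0][0] is the
    # query song itself and reads the 5 recommendations from positions 1 to 5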
songs = []
# rec_songs = {"artist": [], "song": []};
for i in range(5):
song = result[1][0][1 + i]
# translate the loc into an artist and song title
artist = df.loc[song]['track_artist']
song = df.loc[song]['track_name']
# rec_songs['artist'].append(artist)
# rec_songs['song'].append(song)
songs.append(song)
    return songs
if __name__ == '__main__':
app.run_server(debug=True)
| 29.38835
| 107
| 0.645524
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
from dash.dependencies import Input, Output
import pandas as pd
import pickle
data_filename = r'C:\Users\temsy\Documents\GitHub\Spotifinder\data\NLP_songs_data.zip'
df = pd.read_csv(data_filename)
loaded_model = pickle.load(open(r'C:\Users\temsy\Documents\GitHub\Spotifinder\models\nlp_model.pkl', 'rb'))
dtm = pickle.load(open(r'C:\Users\temsy\Documents\GitHub\Spotifinder\models\nlp_dtm.pkl', 'rb'))
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets, requests_pathname_prefix = '/dash/')
app.layout = html.Div([
html.Label("Artist:", style={'fontSize':30, 'textAlign':'center'}),
dcc.Dropdown(
id='Artist',
options=[{
'label': c,
'value': c}
for c in df['track_artist']],
value = df['track_artist'][0]
),
html.Label("Songs:", style={'fontSize':30, 'textAlign':'center'}),
dcc.Dropdown(id='Songs',
multi=False),
html.Label("Recommendations:", style={'fontSize':30, 'textAlign':'center'}),
html.Div(id='Recommendations')
])
@app.callback(
Output('Songs', 'options'),
[Input('Artist', 'value')]
)
def set_options(artist):
dff = df[df.track_artist == artist]
dicosongs = [{'label': c, 'value': c} for c in sorted(dff.track_name.unique())]
return dicosongs
@app.callback(
    Output('Recommendations', 'children'),
[Input('Songs', 'value')],
[Input('Artist', 'value')]
)
def predict(song, artist):
artist_songs = df.loc[df['track_artist'] == artist]
selected_song = artist_songs.loc[artist_songs['track_name'] == song]
x = selected_song.index
x = x[0]
x = x.item()
doc = dtm.loc[x].values
result = loaded_model.kneighbors([doc], n_neighbors=6)
songs = []
for i in range(5):
song = result[1][0][1 + i]
artist = df.loc[song]['track_artist']
song = df.loc[song]['track_name']
songs.append(song)
    return songs
if __name__ == '__main__':
app.run_server(debug=True)
| true
| true
|
f7185efe2378f4b0a00acfbd3db707f8598d6702
| 11,532
|
py
|
Python
|
github_explorer/main.py
|
michal-raska/github_explorer
|
aba3b059eaa06a78d4a0df34c2416e1f1e218d1d
|
[
"MIT"
] | null | null | null |
github_explorer/main.py
|
michal-raska/github_explorer
|
aba3b059eaa06a78d4a0df34c2416e1f1e218d1d
|
[
"MIT"
] | null | null | null |
github_explorer/main.py
|
michal-raska/github_explorer
|
aba3b059eaa06a78d4a0df34c2416e1f1e218d1d
|
[
"MIT"
] | null | null | null |
import argparse
import getpass
import os
import re
import socket
from datetime import datetime
import github
from dateutil.relativedelta import relativedelta
from github import Github
from termcolor import colored
CHANGED_FILES_PAD = 50
STATE_OPEN = 'open'
STATE_MERGED = 'merged'
STATE_CLOSED = 'closed'
class PullRequestsCounts:
AUTHORS_COUNTS_ALL_KEY = 'all'
AUTHORS_COUNTS_OPEN_KEY = 'open'
AUTHORS_COUNTS_CLOSED_KEY = 'closed'
AUTHORS_COUNTS_MERGED_KEY = 'merged'
AUTHORS_COUNTS_OFFENSIVE_KEY = 'offensive'
SUMMARY_PAD = 47
AUTHORS_PAD = 35
def __init__(self, jira_key=None):
self.__jira_key = jira_key
self.__open_requests = 0
self.__closed_requests = 0
self.__merged_requests = 0
self.__offensive_requests = 0
self.__authors = {}
def count_pull(self, pull):
author = pull.user.login
self.__ensure_author_counts(author)
self.__authors[author][self.AUTHORS_COUNTS_ALL_KEY] += 1
if pull.state == STATE_OPEN:
self.__open_requests += 1
self.__authors[author][self.AUTHORS_COUNTS_OPEN_KEY] += 1
if pull.state == STATE_CLOSED:
self.__closed_requests += 1
self.__authors[author][self.AUTHORS_COUNTS_CLOSED_KEY] += 1
if pull.merged:
self.__merged_requests += 1
self.__authors[author][self.AUTHORS_COUNTS_MERGED_KEY] += 1
if self.is_offensive(pull):
self.__offensive_requests += 1
self.__authors[author][self.AUTHORS_COUNTS_OFFENSIVE_KEY] += 1
def is_offensive(self, pull):
return self.jira_key and self.jira_key not in pull.title
def print_authors(self):
section_header('AUTHORS')
sorted_authors = sorted(self.__authors.items(), key=lambda author: author[1][self.AUTHORS_COUNTS_MERGED_KEY],
reverse=True)
for author in sorted_authors:
labeled_text(0, author[0], label_color='green')
labeled_text(1, '# merged', author[1][self.AUTHORS_COUNTS_MERGED_KEY], pad=self.AUTHORS_PAD)
labeled_text(1, '# open', author[1][self.AUTHORS_COUNTS_OPEN_KEY], pad=self.AUTHORS_PAD)
labeled_text(1, '# closed', author[1][self.AUTHORS_COUNTS_CLOSED_KEY], pad=self.AUTHORS_PAD)
labeled_text(1, '# closed w/o merge', author[1][self.AUTHORS_COUNTS_CLOSED_KEY] - author[1][self.AUTHORS_COUNTS_MERGED_KEY], pad=self.AUTHORS_PAD)
if self.jira_key:
color = self.__offensive_label_color(author[1][self.AUTHORS_COUNTS_OFFENSIVE_KEY])
labeled_text(1, '# offensive', author[1][self.AUTHORS_COUNTS_OFFENSIVE_KEY], label_color=color, pad=self.AUTHORS_PAD)
labeled_text(1, '# all', author[1][self.AUTHORS_COUNTS_ALL_KEY], pad=self.AUTHORS_PAD)
print()
section_end()
def print_summary(self):
section_header('SUMMARY')
labeled_text(0, '# merged pull requests', self.merged_requests, pad=self.SUMMARY_PAD)
labeled_text(0, '# open pull requests', self.open_requests, pad=self.SUMMARY_PAD)
labeled_text(0, '# closed pull requests', self.closed_requests, pad=self.SUMMARY_PAD)
labeled_text(0, '# closed pull requests w/o merge', self.closed_requests - self.merged_requests, pad=self.SUMMARY_PAD)
if self.jira_key:
color = self.__offensive_label_color(self.offensive_requests)
labeled_text(0, '# offensive pull requests', self.offensive_requests, label_color=color,
pad=self.SUMMARY_PAD)
labeled_text(0, '# all pull requests', self.all_requests, pad=self.SUMMARY_PAD)
section_end()
def __ensure_author_counts(self, author):
if self.__authors.get(author) is None:
self.__authors[author] = {
self.AUTHORS_COUNTS_OPEN_KEY: 0,
self.AUTHORS_COUNTS_MERGED_KEY: 0,
self.AUTHORS_COUNTS_CLOSED_KEY: 0,
self.AUTHORS_COUNTS_ALL_KEY: 0,
self.AUTHORS_COUNTS_OFFENSIVE_KEY: 0
}
@staticmethod
def __offensive_label_color(count):
if count > 0:
return 'red'
return 'blue'
@property
def jira_key(self):
return self.__jira_key
@property
def open_requests(self):
return self.__open_requests
@property
def closed_requests(self):
return self.__closed_requests
@property
def merged_requests(self):
return self.__merged_requests
@property
def all_requests(self):
return self.open_requests + self.closed_requests
@property
def offensive_requests(self):
return self.__offensive_requests
def create_args_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--repo', help='Name of the repository', required=True)
parser.add_argument('--history', help='Since when to list the PRs', default='1 day')
parser.add_argument('--jira-key', help='Prefix of the JIRA Issue', default=None)
return parser
def create_github_accessor():
print('Please enter your GitHub credentials. To proceed without authentication, use empty username and password')
username = input('Username: ')
password = getpass.getpass()
if username == '' and password == '':
print(colored(
'Warning: No authentication supplied, rate limit may apply.\n',
'yellow'))
return Github()
else:
return Github(username, password)
def check_access(repo):
repo.name
def labeled_text(indent_level, label, text=None, label_color='blue', text_color='white', pad=30):
indent = indent_level * '\t'
label_text = colored("%s%s: " % (indent, label), label_color).ljust(pad)
value_text = ''
if text is not None:
value_text = colored(text, text_color)
print(label_text + value_text)
def section_header(header):
decorated_header = "# %s #" % header
colored_header = "# %s %s" % (colored(header, 'green'), colored('#', 'blue'))
header_length = len(decorated_header)
print(colored(header_length * '#', 'blue'))
print(colored(colored_header, 'blue'))
print(colored(header_length * '#', 'blue'))
print()
def section_end():
print()
print(colored(40 * '-', 'blue'))
print()
def state_colored(state):
if state == STATE_CLOSED:
return colored(STATE_CLOSED, 'yellow')
if state == STATE_OPEN:
return colored(STATE_OPEN, 'red')
if state == STATE_MERGED:
return colored(STATE_MERGED, 'green')
return state
def process_repo_details(repo):
section_header('REPO DETAILS')
labeled_text(0, 'Name', repo.name)
labeled_text(0, 'Description', repo.description)
labeled_text(0, 'Modified', repo.last_modified)
section_end()
def timedelta_from_history_arg(history_arg):
if not re.compile('\d+ (hour(s|)|day(s|)|week(s|)|month(s|)|year(s|))').match(history_arg):
notify_history_argument_invalid()
(count, unit) = history_arg.split(' ')
if int(count) == 0:
notify_history_argument_invalid()
if unit in ['hour', 'hours']:
return relativedelta(hours=int(count))
if unit in ['day', 'days']:
return relativedelta(days=int(count))
if unit in ['week', 'weeks']:
return relativedelta(weeks=int(count))
if unit in ['month', 'months']:
return relativedelta(months=int(count))
if unit in ['year', 'years']:
return relativedelta(years=int(count))
raise ValueError('History unit %s not supported' % unit)
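# e.g.(illustrative): timedelta_from_history_arg('2 weeks') returns relativedelta(weeks=+2)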
def notify_history_argument_invalid():
print(colored(
'ERROR: \tArgument --history not valid. Valid argument contains a positive number and an unit. For example \'1 '
'month\' or \'2 days\'. Supported units are: hour, day, month, year',
'red'))
exit(1)
def process_pull_files_change(repo, pull):
changed_files = repo.get_pull(pull.number).get_files()
changed_extensions = {}
for changed_file in changed_files:
file, ext = os.path.splitext(changed_file.filename)
if changed_extensions.get(ext):
changed_extensions[ext] += 1
else:
changed_extensions[ext] = 1
for (ext, count) in changed_extensions.items():
if ext == '':
ext = 'no ext.'
labeled_text(2, '# %s files changed' % ext, count, pad=CHANGED_FILES_PAD)
def process_pull_details(repo, pull, pull_requests_counts):
labeled_text(0, pull.title, label_color='green')
if pull_requests_counts.is_offensive(pull):
labeled_text(1, 'offensive flag', 'OFFENSIVE', text_color='red')
labeled_text(1, '#', pull.number)
labeled_text(1, 'created by', pull.user.login)
labeled_text(1, 'created at', pull.created_at)
if pull.merged:
labeled_text(1, 'state', state_colored(STATE_MERGED))
else:
labeled_text(1, 'state', state_colored(pull.state))
if pull.merged:
labeled_text(1, 'merge', '')
labeled_text(2, 'by', pull.merged_by.login)
labeled_text(2, 'at', pull.merged_at)
labeled_text(2, 'after', pull.merged_at - pull.created_at)
labeled_text(1, 'files', '')
labeled_text(2, '# changed', pull.changed_files, pad=CHANGED_FILES_PAD)
process_pull_files_change(repo, pull)
print()
def process_pulls_details(pulls, pull_requests_counts, print_header=True, print_section_end=True):
if print_header:
section_header('PULL REQUESTS')
requested_history = timedelta_from_history_arg(args.history)
for pull in pulls:
merged_before_requested_frame = pull.merged and pull.merged_at < datetime.now() - requested_history
created_before_requested_frame = pull.created_at < datetime.now() - requested_history
if merged_before_requested_frame or created_before_requested_frame:
break
process_pull_details(repo, pull, pull_requests_counts)
pull_requests_counts.count_pull(pull)
if print_section_end:
section_end()
return pull_requests_counts
if __name__ == '__main__':
args = create_args_parser().parse_args()
if args.jira_key is None:
print(colored(
'Warning: Jira key not set, offensive commits will not be marked. JIRA issue key can be set with the --jira-key <KEY> switch.\n',
'yellow'))
try:
github_accessor = create_github_accessor()
repo = github_accessor.get_repo(args.repo)
check_access(repo)
process_repo_details(repo)
pulls = repo.get_pulls(state=STATE_OPEN)
pull_requests_counts = PullRequestsCounts(args.jira_key)
process_pulls_details(pulls, pull_requests_counts, print_section_end=False)
pulls = repo.get_pulls(state=STATE_CLOSED)
process_pulls_details(pulls, pull_requests_counts, print_header=False)
pull_requests_counts.print_authors()
pull_requests_counts.print_summary()
except github.BadCredentialsException:
print(colored('ERROR: Invalid credentials.', 'red'))
exit(1)
except github.UnknownObjectException:
print(colored('ERROR: Cannot find repo %s.' % args.repo, 'red'))
exit(1)
except github.RateLimitExceededException:
print(colored('ERROR: Rate limit exceeded. Please authenticate.', 'red'))
exit(1)
except (socket.timeout, socket.gaierror):
print(colored('ERROR: Cannot reach Github. Please check your Internet connection.', 'red'))
| 36.0375
| 159
| 0.667881
|
import argparse
import getpass
import os
import re
import socket
from datetime import datetime
import github
from dateutil.relativedelta import relativedelta
from github import Github
from termcolor import colored
CHANGED_FILES_PAD = 50
STATE_OPEN = 'open'
STATE_MERGED = 'merged'
STATE_CLOSED = 'closed'
class PullRequestsCounts:
AUTHORS_COUNTS_ALL_KEY = 'all'
AUTHORS_COUNTS_OPEN_KEY = 'open'
AUTHORS_COUNTS_CLOSED_KEY = 'closed'
AUTHORS_COUNTS_MERGED_KEY = 'merged'
AUTHORS_COUNTS_OFFENSIVE_KEY = 'offensive'
SUMMARY_PAD = 47
AUTHORS_PAD = 35
def __init__(self, jira_key=None):
self.__jira_key = jira_key
self.__open_requests = 0
self.__closed_requests = 0
self.__merged_requests = 0
self.__offensive_requests = 0
self.__authors = {}
def count_pull(self, pull):
author = pull.user.login
self.__ensure_author_counts(author)
self.__authors[author][self.AUTHORS_COUNTS_ALL_KEY] += 1
if pull.state == STATE_OPEN:
self.__open_requests += 1
self.__authors[author][self.AUTHORS_COUNTS_OPEN_KEY] += 1
if pull.state == STATE_CLOSED:
self.__closed_requests += 1
self.__authors[author][self.AUTHORS_COUNTS_CLOSED_KEY] += 1
if pull.merged:
self.__merged_requests += 1
self.__authors[author][self.AUTHORS_COUNTS_MERGED_KEY] += 1
if self.is_offensive(pull):
self.__offensive_requests += 1
self.__authors[author][self.AUTHORS_COUNTS_OFFENSIVE_KEY] += 1
def is_offensive(self, pull):
return self.jira_key and self.jira_key not in pull.title
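    # Example: with jira_key='PROJ', the title 'Fix login bug' counts as
    # offensive (no issue reference), while 'PROJ-42 Fix login bug' does not.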
def print_authors(self):
section_header('AUTHORS')
sorted_authors = sorted(self.__authors.items(), key=lambda author: author[1][self.AUTHORS_COUNTS_MERGED_KEY],
reverse=True)
for author in sorted_authors:
labeled_text(0, author[0], label_color='green')
labeled_text(1, '# merged', author[1][self.AUTHORS_COUNTS_MERGED_KEY], pad=self.AUTHORS_PAD)
labeled_text(1, '# open', author[1][self.AUTHORS_COUNTS_OPEN_KEY], pad=self.AUTHORS_PAD)
labeled_text(1, '# closed', author[1][self.AUTHORS_COUNTS_CLOSED_KEY], pad=self.AUTHORS_PAD)
labeled_text(1, '# closed w/o merge', author[1][self.AUTHORS_COUNTS_CLOSED_KEY] - author[1][self.AUTHORS_COUNTS_MERGED_KEY], pad=self.AUTHORS_PAD)
if self.jira_key:
color = self.__offensive_label_color(author[1][self.AUTHORS_COUNTS_OFFENSIVE_KEY])
labeled_text(1, '# offensive', author[1][self.AUTHORS_COUNTS_OFFENSIVE_KEY], label_color=color, pad=self.AUTHORS_PAD)
labeled_text(1, '# all', author[1][self.AUTHORS_COUNTS_ALL_KEY], pad=self.AUTHORS_PAD)
print()
section_end()
def print_summary(self):
section_header('SUMMARY')
labeled_text(0, '# merged pull requests', self.merged_requests, pad=self.SUMMARY_PAD)
labeled_text(0, '# open pull requests', self.open_requests, pad=self.SUMMARY_PAD)
labeled_text(0, '# closed pull requests', self.closed_requests, pad=self.SUMMARY_PAD)
labeled_text(0, '# closed pull requests w/o merge', self.closed_requests - self.merged_requests, pad=self.SUMMARY_PAD)
if self.jira_key:
color = self.__offensive_label_color(self.offensive_requests)
labeled_text(0, '# offensive pull requests', self.offensive_requests, label_color=color,
pad=self.SUMMARY_PAD)
labeled_text(0, '# all pull requests', self.all_requests, pad=self.SUMMARY_PAD)
section_end()
def __ensure_author_counts(self, author):
if self.__authors.get(author) is None:
self.__authors[author] = {
self.AUTHORS_COUNTS_OPEN_KEY: 0,
self.AUTHORS_COUNTS_MERGED_KEY: 0,
self.AUTHORS_COUNTS_CLOSED_KEY: 0,
self.AUTHORS_COUNTS_ALL_KEY: 0,
self.AUTHORS_COUNTS_OFFENSIVE_KEY: 0
}
@staticmethod
def __offensive_label_color(count):
if count > 0:
return 'red'
return 'blue'
@property
def jira_key(self):
return self.__jira_key
@property
def open_requests(self):
return self.__open_requests
@property
def closed_requests(self):
return self.__closed_requests
@property
def merged_requests(self):
return self.__merged_requests
@property
def all_requests(self):
return self.open_requests + self.closed_requests
@property
def offensive_requests(self):
return self.__offensive_requests
def create_args_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--repo', help='Name of the repository', required=True)
parser.add_argument('--history', help='Since when to list the PRs', default='1 day')
parser.add_argument('--jira-key', help='Prefix of the JIRA Issue', default=None)
return parser
def create_github_accessor():
    print('Please enter your GitHub credentials. To proceed without authentication, leave the username and password empty.')
username = input('Username: ')
password = getpass.getpass()
if username == '' and password == '':
print(colored(
'Warning: No authentication supplied, rate limit may apply.\n',
'yellow'))
return Github()
else:
return Github(username, password)
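# A hedged alternative: GitHub has deprecated password authentication for its
# API, and recent PyGithub versions accept a personal access token as the
# first argument. GH_TOKEN below is a placeholder environment variable:
#     token = os.environ.get('GH_TOKEN')
#     return Github(token) if token else Github()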
def check_access(repo):
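    # Touching an attribute forces an API call, surfacing authentication and
    # permission errors early instead of midway through the report.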
repo.name
def labeled_text(indent_level, label, text=None, label_color='blue', text_color='white', pad=30):
indent = indent_level * '\t'
label_text = colored("%s%s: " % (indent, label), label_color).ljust(pad)
value_text = ''
if text is not None:
value_text = colored(text, text_color)
print(label_text + value_text)
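# Note (hedged): ljust() also counts the invisible ANSI escape codes added by
# colored(), so the visible padding is a few columns narrower than `pad`.
# Example:
#     labeled_text(1, 'state', 'open')  # -> "\tstate: open", label padded
#                                       #    (escape codes included) to 30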
def section_header(header):
decorated_header = "# %s #" % header
colored_header = "# %s %s" % (colored(header, 'green'), colored('#', 'blue'))
header_length = len(decorated_header)
print(colored(header_length * '#', 'blue'))
print(colored(colored_header, 'blue'))
print(colored(header_length * '#', 'blue'))
print()
def section_end():
print()
print(colored(40 * '-', 'blue'))
print()
def state_colored(state):
if state == STATE_CLOSED:
return colored(STATE_CLOSED, 'yellow')
if state == STATE_OPEN:
return colored(STATE_OPEN, 'red')
if state == STATE_MERGED:
return colored(STATE_MERGED, 'green')
return state
def process_repo_details(repo):
section_header('REPO DETAILS')
labeled_text(0, 'Name', repo.name)
labeled_text(0, 'Description', repo.description)
labeled_text(0, 'Modified', repo.last_modified)
section_end()
def timedelta_from_history_arg(history_arg):
    if not re.fullmatch(r'\d+ (hours?|days?|weeks?|months?|years?)', history_arg):
notify_history_argument_invalid()
(count, unit) = history_arg.split(' ')
if int(count) == 0:
notify_history_argument_invalid()
if unit in ['hour', 'hours']:
return relativedelta(hours=int(count))
if unit in ['day', 'days']:
return relativedelta(days=int(count))
if unit in ['week', 'weeks']:
return relativedelta(weeks=int(count))
if unit in ['month', 'months']:
return relativedelta(months=int(count))
if unit in ['year', 'years']:
return relativedelta(years=int(count))
raise ValueError('History unit %s not supported' % unit)
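# Hedged examples of the mapping above:
#     timedelta_from_history_arg('2 weeks')  # -> relativedelta(weeks=+2)
#     timedelta_from_history_arg('1 hour')   # -> relativedelta(hours=+1)
#     timedelta_from_history_arg('0 days')   # exits with an error message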
def notify_history_argument_invalid():
print(colored(
        'ERROR: \tArgument --history is not valid. A valid argument contains a positive number and a unit, '
        'for example \'1 month\' or \'2 days\'. Supported units are: hour, day, week, month, year.',
'red'))
exit(1)
def process_pull_files_change(repo, pull):
changed_files = repo.get_pull(pull.number).get_files()
changed_extensions = {}
for changed_file in changed_files:
file, ext = os.path.splitext(changed_file.filename)
if changed_extensions.get(ext):
changed_extensions[ext] += 1
else:
changed_extensions[ext] = 1
for (ext, count) in changed_extensions.items():
if ext == '':
ext = 'no ext.'
labeled_text(2, '# %s files changed' % ext, count, pad=CHANGED_FILES_PAD)
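# A functionally equivalent sketch of the tallying above using the standard
# library (assumption: a collections.Counter is acceptable here):
#     from collections import Counter
#     changed_extensions = Counter(
#         os.path.splitext(f.filename)[1] or 'no ext.' for f in changed_files)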
def process_pull_details(repo, pull, pull_requests_counts):
labeled_text(0, pull.title, label_color='green')
if pull_requests_counts.is_offensive(pull):
labeled_text(1, 'offensive flag', 'OFFENSIVE', text_color='red')
labeled_text(1, '#', pull.number)
labeled_text(1, 'created by', pull.user.login)
labeled_text(1, 'created at', pull.created_at)
if pull.merged:
labeled_text(1, 'state', state_colored(STATE_MERGED))
else:
labeled_text(1, 'state', state_colored(pull.state))
if pull.merged:
labeled_text(1, 'merge', '')
labeled_text(2, 'by', pull.merged_by.login)
labeled_text(2, 'at', pull.merged_at)
labeled_text(2, 'after', pull.merged_at - pull.created_at)
labeled_text(1, 'files', '')
labeled_text(2, '# changed', pull.changed_files, pad=CHANGED_FILES_PAD)
process_pull_files_change(repo, pull)
print()
def process_pulls_details(pulls, pull_requests_counts, print_header=True, print_section_end=True):
if print_header:
section_header('PULL REQUESTS')
requested_history = timedelta_from_history_arg(args.history)
for pull in pulls:
merged_before_requested_frame = pull.merged and pull.merged_at < datetime.now() - requested_history
created_before_requested_frame = pull.created_at < datetime.now() - requested_history
if merged_before_requested_frame or created_before_requested_frame:
break
process_pull_details(repo, pull, pull_requests_counts)
pull_requests_counts.count_pull(pull)
if print_section_end:
section_end()
return pull_requests_counts
if __name__ == '__main__':
args = create_args_parser().parse_args()
if args.jira_key is None:
print(colored(
            'Warning: Jira key not set, offensive pull requests will not be marked. The JIRA issue key can be set with the --jira-key <KEY> switch.\n',
'yellow'))
try:
github_accessor = create_github_accessor()
repo = github_accessor.get_repo(args.repo)
check_access(repo)
process_repo_details(repo)
pulls = repo.get_pulls(state=STATE_OPEN)
pull_requests_counts = PullRequestsCounts(args.jira_key)
process_pulls_details(pulls, pull_requests_counts, print_section_end=False)
pulls = repo.get_pulls(state=STATE_CLOSED)
process_pulls_details(pulls, pull_requests_counts, print_header=False)
pull_requests_counts.print_authors()
pull_requests_counts.print_summary()
except github.BadCredentialsException:
print(colored('ERROR: Invalid credentials.', 'red'))
exit(1)
except github.UnknownObjectException:
print(colored('ERROR: Cannot find repo %s.' % args.repo, 'red'))
exit(1)
except github.RateLimitExceededException:
print(colored('ERROR: Rate limit exceeded. Please authenticate.', 'red'))
exit(1)
except (socket.timeout, socket.gaierror):
print(colored('ERROR: Cannot reach Github. Please check your Internet connection.', 'red'))
| true
| true
|
f71860515aa0c48a7527206271305a67a617026e
| 5,375
|
py
|
Python
|
entropylab/instruments/tests/test_qcodes_dummy.py
|
IgorQM/entropy
|
8cbd3da356d8196e89eb9d810e643c80d6608481
|
[
"BSD-3-Clause"
] | null | null | null |
entropylab/instruments/tests/test_qcodes_dummy.py
|
IgorQM/entropy
|
8cbd3da356d8196e89eb9d810e643c80d6608481
|
[
"BSD-3-Clause"
] | null | null | null |
entropylab/instruments/tests/test_qcodes_dummy.py
|
IgorQM/entropy
|
8cbd3da356d8196e89eb9d810e643c80d6608481
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Optional, Dict, Any
import pytest
@pytest.mark.skip()
def test_qcodes_dummy():
from qcodes.instrument.base import InstrumentBase as qcodes_InstrumentBase
from entropylab.instruments.qcodes_adapter import QcodesAdapter
class MockQcodesDriver(qcodes_InstrumentBase):
def __init__(
self, name: str, metadata: Optional[Dict[Any, Any]] = None
) -> None:
super().__init__(name, metadata)
self.add_parameter("p")
setter = lambda val: print(val)
getter = lambda: 1
self.add_parameter("s", set_cmd=self.setter, get_cmd=self.getter)
self.add_parameter("g", set_cmd=setter, get_cmd=getter)
def setter(self, val):
print(val)
self.s = val
def getter(self):
return self.s
def free_function(self):
print("i'm free")
class QcodesDummy(QcodesAdapter):
def __init__(self):
super().__init__(MockQcodesDriver, "QcodesDummy")
def revert_to_snapshot(self, snapshot: str):
pass
dummy = QcodesDummy()
print(dummy)
dummy.connect()
instance = dummy.get_instance()
instance.set("s", "printed")
instance.free_function()
instance.set("g", "g")
assert instance.get("s") == "printed"
assert instance.get("g") == 1
dummy.teardown()
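# The same MockQcodesDriver is re-declared in every test below. A hedged
# deduplication sketch (assumptions: keeping the qcodes import lazy, inside
# the factory, preserves the "no qcodes at collection time" property noted in
# the comments below, and the private attribute _s_value is introduced here
# to avoid clobbering the parameter attribute 's'):
def _make_mock_driver_class():
    from qcodes.instrument.base import InstrumentBase
    class _MockQcodesDriver(InstrumentBase):
        def __init__(self, name, metadata=None):
            super().__init__(name, metadata)
            self.add_parameter("p")
            self.add_parameter("s", set_cmd=self.setter, get_cmd=self.getter)
            self.add_parameter("g", set_cmd=print, get_cmd=lambda: 1)
        def setter(self, val):
            self._s_value = val
        def getter(self):
            return self._s_value
        def free_function(self):
            print("i'm free")
    return _MockQcodesDriver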
@pytest.mark.skip()
def test_qcodes_dummy_object():
    # Importing inside the test so that general pytest discovery won't require a qcodes installation
from qcodes.instrument.base import InstrumentBase as qcodes_InstrumentBase
from entropylab.instruments.qcodes_adapter import QcodesAdapter
class MockQcodesDriver(qcodes_InstrumentBase):
def __init__(
self, name: str, metadata: Optional[Dict[Any, Any]] = None
) -> None:
super().__init__(name, metadata)
self.add_parameter("p")
setter = lambda val: print(val)
getter = lambda: 1
self.add_parameter("s", set_cmd=self.setter, get_cmd=self.getter)
self.add_parameter("g", set_cmd=setter, get_cmd=getter)
def setter(self, val):
print(val)
self.s = val
def getter(self):
return self.s
def free_function(self):
print("i'm free")
dummy = QcodesAdapter(MockQcodesDriver, "dummy_inst")
dummy.connect()
instance = dummy.get_instance()
instance.set("s", "printed")
instance.free_function()
instance.set("g", "g")
assert instance.get("s") == "printed"
assert instance.get("g") == 1
dummy.teardown()
@pytest.mark.skip()
def test_qcodes_dummy_object_dynamic_spec():
    # Importing inside the test so that general pytest discovery won't require a qcodes installation
from qcodes.instrument.base import InstrumentBase as qcodes_InstrumentBase
from entropylab.instruments.qcodes_adapter import QcodesAdapter
class MockQcodesDriver(qcodes_InstrumentBase):
def __init__(
self, name: str, metadata: Optional[Dict[Any, Any]] = None
) -> None:
super().__init__(name, metadata)
self.add_parameter("p")
setter = lambda val: print(val)
getter = lambda: 1
self.add_parameter("s", set_cmd=self.setter, get_cmd=self.getter)
self.add_parameter("g", set_cmd=setter, get_cmd=getter)
def setter(self, val):
print(val)
self.s = val
def getter(self):
return self.s
def free_function(self):
print("i'm free")
dummy = QcodesAdapter(MockQcodesDriver, "dummy_inst")
driver_spec = dummy.get_dynamic_driver_specs()
print(driver_spec)
assert len(driver_spec.parameters) == 3
assert driver_spec.parameters[0].name == "p"
assert driver_spec.parameters[1].name == "s"
assert driver_spec.parameters[2].name == "g"
assert len(driver_spec.functions) == 0
assert len(driver_spec.undeclared_functions) == 3
assert driver_spec.undeclared_functions[0].name == "free_function"
@pytest.mark.skip()
def test_qcodes_dummy_snapshot():
    # Importing inside the test so that general pytest discovery won't require a qcodes installation
from qcodes.instrument.base import InstrumentBase as qcodes_InstrumentBase
from entropylab.instruments.qcodes_adapter import QcodesAdapter
class MockQcodesDriver(qcodes_InstrumentBase):
def __init__(
self, name: str, metadata: Optional[Dict[Any, Any]] = None
) -> None:
super().__init__(name, metadata)
self.add_parameter("p")
setter = lambda val: print(val)
getter = lambda: 1
self.add_parameter("s", set_cmd=self.setter, get_cmd=self.getter)
self.add_parameter("g", set_cmd=setter, get_cmd=getter)
def setter(self, val):
print(val)
self.s = val
def getter(self):
return self.s
def free_function(self):
print("i'm free")
dummy = QcodesAdapter(MockQcodesDriver, "dummy_inst")
dummy.connect()
snapshot = dummy.snapshot(True)
print(snapshot)
assert len(snapshot) > 0
| 33.59375
| 85
| 0.616744
|
from typing import Optional, Dict, Any
import pytest
@pytest.mark.skip()
def test_qcodes_dummy():
from qcodes.instrument.base import InstrumentBase as qcodes_InstrumentBase
from entropylab.instruments.qcodes_adapter import QcodesAdapter
class MockQcodesDriver(qcodes_InstrumentBase):
def __init__(
self, name: str, metadata: Optional[Dict[Any, Any]] = None
) -> None:
super().__init__(name, metadata)
self.add_parameter("p")
setter = lambda val: print(val)
getter = lambda: 1
self.add_parameter("s", set_cmd=self.setter, get_cmd=self.getter)
self.add_parameter("g", set_cmd=setter, get_cmd=getter)
def setter(self, val):
print(val)
self.s = val
def getter(self):
return self.s
def free_function(self):
print("i'm free")
class QcodesDummy(QcodesAdapter):
def __init__(self):
super().__init__(MockQcodesDriver, "QcodesDummy")
def revert_to_snapshot(self, snapshot: str):
pass
dummy = QcodesDummy()
print(dummy)
dummy.connect()
instance = dummy.get_instance()
instance.set("s", "printed")
instance.free_function()
instance.set("g", "g")
assert instance.get("s") == "printed"
assert instance.get("g") == 1
dummy.teardown()
@pytest.mark.skip()
def test_qcodes_dummy_object():
    # Importing inside the test so that general pytest discovery won't require a qcodes installation
from qcodes.instrument.base import InstrumentBase as qcodes_InstrumentBase
from entropylab.instruments.qcodes_adapter import QcodesAdapter
class MockQcodesDriver(qcodes_InstrumentBase):
def __init__(
self, name: str, metadata: Optional[Dict[Any, Any]] = None
) -> None:
super().__init__(name, metadata)
self.add_parameter("p")
setter = lambda val: print(val)
getter = lambda: 1
self.add_parameter("s", set_cmd=self.setter, get_cmd=self.getter)
self.add_parameter("g", set_cmd=setter, get_cmd=getter)
def setter(self, val):
print(val)
self.s = val
def getter(self):
return self.s
def free_function(self):
print("i'm free")
dummy = QcodesAdapter(MockQcodesDriver, "dummy_inst")
dummy.connect()
instance = dummy.get_instance()
instance.set("s", "printed")
instance.free_function()
instance.set("g", "g")
assert instance.get("s") == "printed"
assert instance.get("g") == 1
dummy.teardown()
@pytest.mark.skip()
def test_qcodes_dummy_object_dynamic_spec():
from qcodes.instrument.base import InstrumentBase as qcodes_InstrumentBase
from entropylab.instruments.qcodes_adapter import QcodesAdapter
class MockQcodesDriver(qcodes_InstrumentBase):
def __init__(
self, name: str, metadata: Optional[Dict[Any, Any]] = None
) -> None:
super().__init__(name, metadata)
self.add_parameter("p")
setter = lambda val: print(val)
getter = lambda: 1
self.add_parameter("s", set_cmd=self.setter, get_cmd=self.getter)
self.add_parameter("g", set_cmd=setter, get_cmd=getter)
def setter(self, val):
print(val)
self.s = val
def getter(self):
return self.s
def free_function(self):
print("i'm free")
dummy = QcodesAdapter(MockQcodesDriver, "dummy_inst")
driver_spec = dummy.get_dynamic_driver_specs()
print(driver_spec)
assert len(driver_spec.parameters) == 3
assert driver_spec.parameters[0].name == "p"
assert driver_spec.parameters[1].name == "s"
assert driver_spec.parameters[2].name == "g"
assert len(driver_spec.functions) == 0
assert len(driver_spec.undeclared_functions) == 3
assert driver_spec.undeclared_functions[0].name == "free_function"
@pytest.mark.skip()
def test_qcodes_dummy_snapshot():
    # Importing inside the test so that general pytest discovery won't require a qcodes installation
from qcodes.instrument.base import InstrumentBase as qcodes_InstrumentBase
from entropylab.instruments.qcodes_adapter import QcodesAdapter
class MockQcodesDriver(qcodes_InstrumentBase):
def __init__(
self, name: str, metadata: Optional[Dict[Any, Any]] = None
) -> None:
super().__init__(name, metadata)
self.add_parameter("p")
setter = lambda val: print(val)
getter = lambda: 1
self.add_parameter("s", set_cmd=self.setter, get_cmd=self.getter)
self.add_parameter("g", set_cmd=setter, get_cmd=getter)
def setter(self, val):
print(val)
self.s = val
def getter(self):
return self.s
def free_function(self):
print("i'm free")
dummy = QcodesAdapter(MockQcodesDriver, "dummy_inst")
dummy.connect()
snapshot = dummy.snapshot(True)
print(snapshot)
assert len(snapshot) > 0
| true
| true
|
f71860d421e7ec3d53fd94f7266b4caa0c5935a1
| 813
|
py
|
Python
|
setup.py
|
iheartradio/all2vec
|
1070655dc2b7df719ac8641616ab2a10b964d956
|
[
"Apache-2.0"
] | 10
|
2016-08-11T20:25:45.000Z
|
2017-05-04T14:10:19.000Z
|
setup.py
|
iheartradio/all2vec
|
1070655dc2b7df719ac8641616ab2a10b964d956
|
[
"Apache-2.0"
] | 11
|
2016-08-11T20:02:46.000Z
|
2018-06-18T18:31:11.000Z
|
setup.py
|
iheartradio/all2vec
|
1070655dc2b7df719ac8641616ab2a10b964d956
|
[
"Apache-2.0"
] | 10
|
2016-08-11T19:45:17.000Z
|
2019-04-24T22:07:30.000Z
|
from setuptools import setup, find_packages
setup(
name='all2vec',
version='0.6.0',
author='Ravi Mody, Jon Banafato',
author_email='datascience@iheartmedia.com',
description='Store and compare high dimensional vectors',
packages=find_packages(exclude=['tests']),
zip_safe=True,
install_requires=[
'annoy==1.8.3'
, 'boto3>=1.4'
, 'dill>=0.2'
, 'numpy>=1.12'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3 :: Only',
]
)
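# A hedged packaging note: 'annoy==1.8.3' pins consumers to one exact Annoy
# release. If newer 1.x releases are API-compatible (an assumption worth
# verifying), a compatible-release specifier would be less restrictive:
#     'annoy~=1.8'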
| 27.1
| 61
| 0.583026
|
from setuptools import setup, find_packages
setup(
name='all2vec',
version='0.6.0',
author='Ravi Mody, Jon Banafato',
author_email='datascience@iheartmedia.com',
description='Store and compare high dimensional vectors',
packages=find_packages(exclude=['tests']),
zip_safe=True,
install_requires=[
'annoy==1.8.3'
, 'boto3>=1.4'
, 'dill>=0.2'
, 'numpy>=1.12'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3 :: Only',
]
)
| true
| true
|
f71861b294a65dddd71d9bdc515017b5fb0cd7fc
| 738
|
py
|
Python
|
kornia/__init__.py
|
Ishticode/kornia
|
974abb43ec72d12dbd244a2fb247bbbab8498de0
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-03-15T02:24:30.000Z
|
2022-03-15T02:24:30.000Z
|
kornia/__init__.py
|
Ishticode/kornia
|
974abb43ec72d12dbd244a2fb247bbbab8498de0
|
[
"ECL-2.0",
"Apache-2.0"
] | 14
|
2021-09-26T11:07:56.000Z
|
2022-03-20T11:11:15.000Z
|
kornia/__init__.py
|
Ishticode/kornia
|
974abb43ec72d12dbd244a2fb247bbbab8498de0
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-08-12T16:34:06.000Z
|
2020-08-12T16:34:06.000Z
|
# import the version variable
from ._version import __version__
# NOTE: kornia filters and geometry must go first since they are the core of the
# library, and changing the import order can lead to circular-dependency issues.
from . import filters
from . import geometry
# import the other modules for convenience
from . import (
augmentation,
color,
contrib,
enhance,
feature,
losses,
metrics,
morphology,
tracking,
utils,
x,
)
# NOTE: we are going to expose to top level very few things
from kornia.constants import pi
from kornia.testing import xla_is_available
from kornia.utils import (
eye_like,
vec_like,
create_meshgrid,
image_to_tensor,
tensor_to_image,
)
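# A hedged usage sketch of the re-exported helpers (assumes numpy and torch
# are installed alongside kornia; shapes follow the default keepdim behavior):
#     import numpy as np
#     import kornia
#     t = kornia.image_to_tensor(np.zeros((32, 32, 3), np.float32))  # C x H x W
#     img = kornia.tensor_to_image(t)                                # H x W x C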
| 22.363636
| 84
| 0.724932
|
from ._version import __version__
from . import filters
from . import geometry
from . import (
augmentation,
color,
contrib,
enhance,
feature,
losses,
metrics,
morphology,
tracking,
utils,
x,
)
from kornia.constants import pi
from kornia.testing import xla_is_available
from kornia.utils import (
eye_like,
vec_like,
create_meshgrid,
image_to_tensor,
tensor_to_image,
)
| true
| true
|
f71862fe1c337e03b7bd761bd77a93e15fb437ca
| 3,490
|
py
|
Python
|
plugins/powershell/komand_powershell/actions/powershell_string/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/powershell/komand_powershell/actions/powershell_string/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/powershell/komand_powershell/actions/powershell_string/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
import komand
from .schema import PowershellStringInput, PowershellStringOutput
# Custom imports below
from komand_powershell.util import util
class PowershellString(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="powershell_string",
description="Execute PowerShell script on a remote host or locally",
input=PowershellStringInput(),
output=PowershellStringOutput(),
)
def run(self, params={}):
auth = self.connection.auth_type
host_ip = params.get("address")
powershell_script = params.get("script")
username = self.connection.username
password = self.connection.password
port = self.connection.port
# Set variables for Kerberos
host_name = params.get("host_name")
kdc = self.connection.kdc
domain = self.connection.domain
self.logger.debug(powershell_script)
# This will run PowerShell on the linux VM
if auth == "None" or not host_ip:
data = util.local(action=self, powershell_script=powershell_script)
output = data.get("output")
stderr = data.get("stderr")
if output:
output = self.safe_encode(output)
if stderr:
stderr = self.safe_encode(stderr)
return {"stdout": output, "stderr": stderr}
# This code will run a PowerShell script with a NTLM connection
if auth == "NTLM":
data = util.ntlm(
action=self,
host_ip=host_ip,
powershell_script=powershell_script,
username=username,
password=password,
port=port,
)
output = data.get("output")
stderr = data.get("stderr")
if output:
output = self.safe_encode(output)
if stderr:
stderr = self.safe_encode(stderr)
return {"stdout": output, "stderr": stderr}
# This code will run a PowerShell script with a Kerberos account
if auth == "Kerberos":
data = util.kerberos(
action=self,
host_ip=host_ip,
kdc=kdc,
domain=domain,
host_name=host_name,
powershell_script=powershell_script,
password=password,
username=username,
port=port,
)
output = data.get("output")
stderr = data.get("stderr")
if output:
output = self.safe_encode(output)
if stderr:
stderr = self.safe_encode(stderr)
return {"stdout": output, "stderr": stderr}
if auth == "CredSSP":
data = util.credssp(
action=self,
host_ip=host_ip,
powershell_script=powershell_script,
username=username,
password=password,
port=port,
)
output = data.get("output")
stderr = data.get("stderr")
if output:
output = self.safe_encode(output)
if stderr:
stderr = self.safe_encode(stderr)
return {"stdout": output, "stderr": stderr}
    def safe_encode(self, in_byte):
        # Coerce to text first, then strip NUL characters that WinRM output may contain
        return str(in_byte).replace("\u0000", "")
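    def _format_result(self, data):
        # Hedged refactoring sketch: every auth branch in run() repeats the
        # same output/stderr post-processing, so each branch could instead
        # end with `return self._format_result(data)` after its util.* call.
        output = data.get("output")
        stderr = data.get("stderr")
        if output:
            output = self.safe_encode(output)
        if stderr:
            stderr = self.safe_encode(stderr)
        return {"stdout": output, "stderr": stderr}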
| 31.441441
| 80
| 0.540974
|
import komand
from .schema import PowershellStringInput, PowershellStringOutput
from komand_powershell.util import util
class PowershellString(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="powershell_string",
description="Execute PowerShell script on a remote host or locally",
input=PowershellStringInput(),
output=PowershellStringOutput(),
)
def run(self, params={}):
auth = self.connection.auth_type
host_ip = params.get("address")
powershell_script = params.get("script")
username = self.connection.username
password = self.connection.password
port = self.connection.port
host_name = params.get("host_name")
kdc = self.connection.kdc
domain = self.connection.domain
self.logger.debug(powershell_script)
if auth == "None" or not host_ip:
data = util.local(action=self, powershell_script=powershell_script)
output = data.get("output")
stderr = data.get("stderr")
if output:
output = self.safe_encode(output)
if stderr:
stderr = self.safe_encode(stderr)
return {"stdout": output, "stderr": stderr}
if auth == "NTLM":
data = util.ntlm(
action=self,
host_ip=host_ip,
powershell_script=powershell_script,
username=username,
password=password,
port=port,
)
output = data.get("output")
stderr = data.get("stderr")
if output:
output = self.safe_encode(output)
if stderr:
stderr = self.safe_encode(stderr)
return {"stdout": output, "stderr": stderr}
if auth == "Kerberos":
data = util.kerberos(
action=self,
host_ip=host_ip,
kdc=kdc,
domain=domain,
host_name=host_name,
powershell_script=powershell_script,
password=password,
username=username,
port=port,
)
output = data.get("output")
stderr = data.get("stderr")
if output:
output = self.safe_encode(output)
if stderr:
stderr = self.safe_encode(stderr)
return {"stdout": output, "stderr": stderr}
if auth == "CredSSP":
data = util.credssp(
action=self,
host_ip=host_ip,
powershell_script=powershell_script,
username=username,
password=password,
port=port,
)
output = data.get("output")
stderr = data.get("stderr")
if output:
output = self.safe_encode(output)
if stderr:
stderr = self.safe_encode(stderr)
return {"stdout": output, "stderr": stderr}
    def safe_encode(self, in_byte):
        return str(in_byte).replace("\u0000", "")
| true
| true
|
f718636819d7daacb6f2782f278cee37154f4006
| 38,114
|
py
|
Python
|
flink-python/pyflink/datastream/state_backend.py
|
waychan23/flink
|
f4e2473f2a1a65b93537f2b03867683c35da85e1
|
[
"Apache-2.0"
] | 2
|
2019-10-22T08:20:29.000Z
|
2019-10-22T08:20:31.000Z
|
flink-python/pyflink/datastream/state_backend.py
|
waychan23/flink
|
f4e2473f2a1a65b93537f2b03867683c35da85e1
|
[
"Apache-2.0"
] | 1
|
2020-05-19T08:20:26.000Z
|
2020-05-19T08:20:26.000Z
|
flink-python/pyflink/datastream/state_backend.py
|
waychan23/flink
|
f4e2473f2a1a65b93537f2b03867683c35da85e1
|
[
"Apache-2.0"
] | 1
|
2019-11-09T00:45:46.000Z
|
2019-11-09T00:45:46.000Z
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABCMeta
from py4j.java_gateway import get_java_class
from pyflink.java_gateway import get_gateway
from pyflink.util.utils import load_java_class
__all__ = [
'StateBackend',
'MemoryStateBackend',
'FsStateBackend',
'RocksDBStateBackend',
'CustomStateBackend',
'PredefinedOptions']
def _from_j_state_backend(j_state_backend):
if j_state_backend is None:
return None
gateway = get_gateway()
JStateBackend = gateway.jvm.org.apache.flink.runtime.state.StateBackend
JMemoryStateBackend = gateway.jvm.org.apache.flink.runtime.state.memory.MemoryStateBackend
JFsStateBackend = gateway.jvm.org.apache.flink.runtime.state.filesystem.FsStateBackend
JRocksDBStateBackend = gateway.jvm.org.apache.flink.contrib.streaming.state.RocksDBStateBackend
j_clz = j_state_backend.getClass()
if not get_java_class(JStateBackend).isAssignableFrom(j_clz):
raise TypeError("The input %s is not an instance of StateBackend." % j_state_backend)
if get_java_class(JMemoryStateBackend).isAssignableFrom(j_state_backend.getClass()):
return MemoryStateBackend(j_memory_state_backend=j_state_backend)
elif get_java_class(JFsStateBackend).isAssignableFrom(j_state_backend.getClass()):
return FsStateBackend(j_fs_state_backend=j_state_backend)
elif get_java_class(JRocksDBStateBackend).isAssignableFrom(j_state_backend.getClass()):
return RocksDBStateBackend(j_rocks_db_state_backend=j_state_backend)
else:
return CustomStateBackend(j_state_backend) # users' customized state backend
class StateBackend(object):
"""
A **State Backend** defines how the state of a streaming application is stored and
checkpointed. Different State Backends store their state in different fashions, and use
different data structures to hold the state of a running application.
For example, the :class:`MemoryStateBackend` keeps working state in the memory of the
TaskManager and stores checkpoints in the memory of the JobManager. The backend is
lightweight and without additional dependencies, but not highly available and supports only
small state.
The :class:`FsStateBackend` keeps working state in the memory of the TaskManager and stores
    state checkpoints in a filesystem (typically a replicated, highly available filesystem,
like `HDFS <https://hadoop.apache.org/>`_, `Ceph <https://ceph.com/>`_,
`S3 <https://aws.amazon.com/documentation/s3/>`_, `GCS <https://cloud.google.com/storage/>`_,
etc).
The :class:`RocksDBStateBackend` stores working state in `RocksDB <http://rocksdb.org/>`_,
and checkpoints the state by default to a filesystem (similar to the :class:`FsStateBackend`).
**Raw Bytes Storage and Backends**
The :class:`StateBackend` creates services for *raw bytes storage* and for *keyed state*
and *operator state*.
The *raw bytes storage* (through the `org.apache.flink.runtime.state.CheckpointStreamFactory`)
is the fundamental service that simply stores bytes in a fault tolerant fashion. This service
is used by the JobManager to store checkpoint and recovery metadata and is typically also used
by the keyed- and operator state backends to store checkpointed state.
    The `org.apache.flink.runtime.state.AbstractKeyedStateBackend` and
    `org.apache.flink.runtime.state.OperatorStateBackend` created by this state backend define how
to hold the working state for keys and operators. They also define how to checkpoint that
state, frequently using the raw bytes storage (via the
`org.apache.flink.runtime.state.CheckpointStreamFactory`). However, it is also possible that
for example a keyed state backend simply implements the bridge to a key/value store, and that
it does not need to store anything in the raw byte storage upon a checkpoint.
**Serializability**
    State backends need to be serializable (`java.io.Serializable`), because they are
    distributed across parallel processes (for distributed execution) together with the
    streaming application code.
Because of that, :class:`StateBackend` implementations are meant to be like *factories* that
create the proper states stores that provide access to the persistent storage and hold the
keyed- and operator state data structures. That way, the State Backend can be very lightweight
(contain only configurations) which makes it easier to be serializable.
**Thread Safety**
State backend implementations have to be thread-safe. Multiple threads may be creating
streams and keyed-/operator state backends concurrently.
"""
__metaclass__ = ABCMeta
def __init__(self, j_state_backend):
self._j_state_backend = j_state_backend
class MemoryStateBackend(StateBackend):
"""
This state backend holds the working state in the memory (JVM heap) of the TaskManagers.
The state backend checkpoints state directly to the JobManager's memory (hence the backend's
name), but the checkpoints will be persisted to a file system for high-availability setups and
savepoints. The MemoryStateBackend is consequently a FileSystem-based backend that can work
without a file system dependency in simple setups.
This state backend should be used only for experimentation, quick local setups,
or for streaming applications that have very small state: Because it requires checkpoints to
go through the JobManager's memory, larger state will occupy larger portions of the
JobManager's main memory, reducing operational stability.
For any other setup, the :class:`FsStateBackend` should be used. The :class:`FsStateBackend`
holds the working state on the TaskManagers in the same way, but checkpoints state directly to
    files rather than to the JobManager's memory, thus supporting large state sizes.
**State Size Considerations**
State checkpointing with this state backend is subject to the following conditions:
- Each individual state must not exceed the configured maximum state size
      (see :func:`get_max_state_size`).
    - All state from one task (i.e., the sum of all operator states and keyed states from all
      chained operators of the task) must not exceed what the RPC system supports, which is
      by default < 10 MB. That limit can be raised via configuration, but that is typically
      not advised.
- The sum of all states in the application times all retained checkpoints must comfortably
fit into the JobManager's JVM heap space.
**Persistence Guarantees**
For the use cases where the state sizes can be handled by this backend, the backend does
    guarantee persistence for savepoints, externalized checkpoints (if configured), and checkpoints
(when high-availability is configured).
**Configuration**
As for all state backends, this backend can either be configured within the application (by
creating the backend with the respective constructor parameters and setting it on the execution
environment) or by specifying it in the Flink configuration.
If the state backend was specified in the application, it may pick up additional configuration
    parameters from the Flink configuration. For example, if the backend is configured in the
application without a default savepoint directory, it will pick up a default savepoint
directory specified in the Flink configuration of the running job/cluster. That behavior is
implemented via the :func:`configure` method.
"""
# The default maximal size that the snapshotted memory state may have (5 MiBytes).
DEFAULT_MAX_STATE_SIZE = 5 * 1024 * 1024
def __init__(self,
checkpoint_path=None,
savepoint_path=None,
max_state_size=None,
using_asynchronous_snapshots=None,
j_memory_state_backend=None):
"""
Creates a new MemoryStateBackend, setting optionally the paths to persist checkpoint
metadata and savepoints to, as well as configuring state thresholds and asynchronous
operations.
        WARNING: Increasing this value beyond the default
        (:data:`DEFAULT_MAX_STATE_SIZE`) should be done with care.
        The checkpointed state needs to be sent to the JobManager via limited-size RPC messages,
        and the JobManager needs to be able to hold all aggregated state in its memory.
Example:
::
>>> state_backend = MemoryStateBackend()
:param checkpoint_path: The path to write checkpoint metadata to. If none, the value from
the runtime configuration will be used.
:param savepoint_path: The path to write savepoints to. If none, the value from
the runtime configuration will be used.
:param max_state_size: The maximal size of the serialized state. If none, the
:data:`DEFAULT_MAX_STATE_SIZE` will be used.
:param using_asynchronous_snapshots: Flag to switch between synchronous and asynchronous
snapshot mode. If null, the value configured in the
runtime configuration will be used.
        :param j_memory_state_backend: For internal use; please leave as None.
"""
if j_memory_state_backend is None:
gateway = get_gateway()
JTernaryBoolean = gateway.jvm.org.apache.flink.util.TernaryBoolean
JMemoryStateBackend = gateway.jvm.org.apache.flink.runtime.state.memory\
.MemoryStateBackend
if using_asynchronous_snapshots is None:
j_asynchronous_snapshots = JTernaryBoolean.UNDEFINED
elif using_asynchronous_snapshots is True:
j_asynchronous_snapshots = JTernaryBoolean.TRUE
elif using_asynchronous_snapshots is False:
j_asynchronous_snapshots = JTernaryBoolean.FALSE
else:
raise TypeError("Unsupported input for 'using_asynchronous_snapshots': %s, "
"the value of the parameter should be None or"
"True or False.")
if max_state_size is None:
max_state_size = JMemoryStateBackend.DEFAULT_MAX_STATE_SIZE
j_memory_state_backend = JMemoryStateBackend(checkpoint_path,
savepoint_path,
max_state_size,
j_asynchronous_snapshots)
self._j_memory_state_backend = j_memory_state_backend
super(MemoryStateBackend, self).__init__(j_memory_state_backend)
def get_max_state_size(self):
"""
Gets the maximum size that an individual state can have, as configured in the
constructor (by default :data:`DEFAULT_MAX_STATE_SIZE`).
:return: The maximum size that an individual state can have.
"""
return self._j_memory_state_backend.getMaxStateSize()
def is_using_asynchronous_snapshots(self):
"""
Gets whether the key/value data structures are asynchronously snapshotted.
If not explicitly configured, this is the default value of
``org.apache.flink.configuration.CheckpointingOptions.ASYNC_SNAPSHOTS``.
:return: True if the key/value data structures are asynchronously snapshotted,
false otherwise.
"""
return self._j_memory_state_backend.isUsingAsynchronousSnapshots()
def __str__(self):
return self._j_memory_state_backend.toString()
class FsStateBackend(StateBackend):
"""
This state backend holds the working state in the memory (JVM heap) of the TaskManagers.
The state backend checkpoints state as files to a file system (hence the backend's name).
Each checkpoint individually will store all its files in a subdirectory that includes the
checkpoint number, such as ``hdfs://namenode:port/flink-checkpoints/chk-17/``.
**State Size Considerations**
Working state is kept on the TaskManager heap. If a TaskManager executes multiple
tasks concurrently (if the TaskManager has multiple slots, or if slot-sharing is used)
then the aggregate state of all tasks needs to fit into that TaskManager's memory.
This state backend stores small state chunks directly with the metadata, to avoid creating
many small files. The threshold for that is configurable. When increasing this threshold, the
size of the checkpoint metadata increases. The checkpoint metadata of all retained completed
checkpoints needs to fit into the JobManager's heap memory. This is typically not a problem,
unless the threshold :func:`get_min_file_size_threshold` is increased significantly.
**Persistence Guarantees**
Checkpoints from this state backend are as persistent and available as filesystem that is
written to. If the file system is a persistent distributed file system, this state backend
supports highly available setups. The backend additionally supports savepoints and externalized
checkpoints.
**Configuration**
As for all state backends, this backend can either be configured within the application (by
creating the backend with the respective constructor parameters and setting it on the execution
environment) or by specifying it in the Flink configuration.
If the state backend was specified in the application, it may pick up additional configuration
    parameters from the Flink configuration. For example, if the backend is configured in the
application without a default savepoint directory, it will pick up a default savepoint
directory specified in the Flink configuration of the running job/cluster. That behavior is
implemented via the :func:`configure` method.
"""
def __init__(self,
checkpoint_directory_uri=None,
default_savepoint_directory_uri=None,
file_state_size_threshold=None,
write_buffer_size=None,
using_asynchronous_snapshots=None,
j_fs_state_backend=None):
"""
Creates a new state backend that stores its checkpoint data in the file system and location
defined by the given URI.
A file system for the file system scheme in the URI (e.g., 'file://', 'hdfs://', or
'S3://') must be accessible via ``org.apache.flink.core.fs.FileSystem.get(URI)``.
For a state backend targeting HDFS, this means that the URI must either specify the
authority (host and port), or that the Hadoop configuration that describes that information
must be in the classpath.
Example:
::
>>> state_backend = FsStateBackend("file://var/checkpoints/")
:param checkpoint_directory_uri: The path to write checkpoint metadata to, required.
:param default_savepoint_directory_uri: The path to write savepoints to. If none, the value
from the runtime configuration will be used, or
savepoint target locations need to be passed when
triggering a savepoint.
:param file_state_size_threshold: State below this size will be stored as part of the
metadata, rather than in files. If none, the value
configured in the runtime configuration will be used, or
the default value (1KB) if nothing is configured.
:param write_buffer_size: Write buffer size used to serialize state. If -1, the value
configured in the runtime configuration will be used, or the
default value (4KB) if nothing is configured.
:param using_asynchronous_snapshots: Flag to switch between synchronous and asynchronous
snapshot mode. If none, the value configured in
the runtime configuration will be used.
        :param j_fs_state_backend: For internal use; please leave as None.
"""
if j_fs_state_backend is None:
gateway = get_gateway()
JTernaryBoolean = gateway.jvm.org.apache.flink.util.TernaryBoolean
JFsStateBackend = gateway.jvm.org.apache.flink.runtime.state.filesystem\
.FsStateBackend
JPath = gateway.jvm.org.apache.flink.core.fs.Path
if checkpoint_directory_uri is None:
raise ValueError("The parameter 'checkpoint_directory_uri' is required!")
j_checkpoint_directory_uri = JPath(checkpoint_directory_uri).toUri()
if default_savepoint_directory_uri is None:
j_default_savepoint_directory_uri = None
else:
j_default_savepoint_directory_uri = JPath(default_savepoint_directory_uri).toUri()
if file_state_size_threshold is None:
file_state_size_threshold = -1
if write_buffer_size is None:
write_buffer_size = -1
if using_asynchronous_snapshots is None:
j_asynchronous_snapshots = JTernaryBoolean.UNDEFINED
elif using_asynchronous_snapshots is True:
j_asynchronous_snapshots = JTernaryBoolean.TRUE
elif using_asynchronous_snapshots is False:
j_asynchronous_snapshots = JTernaryBoolean.FALSE
else:
raise TypeError("Unsupported input for 'using_asynchronous_snapshots': %s, "
"the value of the parameter should be None or"
"True or False.")
j_fs_state_backend = JFsStateBackend(j_checkpoint_directory_uri,
j_default_savepoint_directory_uri,
file_state_size_threshold,
write_buffer_size,
j_asynchronous_snapshots)
self._j_fs_state_backend = j_fs_state_backend
super(FsStateBackend, self).__init__(j_fs_state_backend)
def get_checkpoint_path(self):
"""
Gets the base directory where all the checkpoints are stored.
The job-specific checkpoint directory is created inside this directory.
:return: The base directory for checkpoints.
"""
return self._j_fs_state_backend.getCheckpointPath().toString()
def get_min_file_size_threshold(self):
"""
Gets the threshold below which state is stored as part of the metadata, rather than in
files. This threshold ensures that the backend does not create a large amount of very
small files, where potentially the file pointers are larger than the state itself.
If not explicitly configured, this is the default value of
``org.apache.flink.configuration.CheckpointingOptions.FS_SMALL_FILE_THRESHOLD``.
:return: The file size threshold, in bytes.
"""
return self._j_fs_state_backend.getMinFileSizeThreshold()
def is_using_asynchronous_snapshots(self):
"""
Gets whether the key/value data structures are asynchronously snapshotted.
If not explicitly configured, this is the default value of
``org.apache.flink.configuration.CheckpointingOptions.ASYNC_SNAPSHOTS``.
:return: True if the key/value data structures are asynchronously snapshotted,
false otherwise.
"""
return self._j_fs_state_backend.isUsingAsynchronousSnapshots()
def get_write_buffer_size(self):
"""
Gets the write buffer size for created checkpoint stream.
If not explicitly configured, this is the default value of
``org.apache.flink.configuration.CheckpointingOptions.FS_WRITE_BUFFER_SIZE``.
:return: The write buffer size, in bytes.
"""
return self._j_fs_state_backend.getWriteBufferSize()
class RocksDBStateBackend(StateBackend):
"""
A State Backend that stores its state in ``RocksDB``. This state backend can
store very large state that exceeds memory and spills to disk.
All key/value state (including windows) is stored in the key/value index of RocksDB.
For persistence against loss of machines, checkpoints take a snapshot of the
RocksDB database, and persist that snapshot in a file system (by default) or
another configurable state backend.
The behavior of the RocksDB instances can be parametrized by setting RocksDB Options
using the methods :func:`set_predefined_options` and :func:`set_options`.
"""
def __init__(self,
checkpoint_data_uri=None,
enable_incremental_checkpointing=None,
checkpoint_stream_backend=None,
j_rocks_db_state_backend=None):
"""
Creates a new :class:`RocksDBStateBackend` that stores its checkpoint data in the given
state backend or the location of given URI.
        If using a state backend, one would typically supply a filesystem or database state
        backend here, where the snapshots from RocksDB are stored.
        If using a URI, a state backend that stores checkpoints in HDFS or S3 must specify the
        file system host and port in the URI, or have the Hadoop configuration that describes
        the file system (host / high-availability group / possibly credentials) either
        referenced from the Flink config or included in the classpath.
Example:
::
>>> state_backend = RocksDBStateBackend("file://var/checkpoints/")
:param checkpoint_data_uri: The URI describing the filesystem and path to the checkpoint
data directory.
:param enable_incremental_checkpointing: True if incremental checkpointing is enabled.
        :param checkpoint_stream_backend: The backend to write the checkpoint streams to.
        :param j_rocks_db_state_backend: For internal use; please leave as None.
"""
if j_rocks_db_state_backend is None:
gateway = get_gateway()
JTernaryBoolean = gateway.jvm.org.apache.flink.util.TernaryBoolean
JRocksDBStateBackend = gateway.jvm.org.apache.flink.contrib.streaming.state \
.RocksDBStateBackend
if enable_incremental_checkpointing not in (None, True, False):
raise TypeError("Unsupported input for 'enable_incremental_checkpointing': %s, "
"the value of the parameter should be None or"
"True or False.")
if checkpoint_data_uri is not None:
if enable_incremental_checkpointing is None:
j_rocks_db_state_backend = JRocksDBStateBackend(checkpoint_data_uri)
else:
j_rocks_db_state_backend = \
JRocksDBStateBackend(checkpoint_data_uri, enable_incremental_checkpointing)
elif isinstance(checkpoint_stream_backend, StateBackend):
if enable_incremental_checkpointing is None:
j_enable_incremental_checkpointing = JTernaryBoolean.UNDEFINED
elif enable_incremental_checkpointing is True:
j_enable_incremental_checkpointing = JTernaryBoolean.TRUE
else:
j_enable_incremental_checkpointing = JTernaryBoolean.FALSE
j_rocks_db_state_backend = \
JRocksDBStateBackend(checkpoint_stream_backend._j_state_backend,
j_enable_incremental_checkpointing)
self._j_rocks_db_state_backend = j_rocks_db_state_backend
super(RocksDBStateBackend, self).__init__(j_rocks_db_state_backend)
def get_checkpoint_backend(self):
"""
Gets the state backend that this RocksDB state backend uses to persist
its bytes to.
        This RocksDB state backend only implements the RocksDB-specific parts; it
        relies on the 'CheckpointBackend' to persist the checkpoint and savepoint byte
        streams.
        :return: The state backend to persist the checkpoint and savepoint byte streams.
"""
j_state_backend = self._j_rocks_db_state_backend.getCheckpointBackend()
return _from_j_state_backend(j_state_backend)
def set_db_storage_paths(self, *paths):
"""
Sets the directories in which the local RocksDB database puts its files (like SST and
metadata files). These directories do not need to be persistent, they can be ephemeral,
meaning that they are lost on a machine failure, because state in RocksDB is persisted
in checkpoints.
If nothing is configured, these directories default to the TaskManager's local
temporary file directories.
Each distinct state will be stored in one path, but when the state backend creates
multiple states, they will store their files on different paths.
Passing ``None`` to this function restores the default behavior, where the configured
temp directories will be used.
:param paths: The paths across which the local RocksDB database files will be spread. this
parameter is optional.
"""
if len(paths) < 1:
self._j_rocks_db_state_backend.setDbStoragePath(None)
else:
gateway = get_gateway()
j_path_array = gateway.new_array(gateway.jvm.String, len(paths))
for i in range(0, len(paths)):
j_path_array[i] = paths[i]
self._j_rocks_db_state_backend.setDbStoragePaths(j_path_array)
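    # Example (hedged; the paths are placeholders): spread the local RocksDB
    # files across two disks:
    #     backend.set_db_storage_paths("/data1/rocksdb", "/data2/rocksdb")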
def get_db_storage_paths(self):
"""
Gets the configured local DB storage paths, or null, if none were configured.
Under these directories on the TaskManager, RocksDB stores its SST files and
        metadata files. These directories do not need to be persistent, they can be ephemeral,
meaning that they are lost on a machine failure, because state in RocksDB is persisted
in checkpoints.
If nothing is configured, these directories default to the TaskManager's local
temporary file directories.
:return: The list of configured local DB storage paths.
"""
return list(self._j_rocks_db_state_backend.getDbStoragePaths())
def is_incremental_checkpoints_enabled(self):
"""
Gets whether incremental checkpoints are enabled for this state backend.
:return: True if incremental checkpoints are enabled, false otherwise.
"""
return self._j_rocks_db_state_backend.isIncrementalCheckpointsEnabled()
def is_ttl_compaction_filter_enabled(self):
"""
Gets whether compaction filter to cleanup state with TTL is enabled.
:return: True if enabled, false otherwise.
"""
return self._j_rocks_db_state_backend.isTtlCompactionFilterEnabled()
def enable_ttl_compaction_filter(self):
"""
Enable compaction filter to cleanup state with TTL.
.. note::
User can still decide in state TTL configuration in state descriptor
whether the filter is active for particular state or not.
"""
self._j_rocks_db_state_backend.enableTtlCompactionFilter()
def set_predefined_options(self, options):
"""
Sets the predefined options for RocksDB.
        If user-configured options within ``RocksDBConfigurableOptions`` are set (through
        flink-conf.yaml) or a user-defined options factory is set (via :func:`set_options`),
        then the options from the factory are applied on top of the predefined options
        specified here and the customized options.
Example:
::
>>> state_backend.set_predefined_options(PredefinedOptions.SPINNING_DISK_OPTIMIZED)
:param options: The options to set (must not be null), see :class:`PredefinedOptions`.
"""
gateway = get_gateway()
JPredefinedOptions = gateway.jvm.org.apache.flink.contrib.streaming.state.PredefinedOptions
if options == PredefinedOptions.DEFAULT:
self._j_rocks_db_state_backend.setPredefinedOptions(JPredefinedOptions.DEFAULT)
elif options == PredefinedOptions.SPINNING_DISK_OPTIMIZED:
self._j_rocks_db_state_backend.setPredefinedOptions(
JPredefinedOptions.SPINNING_DISK_OPTIMIZED)
elif options == PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM:
self._j_rocks_db_state_backend.setPredefinedOptions(
JPredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM)
elif options == PredefinedOptions.FLASH_SSD_OPTIMIZED:
self._j_rocks_db_state_backend.setPredefinedOptions(
JPredefinedOptions.FLASH_SSD_OPTIMIZED)
else:
raise TypeError("Unsupported options: %s, the supported options are: "
"PredefinedOptions.DEFAULT, PredefinedOptions.SPINNING_DISK_OPTIMIZED,"
" PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM and "
"PredefinedOptions.FLASH_SSD_OPTIMIZED")
def get_predefined_options(self):
"""
Gets the current predefined options for RocksDB.
        The default options (if nothing was set via :func:`set_predefined_options`)
        are :data:`PredefinedOptions.DEFAULT`.
        If user-configured options within ``RocksDBConfigurableOptions`` are set (through
        flink-conf.yaml) or a user-defined options factory is set (via :func:`set_options`),
then the options from the factory are applied on top of the predefined and customized
options.
.. seealso:: :func:`set_predefined_options`
:return: Current predefined options.
"""
j_predefined_options = self._j_rocks_db_state_backend.getPredefinedOptions()
gateway = get_gateway()
JPredefinedOptions = gateway.jvm.org.apache.flink.contrib.streaming.state.PredefinedOptions
if j_predefined_options == JPredefinedOptions.DEFAULT:
return PredefinedOptions.DEFAULT
elif j_predefined_options == JPredefinedOptions.FLASH_SSD_OPTIMIZED:
return PredefinedOptions.FLASH_SSD_OPTIMIZED
elif j_predefined_options == JPredefinedOptions.SPINNING_DISK_OPTIMIZED:
return PredefinedOptions.SPINNING_DISK_OPTIMIZED
elif j_predefined_options == JPredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM:
return PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM
else:
raise Exception("Unsupported java options: %s" % j_predefined_options)
def set_options(self, options_factory_class_name):
"""
Sets ``org.rocksdb.Options`` for the RocksDB instances.
Because the options are not serializable and hold native code references,
they must be specified through a factory.
The options created by the factory here are applied on top of the pre-defined
options profile selected via :func:`set_predefined_options`.
If the pre-defined options profile is the default (:data:`PredefinedOptions.DEFAULT`),
then the factory fully controls the RocksDB options.
:param options_factory_class_name: The fully-qualified class name of the options
factory in Java that lazily creates the RocksDB options.
The options factory must have a default constructor.
"""
gateway = get_gateway()
JOptionsFactory = gateway.jvm.org.apache.flink.contrib.streaming.state.OptionsFactory
j_options_factory_clz = load_java_class(options_factory_class_name)
if not get_java_class(JOptionsFactory).isAssignableFrom(j_options_factory_clz):
raise ValueError("The input class not implements OptionsFactory.")
self._j_rocks_db_state_backend.setOptions(j_options_factory_clz.newInstance())
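    # Example (hedged; the factory class name is hypothetical and must be a
    # Java class with a no-arg constructor on the Flink classpath):
    #     backend.set_options("com.example.MyRocksDBOptionsFactory")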
def get_options(self):
"""
Gets the fully-qualified class name of the options factory in Java that lazily creates
the RocksDB options.
:return: The fully-qualified class name of the options factory in Java.
"""
j_options_factory = self._j_rocks_db_state_backend.getOptions()
if j_options_factory is not None:
return j_options_factory.getClass().getName()
else:
return None
def get_number_of_transfering_threads(self):
"""
Gets the number of threads used to transfer files while snapshotting/restoring.
:return: The number of threads used to transfer files while snapshotting/restoring.
"""
return self._j_rocks_db_state_backend.getNumberOfTransferingThreads()
def set_number_of_transfering_threads(self, number_of_transfering_threads):
"""
Sets the number of threads used to transfer files while snapshotting/restoring.
:param number_of_transfering_threads: The number of threads used to transfer files while
snapshotting/restoring.
"""
self._j_rocks_db_state_backend.setNumberOfTransferingThreads(number_of_transfering_threads)
def __str__(self):
return self._j_rocks_db_state_backend.toString()
class PredefinedOptions(object):
"""
The :class:`PredefinedOptions` are configuration settings for the :class:`RocksDBStateBackend`.
The various pre-defined choices are configurations that have been empirically
determined to be beneficial for performance under different settings.
Some of these settings are based on experiments by the Flink community, some follow
guides from the RocksDB project.
:data:`DEFAULT`:
Default options for all settings, except that writes are not forced to the
disk.
.. note::
Because Flink does not rely on RocksDB data on disk for recovery,
there is no need to sync data to stable storage.
:data:`SPINNING_DISK_OPTIMIZED`:
Pre-defined options for regular spinning hard disks.
This constant configures RocksDB with some options that lead empirically
to better performance when the machines executing the system use
regular spinning hard disks.
The following options are set:
- setCompactionStyle(CompactionStyle.LEVEL)
- setLevelCompactionDynamicLevelBytes(true)
- setIncreaseParallelism(4)
- setUseFsync(false)
- setDisableDataSync(true)
- setMaxOpenFiles(-1)
.. note::
Because Flink does not rely on RocksDB data on disk for recovery,
there is no need to sync data to stable storage.
:data:`SPINNING_DISK_OPTIMIZED_HIGH_MEM`:
Pre-defined options for better performance on regular spinning hard disks,
at the cost of a higher memory consumption.
.. note::
These settings will cause RocksDB to consume a lot of memory for
            block caching and compactions. If you experience out-of-memory problems related to
RocksDB, consider switching back to :data:`SPINNING_DISK_OPTIMIZED`.
The following options are set:
- setLevelCompactionDynamicLevelBytes(true)
- setTargetFileSizeBase(256 MBytes)
- setMaxBytesForLevelBase(1 GByte)
- setWriteBufferSize(64 MBytes)
- setIncreaseParallelism(4)
- setMinWriteBufferNumberToMerge(3)
- setMaxWriteBufferNumber(4)
- setUseFsync(false)
- setMaxOpenFiles(-1)
- BlockBasedTableConfig.setBlockCacheSize(256 MBytes)
            - BlockBasedTableConfig.setBlockSize(128 KBytes)
.. note::
Because Flink does not rely on RocksDB data on disk for recovery,
there is no need to sync data to stable storage.
:data:`FLASH_SSD_OPTIMIZED`:
Pre-defined options for Flash SSDs.
This constant configures RocksDB with some options that lead empirically
to better performance when the machines executing the system use SSDs.
The following options are set:
- setIncreaseParallelism(4)
- setUseFsync(false)
- setDisableDataSync(true)
- setMaxOpenFiles(-1)
.. note::
Because Flink does not rely on RocksDB data on disk for recovery,
there is no need to sync data to stable storage.
"""
DEFAULT = 0
SPINNING_DISK_OPTIMIZED = 1
SPINNING_DISK_OPTIMIZED_HIGH_MEM = 2
FLASH_SSD_OPTIMIZED = 3
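    # These integer constants are translated to the corresponding Java
    # PredefinedOptions enum values by RocksDBStateBackend.set_predefined_options()
    # and mapped back by RocksDBStateBackend.get_predefined_options().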
class CustomStateBackend(StateBackend):
"""
    A wrapper around a customized Java state backend created from the provided `StateBackendFactory`.
"""
def __init__(self, j_custom_state_backend):
super(CustomStateBackend, self).__init__(j_custom_state_backend)
| 47.88191
| 99
| 0.690849
|
self):
return self._j_fs_state_backend.getCheckpointPath().toString()
def get_min_file_size_threshold(self):
return self._j_fs_state_backend.getMinFileSizeThreshold()
def is_using_asynchronous_snapshots(self):
return self._j_fs_state_backend.isUsingAsynchronousSnapshots()
def get_write_buffer_size(self):
return self._j_fs_state_backend.getWriteBufferSize()
class RocksDBStateBackend(StateBackend):
def __init__(self,
checkpoint_data_uri=None,
enable_incremental_checkpointing=None,
checkpoint_stream_backend=None,
j_rocks_db_state_backend=None):
if j_rocks_db_state_backend is None:
gateway = get_gateway()
JTernaryBoolean = gateway.jvm.org.apache.flink.util.TernaryBoolean
JRocksDBStateBackend = gateway.jvm.org.apache.flink.contrib.streaming.state \
.RocksDBStateBackend
if enable_incremental_checkpointing not in (None, True, False):
                raise TypeError("Unsupported input for 'enable_incremental_checkpointing': %s, "
                                "the value of the parameter should be None or "
                                "True or False." % enable_incremental_checkpointing)
if checkpoint_data_uri is not None:
if enable_incremental_checkpointing is None:
j_rocks_db_state_backend = JRocksDBStateBackend(checkpoint_data_uri)
else:
j_rocks_db_state_backend = \
JRocksDBStateBackend(checkpoint_data_uri, enable_incremental_checkpointing)
elif isinstance(checkpoint_stream_backend, StateBackend):
if enable_incremental_checkpointing is None:
j_enable_incremental_checkpointing = JTernaryBoolean.UNDEFINED
elif enable_incremental_checkpointing is True:
j_enable_incremental_checkpointing = JTernaryBoolean.TRUE
else:
j_enable_incremental_checkpointing = JTernaryBoolean.FALSE
j_rocks_db_state_backend = \
JRocksDBStateBackend(checkpoint_stream_backend._j_state_backend,
j_enable_incremental_checkpointing)
self._j_rocks_db_state_backend = j_rocks_db_state_backend
super(RocksDBStateBackend, self).__init__(j_rocks_db_state_backend)
def get_checkpoint_backend(self):
j_state_backend = self._j_rocks_db_state_backend.getCheckpointBackend()
return _from_j_state_backend(j_state_backend)
def set_db_storage_paths(self, *paths):
if len(paths) < 1:
self._j_rocks_db_state_backend.setDbStoragePath(None)
else:
gateway = get_gateway()
j_path_array = gateway.new_array(gateway.jvm.String, len(paths))
for i in range(0, len(paths)):
j_path_array[i] = paths[i]
self._j_rocks_db_state_backend.setDbStoragePaths(j_path_array)
def get_db_storage_paths(self):
return list(self._j_rocks_db_state_backend.getDbStoragePaths())
def is_incremental_checkpoints_enabled(self):
return self._j_rocks_db_state_backend.isIncrementalCheckpointsEnabled()
def is_ttl_compaction_filter_enabled(self):
return self._j_rocks_db_state_backend.isTtlCompactionFilterEnabled()
def enable_ttl_compaction_filter(self):
self._j_rocks_db_state_backend.enableTtlCompactionFilter()
def set_predefined_options(self, options):
gateway = get_gateway()
JPredefinedOptions = gateway.jvm.org.apache.flink.contrib.streaming.state.PredefinedOptions
if options == PredefinedOptions.DEFAULT:
self._j_rocks_db_state_backend.setPredefinedOptions(JPredefinedOptions.DEFAULT)
elif options == PredefinedOptions.SPINNING_DISK_OPTIMIZED:
self._j_rocks_db_state_backend.setPredefinedOptions(
JPredefinedOptions.SPINNING_DISK_OPTIMIZED)
elif options == PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM:
self._j_rocks_db_state_backend.setPredefinedOptions(
JPredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM)
elif options == PredefinedOptions.FLASH_SSD_OPTIMIZED:
self._j_rocks_db_state_backend.setPredefinedOptions(
JPredefinedOptions.FLASH_SSD_OPTIMIZED)
else:
raise TypeError("Unsupported options: %s, the supported options are: "
"PredefinedOptions.DEFAULT, PredefinedOptions.SPINNING_DISK_OPTIMIZED,"
" PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM and "
"PredefinedOptions.FLASH_SSD_OPTIMIZED")
def get_predefined_options(self):
j_predefined_options = self._j_rocks_db_state_backend.getPredefinedOptions()
gateway = get_gateway()
JPredefinedOptions = gateway.jvm.org.apache.flink.contrib.streaming.state.PredefinedOptions
if j_predefined_options == JPredefinedOptions.DEFAULT:
return PredefinedOptions.DEFAULT
elif j_predefined_options == JPredefinedOptions.FLASH_SSD_OPTIMIZED:
return PredefinedOptions.FLASH_SSD_OPTIMIZED
elif j_predefined_options == JPredefinedOptions.SPINNING_DISK_OPTIMIZED:
return PredefinedOptions.SPINNING_DISK_OPTIMIZED
elif j_predefined_options == JPredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM:
return PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM
else:
raise Exception("Unsupported java options: %s" % j_predefined_options)
def set_options(self, options_factory_class_name):
gateway = get_gateway()
JOptionsFactory = gateway.jvm.org.apache.flink.contrib.streaming.state.OptionsFactory
j_options_factory_clz = load_java_class(options_factory_class_name)
if not get_java_class(JOptionsFactory).isAssignableFrom(j_options_factory_clz):
            raise ValueError("The input class does not implement OptionsFactory.")
self._j_rocks_db_state_backend.setOptions(j_options_factory_clz.newInstance())
def get_options(self):
j_options_factory = self._j_rocks_db_state_backend.getOptions()
if j_options_factory is not None:
return j_options_factory.getClass().getName()
else:
return None
def get_number_of_transfering_threads(self):
return self._j_rocks_db_state_backend.getNumberOfTransferingThreads()
def set_number_of_transfering_threads(self, number_of_transfering_threads):
self._j_rocks_db_state_backend.setNumberOfTransferingThreads(number_of_transfering_threads)
def __str__(self):
return self._j_rocks_db_state_backend.toString()
class PredefinedOptions(object):
DEFAULT = 0
SPINNING_DISK_OPTIMIZED = 1
SPINNING_DISK_OPTIMIZED_HIGH_MEM = 2
FLASH_SSD_OPTIMIZED = 3
class CustomStateBackend(StateBackend):
def __init__(self, j_custom_state_backend):
super(CustomStateBackend, self).__init__(j_custom_state_backend)
| true
| true
|
f71863ba6ce37bb3f9c1ddaa3068907ac1126cca
| 347
|
py
|
Python
|
line_chain/configs/__init__.py
|
smuelpeng/line-chain
|
4b561fab001ff0cf15ac3b30d4bcf06f2ba92f0e
|
[
"MIT"
] | 1
|
2019-05-31T06:51:52.000Z
|
2019-05-31T06:51:52.000Z
|
torch_basic_models/configs/__init__.py
|
FebruaryBreeze/torch-basic-models
|
ec819c93f7eee8cc99688cfe97bda12d1c55c5f3
|
[
"MIT"
] | 1
|
2021-01-04T07:27:40.000Z
|
2021-01-04T15:27:39.000Z
|
torch_basic_models/configs/__init__.py
|
FebruaryBreeze/torch-basic-models
|
ec819c93f7eee8cc99688cfe97bda12d1c55c5f3
|
[
"MIT"
] | 2
|
2019-05-31T07:11:23.000Z
|
2021-01-04T07:08:23.000Z
|
from pathlib import Path
import json_schema_to_class
current_dir: Path = Path(__file__).parent
json_schema_to_class.generate_dir(
schema_dir=current_dir.parent / 'schema',
output_dir=current_dir / 'build'
)
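# json_schema_to_class generates Python classes from the JSON schemas in
# ../schema into the local 'build' package at import time; the generated names
# are re-exported below, and the module-level helpers are deleted afterwards to
# keep the package namespace clean.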
if __name__ != '__main__':
from .build import * # noqa: F403
del json_schema_to_class
del current_dir
del Path
| 20.411765
| 45
| 0.737752
|
from pathlib import Path
import json_schema_to_class
current_dir: Path = Path(__file__).parent
json_schema_to_class.generate_dir(
schema_dir=current_dir.parent / 'schema',
output_dir=current_dir / 'build'
)
if __name__ != '__main__':
from .build import *
del json_schema_to_class
del current_dir
del Path
| true
| true
|
f71864d5160f3b67a35171531477e8a6ec7afbf2
| 543
|
py
|
Python
|
test/test_add_contact.py
|
agaklo2/python_training
|
2a2efcdd7b3c3043b6cade3f43c130a266b0d6c0
|
[
"Apache-2.0"
] | null | null | null |
test/test_add_contact.py
|
agaklo2/python_training
|
2a2efcdd7b3c3043b6cade3f43c130a266b0d6c0
|
[
"Apache-2.0"
] | null | null | null |
test/test_add_contact.py
|
agaklo2/python_training
|
2a2efcdd7b3c3043b6cade3f43c130a266b0d6c0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_add_contact(app, db, json_contacts, check_ui):
contact = json_contacts
old_contacts = db.get_contact_list()
app.contact.add_new_contact(contact)
new_contacts = db.get_contact_list()
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
| 38.785714
| 123
| 0.745856
|
from model.contact import Contact
def test_add_contact(app, db, json_contacts, check_ui):
contact = json_contacts
old_contacts = db.get_contact_list()
app.contact.add_new_contact(contact)
new_contacts = db.get_contact_list()
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
| true
| true
|
f71864dca9bfab98675c8966d25abcbf121065d9
| 4,725
|
py
|
Python
|
library/panos_lic.py
|
rtodto/ansible-pan
|
b38bfec1883b456a4188112605d24e0e170134f7
|
[
"Apache-2.0"
] | 1
|
2019-04-19T23:08:27.000Z
|
2019-04-19T23:08:27.000Z
|
library/panos_lic.py
|
rtodto/ansible-pan
|
b38bfec1883b456a4188112605d24e0e170134f7
|
[
"Apache-2.0"
] | null | null | null |
library/panos_lic.py
|
rtodto/ansible-pan
|
b38bfec1883b456a4188112605d24e0e170134f7
|
[
"Apache-2.0"
] | 2
|
2019-01-31T02:51:08.000Z
|
2020-09-03T15:45:52.000Z
|
#!/usr/bin/env python
# Copyright 2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: panos_lic
short_description: apply authcode to a device/instance
description:
- Apply an authcode to a device.
- The authcode should have been previously registered on the Palo Alto Networks support portal.
- The device should have Internet access.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
auth_code:
description:
- authcode to be applied
required: true
force:
description:
- whether to apply authcode even if device is already licensed
required: false
default: "false"
'''
EXAMPLES = '''
- hosts: localhost
connection: local
tasks:
- name: fetch license
panos_lic:
ip_address: "192.168.1.1"
password: "paloalto"
auth_code: "IBADCODE"
register: result
- name: Display serialnumber (if already registered)
debug:
var: "{{result.serialnumber}}"
'''
RETURN = '''
serialnumber:
description: serialnumber of the device in case that it has been already registered
returned: success
type: string
sample: 007200004214
'''
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_serial(xapi, module):
xapi.op(cmd="show system info", cmd_xml=True)
r = xapi.element_root
serial = r.find('.//serial')
if serial is None:
module.fail_json(msg="No <serial> tag in show system info")
serial = serial.text
return serial
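# In both license operations below, pan.xapi may raise PanXapiError even when
# the device reports success, so the handlers inspect xapi.xml_document to
# decide whether to return, fail the module, or re-raise.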
def apply_authcode(xapi, module, auth_code):
try:
xapi.op(cmd='request license fetch auth-code "%s"' % auth_code,
cmd_xml=True)
except pan.xapi.PanXapiError:
if hasattr(xapi, 'xml_document'):
if 'Successfully' in xapi.xml_document:
return
if 'Invalid Auth Code' in xapi.xml_document:
module.fail_json(msg="Invalid Auth Code")
raise
return
def fetch_authcode(xapi, module):
try:
xapi.op(cmd='request license fetch', cmd_xml=True)
except pan.xapi.PanXapiError:
if hasattr(xapi, 'xml_document'):
if 'Successfully' in xapi.xml_document:
return
if 'Invalid Auth Code' in xapi.xml_document:
module.fail_json(msg="Invalid Auth Code")
raise
return
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
auth_code=dict(),
username=dict(default='admin'),
force=dict(type='bool', default=False)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
auth_code = module.params["auth_code"]
force = module.params['force']
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
if not force:
serialnumber = get_serial(xapi, module)
if serialnumber != 'unknown':
return module.exit_json(changed=False, serialnumber=serialnumber)
if auth_code:
apply_authcode(xapi, module, auth_code)
else:
fetch_authcode(xapi, module)
module.exit_json(changed=True, msg="okey dokey")
if __name__ == '__main__':
main()
| 27.47093
| 99
| 0.641905
|
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: panos_lic
short_description: apply authcode to a device/instance
description:
- Apply an authcode to a device.
- The authcode should have been previously registered on the Palo Alto Networks support portal.
- The device should have Internet access.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
auth_code:
description:
- authcode to be applied
required: true
force:
description:
- whether to apply authcode even if device is already licensed
required: false
default: "false"
'''
EXAMPLES = '''
- hosts: localhost
connection: local
tasks:
- name: fetch license
panos_lic:
ip_address: "192.168.1.1"
password: "paloalto"
auth_code: "IBADCODE"
register: result
- name: Display serialnumber (if already registered)
debug:
var: "{{result.serialnumber}}"
'''
RETURN = '''
serialnumber:
description: serialnumber of the device in case that it has been already registered
returned: success
type: string
sample: 007200004214
'''
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_serial(xapi, module):
xapi.op(cmd="show system info", cmd_xml=True)
r = xapi.element_root
serial = r.find('.//serial')
if serial is None:
module.fail_json(msg="No <serial> tag in show system info")
serial = serial.text
return serial
def apply_authcode(xapi, module, auth_code):
try:
xapi.op(cmd='request license fetch auth-code "%s"' % auth_code,
cmd_xml=True)
except pan.xapi.PanXapiError:
if hasattr(xapi, 'xml_document'):
if 'Successfully' in xapi.xml_document:
return
if 'Invalid Auth Code' in xapi.xml_document:
module.fail_json(msg="Invalid Auth Code")
raise
return
def fetch_authcode(xapi, module):
try:
xapi.op(cmd='request license fetch', cmd_xml=True)
except pan.xapi.PanXapiError:
if hasattr(xapi, 'xml_document'):
if 'Successfully' in xapi.xml_document:
return
if 'Invalid Auth Code' in xapi.xml_document:
module.fail_json(msg="Invalid Auth Code")
raise
return
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
auth_code=dict(),
username=dict(default='admin'),
force=dict(type='bool', default=False)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
auth_code = module.params["auth_code"]
force = module.params['force']
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
if not force:
serialnumber = get_serial(xapi, module)
if serialnumber != 'unknown':
return module.exit_json(changed=False, serialnumber=serialnumber)
if auth_code:
apply_authcode(xapi, module, auth_code)
else:
fetch_authcode(xapi, module)
module.exit_json(changed=True, msg="okey dokey")
if __name__ == '__main__':
main()
| true
| true
|
f718673d1ce70cc80951a336ff5598237edaceba
| 2,939
|
py
|
Python
|
plotly/validators/bar/_error_y.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/bar/_error_y.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/bar/_error_y.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class ErrorYValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name='error_y', parent_name='bar', **kwargs):
super(ErrorYValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'ErrorY'),
data_docs=kwargs.pop(
'data_docs', """
array
                Sets the data corresponding to the length of each
error bar. Values are plotted relative to the
underlying data.
arrayminus
                Sets the data corresponding to the length of each
error bar in the bottom (left) direction for
                vertical (horizontal) bars. Values are plotted
relative to the underlying data.
arrayminussrc
Sets the source reference on plot.ly for
arrayminus .
arraysrc
Sets the source reference on plot.ly for array
.
color
                Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have
the same length in both direction (top/bottom
for vertical bars, left/right for horizontal
                bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error
                bars. If "constant", the bar lengths are of a
constant value. Set this constant in `value`.
If "percent", the bar lengths correspond to a
percentage of underlying data. Set this
percentage in `value`. If "sqrt", the bar
                lengths correspond to the square of the
underlying data. If "array", the bar lengths
are set with data set `array`.
value
Sets the value of either the percentage (if
`type` is set to "percent") or the constant (if
`type` is set to "constant") corresponding to
the lengths of the error bars.
valueminus
Sets the value of either the percentage (if
`type` is set to "percent") or the constant (if
`type` is set to "constant") corresponding to
the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
visible
Determines whether or not this set of error
bars is visible.
width
Sets the width (in px) of the cross-bar at both
ends of the error bars.
"""
),
**kwargs
)
| 40.819444
| 75
| 0.544403
|
import _plotly_utils.basevalidators
class ErrorYValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name='error_y', parent_name='bar', **kwargs):
super(ErrorYValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'ErrorY'),
data_docs=kwargs.pop(
'data_docs', """
array
                Sets the data corresponding to the length of each
error bar. Values are plotted relative to the
underlying data.
arrayminus
                Sets the data corresponding to the length of each
error bar in the bottom (left) direction for
                vertical (horizontal) bars. Values are plotted
relative to the underlying data.
arrayminussrc
Sets the source reference on plot.ly for
arrayminus .
arraysrc
Sets the source reference on plot.ly for array
.
color
                Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have
the same length in both direction (top/bottom
for vertical bars, left/right for horizontal
                bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error
                bars. If "constant", the bar lengths are of a
constant value. Set this constant in `value`.
If "percent", the bar lengths correspond to a
percentage of underlying data. Set this
percentage in `value`. If "sqrt", the bar
                lengths correspond to the square of the
underlying data. If "array", the bar lengths
are set with data set `array`.
value
Sets the value of either the percentage (if
`type` is set to "percent") or the constant (if
`type` is set to "constant") corresponding to
the lengths of the error bars.
valueminus
Sets the value of either the percentage (if
`type` is set to "percent") or the constant (if
`type` is set to "constant") corresponding to
the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
visible
Determines whether or not this set of error
bars is visible.
width
Sets the width (in px) of the cross-bar at both
ends of the error bars.
"""
),
**kwargs
)
| true
| true
|
f71868a4b8ebf3789e41169b56a1d66e8c56afee
| 3,995
|
py
|
Python
|
tools/evo-plot/evo/tools/settings_template.py
|
jiexuan/evaluation_tools
|
d8cab5cea2c859ef6067aaedc8cf11be102ad7f8
|
[
"MIT"
] | 12
|
2019-05-13T10:20:47.000Z
|
2022-02-16T03:40:47.000Z
|
tools/evo-plot/evo/tools/settings_template.py
|
michaelczhou/evaluation_tools
|
1ef3f6d65869990eb35b6e69106a77e0baf2c0b4
|
[
"MIT"
] | null | null | null |
tools/evo-plot/evo/tools/settings_template.py
|
michaelczhou/evaluation_tools
|
1ef3f6d65869990eb35b6e69106a77e0baf2c0b4
|
[
"MIT"
] | 7
|
2019-04-24T02:33:09.000Z
|
2021-01-13T08:33:38.000Z
|
"""
default package settings definition
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import pkgutil
def get_default_plot_backend():
backends = {"PyQt5": "Qt5Agg", "PyQt4": "Qt4Agg"}
for pkg in backends:
if pkgutil.find_loader(pkg) is not None:
return backends[pkg]
return "TkAgg"
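# The value returned above seeds the "plot_backend" entry in
# DEFAULT_SETTINGS_DICT_DOC below: "Qt5Agg"/"Qt4Agg" when the matching PyQt
# package is importable, otherwise "TkAgg".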
# default settings with documentation
DEFAULT_SETTINGS_DICT_DOC = {
"plot_xyz_realistic": (
True,
"Equal axes ratio in 'xyz' plot mode for realistic trajectory plots."
),
"plot_backend": (
get_default_plot_backend(),
"matplotlib backend - default: 'Qt{4, 5}Agg' (if PyQt is installed) or 'TkAgg'."
),
"plot_hideref": (
False,
"Hide the reference trajectory in trajectory plots."
),
"plot_linewidth": (
1.5,
"Line width value supported by matplotlib."
),
"plot_usetex": (
False,
"Use the LaTeX renderer configured in plot_texsystem for plots.",
),
"plot_texsystem": (
"pdflatex",
"'xelatex', 'lualatex' or 'pdflatex', see: https://matplotlib.org/users/pgf.html",
),
"plot_fontfamily": (
"sans-serif",
"Font family string supported by matplotlib."
),
"plot_fontscale": (
1.0,
"Font scale value, see: https://seaborn.pydata.org/generated/seaborn.set.html"
),
"plot_split": (
False,
"Show / save each figure separately instead of a collection."
),
"plot_figsize": (
[6, 6],
"The default size of one (sub)plot figure (width, height)."
),
"plot_trajectory_cmap": (
"jet",
"matplotlib color map used for mapping values on a trajectory.",
),
"plot_multi_cmap": (
"none",
"Color map for coloring plots from multiple data sources.\n"
+ "'none' will use the default color palette, see plot_seaborn_palette."
),
"plot_invert_xaxis": (
False,
"Invert the x-axis of plots."
),
"plot_invert_yaxis": (
False,
"Invert the y-axis of plots."
),
"plot_seaborn_style": (
"darkgrid",
"Defines the plot background/grid.\n"
+ "Options: 'whitegrid', 'darkgrid', 'white' or 'dark'."
),
"plot_seaborn_palette": (
"deep",
"Default color palette of seaborn. Can also be a list of colors.\n"
+ "See: https://seaborn.pydata.org/generated/seaborn.color_palette.html"
),
"plot_export_format": (
"pdf",
"File format supported by matplotlib for exporting plots."
),
"table_export_format": (
"csv",
"Format for exporting tables, e.g. 'csv', 'excel', 'latex', 'json'...",
),
"table_export_data": (
"stats",
"Which data to export: 'info', 'stats' or 'error_array'.",
),
"table_export_transpose": (
True,
"Transpose tables for export."
),
"save_traj_in_zip": (
False,
"Store backup trajectories in result zip files (increases size)."
),
"logging_format": (
"%(message)s",
"Format string for the logging module (console only)."
),
"logfile_enabled": (
False,
"Whether to write a logfile to the home folder."
)
}
# without documentation
DEFAULT_SETTINGS_DICT = {k: v[0] for k, v in DEFAULT_SETTINGS_DICT_DOC.items()}
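# Illustrative lookups into the value-only dict built above, e.g.:
#   DEFAULT_SETTINGS_DICT["plot_export_format"]  # -> "pdf"
#   DEFAULT_SETTINGS_DICT["plot_figsize"]        # -> [6, 6]
# DEFAULT_SETTINGS_DICT_DOC additionally pairs each value with its help text.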
| 30.037594
| 90
| 0.617772
|
import pkgutil
def get_default_plot_backend():
backends = {"PyQt5": "Qt5Agg", "PyQt4": "Qt4Agg"}
for pkg in backends:
if pkgutil.find_loader(pkg) is not None:
return backends[pkg]
return "TkAgg"
DEFAULT_SETTINGS_DICT_DOC = {
"plot_xyz_realistic": (
True,
"Equal axes ratio in 'xyz' plot mode for realistic trajectory plots."
),
"plot_backend": (
get_default_plot_backend(),
"matplotlib backend - default: 'Qt{4, 5}Agg' (if PyQt is installed) or 'TkAgg'."
),
"plot_hideref": (
False,
"Hide the reference trajectory in trajectory plots."
),
"plot_linewidth": (
1.5,
"Line width value supported by matplotlib."
),
"plot_usetex": (
False,
"Use the LaTeX renderer configured in plot_texsystem for plots.",
),
"plot_texsystem": (
"pdflatex",
"'xelatex', 'lualatex' or 'pdflatex', see: https://matplotlib.org/users/pgf.html",
),
"plot_fontfamily": (
"sans-serif",
"Font family string supported by matplotlib."
),
"plot_fontscale": (
1.0,
"Font scale value, see: https://seaborn.pydata.org/generated/seaborn.set.html"
),
"plot_split": (
False,
"Show / save each figure separately instead of a collection."
),
"plot_figsize": (
[6, 6],
"The default size of one (sub)plot figure (width, height)."
),
"plot_trajectory_cmap": (
"jet",
"matplotlib color map used for mapping values on a trajectory.",
),
"plot_multi_cmap": (
"none",
"Color map for coloring plots from multiple data sources.\n"
+ "'none' will use the default color palette, see plot_seaborn_palette."
),
"plot_invert_xaxis": (
False,
"Invert the x-axis of plots."
),
"plot_invert_yaxis": (
False,
"Invert the y-axis of plots."
),
"plot_seaborn_style": (
"darkgrid",
"Defines the plot background/grid.\n"
+ "Options: 'whitegrid', 'darkgrid', 'white' or 'dark'."
),
"plot_seaborn_palette": (
"deep",
"Default color palette of seaborn. Can also be a list of colors.\n"
+ "See: https://seaborn.pydata.org/generated/seaborn.color_palette.html"
),
"plot_export_format": (
"pdf",
"File format supported by matplotlib for exporting plots."
),
"table_export_format": (
"csv",
"Format for exporting tables, e.g. 'csv', 'excel', 'latex', 'json'...",
),
"table_export_data": (
"stats",
"Which data to export: 'info', 'stats' or 'error_array'.",
),
"table_export_transpose": (
True,
"Transpose tables for export."
),
"save_traj_in_zip": (
False,
"Store backup trajectories in result zip files (increases size)."
),
"logging_format": (
"%(message)s",
"Format string for the logging module (console only)."
),
"logfile_enabled": (
False,
"Whether to write a logfile to the home folder."
)
}
DEFAULT_SETTINGS_DICT = {k: v[0] for k, v in DEFAULT_SETTINGS_DICT_DOC.items()}
| true
| true
|
f718694017ecb8bcebe5c98f1ca749ae5c0352a5
| 24,505
|
py
|
Python
|
lib-python/2.7/test/test_fractions.py
|
FloMom/pypy
|
d0cf0c5ed26a8b22a23b80779e5181a6bc9847c9
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
lib-python/2.7/test/test_fractions.py
|
FloMom/pypy
|
d0cf0c5ed26a8b22a23b80779e5181a6bc9847c9
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
lib-python/2.7/test/test_fractions.py
|
FloMom/pypy
|
d0cf0c5ed26a8b22a23b80779e5181a6bc9847c9
|
[
"Apache-2.0",
"OpenSSL"
] | 1
|
2021-10-10T13:53:32.000Z
|
2021-10-10T13:53:32.000Z
|
"""Tests for Lib/fractions.py."""
from decimal import Decimal
from test.test_support import run_unittest
import math
import numbers
import operator
import fractions
import unittest
from copy import copy, deepcopy
from cPickle import dumps, loads
F = fractions.Fraction
gcd = fractions.gcd
# decorator for skipping tests on non-IEEE 754 platforms
requires_IEEE_754 = unittest.skipUnless(
float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
class DummyFloat(object):
"""Dummy float class for testing comparisons with Fractions"""
def __init__(self, value):
if not isinstance(value, float):
raise TypeError("DummyFloat can only be initialized from float")
self.value = value
def _richcmp(self, other, op):
if isinstance(other, numbers.Rational):
return op(F.from_float(self.value), other)
elif isinstance(other, DummyFloat):
return op(self.value, other.value)
else:
return NotImplemented
def __eq__(self, other): return self._richcmp(other, operator.eq)
def __le__(self, other): return self._richcmp(other, operator.le)
def __lt__(self, other): return self._richcmp(other, operator.lt)
def __ge__(self, other): return self._richcmp(other, operator.ge)
def __gt__(self, other): return self._richcmp(other, operator.gt)
# shouldn't be calling __float__ at all when doing comparisons
def __float__(self):
assert False, "__float__ should not be invoked for comparisons"
# same goes for subtraction
def __sub__(self, other):
assert False, "__sub__ should not be invoked for comparisons"
__rsub__ = __sub__
# Silence Py3k warning
__hash__ = None
class DummyRational(object):
"""Test comparison of Fraction with a naive rational implementation."""
def __init__(self, num, den):
g = gcd(num, den)
self.num = num // g
self.den = den // g
def __eq__(self, other):
if isinstance(other, fractions.Fraction):
return (self.num == other._numerator and
self.den == other._denominator)
else:
return NotImplemented
def __lt__(self, other):
return(self.num * other._denominator < self.den * other._numerator)
def __gt__(self, other):
return(self.num * other._denominator > self.den * other._numerator)
def __le__(self, other):
return(self.num * other._denominator <= self.den * other._numerator)
def __ge__(self, other):
return(self.num * other._denominator >= self.den * other._numerator)
# this class is for testing comparisons; conversion to float
# should never be used for a comparison, since it loses accuracy
def __float__(self):
assert False, "__float__ should not be invoked"
# Silence Py3k warning
__hash__ = None
class DummyFraction(fractions.Fraction):
"""Dummy Fraction subclass for copy and deepcopy testing."""
class GcdTest(unittest.TestCase):
def testMisc(self):
self.assertEqual(0, gcd(0, 0))
self.assertEqual(1, gcd(1, 0))
self.assertEqual(-1, gcd(-1, 0))
self.assertEqual(1, gcd(0, 1))
self.assertEqual(-1, gcd(0, -1))
self.assertEqual(1, gcd(7, 1))
self.assertEqual(-1, gcd(7, -1))
self.assertEqual(1, gcd(-23, 15))
self.assertEqual(12, gcd(120, 84))
self.assertEqual(-12, gcd(84, -120))
def _components(r):
return (r.numerator, r.denominator)
class FractionTest(unittest.TestCase):
def assertTypedEquals(self, expected, actual):
"""Asserts that both the types and values are the same."""
self.assertEqual(type(expected), type(actual))
self.assertEqual(expected, actual)
def assertRaisesMessage(self, exc_type, message,
callable, *args, **kwargs):
"""Asserts that callable(*args, **kwargs) raises exc_type(message)."""
try:
callable(*args, **kwargs)
except exc_type, e:
self.assertEqual(message, str(e))
else:
self.fail("%s not raised" % exc_type.__name__)
def testInit(self):
self.assertEqual((0, 1), _components(F()))
self.assertEqual((7, 1), _components(F(7)))
self.assertEqual((7, 3), _components(F(F(7, 3))))
self.assertEqual((-1, 1), _components(F(-1, 1)))
self.assertEqual((-1, 1), _components(F(1, -1)))
self.assertEqual((1, 1), _components(F(-2, -2)))
self.assertEqual((1, 2), _components(F(5, 10)))
self.assertEqual((7, 15), _components(F(7, 15)))
self.assertEqual((10**23, 1), _components(F(10**23)))
self.assertEqual((3, 77), _components(F(F(3, 7), 11)))
self.assertEqual((-9, 5), _components(F(2, F(-10, 9))))
self.assertEqual((2486, 2485), _components(F(F(22, 7), F(355, 113))))
self.assertRaisesMessage(ZeroDivisionError, "Fraction(12, 0)",
F, 12, 0)
self.assertRaises(TypeError, F, 1.5 + 3j)
self.assertRaises(TypeError, F, "3/2", 3)
self.assertRaises(TypeError, F, 3, 0j)
self.assertRaises(TypeError, F, 3, 1j)
@requires_IEEE_754
def testInitFromFloat(self):
self.assertEqual((5, 2), _components(F(2.5)))
self.assertEqual((0, 1), _components(F(-0.0)))
self.assertEqual((3602879701896397, 36028797018963968),
_components(F(0.1)))
self.assertRaises(TypeError, F, float('nan'))
self.assertRaises(TypeError, F, float('inf'))
self.assertRaises(TypeError, F, float('-inf'))
def testInitFromDecimal(self):
self.assertEqual((11, 10),
_components(F(Decimal('1.1'))))
self.assertEqual((7, 200),
_components(F(Decimal('3.5e-2'))))
self.assertEqual((0, 1),
_components(F(Decimal('.000e20'))))
self.assertRaises(TypeError, F, Decimal('nan'))
self.assertRaises(TypeError, F, Decimal('snan'))
self.assertRaises(TypeError, F, Decimal('inf'))
self.assertRaises(TypeError, F, Decimal('-inf'))
def testFromString(self):
self.assertEqual((5, 1), _components(F("5")))
self.assertEqual((3, 2), _components(F("3/2")))
self.assertEqual((3, 2), _components(F(" \n +3/2")))
self.assertEqual((-3, 2), _components(F("-3/2 ")))
self.assertEqual((13, 2), _components(F(" 013/02 \n ")))
self.assertEqual((13, 2), _components(F(u" 013/02 \n ")))
self.assertEqual((16, 5), _components(F(" 3.2 ")))
self.assertEqual((-16, 5), _components(F(u" -3.2 ")))
self.assertEqual((-3, 1), _components(F(u" -3. ")))
self.assertEqual((3, 5), _components(F(u" .6 ")))
self.assertEqual((1, 3125), _components(F("32.e-5")))
self.assertEqual((1000000, 1), _components(F("1E+06")))
self.assertEqual((-12300, 1), _components(F("-1.23e4")))
self.assertEqual((0, 1), _components(F(" .0e+0\t")))
self.assertEqual((0, 1), _components(F("-0.000e0")))
self.assertRaisesMessage(
ZeroDivisionError, "Fraction(3, 0)",
F, "3/0")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '3/'",
F, "3/")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '/2'",
F, "/2")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '3 /2'",
F, "3 /2")
self.assertRaisesMessage(
# Denominators don't need a sign.
ValueError, "Invalid literal for Fraction: '3/+2'",
F, "3/+2")
self.assertRaisesMessage(
# Imitate float's parsing.
ValueError, "Invalid literal for Fraction: '+ 3/2'",
F, "+ 3/2")
self.assertRaisesMessage(
# Avoid treating '.' as a regex special character.
ValueError, "Invalid literal for Fraction: '3a2'",
F, "3a2")
self.assertRaisesMessage(
# Don't accept combinations of decimals and fractions.
ValueError, "Invalid literal for Fraction: '3/7.2'",
F, "3/7.2")
self.assertRaisesMessage(
# Don't accept combinations of decimals and fractions.
ValueError, "Invalid literal for Fraction: '3.2/7'",
F, "3.2/7")
self.assertRaisesMessage(
# Allow 3. and .3, but not .
ValueError, "Invalid literal for Fraction: '.'",
F, ".")
def testImmutable(self):
r = F(7, 3)
r.__init__(2, 15)
self.assertEqual((7, 3), _components(r))
self.assertRaises(AttributeError, setattr, r, 'numerator', 12)
self.assertRaises(AttributeError, setattr, r, 'denominator', 6)
self.assertEqual((7, 3), _components(r))
# But if you _really_ need to:
r._numerator = 4
r._denominator = 2
self.assertEqual((4, 2), _components(r))
# Which breaks some important operations:
self.assertNotEqual(F(4, 2), r)
def testFromFloat(self):
self.assertRaises(TypeError, F.from_float, 3+4j)
self.assertEqual((10, 1), _components(F.from_float(10)))
bigint = 1234567890123456789
self.assertEqual((bigint, 1), _components(F.from_float(bigint)))
self.assertEqual((0, 1), _components(F.from_float(-0.0)))
self.assertEqual((10, 1), _components(F.from_float(10.0)))
self.assertEqual((-5, 2), _components(F.from_float(-2.5)))
self.assertEqual((99999999999999991611392, 1),
_components(F.from_float(1e23)))
self.assertEqual(float(10**23), float(F.from_float(1e23)))
self.assertEqual((3602879701896397, 1125899906842624),
_components(F.from_float(3.2)))
self.assertEqual(3.2, float(F.from_float(3.2)))
inf = 1e1000
nan = inf - inf
self.assertRaisesMessage(
TypeError, "Cannot convert inf to Fraction.",
F.from_float, inf)
self.assertRaisesMessage(
TypeError, "Cannot convert -inf to Fraction.",
F.from_float, -inf)
self.assertRaisesMessage(
TypeError, "Cannot convert nan to Fraction.",
F.from_float, nan)
def testFromDecimal(self):
self.assertRaises(TypeError, F.from_decimal, 3+4j)
self.assertEqual(F(10, 1), F.from_decimal(10))
self.assertEqual(F(0), F.from_decimal(Decimal("-0")))
self.assertEqual(F(5, 10), F.from_decimal(Decimal("0.5")))
self.assertEqual(F(5, 1000), F.from_decimal(Decimal("5e-3")))
self.assertEqual(F(5000), F.from_decimal(Decimal("5e3")))
self.assertEqual(1 - F(1, 10**30),
F.from_decimal(Decimal("0." + "9" * 30)))
self.assertRaisesMessage(
TypeError, "Cannot convert Infinity to Fraction.",
F.from_decimal, Decimal("inf"))
self.assertRaisesMessage(
TypeError, "Cannot convert -Infinity to Fraction.",
F.from_decimal, Decimal("-inf"))
self.assertRaisesMessage(
TypeError, "Cannot convert NaN to Fraction.",
F.from_decimal, Decimal("nan"))
self.assertRaisesMessage(
TypeError, "Cannot convert sNaN to Fraction.",
F.from_decimal, Decimal("snan"))
def testLimitDenominator(self):
rpi = F('3.1415926535897932')
self.assertEqual(rpi.limit_denominator(10000), F(355, 113))
self.assertEqual(-rpi.limit_denominator(10000), F(-355, 113))
self.assertEqual(rpi.limit_denominator(113), F(355, 113))
self.assertEqual(rpi.limit_denominator(112), F(333, 106))
self.assertEqual(F(201, 200).limit_denominator(100), F(1))
self.assertEqual(F(201, 200).limit_denominator(101), F(102, 101))
self.assertEqual(F(0).limit_denominator(10000), F(0))
for i in (0, -1):
self.assertRaisesMessage(
ValueError, "max_denominator should be at least 1",
F(1).limit_denominator, i)
def testConversions(self):
self.assertTypedEquals(-1, math.trunc(F(-11, 10)))
self.assertTypedEquals(-1, int(F(-11, 10)))
self.assertTypedEquals(1, math.trunc(F(11, 10)))
self.assertEqual(False, bool(F(0, 1)))
self.assertEqual(True, bool(F(3, 2)))
self.assertTypedEquals(0.1, float(F(1, 10)))
# Check that __float__ isn't implemented by converting the
# numerator and denominator to float before dividing.
self.assertRaises(OverflowError, float, long('2'*400+'7'))
self.assertAlmostEqual(2.0/3,
float(F(long('2'*400+'7'), long('3'*400+'1'))))
self.assertTypedEquals(0.1+0j, complex(F(1,10)))
def testArithmetic(self):
self.assertEqual(F(1, 2), F(1, 10) + F(2, 5))
self.assertEqual(F(-3, 10), F(1, 10) - F(2, 5))
self.assertEqual(F(1, 25), F(1, 10) * F(2, 5))
self.assertEqual(F(1, 4), F(1, 10) / F(2, 5))
self.assertTypedEquals(2, F(9, 10) // F(2, 5))
self.assertTypedEquals(10**23, F(10**23, 1) // F(1))
self.assertEqual(F(2, 3), F(-7, 3) % F(3, 2))
self.assertEqual(F(8, 27), F(2, 3) ** F(3))
self.assertEqual(F(27, 8), F(2, 3) ** F(-3))
self.assertTypedEquals(2.0, F(4) ** F(1, 2))
self.assertEqual(F(1, 1), +F(1, 1))
# Will return 1j in 3.0:
self.assertRaises(ValueError, pow, F(-1), F(1, 2))
def testMixedArithmetic(self):
self.assertTypedEquals(F(11, 10), F(1, 10) + 1)
self.assertTypedEquals(1.1, F(1, 10) + 1.0)
self.assertTypedEquals(1.1 + 0j, F(1, 10) + (1.0 + 0j))
self.assertTypedEquals(F(11, 10), 1 + F(1, 10))
self.assertTypedEquals(1.1, 1.0 + F(1, 10))
self.assertTypedEquals(1.1 + 0j, (1.0 + 0j) + F(1, 10))
self.assertTypedEquals(F(-9, 10), F(1, 10) - 1)
self.assertTypedEquals(-0.9, F(1, 10) - 1.0)
self.assertTypedEquals(-0.9 + 0j, F(1, 10) - (1.0 + 0j))
self.assertTypedEquals(F(9, 10), 1 - F(1, 10))
self.assertTypedEquals(0.9, 1.0 - F(1, 10))
self.assertTypedEquals(0.9 + 0j, (1.0 + 0j) - F(1, 10))
self.assertTypedEquals(F(1, 10), F(1, 10) * 1)
self.assertTypedEquals(0.1, F(1, 10) * 1.0)
self.assertTypedEquals(0.1 + 0j, F(1, 10) * (1.0 + 0j))
self.assertTypedEquals(F(1, 10), 1 * F(1, 10))
self.assertTypedEquals(0.1, 1.0 * F(1, 10))
self.assertTypedEquals(0.1 + 0j, (1.0 + 0j) * F(1, 10))
self.assertTypedEquals(F(1, 10), F(1, 10) / 1)
self.assertTypedEquals(0.1, F(1, 10) / 1.0)
self.assertTypedEquals(0.1 + 0j, F(1, 10) / (1.0 + 0j))
self.assertTypedEquals(F(10, 1), 1 / F(1, 10))
self.assertTypedEquals(10.0, 1.0 / F(1, 10))
self.assertTypedEquals(10.0 + 0j, (1.0 + 0j) / F(1, 10))
self.assertTypedEquals(0, F(1, 10) // 1)
self.assertTypedEquals(0.0, F(1, 10) // 1.0)
self.assertTypedEquals(10, 1 // F(1, 10))
self.assertTypedEquals(10**23, 10**22 // F(1, 10))
self.assertTypedEquals(10.0, 1.0 // F(1, 10))
self.assertTypedEquals(F(1, 10), F(1, 10) % 1)
self.assertTypedEquals(0.1, F(1, 10) % 1.0)
self.assertTypedEquals(F(0, 1), 1 % F(1, 10))
self.assertTypedEquals(0.0, 1.0 % F(1, 10))
# No need for divmod since we don't override it.
# ** has more interesting conversion rules.
self.assertTypedEquals(F(100, 1), F(1, 10) ** -2)
self.assertTypedEquals(F(100, 1), F(10, 1) ** 2)
self.assertTypedEquals(0.1, F(1, 10) ** 1.0)
self.assertTypedEquals(0.1 + 0j, F(1, 10) ** (1.0 + 0j))
self.assertTypedEquals(4 , 2 ** F(2, 1))
# Will return 1j in 3.0:
self.assertRaises(ValueError, pow, (-1), F(1, 2))
self.assertTypedEquals(F(1, 4) , 2 ** F(-2, 1))
self.assertTypedEquals(2.0 , 4 ** F(1, 2))
self.assertTypedEquals(0.25, 2.0 ** F(-2, 1))
self.assertTypedEquals(1.0 + 0j, (1.0 + 0j) ** F(1, 10))
def testMixingWithDecimal(self):
# Decimal refuses mixed comparisons.
self.assertRaisesMessage(
TypeError,
"unsupported operand type(s) for +: 'Fraction' and 'Decimal'",
operator.add, F(3,11), Decimal('3.1415926'))
self.assertRaisesMessage(
TypeError,
"unsupported operand type(s) for +: 'Decimal' and 'Fraction'",
operator.add, Decimal('3.1415926'), F(3,11))
self.assertNotEqual(F(5, 2), Decimal('2.5'))
def testComparisons(self):
self.assertTrue(F(1, 2) < F(2, 3))
self.assertFalse(F(1, 2) < F(1, 2))
self.assertTrue(F(1, 2) <= F(2, 3))
self.assertTrue(F(1, 2) <= F(1, 2))
self.assertFalse(F(2, 3) <= F(1, 2))
self.assertTrue(F(1, 2) == F(1, 2))
self.assertFalse(F(1, 2) == F(1, 3))
self.assertFalse(F(1, 2) != F(1, 2))
self.assertTrue(F(1, 2) != F(1, 3))
def testComparisonsDummyRational(self):
self.assertTrue(F(1, 2) == DummyRational(1, 2))
self.assertTrue(DummyRational(1, 2) == F(1, 2))
self.assertFalse(F(1, 2) == DummyRational(3, 4))
self.assertFalse(DummyRational(3, 4) == F(1, 2))
self.assertTrue(F(1, 2) < DummyRational(3, 4))
self.assertFalse(F(1, 2) < DummyRational(1, 2))
self.assertFalse(F(1, 2) < DummyRational(1, 7))
self.assertFalse(F(1, 2) > DummyRational(3, 4))
self.assertFalse(F(1, 2) > DummyRational(1, 2))
self.assertTrue(F(1, 2) > DummyRational(1, 7))
self.assertTrue(F(1, 2) <= DummyRational(3, 4))
self.assertTrue(F(1, 2) <= DummyRational(1, 2))
self.assertFalse(F(1, 2) <= DummyRational(1, 7))
self.assertFalse(F(1, 2) >= DummyRational(3, 4))
self.assertTrue(F(1, 2) >= DummyRational(1, 2))
self.assertTrue(F(1, 2) >= DummyRational(1, 7))
self.assertTrue(DummyRational(1, 2) < F(3, 4))
self.assertFalse(DummyRational(1, 2) < F(1, 2))
self.assertFalse(DummyRational(1, 2) < F(1, 7))
self.assertFalse(DummyRational(1, 2) > F(3, 4))
self.assertFalse(DummyRational(1, 2) > F(1, 2))
self.assertTrue(DummyRational(1, 2) > F(1, 7))
self.assertTrue(DummyRational(1, 2) <= F(3, 4))
self.assertTrue(DummyRational(1, 2) <= F(1, 2))
self.assertFalse(DummyRational(1, 2) <= F(1, 7))
self.assertFalse(DummyRational(1, 2) >= F(3, 4))
self.assertTrue(DummyRational(1, 2) >= F(1, 2))
self.assertTrue(DummyRational(1, 2) >= F(1, 7))
def testComparisonsDummyFloat(self):
x = DummyFloat(1./3.)
y = F(1, 3)
self.assertTrue(x != y)
self.assertTrue(x < y or x > y)
self.assertFalse(x == y)
self.assertFalse(x <= y and x >= y)
self.assertTrue(y != x)
self.assertTrue(y < x or y > x)
self.assertFalse(y == x)
self.assertFalse(y <= x and y >= x)
def testMixedLess(self):
self.assertTrue(2 < F(5, 2))
self.assertFalse(2 < F(4, 2))
self.assertTrue(F(5, 2) < 3)
self.assertFalse(F(4, 2) < 2)
self.assertTrue(F(1, 2) < 0.6)
self.assertFalse(F(1, 2) < 0.4)
self.assertTrue(0.4 < F(1, 2))
self.assertFalse(0.5 < F(1, 2))
self.assertFalse(float('inf') < F(1, 2))
self.assertTrue(float('-inf') < F(0, 10))
self.assertFalse(float('nan') < F(-3, 7))
self.assertTrue(F(1, 2) < float('inf'))
self.assertFalse(F(17, 12) < float('-inf'))
self.assertFalse(F(144, -89) < float('nan'))
def testMixedLessEqual(self):
self.assertTrue(0.5 <= F(1, 2))
self.assertFalse(0.6 <= F(1, 2))
self.assertTrue(F(1, 2) <= 0.5)
self.assertFalse(F(1, 2) <= 0.4)
self.assertTrue(2 <= F(4, 2))
self.assertFalse(2 <= F(3, 2))
self.assertTrue(F(4, 2) <= 2)
self.assertFalse(F(5, 2) <= 2)
self.assertFalse(float('inf') <= F(1, 2))
self.assertTrue(float('-inf') <= F(0, 10))
self.assertFalse(float('nan') <= F(-3, 7))
self.assertTrue(F(1, 2) <= float('inf'))
self.assertFalse(F(17, 12) <= float('-inf'))
self.assertFalse(F(144, -89) <= float('nan'))
def testBigFloatComparisons(self):
# Because 10**23 can't be represented exactly as a float:
self.assertFalse(F(10**23) == float(10**23))
# The first test demonstrates why these are important.
self.assertFalse(1e23 < float(F(math.trunc(1e23) + 1)))
self.assertTrue(1e23 < F(math.trunc(1e23) + 1))
self.assertFalse(1e23 <= F(math.trunc(1e23) - 1))
self.assertTrue(1e23 > F(math.trunc(1e23) - 1))
self.assertFalse(1e23 >= F(math.trunc(1e23) + 1))
def testBigComplexComparisons(self):
self.assertFalse(F(10**23) == complex(10**23))
self.assertRaises(TypeError, operator.gt, F(10**23), complex(10**23))
self.assertRaises(TypeError, operator.le, F(10**23), complex(10**23))
x = F(3, 8)
z = complex(0.375, 0.0)
w = complex(0.375, 0.2)
self.assertTrue(x == z)
self.assertFalse(x != z)
self.assertFalse(x == w)
self.assertTrue(x != w)
for op in operator.lt, operator.le, operator.gt, operator.ge:
self.assertRaises(TypeError, op, x, z)
self.assertRaises(TypeError, op, z, x)
self.assertRaises(TypeError, op, x, w)
self.assertRaises(TypeError, op, w, x)
def testMixedEqual(self):
self.assertTrue(0.5 == F(1, 2))
self.assertFalse(0.6 == F(1, 2))
self.assertTrue(F(1, 2) == 0.5)
self.assertFalse(F(1, 2) == 0.4)
self.assertTrue(2 == F(4, 2))
self.assertFalse(2 == F(3, 2))
self.assertTrue(F(4, 2) == 2)
self.assertFalse(F(5, 2) == 2)
self.assertFalse(F(5, 2) == float('nan'))
self.assertFalse(float('nan') == F(3, 7))
self.assertFalse(F(5, 2) == float('inf'))
self.assertFalse(float('-inf') == F(2, 5))
def testStringification(self):
self.assertEqual("Fraction(7, 3)", repr(F(7, 3)))
self.assertEqual("Fraction(6283185307, 2000000000)",
repr(F('3.1415926535')))
self.assertEqual("Fraction(-1, 100000000000000000000)",
repr(F(1, -10**20)))
self.assertEqual("7/3", str(F(7, 3)))
self.assertEqual("7", str(F(7, 1)))
def testHash(self):
self.assertEqual(hash(2.5), hash(F(5, 2)))
self.assertEqual(hash(10**50), hash(F(10**50)))
self.assertNotEqual(hash(float(10**23)), hash(F(10**23)))
def testApproximatePi(self):
# Algorithm borrowed from
# http://docs.python.org/lib/decimal-recipes.html
three = F(3)
lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24
while abs(s - lasts) > F(1, 10**9):
lasts = s
n, na = n+na, na+8
d, da = d+da, da+32
t = (t * n) / d
s += t
self.assertAlmostEqual(math.pi, s)
def testApproximateCos1(self):
# Algorithm borrowed from
# http://docs.python.org/lib/decimal-recipes.html
x = F(1)
i, lasts, s, fact, num, sign = 0, 0, F(1), 1, 1, 1
while abs(s - lasts) > F(1, 10**9):
lasts = s
i += 2
fact *= i * (i-1)
num *= x * x
sign *= -1
s += num / fact * sign
self.assertAlmostEqual(math.cos(1), s)
def test_copy_deepcopy_pickle(self):
r = F(13, 7)
dr = DummyFraction(13, 7)
self.assertEqual(r, loads(dumps(r)))
self.assertEqual(id(r), id(copy(r)))
self.assertEqual(id(r), id(deepcopy(r)))
self.assertNotEqual(id(dr), id(copy(dr)))
self.assertNotEqual(id(dr), id(deepcopy(dr)))
self.assertTypedEquals(dr, copy(dr))
self.assertTypedEquals(dr, deepcopy(dr))
def test_slots(self):
# Issue 4998
r = F(13, 7)
self.assertRaises(AttributeError, setattr, r, 'a', 10)
def test_main():
run_unittest(FractionTest, GcdTest)
if __name__ == '__main__':
test_main()
| 40.504132
| 79
| 0.574128
|
"""Tests for Lib/fractions.py."""
from decimal import Decimal
from test.test_support import run_unittest
import math
import numbers
import operator
import fractions
import unittest
from copy import copy, deepcopy
from cPickle import dumps, loads
F = fractions.Fraction
gcd = fractions.gcd
requires_IEEE_754 = unittest.skipUnless(
float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
class DummyFloat(object):
"""Dummy float class for testing comparisons with Fractions"""
def __init__(self, value):
if not isinstance(value, float):
raise TypeError("DummyFloat can only be initialized from float")
self.value = value
def _richcmp(self, other, op):
if isinstance(other, numbers.Rational):
return op(F.from_float(self.value), other)
elif isinstance(other, DummyFloat):
return op(self.value, other.value)
else:
return NotImplemented
def __eq__(self, other): return self._richcmp(other, operator.eq)
def __le__(self, other): return self._richcmp(other, operator.le)
def __lt__(self, other): return self._richcmp(other, operator.lt)
def __ge__(self, other): return self._richcmp(other, operator.ge)
def __gt__(self, other): return self._richcmp(other, operator.gt)
def __float__(self):
assert False, "__float__ should not be invoked for comparisons"
# same goes for subtraction
def __sub__(self, other):
assert False, "__sub__ should not be invoked for comparisons"
__rsub__ = __sub__
# Silence Py3k warning
__hash__ = None
class DummyRational(object):
"""Test comparison of Fraction with a naive rational implementation."""
def __init__(self, num, den):
g = gcd(num, den)
self.num = num // g
self.den = den // g
def __eq__(self, other):
if isinstance(other, fractions.Fraction):
return (self.num == other._numerator and
self.den == other._denominator)
else:
return NotImplemented
def __lt__(self, other):
return(self.num * other._denominator < self.den * other._numerator)
def __gt__(self, other):
return(self.num * other._denominator > self.den * other._numerator)
def __le__(self, other):
return(self.num * other._denominator <= self.den * other._numerator)
def __ge__(self, other):
return(self.num * other._denominator >= self.den * other._numerator)
# this class is for testing comparisons; conversion to float
# should never be used for a comparison, since it loses accuracy
def __float__(self):
assert False, "__float__ should not be invoked"
# Silence Py3k warning
__hash__ = None
class DummyFraction(fractions.Fraction):
"""Dummy Fraction subclass for copy and deepcopy testing."""
class GcdTest(unittest.TestCase):
def testMisc(self):
self.assertEqual(0, gcd(0, 0))
self.assertEqual(1, gcd(1, 0))
self.assertEqual(-1, gcd(-1, 0))
self.assertEqual(1, gcd(0, 1))
self.assertEqual(-1, gcd(0, -1))
self.assertEqual(1, gcd(7, 1))
self.assertEqual(-1, gcd(7, -1))
self.assertEqual(1, gcd(-23, 15))
self.assertEqual(12, gcd(120, 84))
self.assertEqual(-12, gcd(84, -120))
def _components(r):
return (r.numerator, r.denominator)
class FractionTest(unittest.TestCase):
def assertTypedEquals(self, expected, actual):
"""Asserts that both the types and values are the same."""
self.assertEqual(type(expected), type(actual))
self.assertEqual(expected, actual)
def assertRaisesMessage(self, exc_type, message,
callable, *args, **kwargs):
"""Asserts that callable(*args, **kwargs) raises exc_type(message)."""
try:
callable(*args, **kwargs)
except exc_type, e:
self.assertEqual(message, str(e))
else:
self.fail("%s not raised" % exc_type.__name__)
def testInit(self):
self.assertEqual((0, 1), _components(F()))
self.assertEqual((7, 1), _components(F(7)))
self.assertEqual((7, 3), _components(F(F(7, 3))))
self.assertEqual((-1, 1), _components(F(-1, 1)))
self.assertEqual((-1, 1), _components(F(1, -1)))
self.assertEqual((1, 1), _components(F(-2, -2)))
self.assertEqual((1, 2), _components(F(5, 10)))
self.assertEqual((7, 15), _components(F(7, 15)))
self.assertEqual((10**23, 1), _components(F(10**23)))
self.assertEqual((3, 77), _components(F(F(3, 7), 11)))
self.assertEqual((-9, 5), _components(F(2, F(-10, 9))))
self.assertEqual((2486, 2485), _components(F(F(22, 7), F(355, 113))))
self.assertRaisesMessage(ZeroDivisionError, "Fraction(12, 0)",
F, 12, 0)
self.assertRaises(TypeError, F, 1.5 + 3j)
self.assertRaises(TypeError, F, "3/2", 3)
self.assertRaises(TypeError, F, 3, 0j)
self.assertRaises(TypeError, F, 3, 1j)
@requires_IEEE_754
def testInitFromFloat(self):
self.assertEqual((5, 2), _components(F(2.5)))
self.assertEqual((0, 1), _components(F(-0.0)))
self.assertEqual((3602879701896397, 36028797018963968),
_components(F(0.1)))
self.assertRaises(TypeError, F, float('nan'))
self.assertRaises(TypeError, F, float('inf'))
self.assertRaises(TypeError, F, float('-inf'))
def testInitFromDecimal(self):
self.assertEqual((11, 10),
_components(F(Decimal('1.1'))))
self.assertEqual((7, 200),
_components(F(Decimal('3.5e-2'))))
self.assertEqual((0, 1),
_components(F(Decimal('.000e20'))))
self.assertRaises(TypeError, F, Decimal('nan'))
self.assertRaises(TypeError, F, Decimal('snan'))
self.assertRaises(TypeError, F, Decimal('inf'))
self.assertRaises(TypeError, F, Decimal('-inf'))
def testFromString(self):
self.assertEqual((5, 1), _components(F("5")))
self.assertEqual((3, 2), _components(F("3/2")))
self.assertEqual((3, 2), _components(F(" \n +3/2")))
self.assertEqual((-3, 2), _components(F("-3/2 ")))
self.assertEqual((13, 2), _components(F(" 013/02 \n ")))
self.assertEqual((13, 2), _components(F(u" 013/02 \n ")))
self.assertEqual((16, 5), _components(F(" 3.2 ")))
self.assertEqual((-16, 5), _components(F(u" -3.2 ")))
self.assertEqual((-3, 1), _components(F(u" -3. ")))
self.assertEqual((3, 5), _components(F(u" .6 ")))
self.assertEqual((1, 3125), _components(F("32.e-5")))
self.assertEqual((1000000, 1), _components(F("1E+06")))
self.assertEqual((-12300, 1), _components(F("-1.23e4")))
self.assertEqual((0, 1), _components(F(" .0e+0\t")))
self.assertEqual((0, 1), _components(F("-0.000e0")))
self.assertRaisesMessage(
ZeroDivisionError, "Fraction(3, 0)",
F, "3/0")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '3/'",
F, "3/")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '/2'",
F, "/2")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '3 /2'",
F, "3 /2")
self.assertRaisesMessage(
# Denominators don't need a sign.
ValueError, "Invalid literal for Fraction: '3/+2'",
F, "3/+2")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '+ 3/2'",
F, "+ 3/2")
self.assertRaisesMessage(
# Avoid treating '.' as a regex special character.
ValueError, "Invalid literal for Fraction: '3a2'",
F, "3a2")
self.assertRaisesMessage(
# Don't accept combinations of decimals and fractions.
ValueError, "Invalid literal for Fraction: '3/7.2'",
F, "3/7.2")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '3.2/7'",
F, "3.2/7")
self.assertRaisesMessage(
# Allow 3. and .3, but not .
ValueError, "Invalid literal for Fraction: '.'",
F, ".")
def testImmutable(self):
r = F(7, 3)
r.__init__(2, 15)
self.assertEqual((7, 3), _components(r))
self.assertRaises(AttributeError, setattr, r, 'numerator', 12)
self.assertRaises(AttributeError, setattr, r, 'denominator', 6)
self.assertEqual((7, 3), _components(r))
# But if you _really_ need to:
r._numerator = 4
r._denominator = 2
self.assertEqual((4, 2), _components(r))
# Which breaks some important operations:
self.assertNotEqual(F(4, 2), r)
def testFromFloat(self):
self.assertRaises(TypeError, F.from_float, 3+4j)
self.assertEqual((10, 1), _components(F.from_float(10)))
bigint = 1234567890123456789
self.assertEqual((bigint, 1), _components(F.from_float(bigint)))
self.assertEqual((0, 1), _components(F.from_float(-0.0)))
self.assertEqual((10, 1), _components(F.from_float(10.0)))
self.assertEqual((-5, 2), _components(F.from_float(-2.5)))
self.assertEqual((99999999999999991611392, 1),
_components(F.from_float(1e23)))
self.assertEqual(float(10**23), float(F.from_float(1e23)))
self.assertEqual((3602879701896397, 1125899906842624),
_components(F.from_float(3.2)))
self.assertEqual(3.2, float(F.from_float(3.2)))
inf = 1e1000
nan = inf - inf
self.assertRaisesMessage(
TypeError, "Cannot convert inf to Fraction.",
F.from_float, inf)
self.assertRaisesMessage(
TypeError, "Cannot convert -inf to Fraction.",
F.from_float, -inf)
self.assertRaisesMessage(
TypeError, "Cannot convert nan to Fraction.",
F.from_float, nan)
def testFromDecimal(self):
self.assertRaises(TypeError, F.from_decimal, 3+4j)
self.assertEqual(F(10, 1), F.from_decimal(10))
self.assertEqual(F(0), F.from_decimal(Decimal("-0")))
self.assertEqual(F(5, 10), F.from_decimal(Decimal("0.5")))
self.assertEqual(F(5, 1000), F.from_decimal(Decimal("5e-3")))
self.assertEqual(F(5000), F.from_decimal(Decimal("5e3")))
self.assertEqual(1 - F(1, 10**30),
F.from_decimal(Decimal("0." + "9" * 30)))
self.assertRaisesMessage(
TypeError, "Cannot convert Infinity to Fraction.",
F.from_decimal, Decimal("inf"))
self.assertRaisesMessage(
TypeError, "Cannot convert -Infinity to Fraction.",
F.from_decimal, Decimal("-inf"))
self.assertRaisesMessage(
TypeError, "Cannot convert NaN to Fraction.",
F.from_decimal, Decimal("nan"))
self.assertRaisesMessage(
TypeError, "Cannot convert sNaN to Fraction.",
F.from_decimal, Decimal("snan"))
def testLimitDenominator(self):
rpi = F('3.1415926535897932')
self.assertEqual(rpi.limit_denominator(10000), F(355, 113))
self.assertEqual(-rpi.limit_denominator(10000), F(-355, 113))
self.assertEqual(rpi.limit_denominator(113), F(355, 113))
self.assertEqual(rpi.limit_denominator(112), F(333, 106))
self.assertEqual(F(201, 200).limit_denominator(100), F(1))
self.assertEqual(F(201, 200).limit_denominator(101), F(102, 101))
self.assertEqual(F(0).limit_denominator(10000), F(0))
for i in (0, -1):
self.assertRaisesMessage(
ValueError, "max_denominator should be at least 1",
F(1).limit_denominator, i)
def testConversions(self):
self.assertTypedEquals(-1, math.trunc(F(-11, 10)))
self.assertTypedEquals(-1, int(F(-11, 10)))
self.assertTypedEquals(1, math.trunc(F(11, 10)))
self.assertEqual(False, bool(F(0, 1)))
self.assertEqual(True, bool(F(3, 2)))
self.assertTypedEquals(0.1, float(F(1, 10)))
        # Check that __float__ isn't implemented by converting the
        # numerator/denominator to float before dividing.
self.assertRaises(OverflowError, float, long('2'*400+'7'))
self.assertAlmostEqual(2.0/3,
float(F(long('2'*400+'7'), long('3'*400+'1'))))
self.assertTypedEquals(0.1+0j, complex(F(1,10)))
def testArithmetic(self):
self.assertEqual(F(1, 2), F(1, 10) + F(2, 5))
self.assertEqual(F(-3, 10), F(1, 10) - F(2, 5))
self.assertEqual(F(1, 25), F(1, 10) * F(2, 5))
self.assertEqual(F(1, 4), F(1, 10) / F(2, 5))
self.assertTypedEquals(2, F(9, 10) // F(2, 5))
self.assertTypedEquals(10**23, F(10**23, 1) // F(1))
self.assertEqual(F(2, 3), F(-7, 3) % F(3, 2))
self.assertEqual(F(8, 27), F(2, 3) ** F(3))
self.assertEqual(F(27, 8), F(2, 3) ** F(-3))
self.assertTypedEquals(2.0, F(4) ** F(1, 2))
self.assertEqual(F(1, 1), +F(1, 1))
self.assertRaises(ValueError, pow, F(-1), F(1, 2))
def testMixedArithmetic(self):
self.assertTypedEquals(F(11, 10), F(1, 10) + 1)
self.assertTypedEquals(1.1, F(1, 10) + 1.0)
self.assertTypedEquals(1.1 + 0j, F(1, 10) + (1.0 + 0j))
self.assertTypedEquals(F(11, 10), 1 + F(1, 10))
self.assertTypedEquals(1.1, 1.0 + F(1, 10))
self.assertTypedEquals(1.1 + 0j, (1.0 + 0j) + F(1, 10))
self.assertTypedEquals(F(-9, 10), F(1, 10) - 1)
self.assertTypedEquals(-0.9, F(1, 10) - 1.0)
self.assertTypedEquals(-0.9 + 0j, F(1, 10) - (1.0 + 0j))
self.assertTypedEquals(F(9, 10), 1 - F(1, 10))
self.assertTypedEquals(0.9, 1.0 - F(1, 10))
self.assertTypedEquals(0.9 + 0j, (1.0 + 0j) - F(1, 10))
self.assertTypedEquals(F(1, 10), F(1, 10) * 1)
self.assertTypedEquals(0.1, F(1, 10) * 1.0)
self.assertTypedEquals(0.1 + 0j, F(1, 10) * (1.0 + 0j))
self.assertTypedEquals(F(1, 10), 1 * F(1, 10))
self.assertTypedEquals(0.1, 1.0 * F(1, 10))
self.assertTypedEquals(0.1 + 0j, (1.0 + 0j) * F(1, 10))
self.assertTypedEquals(F(1, 10), F(1, 10) / 1)
self.assertTypedEquals(0.1, F(1, 10) / 1.0)
self.assertTypedEquals(0.1 + 0j, F(1, 10) / (1.0 + 0j))
self.assertTypedEquals(F(10, 1), 1 / F(1, 10))
self.assertTypedEquals(10.0, 1.0 / F(1, 10))
self.assertTypedEquals(10.0 + 0j, (1.0 + 0j) / F(1, 10))
self.assertTypedEquals(0, F(1, 10) // 1)
self.assertTypedEquals(0.0, F(1, 10) // 1.0)
self.assertTypedEquals(10, 1 // F(1, 10))
self.assertTypedEquals(10**23, 10**22 // F(1, 10))
self.assertTypedEquals(10.0, 1.0 // F(1, 10))
self.assertTypedEquals(F(1, 10), F(1, 10) % 1)
self.assertTypedEquals(0.1, F(1, 10) % 1.0)
self.assertTypedEquals(F(0, 1), 1 % F(1, 10))
self.assertTypedEquals(0.0, 1.0 % F(1, 10))
# ** has more interesting conversion rules.
self.assertTypedEquals(F(100, 1), F(1, 10) ** -2)
self.assertTypedEquals(F(100, 1), F(10, 1) ** 2)
self.assertTypedEquals(0.1, F(1, 10) ** 1.0)
self.assertTypedEquals(0.1 + 0j, F(1, 10) ** (1.0 + 0j))
self.assertTypedEquals(4 , 2 ** F(2, 1))
# Will return 1j in 3.0:
self.assertRaises(ValueError, pow, (-1), F(1, 2))
self.assertTypedEquals(F(1, 4) , 2 ** F(-2, 1))
self.assertTypedEquals(2.0 , 4 ** F(1, 2))
self.assertTypedEquals(0.25, 2.0 ** F(-2, 1))
self.assertTypedEquals(1.0 + 0j, (1.0 + 0j) ** F(1, 10))
def testMixingWithDecimal(self):
# Decimal refuses mixed comparisons.
self.assertRaisesMessage(
TypeError,
"unsupported operand type(s) for +: 'Fraction' and 'Decimal'",
operator.add, F(3,11), Decimal('3.1415926'))
self.assertRaisesMessage(
TypeError,
"unsupported operand type(s) for +: 'Decimal' and 'Fraction'",
operator.add, Decimal('3.1415926'), F(3,11))
self.assertNotEqual(F(5, 2), Decimal('2.5'))
def testComparisons(self):
self.assertTrue(F(1, 2) < F(2, 3))
self.assertFalse(F(1, 2) < F(1, 2))
self.assertTrue(F(1, 2) <= F(2, 3))
self.assertTrue(F(1, 2) <= F(1, 2))
self.assertFalse(F(2, 3) <= F(1, 2))
self.assertTrue(F(1, 2) == F(1, 2))
self.assertFalse(F(1, 2) == F(1, 3))
self.assertFalse(F(1, 2) != F(1, 2))
self.assertTrue(F(1, 2) != F(1, 3))
def testComparisonsDummyRational(self):
self.assertTrue(F(1, 2) == DummyRational(1, 2))
self.assertTrue(DummyRational(1, 2) == F(1, 2))
self.assertFalse(F(1, 2) == DummyRational(3, 4))
self.assertFalse(DummyRational(3, 4) == F(1, 2))
self.assertTrue(F(1, 2) < DummyRational(3, 4))
self.assertFalse(F(1, 2) < DummyRational(1, 2))
self.assertFalse(F(1, 2) < DummyRational(1, 7))
self.assertFalse(F(1, 2) > DummyRational(3, 4))
self.assertFalse(F(1, 2) > DummyRational(1, 2))
self.assertTrue(F(1, 2) > DummyRational(1, 7))
self.assertTrue(F(1, 2) <= DummyRational(3, 4))
self.assertTrue(F(1, 2) <= DummyRational(1, 2))
self.assertFalse(F(1, 2) <= DummyRational(1, 7))
self.assertFalse(F(1, 2) >= DummyRational(3, 4))
self.assertTrue(F(1, 2) >= DummyRational(1, 2))
self.assertTrue(F(1, 2) >= DummyRational(1, 7))
self.assertTrue(DummyRational(1, 2) < F(3, 4))
self.assertFalse(DummyRational(1, 2) < F(1, 2))
self.assertFalse(DummyRational(1, 2) < F(1, 7))
self.assertFalse(DummyRational(1, 2) > F(3, 4))
self.assertFalse(DummyRational(1, 2) > F(1, 2))
self.assertTrue(DummyRational(1, 2) > F(1, 7))
self.assertTrue(DummyRational(1, 2) <= F(3, 4))
self.assertTrue(DummyRational(1, 2) <= F(1, 2))
self.assertFalse(DummyRational(1, 2) <= F(1, 7))
self.assertFalse(DummyRational(1, 2) >= F(3, 4))
self.assertTrue(DummyRational(1, 2) >= F(1, 2))
self.assertTrue(DummyRational(1, 2) >= F(1, 7))
def testComparisonsDummyFloat(self):
x = DummyFloat(1./3.)
y = F(1, 3)
self.assertTrue(x != y)
self.assertTrue(x < y or x > y)
self.assertFalse(x == y)
self.assertFalse(x <= y and x >= y)
self.assertTrue(y != x)
self.assertTrue(y < x or y > x)
self.assertFalse(y == x)
self.assertFalse(y <= x and y >= x)
def testMixedLess(self):
self.assertTrue(2 < F(5, 2))
self.assertFalse(2 < F(4, 2))
self.assertTrue(F(5, 2) < 3)
self.assertFalse(F(4, 2) < 2)
self.assertTrue(F(1, 2) < 0.6)
self.assertFalse(F(1, 2) < 0.4)
self.assertTrue(0.4 < F(1, 2))
self.assertFalse(0.5 < F(1, 2))
self.assertFalse(float('inf') < F(1, 2))
self.assertTrue(float('-inf') < F(0, 10))
self.assertFalse(float('nan') < F(-3, 7))
self.assertTrue(F(1, 2) < float('inf'))
self.assertFalse(F(17, 12) < float('-inf'))
self.assertFalse(F(144, -89) < float('nan'))
def testMixedLessEqual(self):
self.assertTrue(0.5 <= F(1, 2))
self.assertFalse(0.6 <= F(1, 2))
self.assertTrue(F(1, 2) <= 0.5)
self.assertFalse(F(1, 2) <= 0.4)
self.assertTrue(2 <= F(4, 2))
self.assertFalse(2 <= F(3, 2))
self.assertTrue(F(4, 2) <= 2)
self.assertFalse(F(5, 2) <= 2)
self.assertFalse(float('inf') <= F(1, 2))
self.assertTrue(float('-inf') <= F(0, 10))
self.assertFalse(float('nan') <= F(-3, 7))
self.assertTrue(F(1, 2) <= float('inf'))
self.assertFalse(F(17, 12) <= float('-inf'))
self.assertFalse(F(144, -89) <= float('nan'))
def testBigFloatComparisons(self):
# Because 10**23 can't be represented exactly as a float:
self.assertFalse(F(10**23) == float(10**23))
self.assertFalse(1e23 < float(F(math.trunc(1e23) + 1)))
self.assertTrue(1e23 < F(math.trunc(1e23) + 1))
self.assertFalse(1e23 <= F(math.trunc(1e23) - 1))
self.assertTrue(1e23 > F(math.trunc(1e23) - 1))
self.assertFalse(1e23 >= F(math.trunc(1e23) + 1))
def testBigComplexComparisons(self):
self.assertFalse(F(10**23) == complex(10**23))
self.assertRaises(TypeError, operator.gt, F(10**23), complex(10**23))
self.assertRaises(TypeError, operator.le, F(10**23), complex(10**23))
x = F(3, 8)
z = complex(0.375, 0.0)
w = complex(0.375, 0.2)
self.assertTrue(x == z)
self.assertFalse(x != z)
self.assertFalse(x == w)
self.assertTrue(x != w)
for op in operator.lt, operator.le, operator.gt, operator.ge:
self.assertRaises(TypeError, op, x, z)
self.assertRaises(TypeError, op, z, x)
self.assertRaises(TypeError, op, x, w)
self.assertRaises(TypeError, op, w, x)
def testMixedEqual(self):
self.assertTrue(0.5 == F(1, 2))
self.assertFalse(0.6 == F(1, 2))
self.assertTrue(F(1, 2) == 0.5)
self.assertFalse(F(1, 2) == 0.4)
self.assertTrue(2 == F(4, 2))
self.assertFalse(2 == F(3, 2))
self.assertTrue(F(4, 2) == 2)
self.assertFalse(F(5, 2) == 2)
self.assertFalse(F(5, 2) == float('nan'))
self.assertFalse(float('nan') == F(3, 7))
self.assertFalse(F(5, 2) == float('inf'))
self.assertFalse(float('-inf') == F(2, 5))
def testStringification(self):
self.assertEqual("Fraction(7, 3)", repr(F(7, 3)))
self.assertEqual("Fraction(6283185307, 2000000000)",
repr(F('3.1415926535')))
self.assertEqual("Fraction(-1, 100000000000000000000)",
repr(F(1, -10**20)))
self.assertEqual("7/3", str(F(7, 3)))
self.assertEqual("7", str(F(7, 1)))
def testHash(self):
self.assertEqual(hash(2.5), hash(F(5, 2)))
self.assertEqual(hash(10**50), hash(F(10**50)))
self.assertNotEqual(hash(float(10**23)), hash(F(10**23)))
def testApproximatePi(self):
three = F(3)
lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24
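        # accumulate terms of the series until two successive partial sums
        # differ by less than 1/10**9, then compare against math.pi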
while abs(s - lasts) > F(1, 10**9):
lasts = s
n, na = n+na, na+8
d, da = d+da, da+32
t = (t * n) / d
s += t
self.assertAlmostEqual(math.pi, s)
def testApproximateCos1(self):
x = F(1)
i, lasts, s, fact, num, sign = 0, 0, F(1), 1, 1, 1
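        # sum the Taylor series of cos(x) at x = 1 with exact Fraction
        # arithmetic until successive partial sums differ by less than 1/10**9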
while abs(s - lasts) > F(1, 10**9):
lasts = s
i += 2
fact *= i * (i-1)
num *= x * x
sign *= -1
s += num / fact * sign
self.assertAlmostEqual(math.cos(1), s)
def test_copy_deepcopy_pickle(self):
r = F(13, 7)
dr = DummyFraction(13, 7)
self.assertEqual(r, loads(dumps(r)))
self.assertEqual(id(r), id(copy(r)))
self.assertEqual(id(r), id(deepcopy(r)))
self.assertNotEqual(id(dr), id(copy(dr)))
self.assertNotEqual(id(dr), id(deepcopy(dr)))
self.assertTypedEquals(dr, copy(dr))
self.assertTypedEquals(dr, deepcopy(dr))
def test_slots(self):
r = F(13, 7)
self.assertRaises(AttributeError, setattr, r, 'a', 10)
def test_main():
run_unittest(FractionTest, GcdTest)
if __name__ == '__main__':
test_main()
| false
| true
|
f718698cc99b9998bc182475d5ca68afff243272
| 46,449
|
py
|
Python
|
external_apps/django-rosetta/rosetta/polib.py
|
spreeker/democracygame
|
525139955cb739c295051f317ab670049511bcf8
|
[
"BSD-3-Clause"
] | 2
|
2016-05-09T04:57:34.000Z
|
2017-03-03T14:22:24.000Z
|
external_apps/django-rosetta/rosetta/polib.py
|
spreeker/democracygame
|
525139955cb739c295051f317ab670049511bcf8
|
[
"BSD-3-Clause"
] | null | null | null |
external_apps/django-rosetta/rosetta/polib.py
|
spreeker/democracygame
|
525139955cb739c295051f317ab670049511bcf8
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# License: MIT (see LICENSE file provided)
# vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4:
"""
**polib** allows you to manipulate, create and modify gettext files (pot, po
and mo files). You can load existing files, iterate through their entries,
add or modify entries, comments or metadata, etc., or create new po files
from scratch.
**polib** provides a simple and pythonic API, exporting only three
convenience functions (*pofile*, *mofile* and *detect_encoding*), and the
four core classes, *POFile*, *MOFile*, *POEntry* and *MOEntry* for creating
new files/entries.
**Basic example**:
>>> import polib
>>> # load an existing po file
>>> po = polib.pofile('tests/test_utf8.po')
>>> for entry in po:
... # do something with entry...
... pass
>>> # add an entry
>>> entry = polib.POEntry(msgid='Welcome', msgstr='Bienvenue')
>>> entry.occurrences = [('welcome.py', '12'), ('anotherfile.py', '34')]
>>> po.append(entry)
>>> # to save our modified po file:
>>> # po.save()
>>> # or you may want to compile the po file
>>> # po.save_as_mofile('tests/test_utf8.mo')
"""
__author__ = 'David JEAN LOUIS <izimobil@gmail.com>'
__version__ = '0.4.1'
__all__ = ['pofile', 'POFile', 'POEntry', 'mofile', 'MOFile', 'MOEntry',
'detect_encoding', 'escape', 'unescape']
import struct
import textwrap
import warnings
default_encoding = 'utf-8'
# function pofile() {{{
def pofile(fpath, **kwargs):
"""
    Convenience function that parses the po/pot file *fpath* and returns
a POFile instance.
**Keyword arguments**:
- *fpath*: string, full or relative path to the po/pot file to parse
- *wrapwidth*: integer, the wrap width, only useful when -w option was
passed to xgettext (optional, default to 78)
- *autodetect_encoding*: boolean, if set to False the function will
not try to detect the po file encoding (optional, default to True)
- *encoding*: string, an encoding, only relevant if autodetect_encoding
is set to False
**Example**:
>>> import polib
>>> po = polib.pofile('tests/test_weird_occurrences.po')
>>> po #doctest: +ELLIPSIS
<POFile instance at ...>
>>> import os, tempfile
>>> for fname in ['test_iso-8859-15.po', 'test_utf8.po']:
... orig_po = polib.pofile('tests/'+fname)
... tmpf = tempfile.NamedTemporaryFile().name
... orig_po.save(tmpf)
... try:
... new_po = polib.pofile(tmpf)
... for old, new in zip(orig_po, new_po):
... if old.msgid != new.msgid:
... old.msgid
... new.msgid
... if old.msgstr != new.msgstr:
... old.msgid
... new.msgid
... finally:
... os.unlink(tmpf)
"""
if kwargs.get('autodetect_encoding', True) == True:
enc = detect_encoding(fpath)
else:
enc = kwargs.get('encoding', default_encoding)
parser = _POFileParser(fpath)
instance = parser.parse()
instance.wrapwidth = kwargs.get('wrapwidth', 78)
instance.encoding = enc
return instance
# }}}
# function mofile() {{{
def mofile(fpath, **kwargs):
"""
    Convenience function that parses the mo file *fpath* and returns
a MOFile instance.
**Keyword arguments**:
- *fpath*: string, full or relative path to the mo file to parse
- *wrapwidth*: integer, the wrap width, only useful when -w option was
passed to xgettext to generate the po file that was used to format
the mo file (optional, default to 78)
- *autodetect_encoding*: boolean, if set to False the function will
not try to detect the po file encoding (optional, default to True)
- *encoding*: string, an encoding, only relevant if autodetect_encoding
is set to False
**Example**:
>>> import polib
>>> mo = polib.mofile('tests/test_utf8.mo')
>>> mo #doctest: +ELLIPSIS
<MOFile instance at ...>
>>> import os, tempfile
>>> for fname in ['test_iso-8859-15.mo', 'test_utf8.mo']:
... orig_mo = polib.mofile('tests/'+fname)
... tmpf = tempfile.NamedTemporaryFile().name
... orig_mo.save(tmpf)
... try:
... new_mo = polib.mofile(tmpf)
... for old, new in zip(orig_mo, new_mo):
... if old.msgid != new.msgid:
... old.msgstr
... new.msgstr
... finally:
... os.unlink(tmpf)
"""
if kwargs.get('autodetect_encoding', True) == True:
enc = detect_encoding(fpath)
else:
enc = kwargs.get('encoding', default_encoding)
parser = _MOFileParser(fpath)
instance = parser.parse()
instance.wrapwidth = kwargs.get('wrapwidth', 78)
instance.encoding = enc
return instance
# }}}
# function detect_encoding() {{{
def detect_encoding(fpath):
"""
Try to detect the encoding used by the file *fpath*. The function will
return polib default *encoding* if it's unable to detect it.
**Keyword argument**:
- *fpath*: string, full or relative path to the mo file to parse.
**Examples**:
>>> print(detect_encoding('tests/test_noencoding.po'))
utf-8
>>> print(detect_encoding('tests/test_utf8.po'))
UTF-8
>>> print(detect_encoding('tests/test_utf8.mo'))
UTF-8
>>> print(detect_encoding('tests/test_iso-8859-15.po'))
ISO_8859-15
>>> print(detect_encoding('tests/test_iso-8859-15.mo'))
ISO_8859-15
"""
import re
rx = re.compile(r'"?Content-Type:.+? charset=([\w_\-:\.]+)')
f = open(fpath)
for l in f:
match = rx.search(l)
if match:
f.close()
return match.group(1).strip()
f.close()
return default_encoding
# }}}
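# Editor's sketch (not part of polib): a self-contained illustration of
# detect_encoding(). It writes a minimal po header with a hypothetical
# charset to a temporary file and reads the charset back.
def _demo_detect_encoding():
    import os
    import tempfile
    tmp = tempfile.NamedTemporaryFile(suffix='.po', delete=False)
    try:
        tmp.write('msgid ""\nmsgstr ""\n"Content-Type: text/plain; charset=ISO-8859-1\\n"\n')
        tmp.close()
        return detect_encoding(tmp.name)  # returns 'ISO-8859-1'
    finally:
        os.unlink(tmp.name)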
# function escape() {{{
def escape(st):
"""
Escape special chars and return the given string *st*.
**Examples**:
>>> escape('\\t and \\n and \\r and " and \\\\')
'\\\\t and \\\\n and \\\\r and \\\\" and \\\\\\\\'
"""
st = st.replace('\\', r'\\')
st = st.replace('\t', r'\t')
st = st.replace('\r', r'\r')
st = st.replace('\n', r'\n')
st = st.replace('\"', r'\"')
return st
# }}}
# function unescape() {{{
def unescape(st):
"""
Unescape special chars and return the given string *st*.
**Examples**:
>>> unescape('\\\\t and \\\\n and \\\\r and \\\\" and \\\\\\\\')
'\\t and \\n and \\r and " and \\\\'
"""
st = st.replace(r'\"', '"')
st = st.replace(r'\n', '\n')
st = st.replace(r'\r', '\r')
st = st.replace(r'\t', '\t')
st = st.replace(r'\\', '\\')
return st
# }}}
# class _BaseFile {{{
class _BaseFile(list):
"""
Common parent class for POFile and MOFile classes.
    This class must **not** be instantiated directly.
"""
def __init__(self, fpath=None, wrapwidth=78, encoding=default_encoding):
"""
Constructor.
**Keyword arguments**:
- *fpath*: string, path to po or mo file
- *wrapwidth*: integer, the wrap width, only useful when -w option
was passed to xgettext to generate the po file that was used to
format the mo file, default to 78 (optional).
"""
list.__init__(self)
# the opened file handle
self.fpath = fpath
# the width at which lines should be wrapped
self.wrapwidth = wrapwidth
# the file encoding
self.encoding = encoding
# header
self.header = ''
# both po and mo files have metadata
self.metadata = {}
self.metadata_is_fuzzy = 0
def __str__(self):
"""String representation of the file."""
ret = []
entries = [self.metadata_as_entry()] + \
[e for e in self if not e.obsolete]
for entry in entries:
ret.append(entry.__str__(self.wrapwidth))
for entry in self.obsolete_entries():
ret.append(entry.__str__(self.wrapwidth))
return '\n'.join(ret)
def __repr__(self):
"""Return the official string representation of the object."""
return '<%s instance at %x>' % (self.__class__.__name__, id(self))
def metadata_as_entry(self):
"""Return the metadata as an entry"""
e = POEntry(msgid='')
mdata = self.ordered_metadata()
if mdata:
strs = []
for name, value in mdata:
# Strip whitespace off each line in a multi-line entry
value = '\n'.join([v.strip() for v in value.split('\n')])
strs.append('%s: %s' % (name, value))
e.msgstr = '\n'.join(strs) + '\n'
return e
def save(self, fpath=None, repr_method='__str__'):
"""
Save the po file to file *fpath* if no file handle exists for
the object. If there's already an open file and no fpath is
provided, then the existing file is rewritten with the modified
data.
**Keyword arguments**:
- *fpath*: string, full or relative path to the file.
- *repr_method*: string, the method to use for output.
"""
if self.fpath is None and fpath is None:
raise IOError('You must provide a file path to save() method')
contents = getattr(self, repr_method)()
if fpath is None:
fpath = self.fpath
mode = 'w'
if repr_method == 'to_binary':
mode += 'b'
fhandle = open(fpath, mode)
fhandle.write(contents)
fhandle.close()
def find(self, st, by='msgid'):
"""
Find entry which msgid (or property identified by the *by*
attribute) matches the string *st*.
**Keyword arguments**:
- *st*: string, the string to search for
- *by*: string, the comparison attribute
**Examples**:
>>> po = pofile('tests/test_utf8.po')
>>> entry = po.find('Thursday')
>>> entry.msgstr
'Jueves'
>>> entry = po.find('Some unexistant msgid')
>>> entry is None
True
>>> entry = po.find('Jueves', 'msgstr')
>>> entry.msgid
'Thursday'
"""
try:
return [e for e in self if getattr(e, by) == st][0]
except IndexError:
return None
def ordered_metadata(self):
"""
        Convenience method that returns the ordered metadata. The return
        value is a list of tuples (metadata name, metadata value).
"""
# copy the dict first
metadata = self.metadata.copy()
data_order = [
'Project-Id-Version',
'Report-Msgid-Bugs-To',
'POT-Creation-Date',
'PO-Revision-Date',
'Last-Translator',
'Language-Team',
'MIME-Version',
'Content-Type',
'Content-Transfer-Encoding'
]
ordered_data = []
for data in data_order:
try:
value = metadata.pop(data)
ordered_data.append((data, value))
except KeyError:
pass
        # the rest of the metadata is not ordered; there is no spec for it
        keys = sorted(metadata.keys())
for data in keys:
value = metadata[data]
ordered_data.append((data, value))
return ordered_data
def to_binary(self):
"""Return the mofile binary representation."""
import struct
import array
output = ''
offsets = []
ids = strs = ''
entries = self.translated_entries()
# the keys are sorted in the .mo file
def cmp(_self, other):
if _self.msgid > other.msgid:
return 1
elif _self.msgid < other.msgid:
return -1
else:
return 0
entries.sort(cmp)
# add metadata entry
mentry = self.metadata_as_entry()
mentry.msgstr = mentry.msgstr.replace('\\n', '').lstrip() + '\n'
entries = [mentry] + entries
entries_len = len(entries)
for e in entries:
# For each string, we need size and file offset. Each string is
# NUL terminated; the NUL does not count into the size.
msgid = e._decode(e.msgid)
msgstr = e._decode(e.msgstr)
offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
ids += msgid + '\0'
strs += msgstr + '\0'
# The header is 7 32-bit unsigned integers.
keystart = 7*4+16*entries_len
# and the values start after the keys
valuestart = keystart + len(ids)
koffsets = []
voffsets = []
# The string table first has the list of keys, then the list of values.
# Each entry has first the size of the string, then the file offset.
for o1, l1, o2, l2 in offsets:
koffsets += [l1, o1+keystart]
voffsets += [l2, o2+valuestart]
offsets = koffsets + voffsets
output = struct.pack("IIIIIII",
0x950412de, # Magic number
0, # Version
entries_len, # # of entries
7*4, # start of key index
7*4+entries_len*8, # start of value index
0, 0) # size and offset of hash table
output += array.array("I", offsets).tostring()
output += ids
output += strs
return output
# }}}
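# Editor's sketch (not part of polib): how the 7-integer header written by
# _BaseFile.to_binary() above can be read back; the field order follows the
# struct.pack() call in to_binary().
def _inspect_mo_header(data):
    """Return the mo header fields of the binary string *data* as a dict."""
    header_size = struct.calcsize("IIIIIII")
    (magic, version, num_entries, key_index_offset,
     value_index_offset, hash_size, hash_offset) = \
        struct.unpack("IIIIIII", data[:header_size])
    return {'magic': magic, 'version': version, 'entries': num_entries,
            'key_index_offset': key_index_offset,
            'value_index_offset': value_index_offset,
            'hash_table_size': hash_size, 'hash_table_offset': hash_offset}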
# class POFile {{{
class POFile(_BaseFile):
'''
Po (or Pot) file reader/writer.
POFile objects inherit the list objects methods.
**Example**:
>>> po = POFile()
>>> entry1 = POEntry(
... msgid="Some english text",
... msgstr="Un texte en anglais"
... )
>>> entry1.occurrences = [('testfile', 12),('another_file', 1)]
>>> entry1.comment = "Some useful comment"
>>> entry2 = POEntry(
... msgid="Peace in some languages",
... msgstr="Pace سلام שלום Hasîtî 和平"
... )
>>> entry2.occurrences = [('testfile', 15),('another_file', 5)]
>>> entry2.comment = "Another useful comment"
>>> entry3 = POEntry(
... msgid='Some entry with quotes " \\"',
... msgstr='Un message unicode avec des quotes " \\"'
... )
>>> entry3.comment = "Test string quoting"
>>> po.append(entry1)
>>> po.append(entry2)
>>> po.append(entry3)
>>> po.header = "Some Header"
>>> print(po)
# Some Header
msgid ""
msgstr ""
<BLANKLINE>
#. Some useful comment
#: testfile:12 another_file:1
msgid "Some english text"
msgstr "Un texte en anglais"
<BLANKLINE>
#. Another useful comment
#: testfile:15 another_file:5
msgid "Peace in some languages"
msgstr "Pace سلام שלום Hasîtî 和平"
<BLANKLINE>
#. Test string quoting
msgid "Some entry with quotes \\" \\""
msgstr "Un message unicode avec des quotes \\" \\""
<BLANKLINE>
'''
def __str__(self):
"""Return the string representation of the po file"""
ret, headers = '', self.header.split('\n')
for header in headers:
if header[:1] in [',', ':']:
ret += '#%s\n' % header
else:
ret += '# %s\n' % header
return ret + _BaseFile.__str__(self)
def save_as_mofile(self, fpath):
"""
Save the binary representation of the file to *fpath*.
**Keyword arguments**:
- *fpath*: string, full or relative path to the file.
"""
_BaseFile.save(self, fpath, 'to_binary')
def percent_translated(self):
"""
        Convenience method that returns the percentage of translated
messages.
**Example**:
>>> import polib
>>> po = polib.pofile('tests/test_pofile_helpers.po')
>>> po.percent_translated()
50
>>> po = POFile()
>>> po.percent_translated()
100
"""
total = len([e for e in self if not e.obsolete])
if total == 0:
return 100
translated = len(self.translated_entries())
return int((100.00 / float(total)) * translated)
def translated_entries(self):
"""
        Convenience method that returns a list of translated entries.
**Example**:
>>> import polib
>>> po = polib.pofile('tests/test_pofile_helpers.po')
>>> len(po.translated_entries())
6
"""
return [e for e in self if e.translated() and not e.obsolete]
def untranslated_entries(self):
"""
        Convenience method that returns a list of untranslated entries.
**Example**:
>>> import polib
>>> po = polib.pofile('tests/test_pofile_helpers.po')
>>> len(po.untranslated_entries())
6
"""
return [e for e in self if not e.translated() and not e.obsolete]
def fuzzy_entries(self):
"""
        Convenience method that returns the list of 'fuzzy' entries.
**Example**:
>>> import polib
>>> po = polib.pofile('tests/test_pofile_helpers.po')
>>> len(po.fuzzy_entries())
2
"""
return [e for e in self if 'fuzzy' in e.flags]
def obsolete_entries(self):
"""
        Convenience method that returns the list of obsolete entries.
**Example**:
>>> import polib
>>> po = polib.pofile('tests/test_pofile_helpers.po')
>>> len(po.obsolete_entries())
4
"""
return [e for e in self if e.obsolete]
def merge(self, refpot):
"""
        XXX this may not work if encodings differ; it needs thought and a
        general refactoring of how polib handles encodings...
        Convenience method that merges the current pofile with the pot file
        provided. It behaves exactly as the gettext msgmerge utility:
- comments of this file will be preserved, but extracted comments
and occurrences will be discarded
- any translations or comments in the file will be discarded,
however dot comments and file positions will be preserved
**Keyword argument**:
- *refpot*: object POFile, the reference catalog.
**Example**:
>>> import polib
>>> refpot = polib.pofile('tests/test_merge.pot')
>>> po = polib.pofile('tests/test_merge_before.po')
>>> po.merge(refpot)
>>> expected_po = polib.pofile('tests/test_merge_after.po')
>>> str(po) == str(expected_po)
True
"""
for entry in refpot:
e = self.find(entry.msgid)
if e is None:
e = POEntry()
self.append(e)
e.merge(entry)
# ok, now we must "obsolete" entries that are not in the refpot
# anymore
for entry in self:
if refpot.find(entry.msgid) is None:
entry.obsolete = True
# }}}
# class MOFile {{{
class MOFile(_BaseFile):
'''
Mo file reader/writer.
MOFile objects inherit the list objects methods.
**Example**:
>>> mo = MOFile()
>>> entry1 = POEntry(
... msgid="Some english text",
... msgstr="Un texte en anglais"
... )
>>> entry2 = POEntry(
... msgid="I need my dirty cheese",
... msgstr="Je veux mon sale fromage"
... )
>>> entry3 = MOEntry(
... msgid='Some entry with quotes " \\"',
... msgstr='Un message unicode avec des quotes " \\"'
... )
>>> mo.append(entry1)
>>> mo.append(entry2)
>>> mo.append(entry3)
>>> print(mo)
msgid ""
msgstr ""
<BLANKLINE>
msgid "Some english text"
msgstr "Un texte en anglais"
<BLANKLINE>
msgid "I need my dirty cheese"
msgstr "Je veux mon sale fromage"
<BLANKLINE>
msgid "Some entry with quotes \\" \\""
msgstr "Un message unicode avec des quotes \\" \\""
<BLANKLINE>
'''
def __init__(self, fpath=None, wrapwidth=78):
"""
MOFile constructor.
        See _BaseFile.__init__.
"""
_BaseFile.__init__(self, fpath, wrapwidth)
self.magic_number = None
self.version = 0
def save_as_pofile(self, fpath):
"""
Save the string representation of the file to *fpath*.
**Keyword argument**:
- *fpath*: string, full or relative path to the file.
"""
_BaseFile.save(self, fpath)
def save(self, fpath):
"""
Save the binary representation of the file to *fpath*.
**Keyword argument**:
- *fpath*: string, full or relative path to the file.
"""
_BaseFile.save(self, fpath, 'to_binary')
def percent_translated(self):
"""
Convenience method to keep the same interface with POFile instances.
"""
return 100
def translated_entries(self):
"""
Convenience method to keep the same interface with POFile instances.
"""
return self
def untranslated_entries(self):
"""
Convenience method to keep the same interface with POFile instances.
"""
return []
def fuzzy_entries(self):
"""
Convenience method to keep the same interface with POFile instances.
"""
return []
def obsolete_entries(self):
"""
Convenience method to keep the same interface with POFile instances.
"""
return []
# }}}
# class _BaseEntry {{{
class _BaseEntry(object):
"""
Base class for POEntry or MOEntry objects.
    This class must *not* be instantiated directly.
"""
def __init__(self, *args, **kwargs):
"""Base Entry constructor."""
self.msgid = kwargs.get('msgid', '')
self.msgstr = kwargs.get('msgstr', '')
self.msgid_plural = kwargs.get('msgid_plural', '')
self.msgstr_plural = kwargs.get('msgstr_plural', {})
self.obsolete = kwargs.get('obsolete', False)
self.encoding = kwargs.get('encoding', default_encoding)
def __repr__(self):
"""Return the official string representation of the object."""
return '<%s instance at %x>' % (self.__class__.__name__, id(self))
def __str__(self, wrapwidth=78):
"""
Common string representation of the POEntry and MOEntry
objects.
"""
if self.obsolete:
delflag = '#~ '
else:
delflag = ''
# write the msgid
ret = []
ret += self._str_field("msgid", delflag, "", self.msgid)
# write the msgid_plural if any
if self.msgid_plural:
ret += self._str_field("msgid_plural", delflag, "", self.msgid_plural)
if self.msgstr_plural:
# write the msgstr_plural if any
msgstrs = self.msgstr_plural
keys = list(msgstrs)
keys.sort()
for index in keys:
msgstr = msgstrs[index]
plural_index = '[%s]' % index
ret += self._str_field("msgstr", delflag, plural_index, msgstr)
else:
# otherwise write the msgstr
ret += self._str_field("msgstr", delflag, "", self.msgstr)
ret.append('')
return '\n'.join(ret)
def _str_field(self, fieldname, delflag, plural_index, field):
field = self._decode(field)
lines = field.splitlines(True) # keep line breaks in strings
# potentially, we could do line-wrapping here, but textwrap.wrap
# treats whitespace too carelessly for us to use it.
if len(lines) > 1:
lines = ['']+lines # start with initial empty line
else:
lines = [field] # needed for the empty string case
ret = ['%s%s%s "%s"' % (delflag, fieldname, plural_index,
escape(lines.pop(0)))]
for mstr in lines:
ret.append('%s"%s"' % (delflag, escape(mstr)))
return ret
def _decode(self, st):
try:
if isinstance(st, unicode):
st = st.encode(self.encoding)
return st
except:
return st
# }}}
# class POEntry {{{
class POEntry(_BaseEntry):
"""
Represents a po file entry.
**Examples**:
>>> entry = POEntry(msgid='Welcome', msgstr='Bienvenue')
>>> entry.occurrences = [('welcome.py', 12), ('anotherfile.py', 34)]
>>> print(entry)
#: welcome.py:12 anotherfile.py:34
msgid "Welcome"
msgstr "Bienvenue"
<BLANKLINE>
>>> entry = POEntry()
>>> entry.occurrences = [('src/some-very-long-filename-that-should-not-be-wrapped-even-if-it-is-larger-than-the-wrap-limit.c', 32), ('src/eggs.c', 45)]
>>> entry.comment = 'A plural translation. This is a very very very long line please do not wrap, this is just for testing comment wrapping...'
>>> entry.tcomment = 'A plural translation. This is a very very very long line please do not wrap, this is just for testing comment wrapping...'
>>> entry.flags.append('c-format')
>>> entry.msgid = 'I have spam but no egg !'
>>> entry.msgid_plural = 'I have spam and %d eggs !'
>>> entry.msgstr_plural[0] = "J'ai du jambon mais aucun oeuf !"
>>> entry.msgstr_plural[1] = "J'ai du jambon et %d oeufs !"
>>> print(entry)
#. A plural translation. This is a very very very long line please do not
#. wrap, this is just for testing comment wrapping...
# A plural translation. This is a very very very long line please do not wrap,
# this is just for testing comment wrapping...
#: src/some-very-long-filename-that-should-not-be-wrapped-even-if-it-is-larger-than-the-wrap-limit.c:32
#: src/eggs.c:45
#, c-format
msgid "I have spam but no egg !"
msgid_plural "I have spam and %d eggs !"
msgstr[0] "J'ai du jambon mais aucun oeuf !"
msgstr[1] "J'ai du jambon et %d oeufs !"
<BLANKLINE>
"""
def __init__(self, *args, **kwargs):
"""POEntry constructor."""
_BaseEntry.__init__(self, *args, **kwargs)
self.comment = kwargs.get('comment', '')
self.tcomment = kwargs.get('tcomment', '')
self.occurrences = kwargs.get('occurrences', [])
self.flags = kwargs.get('flags', [])
def __str__(self, wrapwidth=78):
"""
Return the string representation of the entry.
"""
if self.obsolete:
return _BaseEntry.__str__(self)
ret = []
# comment first, if any (with text wrapping as xgettext does)
if self.comment != '':
comments = self._decode(self.comment).split('\n')
for comment in comments:
if wrapwidth > 0 and len(comment) > wrapwidth-3:
ret += textwrap.wrap(comment, wrapwidth,
initial_indent='#. ',
subsequent_indent='#. ',
break_long_words=False)
else:
ret.append('#. %s' % comment)
# translator comment, if any (with text wrapping as xgettext does)
if self.tcomment != '':
tcomments = self._decode(self.tcomment).split('\n')
for tcomment in tcomments:
if wrapwidth > 0 and len(tcomment) > wrapwidth-2:
ret += textwrap.wrap(tcomment, wrapwidth,
initial_indent='# ',
subsequent_indent='# ',
break_long_words=False)
else:
ret.append('# %s' % tcomment)
# occurrences (with text wrapping as xgettext does)
if self.occurrences:
filelist = []
for fpath, lineno in self.occurrences:
if lineno:
filelist.append('%s:%s' % (self._decode(fpath), lineno))
else:
filelist.append('%s' % (self._decode(fpath)))
filestr = ' '.join(filelist)
if wrapwidth > 0 and len(filestr)+3 > wrapwidth:
                # XXX textwrap splits words that contain hyphens; this is not
                # what we want for filenames, so the dirty hack is to
                # temporarily replace hyphens with a char that a filename
                # cannot contain, like "*"
lines = textwrap.wrap(filestr.replace('-', '*'),
wrapwidth,
initial_indent='#: ',
subsequent_indent='#: ',
break_long_words=False)
# end of the replace hack
for line in lines:
ret.append(line.replace('*', '-'))
else:
ret.append('#: '+filestr)
# flags
if self.flags:
flags = []
for flag in self.flags:
flags.append(flag)
ret.append('#, %s' % ', '.join(flags))
ret.append(_BaseEntry.__str__(self))
return '\n'.join(ret)
def __cmp__(self, other):
'''
Called by comparison operations if rich comparison is not defined.
**Tests**:
>>> a = POEntry(msgid='a', occurrences=[('b.py', 1), ('b.py', 3)])
>>> b = POEntry(msgid='b', occurrences=[('b.py', 1), ('b.py', 3)])
>>> c1 = POEntry(msgid='c1', occurrences=[('a.py', 1), ('b.py', 1)])
>>> c2 = POEntry(msgid='c2', occurrences=[('a.py', 1), ('a.py', 3)])
>>> po = POFile()
>>> po.append(a)
>>> po.append(b)
>>> po.append(c1)
>>> po.append(c2)
>>> po.sort()
>>> print(po)
#
msgid ""
msgstr ""
<BLANKLINE>
#: a.py:1 a.py:3
msgid "c2"
msgstr ""
<BLANKLINE>
#: a.py:1 b.py:1
msgid "c1"
msgstr ""
<BLANKLINE>
#: b.py:1 b.py:3
msgid "a"
msgstr ""
<BLANKLINE>
#: b.py:1 b.py:3
msgid "b"
msgstr ""
<BLANKLINE>
'''
        def compare_occurrences(a, b):
            """
            Compare an entry occurrence with another one.
            """
            if a[0] != b[0]:
                return cmp(a[0], b[0])
            if a[1] != b[1]:
                return cmp(a[1], b[1])
            return 0
# First: Obsolete test
if self.obsolete != other.obsolete:
if self.obsolete:
return -1
else:
return 1
# Work on a copy to protect original
occ1 = self.occurrences[:]
occ2 = other.occurrences[:]
# Sorting using compare method
occ1.sort(compare_occurrences)
occ2.sort(compare_occurrences)
# Comparing sorted occurrences
pos = 0
for entry1 in occ1:
try:
entry2 = occ2[pos]
except IndexError:
return 1
pos = pos + 1
if entry1[0] != entry2[0]:
if entry1[0] > entry2[0]:
return 1
else:
return -1
if entry1[1] != entry2[1]:
if entry1[1] > entry2[1]:
return 1
else:
return -1
        # Finally: Compare message ID
        if self.msgid > other.msgid: return 1
        elif self.msgid < other.msgid: return -1
        return 0
def translated(self):
"""
Return True if the entry has been translated or False.
"""
if self.obsolete or 'fuzzy' in self.flags:
return False
if self.msgstr != '':
return True
if self.msgstr_plural:
for pos in self.msgstr_plural:
if self.msgstr_plural[pos] == '':
return False
return True
return False
def merge(self, other):
"""
Merge the current entry with the given pot entry.
"""
self.msgid = other.msgid
self.occurrences = other.occurrences
self.comment = other.comment
self.flags = other.flags
self.msgid_plural = other.msgid_plural
if other.msgstr_plural:
for pos in other.msgstr_plural:
try:
# keep existing translation at pos if any
self.msgstr_plural[pos]
except KeyError:
self.msgstr_plural[pos] = ''
# }}}
# class MOEntry {{{
class MOEntry(_BaseEntry):
"""
Represents a mo file entry.
**Examples**:
>>> entry = MOEntry()
>>> entry.msgid = 'translate me !'
>>> entry.msgstr = 'traduisez moi !'
>>> print(entry)
msgid "translate me !"
msgstr "traduisez moi !"
<BLANKLINE>
"""
def __str__(self, wrapwidth=78):
"""
Return the string representation of the entry.
"""
return _BaseEntry.__str__(self, wrapwidth)
# }}}
# class _POFileParser {{{
class _POFileParser(object):
"""
A finite state machine to parse efficiently and correctly po
file format.
"""
def __init__(self, fpath):
"""
Constructor.
**Keyword argument**:
- *fpath*: string, path to the po file
"""
self.fhandle = open(fpath, 'r')
self.instance = POFile(fpath=fpath)
self.transitions = {}
self.current_entry = POEntry()
self.current_state = 'ST'
self.current_token = None
# two memo flags used in handlers
self.msgstr_index = 0
self.entry_obsolete = 0
# Configure the state machine, by adding transitions.
# Signification of symbols:
# * ST: Beginning of the file (start)
# * HE: Header
# * TC: a translation comment
# * GC: a generated comment
        # * OC: a file/line occurrence
# * FL: a flags line
# * MI: a msgid
# * MP: a msgid plural
# * MS: a msgstr
# * MX: a msgstr plural
# * MC: a msgid or msgstr continuation line
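        # Example: the call self.add('TC', ['ST', 'HE'], 'HE') below means
        # that a translator-comment token seen at the start of the file (or
        # while already in the header) is processed by handle_he() and the
        # machine stays in state HE.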
all_ = ['ST', 'HE', 'GC', 'OC', 'FL', 'TC', 'MS', 'MP', 'MX', 'MI']
self.add('TC', ['ST', 'HE'], 'HE')
self.add('TC', ['GC', 'OC', 'FL', 'TC', 'MS', 'MP', 'MX', 'MI'], 'TC')
self.add('GC', all_, 'GC')
self.add('OC', all_, 'OC')
self.add('FL', all_, 'FL')
self.add('MI', ['ST', 'HE', 'GC', 'OC', 'FL', 'TC', 'MS', 'MX'], 'MI')
self.add('MP', ['TC', 'GC', 'MI'], 'MP')
self.add('MS', ['MI', 'MP', 'TC'], 'MS')
self.add('MX', ['MI', 'MX', 'MP', 'TC'], 'MX')
self.add('MC', ['MI', 'MP', 'MS', 'MX'], 'MC')
def parse(self):
"""
Run the state machine, parse the file line by line and call process()
with the current matched symbol.
"""
i, lastlen = 1, 0
for line in self.fhandle:
line = line.strip()
if line == '':
i = i+1
continue
if line[:3] == '#~ ':
line = line[3:]
self.entry_obsolete = 1
else:
self.entry_obsolete = 0
self.current_token = line
if line[:2] == '#:':
                # we are on an occurrences line
self.process('OC', i)
elif line[:7] == 'msgid "':
# we are on a msgid
self.process('MI', i)
elif line[:8] == 'msgstr "':
# we are on a msgstr
self.process('MS', i)
elif line[:1] == '"':
# we are on a continuation line or some metadata
self.process('MC', i)
elif line[:14] == 'msgid_plural "':
# we are on a msgid plural
self.process('MP', i)
elif line[:7] == 'msgstr[':
# we are on a msgstr plural
self.process('MX', i)
elif line[:3] == '#, ':
# we are on a flags line
self.process('FL', i)
elif line[:2] == '# ' or line == '#':
if line == '#': line = line + ' '
# we are on a translator comment line
self.process('TC', i)
elif line[:2] == '#.':
# we are on a generated comment line
self.process('GC', i)
i = i+1
if self.current_entry:
# since entries are added when another entry is found, we must add
# the last entry here (only if there are lines)
self.instance.append(self.current_entry)
# before returning the instance, check if there's metadata and if
# so extract it in a dict
firstentry = self.instance[0]
if firstentry.msgid == '': # metadata found
# remove the entry
firstentry = self.instance.pop(0)
self.instance.metadata_is_fuzzy = firstentry.flags
key = None
for msg in firstentry.msgstr.splitlines():
try:
key, val = msg.split(':', 1)
self.instance.metadata[key] = val.strip()
except:
if key is not None:
self.instance.metadata[key] += '\n'+ msg.strip()
# close opened file
self.fhandle.close()
return self.instance
def add(self, symbol, states, next_state):
"""
Add a transition to the state machine.
        Keyword arguments:
symbol -- string, the matched token (two chars symbol)
states -- list, a list of states (two chars symbols)
next_state -- the next state the fsm will have after the action
"""
for state in states:
action = getattr(self, 'handle_%s' % next_state.lower())
self.transitions[(symbol, state)] = (action, next_state)
def process(self, symbol, linenum):
"""
Process the transition corresponding to the current state and the
symbol provided.
        Keyword arguments:
symbol -- string, the matched token (two chars symbol)
linenum -- integer, the current line number of the parsed file
"""
try:
(action, state) = self.transitions[(symbol, self.current_state)]
if action():
self.current_state = state
except Exception, exc:
raise IOError('Syntax error in po file (line %s)' % linenum)
# state handlers
def handle_he(self):
"""Handle a header comment."""
if self.instance.header != '':
self.instance.header += '\n'
self.instance.header += self.current_token[2:]
return 1
def handle_tc(self):
"""Handle a translator comment."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
if self.current_entry.tcomment != '':
self.current_entry.tcomment += '\n'
self.current_entry.tcomment += self.current_token[2:]
return True
def handle_gc(self):
"""Handle a generated comment."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
if self.current_entry.comment != '':
self.current_entry.comment += '\n'
self.current_entry.comment += self.current_token[3:]
return True
def handle_oc(self):
"""Handle a file:num occurence."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
occurrences = self.current_token[3:].split()
for occurrence in occurrences:
if occurrence != '':
try:
fil, line = occurrence.split(':')
if not line.isdigit():
fil = fil + line
line = ''
self.current_entry.occurrences.append((fil, line))
except:
self.current_entry.occurrences.append((occurrence, ''))
return True
def handle_fl(self):
"""Handle a flags line."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
self.current_entry.flags += self.current_token[3:].split(', ')
return True
def handle_mi(self):
"""Handle a msgid."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
self.current_entry.obsolete = self.entry_obsolete
self.current_entry.msgid = unescape(self.current_token[7:-1])
return True
def handle_mp(self):
"""Handle a msgid plural."""
self.current_entry.msgid_plural = unescape(self.current_token[14:-1])
return True
def handle_ms(self):
"""Handle a msgstr."""
self.current_entry.msgstr = unescape(self.current_token[8:-1])
return True
def handle_mx(self):
"""Handle a msgstr plural."""
index, value = self.current_token[7], self.current_token[11:-1]
self.current_entry.msgstr_plural[index] = unescape(value)
self.msgstr_index = index
return True
def handle_mc(self):
"""Handle a msgid or msgstr continuation line."""
if self.current_state == 'MI':
self.current_entry.msgid += unescape(self.current_token[1:-1])
elif self.current_state == 'MP':
self.current_entry.msgid_plural += \
unescape(self.current_token[1:-1])
elif self.current_state == 'MS':
self.current_entry.msgstr += unescape(self.current_token[1:-1])
elif self.current_state == 'MX':
msgstr = self.current_entry.msgstr_plural[self.msgstr_index] +\
unescape(self.current_token[1:-1])
self.current_entry.msgstr_plural[self.msgstr_index] = msgstr
# don't change the current state
return False
# }}}
# class _MOFileParser {{{
class _MOFileParser(object):
"""
A class to parse binary mo files.
"""
BIG_ENDIAN = 0xde120495
LITTLE_ENDIAN = 0x950412de
def __init__(self, fpath):
"""_MOFileParser constructor."""
self.fhandle = open(fpath, 'rb')
self.instance = MOFile(fpath)
def parse_magicnumber(self):
"""
Parse the magic number and raise an exception if not valid.
"""
def parse(self):
"""
Build the instance with the file handle provided in the
constructor.
"""
magic_number = self._readbinary('<I', 4)
if magic_number == self.LITTLE_ENDIAN:
ii = '<II'
elif magic_number == self.BIG_ENDIAN:
ii = '>II'
else:
raise IOError('Invalid mo file, magic number is incorrect !')
self.instance.magic_number = magic_number
# parse the version number and the number of strings
self.instance.version, numofstrings = self._readbinary(ii, 8)
# original strings and translation strings hash table offset
msgids_hash_offset, msgstrs_hash_offset = self._readbinary(ii, 8)
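        # (despite the variable names, these are the file offsets of the
        # msgid and msgstr index tables -- length/offset pairs -- not of the
        # optional hash table)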
# move to msgid hash table and read length and offset of msgids
self.fhandle.seek(msgids_hash_offset)
msgids_index = []
for i in range(numofstrings):
msgids_index.append(self._readbinary(ii, 8))
# move to msgstr hash table and read length and offset of msgstrs
self.fhandle.seek(msgstrs_hash_offset)
msgstrs_index = []
for i in range(numofstrings):
msgstrs_index.append(self._readbinary(ii, 8))
# build entries
for i in range(numofstrings):
self.fhandle.seek(msgids_index[i][1])
msgid = self.fhandle.read(msgids_index[i][0])
self.fhandle.seek(msgstrs_index[i][1])
msgstr = self.fhandle.read(msgstrs_index[i][0])
if i == 0: # metadata
raw_metadata, metadata = msgstr.split('\n'), {}
for line in raw_metadata:
tokens = line.split(':', 1)
if tokens[0] != '':
try:
metadata[tokens[0]] = tokens[1].strip()
except IndexError:
metadata[tokens[0]] = ''
self.instance.metadata = metadata
continue
entry = MOEntry(msgid=msgid, msgstr=msgstr)
self.instance.append(entry)
# close opened file
self.fhandle.close()
return self.instance
def _readbinary(self, fmt, numbytes):
"""
        Private method that unpacks n bytes of data using format <fmt>.
        It returns a tuple, or a single value if the tuple has length 1.
"""
bytes = self.fhandle.read(numbytes)
tup = struct.unpack(fmt, bytes)
if len(tup) == 1:
return tup[0]
return tup
# }}}
# __main__ {{{
if __name__ == '__main__':
"""
**Main function**::
- to **test** the module just run: *python polib.py [-v]*
- to **profile** the module: *python polib.py -p <some_pofile.po>*
"""
import sys
if len(sys.argv) > 2 and sys.argv[1] == '-p':
def test(f):
if f.endswith('po'):
p = pofile(f)
else:
p = mofile(f)
s = str(p)
import profile
profile.run('test("'+sys.argv[2]+'")')
else:
import doctest
doctest.testmod()
# }}}
| 33.929145
| 155
| 0.534543
|
"""
**polib** allows you to manipulate, create and modify gettext files (pot, po
and mo files). You can load existing files, iterate through their entries,
add or modify entries, comments or metadata, etc., or create new po files
from scratch.
**polib** provides a simple and pythonic API, exporting only three
convenience functions (*pofile*, *mofile* and *detect_encoding*), and the
four core classes, *POFile*, *MOFile*, *POEntry* and *MOEntry* for creating
new files/entries.
**Basic example**:
>>> import polib
>>> # load an existing po file
>>> po = polib.pofile('tests/test_utf8.po')
>>> for entry in po:
... # do something with entry...
... pass
>>> # add an entry
>>> entry = polib.POEntry(msgid='Welcome', msgstr='Bienvenue')
>>> entry.occurrences = [('welcome.py', '12'), ('anotherfile.py', '34')]
>>> po.append(entry)
>>> # to save our modified po file:
>>> # po.save()
>>> # or you may want to compile the po file
>>> # po.save_as_mofile('tests/test_utf8.mo')
"""
__author__ = 'David JEAN LOUIS <izimobil@gmail.com>'
__version__ = '0.4.1'
__all__ = ['pofile', 'POFile', 'POEntry', 'mofile', 'MOFile', 'MOEntry',
'detect_encoding', 'escape', 'unescape']
import struct
import textwrap
import warnings
default_encoding = 'utf-8'
# function pofile() {{{
def pofile(fpath, **kwargs):
"""
    Convenience function that parses the po/pot file *fpath* and returns
a POFile instance.
**Keyword arguments**:
- *fpath*: string, full or relative path to the po/pot file to parse
- *wrapwidth*: integer, the wrap width, only useful when -w option was
passed to xgettext (optional, default to 78)
- *autodetect_encoding*: boolean, if set to False the function will
not try to detect the po file encoding (optional, default to True)
- *encoding*: string, an encoding, only relevant if autodetect_encoding
is set to False
**Example**:
>>> import polib
>>> po = polib.pofile('tests/test_weird_occurrences.po')
>>> po #doctest: +ELLIPSIS
<POFile instance at ...>
>>> import os, tempfile
>>> for fname in ['test_iso-8859-15.po', 'test_utf8.po']:
... orig_po = polib.pofile('tests/'+fname)
... tmpf = tempfile.NamedTemporaryFile().name
... orig_po.save(tmpf)
... try:
... new_po = polib.pofile(tmpf)
... for old, new in zip(orig_po, new_po):
... if old.msgid != new.msgid:
... old.msgid
... new.msgid
... if old.msgstr != new.msgstr:
... old.msgid
... new.msgid
... finally:
... os.unlink(tmpf)
"""
if kwargs.get('autodetect_encoding', True) == True:
enc = detect_encoding(fpath)
else:
enc = kwargs.get('encoding', default_encoding)
parser = _POFileParser(fpath)
instance = parser.parse()
instance.wrapwidth = kwargs.get('wrapwidth', 78)
instance.encoding = enc
return instance
# }}}
# function mofile() {{{
def mofile(fpath, **kwargs):
"""
    Convenience function that parses the mo file *fpath* and returns
a MOFile instance.
**Keyword arguments**:
- *fpath*: string, full or relative path to the mo file to parse
- *wrapwidth*: integer, the wrap width, only useful when -w option was
passed to xgettext to generate the po file that was used to format
the mo file (optional, default to 78)
- *autodetect_encoding*: boolean, if set to False the function will
not try to detect the po file encoding (optional, default to True)
- *encoding*: string, an encoding, only relevant if autodetect_encoding
is set to False
**Example**:
>>> import polib
>>> mo = polib.mofile('tests/test_utf8.mo')
>>> mo #doctest: +ELLIPSIS
<MOFile instance at ...>
>>> import os, tempfile
>>> for fname in ['test_iso-8859-15.mo', 'test_utf8.mo']:
... orig_mo = polib.mofile('tests/'+fname)
... tmpf = tempfile.NamedTemporaryFile().name
... orig_mo.save(tmpf)
... try:
... new_mo = polib.mofile(tmpf)
... for old, new in zip(orig_mo, new_mo):
... if old.msgid != new.msgid:
... old.msgstr
... new.msgstr
... finally:
... os.unlink(tmpf)
"""
if kwargs.get('autodetect_encoding', True) == True:
enc = detect_encoding(fpath)
else:
enc = kwargs.get('encoding', default_encoding)
parser = _MOFileParser(fpath)
instance = parser.parse()
instance.wrapwidth = kwargs.get('wrapwidth', 78)
instance.encoding = enc
return instance
# }}}
# function detect_encoding() {{{
def detect_encoding(fpath):
"""
Try to detect the encoding used by the file *fpath*. The function will
return polib default *encoding* if it's unable to detect it.
**Keyword argument**:
- *fpath*: string, full or relative path to the mo file to parse.
**Examples**:
>>> print(detect_encoding('tests/test_noencoding.po'))
utf-8
>>> print(detect_encoding('tests/test_utf8.po'))
UTF-8
>>> print(detect_encoding('tests/test_utf8.mo'))
UTF-8
>>> print(detect_encoding('tests/test_iso-8859-15.po'))
ISO_8859-15
>>> print(detect_encoding('tests/test_iso-8859-15.mo'))
ISO_8859-15
"""
import re
rx = re.compile(r'"?Content-Type:.+? charset=([\w_\-:\.]+)')
f = open(fpath)
for l in f:
match = rx.search(l)
if match:
f.close()
return match.group(1).strip()
f.close()
return default_encoding
# }}}
# function escape() {{{
def escape(st):
"""
Escape special chars and return the given string *st*.
**Examples**:
>>> escape('\\t and \\n and \\r and " and \\\\')
'\\\\t and \\\\n and \\\\r and \\\\" and \\\\\\\\'
"""
st = st.replace('\\', r'\\')
st = st.replace('\t', r'\t')
st = st.replace('\r', r'\r')
st = st.replace('\n', r'\n')
st = st.replace('\"', r'\"')
return st
# }}}
# function unescape() {{{
def unescape(st):
"""
Unescape special chars and return the given string *st*.
**Examples**:
>>> unescape('\\\\t and \\\\n and \\\\r and \\\\" and \\\\\\\\')
'\\t and \\n and \\r and " and \\\\'
"""
st = st.replace(r'\"', '"')
st = st.replace(r'\n', '\n')
st = st.replace(r'\r', '\r')
st = st.replace(r'\t', '\t')
st = st.replace(r'\\', '\\')
return st
# }}}
# class _BaseFile {{{
class _BaseFile(list):
"""
Common parent class for POFile and MOFile classes.
    This class must **not** be instantiated directly.
"""
def __init__(self, fpath=None, wrapwidth=78, encoding=default_encoding):
"""
Constructor.
**Keyword arguments**:
- *fpath*: string, path to po or mo file
- *wrapwidth*: integer, the wrap width, only useful when -w option
was passed to xgettext to generate the po file that was used to
format the mo file, default to 78 (optional).
"""
list.__init__(self)
# the opened file handle
self.fpath = fpath
# the width at which lines should be wrapped
self.wrapwidth = wrapwidth
# the file encoding
self.encoding = encoding
# header
self.header = ''
# both po and mo files have metadata
self.metadata = {}
self.metadata_is_fuzzy = 0
def __str__(self):
"""String representation of the file."""
ret = []
entries = [self.metadata_as_entry()] + \
[e for e in self if not e.obsolete]
for entry in entries:
ret.append(entry.__str__(self.wrapwidth))
for entry in self.obsolete_entries():
ret.append(entry.__str__(self.wrapwidth))
return '\n'.join(ret)
def __repr__(self):
"""Return the official string representation of the object."""
return '<%s instance at %x>' % (self.__class__.__name__, id(self))
def metadata_as_entry(self):
"""Return the metadata as an entry"""
e = POEntry(msgid='')
mdata = self.ordered_metadata()
if mdata:
strs = []
for name, value in mdata:
# Strip whitespace off each line in a multi-line entry
value = '\n'.join([v.strip() for v in value.split('\n')])
strs.append('%s: %s' % (name, value))
e.msgstr = '\n'.join(strs) + '\n'
return e
def save(self, fpath=None, repr_method='__str__'):
"""
Save the po file to file *fpath* if no file handle exists for
the object. If there's already an open file and no fpath is
provided, then the existing file is rewritten with the modified
data.
**Keyword arguments**:
- *fpath*: string, full or relative path to the file.
- *repr_method*: string, the method to use for output.
"""
if self.fpath is None and fpath is None:
raise IOError('You must provide a file path to save() method')
contents = getattr(self, repr_method)()
if fpath is None:
fpath = self.fpath
mode = 'w'
if repr_method == 'to_binary':
mode += 'b'
fhandle = open(fpath, mode)
fhandle.write(contents)
fhandle.close()
def find(self, st, by='msgid'):
"""
        Find the entry whose msgid (or the property identified by the *by*
        attribute) matches the string *st*.
**Keyword arguments**:
- *st*: string, the string to search for
- *by*: string, the comparison attribute
**Examples**:
>>> po = pofile('tests/test_utf8.po')
>>> entry = po.find('Thursday')
>>> entry.msgstr
'Jueves'
>>> entry = po.find('Some unexistant msgid')
>>> entry is None
True
>>> entry = po.find('Jueves', 'msgstr')
>>> entry.msgid
'Thursday'
"""
try:
return [e for e in self if getattr(e, by) == st][0]
except IndexError:
return None
def ordered_metadata(self):
"""
        Convenience method that returns the metadata ordered. The return
        value is a list of tuples (metadata name, metadata value).
"""
# copy the dict first
metadata = self.metadata.copy()
data_order = [
'Project-Id-Version',
'Report-Msgid-Bugs-To',
'POT-Creation-Date',
'PO-Revision-Date',
'Last-Translator',
'Language-Team',
'MIME-Version',
'Content-Type',
'Content-Transfer-Encoding'
]
ordered_data = []
for data in data_order:
try:
value = metadata.pop(data)
ordered_data.append((data, value))
except KeyError:
pass
        # the rest of the metadata is only sorted alphabetically; there are
        # no specs for this
        keys = sorted(metadata.keys())
for data in keys:
value = metadata[data]
ordered_data.append((data, value))
return ordered_data
def to_binary(self):
"""Return the mofile binary representation."""
import struct
import array
output = ''
offsets = []
ids = strs = ''
entries = self.translated_entries()
# the keys are sorted in the .mo file
def cmp(_self, other):
if _self.msgid > other.msgid:
return 1
elif _self.msgid < other.msgid:
return -1
else:
return 0
entries.sort(cmp)
# add metadata entry
mentry = self.metadata_as_entry()
mentry.msgstr = mentry.msgstr.replace('\\n', '').lstrip() + '\n'
entries = [mentry] + entries
entries_len = len(entries)
for e in entries:
# For each string, we need size and file offset. Each string is
# NUL terminated; the NUL does not count into the size.
msgid = e._decode(e.msgid)
msgstr = e._decode(e.msgstr)
offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
ids += msgid + '\0'
strs += msgstr + '\0'
# The header is 7 32-bit unsigned integers.
keystart = 7*4+16*entries_len
# and the values start after the keys
valuestart = keystart + len(ids)
koffsets = []
voffsets = []
# The string table first has the list of keys, then the list of values.
# Each entry has first the size of the string, then the file offset.
for o1, l1, o2, l2 in offsets:
koffsets += [l1, o1+keystart]
voffsets += [l2, o2+valuestart]
offsets = koffsets + voffsets
output = struct.pack("IIIIIII",
0x950412de, # Magic number
0, # Version
entries_len, # # of entries
7*4, # start of key index
7*4+entries_len*8, # start of value index
0, 0) # size and offset of hash table
output += array.array("I", offsets).tostring()
output += ids
output += strs
return output
# }}}
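# Usage sketch (illustrative only): POFile.save_as_mofile() below funnels
# through _BaseFile.save() with repr_method='to_binary', so compiling a
# catalog typically looks like this (assuming a 'messages.po' file exists):
#
#     po = pofile('messages.po')
#     po.save_as_mofile('messages.mo')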
# class POFile {{{
class POFile(_BaseFile):
'''
Po (or Pot) file reader/writer.
    POFile objects inherit the list object's methods.
**Example**:
>>> po = POFile()
>>> entry1 = POEntry(
... msgid="Some english text",
... msgstr="Un texte en anglais"
... )
>>> entry1.occurrences = [('testfile', 12),('another_file', 1)]
>>> entry1.comment = "Some useful comment"
>>> entry2 = POEntry(
... msgid="Peace in some languages",
... msgstr="Pace سلام שלום Hasîtî 和平"
... )
>>> entry2.occurrences = [('testfile', 15),('another_file', 5)]
>>> entry2.comment = "Another useful comment"
>>> entry3 = POEntry(
... msgid='Some entry with quotes " \\"',
... msgstr='Un message unicode avec des quotes " \\"'
... )
>>> entry3.comment = "Test string quoting"
>>> po.append(entry1)
>>> po.append(entry2)
>>> po.append(entry3)
>>> po.header = "Some Header"
>>> print(po)
# Some Header
msgid ""
msgstr ""
<BLANKLINE>
#. Some useful comment
#: testfile:12 another_file:1
msgid "Some english text"
msgstr "Un texte en anglais"
<BLANKLINE>
#. Another useful comment
#: testfile:15 another_file:5
msgid "Peace in some languages"
msgstr "Pace سلام שלום Hasîtî 和平"
<BLANKLINE>
#. Test string quoting
msgid "Some entry with quotes \\" \\""
msgstr "Un message unicode avec des quotes \\" \\""
<BLANKLINE>
'''
def __str__(self):
"""Return the string representation of the po file"""
ret, headers = '', self.header.split('\n')
for header in headers:
if header[:1] in [',', ':']:
ret += '#%s\n' % header
else:
ret += '# %s\n' % header
return ret + _BaseFile.__str__(self)
def save_as_mofile(self, fpath):
"""
Save the binary representation of the file to *fpath*.
**Keyword arguments**:
- *fpath*: string, full or relative path to the file.
"""
_BaseFile.save(self, fpath, 'to_binary')
def percent_translated(self):
"""
        Convenience method that returns the percentage of translated
        messages.
**Example**:
>>> import polib
>>> po = polib.pofile('tests/test_pofile_helpers.po')
>>> po.percent_translated()
50
>>> po = POFile()
>>> po.percent_translated()
100
"""
total = len([e for e in self if not e.obsolete])
if total == 0:
return 100
translated = len(self.translated_entries())
return int((100.00 / float(total)) * translated)
def translated_entries(self):
"""
        Convenience method that returns a list of translated entries.
**Example**:
>>> import polib
>>> po = polib.pofile('tests/test_pofile_helpers.po')
>>> len(po.translated_entries())
6
"""
return [e for e in self if e.translated() and not e.obsolete]
def untranslated_entries(self):
"""
        Convenience method that returns a list of untranslated entries.
**Example**:
>>> import polib
>>> po = polib.pofile('tests/test_pofile_helpers.po')
>>> len(po.untranslated_entries())
6
"""
return [e for e in self if not e.translated() and not e.obsolete]
def fuzzy_entries(self):
"""
        Convenience method that returns the list of 'fuzzy' entries.
**Example**:
>>> import polib
>>> po = polib.pofile('tests/test_pofile_helpers.po')
>>> len(po.fuzzy_entries())
2
"""
return [e for e in self if 'fuzzy' in e.flags]
def obsolete_entries(self):
"""
        Convenience method that returns the list of obsolete entries.
**Example**:
>>> import polib
>>> po = polib.pofile('tests/test_pofile_helpers.po')
>>> len(po.obsolete_entries())
4
"""
return [e for e in self if e.obsolete]
def merge(self, refpot):
"""
XXX this could not work if encodings are different, needs thinking
and general refactoring of how polib handles encoding...
        Convenience method that merges the current pofile with the provided
        pot file. It behaves exactly as the gettext msgmerge utility:
- comments of this file will be preserved, but extracted comments
and occurrences will be discarded
- any translations or comments in the file will be discarded,
however dot comments and file positions will be preserved
**Keyword argument**:
- *refpot*: object POFile, the reference catalog.
**Example**:
>>> import polib
>>> refpot = polib.pofile('tests/test_merge.pot')
>>> po = polib.pofile('tests/test_merge_before.po')
>>> po.merge(refpot)
>>> expected_po = polib.pofile('tests/test_merge_after.po')
>>> str(po) == str(expected_po)
True
"""
for entry in refpot:
e = self.find(entry.msgid)
if e is None:
e = POEntry()
self.append(e)
e.merge(entry)
# ok, now we must "obsolete" entries that are not in the refpot
# anymore
for entry in self:
if refpot.find(entry.msgid) is None:
entry.obsolete = True
# }}}
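# Usage sketch (illustrative only, reusing the helper catalog referenced by
# the doctests above):
#
#     po = pofile('tests/test_pofile_helpers.po')
#     po.percent_translated()            # -> 50
#     len(po.untranslated_entries())     # -> 6
#     len(po.fuzzy_entries())            # -> 2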
# class MOFile {{{
class MOFile(_BaseFile):
'''
Mo file reader/writer.
    MOFile objects inherit the list object's methods.
**Example**:
>>> mo = MOFile()
>>> entry1 = POEntry(
... msgid="Some english text",
... msgstr="Un texte en anglais"
... )
>>> entry2 = POEntry(
... msgid="I need my dirty cheese",
... msgstr="Je veux mon sale fromage"
... )
>>> entry3 = MOEntry(
... msgid='Some entry with quotes " \\"',
... msgstr='Un message unicode avec des quotes " \\"'
... )
>>> mo.append(entry1)
>>> mo.append(entry2)
>>> mo.append(entry3)
>>> print(mo)
msgid ""
msgstr ""
<BLANKLINE>
msgid "Some english text"
msgstr "Un texte en anglais"
<BLANKLINE>
msgid "I need my dirty cheese"
msgstr "Je veux mon sale fromage"
<BLANKLINE>
msgid "Some entry with quotes \\" \\""
msgstr "Un message unicode avec des quotes \\" \\""
<BLANKLINE>
'''
def __init__(self, fpath=None, wrapwidth=78):
"""
MOFile constructor.
        See _BaseFile.__init__.
"""
_BaseFile.__init__(self, fpath, wrapwidth)
self.magic_number = None
self.version = 0
def save_as_pofile(self, fpath):
"""
Save the string representation of the file to *fpath*.
**Keyword argument**:
- *fpath*: string, full or relative path to the file.
"""
_BaseFile.save(self, fpath)
def save(self, fpath):
"""
Save the binary representation of the file to *fpath*.
**Keyword argument**:
- *fpath*: string, full or relative path to the file.
"""
_BaseFile.save(self, fpath, 'to_binary')
def percent_translated(self):
"""
Convenience method to keep the same interface with POFile instances.
"""
return 100
def translated_entries(self):
"""
Convenience method to keep the same interface with POFile instances.
"""
return self
def untranslated_entries(self):
"""
Convenience method to keep the same interface with POFile instances.
"""
return []
def fuzzy_entries(self):
"""
Convenience method to keep the same interface with POFile instances.
"""
return []
def obsolete_entries(self):
"""
Convenience method to keep the same interface with POFile instances.
"""
return []
# }}}
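# Usage sketch (illustrative only, assuming 'tests/test_utf8.mo' as in the
# module doctests): a compiled catalog can be dumped back to po format with
# MOFile.save_as_pofile():
#
#     mo = mofile('tests/test_utf8.mo')
#     mo.save_as_pofile('test_utf8_recovered.po')   # hypothetical output path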
# class _BaseEntry {{{
class _BaseEntry(object):
"""
Base class for POEntry or MOEntry objects.
    This class must *not* be instantiated directly.
"""
def __init__(self, *args, **kwargs):
"""Base Entry constructor."""
self.msgid = kwargs.get('msgid', '')
self.msgstr = kwargs.get('msgstr', '')
self.msgid_plural = kwargs.get('msgid_plural', '')
self.msgstr_plural = kwargs.get('msgstr_plural', {})
self.obsolete = kwargs.get('obsolete', False)
self.encoding = kwargs.get('encoding', default_encoding)
def __repr__(self):
"""Return the official string representation of the object."""
return '<%s instance at %x>' % (self.__class__.__name__, id(self))
def __str__(self, wrapwidth=78):
"""
Common string representation of the POEntry and MOEntry
objects.
"""
if self.obsolete:
delflag = '#~ '
else:
delflag = ''
# write the msgid
ret = []
ret += self._str_field("msgid", delflag, "", self.msgid)
# write the msgid_plural if any
if self.msgid_plural:
ret += self._str_field("msgid_plural", delflag, "", self.msgid_plural)
if self.msgstr_plural:
# write the msgstr_plural if any
msgstrs = self.msgstr_plural
keys = list(msgstrs)
keys.sort()
for index in keys:
msgstr = msgstrs[index]
plural_index = '[%s]' % index
ret += self._str_field("msgstr", delflag, plural_index, msgstr)
else:
# otherwise write the msgstr
ret += self._str_field("msgstr", delflag, "", self.msgstr)
ret.append('')
return '\n'.join(ret)
def _str_field(self, fieldname, delflag, plural_index, field):
field = self._decode(field)
lines = field.splitlines(True) # keep line breaks in strings
# potentially, we could do line-wrapping here, but textwrap.wrap
# treats whitespace too carelessly for us to use it.
if len(lines) > 1:
lines = ['']+lines # start with initial empty line
else:
lines = [field] # needed for the empty string case
ret = ['%s%s%s "%s"' % (delflag, fieldname, plural_index,
escape(lines.pop(0)))]
for mstr in lines:
ret.append('%s"%s"' % (delflag, escape(mstr)))
return ret
def _decode(self, st):
try:
if isinstance(st, unicode):
st = st.encode(self.encoding)
return st
except:
return st
# }}}
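# For orientation: _BaseEntry.__str__() is what produces the msgid/msgstr
# blocks shown in the doctests, e.g. (illustrative):
#
#     entry = POEntry(msgid='Welcome', msgstr='Bienvenue')
#     str(entry)   # -> 'msgid "Welcome"\nmsgstr "Bienvenue"\n'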
# class POEntry {{{
class POEntry(_BaseEntry):
"""
Represents a po file entry.
**Examples**:
>>> entry = POEntry(msgid='Welcome', msgstr='Bienvenue')
>>> entry.occurrences = [('welcome.py', 12), ('anotherfile.py', 34)]
>>> print(entry)
#: welcome.py:12 anotherfile.py:34
msgid "Welcome"
msgstr "Bienvenue"
<BLANKLINE>
>>> entry = POEntry()
>>> entry.occurrences = [('src/some-very-long-filename-that-should-not-be-wrapped-even-if-it-is-larger-than-the-wrap-limit.c', 32), ('src/eggs.c', 45)]
>>> entry.comment = 'A plural translation. This is a very very very long line please do not wrap, this is just for testing comment wrapping...'
>>> entry.tcomment = 'A plural translation. This is a very very very long line please do not wrap, this is just for testing comment wrapping...'
>>> entry.flags.append('c-format')
>>> entry.msgid = 'I have spam but no egg !'
>>> entry.msgid_plural = 'I have spam and %d eggs !'
>>> entry.msgstr_plural[0] = "J'ai du jambon mais aucun oeuf !"
>>> entry.msgstr_plural[1] = "J'ai du jambon et %d oeufs !"
>>> print(entry)
#. A plural translation. This is a very very very long line please do not
#. wrap, this is just for testing comment wrapping...
# A plural translation. This is a very very very long line please do not wrap,
# this is just for testing comment wrapping...
#: src/some-very-long-filename-that-should-not-be-wrapped-even-if-it-is-larger-than-the-wrap-limit.c:32
#: src/eggs.c:45
#, c-format
msgid "I have spam but no egg !"
msgid_plural "I have spam and %d eggs !"
msgstr[0] "J'ai du jambon mais aucun oeuf !"
msgstr[1] "J'ai du jambon et %d oeufs !"
<BLANKLINE>
"""
def __init__(self, *args, **kwargs):
"""POEntry constructor."""
_BaseEntry.__init__(self, *args, **kwargs)
self.comment = kwargs.get('comment', '')
self.tcomment = kwargs.get('tcomment', '')
self.occurrences = kwargs.get('occurrences', [])
self.flags = kwargs.get('flags', [])
def __str__(self, wrapwidth=78):
"""
Return the string representation of the entry.
"""
if self.obsolete:
return _BaseEntry.__str__(self)
ret = []
# comment first, if any (with text wrapping as xgettext does)
if self.comment != '':
comments = self._decode(self.comment).split('\n')
for comment in comments:
if wrapwidth > 0 and len(comment) > wrapwidth-3:
ret += textwrap.wrap(comment, wrapwidth,
initial_indent='#. ',
subsequent_indent='#. ',
break_long_words=False)
else:
ret.append('#. %s' % comment)
# translator comment, if any (with text wrapping as xgettext does)
if self.tcomment != '':
tcomments = self._decode(self.tcomment).split('\n')
for tcomment in tcomments:
if wrapwidth > 0 and len(tcomment) > wrapwidth-2:
ret += textwrap.wrap(tcomment, wrapwidth,
initial_indent='# ',
subsequent_indent='# ',
break_long_words=False)
else:
ret.append('# %s' % tcomment)
# occurrences (with text wrapping as xgettext does)
if self.occurrences:
filelist = []
for fpath, lineno in self.occurrences:
if lineno:
filelist.append('%s:%s' % (self._decode(fpath), lineno))
else:
filelist.append('%s' % (self._decode(fpath)))
filestr = ' '.join(filelist)
if wrapwidth > 0 and len(filestr)+3 > wrapwidth:
                # XXX textwrap splits words that contain a hyphen, which is
                # not what we want for filenames, so the dirty hack is to
                # temporarily replace hyphens with a char that a file cannot
                # contain, like "*"
lines = textwrap.wrap(filestr.replace('-', '*'),
wrapwidth,
initial_indent='#: ',
subsequent_indent='#: ',
break_long_words=False)
# end of the replace hack
for line in lines:
ret.append(line.replace('*', '-'))
else:
ret.append('#: '+filestr)
# flags
if self.flags:
flags = []
for flag in self.flags:
flags.append(flag)
ret.append('#, %s' % ', '.join(flags))
ret.append(_BaseEntry.__str__(self))
return '\n'.join(ret)
def __cmp__(self, other):
'''
Called by comparison operations if rich comparison is not defined.
**Tests**:
>>> a = POEntry(msgid='a', occurrences=[('b.py', 1), ('b.py', 3)])
>>> b = POEntry(msgid='b', occurrences=[('b.py', 1), ('b.py', 3)])
>>> c1 = POEntry(msgid='c1', occurrences=[('a.py', 1), ('b.py', 1)])
>>> c2 = POEntry(msgid='c2', occurrences=[('a.py', 1), ('a.py', 3)])
>>> po = POFile()
>>> po.append(a)
>>> po.append(b)
>>> po.append(c1)
>>> po.append(c2)
>>> po.sort()
>>> print(po)
#
msgid ""
msgstr ""
<BLANKLINE>
#: a.py:1 a.py:3
msgid "c2"
msgstr ""
<BLANKLINE>
#: a.py:1 b.py:1
msgid "c1"
msgstr ""
<BLANKLINE>
#: b.py:1 b.py:3
msgid "a"
msgstr ""
<BLANKLINE>
#: b.py:1 b.py:3
msgid "b"
msgstr ""
<BLANKLINE>
'''
def compare_occurrences(a, b):
"""
Compare an entry occurrence with another one.
"""
            if a[0] != b[0]:
                if a[0] > b[0]:
                    return 1
                return -1
            if a[1] != b[1]:
                if a[1] > b[1]:
                    return 1
                return -1
            return 0
# First: Obsolete test
if self.obsolete != other.obsolete:
if self.obsolete:
return -1
else:
return 1
# Work on a copy to protect original
occ1 = self.occurrences[:]
occ2 = other.occurrences[:]
# Sorting using compare method
occ1.sort(compare_occurrences)
occ2.sort(compare_occurrences)
# Comparing sorted occurrences
pos = 0
for entry1 in occ1:
try:
entry2 = occ2[pos]
except IndexError:
return 1
pos = pos + 1
if entry1[0] != entry2[0]:
if entry1[0] > entry2[0]:
return 1
else:
return -1
if entry1[1] != entry2[1]:
if entry1[1] > entry2[1]:
return 1
else:
return -1
        # Finally: Compare message ID
        if self.msgid > other.msgid:
            return 1
        elif self.msgid < other.msgid:
            return -1
        return 0
def translated(self):
"""
Return True if the entry has been translated or False.
"""
if self.obsolete or 'fuzzy' in self.flags:
return False
if self.msgstr != '':
return True
if self.msgstr_plural:
for pos in self.msgstr_plural:
if self.msgstr_plural[pos] == '':
return False
return True
return False
def merge(self, other):
"""
Merge the current entry with the given pot entry.
"""
self.msgid = other.msgid
self.occurrences = other.occurrences
self.comment = other.comment
self.flags = other.flags
self.msgid_plural = other.msgid_plural
if other.msgstr_plural:
for pos in other.msgstr_plural:
try:
# keep existing translation at pos if any
self.msgstr_plural[pos]
except KeyError:
self.msgstr_plural[pos] = ''
# }}}
# class MOEntry {{{
class MOEntry(_BaseEntry):
"""
Represents a mo file entry.
**Examples**:
>>> entry = MOEntry()
>>> entry.msgid = 'translate me !'
>>> entry.msgstr = 'traduisez moi !'
>>> print(entry)
msgid "translate me !"
msgstr "traduisez moi !"
<BLANKLINE>
"""
def __str__(self, wrapwidth=78):
"""
Return the string representation of the entry.
"""
return _BaseEntry.__str__(self, wrapwidth)
# }}}
# class _POFileParser {{{
class _POFileParser(object):
"""
    A finite state machine to parse the po file format efficiently
    and correctly.
"""
def __init__(self, fpath):
"""
Constructor.
**Keyword argument**:
- *fpath*: string, path to the po file
"""
self.fhandle = open(fpath, 'r')
self.instance = POFile(fpath=fpath)
self.transitions = {}
self.current_entry = POEntry()
self.current_state = 'ST'
self.current_token = None
# two memo flags used in handlers
self.msgstr_index = 0
self.entry_obsolete = 0
# Configure the state machine, by adding transitions.
# Signification of symbols:
# * ST: Beginning of the file (start)
# * HE: Header
# * TC: a translation comment
# * GC: a generated comment
        # * OC: a file/line occurrence
# * FL: a flags line
# * MI: a msgid
# * MP: a msgid plural
# * MS: a msgstr
# * MX: a msgstr plural
# * MC: a msgid or msgstr continuation line
all_ = ['ST', 'HE', 'GC', 'OC', 'FL', 'TC', 'MS', 'MP', 'MX', 'MI']
self.add('TC', ['ST', 'HE'], 'HE')
self.add('TC', ['GC', 'OC', 'FL', 'TC', 'MS', 'MP', 'MX', 'MI'], 'TC')
self.add('GC', all_, 'GC')
self.add('OC', all_, 'OC')
self.add('FL', all_, 'FL')
self.add('MI', ['ST', 'HE', 'GC', 'OC', 'FL', 'TC', 'MS', 'MX'], 'MI')
self.add('MP', ['TC', 'GC', 'MI'], 'MP')
self.add('MS', ['MI', 'MP', 'TC'], 'MS')
self.add('MX', ['MI', 'MX', 'MP', 'TC'], 'MX')
self.add('MC', ['MI', 'MP', 'MS', 'MX'], 'MC')
def parse(self):
"""
Run the state machine, parse the file line by line and call process()
with the current matched symbol.
"""
i, lastlen = 1, 0
for line in self.fhandle:
line = line.strip()
if line == '':
i = i+1
continue
if line[:3] == '#~ ':
line = line[3:]
self.entry_obsolete = 1
else:
self.entry_obsolete = 0
self.current_token = line
if line[:2] == '#:':
                # we are on an occurrences line
self.process('OC', i)
elif line[:7] == 'msgid "':
self.process('MI', i)
elif line[:8] == 'msgstr "':
# we are on a msgstr
self.process('MS', i)
elif line[:1] == '"':
self.process('MC', i)
elif line[:14] == 'msgid_plural "':
# we are on a msgid plural
self.process('MP', i)
elif line[:7] == 'msgstr[':
# we are on a msgstr plural
self.process('MX', i)
elif line[:3] == '#, ':
# we are on a flags line
self.process('FL', i)
elif line[:2] == '# ' or line == '#':
if line == '#': line = line + ' '
# we are on a translator comment line
self.process('TC', i)
elif line[:2] == '#.':
# we are on a generated comment line
self.process('GC', i)
i = i+1
if self.current_entry:
# since entries are added when another entry is found, we must add
# the last entry here (only if there are lines)
self.instance.append(self.current_entry)
# before returning the instance, check if there's metadata and if
# so extract it in a dict
firstentry = self.instance[0]
if firstentry.msgid == '': # metadata found
# remove the entry
firstentry = self.instance.pop(0)
self.instance.metadata_is_fuzzy = firstentry.flags
key = None
for msg in firstentry.msgstr.splitlines():
try:
key, val = msg.split(':', 1)
self.instance.metadata[key] = val.strip()
except:
if key is not None:
self.instance.metadata[key] += '\n'+ msg.strip()
# close opened file
self.fhandle.close()
return self.instance
def add(self, symbol, states, next_state):
"""
Add a transition to the state machine.
        Keyword arguments:
symbol -- string, the matched token (two chars symbol)
states -- list, a list of states (two chars symbols)
next_state -- the next state the fsm will have after the action
"""
for state in states:
action = getattr(self, 'handle_%s' % next_state.lower())
self.transitions[(symbol, state)] = (action, next_state)
def process(self, symbol, linenum):
"""
Process the transition corresponding to the current state and the
symbol provided.
        Keyword arguments:
symbol -- string, the matched token (two chars symbol)
linenum -- integer, the current line number of the parsed file
"""
try:
(action, state) = self.transitions[(symbol, self.current_state)]
if action():
self.current_state = state
except Exception, exc:
raise IOError('Syntax error in po file (line %s)' % linenum)
# state handlers
def handle_he(self):
"""Handle a header comment."""
if self.instance.header != '':
self.instance.header += '\n'
self.instance.header += self.current_token[2:]
return 1
def handle_tc(self):
"""Handle a translator comment."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
if self.current_entry.tcomment != '':
self.current_entry.tcomment += '\n'
self.current_entry.tcomment += self.current_token[2:]
return True
def handle_gc(self):
"""Handle a generated comment."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
if self.current_entry.comment != '':
self.current_entry.comment += '\n'
self.current_entry.comment += self.current_token[3:]
return True
    def handle_oc(self):
        """Handle a file:num occurrence."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
occurrences = self.current_token[3:].split()
for occurrence in occurrences:
if occurrence != '':
try:
fil, line = occurrence.split(':')
if not line.isdigit():
fil = fil + line
line = ''
self.current_entry.occurrences.append((fil, line))
except:
self.current_entry.occurrences.append((occurrence, ''))
return True
def handle_fl(self):
"""Handle a flags line."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
self.current_entry.flags += self.current_token[3:].split(', ')
return True
def handle_mi(self):
"""Handle a msgid."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
self.current_entry.obsolete = self.entry_obsolete
self.current_entry.msgid = unescape(self.current_token[7:-1])
return True
def handle_mp(self):
"""Handle a msgid plural."""
self.current_entry.msgid_plural = unescape(self.current_token[14:-1])
return True
def handle_ms(self):
"""Handle a msgstr."""
self.current_entry.msgstr = unescape(self.current_token[8:-1])
return True
def handle_mx(self):
"""Handle a msgstr plural."""
index, value = self.current_token[7], self.current_token[11:-1]
self.current_entry.msgstr_plural[index] = unescape(value)
self.msgstr_index = index
return True
def handle_mc(self):
"""Handle a msgid or msgstr continuation line."""
if self.current_state == 'MI':
self.current_entry.msgid += unescape(self.current_token[1:-1])
elif self.current_state == 'MP':
self.current_entry.msgid_plural += \
unescape(self.current_token[1:-1])
elif self.current_state == 'MS':
self.current_entry.msgstr += unescape(self.current_token[1:-1])
elif self.current_state == 'MX':
msgstr = self.current_entry.msgstr_plural[self.msgstr_index] +\
unescape(self.current_token[1:-1])
self.current_entry.msgstr_plural[self.msgstr_index] = msgstr
# don't change the current state
return False
# }}}
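# For orientation: _POFileParser is an internal helper; pofile() instantiates
# it and returns the parsed POFile, mirroring how mofile() drives
# _MOFileParser above. A rough sketch (not meant to be called directly):
#
#     parser = _POFileParser('tests/test_utf8.po')
#     po = parser.parse()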
# class _MOFileParser {{{
class _MOFileParser(object):
"""
A class to parse binary mo files.
"""
BIG_ENDIAN = 0xde120495
LITTLE_ENDIAN = 0x950412de
def __init__(self, fpath):
"""_MOFileParser constructor."""
self.fhandle = open(fpath, 'rb')
self.instance = MOFile(fpath)
def parse_magicnumber(self):
"""
Parse the magic number and raise an exception if not valid.
"""
def parse(self):
"""
Build the instance with the file handle provided in the
constructor.
"""
magic_number = self._readbinary('<I', 4)
if magic_number == self.LITTLE_ENDIAN:
ii = '<II'
elif magic_number == self.BIG_ENDIAN:
ii = '>II'
else:
raise IOError('Invalid mo file, magic number is incorrect !')
self.instance.magic_number = magic_number
# parse the version number and the number of strings
self.instance.version, numofstrings = self._readbinary(ii, 8)
# original strings and translation strings hash table offset
msgids_hash_offset, msgstrs_hash_offset = self._readbinary(ii, 8)
# move to msgid hash table and read length and offset of msgids
self.fhandle.seek(msgids_hash_offset)
msgids_index = []
for i in range(numofstrings):
msgids_index.append(self._readbinary(ii, 8))
# move to msgstr hash table and read length and offset of msgstrs
self.fhandle.seek(msgstrs_hash_offset)
msgstrs_index = []
for i in range(numofstrings):
msgstrs_index.append(self._readbinary(ii, 8))
# build entries
for i in range(numofstrings):
self.fhandle.seek(msgids_index[i][1])
msgid = self.fhandle.read(msgids_index[i][0])
self.fhandle.seek(msgstrs_index[i][1])
msgstr = self.fhandle.read(msgstrs_index[i][0])
if i == 0: # metadata
raw_metadata, metadata = msgstr.split('\n'), {}
for line in raw_metadata:
tokens = line.split(':', 1)
if tokens[0] != '':
try:
metadata[tokens[0]] = tokens[1].strip()
except IndexError:
metadata[tokens[0]] = ''
self.instance.metadata = metadata
continue
entry = MOEntry(msgid=msgid, msgstr=msgstr)
self.instance.append(entry)
# close opened file
self.fhandle.close()
return self.instance
def _readbinary(self, fmt, numbytes):
"""
        Private method that unpacks n bytes of data using format <fmt>.
        It returns a tuple, or a single value if the tuple length is 1.
"""
bytes = self.fhandle.read(numbytes)
tup = struct.unpack(fmt, bytes)
if len(tup) == 1:
return tup[0]
return tup
# }}}
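# For orientation (illustrative only): the header that to_binary() writes and
# that parse() above starts by reading is 7 unsigned 32-bit integers, e.g.
# for a little-endian file containing 2 strings:
#
#     import struct
#     header = struct.pack('<7I', 0x950412de, 0, 2, 28, 28 + 2 * 8, 0, 0)
#     struct.unpack('<I', header[:4])[0] == 0x950412de   # magic number check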
# __main__ {{{
if __name__ == '__main__':
"""
**Main function**::
- to **test** the module just run: *python polib.py [-v]*
- to **profile** the module: *python polib.py -p <some_pofile.po>*
"""
import sys
if len(sys.argv) > 2 and sys.argv[1] == '-p':
def test(f):
if f.endswith('po'):
p = pofile(f)
else:
p = mofile(f)
s = str(p)
import profile
profile.run('test("'+sys.argv[2]+'")')
else:
import doctest
doctest.testmod()
# }}}
| false
| true
|
f7186a787fb70368642bb14dc3fd2c4b8114cb16
| 16,032
|
py
|
Python
|
tests/test_invoices.py
|
sm6xmm/epcon
|
8bec4391f8a1fd234e644198a438c7613258655a
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_invoices.py
|
sm6xmm/epcon
|
8bec4391f8a1fd234e644198a438c7613258655a
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_invoices.py
|
sm6xmm/epcon
|
8bec4391f8a1fd234e644198a438c7613258655a
|
[
"BSD-2-Clause"
] | null | null | null |
import csv
import decimal
from datetime import date, datetime
from decimal import Decimal
import random
import json
from django.http import QueryDict
from pytest import mark
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils import timezone
from django_factory_boy import auth as auth_factories
from freezegun import freeze_time
import responses
from assopy.models import Invoice, Order, Vat
from tests.factories import AssopyUserFactory, FareFactory, OrderFactory
from conference.models import AttendeeProfile, Fare, Conference
from conference.invoicing import (
ACPYSS_16,
PYTHON_ITALIA_17,
EPS_18,
CSV_2018_REPORT_COLUMNS,
)
from conference.currencies import (
DAILY_ECB_URL,
EXAMPLE_ECB_DAILY_XML,
EXAMPLE_ECB_DATE,
fetch_and_store_latest_ecb_exrates,
)
from conference.fares import (
pre_create_typical_fares_for_conference,
)
from email_template.models import Email
from tests.common_tools import template_used, make_user
def _prepare_invoice_for_basic_test(order_code, invoice_code):
# default password is 'password123' per django_factory_boy
user = make_user()
    # FYI(artcz): Order.objects.create is an overloaded method on
    # OrderManager that sets up a lot of unused stuff; going with manual
    # .save().
order = Order(user=user.assopy_user, code=order_code)
order.save()
# create some random Vat instance to the invoice creation works
vat_10 = Vat.objects.create(value=10)
return Invoice.objects.create(
code=invoice_code,
order=order,
emit_date=date.today(),
price=Decimal(1337),
vat=vat_10,
html="<html>Here goes full html</html>",
exchange_rate_date=date.today(),
)
@mark.django_db
def test_invoice_html(client):
# invoice_code must be validated via ASSOPY_IS_REAL_INVOICE
invoice_code, order_code = "I123", "asdf"
_prepare_invoice_for_basic_test(order_code, invoice_code)
client.login(email="joedoe@example.com", password="password123")
invoice_url = reverse(
"assopy-invoice-html",
kwargs={"order_code": order_code, "code": invoice_code},
)
response = client.get(invoice_url)
assert (
response.content.decode("utf-8") == "<html>Here goes full html</html>"
)
@mark.django_db
def test_invoice_pdf(client):
# invoice_code must be validated via ASSOPY_IS_REAL_INVOICE
invoice_code, order_code = "I123", "asdf"
_prepare_invoice_for_basic_test(order_code, invoice_code)
client.login(email="joedoe@example.com", password="password123")
invoice_url = reverse(
"assopy-invoice-pdf",
kwargs={"order_code": order_code, "code": invoice_code},
)
response = client.get(invoice_url)
assert response.status_code == 200
assert response["Content-type"] == "application/pdf"
def create_order_and_invoice(assopy_user, fare):
order = OrderFactory(user=assopy_user, items=[(fare, {"qty": 1})])
with responses.RequestsMock() as rsps:
# mocking responses for the invoice VAT exchange rate feature
rsps.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
fetch_and_store_latest_ecb_exrates()
order.confirm_order(timezone.now())
# confirm_order by default creates placeholders, but for most of the tests
# we can upgrade them to proper invoices anyway.
invoice = Invoice.objects.get(order=order)
return invoice
@mark.django_db
def test_if_invoice_stores_information_about_the_seller(client):
"""
Testing #591
https://github.com/EuroPython/epcon/issues/591
"""
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
# need this email to generate invoices/orders
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user()
def invoice_url(invoice):
return reverse(
"assopy-invoice-html",
kwargs={"code": invoice.code, "order_code": invoice.order.code},
)
with freeze_time("2016-01-01"):
# We need to log in again after every time travel, just in case.
client.login(email="joedoe@example.com", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.code == "I/16.0001"
assert invoice.emit_date == date(2016, 1, 1)
assert invoice.issuer == ACPYSS_16
assert invoice.html.startswith("<!DOCTYPE")
response = client.get(invoice_url(invoice))
assert ACPYSS_16 in response.content.decode("utf-8")
with freeze_time("2017-01-01"):
# We need to log in again after every time travel, just in case.
client.login(email="joedoe@example.com", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.code == "I/17.0001"
assert invoice.emit_date == date(2017, 1, 1)
assert invoice.issuer == PYTHON_ITALIA_17
assert invoice.html.startswith("<!DOCTYPE")
response = client.get(invoice_url(invoice))
assert PYTHON_ITALIA_17 in response.content.decode("utf-8")
with freeze_time("2018-01-01"):
# We need to log in again after every time travel, just in case.
client.login(email="joedoe@example.com", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.code == "I/18.0001"
assert invoice.emit_date == date(2018, 1, 1)
assert invoice.issuer == EPS_18
assert invoice.html.startswith("<!DOCTYPE")
response = client.get(invoice_url(invoice))
assert EPS_18 in response.content.decode("utf-8")
@mark.django_db
@responses.activate
def test_vat_in_GBP_for_2018(client):
"""
https://github.com/EuroPython/epcon/issues/617
"""
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user()
with freeze_time("2018-05-05"):
client.login(email="joedoe@example.com", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.html.startswith("<!DOCTYPE")
assert invoice.vat_value() == Decimal("1.67")
assert invoice.vat_in_local_currency == Decimal("1.49")
assert invoice.local_currency == "GBP"
assert invoice.exchange_rate == Decimal("0.89165")
assert invoice.exchange_rate_date == EXAMPLE_ECB_DATE
response = client.get(invoice.get_html_url())
content = response.content.decode("utf-8")
        # The wording used to be different, so we had both checks in one line,
        # but because of a template change we had to separate them
assert 'local-currency="GBP"' in content
assert 'total-vat-in-local-currency="1.49"' in content
        # we're going to use whatever date was received/cached from the ECB XML;
        # it doesn't matter what the emit date is
assert (
"ECB rate used for VAT is 0.89165 GBP/EUR from 2018-03-06"
in content
)
response = client.get(invoice.get_absolute_url())
assert response["Content-Type"] == "application/pdf"
with freeze_time("2017-05-05"):
client.login(email="joedoe@example.com", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.html.startswith("<!DOCTYPE")
assert invoice.vat_value() == Decimal("1.67")
assert invoice.vat_in_local_currency == Decimal("1.67")
assert invoice.local_currency == "EUR"
assert invoice.exchange_rate == Decimal("1.0")
assert invoice.exchange_rate_date == date(2017, 5, 5)
response = client.get(invoice.get_html_url())
content = response.content.decode("utf-8")
# not showing any VAT conversion because in 2017 we had just EUR
assert "EUR" in content
assert "Total VAT is" not in content
assert "ECB rate" not in content
response = client.get(invoice.get_absolute_url())
assert response["Content-Type"] == "application/pdf"
@mark.django_db
@responses.activate
@freeze_time("2018-05-05")
def test_create_invoice_with_many_items(client):
"""
    This test is meant to exercise the invoice template design.
    It creates a lot of different items on the invoice, and after that we can
    use serve(content) to easily check in the browser what the invoice looks
    like.
Freezing it at 2018 so we can easily check EP2018 invoices.
"""
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
user = make_user()
vat_rate_20, _ = Vat.objects.get_or_create(value=20)
CONFERENCE = settings.CONFERENCE_CONFERENCE
pre_create_typical_fares_for_conference(CONFERENCE, vat_rate_20)
# Don't need to set dates for this test.
# set_early_bird_fare_dates(CONFERENCE, yesterday, tomorrow)
# set_regular_fare_dates(CONFERENCE, yesterday, tomorrow)
random_fares = random.sample(list(Fare.objects.all()), 3)
order = OrderFactory(
user=user.assopy_user,
items=[(fare, {"qty": i}) for i, fare in enumerate(random_fares, 1)],
)
with responses.RequestsMock() as rsps:
# mocking responses for the invoice VAT exchange rate feature
rsps.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
fetch_and_store_latest_ecb_exrates()
order.confirm_order(timezone.now())
@mark.django_db
@responses.activate
def test_export_invoice_csv(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-05-05"):
invoice1 = create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 1, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_tax_report_csv")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"] == "text/csv"
invoice_reader = csv.reader(response.content.decode("utf-8").splitlines())
next(invoice_reader) # skip header
invoice = next(invoice_reader)
iter_column = iter(invoice)
assert next(iter_column) == invoice1.code
assert next(iter_column) == "2018-05-05"
assert next(iter_column) == invoice1.order.user.user.get_full_name()
assert next(iter_column) == invoice1.order.card_name
next(iter_column) # ignore the address
assert next(iter_column) == invoice1.order.country.name
assert next(iter_column) == invoice1.order.vat_number
next(iter_column) # ignore the currency
assert (
decimal.Decimal(next(iter_column))
== invoice1.net_price_in_local_currency
)
assert decimal.Decimal(next(iter_column)) == invoice1.vat_in_local_currency
assert (
decimal.Decimal(next(iter_column)) == invoice1.price_in_local_currency
)
@mark.django_db
@responses.activate
def test_export_invoice_csv_before_period(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-04-05"):
create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 5, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_tax_report_csv")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"] == "text/csv"
invoice_reader = csv.reader(response.content.decode("utf-8").splitlines())
header = next(invoice_reader)
assert header == CSV_2018_REPORT_COLUMNS
assert next(invoice_reader, None) is None
@mark.django_db
@responses.activate
def test_export_invoice(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-05-05"):
invoice1 = create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 1, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_tax_report")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"].startswith("text/html")
assert '<tr id="invoice_{0}">'.format(
invoice1.id
) in response.content.decode("utf-8")
@mark.django_db
@responses.activate
def test_export_invoice_accounting_json(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-05-05"):
invoice1 = create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 1, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_payment_reconciliation_json")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"].startswith("application/json")
data = json.loads(response.content)["invoices"]
assert len(data) == 1
assert data[0]["ID"] == invoice1.code
assert decimal.Decimal(data[0]["net"]) == invoice1.net_price()
assert decimal.Decimal(data[0]["vat"]) == invoice1.vat_value()
assert decimal.Decimal(data[0]["gross"]) == invoice1.price
assert data[0]["order"] == invoice1.order.code
assert data[0]["stripe"] == invoice1.order.stripe_charge_id
def test_reissue_invoice(admin_client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
invoice_code, order_code = "I123", "asdf"
invoice = _prepare_invoice_for_basic_test(order_code, invoice_code)
NEW_CUSTOMER = "NEW CUSTOMER"
assert Invoice.objects.all().count() == 1
assert NEW_CUSTOMER not in Invoice.objects.latest("id").html
url = reverse("debug_panel_reissue_invoice", args=[invoice.id])
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url, {"emit_date": "2018-01-01", "customer": NEW_CUSTOMER}
)
assert response.status_code == 302
assert Invoice.objects.all().count() == 2
assert NEW_CUSTOMER in Invoice.objects.latest("id").html
| 34.403433
| 79
| 0.70141
|
import csv
import decimal
from datetime import date, datetime
from decimal import Decimal
import random
import json
from django.http import QueryDict
from pytest import mark
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils import timezone
from django_factory_boy import auth as auth_factories
from freezegun import freeze_time
import responses
from assopy.models import Invoice, Order, Vat
from tests.factories import AssopyUserFactory, FareFactory, OrderFactory
from conference.models import AttendeeProfile, Fare, Conference
from conference.invoicing import (
ACPYSS_16,
PYTHON_ITALIA_17,
EPS_18,
CSV_2018_REPORT_COLUMNS,
)
from conference.currencies import (
DAILY_ECB_URL,
EXAMPLE_ECB_DAILY_XML,
EXAMPLE_ECB_DATE,
fetch_and_store_latest_ecb_exrates,
)
from conference.fares import (
pre_create_typical_fares_for_conference,
)
from email_template.models import Email
from tests.common_tools import template_used, make_user
def _prepare_invoice_for_basic_test(order_code, invoice_code):
user = make_user()
order = Order(user=user.assopy_user, code=order_code)
order.save()
vat_10 = Vat.objects.create(value=10)
return Invoice.objects.create(
code=invoice_code,
order=order,
emit_date=date.today(),
price=Decimal(1337),
vat=vat_10,
html="<html>Here goes full html</html>",
exchange_rate_date=date.today(),
)
@mark.django_db
def test_invoice_html(client):
invoice_code, order_code = "I123", "asdf"
_prepare_invoice_for_basic_test(order_code, invoice_code)
client.login(email="joedoe@example.com", password="password123")
invoice_url = reverse(
"assopy-invoice-html",
kwargs={"order_code": order_code, "code": invoice_code},
)
response = client.get(invoice_url)
assert (
response.content.decode("utf-8") == "<html>Here goes full html</html>"
)
@mark.django_db
def test_invoice_pdf(client):
invoice_code, order_code = "I123", "asdf"
_prepare_invoice_for_basic_test(order_code, invoice_code)
client.login(email="joedoe@example.com", password="password123")
invoice_url = reverse(
"assopy-invoice-pdf",
kwargs={"order_code": order_code, "code": invoice_code},
)
response = client.get(invoice_url)
assert response.status_code == 200
assert response["Content-type"] == "application/pdf"
def create_order_and_invoice(assopy_user, fare):
order = OrderFactory(user=assopy_user, items=[(fare, {"qty": 1})])
with responses.RequestsMock() as rsps:
rsps.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
fetch_and_store_latest_ecb_exrates()
order.confirm_order(timezone.now())
invoice = Invoice.objects.get(order=order)
return invoice
@mark.django_db
def test_if_invoice_stores_information_about_the_seller(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user()
def invoice_url(invoice):
return reverse(
"assopy-invoice-html",
kwargs={"code": invoice.code, "order_code": invoice.order.code},
)
with freeze_time("2016-01-01"):
client.login(email="joedoe@example.com", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.code == "I/16.0001"
assert invoice.emit_date == date(2016, 1, 1)
assert invoice.issuer == ACPYSS_16
assert invoice.html.startswith("<!DOCTYPE")
response = client.get(invoice_url(invoice))
assert ACPYSS_16 in response.content.decode("utf-8")
with freeze_time("2017-01-01"):
client.login(email="joedoe@example.com", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.code == "I/17.0001"
assert invoice.emit_date == date(2017, 1, 1)
assert invoice.issuer == PYTHON_ITALIA_17
assert invoice.html.startswith("<!DOCTYPE")
response = client.get(invoice_url(invoice))
assert PYTHON_ITALIA_17 in response.content.decode("utf-8")
with freeze_time("2018-01-01"):
client.login(email="joedoe@example.com", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.code == "I/18.0001"
assert invoice.emit_date == date(2018, 1, 1)
assert invoice.issuer == EPS_18
assert invoice.html.startswith("<!DOCTYPE")
response = client.get(invoice_url(invoice))
assert EPS_18 in response.content.decode("utf-8")
@mark.django_db
@responses.activate
def test_vat_in_GBP_for_2018(client):
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user()
with freeze_time("2018-05-05"):
client.login(email="joedoe@example.com", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.html.startswith("<!DOCTYPE")
assert invoice.vat_value() == Decimal("1.67")
assert invoice.vat_in_local_currency == Decimal("1.49")
assert invoice.local_currency == "GBP"
assert invoice.exchange_rate == Decimal("0.89165")
assert invoice.exchange_rate_date == EXAMPLE_ECB_DATE
response = client.get(invoice.get_html_url())
content = response.content.decode("utf-8")
assert 'local-currency="GBP"' in content
assert 'total-vat-in-local-currency="1.49"' in content
        # doesn't matter what the emit date is
assert (
"ECB rate used for VAT is 0.89165 GBP/EUR from 2018-03-06"
in content
)
response = client.get(invoice.get_absolute_url())
assert response["Content-Type"] == "application/pdf"
with freeze_time("2017-05-05"):
client.login(email="joedoe@example.com", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.html.startswith("<!DOCTYPE")
assert invoice.vat_value() == Decimal("1.67")
assert invoice.vat_in_local_currency == Decimal("1.67")
assert invoice.local_currency == "EUR"
assert invoice.exchange_rate == Decimal("1.0")
assert invoice.exchange_rate_date == date(2017, 5, 5)
response = client.get(invoice.get_html_url())
content = response.content.decode("utf-8")
# not showing any VAT conversion because in 2017 we had just EUR
assert "EUR" in content
assert "Total VAT is" not in content
assert "ECB rate" not in content
response = client.get(invoice.get_absolute_url())
assert response["Content-Type"] == "application/pdf"
@mark.django_db
@responses.activate
@freeze_time("2018-05-05")
def test_create_invoice_with_many_items(client):
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
user = make_user()
vat_rate_20, _ = Vat.objects.get_or_create(value=20)
CONFERENCE = settings.CONFERENCE_CONFERENCE
pre_create_typical_fares_for_conference(CONFERENCE, vat_rate_20)
# Don't need to set dates for this test.
random_fares = random.sample(list(Fare.objects.all()), 3)
order = OrderFactory(
user=user.assopy_user,
items=[(fare, {"qty": i}) for i, fare in enumerate(random_fares, 1)],
)
with responses.RequestsMock() as rsps:
rsps.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
fetch_and_store_latest_ecb_exrates()
order.confirm_order(timezone.now())
@mark.django_db
@responses.activate
def test_export_invoice_csv(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-05-05"):
invoice1 = create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 1, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_tax_report_csv")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"] == "text/csv"
invoice_reader = csv.reader(response.content.decode("utf-8").splitlines())
next(invoice_reader)
invoice = next(invoice_reader)
iter_column = iter(invoice)
assert next(iter_column) == invoice1.code
assert next(iter_column) == "2018-05-05"
assert next(iter_column) == invoice1.order.user.user.get_full_name()
assert next(iter_column) == invoice1.order.card_name
next(iter_column)
assert next(iter_column) == invoice1.order.country.name
assert next(iter_column) == invoice1.order.vat_number
next(iter_column)
assert (
decimal.Decimal(next(iter_column))
== invoice1.net_price_in_local_currency
)
assert decimal.Decimal(next(iter_column)) == invoice1.vat_in_local_currency
assert (
decimal.Decimal(next(iter_column)) == invoice1.price_in_local_currency
)
@mark.django_db
@responses.activate
def test_export_invoice_csv_before_period(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-04-05"):
create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 5, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_tax_report_csv")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"] == "text/csv"
invoice_reader = csv.reader(response.content.decode("utf-8").splitlines())
header = next(invoice_reader)
assert header == CSV_2018_REPORT_COLUMNS
assert next(invoice_reader, None) is None
@mark.django_db
@responses.activate
def test_export_invoice(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-05-05"):
invoice1 = create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 1, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_tax_report")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"].startswith("text/html")
assert '<tr id="invoice_{0}">'.format(
invoice1.id
) in response.content.decode("utf-8")
@mark.django_db
@responses.activate
def test_export_invoice_accounting_json(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-05-05"):
invoice1 = create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 1, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_payment_reconciliation_json")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"].startswith("application/json")
data = json.loads(response.content)["invoices"]
assert len(data) == 1
assert data[0]["ID"] == invoice1.code
assert decimal.Decimal(data[0]["net"]) == invoice1.net_price()
assert decimal.Decimal(data[0]["vat"]) == invoice1.vat_value()
assert decimal.Decimal(data[0]["gross"]) == invoice1.price
assert data[0]["order"] == invoice1.order.code
assert data[0]["stripe"] == invoice1.order.stripe_charge_id
def test_reissue_invoice(admin_client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
invoice_code, order_code = "I123", "asdf"
invoice = _prepare_invoice_for_basic_test(order_code, invoice_code)
NEW_CUSTOMER = "NEW CUSTOMER"
assert Invoice.objects.all().count() == 1
assert NEW_CUSTOMER not in Invoice.objects.latest("id").html
url = reverse("debug_panel_reissue_invoice", args=[invoice.id])
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url, {"emit_date": "2018-01-01", "customer": NEW_CUSTOMER}
)
assert response.status_code == 302
assert Invoice.objects.all().count() == 2
assert NEW_CUSTOMER in Invoice.objects.latest("id").html
| true
| true
|
f7186b0bfcb6fa0d28db71225539a73b5880267f
| 1,277
|
py
|
Python
|
panel/models.py
|
SebastinSanty/QuarkWebsite2017
|
30215f81d606e79820971edd91de4ab2ff95cc1f
|
[
"Apache-2.0"
] | 1
|
2016-12-19T09:42:44.000Z
|
2016-12-19T09:42:44.000Z
|
panel/models.py
|
SebastinSanty/QuarkWebsite2017
|
30215f81d606e79820971edd91de4ab2ff95cc1f
|
[
"Apache-2.0"
] | 8
|
2016-12-29T08:08:43.000Z
|
2017-01-28T18:11:47.000Z
|
panel/models.py
|
SebastinSanty/QuarkWebsite2017
|
30215f81d606e79820971edd91de4ab2ff95cc1f
|
[
"Apache-2.0"
] | 4
|
2016-12-21T12:51:33.000Z
|
2017-07-21T07:06:05.000Z
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models import signals
import registration
GENDER_CHOICES = (
(u'M',u'Male'),
(u'F',u'Female'),
(u'N',u"Don't wish to reveal")
)
YEAR_CHOICES = (
(u'U1',u'Undergraduate 1st year'),
(u'U2',u'Undergraduate 2nd year'),
(u'U3',u'Undergraduate 3rd year'),
(u'U4',u'Undergraduate 4th year'),
(u'P1',u'Postgraduate 1st year'),
(u'P2',u'Postgraduate 2nd year'),
(u'SS',u'Schooling'),
(u'PH',u'PhD.'),
)
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(User, on_delete = models.CASCADE)
name = models.CharField(max_length = 120)
email = models.EmailField()
mobile = models.CharField(max_length = 10)
institute = models.CharField(max_length = 120)
gender = models.CharField(max_length = 1, choices=GENDER_CHOICES)
    dob = models.DateField() # auto_now_add/auto_now would overwrite the birth date with the record's save date
year = models.CharField(max_length =2, choices = YEAR_CHOICES)
updatedtime = models.DateTimeField(auto_now_add = False, auto_now = True)
settime = models.DateTimeField(auto_now_add = True, auto_now = False)
def __str__(self):
return self.name
class Institute(models.Model):
name = models.CharField(max_length=120)
def __str__(self):
return self.name
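Editor's note: the module above imports django.db.models.signals and registration without using them, which suggests an auto-create hook for Profile was planned. A minimal sketch of such a hook, assuming the standard post_save signal; the receiver name and the default field values are hypothetical:

from django.db.models.signals import post_save
from django.dispatch import receiver

@receiver(post_save, sender=User)
def ensure_profile(sender, instance, created, **kwargs):
    # Hypothetical receiver: real values would come from the registration
    # form; the defaults below exist only to make the sketch runnable.
    if created:
        Profile.objects.get_or_create(
            user=instance,
            defaults={"name": instance.username, "email": instance.email,
                      "mobile": "", "institute": "", "gender": "N",
                      "dob": "1970-01-01", "year": "U1"},
        )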
| 27.170213
| 74
| 0.723571
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models import signals
import registration
GENDER_CHOICES = (
(u'M',u'Male'),
(u'F',u'Female'),
(u'N',u"Don't wish to reveal")
)
YEAR_CHOICES = (
(u'U1',u'Undergraduate 1st year'),
(u'U2',u'Undergraduate 2nd year'),
(u'U3',u'Undergraduate 3rd year'),
(u'U4',u'Undergraduate 4th year'),
(u'P1',u'Postgraduate 1st year'),
(u'P2',u'Postgraduate 2nd year'),
(u'SS',u'Schooling'),
(u'PH',u'PhD.'),
)
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(User, on_delete = models.CASCADE)
name = models.CharField(max_length = 120)
email = models.EmailField()
mobile = models.CharField(max_length = 10)
institute = models.CharField(max_length = 120)
gender = models.CharField(max_length = 1, choices=GENDER_CHOICES)
    dob = models.DateField()
year = models.CharField(max_length =2, choices = YEAR_CHOICES)
updatedtime = models.DateTimeField(auto_now_add = False, auto_now = True)
settime = models.DateTimeField(auto_now_add = True, auto_now = False)
def __str__(self):
return self.name
class Institute(models.Model):
name = models.CharField(max_length=120)
def __str__(self):
return self.name
| true
| true
|
f7186b640388fb4012d907794b1497584c64f454
| 317
|
py
|
Python
|
pandapipes/multinet/timeseries/__init__.py
|
e2nIEE/pandapipes
|
d7b5b91cf0a4dcdfdb255dadae6383d61385b802
|
[
"BSD-3-Clause"
] | 48
|
2020-02-14T13:16:31.000Z
|
2022-03-30T07:15:55.000Z
|
pandapipes/multinet/timeseries/__init__.py
|
e2nIEE/pandapipes
|
d7b5b91cf0a4dcdfdb255dadae6383d61385b802
|
[
"BSD-3-Clause"
] | 279
|
2020-02-20T13:06:56.000Z
|
2022-03-14T12:29:59.000Z
|
pandapipes/multinet/timeseries/__init__.py
|
e2nIEE/pandapipes
|
d7b5b91cf0a4dcdfdb255dadae6383d61385b802
|
[
"BSD-3-Clause"
] | 30
|
2020-02-14T15:38:24.000Z
|
2022-02-21T13:37:12.000Z
|
# Copyright (c) 2020-2021 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
from .run_time_series_multinet import run_timeseries
| 63.4
| 99
| 0.801262
|
from .run_time_series_multinet import run_timeseries
| true
| true
|
f7186ba5ebfc513d05b9791631132efa31987d9a
| 4,426
|
py
|
Python
|
zoidbot_tools/src/zoidbot_tools/player.py
|
LCAS/zoidberg
|
39599c053d6902a9f2252d510036af171ce5b899
|
[
"MIT"
] | null | null | null |
zoidbot_tools/src/zoidbot_tools/player.py
|
LCAS/zoidberg
|
39599c053d6902a9f2252d510036af171ce5b899
|
[
"MIT"
] | null | null | null |
zoidbot_tools/src/zoidbot_tools/player.py
|
LCAS/zoidberg
|
39599c053d6902a9f2252d510036af171ce5b899
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python2
import sys
import os
from time import sleep
from threading import Timer
import rospy
import baxter_interface
from baxter_interface import CHECK_VERSION
class JointPlayer(object):
def __init__(self, filename, loops=1):
self.filename=filename
self.loops=loops
def try_float(self, x):
try:
return float(x)
except ValueError:
return None
def clean_line(self, line, names):
"""
Cleans a single line of recorded joint positions
@param line: the line described in a list to process
@param names: joint name keys
"""
#convert the line of strings to a float or None
line = [self.try_float(x) for x in line.rstrip().split(',')]
#zip the values with the joint names
combined = zip(names[1:], line[1:])
#take out any tuples that have a none value
cleaned = [x for x in combined if x[1] is not None]
#convert it to a dictionary with only valid commands
command = dict(cleaned)
left_command = dict((key, command[key]) for key in command.keys()
if key[:-2] == 'left_')
right_command = dict((key, command[key]) for key in command.keys()
if key[:-2] == 'right_')
return (command, left_command, right_command, line)
def play_file(self):
"""
Loops through csv file
@param filename: the file to play
@param loops: number of times to loop
values < 0 mean 'infinite'
Does not loop indefinitely, but only until the file is read
and processed. Reads each line, split up in columns and
formats each line into a controller command in the form of
name/value pairs. Names come from the column headers
first column is the time stamp
"""
filename = self.filename
loops = self.loops
left = baxter_interface.Limb('left')
right = baxter_interface.Limb('right')
grip_left = baxter_interface.Gripper('left', CHECK_VERSION)
grip_right = baxter_interface.Gripper('right', CHECK_VERSION)
rate = rospy.Rate(1000)
if grip_left.error():
grip_left.reset()
if grip_right.error():
grip_right.reset()
if (not grip_left.calibrated() and
grip_left.type() != 'custom'):
grip_left.calibrate()
if (not grip_right.calibrated() and
grip_right.type() != 'custom'):
grip_right.calibrate()
print("Playing back: %s" % (filename,))
with open(filename, 'r') as f:
lines = f.readlines()
keys = lines[0].rstrip().split(',')
l = 0
# # If specified, repeat the file playback 'loops' number of times
# while loops < 1 or l < loops:
# i = 0
l += 1
print("Moving to start position...")
i = 0
_cmd, lcmd_start, rcmd_start, _raw = self.clean_line(lines[1], keys)
left.move_to_joint_positions(lcmd_start)
right.move_to_joint_positions(rcmd_start)
start_time = rospy.get_time()
for values in lines[1:]:
i += 1
loopstr = str(loops) if loops > 0 else "forever"
sys.stdout.write("\r Record %d of %d, loop %d of %s" %
(i, len(lines) - 1, l, loopstr))
sys.stdout.flush()
cmd, lcmd, rcmd, values = self.clean_line(values, keys)
#command this set of commands until the next frame
while (rospy.get_time() - start_time) < values[0]:
if rospy.is_shutdown():
print("\n Aborting - ROS shutdown")
return False
if len(lcmd):
left.set_joint_positions(lcmd)
if len(rcmd):
right.set_joint_positions(rcmd)
if ('left_gripper' in cmd and
grip_left.type() != 'custom'):
grip_left.command_position(cmd['left_gripper'])
if ('right_gripper' in cmd and
grip_right.type() != 'custom'):
grip_right.command_position(cmd['right_gripper'])
rate.sleep()
print
#print "DONEEEE"
return True
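Editor's note: a minimal usage sketch for JointPlayer, assuming a recording produced by Baxter's standard joint recorder and an import path following the package layout above (zoidbot_tools.player); the node name and file name are placeholders:

#!/usr/bin/python2
import rospy
import baxter_interface
from baxter_interface import CHECK_VERSION
from zoidbot_tools.player import JointPlayer

if __name__ == "__main__":
    rospy.init_node("joint_player_example")
    # Enable the robot before commanding joint positions.
    baxter_interface.RobotEnable(CHECK_VERSION).enable()
    player = JointPlayer("recorded_trajectory.csv", loops=1)
    if player.play_file():
        rospy.loginfo("Playback finished")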
| 34.850394
| 76
| 0.554677
|
import sys
import os
from time import sleep
from threading import Timer
import rospy
import baxter_interface
from baxter_interface import CHECK_VERSION
class JointPlayer(object):
def __init__(self, filename, loops=1):
self.filename=filename
self.loops=loops
def try_float(self, x):
try:
return float(x)
except ValueError:
return None
def clean_line(self, line, names):
line = [self.try_float(x) for x in line.rstrip().split(',')]
combined = zip(names[1:], line[1:])
cleaned = [x for x in combined if x[1] is not None]
command = dict(cleaned)
left_command = dict((key, command[key]) for key in command.keys()
if key[:-2] == 'left_')
right_command = dict((key, command[key]) for key in command.keys()
if key[:-2] == 'right_')
return (command, left_command, right_command, line)
def play_file(self):
filename = self.filename
loops = self.loops
left = baxter_interface.Limb('left')
right = baxter_interface.Limb('right')
grip_left = baxter_interface.Gripper('left', CHECK_VERSION)
grip_right = baxter_interface.Gripper('right', CHECK_VERSION)
rate = rospy.Rate(1000)
if grip_left.error():
grip_left.reset()
if grip_right.error():
grip_right.reset()
if (not grip_left.calibrated() and
grip_left.type() != 'custom'):
grip_left.calibrate()
if (not grip_right.calibrated() and
grip_right.type() != 'custom'):
grip_right.calibrate()
print("Playing back: %s" % (filename,))
with open(filename, 'r') as f:
lines = f.readlines()
keys = lines[0].rstrip().split(',')
l = 0
i = 0
_cmd, lcmd_start, rcmd_start, _raw = self.clean_line(lines[1], keys)
left.move_to_joint_positions(lcmd_start)
right.move_to_joint_positions(rcmd_start)
start_time = rospy.get_time()
for values in lines[1:]:
i += 1
loopstr = str(loops) if loops > 0 else "forever"
sys.stdout.write("\r Record %d of %d, loop %d of %s" %
(i, len(lines) - 1, l, loopstr))
sys.stdout.flush()
cmd, lcmd, rcmd, values = self.clean_line(values, keys)
while (rospy.get_time() - start_time) < values[0]:
if rospy.is_shutdown():
print("\n Aborting - ROS shutdown")
return False
if len(lcmd):
left.set_joint_positions(lcmd)
if len(rcmd):
right.set_joint_positions(rcmd)
if ('left_gripper' in cmd and
grip_left.type() != 'custom'):
grip_left.command_position(cmd['left_gripper'])
if ('right_gripper' in cmd and
grip_right.type() != 'custom'):
grip_right.command_position(cmd['right_gripper'])
rate.sleep()
print
return True
| true
| true
|
f7186bd3518a0b4de17b8aacb089858c3ed016c3
| 320
|
py
|
Python
|
src.bak/const.py
|
amolabs/ecosim
|
b4aedc6496aa87facd357c9f153352bb68f42769
|
[
"Apache-2.0"
] | 1
|
2022-01-05T02:10:37.000Z
|
2022-01-05T02:10:37.000Z
|
src.bak/const.py
|
amolabs/ecosim
|
b4aedc6496aa87facd357c9f153352bb68f42769
|
[
"Apache-2.0"
] | null | null | null |
src.bak/const.py
|
amolabs/ecosim
|
b4aedc6496aa87facd357c9f153352bb68f42769
|
[
"Apache-2.0"
] | null | null | null |
# vim: set sw=4 ts=4 expandtab :
oneamo = 1000000000000000000
moteperamo = 1000000000000000000
DELTA_AMO = 0.000000001 # 10^-9 AMO
DELTA_MOTE = 1000000000 # 10^9 mote
BLKSHOUR = 60*60
BLKSDAY = 60*60*24
BLKSWEEK = 60*60*24*7
BLKSMONTH = 60*60*24*30
BLKSQUARTER = 60*60*24*90
BLKSYEAR = 60*60*24*365
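Editor's note: a short illustration of how these constants compose (module name taken from the path above; purely illustrative):

from const import moteperamo, BLKSDAY

motes = int(2.5 * moteperamo)  # 2.5 AMO expressed in mote
window = 3 * BLKSDAY           # number of blocks in a three-day window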
| 21.333333
| 37
| 0.6875
|
oneamo = 1000000000000000000
moteperamo = 1000000000000000000
DELTA_AMO = 0.000000001
DELTA_MOTE = 1000000000
BLKSHOUR = 60*60
BLKSDAY = 60*60*24
BLKSWEEK = 60*60*24*7
BLKSMONTH = 60*60*24*30
BLKSQUARTER = 60*60*24*90
BLKSYEAR = 60*60*24*365
| true
| true
|
f7186c0ae96e4ece6c4d1bb8ccc4bbe0a86ebbb1
| 690
|
py
|
Python
|
catalog/migrations/0007_auto_20210703_1655.py
|
l-a-motta/talehub
|
970f27bda5625576cc66a5a31224adce031f7404
|
[
"MIT"
] | null | null | null |
catalog/migrations/0007_auto_20210703_1655.py
|
l-a-motta/talehub
|
970f27bda5625576cc66a5a31224adce031f7404
|
[
"MIT"
] | null | null | null |
catalog/migrations/0007_auto_20210703_1655.py
|
l-a-motta/talehub
|
970f27bda5625576cc66a5a31224adce031f7404
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-07-03 19:55
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('catalog', '0006_chapter_votes'),
]
operations = [
migrations.AddField(
model_name='book',
name='published_at',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='chapter',
name='published_at',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
]
| 25.555556
| 74
| 0.608696
|
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('catalog', '0006_chapter_votes'),
]
operations = [
migrations.AddField(
model_name='book',
name='published_at',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='chapter',
name='published_at',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
]
| true
| true
|
f7186c110ded7def9a81ed670b676696fc854c60
| 408
|
py
|
Python
|
config/wsgi.py
|
SNFernandes24/church-ims
|
944b7e65e926276adfe376ace01cf0adf135b954
|
[
"MIT"
] | 1
|
2021-09-11T17:22:37.000Z
|
2021-09-11T17:22:37.000Z
|
config/wsgi.py
|
SNFernandes24/church-ims
|
944b7e65e926276adfe376ace01cf0adf135b954
|
[
"MIT"
] | 39
|
2021-06-26T02:01:37.000Z
|
2021-07-14T17:11:53.000Z
|
config/wsgi.py
|
SNFernandes24/church-ims
|
944b7e65e926276adfe376ace01cf0adf135b954
|
[
"MIT"
] | 2
|
2021-07-19T08:00:58.000Z
|
2022-02-05T16:38:02.000Z
|
"""
WSGI config for the Church IMS project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
application = get_wsgi_application()
| 24
| 78
| 0.786765
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
application = get_wsgi_application()
| true
| true
|
f7186c2877327744d960f3a5c9f0de88bb1a77e8
| 2,017
|
py
|
Python
|
two-body-mond.py
|
alifianmahardhika/galaxy_simpy
|
799d11b00a3b14991d89ddac0aabf0bcd447b800
|
[
"Apache-2.0"
] | null | null | null |
two-body-mond.py
|
alifianmahardhika/galaxy_simpy
|
799d11b00a3b14991d89ddac0aabf0bcd447b800
|
[
"Apache-2.0"
] | null | null | null |
two-body-mond.py
|
alifianmahardhika/galaxy_simpy
|
799d11b00a3b14991d89ddac0aabf0bcd447b800
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib.pyplot as plt
from numpy import sin,cos,pi,sqrt,exp,floor,zeros,copy,array
from numpy.random import normal
from numpy.linalg import norm
from random import uniform
from time import time
start = time()
def euler(x,v):
for i in range(n_particles):
sigmaF = zeros(2)
for j in range(n_particles):
if(i!=j):
sigmaF += f(x[i],x[j])
x[i] += v[i]*dt
v[i] += a_0*phi_inv(norm(sigmaF)/a_0)*(sigmaF/norm(sigmaF))*dt
def symplectic(x,v):
for i in range(n_particles):
sigmaF = zeros(2)
for j in range(n_particles):
if(i!=j):
sigmaF += f(x[i],x[j])
        v[i] += sigmaF*dt # f() already includes G; the extra factor double-counted it
x[i] += v[i]*dt
def f(xi,xj):
rij = xj-xi
return (G*m*rij)/(norm(rij)+epsilon)**3
def init_two():
x1 = ([R*cos(omega*0),R*sin(omega*0)])
x2 = -copy(x1)
v1 = ([omega*x1[1],omega*x1[0]])
v2 = -copy(v1)
x = array([x1,x2])
v = array([v1,v2])
return x,v
def kinetic_energy():
sigmaN = 0.0
for i in range(n_particles):
sigmaN += 0.5*m*norm(v[i])**2
return sigmaN
def phi_inv(q):
    return sqrt(q)*sqrt((1.0+sqrt(1.0+(4.0/q**2)))/2.0) # was 4.0/r**2 with r undefined; the parameter q is clearly intended
#Global parameter
n_particles = 2 #particles
d = 2 #dimension
m = 10e11/n_particles #[MO]
R = 2.9 #[kpc]
G = 13.34*10e-11 #[kpc^3 MO^-1 gy^-2]
omega = sqrt((G*m)/(4*R**3)) #velocities
epsilon = 1e-3
T = 100
dt = 0.001
N = int(floor(T/dt))
scale = 30.0
a_0 = 10e-1
#initial condition
x,v = init_two()
#x = get_init_coordinates()
#v = get_init_velocities()
print(x)
#main loop
plt.plot(x[:,0],x[:,1], 'ro')
for k in range(N):
euler(x,v)
#print(kinetic_energy())
#plt.plot(xe[:,0],xe[:,1], 'b.')
#plt.xlim(right=scale,left=-scale)
#plt.ylim(top=scale,bottom=-scale)
#plt.axes(aspect='equal')
if(k%100==0):
plt.plot(x[:,0],x[:,1], 'b.')
#filename='./figures/plot.png'
#plt.savefig(filename)
print("Time for running ", N, "iteration :", time()-start, "seconds")
print(x)
plt.show()
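Editor's note: phi_inv originally referenced an undefined name r, which raised a NameError on the first euler() step; the parameter q is clearly intended and has been substituted above. A quick check of the interpolation's limits under that assumption:

from numpy import sqrt

def phi_inv(q):
    return sqrt(q)*sqrt((1.0+sqrt(1.0+(4.0/q**2)))/2.0)

print(phi_inv(1e6))   # high-acceleration limit: ~sqrt(q) = 1000.0
print(phi_inv(1e-6))  # low-acceleration limit: -> 1.0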
| 26.539474
| 70
| 0.57412
|
import matplotlib.pyplot as plt
from numpy import sin,cos,pi,sqrt,exp,floor,zeros,copy,array
from numpy.random import normal
from numpy.linalg import norm
from random import uniform
from time import time
start = time()
def euler(x,v):
for i in range(n_particles):
sigmaF = zeros(2)
for j in range(n_particles):
if(i!=j):
sigmaF += f(x[i],x[j])
x[i] += v[i]*dt
v[i] += a_0*phi_inv(norm(sigmaF)/a_0)*(sigmaF/norm(sigmaF))*dt
def symplectic(x,v):
for i in range(n_particles):
sigmaF = zeros(2)
for j in range(n_particles):
if(i!=j):
sigmaF += f(x[i],x[j])
        v[i] += sigmaF*dt
x[i] += v[i]*dt
def f(xi,xj):
rij = xj-xi
return (G*m*rij)/(norm(rij)+epsilon)**3
def init_two():
x1 = ([R*cos(omega*0),R*sin(omega*0)])
x2 = -copy(x1)
v1 = ([omega*x1[1],omega*x1[0]])
v2 = -copy(v1)
x = array([x1,x2])
v = array([v1,v2])
return x,v
def kinetic_energy():
sigmaN = 0.0
for i in range(n_particles):
sigmaN += 0.5*m*norm(v[i])**2
return sigmaN
def phi_inv(q):
    return sqrt(q)*sqrt((1.0+sqrt(1.0+(4.0/q**2)))/2.0)
n_particles = 2
d = 2
m = 10e11/n_particles
R = 2.9
G = 13.34*10e-11
omega = sqrt((G*m)/(4*R**3))
epsilon = 1e-3
T = 100
dt = 0.001
N = int(floor(T/dt))
scale = 30.0
a_0 = 10e-1
x,v = init_two()
print(x)
plt.plot(x[:,0],x[:,1], 'ro')
for k in range(N):
euler(x,v)
if(k%100==0):
plt.plot(x[:,0],x[:,1], 'b.')
print("Time for running ", N, "iteration :", time()-start, "seconds")
print(x)
plt.show()
| true
| true
|
f7186c640d025037dc50ce370ed2fcdc2d73a4ac
| 1,903
|
py
|
Python
|
tests/examples/minlplib/tln2.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | 2
|
2021-07-03T13:19:10.000Z
|
2022-02-06T10:48:13.000Z
|
tests/examples/minlplib/tln2.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | 1
|
2021-07-04T14:52:14.000Z
|
2021-07-15T10:17:11.000Z
|
tests/examples/minlplib/tln2.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | null | null | null |
# MINLP written by GAMS Convert at 04/21/18 13:54:54
#
# Equation counts
# Total E G L N X C B
# 13 1 0 12 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 9 1 2 6 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 33 25 8 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.b1 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2 = Var(within=Binary,bounds=(0,1),initialize=0)
m.i3 = Var(within=Integers,bounds=(0,15),initialize=1)
m.i4 = Var(within=Integers,bounds=(0,15),initialize=1)
m.i5 = Var(within=Integers,bounds=(0,5),initialize=1)
m.i6 = Var(within=Integers,bounds=(0,5),initialize=1)
m.i7 = Var(within=Integers,bounds=(0,5),initialize=1)
m.i8 = Var(within=Integers,bounds=(0,5),initialize=1)
m.obj = Objective(expr= 0.1*m.b1 + 0.2*m.b2 + m.i3 + m.i4, sense=minimize)
m.c2 = Constraint(expr= 460*m.i5 + 570*m.i7 <= 1900)
m.c3 = Constraint(expr= 460*m.i6 + 570*m.i8 <= 1900)
m.c4 = Constraint(expr= - 460*m.i5 - 570*m.i7 <= -1700)
m.c5 = Constraint(expr= - 460*m.i6 - 570*m.i8 <= -1700)
m.c6 = Constraint(expr= m.i5 + m.i7 <= 5)
m.c7 = Constraint(expr= m.i6 + m.i8 <= 5)
m.c8 = Constraint(expr= m.b1 - m.i3 <= 0)
m.c9 = Constraint(expr= m.b2 - m.i4 <= 0)
m.c10 = Constraint(expr= - 15*m.b1 + m.i3 <= 0)
m.c11 = Constraint(expr= - 15*m.b2 + m.i4 <= 0)
m.c12 = Constraint(expr=-(m.i3*m.i5 + m.i4*m.i6) <= -8)
m.c13 = Constraint(expr=-(m.i3*m.i7 + m.i4*m.i8) <= -7)
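Editor's note: the script above only declares the model; a minimal sketch for solving it, assuming a MINLP-capable solver such as Couenne or Bonmin is installed and on the PATH:

from pyomo.environ import SolverFactory

opt = SolverFactory("couenne")  # any installed MINLP solver works here
results = opt.solve(m)
print(results.solver.termination_condition)
print("objective:", m.obj())    # evaluate the objective at the solution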
| 32.254237
| 76
| 0.513925
|
from pyomo.environ import *
model = m = ConcreteModel()
m.b1 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2 = Var(within=Binary,bounds=(0,1),initialize=0)
m.i3 = Var(within=Integers,bounds=(0,15),initialize=1)
m.i4 = Var(within=Integers,bounds=(0,15),initialize=1)
m.i5 = Var(within=Integers,bounds=(0,5),initialize=1)
m.i6 = Var(within=Integers,bounds=(0,5),initialize=1)
m.i7 = Var(within=Integers,bounds=(0,5),initialize=1)
m.i8 = Var(within=Integers,bounds=(0,5),initialize=1)
m.obj = Objective(expr= 0.1*m.b1 + 0.2*m.b2 + m.i3 + m.i4, sense=minimize)
m.c2 = Constraint(expr= 460*m.i5 + 570*m.i7 <= 1900)
m.c3 = Constraint(expr= 460*m.i6 + 570*m.i8 <= 1900)
m.c4 = Constraint(expr= - 460*m.i5 - 570*m.i7 <= -1700)
m.c5 = Constraint(expr= - 460*m.i6 - 570*m.i8 <= -1700)
m.c6 = Constraint(expr= m.i5 + m.i7 <= 5)
m.c7 = Constraint(expr= m.i6 + m.i8 <= 5)
m.c8 = Constraint(expr= m.b1 - m.i3 <= 0)
m.c9 = Constraint(expr= m.b2 - m.i4 <= 0)
m.c10 = Constraint(expr= - 15*m.b1 + m.i3 <= 0)
m.c11 = Constraint(expr= - 15*m.b2 + m.i4 <= 0)
m.c12 = Constraint(expr=-(m.i3*m.i5 + m.i4*m.i6) <= -8)
m.c13 = Constraint(expr=-(m.i3*m.i7 + m.i4*m.i8) <= -7)
| true
| true
|
f7186cbad53b1731dfb03dfb8d6594a4f2662ad1
| 1,124
|
py
|
Python
|
setup.py
|
mr-bo-jangles/wagtail-metadata
|
838c8d3329796575bad3986926419ee03a4ba073
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
mr-bo-jangles/wagtail-metadata
|
838c8d3329796575bad3986926419ee03a4ba073
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
mr-bo-jangles/wagtail-metadata
|
838c8d3329796575bad3986926419ee03a4ba073
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
Install wagtail-metadata using setuptools
"""
from setuptools import find_packages, setup
with open('README.rst', 'r') as f:
readme = f.read()
setup(
name='wagtail-metadata',
version='2.0.0',
description="A tool to assist with metadata for social media.",
long_description=readme,
author='Liam Brenner',
author_email='liam@takeflight.com.au',
url='https://github.com/takeflight/wagtail-metadata',
install_requires=[
'wagtail~=2.0',
],
zip_safe=False,
license='BSD License',
python_requires='>=3',
packages=find_packages(exclude=['tests', 'tests*']),
include_package_data=True,
package_data={},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
],
)
| 26.139535
| 67
| 0.623665
|
from setuptools import find_packages, setup
with open('README.rst', 'r') as f:
readme = f.read()
setup(
name='wagtail-metadata',
version='2.0.0',
description="A tool to assist with metadata for social media.",
long_description=readme,
author='Liam Brenner',
author_email='liam@takeflight.com.au',
url='https://github.com/takeflight/wagtail-metadata',
install_requires=[
'wagtail~=2.0',
],
zip_safe=False,
license='BSD License',
python_requires='>=3',
packages=find_packages(exclude=['tests', 'tests*']),
include_package_data=True,
package_data={},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
],
)
| true
| true
|
f7186d02b626ff5fe789875170c8f2f71eaeb809
| 530
|
py
|
Python
|
examples/volumetric/slicePlane2.py
|
leftwillow/vedo
|
b2e2cfc3453bbd118b6c81a2227b8ce6f1d22b7b
|
[
"CC0-1.0"
] | 1
|
2021-04-25T06:28:01.000Z
|
2021-04-25T06:28:01.000Z
|
examples/volumetric/slicePlane2.py
|
leftwillow/vedo
|
b2e2cfc3453bbd118b6c81a2227b8ce6f1d22b7b
|
[
"CC0-1.0"
] | null | null | null |
examples/volumetric/slicePlane2.py
|
leftwillow/vedo
|
b2e2cfc3453bbd118b6c81a2227b8ce6f1d22b7b
|
[
"CC0-1.0"
] | null | null | null |
"""Slice a Volume with multiple planes
Make low values of the scalar completely transparent"""
from vedo import *
vol = Volume(dataurl+'embryo.slc').alpha([0,0,0.5]).c('k')
slices = []
for i in range(4):
sl = vol.slicePlane(origin=[150,150,i*50+50], normal=(0,-1,1))
slices.append(sl)
amap = [0, 1, 1, 1, 1] # hide low value points giving them alpha 0
mslices = merge(slices) # merge all slices into a single Mesh
mslices.cmap('hot_r', alpha=amap).lighting('off').addScalarBar3D()
show(vol, mslices, __doc__, axes=1)
| 31.176471
| 67
| 0.686792
|
from vedo import *
vol = Volume(dataurl+'embryo.slc').alpha([0,0,0.5]).c('k')
slices = []
for i in range(4):
sl = vol.slicePlane(origin=[150,150,i*50+50], normal=(0,-1,1))
slices.append(sl)
amap = [0, 1, 1, 1, 1]
mslices = merge(slices)
mslices.cmap('hot_r', alpha=amap).lighting('off').addScalarBar3D()
show(vol, mslices, __doc__, axes=1)
| true
| true
|
f7186e374732b0c52d8561de3f1a8eb7e950ff16
| 133,069
|
py
|
Python
|
aesara/scan/op.py
|
sagartomar/aesara
|
477f4e5dd757b1ccd3deaf59bf75fc27d7ab9cf6
|
[
"BSD-3-Clause"
] | 1
|
2021-12-30T00:44:32.000Z
|
2021-12-30T00:44:32.000Z
|
aesara/scan/op.py
|
sagartomar/aesara
|
477f4e5dd757b1ccd3deaf59bf75fc27d7ab9cf6
|
[
"BSD-3-Clause"
] | null | null | null |
aesara/scan/op.py
|
sagartomar/aesara
|
477f4e5dd757b1ccd3deaf59bf75fc27d7ab9cf6
|
[
"BSD-3-Clause"
] | null | null | null |
"""This module provides the `Scan` `Op`.
Memory reuse in scan
--------------------
To reduce the number of memory allocations and copies associated with calling
the inner function and recovering the outputs at every iteration, Scan uses a
memory pre-allocation mechanism for some of its outputs. Instead of repeatedly
calling the inner function and copying the outputs to designated locations,
it tries to make the inner function write the outputs directly to the
designated locations.
This is achieved by initializing, at every iteration, the output storage
of the inner function with references to previously allocated memory. Other
than the code in the Python and Cython backends to do this and to ensure that
the pre-allocated memory has been used, the memory pre-allocation mechanism
relies on the following elements to work properly:
- In make_thunk(), when compiling the inner function, the borrow flag must
be set to False for the inputs. This will prevent aliasing between the
inputs and the outputs of the inner function which could lead to invalid
results.
- In make_thunk(), again, the borrow flag must be set to True for the outputs.
This will make Aesara consider the output storages as persistent and make
Aesara provide them as pre-allocated storage to the ops that compute the
outputs of the inner function instead of letting these ops allocate their
own output storage.
- The ops that produce the outputs of the inner function must be prevented
from working inplace because if they do, they're not using the pre-allocated
storage. This is achieved by including the optimization
'add_no_output_from_inplace' to the compilation mode used by scan. It
prevents other optimizations from altering the graph such that outputs are
produced by inplace operations.
- The ScanSaveMem optimization, whose goal is to limit the amount of memory
used by scan, needs to allocate buffers large enough to be able, at every
iteration, to simultaneously read the needed previous states and storing
the new states. Before the memory reuse feature, the buffers could be
smaller because, often, Scan only needed buffers large enough to read the
needed previous states. This is because all the outputs of the inner
function were computed before any of them was stored in the buffers. Now,
the outputs are stored as they are computed which means that, if the buffer
is too small, computing an output can overwrite an input that is still
needed to compute another output.
"""
import copy
import itertools
import logging
import time
from collections import OrderedDict
import numpy as np
import aesara
from aesara import tensor as aet
from aesara.compile.builders import infer_shape
from aesara.compile.function import function
from aesara.compile.io import In, Out
from aesara.compile.mode import AddFeatureOptimizer, get_mode
from aesara.compile.profiling import ScanProfileStats, register_profiler_printer
from aesara.configdefaults import config
from aesara.gradient import DisconnectedType, NullType, Rop, grad, grad_undefined
from aesara.graph.basic import (
Apply,
Constant,
Variable,
clone_replace,
equal_computations,
graph_inputs,
io_connection_pattern,
)
from aesara.graph.features import NoOutputFromInplace
from aesara.graph.fg import MissingInputError
from aesara.graph.op import Op, ops_with_inner_function
from aesara.link.c.basic import CLinker
from aesara.link.c.exceptions import MissingGXX
from aesara.link.utils import raise_with_op
from aesara.scan.utils import Validator, forced_replace, hash_listsDictsTuples, safe_new
from aesara.tensor.basic import as_tensor_variable
from aesara.tensor.math import minimum
from aesara.tensor.shape import Shape_i
from aesara.tensor.type import TensorType, integer_dtypes
from aesara.tensor.var import TensorVariable
__docformat__ = "restructedtext en"
__authors__ = (
"Razvan Pascanu "
"Frederic Bastien "
"James Bergstra "
"Pascal Lamblin "
"PyMC Developers "
"Aesara Developers "
)
__copyright__ = "(c) 2010, Universite de Montreal"
# Logging function for sending warning or info
_logger = logging.getLogger("aesara.scan.op")
class Scan(Op):
"""
Parameters
----------
inputs
Inputs of the inner function of scan.
outputs
Outputs of the inner function of scan.
info
Dictionary containing different properties of the scan op (like number
of different types of arguments, name, mode, if it should run on GPU or
not, etc.).
typeConstructor
Function that constructs an equivalent to Aesara TensorType.
Notes
-----
    ``typeConstructor`` was added to refactor how
Aesara deals with the GPU. If it runs on the GPU, scan needs
to construct certain outputs (those who reside in the GPU
memory) as the GPU-specific type. However we can not import
gpu code in this file (as it is in sandbox, and not available
on each machine) so the workaround is that the GPU
optimization passes to the constructor of this class a
function that is able to construct a GPU type. This way the
class Scan does not need to be aware of the details for the
GPU, it just constructs any tensor using this function (which
by default constructs normal tensors).
"""
def __init__(
self,
inputs,
outputs,
info,
typeConstructor=None,
):
# adding properties into self
self.inputs = inputs
self.outputs = outputs
self.__dict__.update(info)
# I keep a version of info in self, to use in __eq__ and __hash__,
        # since info contains all tunable parameters of the op; for two
        # scans to be equal, these tunable parameters should be the same
self.info = info
# build a list of output types for any Apply node using this op.
self.output_types = []
idx = 0
jdx = 0
def tensorConstructor(broadcastable, dtype):
return TensorType(broadcastable=broadcastable, dtype=dtype)
if typeConstructor is None:
typeConstructor = tensorConstructor
while idx < self.n_mit_mot_outs:
            # Note that for mit_mot there are several output slices per
# output sequence
o = outputs[idx]
self.output_types.append(
typeConstructor(
broadcastable=(False,) + o.type.broadcastable, dtype=o.type.dtype
)
)
idx += len(self.mit_mot_out_slices[jdx])
jdx += 1
# mit_sot / sit_sot / nit_sot
end = idx + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot
for o in outputs[idx:end]:
self.output_types.append(
typeConstructor(
broadcastable=(False,) + o.type.broadcastable, dtype=o.type.dtype
)
)
# shared outputs + possibly the ending condition
for o in outputs[end:]:
self.output_types.append(o.type)
if self.as_while:
self.output_types = self.output_types[:-1]
mode_instance = get_mode(self.mode)
# Clone mode_instance, altering "allow_gc" for the linker,
# and adding a message if we profile
if self.name:
message = self.name + " sub profile"
else:
message = "Scan sub profile"
self.mode_instance = mode_instance.clone(
link_kwargs=dict(allow_gc=self.allow_gc), message=message
)
if not hasattr(self, "name") or self.name is None:
self.name = "scan_fn"
# to have a fair __eq__ comparison later on, we update the info with
# the actual mode used to compile the function and the name of the
# function that we set in case none was given
self.info["name"] = self.name
# Pre-computing some values to speed up perform
self.mintaps = [np.min(x) for x in self.tap_array]
self.mintaps += [0 for x in range(self.n_nit_sot)]
self.seqs_arg_offset = 1 + self.n_seqs
self.shared_arg_offset = (
self.seqs_arg_offset + self.n_mit_mot + self.n_mit_sot + self.n_sit_sot
)
self.nit_sot_arg_offset = self.shared_arg_offset + self.n_shared_outs
self.n_outs = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot
self.n_tap_outs = self.n_mit_mot + self.n_mit_sot
if self.info["gpua"]:
self._hash_inner_graph = self.info["gpu_hash"]
else:
# Do the missing inputs check here to have the error early.
for var in graph_inputs(self.outputs, self.inputs):
if var not in self.inputs and not isinstance(var, Constant):
raise MissingInputError(f"ScanOp is missing an input: {repr(var)}")
self._cmodule_key = CLinker().cmodule_key_variables(
self.inputs, self.outputs, []
)
self._hash_inner_graph = hash(self._cmodule_key)
# Compute mappings between outer inputs, outer outputs, inner
        # inputs and inner outputs to determine which variables are associated
# with the same states.
self.var_mappings = self.get_oinp_iinp_iout_oout_mappings()
def validate_inner_graph(self):
"""
Perform some elementary validations on the inner graph to ensure
that it is coherent.
"""
# For every recurrent output, iterate over the associated inner
# inputs and output and ensure that they have the same dtype
nb_recurr_outputs = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot
for outer_oidx in range(nb_recurr_outputs):
inner_iidxs = self.var_mappings["inner_inp_from_outer_out"][outer_oidx]
inner_oidxs = self.var_mappings["inner_out_from_outer_out"][outer_oidx]
for (inner_iidx, inner_oidx) in itertools.product(inner_iidxs, inner_oidxs):
type_input = self.inputs[inner_iidx].type
type_output = self.outputs[inner_oidx].type
if type_input != type_output:
raise TypeError(
"Inconsistency in the inner graph of "
f"scan '{self.name}' : an input and an output are "
"associated with the same recurrent state "
"and should have the same type but have "
f"type '{type_input}' and '{type_output}' respectively."
)
        # If scan has the flag 'gpua' set to false (meaning that it shouldn't
        # use the gpuarray gpu backend), ensure that it has no input and no
        # output of type GpuArrayType
from aesara.gpuarray import GpuArrayType
if not self.info.get("gpua", False):
for inp in self.inputs:
if isinstance(inp.type, GpuArrayType):
raise TypeError(
"Inconsistency in the inner graph of "
f"scan '{self.name}' : one of the inputs to the "
"inner graph is of type GpuArrayType but "
"the attributes of the scan op indicate "
"that it shouldn't be the case"
)
for out in self.outputs:
if isinstance(out.type, GpuArrayType):
raise TypeError(
"Inconsistency in the inner graph of "
f"scan '{self.name}' : one of the outputs to the "
"inner graph is of type GpuArrayType but "
"the attributes of the scan op indicate "
"that it shouldn't be the case"
)
def __setstate__(self, d):
self.__dict__.update(d)
if "allow_gc" not in self.__dict__:
self.allow_gc = True
self.info["allow_gc"] = True
if not hasattr(self, "var_mappings"):
# Generate the mappings between inner and outer inputs and outputs
# if they haven't already been generated.
self.var_mappings = self.get_oinp_iinp_iout_oout_mappings()
if hasattr(self, "fn"):
if not hasattr(self, "thunk_mit_mot_out_slices"):
# The thunk has been compiled before mit_mot preallocation
# feature was implemented. Mark every mit_mot output tap as
# not having been preallocated
self.mitmots_preallocated = [False] * self.n_mit_mot_outs
if not hasattr(self, "outs_is_tensor"):
# The thunk has been compiled before the analysis, at
# compilation time, of the location of the inputs and outputs.
# Perform this analysis here.
self.inps_is_tensor = [
isinstance(out, TensorVariable)
for out in self.fn.maker.fgraph.inputs
]
self.outs_is_tensor = [
isinstance(out, TensorVariable)
for out in self.fn.maker.fgraph.outputs
]
# Ensure that the graph associated with the inner function is valid.
self.validate_inner_graph()
def make_node(self, *inputs):
"""
Conventions:
inner_X - the variable corresponding to X in the inner function
of scan (the lambda function executed at every time
step)
outer_X - the variable corresponding to X in the outer graph,
i.e. the main graph (where the scan op lives)
inner_X_out - the variable representing the new value of X after
executing one step of scan (i.e. outputs given by
the inner function)
"""
        assert all(isinstance(i, Variable) for i in inputs)  # np.all on a generator is always truthy
# Check that the number of inputs to the Scan node corresponds to
# the number of inputs of the inner function of scan
n_outer_ins = len(inputs) - len(self.outer_nitsot(inputs)) - 1
n_inner_ins = (
len(self.inner_seqs(self.inputs))
+ len(self.mitmot_taps())
+ len(self.mitsot_taps())
+ len(self.inner_sitsot(self.inputs))
+ len(self.inner_shared(self.inputs))
+ len(self.inner_non_seqs(self.inputs))
)
assert n_outer_ins == n_inner_ins, (
"The number of inputs given to the inner function of scan"
" does not match the number of inputs given to scan."
)
# Force the inputs to be on the CPU
new_inputs = [as_tensor_variable(inputs[0])]
# assert dtype is consistent
err_msg1 = (
"When compiling the inner function of scan (the "
"function called by scan in each of its iterations) "
"the following error has been encountered: The "
"%s %s (argument number %d) has dtype "
"%s and %d dimension(s). The corresponding variable "
"in the inner function of scan %s "
"however has dtype %s and %d dimension(s). This "
"variable in the inner function of scan should "
"have the same dtype and one fewer dimension "
"compared to its corresponding variable in the initial "
"state (outputs_info in scan nomenclature). For example, "
"if the inner function of scan returns a vector "
"of size d and scan uses the values of "
"the previous time-step, then the initial state in scan "
"should be a matrix of shape (1, d). "
"The first dimension of this "
"matrix corresponds to the number of previous time-steps "
"that scan uses in each of its iterations. "
"In order to solve this issue if the two variable currently "
"have the same dimensionality, you can increase the "
"dimensionality of the varialbe in the initial state of scan "
"by using dimshuffle or shape_padleft. "
)
err_msg2 = (
"When compiling the inner function of scan the "
"following error has been encountered: The "
"initial state (`outputs_info` in scan nomenclature) "
"of variable %s (argument number %d) "
"has dtype %s, while the result of the inner function "
"(`fn`) has dtype %s. This can happen if the inner "
"function of scan results in an upcast or downcast."
)
err_msg3 = (
"When compiling the inner function of scan (the "
"function called by scan in each of its iterations) "
"the following error has been encountered: The "
"initial state (`outputs_info` in scan nomenclature) "
"of variable %s (argument number %d) has %d dimension(s), "
"while the corresponding variable in the result of the inner "
"function of scan (`fn`) has %d dimension(s) (it should "
"be one less than the initial state). For example, "
"if the inner function of scan returns a vector "
"of size d and scan uses the values of "
"the previous time-step, then the initial state in scan "
"should be a matrix of shape (1, d). "
"The first dimension of this "
"matrix corresponds to the number of previous time-steps "
"that scan uses in each of its iterations. "
"In order to solve this issue if the two varialbe currently "
"have the same dimensionality, you can increase the "
"dimensionality of the variable in the initial state of scan "
"by using dimshuffle or shape_padleft. "
)
def check_broadcast(v1, v2):
"""Checks that the broadcast pattern of v1 and v2.
Controls that the broadcast pattern of the variable provided as
input to `scan` matches the broadcast pattern provided in
`output_info`. It raises an error when they don't match. The
typical case is when the user provides either the input or the
`output_info` (but not both) with a dimension fixed to 1,
which may wrongly be interpreted as broadcastable.
"""
if not hasattr(v1, "broadcastable") and not hasattr(v2, "broadcastable"):
return
msg = (
"The broadcast pattern of the output of scan (%s) is "
"inconsistent with the one provided in `output_info` "
"(%s). The output on axis %d is `%r`, but it is `%r` on "
"axis %d in `output_info`. This can happen if one of the "
"dimension is fixed to 1 in the input, while it is still "
"variable in the output, or vice-verca. You have to make "
"them consistent, e.g. using aesara.tensor."
"{patternbroadcast,unbroadcast,addbroadcast}."
)
size = min(len(v1.broadcastable), len(v2.broadcastable))
for n, (b1, b2) in enumerate(
zip(v1.broadcastable[-size:], v2.broadcastable[-size:])
):
if b1 != b2:
a1 = n + size - len(v1.broadcastable) + 1
a2 = n + size - len(v2.broadcastable) + 1
raise TypeError(msg % (v1.type, v2.type, a1, b1, b2, a2))
def format(var, as_var):
"""
            This function ensures that ``out`` has the same dtype as
``inp`` as well as calling filter_variable to make sure
they are both TensorType or GpuArrayType. It internally
deals with the corner case where inp.ndim + 1 = out.ndim
"""
if not hasattr(var, "dtype"):
return var
rval = var
if rval.type.dtype != as_var.type.dtype:
rval = rval.astype(as_var.type.dtype)
if rval.ndim == as_var.ndim:
rval = as_var.type.filter_variable(rval)
else:
tmp = as_var.type.clone(
broadcastable=(
tuple(var.broadcastable[:1]) + tuple(as_var.broadcastable)
)
)
rval = tmp.filter_variable(rval)
return rval
# Check if input sequences and variables representing a slice of
# them have the same dtype
argoffset = 0
for inner_seq, outer_seq in zip(
self.inner_seqs(self.inputs), self.outer_seqs(inputs)
):
check_broadcast(outer_seq, inner_seq)
new_inputs.append(format(outer_seq, as_var=inner_seq))
argoffset += len(self.outer_seqs(inputs))
        # Check that these 3 things have the same dtype for mit_mot:
# - initial state of the output
# - variable representing an input slice of the output
# - variable representing an output slice of the output
ipos = 0
opos = 0
inner_mitmot = self.inner_mitmot(self.inputs)
inner_mitmot_outs = self.inner_mitmot_outs(self.outputs)
for idx, (itaps, otaps, _outer_mitmot) in enumerate(
zip(self.mitmot_taps(), self.mitmot_out_taps(), self.outer_mitmot(inputs))
):
outer_mitmot = format(_outer_mitmot, as_var=inner_mitmot[ipos])
new_inputs.append(outer_mitmot)
for k in range(len(itaps)):
if (
inner_mitmot[ipos + k].type.dtype != outer_mitmot.type.dtype
or inner_mitmot[ipos + k].ndim != outer_mitmot.ndim - 1
):
raise ValueError(
err_msg1
% (
"initial state (outputs_info" " in scan nomenclature) ",
str(outer_mitmot),
argoffset + idx,
outer_mitmot.type.dtype,
outer_mitmot.type.ndim,
str(inner_mitmot[ipos + k]),
inner_mitmot[ipos + k].type.dtype,
inner_mitmot[ipos + k].type.ndim,
)
)
ipos += len(itaps)
for k in range(len(otaps)):
if inner_mitmot_outs[opos + k].type.dtype != outer_mitmot.type.dtype:
raise ValueError(
err_msg2
% (
str(outer_mitmot),
argoffset + idx,
outer_mitmot.type.dtype,
inner_mitmot_outs[opos + k].type.dtype,
)
)
if inner_mitmot_outs[opos + k].ndim != outer_mitmot.ndim - 1:
raise ValueError(
err_msg3
% (
str(outer_mitmot),
argoffset + idx,
outer_mitmot.ndim,
inner_mitmot_outs[opos + k].ndim,
)
)
opos += len(otaps)
argoffset += len(self.outer_mitmot(inputs))
# Same checks as above but for outputs of type mit_sot
ipos = 0
inner_mitsots = self.inner_mitsot(self.inputs)
for idx, (itaps, _outer_mitsot, inner_mitsot_out) in enumerate(
zip(
self.mitsot_taps(),
self.outer_mitsot(inputs),
self.inner_mitsot_outs(self.outputs),
)
):
outer_mitsot = format(_outer_mitsot, as_var=inner_mitsots[ipos])
new_inputs.append(outer_mitsot)
for k in range(len(itaps)):
if (
inner_mitsots[ipos + k].type.dtype != outer_mitsot.type.dtype
or inner_mitsots[ipos + k].ndim != outer_mitsot.ndim - 1
):
raise ValueError(
err_msg1
% (
"initial state (outputs_info" " in scan nomenclature) ",
str(outer_mitsot),
argoffset + idx,
outer_mitsot.type.dtype,
outer_mitsot.type.ndim,
str(inner_mitsots[ipos + k]),
inner_mitsots[ipos + k].type.dtype,
inner_mitsots[ipos + k].type.ndim,
)
)
ipos += len(itaps)
if inner_mitsot_out.type.dtype != outer_mitsot.type.dtype:
raise ValueError(
err_msg2
% (
str(outer_mitsot),
argoffset + idx,
outer_mitsot.type.dtype,
inner_mitsot_out.type.dtype,
)
)
if inner_mitsot_out.ndim != outer_mitsot.ndim - 1:
raise ValueError(
err_msg3
% (
str(outer_mitsot),
argoffset + idx,
outer_mitsot.ndim,
inner_mitsot_out.ndim,
)
)
argoffset += len(self.outer_mitsot(inputs))
# Same checks as above but for outputs of type sit_sot
for idx, (inner_sitsot, _outer_sitsot, inner_sitsot_out) in enumerate(
zip(
self.inner_sitsot(self.inputs),
self.outer_sitsot(inputs),
self.inner_sitsot_outs(self.outputs),
)
):
outer_sitsot = format(_outer_sitsot, as_var=inner_sitsot)
new_inputs.append(outer_sitsot)
if inner_sitsot.ndim != outer_sitsot.ndim - 1:
raise ValueError(
err_msg1
% (
"initial state (outputs_info" " in scan nomenclature) ",
str(outer_sitsot),
argoffset + idx,
outer_sitsot.type.dtype,
outer_sitsot.type.ndim,
str(inner_sitsot),
inner_sitsot.type.dtype,
inner_sitsot.type.ndim,
)
)
if inner_sitsot_out.type.dtype != outer_sitsot.type.dtype:
raise ValueError(
err_msg2
% (
str(outer_sitsot),
argoffset + idx,
outer_sitsot.type.dtype,
inner_sitsot_out.type.dtype,
)
)
if inner_sitsot_out.ndim != outer_sitsot.ndim - 1:
raise ValueError(
err_msg3
% (
str(outer_sitsot),
argoffset + idx,
outer_sitsot.type.ndim,
inner_sitsot_out.type.ndim,
)
)
argoffset += len(self.outer_sitsot(inputs))
        # Check that the shared variables and their update rules have the same
        # dtype. Maybe even the same type?!
for idx, (inner_shared, inner_shared_out, _outer_shared) in enumerate(
zip(
self.inner_shared(self.inputs),
self.inner_shared_outs(self.outputs),
self.outer_shared(inputs),
)
):
outer_shared = format(_outer_shared, as_var=inner_shared)
new_inputs.append(outer_shared)
if (
hasattr(outer_shared, "dtype")
and outer_shared.dtype != inner_shared_out.dtype
):
raise ValueError(
err_msg2
% (
str(outer_shared),
idx + argoffset,
outer_shared.dtype,
inner_shared_out.dtype,
)
)
if (
hasattr(outer_shared, "dtype")
and outer_shared.ndim != inner_shared_out.ndim
):
raise ValueError(
err_msg3
% (
str(outer_shared),
idx + argoffset,
outer_shared.ndim,
inner_shared_out.ndim,
)
)
if hasattr(outer_shared, "dtype") and (
outer_shared.dtype != inner_shared.dtype
or outer_shared.ndim != inner_shared.ndim
):
raise ValueError(
err_msg1
% (
"initial state (outputs_info" " in scan nomenclature) ",
str(outer_shared),
argoffset + idx,
outer_shared.dtype,
outer_shared.ndim,
str(inner_shared),
inner_shared.dtype,
inner_shared.ndim,
)
)
# We do not need to call `format` on outer_nisot arguments.
# outer_nitsot stands for no input tap single output tap. This means
# these are states that do not feed anything back in the recurrent
# computation, and hence they do not have an initial state. The scan
# node however receives an input for each such argument, the input
        # in this case is just an int saying how many steps of this output we
# need to store. This input does not have the same dtype, nor is it the same
# type of tensor as the output, it is always a scalar int.
new_inputs += [as_tensor_variable(ons) for ons in self.outer_nitsot(inputs)]
for inner_nonseq, _outer_nonseq in zip(
self.inner_non_seqs(self.inputs), self.outer_non_seqs(inputs)
):
outer_nonseq = format(_outer_nonseq, as_var=inner_nonseq)
new_inputs.append(outer_nonseq)
if inner_nonseq.type != outer_nonseq.type:
raise ValueError(
(
"Argument %s given to scan node does not"
" match its correspondence %s"
)
% (str(outer_nonseq), str(inner_nonseq))
)
for outer_nitsot in self.outer_nitsot(inputs):
            # For every nit_sot input we get as input an int/uint that
# depicts the size in memory for that sequence. This feature is
# used by truncated BPTT and by scan space optimization
if (
str(outer_nitsot.type.dtype) not in integer_dtypes
or outer_nitsot.ndim != 0
):
                raise ValueError(
                    "For output %s you need to provide a scalar int!"
                    % str(outer_nitsot)
                )
assert len(new_inputs) == len(inputs)
# The vector_seqs and vector_outs are just a workaround
# strange NumPy behavior: vector_ndarray[int] return a NumPy
# scalar and not a NumPy ndarray of 0 dimensions.
def is_cpu_vector(s):
return isinstance(s.type, TensorType) and s.ndim == 1
self.vector_seqs = [
is_cpu_vector(seq) for seq in new_inputs[1 : 1 + self.n_seqs]
]
self.vector_outs = [
is_cpu_vector(arg)
for arg in new_inputs[1 + self.n_seqs : (1 + self.n_seqs + self.n_outs)]
]
self.vector_outs += [
isinstance(t.type, TensorType) and t.ndim == 0
for t in self.outer_nitsot_outs(self.outputs)
]
apply_node = Apply(self, new_inputs, [t() for t in self.output_types])
return apply_node
def __eq__(self, other):
# Check if we are dealing with same type of objects
if not type(self) == type(other):
return False
if "destroy_map" not in self.info:
self.info["destroy_map"] = OrderedDict()
if "destroy_map" not in other.info:
other.info["destroy_map"] = OrderedDict()
keys_to_check = [
"truncate_gradient",
"profile",
"n_seqs",
"tap_array",
"as_while",
"n_mit_sot",
"destroy_map",
"n_nit_sot",
"n_shared_outs",
"n_sit_sot",
"gpua",
"n_mit_mot_outs",
"n_mit_mot",
"mit_mot_out_slices",
]
        # These are some safety checks (namely that the inner graph has the
        # same number of inputs and the same number of outputs)
if not len(self.inputs) == len(other.inputs):
return False
elif not len(self.outputs) == len(other.outputs):
return False
for key in keys_to_check:
if self.info[key] != other.info[key]:
return False
# If everything went OK up to here, there is still one thing to
# check. Namely, do the internal graph represent same
# computations
for self_in, other_in in zip(self.inputs, other.inputs):
if self_in.type != other_in.type:
return False
return equal_computations(
self.outputs, other.outputs, self.inputs, other.inputs
)
def __str__(self):
if self.gpua:
gpu_str = "gpu"
else:
gpu_str = "cpu"
if self.as_while:
name = "do_while"
else:
name = "for"
aux_txt = "%s"
if len(self.destroy_map.keys()) > 0:
# Check if all outputs are inplace
if sorted(self.destroy_map.keys()) == sorted(
range(self.n_mit_mot + self.n_mit_sot + self.n_sit_sot)
):
aux_txt += "all_inplace,%s,%s}"
else:
aux_txt += "{inplace{"
for k in self.destroy_map.keys():
aux_txt += str(k) + ","
aux_txt += "},%s,%s}"
else:
aux_txt += "{%s,%s}"
aux_txt = aux_txt % (name, gpu_str, str(self.name))
return aux_txt
def __hash__(self):
return hash(
(
type(self),
# and a hash representing the inner graph using the
# CLinker.cmodule_key_
self._hash_inner_graph,
hash_listsDictsTuples(self.info),
)
)
def make_thunk(self, node, storage_map, compute_map, no_recycling, impl=None):
"""
Parameters
----------
node
Something previously returned by self.make_node.
storage_map
dict variable -> one-element-list where a computed
value for this variable may be found.
compute_map
dict variable -> one-element-list where a boolean
value will be found. The boolean indicates whether the
variable's storage_map container contains a valid value (True)
or if it has not been computed yet (False).
no_recycling
List of variables for which it is forbidden to reuse memory
allocated by a previous call.
impl
Use 'py' if we want python execution.
Notes
-----
If the thunk consults the storage_map on every call, it is safe
for it to ignore the no_recycling argument, because elements of the
no_recycling list will have a value of None in the storage map. If
the thunk can potentially cache return values (like CLinker does),
then it must not do so for variables in the no_recycling list.
"""
# Before building the thunk, validate that the inner graph is
# coherent
self.validate_inner_graph()
# Setting up all my variables in what I believe is a more Cython
# friendly form
node_input_storage = [storage_map[r] for r in node.inputs]
node_output_storage = [storage_map[r] for r in node.outputs]
# If a shared variable is the result of a ViewOp it is a clear
# indication that we need to copy that value after the perform of
# scan is done
slices = self.n_mit_mot_outs + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot
if config.scan__allow_output_prealloc:
# Go through the mitmots. Whenever a mitmot has a tap both as an
# input and an output, wrap the input such that the corresponding
# output variable becomes an update to be performed on it, possibly
            # inplace at the end of the function's execution.
wrapped_inputs = [In(x, borrow=False) for x in self.inputs[: self.n_seqs]]
new_outputs = [x for x in self.outputs]
preallocated_mitmot_outs = []
new_mit_mot_out_slices = copy.deepcopy(self.mit_mot_out_slices)
input_idx = self.n_seqs
for mitmot_idx in range(self.n_mit_mot):
for inp_tap in self.tap_array[mitmot_idx]:
if inp_tap in self.mit_mot_out_slices[mitmot_idx]:
inp = self.inputs[input_idx]
# Figure out the index of the corresponding output
output_idx = sum(
[len(m) for m in self.mit_mot_out_slices[:mitmot_idx]]
)
output_idx += self.mit_mot_out_slices[mitmot_idx].index(inp_tap)
# Make it so the input is automatically updated to the
# output value, possibly inplace, at the end of the
# function execution. Also, since an update is
# defined, a default value must also be (this is
# verified by DebugMode). Use an array of size 0 but
# the right ndim and dtype (use a shape of 1 on
# broadcastable dimensions, 0 on the others).
default_shape = [1 if _b else 0 for _b in inp.broadcastable]
default_val = inp.type.value_zeros(default_shape)
wrapped_inp = In(
variable=inp,
value=default_val,
update=self.outputs[output_idx],
)
wrapped_inputs.append(wrapped_inp)
preallocated_mitmot_outs.append(output_idx)
new_mit_mot_out_slices[mitmot_idx].remove(inp_tap)
else:
# Wrap the corresponding input as usual. Leave the
# output as-is.
wrapped_inputs.append(In(self.inputs[input_idx], borrow=False))
input_idx += 1
# Wrap the inputs not associated to mitmots and wrap the remaining
# outputs
wrapped_inputs += [In(x, borrow=False) for x in self.inputs[input_idx:]]
wrapped_outputs = [Out(x, borrow=True) for x in new_outputs[:slices]]
wrapped_outputs += new_outputs[slices:]
# Remove now useless outputs from the output list (start from the
# end to avoid altering the indices of the other outputs to be
            # deleted).
preallocated_mitmot_outs.sort()
for p in preallocated_mitmot_outs[::-1]:
del wrapped_outputs[p]
# Store the list of mitmot output taps that have been altered
# so they can be preallocated
self.mitmots_preallocated = [
i in preallocated_mitmot_outs for i in range(self.n_mit_mot_outs)
]
# Add an optimization to the compilation mode to attach a feature
# to the function graph just before the inplace optimizations are
# applied (inplace optimizations start at position 50 so the
# optimization to attach the feature is registered at position 49.9
# so that it runs before them). This feature will prevent mitsot,
# sitsot and nitsot outputs from being computed inplace (to allow
# their preallocation).
mitsot_start = self.n_mit_mot_outs - len(preallocated_mitmot_outs)
nitsot_end = mitsot_start + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot
feature = NoOutputFromInplace(mitsot_start, nitsot_end)
opt = AddFeatureOptimizer(feature)
compilation_mode = self.mode_instance.register((opt, 49.9))
else:
# Output preallocation is not activated. Mark every mitmot output
# tap as not being preallocated
self.mitmots_preallocated = [False] * self.n_mit_mot_outs
wrapped_inputs = [In(x, borrow=True) for x in self.inputs]
wrapped_outputs = [Out(x, borrow=False) for x in self.outputs[:slices]]
wrapped_outputs += self.outputs[slices:]
compilation_mode = self.mode_instance
profile = None
if config.profile or (
isinstance(self.profile, (str, bool, (int,))) and self.profile
):
if isinstance(self.profile, str):
profile = ScanProfileStats(name=self.profile)
else:
profile = ScanProfileStats(name=self.name)
elif self.profile:
profile = self.profile
# make_thunk can be called many times on the same op
# we do not want to recompile the inner fct every time.
if not getattr(self, "fn", None):
self.fn = function(
wrapped_inputs,
wrapped_outputs,
mode=compilation_mode,
name=self.name,
profile=profile,
on_unused_input="ignore",
)
        # Analyse the compiled inner function to determine which inputs and
# outputs are on the gpu and speed up some checks during the execution
self.inps_is_tensor = [
isinstance(out, TensorVariable) for out in self.fn.maker.fgraph.inputs
]
self.outs_is_tensor = [
isinstance(out, TensorVariable) for out in self.fn.maker.fgraph.outputs
]
try:
if impl == "py":
raise MissingGXX
cython_mintaps = np.asarray(self.mintaps, dtype="int32")
cython_tap_array_len = np.asarray(
[len(x) for x in self.tap_array], dtype="int32"
)
if len(self.tap_array) == 0:
d1 = 0
else:
d1 = np.max(cython_tap_array_len)
d0 = len(self.tap_array)
cython_tap_array = np.zeros((d0, d1), dtype="int32")
for _d0 in range(d0):
for _d1 in range(cython_tap_array_len[_d0]):
cython_tap_array[_d0, _d1] = self.tap_array[_d0][_d1]
cython_mit_mot_out_nslices = np.asarray(
[len(x) for x in self.mit_mot_out_slices], dtype="int32"
)
if len(self.mit_mot_out_slices) == 0:
d1 = 0
else:
d1 = np.max(cython_mit_mot_out_nslices)
d0 = len(self.mit_mot_out_slices)
cython_mit_mot_out_slices = np.zeros((d0, d1), dtype="int32")
for _d0 in range(d0):
for _d1 in range(cython_mit_mot_out_nslices[_d0]):
cython_mit_mot_out_slices[_d0, _d1] = self.mit_mot_out_slices[_d0][
_d1
]
cython_vector_seqs = np.asarray(self.vector_seqs, dtype="int32")
cython_vector_outs = np.asarray(self.vector_outs, dtype="int32")
cython_mitmots_preallocated = np.asarray(
self.mitmots_preallocated, dtype="int32"
)
cython_inps_is_tensor = np.asarray(self.inps_is_tensor, dtype="int32")
cython_outs_is_tensor = np.asarray(self.outs_is_tensor, dtype="int32")
if self.destroy_map:
cython_destroy_map = [
x in self.destroy_map for x in range(len(node.outputs))
]
else:
cython_destroy_map = [0 for x in range(len(node.outputs))]
cython_destroy_map = np.asarray(cython_destroy_map, dtype="int32")
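            # All the arrays built above are plain int32 buffers that the
            # Cython implementation of `perform` (imported below) can consume
            # directly.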
from . import scan_perform_ext
def p(node, args, outs):
return scan_perform_ext.perform(
self.n_shared_outs,
self.n_mit_mot_outs,
self.n_seqs,
self.n_mit_mot,
self.n_mit_sot,
self.n_sit_sot,
self.n_nit_sot,
args[0],
self.as_while,
cython_mintaps,
cython_tap_array,
cython_tap_array_len,
cython_vector_seqs,
cython_vector_outs,
cython_mit_mot_out_slices,
cython_mit_mot_out_nslices,
cython_mitmots_preallocated,
cython_inps_is_tensor,
cython_outs_is_tensor,
self.fn.fn,
self.fn,
cython_destroy_map,
args,
outs,
self,
node,
)
except (ImportError, MissingGXX):
p = self.perform
# default arguments are stored in the closure of `rval`
        # Big ugly hack since we can't get the real value of allow_gc
        # for the enclosing function.
allow_gc = config.allow_gc and not self.allow_gc
def rval(
p=p, i=node_input_storage, o=node_output_storage, n=node, allow_gc=allow_gc
):
r = p(n, [x[0] for x in i], o)
for o in node.outputs:
compute_map[o][0] = True
if allow_gc:
self.fn.free()
return r
rval.inputs = node_input_storage
rval.outputs = node_output_storage
rval.perform = p
rval.lazy = False
return rval
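    # The helpers below slice the flat inner/outer input and output lists.
    # As encoded by the offsets they use, the outer inputs are ordered as:
    #   [n_steps, sequences, mit_mot/mit_sot/sit_sot initial states,
    #    shared variables, nit_sot lengths, non-sequences]
    # and the inner inputs as:
    #   [sequences, mit_mot/mit_sot taps, sit_sot taps, shared variables,
    #    non-sequences].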
def inner_seqs(self, list_inputs):
# Given the list of inner inputs this function grabs those
# corresponding to sequences
return list_inputs[: self.n_seqs]
def outer_seqs(self, list_inputs):
if isinstance(list_inputs, Apply):
list_inputs = list_inputs.inputs
# Given the list of outer inputs this function grabs those
# corresponding to sequences
return list_inputs[1 : 1 + self.n_seqs]
def inner_mitmot(self, list_inputs):
n_taps = sum(len(x) for x in self.tap_array[: self.n_mit_mot])
return list_inputs[self.n_seqs : self.n_seqs + n_taps]
def outer_mitmot(self, list_inputs):
if isinstance(list_inputs, Apply):
list_inputs = list_inputs.inputs
return list_inputs[1 + self.n_seqs : 1 + self.n_seqs + self.n_mit_mot]
def inner_mitmot_outs(self, list_outputs):
n_taps = sum(len(x) for x in self.mit_mot_out_slices)
return list_outputs[:n_taps]
def outer_mitmot_outs(self, list_outputs):
if isinstance(list_outputs, Apply):
list_outputs = list_outputs.outputs
return list_outputs[: self.n_mit_mot]
def mitmot_taps(self):
return self.tap_array[: self.n_mit_mot]
def mitmot_out_taps(self):
return self.mit_mot_out_slices[: self.n_mit_mot]
def inner_mitsot(self, list_inputs):
n_mitmot_taps = sum(len(x) for x in self.tap_array[: self.n_mit_mot])
ntaps_upto_sit_sot = sum(
len(x) for x in self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]
)
return list_inputs[
self.n_seqs + n_mitmot_taps : self.n_seqs + ntaps_upto_sit_sot
]
def outer_mitsot(self, list_inputs):
if isinstance(list_inputs, Apply):
list_inputs = list_inputs.inputs
offset = 1 + self.n_seqs + self.n_mit_mot
return list_inputs[offset : offset + self.n_mit_sot]
def inner_mitsot_outs(self, list_outputs):
n_taps = sum(len(x) for x in self.mit_mot_out_slices)
return list_outputs[n_taps : n_taps + self.n_mit_sot]
def outer_mitsot_outs(self, list_outputs):
if isinstance(list_outputs, Apply):
list_outputs = list_outputs.outputs
return list_outputs[self.n_mit_mot : self.n_mit_mot + self.n_mit_sot]
def mitsot_taps(self):
return self.tap_array[self.n_mit_mot : self.n_mit_mot + self.n_mit_sot]
def inner_sitsot(self, list_inputs):
n_taps_upto_sit_sot = sum(
len(x) for x in self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]
)
offset = self.n_seqs + n_taps_upto_sit_sot
return list_inputs[offset : offset + self.n_sit_sot]
def outer_sitsot(self, list_inputs):
if isinstance(list_inputs, Apply):
list_inputs = list_inputs.inputs
offset = 1 + self.n_seqs + self.n_mit_mot + self.n_mit_sot
return list_inputs[offset : offset + self.n_sit_sot]
def inner_sitsot_outs(self, list_outputs):
n_taps = sum(len(x) for x in self.mit_mot_out_slices)
offset = self.n_mit_sot + n_taps
return list_outputs[offset : offset + self.n_sit_sot]
def outer_sitsot_outs(self, list_outputs):
if isinstance(list_outputs, Apply):
list_outputs = list_outputs.outputs
offset = self.n_mit_mot + self.n_mit_sot
return list_outputs[offset : offset + self.n_sit_sot]
def outer_nitsot(self, list_inputs):
if isinstance(list_inputs, Apply):
list_inputs = list_inputs.inputs
offset = (
1
+ self.n_seqs
+ self.n_mit_mot
+ self.n_mit_sot
+ self.n_sit_sot
+ self.n_shared_outs
)
return list_inputs[offset : offset + self.n_nit_sot]
def inner_nitsot_outs(self, list_outputs):
n_taps = sum(len(x) for x in self.mit_mot_out_slices)
offset = self.n_mit_sot + n_taps + self.n_sit_sot
return list_outputs[offset : offset + self.n_nit_sot]
def outer_nitsot_outs(self, list_outputs):
if isinstance(list_outputs, Apply):
list_outputs = list_outputs.outputs
offset = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot
return list_outputs[offset : offset + self.n_nit_sot]
def inner_shared(self, list_inputs):
n_taps_upto_sit_sot = sum(
len(x) for x in self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]
)
offset = self.n_seqs + n_taps_upto_sit_sot + self.n_sit_sot
return list_inputs[offset : offset + self.n_shared_outs]
def outer_shared(self, list_inputs):
if isinstance(list_inputs, Apply):
list_inputs = list_inputs.inputs
offset = 1 + self.n_seqs + self.n_mit_mot + self.n_mit_sot + self.n_sit_sot
return list_inputs[offset : offset + self.n_shared_outs]
def inner_shared_outs(self, list_outputs):
n_taps = sum(len(x) for x in self.mit_mot_out_slices)
offset = self.n_mit_sot + n_taps + self.n_sit_sot + self.n_nit_sot
return list_outputs[offset : offset + self.n_shared_outs]
def outer_shared_outs(self, list_outputs):
if isinstance(list_outputs, Apply):
list_outputs = list_outputs.outputs
offset = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot
return list_outputs[offset : offset + self.n_shared_outs]
def inner_non_seqs(self, list_inputs):
n_taps_upto_sit_sot = sum(
len(x) for x in self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]
)
offset = self.n_seqs + n_taps_upto_sit_sot + self.n_sit_sot + self.n_shared_outs
return list_inputs[offset:]
def outer_non_seqs(self, list_inputs):
if isinstance(list_inputs, Apply):
list_inputs = list_inputs.inputs
offset = (
1
+ self.n_seqs
+ self.n_mit_mot
+ self.n_mit_sot
+ self.n_sit_sot
+ self.n_nit_sot
+ self.n_shared_outs
)
return list_inputs[offset:]
def perform(self, node, inputs, output_storage, params=None):
"""Compute the scan operation in Python.
The `inputs` are packed like this:
n_steps
X sequence inputs x_1, x_2, ... x_<self.n_seqs>
Y initial states (u_1, u_2, ... u_<self.n_outs>) for our
outputs. Each must have appropriate length (T_1, T_2, ..., T_Y).
W other inputs w_1, w_2, ... w_W
There are at least ``1 + self.n_seqs + self.n_outs`` inputs, and the
ones above this number are passed to the scanned function as
non-sequential inputs.
The outputs are more straightforward:
Y sequence outputs y_1, y_2, ... y_<self.n_outs>
"""
        # 1. Unpack the number of steps and the sequences. A negative or zero
        # number of steps is rejected below.
t0_call = time.time()
t_fn = 0
n_steps = inputs[0]
seqs = []
if n_steps < 0:
            # Historically, a negative number of steps was used for backward
            # scan. Inputs are now reversed outside of scan instead.
raise IndexError(
f"Scan was asked to run for negative number of step {int(n_steps)}"
)
elif n_steps == 0:
raise NotImplementedError(
"We didn't implemented yet the case where scan do 0 iteration"
)
else:
for idx, seq in enumerate(inputs[1 : self.seqs_arg_offset]):
if seq.shape[0] < n_steps:
raise ValueError(
(
"Sequence is shorter then the required "
"number of steps : (n_steps, seq, "
"seq.shape):"
),
n_steps,
node.inputs[1 + idx],
seq.shape,
)
seqs.append(seq)
# 2. Allocate memory for the outputs. Construct the list:
# store_steps -- map containing the length of each output
# pos -- map containing the current position of each
# output
store_steps = [
arg.shape[0]
for arg in inputs[self.seqs_arg_offset : self.shared_arg_offset]
]
store_steps += [
arg
for arg in inputs[
self.nit_sot_arg_offset : self.nit_sot_arg_offset + self.n_nit_sot
]
]
pos = [
(-self.mintaps[idx]) % store_steps[idx]
for idx in range(self.n_outs + self.n_nit_sot)
]
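        # `pos[idx]` is the rotating write index into the (possibly circular)
        # storage of output idx; it starts right after the initial taps
        # (illustrative values only: with mintap == -2 and store_steps[idx]
        # == 5, the first write goes to position 2).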
# 2.1 Create storage space for outputs
for idx in range(self.n_outs):
if idx in self.destroy_map:
                # ^ Case 1. Outputs should be computed in place of their
                # initial state
output_storage[idx][0] = inputs[self.seqs_arg_offset + idx]
elif (
output_storage[idx][0] is not None
and output_storage[idx][0].shape[1:]
== inputs[self.seqs_arg_offset + idx].shape[1:]
and output_storage[idx][0].shape[0] >= store_steps[idx]
):
# Put in the values of the initial state
output_storage[idx][0] = output_storage[idx][0][: store_steps[idx]]
if idx > self.n_mit_mot:
l = -self.mintaps[idx]
output_storage[idx][0][:l] = inputs[self.seqs_arg_offset + idx][:l]
else:
output_storage[idx][0][:] = inputs[self.seqs_arg_offset + idx]
else:
output_storage[idx][0] = inputs[self.seqs_arg_offset + idx].copy()
offset = self.nit_sot_arg_offset + self.n_nit_sot
other_args = inputs[offset:]
inner_input_storage = self.fn.input_storage
nb_mitmot_in = sum(map(len, self.tap_array[: self.n_mit_mot]))
old_mitmot_input_storage = [None] * nb_mitmot_in
old_mitmot_input_data = [None] * nb_mitmot_in
inner_output_storage = self.fn.output_storage
old_inner_output_storage = [None] * len(inner_output_storage)
old_inner_output_data = [None] * len(inner_output_storage)
fn = self.fn.fn
offset = (
self.n_seqs
+ sum(map(len, self.tap_array[: self.n_outs]))
+ self.n_shared_outs
)
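        # `offset` skips the inner-input slots used by sequences, state taps
        # and shared variables; the non-sequence arguments are written once
        # into the remaining slots, before the main loop starts.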
for idx in range(len(other_args)):
inner_input_storage[idx + offset].storage[0] = other_args[idx]
i = 0
cond = True
# ############# THE MAIN LOOP ##############
# for i in range(n_steps):
while (i < n_steps) and cond:
# sequences over which scan iterates
# 3. collect input slices
for idx in range(self.n_seqs):
if self.vector_seqs[idx]:
inner_input_storage[idx].storage[0] = seqs[idx][i : i + 1].reshape(
()
)
else:
inner_input_storage[idx].storage[0] = seqs[idx][i]
offset = self.n_seqs
for idx in range(self.n_outs):
if self.vector_outs[idx]:
for tap in self.tap_array[idx]:
_idx = (pos[idx] + tap) % store_steps[idx]
inner_input_storage[offset].storage[0] = output_storage[idx][0][
_idx : _idx + 1
].reshape(())
offset += 1
else:
for tap in self.tap_array[idx]:
_idx = (pos[idx] + tap) % store_steps[idx]
inner_input_storage[offset].storage[0] = output_storage[idx][0][
_idx
]
offset += 1
a_offset = self.shared_arg_offset
o_offset = self.n_outs + self.n_nit_sot
if i == 0:
for j in range(self.n_shared_outs):
inner_input_storage[offset].storage[0] = inputs[a_offset + j]
offset += 1
else:
for j in range(self.n_shared_outs):
inner_input_storage[offset].storage[0] = output_storage[
o_offset + j
][0]
offset += 1
# 4. collecting slices where the output should be stored
# 4.1. Collect slices for mitmots
offset = 0
for idx in range(self.n_mit_mot_outs):
if not self.mitmots_preallocated[idx]:
inner_output_storage[offset].storage[0] = None
offset += 1
# 4.2. Collect slices for mitsots, sitsots and nitsots
if i != 0:
for idx in range(self.n_outs + self.n_nit_sot - self.n_mit_mot):
if (
store_steps[idx + self.n_mit_mot] == 1
or self.vector_outs[idx + self.n_mit_mot]
):
inner_output_storage[idx + offset].storage[0] = None
else:
_pos0 = idx + self.n_mit_mot
inner_output_storage[idx + offset].storage[0] = output_storage[
_pos0
][0][pos[_pos0]]
else:
for idx in range(self.n_outs + self.n_nit_sot - self.n_mit_mot):
inner_output_storage[idx + offset].storage[0] = None
# 4.3. Collect slices for shared outputs
offset += self.n_outs + self.n_nit_sot - self.n_mit_mot
for idx in range(self.n_shared_outs):
inner_output_storage[idx + offset].storage[0] = None
# 4.4. If there is a condition add it to the mix
if self.as_while:
pdx = offset + self.n_shared_outs
inner_output_storage[pdx].storage[0] = None
# 4.5. Keep a reference to the variables (ndarrays, GpuArrays,
# etc) currently in the output_storage to be able to compare them
# with the actual outputs of the inner function after its
# execution. Also keep pointers to their data to be able to detect
            # cases where outputs reused the allocated object but altered the
# memory region they refer to.
for idx in range(len(inner_output_storage)):
var = inner_output_storage[idx].storage[0]
old_inner_output_storage[idx] = var
if var is None:
old_inner_output_data[idx] = None
elif self.outs_is_tensor[idx]:
old_inner_output_data[idx] = var.data
else:
old_inner_output_data[idx] = var.gpudata
# 4.6. Keep a reference to the variables (ndarrays, GpuArrays,
# etc) associated with mitmot inputs currently in the
# input_storage to be able to compare them with the content of the
# input_storage after the execution of the function. Also keep
# pointers to their data to be able to detect cases where outputs
            # reused the allocated object but altered the memory region they
# refer to.
for idx in range(nb_mitmot_in):
var = inner_input_storage[idx + self.n_seqs].storage[0]
old_mitmot_input_storage[idx] = var
if var is None:
old_mitmot_input_data[idx] = None
elif self.inps_is_tensor[idx + self.n_seqs]:
old_mitmot_input_data[idx] = var.data
else:
old_mitmot_input_data[idx] = var.gpudata
# 5.1 compute outputs
t0_fn = time.time()
try:
fn()
except Exception:
if hasattr(fn, "position_of_error"):
# this is a new vm-provided function or c linker
# they need this because the exception manipulation
# done by raise_with_op is not implemented in C.
if hasattr(fn, "thunks"):
# For the CVM
raise_with_op(
self.fn.maker.fgraph,
fn.nodes[fn.position_of_error],
fn.thunks[fn.position_of_error],
)
else:
# For the c linker
                        # We don't have access from Python to all the
                        # temporary values, so for now we just don't print
                        # the extra shapes/strides info.
raise_with_op(
self.fn.maker.fgraph, fn.nodes[fn.position_of_error]
)
else:
# old-style linkers raise their own exceptions
raise
dt_fn = time.time() - t0_fn
if self.as_while:
pdx = offset + self.n_shared_outs
cond = inner_output_storage[pdx].storage[0] == 0
# 5.2. By calling fn() directly instead of calling the aesara
# function, it is possible that the updates have not been
# performed. Perform the updates if needed.
offset_out = len(inner_output_storage) - 1
if getattr(fn, "need_update_inputs", True):
# Update the inputs that have an update function
for inp, storage in zip(
self.fn.maker.expanded_inputs[::-1], self.fn.input_storage[::-1]
):
if inp.update is not None:
storage.data = inner_output_storage[offset_out].data
offset_out -= 1
t_fn += dt_fn
offset_out = 0
# 5.3 Copy over the values for mit_mot outputs
mitmot_inp_offset = 0
mitmot_out_idx = 0
for j in range(self.n_mit_mot):
for k in self.mit_mot_out_slices[j]:
if self.mitmots_preallocated[mitmot_out_idx]:
# This output tap has been preallocated.
inp_idx = mitmot_inp_offset + self.tap_array[j].index(k)
# Verify whether the input points to the same data as
# it did before the execution of the inner function.
old_var = old_mitmot_input_storage[inp_idx]
new_var = inner_input_storage[self.n_seqs + inp_idx].storage[0]
if old_var is new_var:
old_data = old_mitmot_input_data[inp_idx]
if self.inps_is_tensor[self.n_seqs + inp_idx]:
same_data = new_var.data == old_data
else:
same_data = new_var.gpudata == old_data
else:
same_data = False
# If the corresponding input storage still points to
# the same data, it has been modified inplace and
                        # nothing needs to be done. Otherwise, recover the
                        # value and store it in `outs` as usual.
if not same_data:
output_storage[j][0][k + pos[j]] = inner_input_storage[
self.n_seqs + inp_idx
].storage[0]
else:
# This output tap has not been preallocated, recover
# its value as usual
output_storage[j][0][k + pos[j]] = inner_output_storage[
offset_out
].storage[0]
offset_out += 1
mitmot_out_idx += 1
mitmot_inp_offset += len(self.tap_array[j])
# 5.4 Copy over the values for mit_sot/sit_sot outputs
begin = self.n_mit_mot
end = self.n_outs
offset_out -= self.n_mit_mot
for j in range(begin, end):
# Copy the output value to `outs`, if necessary
if store_steps[j] == 1 or self.vector_outs[j]:
output_storage[j][0][pos[j]] = inner_output_storage[
offset_out + j
].storage[0]
else:
# Check whether the initialization of the output storage
# map for this output has been reused.
old_var = old_inner_output_storage[offset_out + j]
new_var = inner_output_storage[offset_out + j].storage[0]
if old_var is new_var:
old_data = old_inner_output_data[offset_out + j]
if old_data is None:
output_reused = False
elif self.outs_is_tensor[offset_out + j]:
output_reused = new_var.data == old_data
else:
output_reused = new_var.gpudata == old_data
else:
output_reused = False
if not output_reused:
try:
output_storage[j][0][pos[j]] = inner_output_storage[
offset_out + j
].storage[0]
except ValueError as e:
if i == 0:
# First iteration, so don't change the
# error message as it can't be the
# case we write about.
raise
ne = ValueError(
"An output of the scan has changed shape. "
"This may be caused by a pushout optimization."
" Try adding "
"'optimizer_excluding=scanOp_pushout_output' "
"to your Aesara flags."
)
raise ne from e
# 5.5 Copy over the values for nit_sot outputs
begin = end
end += self.n_nit_sot
for j in range(begin, end):
if i == 0:
jout = j + offset_out
shape = (store_steps[j],) + inner_output_storage[jout].storage[
0
].shape
dtype = inner_output_storage[jout].storage[0].dtype
if (
output_storage[j][0] is None
or output_storage[j][0].shape[0] < store_steps[j]
or output_storage[j][0].shape[1:] != shape[1:]
or output_storage[j][0].dtype != dtype
):
output_storage[j][0] = node.outputs[j].type.value_zeros(shape)
elif output_storage[j][0].shape[0] != store_steps[j]:
output_storage[j][0] = output_storage[j][0][: store_steps[j]]
output_storage[j][0][pos[j]] = inner_output_storage[jout].storage[0]
elif store_steps[j] == 1 or self.vector_outs[j]:
output_storage[j][0][pos[j]] = inner_output_storage[
j + offset_out
].storage[0]
else:
# Check whether the initialization of the output storage map
# for this output has been reused.
old_var = old_inner_output_storage[offset_out + j]
old_data = old_inner_output_data[offset_out + j]
new_var = inner_output_storage[offset_out + j].storage[0]
if old_var is new_var:
if old_data is None:
output_reused = False
elif self.outs_is_tensor[offset_out + j]:
output_reused = new_var.data == old_data
else:
output_reused = new_var.gpudata == old_data
else:
output_reused = False
if not output_reused:
output_storage[j][0][pos[j]] = inner_output_storage[
j + offset_out
].storage[0]
# 5.6 Copy over the values for outputs corresponding to shared
# variables
begin = end
end += self.n_shared_outs
for j in range(begin, end):
jout = j + offset_out
output_storage[j][0] = inner_output_storage[jout].storage[0]
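            # All outputs for this step have been stored; advance every
            # rotating write index and move on to the next iteration.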
pos = [(idx + 1) % store for idx, store in zip(pos, store_steps)]
i = i + 1
# 6. Check if you need to re-order output buffers
begin = self.n_mit_mot
end = self.n_outs + self.n_nit_sot
for idx in range(begin, end):
if store_steps[idx] < i - self.mintaps[idx] and pos[idx] < store_steps[idx]:
pdx = pos[idx]
if pdx >= store_steps[idx] // 2:
# It seems inefficient to copy the bigger part of the
# array over, and back, but it is the only way that
# there is no overlap in the areas of out[idx][0] that
# are read and written.
# This way, there will be no information overwritten
# before it is read (as it used to happen).
shape = (pdx,) + output_storage[idx][0].shape[1:]
tmp = node.outputs[idx].type.value_zeros(shape)
tmp[:] = output_storage[idx][0][:pdx]
output_storage[idx][0][: store_steps[idx] - pdx] = output_storage[
idx
][0][pdx:]
output_storage[idx][0][store_steps[idx] - pdx :] = tmp
del tmp
else:
shape = (store_steps[idx] - pdx,) + output_storage[idx][0].shape[1:]
tmp = node.outputs[idx].type.value_zeros(shape)
tmp[:] = output_storage[idx][0][pdx:]
output_storage[idx][0][store_steps[idx] - pdx :] = output_storage[
idx
][0][:pdx]
output_storage[idx][0][: store_steps[idx] - pdx] = tmp
del tmp
# This would normally happen only when doing truncated
# backpropagation through time. In such a scenario Scan is
# expected to return 0 for all entries for which the gradient is
# not actually computed
elif store_steps[idx] > i - self.mintaps[idx]:
output_storage[idx][0][i - self.mintaps[idx] :] = 0
                # This is a fix for a bug introduced by while. If you say
                # you want to loop up to a condition, you expect the output
                # to have that length (and not the maximal length possible).
                #
                # Without this, the behaviour of a scan op would not be
                # consistent between the case where optimizations get applied
                # and the case where they do not.
if i < n_steps:
# The reason I don't use out[idx][0][:i] is because for
# certain outputs (those with multiple taps),
# outs[idx][0] has more than n_steps entries, with the
# initial state at the beginning. When indexing in it I
# usually have to do something like
# outs[idx][0][i+offset]. To do something similar here,
                    # I would first have to compute the maximal tap for
                    # every output and then do outs[0][:i+maximal_tap],
                    # which I think implies more computation than this
                    # little trick that I used.
output_storage[idx][0] = output_storage[idx][0][: -(n_steps - i)]
# We never reuse the input or output storage of the
# inner function so we clear it.
for i_s in inner_input_storage:
i_s.storage[0] = None
for o_s in inner_output_storage:
o_s.storage[0] = None
t_call = time.time() - t0_call
# NOTE: make this match what's in function.types.Function
# and this little string helps us to find this spot:
# "PROFILE_CODE"
if hasattr(self.fn.maker, "profile") and self.fn.maker.profile:
profile = self.fn.maker.profile
profile.callcount += 1
profile.nbsteps += n_steps
profile.call_time += t_call
profile.vm_call_time += t_fn
if hasattr(self.fn.fn, "update_profile"):
self.fn.fn.update_profile(profile)
self.t_call = t_call
self.t_fn = t_fn
def infer_shape(self, fgraph, node, input_shapes):
# input_shapes correspond to the shapes of node.inputs
for inp, inp_shp in zip(node.inputs, input_shapes):
assert inp_shp is None or len(inp_shp) == inp.type.ndim
        # Here we build 2 variables:
# - A list `inner_ins_shapes`, such that inner_ins_shapes[i] is the
# shape of self.inputs[i]
# - A dictionary `out_equivalent` containing, for every inner input,
# an equivalent variable computed from the outer inputs.
# NOTE : For non-sequences, this equivalence is trivial. For
# sequences and recurrent states, there is no direct equivalence
# between outer and inner inputs. However, because every iteration
# of the Scan needs to give the same output shapes, we can give an
# equivalence between these inner inputs and the subelements of the
# corresponding outer inputs that the Scan would use as input for
# any given iteration. For simplicity, we use iteration 0.
inner_ins_shapes = []
out_equivalent = OrderedDict()
        # The two following blocks are commented out because they caused,
        # in some cases, extra scans in the graph. See gh-XXX for the
        # investigation.
# We skip the first outer input as it is the total or current number
# of iterations.
# sequences
seqs_shape = [x[1:] for x in input_shapes[1 : 1 + self.n_seqs]]
# We disable extra infer_shape for now. See gh-3765.
extra_infer_shape = False
if extra_infer_shape:
inner_seqs = self.inputs[: self.n_seqs]
outer_seqs = node.inputs[1 : 1 + self.n_seqs]
for in_s, out_s in zip(inner_seqs, outer_seqs):
out_equivalent[in_s] = out_s[0]
# mit_mot, mit_sot, sit_sot
outer_inp_idx = 1 + self.n_seqs
inner_inp_idx = self.n_seqs
else:
outer_inp_idx = 0
n_outs = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot
outs_shape = []
for idx in range(n_outs):
mintap = abs(min(self.tap_array[idx]))
for k in self.tap_array[idx]:
outs_shape += [input_shapes[idx + self.n_seqs + 1][1:]]
if extra_infer_shape:
corresponding_tap = node.inputs[outer_inp_idx][mintap + k]
out_equivalent[self.inputs[inner_inp_idx]] = corresponding_tap
inner_inp_idx += 1
outer_inp_idx += 1
# shared_outs
offset = 1 + self.n_seqs + n_outs
for idx in range(self.n_shared_outs):
outs_shape += [input_shapes[idx + offset]]
# non_sequences
offset += self.n_nit_sot + self.n_shared_outs
inner_ins_shapes = seqs_shape + outs_shape + input_shapes[offset:]
assert len(inner_ins_shapes) == len(self.inputs)
# Non-sequences have a direct equivalent from self.inputs in
# node.inputs
inner_non_sequences = self.inputs[len(seqs_shape) + len(outs_shape) :]
for in_ns, out_ns in zip(inner_non_sequences, node.inputs[offset:]):
out_equivalent[in_ns] = out_ns
if self.as_while:
self_outs = self.outputs[:-1]
else:
self_outs = self.outputs
outs_shape = infer_shape(
outs=self_outs, inputs=self.inputs, input_shapes=inner_ins_shapes
)
# Will be used to check if outs_shape can be expressed without using
# variables in self.inputs.
# The shapes of node.inputs are valid.
validator = Validator(
valid=input_shapes, invalid=self.inputs, valid_equivalent=out_equivalent
)
offset = 1 + self.n_seqs
scan_outs = [x for x in input_shapes[offset : offset + n_outs]]
offset += n_outs
outs_shape_n = self.n_mit_mot_outs + self.n_mit_sot + self.n_sit_sot
for x in range(self.n_nit_sot):
out_shape_x = outs_shape[outs_shape_n + x]
if out_shape_x is None:
# This output is not a tensor, and has no shape
scan_outs.append(None)
else:
# We need to make sure that we can compute the shapes from
# node.inputs, and constants, without using the variables
# in the inner function.
r = node.outputs[n_outs + x]
assert r.ndim == 1 + len(out_shape_x)
shp = [node.inputs[offset + self.n_shared_outs + x]]
for i, shp_i in zip(range(1, r.ndim), out_shape_x):
                    # Validate shp_i. v_shp_i is either None (if invalid),
                    # or a (variable, Boolean) tuple. The Boolean indicates
                    # whether the variable is shp_i (if True), or a valid
                    # equivalent (if False). Here, we only need the variable.
v_shp_i = validator.check(shp_i)
if v_shp_i is None:
if hasattr(r, "broadcastable") and r.broadcastable[i]:
shp.append(1)
else:
shp.append(Shape_i(i)(r))
else:
# It can (or at least, an equivalent variable can)
shp.append(v_shp_i[0])
scan_outs.append(tuple(shp))
scan_outs += [x for x in input_shapes[offset : offset + self.n_shared_outs]]
# if we are dealing with a repeat-until, then we do not know the
# leading dimension so we replace it for every entry with Shape_i
if self.as_while:
scan_outs_init = scan_outs
scan_outs = []
for o, x in zip(node.outputs, scan_outs_init):
if x is None:
scan_outs.append(None)
else:
scan_outs.append((Shape_i(0)(o),) + x[1:])
return scan_outs
def connection_pattern(self, node):
# We cache the result of this function because, with a previous
# implementation that repeatedly called grad, there were cases
# where calls to aesara.grad() took as much as 4h for functions
# containing many nested scans.
if hasattr(node.tag, "connection_pattern"):
return node.tag.connection_pattern
# Obtain the connection pattern of the inner function.
inner_connect_pattern = io_connection_pattern(self.inputs, self.outputs)
# Initially assume no outer input is connected to any outer output
connection_pattern = [[False for output in node.outputs] for x in node.inputs]
# For every possible pair of outer input and outer output, iterate
# over every possible pairing of their corresponding inner inputs
# and inner outputs and, if one such pair of inner variables is
        # connected, then the pair of outer variables is connected.
for outer_oidx in range(len(node.outputs)):
inner_oidxs = self.var_mappings["inner_out_from_outer_out"][outer_oidx]
for outer_iidx in range(len(node.inputs)):
inner_iidxs = self.var_mappings["inner_inp_from_outer_inp"][outer_iidx]
for inner_oidx in inner_oidxs:
for inner_iidx in inner_iidxs:
if inner_connect_pattern[inner_iidx][inner_oidx]:
connection_pattern[outer_iidx][outer_oidx] = True
break
if connection_pattern[outer_iidx][outer_oidx]:
break
# Applying Floyd-Warshall to find all paths connecting inputs to
# outputs. Note that if `x` is an input to `y_t` and `y_tm1` is an
# input to `z_t` then `x` is an input to `z_t`.
n_outs = len(node.outputs)
for steps in range(n_outs):
for iidx in range(n_outs):
for jidx in range(n_outs):
# Get the idx of the outer input corresponding to that
# outer output
j_inp_idx = self.var_mappings["outer_inp_from_outer_out"][jidx]
if j_inp_idx != -1:
if connection_pattern[j_inp_idx][iidx] is True:
for k in range(len(connection_pattern)):
if connection_pattern[k][jidx]:
connection_pattern[k][iidx] = True
node.tag.connection_pattern = connection_pattern
return connection_pattern
def get_oinp_iinp_iout_oout_mappings(self):
"""
Compute and return dictionary mappings between the inputs and
outputs of the inner function and the inputs and outputs of the Scan
node in the outer graph.
The return value is a dictionary in which the keys are the names of
the individual mappings and the values are the mapping dictionaries
themselves. In dictionaries representing mappings to outer variables,
the values are individual integer indices. In dictionaries
representing mappings to inner variables, the values are sequences of
indices because multiple inner variables can be associated with the
same state.
"""
# Lists for outer variables contain individual indices, lists for
# inner variables contain sequences of indices because many inner
        # variables can be associated with the same outer variable. The lists
        # are initialized to already contain the data associated with the
        # timestep index, the first outer input.
outer_input_indices = [0]
inner_input_indices = [[]]
inner_output_indices = [[]]
outer_output_indices = [-1]
outer_iidx = 1
inner_iidx = 0
inner_oidx = 0
outer_oidx = 0
# Handle sequences inputs
for i in range(self.info["n_seqs"]):
outer_input_indices.append(outer_iidx)
inner_input_indices.append([inner_iidx])
inner_output_indices.append([])
outer_output_indices.append(-1)
outer_iidx += 1
inner_iidx += 1
inner_oidx += 0
outer_oidx += 0
# Handle mitmots, mitsots and sitsots variables
for i in range(len(self.info["tap_array"])):
nb_input_taps = len(self.info["tap_array"][i])
if i < self.n_mit_mot:
nb_output_taps = len(self.mit_mot_out_slices[i])
else:
nb_output_taps = 1
outer_input_indices.append(outer_iidx)
inner_input_indices.append(
list(range(inner_iidx, inner_iidx + nb_input_taps))
)
inner_output_indices.append(
list(range(inner_oidx, inner_oidx + nb_output_taps))
)
outer_output_indices.append(outer_oidx)
outer_iidx += 1
inner_iidx += nb_input_taps
inner_oidx += nb_output_taps
outer_oidx += 1
# This is needed because, for outer inputs (and for outer inputs only)
# nitsots come *after* shared variables.
outer_iidx += self.info["n_shared_outs"]
# Handle nitsots variables
for i in range(self.n_nit_sot):
outer_input_indices.append(outer_iidx)
inner_input_indices.append([])
inner_output_indices.append([inner_oidx])
outer_output_indices.append(outer_oidx)
outer_iidx += 1
inner_iidx += 0
inner_oidx += 1
outer_oidx += 1
# This is needed because, for outer inputs (and for outer inputs only)
# nitsots come *after* shared variables.
outer_iidx -= self.info["n_shared_outs"] + self.n_nit_sot
# Handle shared states
for i in range(self.info["n_shared_outs"]):
outer_input_indices.append(outer_iidx)
inner_input_indices.append([inner_iidx])
inner_output_indices.append([inner_oidx])
outer_output_indices.append(outer_oidx)
outer_iidx += 1
inner_iidx += 1
inner_oidx += 1
outer_oidx += 1
# This is needed because, for outer inputs (and for outer inputs only)
# nitsots come *after* shared variables.
outer_iidx += self.n_nit_sot
# Handle non-sequence inputs
# Note : the number of non-sequence inputs is not stored in self.info
# so it has to be inferred from the number of inner inputs that remain
# to be handled
for i in range(len(self.inputs) - inner_iidx):
outer_input_indices.append(outer_iidx)
inner_input_indices.append([inner_iidx])
inner_output_indices.append([])
outer_output_indices.append(-1)
outer_iidx += 1
inner_iidx += 1
inner_oidx += 0
outer_oidx += 0
# With the global mapping inferred, the individual mappings
# can be produced
mappings = {
"outer_inp_from_outer_out": {},
"inner_inp_from_outer_out": {},
"inner_out_from_outer_out": {},
"inner_inp_from_outer_inp": {},
"inner_out_from_outer_inp": {},
"outer_out_from_outer_inp": {},
"outer_inp_from_inner_inp": {},
"inner_out_from_inner_inp": {},
"outer_out_from_inner_inp": {},
"outer_inp_from_inner_out": {},
"inner_inp_from_inner_out": {},
"outer_out_from_inner_out": {},
}
for (oinp, iinp, iout, oout) in zip(
outer_input_indices,
inner_input_indices,
inner_output_indices,
outer_output_indices,
):
if oout != -1:
mappings["outer_inp_from_outer_out"][oout] = oinp
mappings["inner_inp_from_outer_out"][oout] = iinp
mappings["inner_out_from_outer_out"][oout] = iout
if oinp != -1:
mappings["inner_inp_from_outer_inp"][oinp] = iinp
mappings["inner_out_from_outer_inp"][oinp] = iout
mappings["outer_out_from_outer_inp"][oinp] = oout
for idx in iinp:
mappings["outer_inp_from_inner_inp"][idx] = oinp
mappings["inner_out_from_inner_inp"][idx] = iout
mappings["outer_out_from_inner_inp"][idx] = oout
for idx in iout:
mappings["outer_inp_from_inner_out"][idx] = oinp
mappings["inner_inp_from_inner_out"][idx] = iinp
mappings["outer_out_from_inner_out"][idx] = oout
return mappings
def L_op(self, inputs, outs, dC_douts):
if not isinstance(outs, (list, tuple)):
outs = [outs]
        # `grad_steps` equals the number of steps the original scan node has
        # done (if the original scan is a while loop, this number is the
        # length of the output sequence).
        # We do not know what kind of outputs the original scan has, so we
        # first try to see if it has a nit_sot output, then a sit_sot and
        # then a mit_sot.
if self.n_nit_sot > 0:
grad_steps = self.outer_nitsot_outs(outs)[0].shape[0]
elif self.n_sit_sot > 0:
grad_steps = self.outer_sitsot_outs(outs)[0].shape[0] - 1
elif self.n_mit_sot > 0:
grad_steps = (
self.outer_mitsot_outs(outs)[0].shape[0] + self.mintaps[self.n_mit_mot]
)
else:
grad_steps = inputs[0]
if self.as_while:
n_steps = outs[0].shape[0]
# Restrict the number of grad steps according to
# self.truncate_gradient
if self.truncate_gradient != -1:
grad_steps = minimum(grad_steps, self.truncate_gradient)
self_inputs = self.inputs
self_outputs = self.outputs
# differentiable inputs
diff_inputs = (
self.inner_seqs(self_inputs)
+ self.inner_mitmot(self_inputs)
+ self.inner_mitsot(self_inputs)
+ self.inner_sitsot(self_inputs)
+ self.inner_non_seqs(self_inputs)
)
diff_outputs = (
self.inner_mitmot_outs(self_outputs)
+ self.inner_mitsot_outs(self_outputs)
+ self.inner_sitsot_outs(self_outputs)
+ self.inner_nitsot_outs(self_outputs)
)
scan_node = outs[0].owner
connection_pattern = self.connection_pattern(scan_node)
def get_inp_idx(iidx):
if iidx < self.n_seqs:
return 1 + iidx
oidx = 1 + self.n_seqs
iidx = iidx - self.n_seqs
for taps in self.mitmot_taps():
if len(taps) > iidx:
return oidx
else:
oidx += 1
iidx -= len(taps)
for taps in self.mitsot_taps():
if len(taps) > iidx:
return oidx
else:
oidx += 1
iidx -= len(taps)
if iidx < self.info["n_sit_sot"]:
return oidx + iidx
else:
return oidx + iidx + self.info["n_nit_sot"]
def get_out_idx(iidx):
oidx = 0
for taps in self.mitmot_out_taps():
if len(taps) > iidx:
return oidx
else:
oidx += 1
iidx -= len(taps)
return oidx + iidx
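        # `get_inp_idx` maps an index into `self.inputs` (the inner inputs) to
        # the index of the corresponding outer input of the scan node, and
        # `get_out_idx` does the same for outputs; both are used below
        # together with `connection_pattern`.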
def compute_all_gradients(known_grads):
y_s = known_grads.keys()
g_y_s = known_grads.values()
for g_y in g_y_s:
if str(g_y.dtype) in integer_dtypes:
raise TypeError(
"Gradients may never be integers but g_y "
"has type " + str(g_y.type)
)
out_indices = [get_out_idx(self_outputs.index(y)) for y in y_s]
connected_inputs = [
i
for i in range(len(scan_node.inputs))
if any([connection_pattern[i][odx] for odx in out_indices])
]
wrt = [
x
for x in graph_inputs(y_s)
if (x in diff_inputs)
and get_inp_idx(self_inputs.index(x)) in connected_inputs
]
gmp = OrderedDict()
# Required in case there is a pair of variables X and Y, with X
# used to compute Y, for both of which there is an external
# gradient signal. Without this, the total gradient signal on X
            # will be the external gradient signal known_grads[X]. With this,
# it will be the sum of the external gradient signal and the
# gradient obtained by propagating Y's external gradient signal
# to X.
known_grads = OrderedDict([(k.copy(), v) for (k, v) in known_grads.items()])
grads = grad(
cost=None,
known_grads=known_grads,
wrt=wrt,
consider_constant=wrt,
disconnected_inputs="ignore",
return_disconnected="None",
null_gradients="return",
)
for i in range(len(wrt)):
gmp[wrt[i]] = grads[i]
rval = [gmp.get(p, None) for p in diff_inputs]
return rval
dC_dinps_t = [None for inp in diff_inputs]
disconnected_dC_dinps_t = [True for inp in diff_inputs]
dC_dXts = []
Xts = []
for idx, Xt in enumerate(diff_outputs):
# We are looking for x[t-1] for a given x[t]
if idx >= self.n_mit_mot_outs:
Xt_placeholder = safe_new(Xt)
Xts.append(Xt_placeholder)
# Different processing based on whether Xt is a nitsot output
# or not. NOTE : This cannot be done by using
# "if Xt not in self.inner_nitsot_outs(self_outputs)" because
# the exact same variable can be used as multiple outputs.
idx_nitsot_start = (
self.info["n_mit_mot"] + self.info["n_mit_sot"] + self.info["n_sit_sot"]
)
idx_nitsot_end = idx_nitsot_start + self.info["n_nit_sot"]
if idx < idx_nitsot_start or idx >= idx_nitsot_end:
# What we do here is loop through dC_douts and collect all
# those that are connected to the specific one and do an
# upcast on all of their dtypes to get the dtype for this
# specific output. Deciding if the gradient with this
# specific previous step is defined or not is done somewhere
# else.
dtypes = []
states = (
self.inner_mitmot(self_inputs)
+ self.inner_mitsot(self_inputs)
+ self.inner_sitsot(self_inputs)
)
for pos, inp in enumerate(states):
if inp in graph_inputs([Xt]):
                        # Get the index of the outer output to which
                        # the state variable 'inp' corresponds.
outer_oidx = self.var_mappings["outer_out_from_inner_inp"][
self.n_seqs + pos
]
if not isinstance(dC_douts[outer_oidx].type, DisconnectedType):
dtypes.append(dC_douts[outer_oidx].dtype)
if dtypes:
new_dtype = aesara.scalar.upcast(*dtypes)
else:
new_dtype = config.floatX
dC_dXt = safe_new(Xt, dtype=new_dtype)
else:
if isinstance(dC_douts[idx].type, DisconnectedType):
continue
dC_dXt = safe_new(dC_douts[idx][0])
dC_dXts.append(dC_dXt)
known_grads = OrderedDict()
dc_dxts_idx = 0
for i in range(len(diff_outputs)):
if i < idx_nitsot_start or i >= idx_nitsot_end:
if diff_outputs[i] in known_grads:
known_grads[diff_outputs[i]] += dC_dXts[dc_dxts_idx]
else:
known_grads[diff_outputs[i]] = dC_dXts[dc_dxts_idx]
dc_dxts_idx += 1
else:
if isinstance(dC_douts[i].type, DisconnectedType):
continue
else:
if diff_outputs[i] in known_grads:
known_grads[diff_outputs[i]] += dC_dXts[dc_dxts_idx]
else:
known_grads[diff_outputs[i]] = dC_dXts[dc_dxts_idx]
dc_dxts_idx += 1
dC_dinps_t = compute_all_gradients(known_grads)
# mask inputs that get no gradients
for dx in range(len(dC_dinps_t)):
if not dC_dinps_t[dx]:
dC_dinps_t[dx] = aet.zeros_like(diff_inputs[dx])
else:
disconnected_dC_dinps_t[dx] = False
for Xt, Xt_placeholder in zip(diff_outputs[self.n_mit_mot_outs :], Xts):
tmp = forced_replace(dC_dinps_t[dx], Xt, Xt_placeholder)
dC_dinps_t[dx] = tmp
# construct dX_dtm1
dC_dXtm1s = []
for pos, x in enumerate(dC_dinps_t[self.n_seqs :]):
            # Get the index of the first inner output corresponding to the
            # pos-th inner input state
idxs = self.var_mappings["inner_out_from_inner_inp"][self.n_seqs + pos]
# Check if the pos-th input is associated with one of the
# recurrent states
x_is_state = pos < sum([len(t) for t in self.tap_array])
if x_is_state and len(idxs) > 0:
opos = idxs[0]
dC_dXtm1s.append(safe_new(dC_dXts[opos]))
if hasattr(x, "dtype") and x.dtype != dC_dXts[opos].dtype:
dC_dinps_t[pos + self.n_seqs] = x.astype(dC_dXts[opos].dtype)
else:
dC_dXtm1s.append(safe_new(x))
for dx, dC_dXtm1 in enumerate(dC_dXtm1s):
if isinstance(dC_dinps_t[dx + self.n_seqs].type, NullType):
# The accumulated gradient is undefined
pass
elif isinstance(dC_dXtm1.type, NullType):
                # The new gradient is undefined, this makes the accumulated
                # gradient undefined as well.
dC_dinps_t[dx + self.n_seqs] = dC_dXtm1
else:
dC_dinps_t[dx + self.n_seqs] += dC_dXtm1
# Construct scan op
# Seqs
if self.as_while:
# equivalent to x[:n_steps][::-1]
outer_inp_seqs = [x[n_steps - 1 :: -1] for x in inputs[1 : 1 + self.n_seqs]]
else:
outer_inp_seqs = [x[::-1] for x in inputs[1 : 1 + self.n_seqs]]
for idx in range(self.n_mit_mot + self.n_mit_sot):
mintap = np.min(self.tap_array[idx])
if idx < self.n_mit_mot:
outmaxtap = np.max(self.mitmot_out_taps()[idx])
else:
outmaxtap = 0
seq = outs[idx]
for k in self.tap_array[idx]:
if outmaxtap - k != 0:
nw_seq = seq[k - mintap : -(outmaxtap - k)][::-1]
else:
nw_seq = seq[k - mintap :][::-1]
outer_inp_seqs.append(nw_seq)
outer_inp_seqs += [x[:-1][::-1] for x in self.outer_sitsot_outs(outs)]
for x in self.outer_nitsot_outs(dC_douts):
if not isinstance(x.type, DisconnectedType):
if self.as_while:
# equivalent to x[:n_steps][::-1]
outer_inp_seqs.append(x[n_steps - 1 :: -1])
else:
outer_inp_seqs.append(x[::-1])
if hasattr(inputs[0].tag, "test_value"):
            # Here we test that the new scan input sequences all have
            # the same shape[0]. This is a property that the scan()
            # function adds and we want to keep for all Scan ops. This is
            # used in T_Scan.test_grad_multiple_outs_taps to test that.
if self.as_while:
n = n_steps.tag.test_value
else:
n = inputs[0].tag.test_value
for taps, x in zip(self.mitsot_taps(), self.outer_mitsot_outs(outs)):
mintap = np.min(taps)
if hasattr(x[::-1][:mintap], "test_value"):
assert x[::-1][:mintap].tag.test_value.shape[0] == n
for x in self.outer_sitsot_outs(outs):
if hasattr(x[::-1][:-1].tag, "test_value"):
assert x[::-1][:-1].tag.test_value.shape[0] == n
for x in self.outer_nitsot_outs(outs):
if hasattr(x[::-1].tag, "test_value"):
if self.as_while:
assert x[n_steps - 1 :: -1].tag.test_value.shape[0] == n
else:
assert x[::-1].tag.test_value.shape[0] == n
outer_inp_seqs += [
x[::-1][: np.min(taps)]
for taps, x in zip(self.mitsot_taps(), self.outer_mitsot_outs(outs))
]
outer_inp_seqs += [x[::-1][:-1] for x in self.outer_sitsot_outs(outs)]
outer_inp_seqs += [x[::-1] for x in self.outer_nitsot_outs(outs)]
# Restrict the length of the outer sequences to the number of grad
# steps
outer_inp_seqs = [s_[:grad_steps] for s_ in outer_inp_seqs]
inner_inp_seqs = self.inner_seqs(self_inputs)
inner_inp_seqs += self.inner_mitmot(self_inputs)
inner_inp_seqs += self.inner_mitsot(self_inputs)
inner_inp_seqs += self.inner_sitsot(self_inputs)
inner_inp_seqs += self.inner_nitsot_outs(dC_dXts)
inner_inp_seqs += Xts
# mitmot
outer_inp_mitmot = []
inner_inp_mitmot = []
inner_out_mitmot = []
mitmot_inp_taps = []
mitmot_out_taps = []
type_outs = []
out_pos = 0
ins_pos = self.n_seqs
n_mitmot_outs = 0
n_mitmot_inps = 0
for idx in range(self.n_mit_mot):
if isinstance(dC_douts[idx].type, DisconnectedType):
out = outs[idx]
outer_inp_mitmot.append(aet.zeros_like(out))
else:
outer_inp_mitmot.append(dC_douts[idx][::-1])
mitmot_inp_taps.append([])
mitmot_out_taps.append([])
undefined_msg = None
through_shared = False
disconnected = True
for jdx in range(len(self.mit_mot_out_slices[idx])):
inner_inp_mitmot.append(dC_dXts[out_pos])
mitmot_inp_taps[idx].append(-self.mit_mot_out_slices[idx][jdx])
n_mitmot_inps += 1
out_pos += 1
for jdx in range(len(self.tap_array[idx])):
tap = -self.tap_array[idx][jdx]
# Only create a new inner input if there is not already one
# associated with this input tap
if tap not in mitmot_inp_taps[idx]:
inner_inp_mitmot.append(dC_dXtm1s[ins_pos - self.n_seqs])
if isinstance(dC_dinps_t[ins_pos].type, NullType):
# We cannot use Null in the inner graph, so we
# use a zero tensor of the appropriate shape instead.
inner_out_mitmot.append(
aet.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)
)
undefined_msg = dC_dinps_t[ins_pos].type.why_null
else:
new_inner_out_mitmot = dC_dinps_t[ins_pos]
# If there is already an inner input associated with that
# input tap, make sure the computation of the new output
# uses it instead of the input it's currently using
if tap in mitmot_inp_taps[idx]:
to_replace = dC_dXtm1s[ins_pos - self.n_seqs]
replacement_idx = len(mitmot_inp_taps[idx]) - mitmot_inp_taps[
idx
].index(tap)
replacement = inner_inp_mitmot[-replacement_idx]
self.tap_array[idx]
new_inner_out_mitmot = clone_replace(
new_inner_out_mitmot, replace=[(to_replace, replacement)]
)
inner_out_mitmot.append(new_inner_out_mitmot)
if not disconnected_dC_dinps_t[ins_pos]:
disconnected = False
for _sh in self.inner_shared(self_inputs):
if _sh in graph_inputs([dC_dinps_t[ins_pos]]):
through_shared = True
ins_pos += 1
n_mitmot_outs += 1
mitmot_out_taps[idx].append(-self.tap_array[idx][jdx])
# Only add the tap as a new input tap if needed
if tap not in mitmot_inp_taps[idx]:
n_mitmot_inps += 1
mitmot_inp_taps[idx].append(-self.tap_array[idx][jdx])
if undefined_msg:
type_outs.append(undefined_msg)
elif through_shared:
type_outs.append("through_shared")
elif disconnected:
type_outs.append("disconnected")
else:
type_outs.append("connected")
offset = self.n_mit_mot
for idx in range(self.n_mit_sot):
if isinstance(dC_douts[idx + offset].type, DisconnectedType):
outer_inp_mitmot.append(outs[idx + offset].zeros_like())
else:
outer_inp_mitmot.append(dC_douts[idx + offset][::-1])
mitmot_inp_taps.append([])
mitmot_out_taps.append([])
idx_tap = idx + self.n_mit_mot
inner_inp_mitmot.append(dC_dXts[out_pos])
out_pos += 1
n_mitmot_inps += 1
undefined_msg = None
through_shared = False
disconnected = True
mitmot_inp_taps[idx + offset].append(0)
for jdx in range(len(self.tap_array[idx_tap])):
inner_inp_mitmot.append(dC_dXtm1s[ins_pos - self.n_seqs])
if isinstance(dC_dinps_t[ins_pos].type, NullType):
# We cannot use Null in the inner graph, so we
# use a zero tensor of the appropriate shape instead.
inner_out_mitmot.append(
aet.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)
)
undefined_msg = dC_dinps_t[ins_pos].type.why_null
else:
inner_out_mitmot.append(dC_dinps_t[ins_pos])
mitmot_inp_taps[idx + offset].append(-self.tap_array[idx_tap][jdx])
mitmot_out_taps[idx].append(-self.tap_array[idx_tap][jdx])
if not disconnected_dC_dinps_t[ins_pos]:
disconnected = False
for _sh in self.inner_shared(self_inputs):
if _sh in graph_inputs([dC_dinps_t[ins_pos]]):
through_shared = True
n_mitmot_inps += 1
ins_pos += 1
n_mitmot_outs += 1
if undefined_msg:
type_outs.append(undefined_msg)
elif through_shared:
type_outs.append("through_shared")
elif disconnected:
type_outs.append("disconnected")
else:
type_outs.append("connected")
offset += self.n_mit_sot
for idx in range(self.n_sit_sot):
mitmot_inp_taps.append([0, 1])
mitmot_out_taps.append([1])
through_shared = False
if not isinstance(dC_douts[idx + offset].type, DisconnectedType):
outer_inp_mitmot.append(dC_douts[idx + offset][::-1])
else:
if isinstance(dC_dinps_t[ins_pos].type, NullType):
# Cannot use dC_dinps_t[ins_pos].dtype, so we use
# floatX instead, as it is a dummy value that will not
# be used anyway.
outer_inp_mitmot.append(
aet.zeros(outs[idx + offset].shape, dtype=config.floatX)
)
else:
outer_inp_mitmot.append(
aet.zeros(
outs[idx + offset].shape, dtype=dC_dinps_t[ins_pos].dtype
)
)
if isinstance(dC_dinps_t[ins_pos].type, NullType):
# We cannot use Null in the inner graph, so we
# use a zero tensor of the appropriate shape instead.
inner_out_mitmot.append(
aet.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)
)
else:
inner_out_mitmot.append(dC_dinps_t[ins_pos])
for _sh in self.inner_shared(self_inputs):
if _sh in graph_inputs([dC_dinps_t[ins_pos]]):
through_shared = True
if isinstance(dC_dinps_t[ins_pos].type, NullType):
type_outs.append(dC_dinps_t[ins_pos].type.why_null)
elif through_shared:
type_outs.append("through_shared")
elif disconnected_dC_dinps_t[ins_pos]:
type_outs.append("disconnected")
else:
type_outs.append("connected")
inner_inp_mitmot += [dC_dXts[out_pos], dC_dXtm1s[ins_pos - self.n_seqs]]
n_mitmot_outs += 1
out_pos += 1
ins_pos += 1
n_mitmot_inps += 2
n_nit_sot = self.n_seqs
inner_out_nitsot = dC_dinps_t[: self.n_seqs]
inner_out_sitsot = dC_dinps_t[ins_pos:]
for _p, vl in enumerate(inner_out_sitsot):
through_shared = False
for _sh in self.inner_shared(self_inputs):
if _sh in graph_inputs([vl]):
through_shared = True
if isinstance(vl.type, NullType):
type_outs.append(vl.type.why_null)
# Replace the inner output with a zero tensor of
# the right shape
inner_out_sitsot[_p] = aet.zeros(
diff_inputs[ins_pos + _p].shape, dtype=config.floatX
)
elif through_shared:
type_outs.append("through_shared")
elif disconnected_dC_dinps_t[_p + ins_pos]:
type_outs.append("disconnected")
else:
type_outs.append("connected")
for _p, vl in enumerate(inner_out_nitsot):
through_shared = False
for _sh in self.inner_shared(self_inputs):
if _sh in graph_inputs([vl]):
through_shared = True
if isinstance(vl.type, NullType):
type_outs.append(vl.type.why_null)
# Replace the inner output with a zero tensor of
# the right shape
inner_out_nitsot[_p] = aet.zeros(
diff_inputs[_p].shape, dtype=config.floatX
)
if through_shared:
type_outs.append("through_shared")
elif disconnected_dC_dinps_t[_p]:
type_outs.append("disconnected")
else:
type_outs.append("connected")
inner_inp_sitsot = dC_dXtm1s[ins_pos - self.n_seqs :]
outer_inp_sitsot = []
for _idx, y in enumerate(inner_inp_sitsot):
x = self.outer_non_seqs(inputs)[_idx]
if isinstance(y.type, NullType):
# Cannot use dC_dXtm1s.dtype, so we use floatX instead.
outer_inp_sitsot.append(
aet.zeros(
[grad_steps + 1] + [x.shape[i] for i in range(x.ndim)],
dtype=config.floatX,
)
)
# replace y by a zero tensor of the right shape
inner_inp_sitsot[_idx] = aet.zeros(
diff_inputs[ins_pos + _idx].shape, dtype=config.floatX
)
else:
outer_inp_sitsot.append(
aet.zeros(
[grad_steps + 1] + [x.shape[i] for i in range(x.ndim)],
dtype=y.dtype,
)
)
n_sitsot_outs = len(outer_inp_sitsot)
new_tap_array = mitmot_inp_taps + [[-1] for k in range(n_sitsot_outs)]
info = OrderedDict()
info["n_seqs"] = len(outer_inp_seqs)
info["n_mit_sot"] = 0
info["tap_array"] = new_tap_array
info["gpua"] = False
info["n_mit_mot"] = len(outer_inp_mitmot)
info["n_mit_mot_outs"] = n_mitmot_outs
info["mit_mot_out_slices"] = mitmot_out_taps
info["truncate_gradient"] = self.truncate_gradient
info["n_sit_sot"] = n_sitsot_outs
info["n_shared_outs"] = 0
info["n_nit_sot"] = n_nit_sot
info["as_while"] = False
info["profile"] = self.profile
info["destroy_map"] = OrderedDict()
if self.name:
info["name"] = "grad_of_" + self.name
else:
info["name"] = None
info["mode"] = self.mode
info["allow_gc"] = self.allow_gc
outer_inputs = (
[grad_steps]
+ outer_inp_seqs
+ outer_inp_mitmot
+ outer_inp_sitsot
+ [n_steps if self.as_while else inputs[0] for _ in range(n_nit_sot)]
+ self.outer_shared(inputs)
+ self.outer_non_seqs(inputs)
)
inner_gfn_ins = (
inner_inp_seqs
+ inner_inp_mitmot
+ inner_inp_sitsot
+ self.inner_shared(self_inputs)
+ self.inner_non_seqs(self_inputs)
)
inner_gfn_outs = inner_out_mitmot + inner_out_sitsot + inner_out_nitsot
local_op = Scan(inner_gfn_ins, inner_gfn_outs, info)
outputs = local_op(*outer_inputs)
if type(outputs) not in (list, tuple):
outputs = [outputs]
# Re-order the gradients correctly
gradients = [DisconnectedType()()]
offset = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot + n_sitsot_outs
for p, (x, t) in enumerate(
zip(
outputs[offset : offset + self.n_seqs],
type_outs[offset : offset + self.n_seqs],
)
):
if t == "connected":
# If the forward scan is in as_while mode, we need to pad
# the gradients, so that they match the size of the input
# sequences.
if self.as_while:
n_zeros = inputs[0] - n_steps
shp = (n_zeros,)
if x.ndim > 1:
shp = shp + tuple(x.shape[i] for i in range(1, x.ndim))
z = aet.zeros(shp, dtype=x.dtype)
x = aet.concatenate([x[::-1], z], axis=0)
gradients.append(x)
else:
gradients.append(x[::-1])
elif t == "disconnected":
gradients.append(DisconnectedType()())
elif t == "through_shared":
gradients.append(
grad_undefined(
self, p + 1, inputs[p + 1], "Depends on a shared variable"
)
)
else:
# t contains the "why_null" string of a NullType
gradients.append(NullType(t)())
end = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot
for p, (x, t) in enumerate(zip(outputs[:end], type_outs[:end])):
if t == "connected":
# If the forward scan is in as_while mode, we need to pad
# the gradients, so that they match the size of the input
# sequences.
if self.as_while:
n_zeros = inputs[0] - grad_steps
shp = (n_zeros,)
if x.ndim > 1:
shp = shp + tuple(x.shape[i] for i in range(1, x.ndim))
z = aet.zeros(shp, dtype=x.dtype)
x = aet.concatenate([x[::-1], z], axis=0)
gradients.append(x)
else:
gradients.append(x[::-1])
elif t == "disconnected":
gradients.append(DisconnectedType()())
elif t == "through_shared":
gradients.append(
grad_undefined(
self,
p + 1 + self.n_seqs,
inputs[p + 1 + self.n_seqs],
"Depends on a shared variable",
)
)
else:
# t contains the "why_null" string of a NullType
gradients.append(NullType(t)())
start = len(gradients)
node = outs[0].owner
for idx in range(self.n_shared_outs):
disconnected = True
connected_flags = self.connection_pattern(node)[idx + start]
for dC_dout, connected in zip(dC_douts, connected_flags):
if not isinstance(dC_dout.type, DisconnectedType) and connected:
disconnected = False
if disconnected:
gradients.append(DisconnectedType()())
else:
gradients.append(
grad_undefined(
self, idx, inputs[idx], "Shared Variable with update"
)
)
start = len(gradients)
gradients += [DisconnectedType()() for _ in range(self.n_nit_sot)]
begin = end
end = begin + n_sitsot_outs
for p, (x, t) in enumerate(zip(outputs[begin:end], type_outs[begin:end])):
if t == "connected":
gradients.append(x[-1])
elif t == "disconnected":
gradients.append(DisconnectedType()())
elif t == "through_shared":
gradients.append(
grad_undefined(
self,
p + begin + 1,
inputs[p + begin + 1],
"Depends on a shared variable",
)
)
else:
# t contains the "why_null" string of a NullType
gradients.append(NullType(t)())
# Mask disconnected gradients
# Ideally we would want to assert that the gradients we are
# replacing do indeed evaluate to 0, though that is not practical
# from a computational point of view
# The gradients of scan are computed replacing Disconnected with 0,
# because through the recurrence they can become nonzero
for idx in range(len(gradients)):
disconnected = True
for kdx in range(len(node.outputs)):
if connection_pattern[idx][kdx] and not isinstance(
dC_douts[kdx].type, DisconnectedType
):
disconnected = False
if disconnected:
gradients[idx] = DisconnectedType()()
return gradients
def R_op(self, inputs, eval_points):
        # Step 0. Prepare some shortcut variables
self_inputs = self.inputs
rop_of_inputs = (
self_inputs[: self.n_seqs + self.n_outs]
+ self_inputs[self.n_seqs + self.n_outs + self.n_shared_outs :]
)
self_outputs = self.outputs
# Step 1. Compute the R_op of the inner function
inner_eval_points = [safe_new(x, "_evalpoint") for x in rop_of_inputs]
if self.as_while:
rop_self_outputs = self_outputs[:-1]
else:
rop_self_outputs = self_outputs
if self.info["n_shared_outs"] > 0:
rop_self_outputs = rop_self_outputs[: -self.info["n_shared_outs"]]
rop_outs = Rop(rop_self_outputs, rop_of_inputs, inner_eval_points)
if type(rop_outs) not in (list, tuple):
rop_outs = [rop_outs]
# Step 2. Figure out what corresponds to what in the scan
        # When doing the R-op of scan, you end up with double of each type of
        # input, because each sequence also needs its eval point, and the same
        # holds for each mit_mot, mit_sot, sit_sot or other type of input.
        # Interestingly enough, all these types of eval points behave the same
        # way as the input to which they correspond.
        # The only exceptions are the eval point for the number of sequences
        # and the eval point for the number of nit_sot, which I think should
        # just be ignored (?)
info = OrderedDict()
info["n_seqs"] = self.n_seqs * 2
info["n_mit_sot"] = self.n_mit_sot * 2
info["n_sit_sot"] = self.n_sit_sot * 2
info["n_mit_mot"] = self.n_mit_mot * 2
info["n_nit_sot"] = self.n_nit_sot * 2
info["n_shared_outs"] = self.n_shared_outs
info["gpua"] = False
info["as_while"] = self.as_while
info["profile"] = self.profile
info["truncate_gradient"] = self.truncate_gradient
if self.name:
info["name"] = "rop_of_" + self.name
else:
info["name"] = None
info["mode"] = self.mode
info["allow_gc"] = self.allow_gc
info["mit_mot_out_slices"] = self.mit_mot_out_slices * 2
info["destroy_map"] = OrderedDict()
new_tap_array = []
b = 0
e = self.n_mit_mot
new_tap_array += self.tap_array[b:e] * 2
b = e
e += self.n_mit_sot
new_tap_array += self.tap_array[b:e] * 2
b = e
e += self.n_sit_sot
new_tap_array += self.tap_array[b:e] * 2
info["tap_array"] = new_tap_array
# Sequences ...
b = 1
ib = 0
e = 1 + self.n_seqs
ie = self.n_seqs
clean_eval_points = []
for inp, evp in zip(inputs[b:e], eval_points[b:e]):
if evp is not None:
clean_eval_points.append(evp)
else:
clean_eval_points.append(inp.zeros_like())
scan_seqs = inputs[b:e] + clean_eval_points
inner_seqs = self_inputs[ib:ie] + inner_eval_points[ib:ie]
# MIT_MOT sequences ...
b = e
e = e + self.n_mit_mot
ib = ie
ie = ie + int(np.sum([len(x) for x in self.tap_array[: self.n_mit_mot]]))
clean_eval_points = []
for inp, evp in zip(inputs[b:e], eval_points[b:e]):
if evp is not None:
clean_eval_points.append(evp)
else:
clean_eval_points.append(inp.zeros_like())
scan_mit_mot = inputs[b:e] + clean_eval_points
inner_mit_mot = self_inputs[ib:ie] + inner_eval_points[ib:ie]
# MIT_SOT sequences ...
b = e
e = e + self.n_mit_sot
ib = ie
ie = ie + int(
np.sum(
[
len(x)
for x in self.tap_array[
self.n_mit_mot : self.n_mit_mot + self.n_mit_sot
]
]
)
)
clean_eval_points = []
for inp, evp in zip(inputs[b:e], eval_points[b:e]):
if evp is not None:
clean_eval_points.append(evp)
else:
clean_eval_points.append(inp.zeros_like())
scan_mit_sot = inputs[b:e] + clean_eval_points
inner_mit_sot = self_inputs[ib:ie] + inner_eval_points[ib:ie]
# SIT_SOT sequences ...
b = e
e = e + self.n_sit_sot
ib = ie
ie = ie + self.n_sit_sot
clean_eval_points = []
for inp, evp in zip(inputs[b:e], eval_points[b:e]):
if evp is not None:
clean_eval_points.append(evp)
else:
clean_eval_points.append(inp.zeros_like())
scan_sit_sot = inputs[b:e] + clean_eval_points
inner_sit_sot = self_inputs[ib:ie] + inner_eval_points[ib:ie]
# Shared outs ...
b = e
e = e + self.n_shared_outs
ib = ie
ie = ie + self.n_shared_outs
scan_shared = inputs[b:e]
inner_shared = self_inputs[ib:ie]
# NIT_SOT sequences
b = e
e = e + self.n_nit_sot
scan_nit_sot = inputs[b:e] * 2
# All other arguments
clean_eval_points = []
for inp, evp in zip(inputs[e:], eval_points[e:]):
if evp is not None:
clean_eval_points.append(evp)
else:
clean_eval_points.append(inp.zeros_like())
scan_other = inputs[e:] + clean_eval_points
# inner_eval_points do not have entries for shared variables
inner_other = self_inputs[ie:] + inner_eval_points[ib:]
# Outputs
n_mit_mot_outs = int(np.sum([len(x) for x in self.mit_mot_out_slices]))
info["n_mit_mot_outs"] = n_mit_mot_outs * 2
b = 0
e = n_mit_mot_outs
inner_out_mit_mot = self_outputs[b:e] + rop_outs[b:e]
b = e
e = e + self.n_mit_sot
inner_out_mit_sot = self_outputs[b:e] + rop_outs[b:e]
b = e
e = e + self.n_sit_sot
inner_out_sit_sot = self_outputs[b:e] + rop_outs[b:e]
b = e
e = e + self.n_nit_sot
inner_out_nit_sot = self_outputs[b:e] + rop_outs[b:e]
b = e
e = e + self.n_shared_outs
inner_out_shared = self_outputs[b:e]
inner_ins = (
inner_seqs
+ inner_mit_mot
+ inner_mit_sot
+ inner_sit_sot
+ inner_shared
+ inner_other
)
inner_outs = (
inner_out_mit_mot
+ inner_out_mit_sot
+ inner_out_sit_sot
+ inner_out_nit_sot
+ inner_out_shared
)
if self.as_while:
inner_outs += [self_outputs[-1]]
scan_inputs = (
[inputs[0]]
+ scan_seqs
+ scan_mit_mot
+ scan_mit_sot
+ scan_sit_sot
+ scan_shared
+ scan_nit_sot
+ scan_other
)
local_op = Scan(inner_ins, inner_outs, info)
outputs = local_op(*scan_inputs)
if type(outputs) not in (list, tuple):
outputs = [outputs]
# Select only the result of the R_op results
final_outs = []
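# Each group of outputs of the new scan contains the original outputs
# followed by their R_op counterparts; keep only the second half of
# every group.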
b = self.n_mit_mot
e = self.n_mit_mot * 2
final_outs += outputs[b:e]
b = e + self.n_mit_sot
e = e + self.n_mit_sot * 2
final_outs += outputs[b:e]
b = e + self.n_sit_sot
e = e + self.n_sit_sot * 2
final_outs += outputs[b:e]
b = e + self.n_nit_sot
e = e + self.n_nit_sot * 2
final_outs += outputs[b:e]
final_outs += [None] * self.n_shared_outs
return final_outs
# Since Scan is an op that contains an Aesara compiled function, it is
# useful to let DebugMode know about it.
ops_with_inner_function[Scan] = "fn"
@register_profiler_printer
def profile_printer(
message, compile_time, fct_call_time, apply_time, apply_cimpl, outputs_size, file
):
# Scan overhead profile
if any(
[
isinstance(node.op, Scan) and v > 0
for (fgraph, node), v in apply_time.items()
]
):
print("", file=file)
print("Scan overhead:", file=file)
print(
"<Scan op time(s)> <sub scan fct time(s)> <sub scan op "
"time(s)> <sub scan fct time(% scan op time)> <sub scan "
"op time(% scan op time)> <node>",
file=file,
)
total_super_scan_time = 0
total_scan_fct_time = 0
total_scan_op_time = 0
for (fgraph, node), v in apply_time.items():
if isinstance(node.op, Scan) and not node.op.fn.profile:
print(
" One scan node do not have its inner profile enabled. "
"If you enable Aesara profiler with "
"'aesara.function(..., profile=True)', you must manually"
" enable the profiling for each scan too: "
"'aesara.scan(...,profile=True)'."
" Or use Aesara flag 'profile=True'.",
file=file,
)
elif isinstance(node.op, Scan) and node.op.fn.profile:
if v > 0:
scan_fct_time = node.op.fn.profile.call_time
scan_op_time = sum(node.op.fn.profile.apply_time.values())
total_super_scan_time += v
total_scan_fct_time += scan_fct_time
total_scan_op_time += scan_op_time
print(
" %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%"
% (
v,
scan_fct_time,
scan_op_time,
scan_fct_time / v * 100,
scan_op_time / v * 100,
),
node,
file=file,
)
else:
print(
(" The node took 0s, so we can not " "compute the overhead"),
node,
file=file,
)
if total_super_scan_time == 0:
print(" No scan have its inner profile enabled.", file=file)
else:
print(
"total %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%"
% (
total_super_scan_time,
total_scan_fct_time,
total_scan_op_time,
total_scan_fct_time / total_super_scan_time * 100,
total_scan_op_time / total_super_scan_time * 100,
),
file=file,
)
| 42.190552
| 88
| 0.543936
|
import copy
import itertools
import logging
import time
from collections import OrderedDict
import numpy as np
import aesara
from aesara import tensor as aet
from aesara.compile.builders import infer_shape
from aesara.compile.function import function
from aesara.compile.io import In, Out
from aesara.compile.mode import AddFeatureOptimizer, get_mode
from aesara.compile.profiling import ScanProfileStats, register_profiler_printer
from aesara.configdefaults import config
from aesara.gradient import DisconnectedType, NullType, Rop, grad, grad_undefined
from aesara.graph.basic import (
Apply,
Constant,
Variable,
clone_replace,
equal_computations,
graph_inputs,
io_connection_pattern,
)
from aesara.graph.features import NoOutputFromInplace
from aesara.graph.fg import MissingInputError
from aesara.graph.op import Op, ops_with_inner_function
from aesara.link.c.basic import CLinker
from aesara.link.c.exceptions import MissingGXX
from aesara.link.utils import raise_with_op
from aesara.scan.utils import Validator, forced_replace, hash_listsDictsTuples, safe_new
from aesara.tensor.basic import as_tensor_variable
from aesara.tensor.math import minimum
from aesara.tensor.shape import Shape_i
from aesara.tensor.type import TensorType, integer_dtypes
from aesara.tensor.var import TensorVariable
__docformat__ = "restructedtext en"
__authors__ = (
"Razvan Pascanu "
"Frederic Bastien "
"James Bergstra "
"Pascal Lamblin "
"PyMC Developers "
"Aesara Developers "
)
__copyright__ = "(c) 2010, Universite de Montreal"
_logger = logging.getLogger("aesara.scan.op")
class Scan(Op):
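"""A symbolic loop: an inner Aesara graph is applied repeatedly over
input sequences and recurrent states, producing the stacked outputs."""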
def __init__(
self,
inputs,
outputs,
info,
typeConstructor=None,
):
self.inputs = inputs
self.outputs = outputs
self.__dict__.update(info)
self.info = info
self.output_types = []
idx = 0
jdx = 0
def tensorConstructor(broadcastable, dtype):
return TensorType(broadcastable=broadcastable, dtype=dtype)
if typeConstructor is None:
typeConstructor = tensorConstructor
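# Build the outer output types: every mit_mot/mit_sot/sit_sot/nit_sot
# output gains a leading (time) dimension compared to its inner output;
# shared outputs keep the inner type unchanged.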
while idx < self.n_mit_mot_outs:
o = outputs[idx]
self.output_types.append(
typeConstructor(
broadcastable=(False,) + o.type.broadcastable, dtype=o.type.dtype
)
)
idx += len(self.mit_mot_out_slices[jdx])
jdx += 1
end = idx + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot
for o in outputs[idx:end]:
self.output_types.append(
typeConstructor(
broadcastable=(False,) + o.type.broadcastable, dtype=o.type.dtype
)
)
for o in outputs[end:]:
self.output_types.append(o.type)
if self.as_while:
self.output_types = self.output_types[:-1]
mode_instance = get_mode(self.mode)
if self.name:
message = self.name + " sub profile"
else:
message = "Scan sub profile"
self.mode_instance = mode_instance.clone(
link_kwargs=dict(allow_gc=self.allow_gc), message=message
)
if not hasattr(self, "name") or self.name is None:
self.name = "scan_fn"
self.info["name"] = self.name
self.mintaps = [np.min(x) for x in self.tap_array]
self.mintaps += [0 for x in range(self.n_nit_sot)]
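# Offsets into the outer input list, whose layout is:
# [n_steps, sequences, mit_mot/mit_sot/sit_sot initial states,
#  shared variables, nit_sot lengths, other (non-sequence) arguments].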
self.seqs_arg_offset = 1 + self.n_seqs
self.shared_arg_offset = (
self.seqs_arg_offset + self.n_mit_mot + self.n_mit_sot + self.n_sit_sot
)
self.nit_sot_arg_offset = self.shared_arg_offset + self.n_shared_outs
self.n_outs = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot
self.n_tap_outs = self.n_mit_mot + self.n_mit_sot
if self.info["gpua"]:
self._hash_inner_graph = self.info["gpu_hash"]
else:
for var in graph_inputs(self.outputs, self.inputs):
if var not in self.inputs and not isinstance(var, Constant):
raise MissingInputError(f"ScanOp is missing an input: {repr(var)}")
self._cmodule_key = CLinker().cmodule_key_variables(
self.inputs, self.outputs, []
)
self._hash_inner_graph = hash(self._cmodule_key)
self.var_mappings = self.get_oinp_iinp_iout_oout_mappings()
def validate_inner_graph(self):
nb_recurr_outputs = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot
for outer_oidx in range(nb_recurr_outputs):
inner_iidxs = self.var_mappings["inner_inp_from_outer_out"][outer_oidx]
inner_oidxs = self.var_mappings["inner_out_from_outer_out"][outer_oidx]
for (inner_iidx, inner_oidx) in itertools.product(inner_iidxs, inner_oidxs):
type_input = self.inputs[inner_iidx].type
type_output = self.outputs[inner_oidx].type
if type_input != type_output:
raise TypeError(
"Inconsistency in the inner graph of "
f"scan '{self.name}' : an input and an output are "
"associated with the same recurrent state "
"and should have the same type but have "
f"type '{type_input}' and '{type_output}' respectively."
)
# If this scan does not use the gpuarray backend ('gpua' is False),
# ensure that it has no input and no output with type GpuArrayType
from aesara.gpuarray import GpuArrayType
if not self.info.get("gpua", False):
for inp in self.inputs:
if isinstance(inp.type, GpuArrayType):
raise TypeError(
"Inconsistency in the inner graph of "
f"scan '{self.name}' : one of the inputs to the "
"inner graph is of type GpuArrayType but "
"the attributes of the scan op indicate "
"that it shouldn't be the case"
)
for out in self.outputs:
if isinstance(out.type, GpuArrayType):
raise TypeError(
"Inconsistency in the inner graph of "
f"scan '{self.name}' : one of the outputs to the "
"inner graph is of type GpuArrayType but "
"the attributes of the scan op indicate "
"that it shouldn't be the case"
)
def __setstate__(self, d):
self.__dict__.update(d)
if "allow_gc" not in self.__dict__:
self.allow_gc = True
self.info["allow_gc"] = True
if not hasattr(self, "var_mappings"):
# Generate the mappings between inner and outer inputs and outputs
# if they haven't already been generated.
self.var_mappings = self.get_oinp_iinp_iout_oout_mappings()
if hasattr(self, "fn"):
if not hasattr(self, "thunk_mit_mot_out_slices"):
self.mitmots_preallocated = [False] * self.n_mit_mot_outs
if not hasattr(self, "outs_is_tensor"):
self.inps_is_tensor = [
isinstance(out, TensorVariable)
for out in self.fn.maker.fgraph.inputs
]
self.outs_is_tensor = [
isinstance(out, TensorVariable)
for out in self.fn.maker.fgraph.outputs
]
self.validate_inner_graph()
def make_node(self, *inputs):
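# Validate the outer inputs against the inner graph (dtype, ndim and
# broadcast pattern), coerce them where possible, and build the Apply
# node using the precomputed output types.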
assert np.all(isinstance(i, Variable) for i in inputs)
n_outer_ins = len(inputs) - len(self.outer_nitsot(inputs)) - 1
n_inner_ins = (
len(self.inner_seqs(self.inputs))
+ len(self.mitmot_taps())
+ len(self.mitsot_taps())
+ len(self.inner_sitsot(self.inputs))
+ len(self.inner_shared(self.inputs))
+ len(self.inner_non_seqs(self.inputs))
)
assert n_outer_ins == n_inner_ins, (
"The number of inputs given to the inner function of scan"
" does not match the number of inputs given to scan."
)
new_inputs = [as_tensor_variable(inputs[0])]
err_msg1 = (
"When compiling the inner function of scan (the "
"function called by scan in each of its iterations) "
"the following error has been encountered: The "
"%s %s (argument number %d) has dtype "
"%s and %d dimension(s). The corresponding variable "
"in the inner function of scan %s "
"however has dtype %s and %d dimension(s). This "
"variable in the inner function of scan should "
"have the same dtype and one fewer dimension "
"compared to its corresponding variable in the initial "
"state (outputs_info in scan nomenclature). For example, "
"if the inner function of scan returns a vector "
"of size d and scan uses the values of "
"the previous time-step, then the initial state in scan "
"should be a matrix of shape (1, d). "
"The first dimension of this "
"matrix corresponds to the number of previous time-steps "
"that scan uses in each of its iterations. "
"In order to solve this issue if the two variable currently "
"have the same dimensionality, you can increase the "
"dimensionality of the varialbe in the initial state of scan "
"by using dimshuffle or shape_padleft. "
)
err_msg2 = (
"When compiling the inner function of scan the "
"following error has been encountered: The "
"initial state (`outputs_info` in scan nomenclature) "
"of variable %s (argument number %d) "
"has dtype %s, while the result of the inner function "
"(`fn`) has dtype %s. This can happen if the inner "
"function of scan results in an upcast or downcast."
)
err_msg3 = (
"When compiling the inner function of scan (the "
"function called by scan in each of its iterations) "
"the following error has been encountered: The "
"initial state (`outputs_info` in scan nomenclature) "
"of variable %s (argument number %d) has %d dimension(s), "
"while the corresponding variable in the result of the inner "
"function of scan (`fn`) has %d dimension(s) (it should "
"be one less than the initial state). For example, "
"if the inner function of scan returns a vector "
"of size d and scan uses the values of "
"the previous time-step, then the initial state in scan "
"should be a matrix of shape (1, d). "
"The first dimension of this "
"matrix corresponds to the number of previous time-steps "
"that scan uses in each of its iterations. "
"In order to solve this issue if the two varialbe currently "
"have the same dimensionality, you can increase the "
"dimensionality of the variable in the initial state of scan "
"by using dimshuffle or shape_padleft. "
)
def check_broadcast(v1, v2):
if not hasattr(v1, "broadcastable") and not hasattr(v2, "broadcastable"):
return
msg = (
"The broadcast pattern of the output of scan (%s) is "
"inconsistent with the one provided in `output_info` "
"(%s). The output on axis %d is `%r`, but it is `%r` on "
"axis %d in `output_info`. This can happen if one of the "
"dimension is fixed to 1 in the input, while it is still "
"variable in the output, or vice-verca. You have to make "
"them consistent, e.g. using aesara.tensor."
"{patternbroadcast,unbroadcast,addbroadcast}."
)
size = min(len(v1.broadcastable), len(v2.broadcastable))
for n, (b1, b2) in enumerate(
zip(v1.broadcastable[-size:], v2.broadcastable[-size:])
):
if b1 != b2:
a1 = n + size - len(v1.broadcastable) + 1
a2 = n + size - len(v2.broadcastable) + 1
raise TypeError(msg % (v1.type, v2.type, a1, b1, b2, a2))
def format(var, as_var):
if not hasattr(var, "dtype"):
return var
rval = var
if rval.type.dtype != as_var.type.dtype:
rval = rval.astype(as_var.type.dtype)
if rval.ndim == as_var.ndim:
rval = as_var.type.filter_variable(rval)
else:
tmp = as_var.type.clone(
broadcastable=(
tuple(var.broadcastable[:1]) + tuple(as_var.broadcastable)
)
)
rval = tmp.filter_variable(rval)
return rval
argoffset = 0
for inner_seq, outer_seq in zip(
self.inner_seqs(self.inputs), self.outer_seqs(inputs)
):
check_broadcast(outer_seq, inner_seq)
new_inputs.append(format(outer_seq, as_var=inner_seq))
argoffset += len(self.outer_seqs(inputs))
ipos = 0
opos = 0
inner_mitmot = self.inner_mitmot(self.inputs)
inner_mitmot_outs = self.inner_mitmot_outs(self.outputs)
for idx, (itaps, otaps, _outer_mitmot) in enumerate(
zip(self.mitmot_taps(), self.mitmot_out_taps(), self.outer_mitmot(inputs))
):
outer_mitmot = format(_outer_mitmot, as_var=inner_mitmot[ipos])
new_inputs.append(outer_mitmot)
for k in range(len(itaps)):
if (
inner_mitmot[ipos + k].type.dtype != outer_mitmot.type.dtype
or inner_mitmot[ipos + k].ndim != outer_mitmot.ndim - 1
):
raise ValueError(
err_msg1
% (
"initial state (outputs_info" " in scan nomenclature) ",
str(outer_mitmot),
argoffset + idx,
outer_mitmot.type.dtype,
outer_mitmot.type.ndim,
str(inner_mitmot[ipos + k]),
inner_mitmot[ipos + k].type.dtype,
inner_mitmot[ipos + k].type.ndim,
)
)
ipos += len(itaps)
for k in range(len(otaps)):
if inner_mitmot_outs[opos + k].type.dtype != outer_mitmot.type.dtype:
raise ValueError(
err_msg2
% (
str(outer_mitmot),
argoffset + idx,
outer_mitmot.type.dtype,
inner_mitmot_outs[opos + k].type.dtype,
)
)
if inner_mitmot_outs[opos + k].ndim != outer_mitmot.ndim - 1:
raise ValueError(
err_msg3
% (
str(outer_mitmot),
argoffset + idx,
outer_mitmot.ndim,
inner_mitmot_outs[opos + k].ndim,
)
)
opos += len(otaps)
argoffset += len(self.outer_mitmot(inputs))
ipos = 0
inner_mitsots = self.inner_mitsot(self.inputs)
for idx, (itaps, _outer_mitsot, inner_mitsot_out) in enumerate(
zip(
self.mitsot_taps(),
self.outer_mitsot(inputs),
self.inner_mitsot_outs(self.outputs),
)
):
outer_mitsot = format(_outer_mitsot, as_var=inner_mitsots[ipos])
new_inputs.append(outer_mitsot)
for k in range(len(itaps)):
if (
inner_mitsots[ipos + k].type.dtype != outer_mitsot.type.dtype
or inner_mitsots[ipos + k].ndim != outer_mitsot.ndim - 1
):
raise ValueError(
err_msg1
% (
"initial state (outputs_info" " in scan nomenclature) ",
str(outer_mitsot),
argoffset + idx,
outer_mitsot.type.dtype,
outer_mitsot.type.ndim,
str(inner_mitsots[ipos + k]),
inner_mitsots[ipos + k].type.dtype,
inner_mitsots[ipos + k].type.ndim,
)
)
ipos += len(itaps)
if inner_mitsot_out.type.dtype != outer_mitsot.type.dtype:
raise ValueError(
err_msg2
% (
str(outer_mitsot),
argoffset + idx,
outer_mitsot.type.dtype,
inner_mitsot_out.type.dtype,
)
)
if inner_mitsot_out.ndim != outer_mitsot.ndim - 1:
raise ValueError(
err_msg3
% (
str(outer_mitsot),
argoffset + idx,
outer_mitsot.ndim,
inner_mitsot_out.ndim,
)
)
argoffset += len(self.outer_mitsot(inputs))
for idx, (inner_sitsot, _outer_sitsot, inner_sitsot_out) in enumerate(
zip(
self.inner_sitsot(self.inputs),
self.outer_sitsot(inputs),
self.inner_sitsot_outs(self.outputs),
)
):
outer_sitsot = format(_outer_sitsot, as_var=inner_sitsot)
new_inputs.append(outer_sitsot)
if inner_sitsot.ndim != outer_sitsot.ndim - 1:
raise ValueError(
err_msg1
% (
"initial state (outputs_info" " in scan nomenclature) ",
str(outer_sitsot),
argoffset + idx,
outer_sitsot.type.dtype,
outer_sitsot.type.ndim,
str(inner_sitsot),
inner_sitsot.type.dtype,
inner_sitsot.type.ndim,
)
)
if inner_sitsot_out.type.dtype != outer_sitsot.type.dtype:
raise ValueError(
err_msg2
% (
str(outer_sitsot),
argoffset + idx,
outer_sitsot.type.dtype,
inner_sitsot_out.type.dtype,
)
)
if inner_sitsot_out.ndim != outer_sitsot.ndim - 1:
raise ValueError(
err_msg3
% (
str(outer_sitsot),
argoffset + idx,
outer_sitsot.type.ndim,
inner_sitsot_out.type.ndim,
)
)
argoffset += len(self.outer_sitsot(inputs))
for idx, (inner_shared, inner_shared_out, _outer_shared) in enumerate(
zip(
self.inner_shared(self.inputs),
self.inner_shared_outs(self.outputs),
self.outer_shared(inputs),
)
):
outer_shared = format(_outer_shared, as_var=inner_shared)
new_inputs.append(outer_shared)
if (
hasattr(outer_shared, "dtype")
and outer_shared.dtype != inner_shared_out.dtype
):
raise ValueError(
err_msg2
% (
str(outer_shared),
idx + argoffset,
outer_shared.dtype,
inner_shared_out.dtype,
)
)
if (
hasattr(outer_shared, "dtype")
and outer_shared.ndim != inner_shared_out.ndim
):
raise ValueError(
err_msg3
% (
str(outer_shared),
idx + argoffset,
outer_shared.ndim,
inner_shared_out.ndim,
)
)
if hasattr(outer_shared, "dtype") and (
outer_shared.dtype != inner_shared.dtype
or outer_shared.ndim != inner_shared.ndim
):
raise ValueError(
err_msg1
% (
"initial state (outputs_info" " in scan nomenclature) ",
str(outer_shared),
argoffset + idx,
outer_shared.dtype,
outer_shared.ndim,
str(inner_shared),
inner_shared.dtype,
inner_shared.ndim,
)
)
new_inputs += [as_tensor_variable(ons) for ons in self.outer_nitsot(inputs)]
for inner_nonseq, _outer_nonseq in zip(
self.inner_non_seqs(self.inputs), self.outer_non_seqs(inputs)
):
outer_nonseq = format(_outer_nonseq, as_var=inner_nonseq)
new_inputs.append(outer_nonseq)
if inner_nonseq.type != outer_nonseq.type:
raise ValueError(
(
"Argument %s given to scan node does not"
" match its correspondence %s"
)
% (str(outer_nonseq), str(inner_nonseq))
)
for outer_nitsot in self.outer_nitsot(inputs):
if (
str(outer_nitsot.type.dtype) not in integer_dtypes
or outer_nitsot.ndim != 0
):
raise ValueError(
"For output %s you need to provide a " "scalar int !",
str(outer_nitsot),
)
assert len(new_inputs) == len(inputs)
def is_cpu_vector(s):
return isinstance(s.type, TensorType) and s.ndim == 1
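# Record which sequences/outputs are 1-d tensors: their per-step slices
# are 0-d, and `perform` extracts them with a reshape to () instead of
# plain indexing.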
self.vector_seqs = [
is_cpu_vector(seq) for seq in new_inputs[1 : 1 + self.n_seqs]
]
self.vector_outs = [
is_cpu_vector(arg)
for arg in new_inputs[1 + self.n_seqs : (1 + self.n_seqs + self.n_outs)]
]
self.vector_outs += [
isinstance(t.type, TensorType) and t.ndim == 0
for t in self.outer_nitsot_outs(self.outputs)
]
apply_node = Apply(self, new_inputs, [t() for t in self.output_types])
return apply_node
def __eq__(self, other):
if not type(self) == type(other):
return False
if "destroy_map" not in self.info:
self.info["destroy_map"] = OrderedDict()
if "destroy_map" not in other.info:
other.info["destroy_map"] = OrderedDict()
keys_to_check = [
"truncate_gradient",
"profile",
"n_seqs",
"tap_array",
"as_while",
"n_mit_sot",
"destroy_map",
"n_nit_sot",
"n_shared_outs",
"n_sit_sot",
"gpua",
"n_mit_mot_outs",
"n_mit_mot",
"mit_mot_out_slices",
]
if not len(self.inputs) == len(other.inputs):
return False
elif not len(self.outputs) == len(other.outputs):
return False
for key in keys_to_check:
if self.info[key] != other.info[key]:
return False
for self_in, other_in in zip(self.inputs, other.inputs):
if self_in.type != other_in.type:
return False
return equal_computations(
self.outputs, other.outputs, self.inputs, other.inputs
)
def __str__(self):
if self.gpua:
gpu_str = "gpu"
else:
gpu_str = "cpu"
if self.as_while:
name = "do_while"
else:
name = "for"
aux_txt = "%s"
if len(self.destroy_map.keys()) > 0:
if sorted(self.destroy_map.keys()) == sorted(
range(self.n_mit_mot + self.n_mit_sot + self.n_sit_sot)
):
aux_txt += "all_inplace,%s,%s}"
else:
aux_txt += "{inplace{"
for k in self.destroy_map.keys():
aux_txt += str(k) + ","
aux_txt += "},%s,%s}"
else:
aux_txt += "{%s,%s}"
aux_txt = aux_txt % (name, gpu_str, str(self.name))
return aux_txt
def __hash__(self):
return hash(
(
type(self),
self._hash_inner_graph,
hash_listsDictsTuples(self.info),
)
)
def make_thunk(self, node, storage_map, compute_map, no_recycling, impl=None):
self.validate_inner_graph()
node_input_storage = [storage_map[r] for r in node.inputs]
node_output_storage = [storage_map[r] for r in node.outputs]
slices = self.n_mit_mot_outs + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot
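# `slices` counts the outer outputs that are written step by step into
# buffers (everything except the shared-variable outputs); only these
# get explicit Out wrappers below.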
if config.scan__allow_output_prealloc:
wrapped_inputs = [In(x, borrow=False) for x in self.inputs[: self.n_seqs]]
new_outputs = [x for x in self.outputs]
preallocated_mitmot_outs = []
new_mit_mot_out_slices = copy.deepcopy(self.mit_mot_out_slices)
input_idx = self.n_seqs
for mitmot_idx in range(self.n_mit_mot):
for inp_tap in self.tap_array[mitmot_idx]:
if inp_tap in self.mit_mot_out_slices[mitmot_idx]:
inp = self.inputs[input_idx]
# Figure out the index of the corresponding output
output_idx = sum(
[len(m) for m in self.mit_mot_out_slices[:mitmot_idx]]
)
output_idx += self.mit_mot_out_slices[mitmot_idx].index(inp_tap)
# Make it so the input is automatically updated to the
# output value, possibly inplace, at the end of the
# function execution. Also, since an update is
# defined, a default value must also be provided (this is
# verified by DebugMode). Use an array of size 0 but
# the right ndim and dtype (use a shape of 1 on
# broadcastable dimensions, 0 on the others).
default_shape = [1 if _b else 0 for _b in inp.broadcastable]
default_val = inp.type.value_zeros(default_shape)
wrapped_inp = In(
variable=inp,
value=default_val,
update=self.outputs[output_idx],
)
wrapped_inputs.append(wrapped_inp)
preallocated_mitmot_outs.append(output_idx)
new_mit_mot_out_slices[mitmot_idx].remove(inp_tap)
else:
# Wrap the corresponding input as usual. Leave the
# output as-is.
wrapped_inputs.append(In(self.inputs[input_idx], borrow=False))
input_idx += 1
# Wrap the inputs not associated to mitmots and wrap the remaining
# outputs
wrapped_inputs += [In(x, borrow=False) for x in self.inputs[input_idx:]]
wrapped_outputs = [Out(x, borrow=True) for x in new_outputs[:slices]]
wrapped_outputs += new_outputs[slices:]
# Remove now useless outputs from the output list (start from the
# end to avoid altering the indices of the other outputs to be
# deleted).
preallocated_mitmot_outs.sort()
for p in preallocated_mitmot_outs[::-1]:
del wrapped_outputs[p]
# Store the list of mitmot output taps that have been altered
# so they can be preallocated
self.mitmots_preallocated = [
i in preallocated_mitmot_outs for i in range(self.n_mit_mot_outs)
]
# Add an optimization to the compilation mode to attach a feature
# to the function graph just before the inplace optimizations are
# applied (inplace optimizations start at position 50 so the
# optimization to attach the feature is registered at position 49.9
# so that it runs before them). This feature will prevent mitsot,
# sitsot and nitsot outputs from being computed inplace (to allow
# their preallocation).
mitsot_start = self.n_mit_mot_outs - len(preallocated_mitmot_outs)
nitsot_end = mitsot_start + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot
feature = NoOutputFromInplace(mitsot_start, nitsot_end)
opt = AddFeatureOptimizer(feature)
compilation_mode = self.mode_instance.register((opt, 49.9))
else:
# Output preallocation is not activated. Mark every mitmot output
# tap as not being preallocated
self.mitmots_preallocated = [False] * self.n_mit_mot_outs
wrapped_inputs = [In(x, borrow=True) for x in self.inputs]
wrapped_outputs = [Out(x, borrow=False) for x in self.outputs[:slices]]
wrapped_outputs += self.outputs[slices:]
compilation_mode = self.mode_instance
profile = None
if config.profile or (
isinstance(self.profile, (str, bool, (int,))) and self.profile
):
if isinstance(self.profile, str):
profile = ScanProfileStats(name=self.profile)
else:
profile = ScanProfileStats(name=self.name)
elif self.profile:
profile = self.profile
# make_thunk can be called many times on the same op
# we do not want to recompile the inner fct every time.
if not getattr(self, "fn", None):
self.fn = function(
wrapped_inputs,
wrapped_outputs,
mode=compilation_mode,
name=self.name,
profile=profile,
on_unused_input="ignore",
)
# Analyse the compiled inner function to determine which inputs and
# outputs are on the gpu and speed up some checks during the execution
self.inps_is_tensor = [
isinstance(out, TensorVariable) for out in self.fn.maker.fgraph.inputs
]
self.outs_is_tensor = [
isinstance(out, TensorVariable) for out in self.fn.maker.fgraph.outputs
]
try:
if impl == "py":
raise MissingGXX
cython_mintaps = np.asarray(self.mintaps, dtype="int32")
cython_tap_array_len = np.asarray(
[len(x) for x in self.tap_array], dtype="int32"
)
if len(self.tap_array) == 0:
d1 = 0
else:
d1 = np.max(cython_tap_array_len)
d0 = len(self.tap_array)
cython_tap_array = np.zeros((d0, d1), dtype="int32")
for _d0 in range(d0):
for _d1 in range(cython_tap_array_len[_d0]):
cython_tap_array[_d0, _d1] = self.tap_array[_d0][_d1]
cython_mit_mot_out_nslices = np.asarray(
[len(x) for x in self.mit_mot_out_slices], dtype="int32"
)
if len(self.mit_mot_out_slices) == 0:
d1 = 0
else:
d1 = np.max(cython_mit_mot_out_nslices)
d0 = len(self.mit_mot_out_slices)
cython_mit_mot_out_slices = np.zeros((d0, d1), dtype="int32")
for _d0 in range(d0):
for _d1 in range(cython_mit_mot_out_nslices[_d0]):
cython_mit_mot_out_slices[_d0, _d1] = self.mit_mot_out_slices[_d0][
_d1
]
cython_vector_seqs = np.asarray(self.vector_seqs, dtype="int32")
cython_vector_outs = np.asarray(self.vector_outs, dtype="int32")
cython_mitmots_preallocated = np.asarray(
self.mitmots_preallocated, dtype="int32"
)
cython_inps_is_tensor = np.asarray(self.inps_is_tensor, dtype="int32")
cython_outs_is_tensor = np.asarray(self.outs_is_tensor, dtype="int32")
if self.destroy_map:
cython_destroy_map = [
x in self.destroy_map for x in range(len(node.outputs))
]
else:
cython_destroy_map = [0 for x in range(len(node.outputs))]
cython_destroy_map = np.asarray(cython_destroy_map, dtype="int32")
from . import scan_perform_ext
def p(node, args, outs):
return scan_perform_ext.perform(
self.n_shared_outs,
self.n_mit_mot_outs,
self.n_seqs,
self.n_mit_mot,
self.n_mit_sot,
self.n_sit_sot,
self.n_nit_sot,
args[0],
self.as_while,
cython_mintaps,
cython_tap_array,
cython_tap_array_len,
cython_vector_seqs,
cython_vector_outs,
cython_mit_mot_out_slices,
cython_mit_mot_out_nslices,
cython_mitmots_preallocated,
cython_inps_is_tensor,
cython_outs_is_tensor,
self.fn.fn,
self.fn,
cython_destroy_map,
args,
outs,
self,
node,
)
except (ImportError, MissingGXX):
p = self.perform
# default arguments are stored in the closure of `rval`
# Big ugly hack since we can't get the real value of allow_gc
allow_gc = config.allow_gc and not self.allow_gc
def rval(
p=p, i=node_input_storage, o=node_output_storage, n=node, allow_gc=allow_gc
):
r = p(n, [x[0] for x in i], o)
for o in node.outputs:
compute_map[o][0] = True
if allow_gc:
self.fn.free()
return r
rval.inputs = node_input_storage
rval.outputs = node_output_storage
rval.perform = p
rval.lazy = False
return rval
def inner_seqs(self, list_inputs):
return list_inputs[: self.n_seqs]
def outer_seqs(self, list_inputs):
if isinstance(list_inputs, Apply):
list_inputs = list_inputs.inputs
return list_inputs[1 : 1 + self.n_seqs]
def inner_mitmot(self, list_inputs):
n_taps = sum(len(x) for x in self.tap_array[: self.n_mit_mot])
return list_inputs[self.n_seqs : self.n_seqs + n_taps]
def outer_mitmot(self, list_inputs):
if isinstance(list_inputs, Apply):
list_inputs = list_inputs.inputs
return list_inputs[1 + self.n_seqs : 1 + self.n_seqs + self.n_mit_mot]
def inner_mitmot_outs(self, list_outputs):
n_taps = sum(len(x) for x in self.mit_mot_out_slices)
return list_outputs[:n_taps]
def outer_mitmot_outs(self, list_outputs):
if isinstance(list_outputs, Apply):
list_outputs = list_outputs.outputs
return list_outputs[: self.n_mit_mot]
def mitmot_taps(self):
return self.tap_array[: self.n_mit_mot]
def mitmot_out_taps(self):
return self.mit_mot_out_slices[: self.n_mit_mot]
def inner_mitsot(self, list_inputs):
n_mitmot_taps = sum(len(x) for x in self.tap_array[: self.n_mit_mot])
ntaps_upto_sit_sot = sum(
len(x) for x in self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]
)
return list_inputs[
self.n_seqs + n_mitmot_taps : self.n_seqs + ntaps_upto_sit_sot
]
def outer_mitsot(self, list_inputs):
if isinstance(list_inputs, Apply):
list_inputs = list_inputs.inputs
offset = 1 + self.n_seqs + self.n_mit_mot
return list_inputs[offset : offset + self.n_mit_sot]
def inner_mitsot_outs(self, list_outputs):
n_taps = sum(len(x) for x in self.mit_mot_out_slices)
return list_outputs[n_taps : n_taps + self.n_mit_sot]
def outer_mitsot_outs(self, list_outputs):
if isinstance(list_outputs, Apply):
list_outputs = list_outputs.outputs
return list_outputs[self.n_mit_mot : self.n_mit_mot + self.n_mit_sot]
def mitsot_taps(self):
return self.tap_array[self.n_mit_mot : self.n_mit_mot + self.n_mit_sot]
def inner_sitsot(self, list_inputs):
n_taps_upto_sit_sot = sum(
len(x) for x in self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]
)
offset = self.n_seqs + n_taps_upto_sit_sot
return list_inputs[offset : offset + self.n_sit_sot]
def outer_sitsot(self, list_inputs):
if isinstance(list_inputs, Apply):
list_inputs = list_inputs.inputs
offset = 1 + self.n_seqs + self.n_mit_mot + self.n_mit_sot
return list_inputs[offset : offset + self.n_sit_sot]
def inner_sitsot_outs(self, list_outputs):
n_taps = sum(len(x) for x in self.mit_mot_out_slices)
offset = self.n_mit_sot + n_taps
return list_outputs[offset : offset + self.n_sit_sot]
def outer_sitsot_outs(self, list_outputs):
if isinstance(list_outputs, Apply):
list_outputs = list_outputs.outputs
offset = self.n_mit_mot + self.n_mit_sot
return list_outputs[offset : offset + self.n_sit_sot]
def outer_nitsot(self, list_inputs):
if isinstance(list_inputs, Apply):
list_inputs = list_inputs.inputs
offset = (
1
+ self.n_seqs
+ self.n_mit_mot
+ self.n_mit_sot
+ self.n_sit_sot
+ self.n_shared_outs
)
return list_inputs[offset : offset + self.n_nit_sot]
def inner_nitsot_outs(self, list_outputs):
n_taps = sum(len(x) for x in self.mit_mot_out_slices)
offset = self.n_mit_sot + n_taps + self.n_sit_sot
return list_outputs[offset : offset + self.n_nit_sot]
def outer_nitsot_outs(self, list_outputs):
if isinstance(list_outputs, Apply):
list_outputs = list_outputs.outputs
offset = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot
return list_outputs[offset : offset + self.n_nit_sot]
def inner_shared(self, list_inputs):
n_taps_upto_sit_sot = sum(
len(x) for x in self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]
)
offset = self.n_seqs + n_taps_upto_sit_sot + self.n_sit_sot
return list_inputs[offset : offset + self.n_shared_outs]
def outer_shared(self, list_inputs):
if isinstance(list_inputs, Apply):
list_inputs = list_inputs.inputs
offset = 1 + self.n_seqs + self.n_mit_mot + self.n_mit_sot + self.n_sit_sot
return list_inputs[offset : offset + self.n_shared_outs]
def inner_shared_outs(self, list_outputs):
n_taps = sum(len(x) for x in self.mit_mot_out_slices)
offset = self.n_mit_sot + n_taps + self.n_sit_sot + self.n_nit_sot
return list_outputs[offset : offset + self.n_shared_outs]
def outer_shared_outs(self, list_outputs):
if isinstance(list_outputs, Apply):
list_outputs = list_outputs.outputs
offset = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot
return list_outputs[offset : offset + self.n_shared_outs]
def inner_non_seqs(self, list_inputs):
n_taps_upto_sit_sot = sum(
len(x) for x in self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]
)
offset = self.n_seqs + n_taps_upto_sit_sot + self.n_sit_sot + self.n_shared_outs
return list_inputs[offset:]
def outer_non_seqs(self, list_inputs):
if isinstance(list_inputs, Apply):
list_inputs = list_inputs.inputs
offset = (
1
+ self.n_seqs
+ self.n_mit_mot
+ self.n_mit_sot
+ self.n_sit_sot
+ self.n_nit_sot
+ self.n_shared_outs
)
return list_inputs[offset:]
def perform(self, node, inputs, output_storage, params=None):
t0_call = time.time()
t_fn = 0
n_steps = inputs[0]
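# 1. Validate the number of steps and collect the sequence inputs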
seqs = []
if n_steps < 0:
raise IndexError(
f"Scan was asked to run for negative number of step {int(n_steps)}"
)
elif n_steps == 0:
raise NotImplementedError(
"We didn't implemented yet the case where scan do 0 iteration"
)
else:
for idx, seq in enumerate(inputs[1 : self.seqs_arg_offset]):
if seq.shape[0] < n_steps:
raise ValueError(
(
"Sequence is shorter then the required "
"number of steps : (n_steps, seq, "
"seq.shape):"
),
n_steps,
node.inputs[1 + idx],
seq.shape,
)
seqs.append(seq)
# 2. Allocate memory for the outputs. Construct the list:
# store_steps -- map containing the length of each output
# pos -- map containing the current position of each
# output
store_steps = [
arg.shape[0]
for arg in inputs[self.seqs_arg_offset : self.shared_arg_offset]
]
store_steps += [
arg
for arg in inputs[
self.nit_sot_arg_offset : self.nit_sot_arg_offset + self.n_nit_sot
]
]
pos = [
(-self.mintaps[idx]) % store_steps[idx]
for idx in range(self.n_outs + self.n_nit_sot)
]
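# `pos[idx]` is the index inside the (circular) output buffer where the
# next computed step of output `idx` will be written; the initial taps
# occupy the first -mintap entries.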
# 2.1 Create storage space for outputs
for idx in range(self.n_outs):
if idx in self.destroy_map:
# ^ Case 1. Outputs are computed in-place, reusing the storage
# of their initial state
output_storage[idx][0] = inputs[self.seqs_arg_offset + idx]
elif (
output_storage[idx][0] is not None
and output_storage[idx][0].shape[1:]
== inputs[self.seqs_arg_offset + idx].shape[1:]
and output_storage[idx][0].shape[0] >= store_steps[idx]
):
# Put in the values of the initial state
output_storage[idx][0] = output_storage[idx][0][: store_steps[idx]]
if idx > self.n_mit_mot:
l = -self.mintaps[idx]
output_storage[idx][0][:l] = inputs[self.seqs_arg_offset + idx][:l]
else:
output_storage[idx][0][:] = inputs[self.seqs_arg_offset + idx]
else:
output_storage[idx][0] = inputs[self.seqs_arg_offset + idx].copy()
offset = self.nit_sot_arg_offset + self.n_nit_sot
other_args = inputs[offset:]
inner_input_storage = self.fn.input_storage
nb_mitmot_in = sum(map(len, self.tap_array[: self.n_mit_mot]))
old_mitmot_input_storage = [None] * nb_mitmot_in
old_mitmot_input_data = [None] * nb_mitmot_in
inner_output_storage = self.fn.output_storage
old_inner_output_storage = [None] * len(inner_output_storage)
old_inner_output_data = [None] * len(inner_output_storage)
fn = self.fn.fn
offset = (
self.n_seqs
+ sum(map(len, self.tap_array[: self.n_outs]))
+ self.n_shared_outs
)
for idx in range(len(other_args)):
inner_input_storage[idx + offset].storage[0] = other_args[idx]
i = 0
cond = True
# ############# THE MAIN LOOP ##############
# for i in range(n_steps):
while (i < n_steps) and cond:
# sequences over which scan iterates
# 3. collect input slices
for idx in range(self.n_seqs):
if self.vector_seqs[idx]:
inner_input_storage[idx].storage[0] = seqs[idx][i : i + 1].reshape(
()
)
else:
inner_input_storage[idx].storage[0] = seqs[idx][i]
offset = self.n_seqs
for idx in range(self.n_outs):
if self.vector_outs[idx]:
for tap in self.tap_array[idx]:
_idx = (pos[idx] + tap) % store_steps[idx]
inner_input_storage[offset].storage[0] = output_storage[idx][0][
_idx : _idx + 1
].reshape(())
offset += 1
else:
for tap in self.tap_array[idx]:
_idx = (pos[idx] + tap) % store_steps[idx]
inner_input_storage[offset].storage[0] = output_storage[idx][0][
_idx
]
offset += 1
a_offset = self.shared_arg_offset
o_offset = self.n_outs + self.n_nit_sot
if i == 0:
for j in range(self.n_shared_outs):
inner_input_storage[offset].storage[0] = inputs[a_offset + j]
offset += 1
else:
for j in range(self.n_shared_outs):
inner_input_storage[offset].storage[0] = output_storage[
o_offset + j
][0]
offset += 1
# 4. collecting slices where the output should be stored
# 4.1. Collect slices for mitmots
offset = 0
for idx in range(self.n_mit_mot_outs):
if not self.mitmots_preallocated[idx]:
inner_output_storage[offset].storage[0] = None
offset += 1
# 4.2. Collect slices for mitsots, sitsots and nitsots
if i != 0:
for idx in range(self.n_outs + self.n_nit_sot - self.n_mit_mot):
if (
store_steps[idx + self.n_mit_mot] == 1
or self.vector_outs[idx + self.n_mit_mot]
):
inner_output_storage[idx + offset].storage[0] = None
else:
_pos0 = idx + self.n_mit_mot
inner_output_storage[idx + offset].storage[0] = output_storage[
_pos0
][0][pos[_pos0]]
else:
for idx in range(self.n_outs + self.n_nit_sot - self.n_mit_mot):
inner_output_storage[idx + offset].storage[0] = None
# 4.3. Collect slices for shared outputs
offset += self.n_outs + self.n_nit_sot - self.n_mit_mot
for idx in range(self.n_shared_outs):
inner_output_storage[idx + offset].storage[0] = None
# 4.4. If there is a condition add it to the mix
if self.as_while:
pdx = offset + self.n_shared_outs
inner_output_storage[pdx].storage[0] = None
# 4.5. Keep a reference to the variables (ndarrays, GpuArrays,
# etc) currently in the output_storage to be able to compare them
# with the actual outputs of the inner function after its
# execution. Also keep pointers to their data to be able to detect
# cases where outputs reused the allocated object but alter the
# memory region they refer to.
for idx in range(len(inner_output_storage)):
var = inner_output_storage[idx].storage[0]
old_inner_output_storage[idx] = var
if var is None:
old_inner_output_data[idx] = None
elif self.outs_is_tensor[idx]:
old_inner_output_data[idx] = var.data
else:
old_inner_output_data[idx] = var.gpudata
# 4.6. Keep a reference to the variables (ndarrays, GpuArrays,
# etc) associated with mitmot inputs currently in the
# input_storage to be able to compare them with the content of the
# input_storage after the execution of the function. Also keep
# pointers to their data to be able to detect cases where outputs
# reused the allocated object but alter the memory region they
# refer to.
for idx in range(nb_mitmot_in):
var = inner_input_storage[idx + self.n_seqs].storage[0]
old_mitmot_input_storage[idx] = var
if var is None:
old_mitmot_input_data[idx] = None
elif self.inps_is_tensor[idx + self.n_seqs]:
old_mitmot_input_data[idx] = var.data
else:
old_mitmot_input_data[idx] = var.gpudata
# 5.1 compute outputs
t0_fn = time.time()
try:
fn()
except Exception:
if hasattr(fn, "position_of_error"):
# this is a new vm-provided function or c linker
# they need this because the exception manipulation
# done by raise_with_op is not implemented in C.
if hasattr(fn, "thunks"):
# For the CVM
raise_with_op(
self.fn.maker.fgraph,
fn.nodes[fn.position_of_error],
fn.thunks[fn.position_of_error],
)
else:
# For the c linker
# We don't have access from Python to all the
# extra shapes/strides info
raise_with_op(
self.fn.maker.fgraph, fn.nodes[fn.position_of_error]
)
else:
# old-style linkers raise their own exceptions
raise
dt_fn = time.time() - t0_fn
if self.as_while:
pdx = offset + self.n_shared_outs
cond = inner_output_storage[pdx].storage[0] == 0
# 5.2. By calling fn() directly instead of calling the aesara
# function, it is possible that the updates have not been
# performed. Perform the updates if needed.
offset_out = len(inner_output_storage) - 1
if getattr(fn, "need_update_inputs", True):
# Update the inputs that have an update function
for inp, storage in zip(
self.fn.maker.expanded_inputs[::-1], self.fn.input_storage[::-1]
):
if inp.update is not None:
storage.data = inner_output_storage[offset_out].data
offset_out -= 1
t_fn += dt_fn
offset_out = 0
# 5.3 Copy over the values for mit_mot outputs
mitmot_inp_offset = 0
mitmot_out_idx = 0
for j in range(self.n_mit_mot):
for k in self.mit_mot_out_slices[j]:
if self.mitmots_preallocated[mitmot_out_idx]:
# This output tap has been preallocated.
inp_idx = mitmot_inp_offset + self.tap_array[j].index(k)
# Verify whether the input points to the same data as
# it did before the execution of the inner function.
old_var = old_mitmot_input_storage[inp_idx]
new_var = inner_input_storage[self.n_seqs + inp_idx].storage[0]
if old_var is new_var:
old_data = old_mitmot_input_data[inp_idx]
if self.inps_is_tensor[self.n_seqs + inp_idx]:
same_data = new_var.data == old_data
else:
same_data = new_var.gpudata == old_data
else:
same_data = False
# If the corresponding input storage still points to
# the same data, it has been modified inplace and
# nothing needs to be done. Otherwise, recover the output
# value and store it in `outs` as usual
if not same_data:
output_storage[j][0][k + pos[j]] = inner_input_storage[
self.n_seqs + inp_idx
].storage[0]
else:
# This output tap has not been preallocated, recover
# its value as usual
output_storage[j][0][k + pos[j]] = inner_output_storage[
offset_out
].storage[0]
offset_out += 1
mitmot_out_idx += 1
mitmot_inp_offset += len(self.tap_array[j])
# 5.4 Copy over the values for mit_sot/sit_sot outputs
begin = self.n_mit_mot
end = self.n_outs
offset_out -= self.n_mit_mot
for j in range(begin, end):
# Copy the output value to `outs`, if necessary
if store_steps[j] == 1 or self.vector_outs[j]:
output_storage[j][0][pos[j]] = inner_output_storage[
offset_out + j
].storage[0]
else:
# Check whether the initialization of the output storage
# map for this output has been reused.
old_var = old_inner_output_storage[offset_out + j]
new_var = inner_output_storage[offset_out + j].storage[0]
if old_var is new_var:
old_data = old_inner_output_data[offset_out + j]
if old_data is None:
output_reused = False
elif self.outs_is_tensor[offset_out + j]:
output_reused = new_var.data == old_data
else:
output_reused = new_var.gpudata == old_data
else:
output_reused = False
if not output_reused:
try:
output_storage[j][0][pos[j]] = inner_output_storage[
offset_out + j
].storage[0]
except ValueError as e:
if i == 0:
# First iteration: re-raise the original error; a shape
# change between iterations cannot be the cause yet.
raise
ne = ValueError(
"An output of the scan has changed shape. "
"This may be caused by a pushout optimization."
" Try adding "
"'optimizer_excluding=scanOp_pushout_output' "
"to your Aesara flags."
)
raise ne from e
# 5.5 Copy over the values for nit_sot outputs
begin = end
end += self.n_nit_sot
for j in range(begin, end):
if i == 0:
jout = j + offset_out
shape = (store_steps[j],) + inner_output_storage[jout].storage[
0
].shape
dtype = inner_output_storage[jout].storage[0].dtype
if (
output_storage[j][0] is None
or output_storage[j][0].shape[0] < store_steps[j]
or output_storage[j][0].shape[1:] != shape[1:]
or output_storage[j][0].dtype != dtype
):
output_storage[j][0] = node.outputs[j].type.value_zeros(shape)
elif output_storage[j][0].shape[0] != store_steps[j]:
output_storage[j][0] = output_storage[j][0][: store_steps[j]]
output_storage[j][0][pos[j]] = inner_output_storage[jout].storage[0]
elif store_steps[j] == 1 or self.vector_outs[j]:
output_storage[j][0][pos[j]] = inner_output_storage[
j + offset_out
].storage[0]
else:
# Check whether the initialization of the output storage map
# for this output has been reused.
old_var = old_inner_output_storage[offset_out + j]
old_data = old_inner_output_data[offset_out + j]
new_var = inner_output_storage[offset_out + j].storage[0]
if old_var is new_var:
if old_data is None:
output_reused = False
elif self.outs_is_tensor[offset_out + j]:
output_reused = new_var.data == old_data
else:
output_reused = new_var.gpudata == old_data
else:
output_reused = False
if not output_reused:
output_storage[j][0][pos[j]] = inner_output_storage[
j + offset_out
].storage[0]
# 5.6 Copy over the values for outputs corresponding to shared
# variables
begin = end
end += self.n_shared_outs
for j in range(begin, end):
jout = j + offset_out
output_storage[j][0] = inner_output_storage[jout].storage[0]
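# Advance the write position of every circular output buffer and move
# on to the next iteration.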
pos = [(idx + 1) % store for idx, store in zip(pos, store_steps)]
i = i + 1
# 6. Check if you need to re-order output buffers
begin = self.n_mit_mot
end = self.n_outs + self.n_nit_sot
for idx in range(begin, end):
if store_steps[idx] < i - self.mintaps[idx] and pos[idx] < store_steps[idx]:
pdx = pos[idx]
if pdx >= store_steps[idx] // 2:
# It seems inefficient to copy the bigger part of the
# array over, and back, but it is the only way that
# there is no overlap in the areas of out[idx][0] that
# are read and written.
# This way, there will be no information overwritten
# before it is read (as it used to happen).
shape = (pdx,) + output_storage[idx][0].shape[1:]
tmp = node.outputs[idx].type.value_zeros(shape)
tmp[:] = output_storage[idx][0][:pdx]
output_storage[idx][0][: store_steps[idx] - pdx] = output_storage[
idx
][0][pdx:]
output_storage[idx][0][store_steps[idx] - pdx :] = tmp
del tmp
else:
shape = (store_steps[idx] - pdx,) + output_storage[idx][0].shape[1:]
tmp = node.outputs[idx].type.value_zeros(shape)
tmp[:] = output_storage[idx][0][pdx:]
output_storage[idx][0][store_steps[idx] - pdx :] = output_storage[
idx
][0][:pdx]
output_storage[idx][0][: store_steps[idx] - pdx] = tmp
del tmp
# This would normally happen only when doing truncated
# backpropagation through time. In such a scenario Scan is
# expected to return 0 for all entries for which the gradient is
# not actually computed
elif store_steps[idx] > i - self.mintaps[idx]:
output_storage[idx][0][i - self.mintaps[idx] :] = 0
# This is a fix for a bug introduced by while. If you say
# you want to loop up to a condition, you expect the output
# to have that length (and not the maximal length possible)
#
# Without this, the behaviour of a scan op is not consistent
# when optimizations are applied compared to when they are
# not applied
if i < n_steps:
# out[idx][0][:i] is not used here because, for outputs with
# initial taps, the buffer also holds those leading entries in
# addition to the i computed steps.
output_storage[idx][0] = output_storage[idx][0][: -(n_steps - i)]
for i_s in inner_input_storage:
i_s.storage[0] = None
for o_s in inner_output_storage:
o_s.storage[0] = None
t_call = time.time() - t0_call
# and this little string helps us to find this spot:
# "PROFILE_CODE"
if hasattr(self.fn.maker, "profile") and self.fn.maker.profile:
profile = self.fn.maker.profile
profile.callcount += 1
profile.nbsteps += n_steps
profile.call_time += t_call
profile.vm_call_time += t_fn
if hasattr(self.fn.fn, "update_profile"):
self.fn.fn.update_profile(profile)
self.t_call = t_call
self.t_fn = t_fn
def infer_shape(self, fgraph, node, input_shapes):
# input_shapes correspond to the shapes of node.inputs
for inp, inp_shp in zip(node.inputs, input_shapes):
assert inp_shp is None or len(inp_shp) == inp.type.ndim
# Here we build 2 variables;
# - A list `inner_ins_shapes`, such that inner_ins_shapes[i] is the
# shape of self.inputs[i]
# - A dictionary `out_equivalent` containing, for every inner input,
# an equivalent variable computed from the outer inputs.
# NOTE : For non-sequences, this equivalence is trivial. For
# sequences and recurrent states, there is no direct equivalence
# between outer and inner inputs. However, because every iteration
# of the Scan needs to give the same output shapes, we can give an
# equivalence between these inner inputs and the subelements of the
# corresponding outer inputs that the Scan would use as input for
# any given iteration. For simplicity, we use iteration 0.
inner_ins_shapes = []
out_equivalent = OrderedDict()
# The two following blocks are commented out as they cause, in
# some cases, extra scans in the graph. See gh-XXX for the
# investigation.
# We skip the first outer input as it is the total or current number
# of iterations.
# sequences
seqs_shape = [x[1:] for x in input_shapes[1 : 1 + self.n_seqs]]
# We disable extra infer_shape for now. See gh-3765.
extra_infer_shape = False
if extra_infer_shape:
inner_seqs = self.inputs[: self.n_seqs]
outer_seqs = node.inputs[1 : 1 + self.n_seqs]
for in_s, out_s in zip(inner_seqs, outer_seqs):
out_equivalent[in_s] = out_s[0]
# mit_mot, mit_sot, sit_sot
outer_inp_idx = 1 + self.n_seqs
inner_inp_idx = self.n_seqs
else:
outer_inp_idx = 0
n_outs = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot
outs_shape = []
for idx in range(n_outs):
mintap = abs(min(self.tap_array[idx]))
for k in self.tap_array[idx]:
outs_shape += [input_shapes[idx + self.n_seqs + 1][1:]]
if extra_infer_shape:
corresponding_tap = node.inputs[outer_inp_idx][mintap + k]
out_equivalent[self.inputs[inner_inp_idx]] = corresponding_tap
inner_inp_idx += 1
outer_inp_idx += 1
# shared_outs
offset = 1 + self.n_seqs + n_outs
for idx in range(self.n_shared_outs):
outs_shape += [input_shapes[idx + offset]]
# non_sequences
offset += self.n_nit_sot + self.n_shared_outs
inner_ins_shapes = seqs_shape + outs_shape + input_shapes[offset:]
assert len(inner_ins_shapes) == len(self.inputs)
# Non-sequences have a direct equivalent from self.inputs in
# node.inputs
inner_non_sequences = self.inputs[len(seqs_shape) + len(outs_shape) :]
for in_ns, out_ns in zip(inner_non_sequences, node.inputs[offset:]):
out_equivalent[in_ns] = out_ns
if self.as_while:
self_outs = self.outputs[:-1]
else:
self_outs = self.outputs
outs_shape = infer_shape(
outs=self_outs, inputs=self.inputs, input_shapes=inner_ins_shapes
)
# Will be used to check if outs_shape can be expressed without using
# variables in self.inputs.
# The shapes of node.inputs are valid.
validator = Validator(
valid=input_shapes, invalid=self.inputs, valid_equivalent=out_equivalent
)
offset = 1 + self.n_seqs
scan_outs = [x for x in input_shapes[offset : offset + n_outs]]
offset += n_outs
outs_shape_n = self.n_mit_mot_outs + self.n_mit_sot + self.n_sit_sot
for x in range(self.n_nit_sot):
out_shape_x = outs_shape[outs_shape_n + x]
if out_shape_x is None:
# This output is not a tensor, and has no shape
scan_outs.append(None)
else:
# We need to make sure that we can compute the shapes from
# node.inputs, and constants, without using the variables
# in the inner function.
r = node.outputs[n_outs + x]
assert r.ndim == 1 + len(out_shape_x)
shp = [node.inputs[offset + self.n_shared_outs + x]]
for i, shp_i in zip(range(1, r.ndim), out_shape_x):
# Validate shp_i. v_shape_i is either None (if invalid),
# or a (variable, Boolean) tuple. The Boolean indicates
# whether variable is shp_i (if True), or a valid
# equivalent (if False). Here, we only need the variable.
v_shp_i = validator.check(shp_i)
if v_shp_i is None:
if hasattr(r, "broadcastable") and r.broadcastable[i]:
shp.append(1)
else:
shp.append(Shape_i(i)(r))
else:
# It can (or at least, an equivalent variable can)
shp.append(v_shp_i[0])
scan_outs.append(tuple(shp))
scan_outs += [x for x in input_shapes[offset : offset + self.n_shared_outs]]
# if we are dealing with a repeat-until, then we do not know the
# leading dimension so we replace it for every entry with Shape_i
if self.as_while:
scan_outs_init = scan_outs
scan_outs = []
for o, x in zip(node.outputs, scan_outs_init):
if x is None:
scan_outs.append(None)
else:
scan_outs.append((Shape_i(0)(o),) + x[1:])
return scan_outs
def connection_pattern(self, node):
# We cache the result of this function because, with a previous
# implementation that repeatedly called grad, there were cases
# where calls to aesara.grad() took as much as 4h for functions
# containing many nested scans.
if hasattr(node.tag, "connection_pattern"):
return node.tag.connection_pattern
# Obtain the connection pattern of the inner function.
inner_connect_pattern = io_connection_pattern(self.inputs, self.outputs)
# Initially assume no outer input is connected to any outer output
connection_pattern = [[False for output in node.outputs] for x in node.inputs]
# For every possible pair of outer input and outer output, iterate
# over every possible pairing of their corresponding inner inputs
# and inner outputs and, if any such pair of inner variables is
# connected, then the pair of outer variables is considered connected.
for outer_oidx in range(len(node.outputs)):
inner_oidxs = self.var_mappings["inner_out_from_outer_out"][outer_oidx]
for outer_iidx in range(len(node.inputs)):
inner_iidxs = self.var_mappings["inner_inp_from_outer_inp"][outer_iidx]
for inner_oidx in inner_oidxs:
for inner_iidx in inner_iidxs:
if inner_connect_pattern[inner_iidx][inner_oidx]:
connection_pattern[outer_iidx][outer_oidx] = True
break
if connection_pattern[outer_iidx][outer_oidx]:
break
# Applying Floyd-Warshall to find all paths connecting inputs to
# outputs. Note that if `x` is an input to `y_t` and `y_tm1` is an
# input to `z_t` then `x` is an input to `z_t`.
n_outs = len(node.outputs)
for steps in range(n_outs):
for iidx in range(n_outs):
for jidx in range(n_outs):
# Get the idx of the outer input corresponding to that
# outer output
j_inp_idx = self.var_mappings["outer_inp_from_outer_out"][jidx]
if j_inp_idx != -1:
if connection_pattern[j_inp_idx][iidx] is True:
for k in range(len(connection_pattern)):
if connection_pattern[k][jidx]:
connection_pattern[k][iidx] = True
node.tag.connection_pattern = connection_pattern
return connection_pattern
def get_oinp_iinp_iout_oout_mappings(self):
# Lists for outer variables contain individual indices, lists for
# inner variables contain sequences of indices because many inner
# variables can be associated with the same outer variable. The list
# and indices are initialized already containing the data associated
# with the timestep index, the first outer input.
outer_input_indices = [0]
inner_input_indices = [[]]
inner_output_indices = [[]]
outer_output_indices = [-1]
outer_iidx = 1
inner_iidx = 0
inner_oidx = 0
outer_oidx = 0
# Handle sequences inputs
for i in range(self.info["n_seqs"]):
outer_input_indices.append(outer_iidx)
inner_input_indices.append([inner_iidx])
inner_output_indices.append([])
outer_output_indices.append(-1)
outer_iidx += 1
inner_iidx += 1
inner_oidx += 0
outer_oidx += 0
# Handle mitmots, mitsots and sitsots variables
for i in range(len(self.info["tap_array"])):
nb_input_taps = len(self.info["tap_array"][i])
if i < self.n_mit_mot:
nb_output_taps = len(self.mit_mot_out_slices[i])
else:
nb_output_taps = 1
outer_input_indices.append(outer_iidx)
inner_input_indices.append(
list(range(inner_iidx, inner_iidx + nb_input_taps))
)
inner_output_indices.append(
list(range(inner_oidx, inner_oidx + nb_output_taps))
)
outer_output_indices.append(outer_oidx)
outer_iidx += 1
inner_iidx += nb_input_taps
inner_oidx += nb_output_taps
outer_oidx += 1
# This is needed because, for outer inputs (and for outer inputs only)
# nitsots come *after* shared variables.
outer_iidx += self.info["n_shared_outs"]
# Handle nitsots variables
for i in range(self.n_nit_sot):
outer_input_indices.append(outer_iidx)
inner_input_indices.append([])
inner_output_indices.append([inner_oidx])
outer_output_indices.append(outer_oidx)
outer_iidx += 1
inner_iidx += 0
inner_oidx += 1
outer_oidx += 1
# This is needed because, for outer inputs (and for outer inputs only)
# nitsots come *after* shared variables.
outer_iidx -= self.info["n_shared_outs"] + self.n_nit_sot
# Handle shared states
for i in range(self.info["n_shared_outs"]):
outer_input_indices.append(outer_iidx)
inner_input_indices.append([inner_iidx])
inner_output_indices.append([inner_oidx])
outer_output_indices.append(outer_oidx)
outer_iidx += 1
inner_iidx += 1
inner_oidx += 1
outer_oidx += 1
# This is needed because, for outer inputs (and for outer inputs only)
# nitsots come *after* shared variables.
outer_iidx += self.n_nit_sot
# Handle non-sequence inputs
# Note: the number of non-sequence inputs is not stored in self.info
# so it has to be inferred from the number of inner inputs that remain
# to be handled
for i in range(len(self.inputs) - inner_iidx):
outer_input_indices.append(outer_iidx)
inner_input_indices.append([inner_iidx])
inner_output_indices.append([])
outer_output_indices.append(-1)
outer_iidx += 1
inner_iidx += 1
inner_oidx += 0
outer_oidx += 0
# With the global mapping inferred, the individual mappings
# can be produced
mappings = {
"outer_inp_from_outer_out": {},
"inner_inp_from_outer_out": {},
"inner_out_from_outer_out": {},
"inner_inp_from_outer_inp": {},
"inner_out_from_outer_inp": {},
"outer_out_from_outer_inp": {},
"outer_inp_from_inner_inp": {},
"inner_out_from_inner_inp": {},
"outer_out_from_inner_inp": {},
"outer_inp_from_inner_out": {},
"inner_inp_from_inner_out": {},
"outer_out_from_inner_out": {},
}
for (oinp, iinp, iout, oout) in zip(
outer_input_indices,
inner_input_indices,
inner_output_indices,
outer_output_indices,
):
if oout != -1:
mappings["outer_inp_from_outer_out"][oout] = oinp
mappings["inner_inp_from_outer_out"][oout] = iinp
mappings["inner_out_from_outer_out"][oout] = iout
if oinp != -1:
mappings["inner_inp_from_outer_inp"][oinp] = iinp
mappings["inner_out_from_outer_inp"][oinp] = iout
mappings["outer_out_from_outer_inp"][oinp] = oout
for idx in iinp:
mappings["outer_inp_from_inner_inp"][idx] = oinp
mappings["inner_out_from_inner_inp"][idx] = iout
mappings["outer_out_from_inner_inp"][idx] = oout
for idx in iout:
mappings["outer_inp_from_inner_out"][idx] = oinp
mappings["inner_inp_from_inner_out"][idx] = iinp
mappings["outer_out_from_inner_out"][idx] = oout
return mappings
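# Rough worked example (assumed layout, for illustration only): for a scan
# with one sequence, one sit_sot state (tap -1) and one non-sequence, the
# outer inputs are [n_steps, seq, sitsot_init, non_seq] and the inner
# inputs are [seq_t, sitsot_tm1, non_seq], so the method above yields,
# among other entries:
#     inner_inp_from_outer_inp == {0: [], 1: [0], 2: [1], 3: [2]}
#     outer_out_from_outer_inp == {0: -1, 1: -1, 2: 0, 3: -1}
#     inner_out_from_outer_out == {0: [0]}
#     outer_inp_from_outer_out == {0: 2}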
def L_op(self, inputs, outs, dC_douts):
if not isinstance(outs, (list, tuple)):
outs = [outs]
# `grad_steps` equals the number of steps the original scan node has
# done (if the original scan is a while loop, then this number is the
# length of the output sequence)
# We do not know what kind of outputs the original scan has, so we
# try first to see if it has a nit_sot output, then a sit_sot and
# then a mit_sot
if self.n_nit_sot > 0:
grad_steps = self.outer_nitsot_outs(outs)[0].shape[0]
elif self.n_sit_sot > 0:
grad_steps = self.outer_sitsot_outs(outs)[0].shape[0] - 1
elif self.n_mit_sot > 0:
grad_steps = (
self.outer_mitsot_outs(outs)[0].shape[0] + self.mintaps[self.n_mit_mot]
)
else:
grad_steps = inputs[0]
if self.as_while:
n_steps = outs[0].shape[0]
# Restrict the number of grad steps according to
# self.truncate_gradient
if self.truncate_gradient != -1:
grad_steps = minimum(grad_steps, self.truncate_gradient)
self_inputs = self.inputs
self_outputs = self.outputs
# differentiable inputs
diff_inputs = (
self.inner_seqs(self_inputs)
+ self.inner_mitmot(self_inputs)
+ self.inner_mitsot(self_inputs)
+ self.inner_sitsot(self_inputs)
+ self.inner_non_seqs(self_inputs)
)
diff_outputs = (
self.inner_mitmot_outs(self_outputs)
+ self.inner_mitsot_outs(self_outputs)
+ self.inner_sitsot_outs(self_outputs)
+ self.inner_nitsot_outs(self_outputs)
)
scan_node = outs[0].owner
connection_pattern = self.connection_pattern(scan_node)
def get_inp_idx(iidx):
if iidx < self.n_seqs:
return 1 + iidx
oidx = 1 + self.n_seqs
iidx = iidx - self.n_seqs
for taps in self.mitmot_taps():
if len(taps) > iidx:
return oidx
else:
oidx += 1
iidx -= len(taps)
for taps in self.mitsot_taps():
if len(taps) > iidx:
return oidx
else:
oidx += 1
iidx -= len(taps)
if iidx < self.info["n_sit_sot"]:
return oidx + iidx
else:
return oidx + iidx + self.info["n_nit_sot"]
def get_out_idx(iidx):
oidx = 0
for taps in self.mitmot_out_taps():
if len(taps) > iidx:
return oidx
else:
oidx += 1
iidx -= len(taps)
return oidx + iidx
def compute_all_gradients(known_grads):
y_s = known_grads.keys()
g_y_s = known_grads.values()
for g_y in g_y_s:
if str(g_y.dtype) in integer_dtypes:
raise TypeError(
"Gradients may never be integers but g_y "
"has type " + str(g_y.type)
)
out_indices = [get_out_idx(self_outputs.index(y)) for y in y_s]
connected_inputs = [
i
for i in range(len(scan_node.inputs))
if any([connection_pattern[i][odx] for odx in out_indices])
]
wrt = [
x
for x in graph_inputs(y_s)
if (x in diff_inputs)
and get_inp_idx(self_inputs.index(x)) in connected_inputs
]
gmp = OrderedDict()
# Required in case there is a pair of variables X and Y, with X
# used to compute Y, for both of which there is an external
# gradient signal. Without this, the total gradient signal on X
# will be the external gradient signal known_grads[X]. With this,
# it will be the sum of the external gradient signal and the
# gradient obtained by propagating Y's external gradient signal.
known_grads = OrderedDict([(k.copy(), v) for (k, v) in known_grads.items()])
grads = grad(
cost=None,
known_grads=known_grads,
wrt=wrt,
consider_constant=wrt,
disconnected_inputs="ignore",
return_disconnected="None",
null_gradients="return",
)
for i in range(len(wrt)):
gmp[wrt[i]] = grads[i]
rval = [gmp.get(p, None) for p in diff_inputs]
return rval
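# Hedged note on the call above: `grad(cost=None, known_grads=...)` performs
# a single backward sweep through the inner graph, treating the supplied
# values as the gradients of an (unspecified) cost with respect to the
# inner outputs.  Conceptually, for one time step it computes roughly
#     dC_dx_t = sum over outputs y_t of (dC_dy_t * dy_t/dx_t)
# for every differentiable inner input x_t listed in `wrt`.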
dC_dinps_t = [None for inp in diff_inputs]
disconnected_dC_dinps_t = [True for inp in diff_inputs]
dC_dXts = []
Xts = []
for idx, Xt in enumerate(diff_outputs):
if idx >= self.n_mit_mot_outs:
Xt_placeholder = safe_new(Xt)
Xts.append(Xt_placeholder)
idx_nitsot_start = (
self.info["n_mit_mot"] + self.info["n_mit_sot"] + self.info["n_sit_sot"]
)
idx_nitsot_end = idx_nitsot_start + self.info["n_nit_sot"]
if idx < idx_nitsot_start or idx >= idx_nitsot_end:
dtypes = []
states = (
self.inner_mitmot(self_inputs)
+ self.inner_mitsot(self_inputs)
+ self.inner_sitsot(self_inputs)
)
for pos, inp in enumerate(states):
if inp in graph_inputs([Xt]):
outer_oidx = self.var_mappings["outer_out_from_inner_inp"][
self.n_seqs + pos
]
if not isinstance(dC_douts[outer_oidx].type, DisconnectedType):
dtypes.append(dC_douts[outer_oidx].dtype)
if dtypes:
new_dtype = aesara.scalar.upcast(*dtypes)
else:
new_dtype = config.floatX
dC_dXt = safe_new(Xt, dtype=new_dtype)
else:
if isinstance(dC_douts[idx].type, DisconnectedType):
continue
dC_dXt = safe_new(dC_douts[idx][0])
dC_dXts.append(dC_dXt)
known_grads = OrderedDict()
dc_dxts_idx = 0
for i in range(len(diff_outputs)):
if i < idx_nitsot_start or i >= idx_nitsot_end:
if diff_outputs[i] in known_grads:
known_grads[diff_outputs[i]] += dC_dXts[dc_dxts_idx]
else:
known_grads[diff_outputs[i]] = dC_dXts[dc_dxts_idx]
dc_dxts_idx += 1
else:
if isinstance(dC_douts[i].type, DisconnectedType):
continue
else:
if diff_outputs[i] in known_grads:
known_grads[diff_outputs[i]] += dC_dXts[dc_dxts_idx]
else:
known_grads[diff_outputs[i]] = dC_dXts[dc_dxts_idx]
dc_dxts_idx += 1
dC_dinps_t = compute_all_gradients(known_grads)
for dx in range(len(dC_dinps_t)):
if not dC_dinps_t[dx]:
dC_dinps_t[dx] = aet.zeros_like(diff_inputs[dx])
else:
disconnected_dC_dinps_t[dx] = False
for Xt, Xt_placeholder in zip(diff_outputs[self.n_mit_mot_outs :], Xts):
tmp = forced_replace(dC_dinps_t[dx], Xt, Xt_placeholder)
dC_dinps_t[dx] = tmp
dC_dXtm1s = []
for pos, x in enumerate(dC_dinps_t[self.n_seqs :]):
idxs = self.var_mappings["inner_out_from_inner_inp"][self.n_seqs + pos]
x_is_state = pos < sum([len(t) for t in self.tap_array])
if x_is_state and len(idxs) > 0:
opos = idxs[0]
dC_dXtm1s.append(safe_new(dC_dXts[opos]))
if hasattr(x, "dtype") and x.dtype != dC_dXts[opos].dtype:
dC_dinps_t[pos + self.n_seqs] = x.astype(dC_dXts[opos].dtype)
else:
dC_dXtm1s.append(safe_new(x))
for dx, dC_dXtm1 in enumerate(dC_dXtm1s):
if isinstance(dC_dinps_t[dx + self.n_seqs].type, NullType):
pass
elif isinstance(dC_dXtm1.type, NullType):
dC_dinps_t[dx + self.n_seqs] = dC_dXtm1
else:
dC_dinps_t[dx + self.n_seqs] += dC_dXtm1
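# Hedged reading of the accumulation above: for a recurrent state with
# h_t = f(h_tm1, x_t), the total gradient with respect to h_tm1 is the sum
# of the gradient computed inside the current step (dC_dinps_t) and the
# gradient flowing back from later steps through the recurrence, which is
# what the dC_dXtm1 placeholders carry between iterations of the gradient
# scan.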
if self.as_while:
outer_inp_seqs = [x[n_steps - 1 :: -1] for x in inputs[1 : 1 + self.n_seqs]]
else:
outer_inp_seqs = [x[::-1] for x in inputs[1 : 1 + self.n_seqs]]
for idx in range(self.n_mit_mot + self.n_mit_sot):
mintap = np.min(self.tap_array[idx])
if idx < self.n_mit_mot:
outmaxtap = np.max(self.mitmot_out_taps()[idx])
else:
outmaxtap = 0
seq = outs[idx]
for k in self.tap_array[idx]:
if outmaxtap - k != 0:
nw_seq = seq[k - mintap : -(outmaxtap - k)][::-1]
else:
nw_seq = seq[k - mintap :][::-1]
outer_inp_seqs.append(nw_seq)
outer_inp_seqs += [x[:-1][::-1] for x in self.outer_sitsot_outs(outs)]
for x in self.outer_nitsot_outs(dC_douts):
if not isinstance(x.type, DisconnectedType):
if self.as_while:
outer_inp_seqs.append(x[n_steps - 1 :: -1])
else:
outer_inp_seqs.append(x[::-1])
if hasattr(inputs[0].tag, "test_value"):
if self.as_while:
n = n_steps.tag.test_value
else:
n = inputs[0].tag.test_value
for taps, x in zip(self.mitsot_taps(), self.outer_mitsot_outs(outs)):
mintap = np.min(taps)
if hasattr(x[::-1][:mintap], "test_value"):
assert x[::-1][:mintap].tag.test_value.shape[0] == n
for x in self.outer_sitsot_outs(outs):
if hasattr(x[::-1][:-1].tag, "test_value"):
assert x[::-1][:-1].tag.test_value.shape[0] == n
for x in self.outer_nitsot_outs(outs):
if hasattr(x[::-1].tag, "test_value"):
if self.as_while:
assert x[n_steps - 1 :: -1].tag.test_value.shape[0] == n
else:
assert x[::-1].tag.test_value.shape[0] == n
outer_inp_seqs += [
x[::-1][: np.min(taps)]
for taps, x in zip(self.mitsot_taps(), self.outer_mitsot_outs(outs))
]
outer_inp_seqs += [x[::-1][:-1] for x in self.outer_sitsot_outs(outs)]
outer_inp_seqs += [x[::-1] for x in self.outer_nitsot_outs(outs)]
outer_inp_seqs = [s_[:grad_steps] for s_ in outer_inp_seqs]
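# Illustrative note: the gradient scan runs backwards in time, so every
# forward quantity is fed to it reversed; e.g. if the forward scan consumed
# x[0], x[1], ..., x[T-1], then backward step k of the gradient scan sees
# x[T-1-k].  The [:grad_steps] slice above additionally truncates these
# reversed sequences when truncate_gradient is in effect.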
inner_inp_seqs = self.inner_seqs(self_inputs)
inner_inp_seqs += self.inner_mitmot(self_inputs)
inner_inp_seqs += self.inner_mitsot(self_inputs)
inner_inp_seqs += self.inner_sitsot(self_inputs)
inner_inp_seqs += self.inner_nitsot_outs(dC_dXts)
inner_inp_seqs += Xts
outer_inp_mitmot = []
inner_inp_mitmot = []
inner_out_mitmot = []
mitmot_inp_taps = []
mitmot_out_taps = []
type_outs = []
out_pos = 0
ins_pos = self.n_seqs
n_mitmot_outs = 0
n_mitmot_inps = 0
for idx in range(self.n_mit_mot):
if isinstance(dC_douts[idx].type, DisconnectedType):
out = outs[idx]
outer_inp_mitmot.append(aet.zeros_like(out))
else:
outer_inp_mitmot.append(dC_douts[idx][::-1])
mitmot_inp_taps.append([])
mitmot_out_taps.append([])
undefined_msg = None
through_shared = False
disconnected = True
for jdx in range(len(self.mit_mot_out_slices[idx])):
inner_inp_mitmot.append(dC_dXts[out_pos])
mitmot_inp_taps[idx].append(-self.mit_mot_out_slices[idx][jdx])
n_mitmot_inps += 1
out_pos += 1
for jdx in range(len(self.tap_array[idx])):
tap = -self.tap_array[idx][jdx]
if tap not in mitmot_inp_taps[idx]:
inner_inp_mitmot.append(dC_dXtm1s[ins_pos - self.n_seqs])
if isinstance(dC_dinps_t[ins_pos].type, NullType):
inner_out_mitmot.append(
aet.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)
)
undefined_msg = dC_dinps_t[ins_pos].type.why_null
else:
new_inner_out_mitmot = dC_dinps_t[ins_pos]
if tap in mitmot_inp_taps[idx]:
to_replace = dC_dXtm1s[ins_pos - self.n_seqs]
replacement_idx = len(mitmot_inp_taps[idx]) - mitmot_inp_taps[
idx
].index(tap)
replacement = inner_inp_mitmot[-replacement_idx]
self.tap_array[idx]
new_inner_out_mitmot = clone_replace(
new_inner_out_mitmot, replace=[(to_replace, replacement)]
)
inner_out_mitmot.append(new_inner_out_mitmot)
if not disconnected_dC_dinps_t[ins_pos]:
disconnected = False
for _sh in self.inner_shared(self_inputs):
if _sh in graph_inputs([dC_dinps_t[ins_pos]]):
through_shared = True
ins_pos += 1
n_mitmot_outs += 1
mitmot_out_taps[idx].append(-self.tap_array[idx][jdx])
# Only add the tap as a new input tap if needed
if tap not in mitmot_inp_taps[idx]:
n_mitmot_inps += 1
mitmot_inp_taps[idx].append(-self.tap_array[idx][jdx])
if undefined_msg:
type_outs.append(undefined_msg)
elif through_shared:
type_outs.append("through_shared")
elif disconnected:
type_outs.append("disconnected")
else:
type_outs.append("connected")
offset = self.n_mit_mot
for idx in range(self.n_mit_sot):
if isinstance(dC_douts[idx + offset].type, DisconnectedType):
outer_inp_mitmot.append(outs[idx + offset].zeros_like())
else:
outer_inp_mitmot.append(dC_douts[idx + offset][::-1])
mitmot_inp_taps.append([])
mitmot_out_taps.append([])
idx_tap = idx + self.n_mit_mot
inner_inp_mitmot.append(dC_dXts[out_pos])
out_pos += 1
n_mitmot_inps += 1
undefined_msg = None
through_shared = False
disconnected = True
mitmot_inp_taps[idx + offset].append(0)
for jdx in range(len(self.tap_array[idx_tap])):
inner_inp_mitmot.append(dC_dXtm1s[ins_pos - self.n_seqs])
if isinstance(dC_dinps_t[ins_pos].type, NullType):
# We cannot use Null in the inner graph, so we
# use a zero tensor of the appropriate shape instead.
inner_out_mitmot.append(
aet.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)
)
undefined_msg = dC_dinps_t[ins_pos].type.why_null
else:
inner_out_mitmot.append(dC_dinps_t[ins_pos])
mitmot_inp_taps[idx + offset].append(-self.tap_array[idx_tap][jdx])
mitmot_out_taps[idx].append(-self.tap_array[idx_tap][jdx])
if not disconnected_dC_dinps_t[ins_pos]:
disconnected = False
for _sh in self.inner_shared(self_inputs):
if _sh in graph_inputs([dC_dinps_t[ins_pos]]):
through_shared = True
n_mitmot_inps += 1
ins_pos += 1
n_mitmot_outs += 1
if undefined_msg:
type_outs.append(undefined_msg)
elif through_shared:
type_outs.append("through_shared")
elif disconnected:
type_outs.append("disconnected")
else:
type_outs.append("connected")
offset += self.n_mit_sot
for idx in range(self.n_sit_sot):
mitmot_inp_taps.append([0, 1])
mitmot_out_taps.append([1])
through_shared = False
if not isinstance(dC_douts[idx + offset].type, DisconnectedType):
outer_inp_mitmot.append(dC_douts[idx + offset][::-1])
else:
if isinstance(dC_dinps_t[ins_pos].type, NullType):
# Cannot use dC_dinps_t[ins_pos].dtype, so we use
# floatX instead, as it is a dummy value that will not
# be used anyway.
outer_inp_mitmot.append(
aet.zeros(outs[idx + offset].shape, dtype=config.floatX)
)
else:
outer_inp_mitmot.append(
aet.zeros(
outs[idx + offset].shape, dtype=dC_dinps_t[ins_pos].dtype
)
)
if isinstance(dC_dinps_t[ins_pos].type, NullType):
# We cannot use Null in the inner graph, so we
# use a zero tensor of the appropriate shape instead.
inner_out_mitmot.append(
aet.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)
)
else:
inner_out_mitmot.append(dC_dinps_t[ins_pos])
for _sh in self.inner_shared(self_inputs):
if _sh in graph_inputs([dC_dinps_t[ins_pos]]):
through_shared = True
if isinstance(dC_dinps_t[ins_pos].type, NullType):
type_outs.append(dC_dinps_t[ins_pos].type.why_null)
elif through_shared:
type_outs.append("through_shared")
elif disconnected_dC_dinps_t[ins_pos]:
type_outs.append("disconnected")
else:
type_outs.append("connected")
inner_inp_mitmot += [dC_dXts[out_pos], dC_dXtm1s[ins_pos - self.n_seqs]]
n_mitmot_outs += 1
out_pos += 1
ins_pos += 1
n_mitmot_inps += 2
n_nit_sot = self.n_seqs
inner_out_nitsot = dC_dinps_t[: self.n_seqs]
inner_out_sitsot = dC_dinps_t[ins_pos:]
for _p, vl in enumerate(inner_out_sitsot):
through_shared = False
for _sh in self.inner_shared(self_inputs):
if _sh in graph_inputs([vl]):
through_shared = True
if isinstance(vl.type, NullType):
type_outs.append(vl.type.why_null)
# Replace the inner output with a zero tensor of
# the right shape
inner_out_sitsot[_p] = aet.zeros(
diff_inputs[ins_pos + _p].shape, dtype=config.floatX
)
elif through_shared:
type_outs.append("through_shared")
elif disconnected_dC_dinps_t[_p + ins_pos]:
type_outs.append("disconnected")
else:
type_outs.append("connected")
for _p, vl in enumerate(inner_out_nitsot):
through_shared = False
for _sh in self.inner_shared(self_inputs):
if _sh in graph_inputs([vl]):
through_shared = True
if isinstance(vl.type, NullType):
type_outs.append(vl.type.why_null)
# Replace the inner output with a zero tensor of
# the right shape
inner_out_nitsot[_p] = aet.zeros(
diff_inputs[_p].shape, dtype=config.floatX
)
if through_shared:
type_outs.append("through_shared")
elif disconnected_dC_dinps_t[_p]:
type_outs.append("disconnected")
else:
type_outs.append("connected")
inner_inp_sitsot = dC_dXtm1s[ins_pos - self.n_seqs :]
outer_inp_sitsot = []
for _idx, y in enumerate(inner_inp_sitsot):
x = self.outer_non_seqs(inputs)[_idx]
if isinstance(y.type, NullType):
# Cannot use dC_dXtm1s.dtype, so we use floatX instead.
outer_inp_sitsot.append(
aet.zeros(
[grad_steps + 1] + [x.shape[i] for i in range(x.ndim)],
dtype=config.floatX,
)
)
# replace y by a zero tensor of the right shape
inner_inp_sitsot[_idx] = aet.zeros(
diff_inputs[ins_pos + _idx].shape, dtype=config.floatX
)
else:
outer_inp_sitsot.append(
aet.zeros(
[grad_steps + 1] + [x.shape[i] for i in range(x.ndim)],
dtype=y.dtype,
)
)
n_sitsot_outs = len(outer_inp_sitsot)
new_tap_array = mitmot_inp_taps + [[-1] for k in range(n_sitsot_outs)]
info = OrderedDict()
info["n_seqs"] = len(outer_inp_seqs)
info["n_mit_sot"] = 0
info["tap_array"] = new_tap_array
info["gpua"] = False
info["n_mit_mot"] = len(outer_inp_mitmot)
info["n_mit_mot_outs"] = n_mitmot_outs
info["mit_mot_out_slices"] = mitmot_out_taps
info["truncate_gradient"] = self.truncate_gradient
info["n_sit_sot"] = n_sitsot_outs
info["n_shared_outs"] = 0
info["n_nit_sot"] = n_nit_sot
info["as_while"] = False
info["profile"] = self.profile
info["destroy_map"] = OrderedDict()
if self.name:
info["name"] = "grad_of_" + self.name
else:
info["name"] = None
info["mode"] = self.mode
info["allow_gc"] = self.allow_gc
outer_inputs = (
[grad_steps]
+ outer_inp_seqs
+ outer_inp_mitmot
+ outer_inp_sitsot
+ [n_steps if self.as_while else inputs[0] for _ in range(n_nit_sot)]
+ self.outer_shared(inputs)
+ self.outer_non_seqs(inputs)
)
inner_gfn_ins = (
inner_inp_seqs
+ inner_inp_mitmot
+ inner_inp_sitsot
+ self.inner_shared(self_inputs)
+ self.inner_non_seqs(self_inputs)
)
inner_gfn_outs = inner_out_mitmot + inner_out_sitsot + inner_out_nitsot
local_op = Scan(inner_gfn_ins, inner_gfn_outs, info)
outputs = local_op(*outer_inputs)
if type(outputs) not in (list, tuple):
outputs = [outputs]
# Re-order the gradients correctly
gradients = [DisconnectedType()()]
offset = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot + n_sitsot_outs
for p, (x, t) in enumerate(
zip(
outputs[offset : offset + self.n_seqs],
type_outs[offset : offset + self.n_seqs],
)
):
if t == "connected":
# If the forward scan is in as_while mode, we need to pad
# the gradients, so that they match the size of the input
# sequences.
if self.as_while:
n_zeros = inputs[0] - n_steps
shp = (n_zeros,)
if x.ndim > 1:
shp = shp + tuple(x.shape[i] for i in range(1, x.ndim))
z = aet.zeros(shp, dtype=x.dtype)
x = aet.concatenate([x[::-1], z], axis=0)
gradients.append(x)
else:
gradients.append(x[::-1])
elif t == "disconnected":
gradients.append(DisconnectedType()())
elif t == "through_shared":
gradients.append(
grad_undefined(
self, p + 1, inputs[p + 1], "Depends on a shared variable"
)
)
else:
# t contains the "why_null" string of a NullType
gradients.append(NullType(t)())
end = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot
for p, (x, t) in enumerate(zip(outputs[:end], type_outs[:end])):
if t == "connected":
# If the forward scan is in as_while mode, we need to pad
# the gradients, so that they match the size of the input
# sequences.
if self.as_while:
n_zeros = inputs[0] - grad_steps
shp = (n_zeros,)
if x.ndim > 1:
shp = shp + tuple(x.shape[i] for i in range(1, x.ndim))
z = aet.zeros(shp, dtype=x.dtype)
x = aet.concatenate([x[::-1], z], axis=0)
gradients.append(x)
else:
gradients.append(x[::-1])
elif t == "disconnected":
gradients.append(DisconnectedType()())
elif t == "through_shared":
gradients.append(
grad_undefined(
self,
p + 1 + self.n_seqs,
inputs[p + 1 + self.n_seqs],
"Depends on a shared variable",
)
)
else:
# t contains the "why_null" string of a NullType
gradients.append(NullType(t)())
start = len(gradients)
node = outs[0].owner
for idx in range(self.n_shared_outs):
disconnected = True
connected_flags = self.connection_pattern(node)[idx + start]
for dC_dout, connected in zip(dC_douts, connected_flags):
if not isinstance(dC_dout.type, DisconnectedType) and connected:
disconnected = False
if disconnected:
gradients.append(DisconnectedType()())
else:
gradients.append(
grad_undefined(
self, idx, inputs[idx], "Shared Variable with update"
)
)
start = len(gradients)
gradients += [DisconnectedType()() for _ in range(self.n_nit_sot)]
begin = end
end = begin + n_sitsot_outs
for p, (x, t) in enumerate(zip(outputs[begin:end], type_outs[begin:end])):
if t == "connected":
gradients.append(x[-1])
elif t == "disconnected":
gradients.append(DisconnectedType()())
elif t == "through_shared":
gradients.append(
grad_undefined(
self,
p + begin + 1,
inputs[p + begin + 1],
"Depends on a shared variable",
)
)
else:
# t contains the "why_null" string of a NullType
gradients.append(NullType(t)())
# Mask disconnected gradients.
# Ideally we would want to assert that the gradients we are replacing
# do indeed evaluate to 0, though that is not practical from a
# computational point of view.
# The gradients of scan are computed by replacing Disconnected with 0,
# because through the recurrence they can become nonzero.
for idx in range(len(gradients)):
disconnected = True
for kdx in range(len(node.outputs)):
if connection_pattern[idx][kdx] and not isinstance(
dC_douts[kdx].type, DisconnectedType
):
disconnected = False
if disconnected:
gradients[idx] = DisconnectedType()()
return gradients
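# Hedged recap of the ordering above: L_op returns one gradient per outer
# input of the forward scan, in the forward scan's own input order: the
# number of steps (always disconnected), the sequences, the mit_mot /
# mit_sot / sit_sot initial states, the shared variables, the nit_sot
# lengths (always disconnected), and finally the non-sequences, whose
# gradients are read from the last step of the backward scan's sit_sot
# outputs.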
def R_op(self, inputs, eval_points):
# Step 0. Prepare some shortcut variables
self_inputs = self.inputs
rop_of_inputs = (
self_inputs[: self.n_seqs + self.n_outs]
+ self_inputs[self.n_seqs + self.n_outs + self.n_shared_outs :]
)
self_outputs = self.outputs
# Step 1. Compute the R_op of the inner function
inner_eval_points = [safe_new(x, "_evalpoint") for x in rop_of_inputs]
if self.as_while:
rop_self_outputs = self_outputs[:-1]
else:
rop_self_outputs = self_outputs
if self.info["n_shared_outs"] > 0:
rop_self_outputs = rop_self_outputs[: -self.info["n_shared_outs"]]
rop_outs = Rop(rop_self_outputs, rop_of_inputs, inner_eval_points)
if type(rop_outs) not in (list, tuple):
rop_outs = [rop_outs]
# Step 2. Figure out what corresponds to what in the scan
# When doing the R-op of scan, you end up with double of each type of
# input, because each sequence also needs its eval point, and the same
# holds for each mit_mot, mit_sot, sit_sot or other type of input.
# Interestingly enough, all these types of eval points behave the same
# way as the input to which they correspond.
# The only exception is the eval point for the number of sequences and
# the eval point for the number of nit_sots, which should just be
# ignored.
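# Rough example of the doubling described above (assumed sizes): a forward
# scan with 2 sequences and 1 sit_sot state turns into an R-op scan with 4
# sequences (the originals followed by their evaluation points) and 2
# sit_sot states, while the number of shared outputs stays the same.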
info = OrderedDict()
info["n_seqs"] = self.n_seqs * 2
info["n_mit_sot"] = self.n_mit_sot * 2
info["n_sit_sot"] = self.n_sit_sot * 2
info["n_mit_mot"] = self.n_mit_mot * 2
info["n_nit_sot"] = self.n_nit_sot * 2
info["n_shared_outs"] = self.n_shared_outs
info["gpua"] = False
info["as_while"] = self.as_while
info["profile"] = self.profile
info["truncate_gradient"] = self.truncate_gradient
if self.name:
info["name"] = "rop_of_" + self.name
else:
info["name"] = None
info["mode"] = self.mode
info["allow_gc"] = self.allow_gc
info["mit_mot_out_slices"] = self.mit_mot_out_slices * 2
info["destroy_map"] = OrderedDict()
new_tap_array = []
b = 0
e = self.n_mit_mot
new_tap_array += self.tap_array[b:e] * 2
b = e
e += self.n_mit_sot
new_tap_array += self.tap_array[b:e] * 2
b = e
e += self.n_sit_sot
new_tap_array += self.tap_array[b:e] * 2
info["tap_array"] = new_tap_array
# Sequences ...
b = 1
ib = 0
e = 1 + self.n_seqs
ie = self.n_seqs
clean_eval_points = []
for inp, evp in zip(inputs[b:e], eval_points[b:e]):
if evp is not None:
clean_eval_points.append(evp)
else:
clean_eval_points.append(inp.zeros_like())
scan_seqs = inputs[b:e] + clean_eval_points
inner_seqs = self_inputs[ib:ie] + inner_eval_points[ib:ie]
# MIT_MOT sequences ...
b = e
e = e + self.n_mit_mot
ib = ie
ie = ie + int(np.sum([len(x) for x in self.tap_array[: self.n_mit_mot]]))
clean_eval_points = []
for inp, evp in zip(inputs[b:e], eval_points[b:e]):
if evp is not None:
clean_eval_points.append(evp)
else:
clean_eval_points.append(inp.zeros_like())
scan_mit_mot = inputs[b:e] + clean_eval_points
inner_mit_mot = self_inputs[ib:ie] + inner_eval_points[ib:ie]
# MIT_SOT sequences ...
b = e
e = e + self.n_mit_sot
ib = ie
ie = ie + int(
np.sum(
[
len(x)
for x in self.tap_array[
self.n_mit_mot : self.n_mit_mot + self.n_mit_sot
]
]
)
)
clean_eval_points = []
for inp, evp in zip(inputs[b:e], eval_points[b:e]):
if evp is not None:
clean_eval_points.append(evp)
else:
clean_eval_points.append(inp.zeros_like())
scan_mit_sot = inputs[b:e] + clean_eval_points
inner_mit_sot = self_inputs[ib:ie] + inner_eval_points[ib:ie]
# SIT_SOT sequences ...
b = e
e = e + self.n_sit_sot
ib = ie
ie = ie + self.n_sit_sot
clean_eval_points = []
for inp, evp in zip(inputs[b:e], eval_points[b:e]):
if evp is not None:
clean_eval_points.append(evp)
else:
clean_eval_points.append(inp.zeros_like())
scan_sit_sot = inputs[b:e] + clean_eval_points
inner_sit_sot = self_inputs[ib:ie] + inner_eval_points[ib:ie]
# Shared outs ...
b = e
e = e + self.n_shared_outs
ib = ie
ie = ie + self.n_shared_outs
scan_shared = inputs[b:e]
inner_shared = self_inputs[ib:ie]
# NIT_SOT sequences
b = e
e = e + self.n_nit_sot
scan_nit_sot = inputs[b:e] * 2
# All other arguments
clean_eval_points = []
for inp, evp in zip(inputs[e:], eval_points[e:]):
if evp is not None:
clean_eval_points.append(evp)
else:
clean_eval_points.append(inp.zeros_like())
scan_other = inputs[e:] + clean_eval_points
# inner_eval_points do not have entries for shared variables
inner_other = self_inputs[ie:] + inner_eval_points[ib:]
# Outputs
n_mit_mot_outs = int(np.sum([len(x) for x in self.mit_mot_out_slices]))
info["n_mit_mot_outs"] = n_mit_mot_outs * 2
b = 0
e = n_mit_mot_outs
inner_out_mit_mot = self_outputs[b:e] + rop_outs[b:e]
b = e
e = e + self.n_mit_sot
inner_out_mit_sot = self_outputs[b:e] + rop_outs[b:e]
b = e
e = e + self.n_sit_sot
inner_out_sit_sot = self_outputs[b:e] + rop_outs[b:e]
b = e
e = e + self.n_nit_sot
inner_out_nit_sot = self_outputs[b:e] + rop_outs[b:e]
b = e
e = e + self.n_shared_outs
inner_out_shared = self_outputs[b:e]
inner_ins = (
inner_seqs
+ inner_mit_mot
+ inner_mit_sot
+ inner_sit_sot
+ inner_shared
+ inner_other
)
inner_outs = (
inner_out_mit_mot
+ inner_out_mit_sot
+ inner_out_sit_sot
+ inner_out_nit_sot
+ inner_out_shared
)
if self.as_while:
inner_outs += [self_outputs[-1]]
scan_inputs = (
[inputs[0]]
+ scan_seqs
+ scan_mit_mot
+ scan_mit_sot
+ scan_sit_sot
+ scan_shared
+ scan_nit_sot
+ scan_other
)
local_op = Scan(inner_ins, inner_outs, info)
outputs = local_op(*scan_inputs)
if type(outputs) not in (list, tuple):
outputs = [outputs]
# Select only the outputs corresponding to the R_op results
final_outs = []
b = self.n_mit_mot
e = self.n_mit_mot * 2
final_outs += outputs[b:e]
b = e + self.n_mit_sot
e = e + self.n_mit_sot * 2
final_outs += outputs[b:e]
b = e + self.n_sit_sot
e = e + self.n_sit_sot * 2
final_outs += outputs[b:e]
b = e + self.n_nit_sot
e = e + self.n_nit_sot * 2
final_outs += outputs[b:e]
final_outs += [None] * self.n_shared_outs
return final_outs
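# Hedged note: each doubled output group of the R-op scan holds the
# original outputs followed by their R_op counterparts; the slicing above
# keeps only the second half of every group, and the shared outputs get
# None because no R_op is computed for them.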
# Since Scan is an op that contains an Aesara compiled function, it is
# useful to let DebugMode know about it.
ops_with_inner_function[Scan] = "fn"
@register_profiler_printer
def profile_printer(
message, compile_time, fct_call_time, apply_time, apply_cimpl, outputs_size, file
):
# Scan overhead profile
if any(
[
isinstance(node.op, Scan) and v > 0
for (fgraph, node), v in apply_time.items()
]
):
print("", file=file)
print("Scan overhead:", file=file)
print(
"<Scan op time(s)> <sub scan fct time(s)> <sub scan op "
"time(s)> <sub scan fct time(% scan op time)> <sub scan "
"op time(% scan op time)> <node>",
file=file,
)
total_super_scan_time = 0
total_scan_fct_time = 0
total_scan_op_time = 0
for (fgraph, node), v in apply_time.items():
if isinstance(node.op, Scan) and not node.op.fn.profile:
print(
" One scan node do not have its inner profile enabled. "
"If you enable Aesara profiler with "
"'aesara.function(..., profile=True)', you must manually"
" enable the profiling for each scan too: "
"'aesara.scan(...,profile=True)'."
" Or use Aesara flag 'profile=True'.",
file=file,
)
elif isinstance(node.op, Scan) and node.op.fn.profile:
if v > 0:
scan_fct_time = node.op.fn.profile.call_time
scan_op_time = sum(node.op.fn.profile.apply_time.values())
total_super_scan_time += v
total_scan_fct_time += scan_fct_time
total_scan_op_time += scan_op_time
print(
" %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%"
% (
v,
scan_fct_time,
scan_op_time,
scan_fct_time / v * 100,
scan_op_time / v * 100,
),
node,
file=file,
)
else:
print(
(" The node took 0s, so we can not " "compute the overhead"),
node,
file=file,
)
if total_super_scan_time == 0:
print(" No scan have its inner profile enabled.", file=file)
else:
print(
"total %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%"
% (
total_super_scan_time,
total_scan_fct_time,
total_scan_op_time,
total_scan_fct_time / total_super_scan_time * 100,
total_scan_op_time / total_super_scan_time * 100,
),
file=file,
)
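# Usage reminder (paraphrasing the message printed above): this report only
# shows per-node scan overhead when profiling is enabled both on the outer
# function, e.g. aesara.function(..., profile=True), and on each scan,
# e.g. aesara.scan(..., profile=True), or globally via the Aesara flag
# profile=True.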
| true
| true
|
f7186e5afa113e54c0e28168f166b0ec2f0dcf61
| 2,842
|
py
|
Python
|
src/meu_condominio/views/funcionario.py
|
lucasjoao/meu_condominio
|
aac37911384726b1aa1a40237050801a39174dc7
|
[
"Unlicense"
] | null | null | null |
src/meu_condominio/views/funcionario.py
|
lucasjoao/meu_condominio
|
aac37911384726b1aa1a40237050801a39174dc7
|
[
"Unlicense"
] | null | null | null |
src/meu_condominio/views/funcionario.py
|
lucasjoao/meu_condominio
|
aac37911384726b1aa1a40237050801a39174dc7
|
[
"Unlicense"
] | null | null | null |
# <controller>
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.contrib import messages
from meu_condominio.forms import *
from meu_condominio.models import Condominio, Funcionario
def funcionarios(request):
if request.user.is_authenticated:
return render(request, 'meu_condominio/funcionarios.html',
{'user' : request.user})
else:
return HttpResponseRedirect(reverse('mc-login'))
def f_add(request):
if request.user.is_authenticated:
if request.method == 'POST':
form = FuncionarioForm(request.POST)
if form.is_valid():
c = Condominio.objects.get(user__pk=request.user.pk)
f = Funcionario(nome=request.POST['nome'],
salario=request.POST['salario'],
condominio=c)
f.save()
messages.success(request, 'Funcionário adicionado com sucesso!')
return HttpResponseRedirect(reverse('mc-f_view'))
else:
form = FuncionarioForm()
title = 'Cadastrar'
return render(request, 'meu_condominio/funcionarios/form.html',
{'form' : form, 'title' : title})
else:
return HttpResponseRedirect(reverse('mc-login'))
def f_view(request):
if request.user.is_authenticated:
c = Condominio.objects.get(user__pk=request.user.pk)
funcionarios = Funcionario.objects.all().filter(condominio__pk=c.pk)
return render(request, 'meu_condominio/funcionarios/f_view.html',
{'funcionarios' : funcionarios})
else:
return HttpResponseRedirect(reverse('mc-login'))
def f_del(request, id):
if request.user.is_authenticated:
funcionario = Funcionario.objects.get(pk=id)
funcionario.delete()
messages.success(request, 'Funcionário deletado com sucesso!')
return HttpResponseRedirect(reverse('mc-f_view'))
else:
return HttpResponseRedirect(reverse('mc-login'))
def f_edit(request, id):
if request.user.is_authenticated:
funcionario = Funcionario.objects.get(pk=id)
if request.method == 'POST':
form = FuncionarioForm(request.POST)
if form.is_valid():
funcionario.nome = request.POST['nome']
funcionario.salario = request.POST['salario']
funcionario.save()
messages.success(request, 'Funcionário editado com sucesso!')
return HttpResponseRedirect(reverse('mc-f_view'))
else:
form = FuncionarioForm()
form.fields['nome'].widget.attrs['placeholder'] = funcionario.nome
form.fields['salario'].widget.attrs['placeholder'] = funcionario.salario
title = 'Editar'
return render(request, 'meu_condominio/funcionarios/form.html',
{'form' : form, 'title' : title})
else:
return HttpResponseRedirect(reverse('mc-login'))
| 34.240964
| 76
| 0.683673
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.contrib import messages
from meu_condominio.forms import *
from meu_condominio.models import Condominio, Funcionario
def funcionarios(request):
if request.user.is_authenticated:
return render(request, 'meu_condominio/funcionarios.html',
{'user' : request.user})
else:
return HttpResponseRedirect(reverse('mc-login'))
def f_add(request):
if request.user.is_authenticated:
if request.method == 'POST':
form = FuncionarioForm(request.POST)
if form.is_valid():
c = Condominio.objects.get(user__pk=request.user.pk)
f = Funcionario(nome=request.POST['nome'],
salario=request.POST['salario'],
condominio=c)
f.save()
messages.success(request, 'Funcionário adicionado com sucesso!')
return HttpResponseRedirect(reverse('mc-f_view'))
else:
form = FuncionarioForm()
title = 'Cadastrar'
return render(request, 'meu_condominio/funcionarios/form.html',
{'form' : form, 'title' : title})
else:
return HttpResponseRedirect(reverse('mc-login'))
def f_view(request):
if request.user.is_authenticated:
c = Condominio.objects.get(user__pk=request.user.pk)
funcionarios = Funcionario.objects.all().filter(condominio__pk=c.pk)
return render(request, 'meu_condominio/funcionarios/f_view.html',
{'funcionarios' : funcionarios})
else:
return HttpResponseRedirect(reverse('mc-login'))
def f_del(request, id):
if request.user.is_authenticated:
funcionario = Funcionario.objects.get(pk=id)
funcionario.delete()
messages.success(request, 'Funcionário deletado com sucesso!')
return HttpResponseRedirect(reverse('mc-f_view'))
else:
return HttpResponseRedirect(reverse('mc-login'))
def f_edit(request, id):
if request.user.is_authenticated:
funcionario = Funcionario.objects.get(pk=id)
if request.method == 'POST':
form = FuncionarioForm(request.POST)
if form.is_valid():
funcionario.nome = request.POST['nome']
funcionario.salario = request.POST['salario']
funcionario.save()
messages.success(request, 'Funcionário editado com sucesso!')
return HttpResponseRedirect(reverse('mc-f_view'))
else:
form = FuncionarioForm()
form.fields['nome'].widget.attrs['placeholder'] = funcionario.nome
form.fields['salario'].widget.attrs['placeholder'] = funcionario.salario
title = 'Editar'
return render(request, 'meu_condominio/funcionarios/form.html',
{'form' : form, 'title' : title})
else:
return HttpResponseRedirect(reverse('mc-login'))
| true
| true
|
f7186e6d02c05356235e0949f257691e9716ebfa
| 24,748
|
py
|
Python
|
qa327_test/frontend/test_update_ticket.py
|
EricFillion/CMPE-327
|
5e9f7c0b083643f7b6b9702775f69f67863b395e
|
[
"MIT"
] | null | null | null |
qa327_test/frontend/test_update_ticket.py
|
EricFillion/CMPE-327
|
5e9f7c0b083643f7b6b9702775f69f67863b395e
|
[
"MIT"
] | null | null | null |
qa327_test/frontend/test_update_ticket.py
|
EricFillion/CMPE-327
|
5e9f7c0b083643f7b6b9702775f69f67863b395e
|
[
"MIT"
] | null | null | null |
import pytest
from seleniumbase import BaseCase
from qa327_test.conftest import base_url
from unittest.mock import patch
from qa327_test.common import TEST_USER, TEST_TICKET, auto_login
from qa327.models import Ticket
from datetime import datetime
from qa327_test.conftest import base_url
"""
This file defines all unit tests for updating tickets from the user profile page
"""
class FrontEndUpdateTicketTest(BaseCase):
"""
A class that contains the unit tests for the update ticket form
"""
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_name_alphanumeric_negative(self, *_):
"""
R5.1.1: The name of the ticket has to be alphanumeric-only - Negative.
"""
# Login and user mocking is handled with the common login decorator
# Enter a string containing symbols (ex. "t!cket_1") into the element `#updateform_input_name`
self.type("#updateform_input_name", "t!cket_1")
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_error` element shows an error message stating “Unable to update ticket: The name of the ticket has to be alphanumeric only”.
self.assert_text("Unable to update ticket: The name of the ticket has to be alphanumeric only", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_name_space_first_char(self, *_):
"""
R5.1.2: The name is only allowed spaces if it is not the first or the last character - Negative. Testing the first character.
"""
# Login and user mocking is handled with the common login decorator
# Enter a string that is less than 60 characters, containing only alphanumeric symbols, that has a space for the first character (ex. " t1") in the element `#updateform_input_name`
self.type("#updateform_input_name", " t1")
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
print(datetime.now().strftime("%Y%m%d"))
# Validate that the `#message_error` element shows an error message stating “Unable to update ticket: The name of the ticket is only allowed spaces if it is not the first or last character”.
self.assert_text("Unable to update ticket: The name of the ticket is only allowed spaces if it is not the first or last character", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_name_space_last_char(self, *_):
"""
R5.1.3: The name is only allowed spaces if it is not the first or the last character - Negative. Testing the last character.
"""
# Login and user mocking is handled with the common login decorator
# Enter a string, that is less than 60 characters, containing only alphanumeric symbols that
# has a space for the last character (ex. "t1 ") in the element `#updateform_input_name`
self.type("#updateform_input_name", "t1 ")
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_error` element shows an error message stating “Unable to update ticket: The name of the ticket is only allowed spaces if it is not the first or last character”.
self.assert_text("Unable to update ticket: The name of the ticket is only allowed spaces if it is not the first or last character", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_name_space_in_middle(self, *_):
"""
R5.1.4: The name is only allowed spaces if it is not the first or the last character - Positive.
"""
# Login and user mocking is handled with the common login decorator
# Enter a string that is less than 60 characters, containing only alphanumeric symbols that
# contains spaces that are not the first and last character (ex. "ticket 1") in the element `#updateform_input_name`
self.type("#updateform_input_name", "ticket 1")
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_info` element shows "Ticket was updated successfully"
self.assert_text("Ticket was updated successfully", selector = '.message_info')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_valid_name(self, *_):
"""
R5.1.5: Updating to a valid name - Positive.
"""
# Login and user mocking is handled with the common login decorator
# Enter test ticket's name into the element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_info` element shows "Ticket was updated successfully"
self.assert_text("Ticket was updated successfully", selector = '.message_info')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_long_name(self, *_):
"""
R5.2: The name of the ticket is no longer than 60 characters - Negative.
"""
# Login and user mocking is handled with the common login decorator
# Enter “aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa”
# (61 chars) in the element element `#updateform_input_name`
self.type("#updateform_input_name", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_error` element shows an error message stating
# “Unable to update ticket: The name of the ticket should be no longer than 60 characters”.
self.assert_text("Unable to update ticket: The name of the ticket should be no longer than 60 characters", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_low_quantity(self, *_):
"""
R5.3.1: The quantity of the tickets has to be more than 0, and less than or equal to 100 - Negative. Testing quantity below range.
"""
# Login and user mocking is handled with the common login decorator
# Enter the test_ticket's name in element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter a number less than or equal to 0 into the element `#updateform_input_quantity`
self.type("#updateform_input_quantity", "0")
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_error` element shows an error message stating “Unable to update ticket:
# The quantity of the ticket must be between 1 and 100”.
self.assert_text("Unable to update ticket: The quantity of the ticket must be between 1 and 100", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_high_quantity(self, *_):
"""
R5.3.2: The quantity of the tickets has to be more than 0, and less than or equal to 100 - Negative. Testing quantity above range.
"""
# Login and user mocking is handled with the common login decorator
# Enter the test_ticket's name in element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter a number greater than 100 (ex. 101) into the element `#updateform_input_quantity`
self.type("#updateform_input_quantity", "101")
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_error` element shows an error message stating “Unable to update ticket:
# The quantity of the ticket must be between 1 and 100”.
self.assert_text("Unable to update ticket: The quantity of the ticket must be between 1 and 100", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_valid_quantity(self, *_):
"""
R5.3.3: The quantity of the tickets has to be more than 0, and less than or equal to 100 - Positive.
"""
# Login and user mocking is handled with the common login decorator
# Enter test ticket's name into the element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the number 50 into the element `#updateform_input_quantity`
self.type("#updateform_input_quantity", "50")
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_info` element shows "Ticket was updated successfully"
self.assert_text("Ticket was updated successfully", selector = '.message_info')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_low_price(self, *_):
"""
R5.4.1: Price has to be of range [10, 100] - Negative. Testing price below the range.
"""
# Login and user mocking is handled with the common login decorator
# Enter the test_ticket's name in element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the test_ticket's quantity in element `updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter a number below 10 (ex. 9) into the element `#updateform_input_price`
self.type("#updateform_input_price", "9")
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_error` element shows an error message stating
# “Unable to update ticket: The price of the ticket must be between 10 and 100”.
self.assert_text("Unable to update ticket: The price of the ticket must be between 10 and 100", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_high_price(self, *_):
"""
R5.4.2: Price has to be of range [10, 100] - Negative. Testing price above the range.
"""
# Login and user mocking is handled with the common login decorator
# Enter the test_ticket's name in element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the test_ticket's quantity in element `updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter a number above 100 (ex. 101) into the element `#updateform_input_price`
self.type("#updateform_input_price", "101")
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_error` element shows an error message stating
# “Unable to update ticket: The price of the ticket must be between 10 and 100”.
self.assert_text("Unable to update ticket: The price of the ticket must be between 10 and 100", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_valid_price(self, *_):
"""
R5.4.3: Price has to be of range [10, 100] - Positive.
"""
# Login and user mocking is handled with the common login decorator
# Enter test ticket's name into the element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the test_ticket's quantity in element `updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the number 50 into the element `#updateform_input_price`
self.type("#updateform_input_price", "50")
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_info` element shows "Ticket was updated successfully"
self.assert_text("Ticket was updated successfully", selector = '.message_info')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_incorrect_date_format(self, *_):
"""
R5.5.1: Date must be given in the format YYYYMMDD (e.g. 20200901) - Negative
"""
# Login and user mocking is handled with the common login decorator
# Enter the test_ticket's name in element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the test_ticket's quantity in element `updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter a date in an invalid format (ex. 20201331) into the element `#updateform_input_expiry`
self.type("#updateform_input_expiry", "20201331")
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_error` element shows an error message stating
# “Unable to update ticket: Date must be given in the format YYYYMMDD (e.g. 20200901)”.
self.assert_text("Unable to update ticket: Date must be given in the format YYYYMMDD (e.g. 20200901)", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_valid_date(self, *_):
"""
R5.5.2: Date must be given in the format YYYYMMDD (e.g. 20200901) - Positive.
"""
# Login and user mocking is handled with the common login decorator
# Enter test ticket's name into the element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the test_ticket's quantity in element `updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Call function to get todays date and enter date into the element
# `#updateform_input_expiry`. Todays date is used so that the date is never in the past.
self.type("#updateform_input_expiry", datetime.now().strftime("%Y%m%d"))
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_info` element shows "Ticket was updated successfully"
self.assert_text("Ticket was updated successfully", selector = '.message_info')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@auto_login(TEST_USER)
def test_update_ticket_non_existent(self, *_):
"""
R5.6.1: The ticket of the given name must exist - Negative.
"""
# Login and user mocking is handled with the common login decorator
# Enter "nonExistentTicket" in element `#updateform_input_name`
self.type("#updateform_input_name", "nonExistentTicket")
# Enter the test_ticket's quantity in element `updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_error` element shows an error message stating
# “Unable to update ticket: The ticket of the given name must exist."
self.assert_text("Unable to update ticket: The ticket of the given name must exist.", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_error_redirect(self, *_):
"""
R5.7.1: For any errors, redirect back to / and show an error message.
"""
# Login and user mocking is handled with the common login decorator
# Enter " no!tATicket " in element `#updateform_input_name`
self.type("#updateform_input_name", " no!tATicket ")
# Enter the test_ticket's quantity in element `updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# Validate that the page has been redirected to '/'
self.assert_equal(self.get_current_url(), base_url + '/')
        # Validate that the `#message_error` element is shown.
self.assert_element(".message_error")
| 48.52549
| 188
| 0.694642
|
import pytest
from seleniumbase import BaseCase
from qa327_test.conftest import base_url
from unittest.mock import patch
from qa327_test.common import TEST_USER, TEST_TICKET, auto_login
from qa327.models import Ticket
from datetime import datetime
from qa327_test.conftest import base_url
class FrontEndUpdateTicketTest(BaseCase):
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_name_alphanumeric_negative(self, *_):
        self.type("#updateform_input_name", "t!cket_1")
        self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
        # Enter the test_ticket's price in element `#updateform_input_price`
        self.type("#updateform_input_price", str(TEST_TICKET.price))
        self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
        # Click element `input[type = "updateform_submit"]`
        self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_error` element shows an error message stating “Unable to update ticket: The name of the ticket has to be alphanumeric only”.
self.assert_text("Unable to update ticket: The name of the ticket has to be alphanumeric only", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_name_space_first_char(self, *_):
# Login and user mocking is handled with the common login decorator
# Enter a string, that is less than 60 characters, containing only alphanumeric symbols that has a space for the first character (ex. " t1")in the element `#updateform_input_name`
self.type("#updateform_input_name", " t1")
        # Enter the test_ticket's quantity in element `updateform_input_quantity`
        self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
        self.type("#updateform_input_price", str(TEST_TICKET.price))
        # Enter the test_ticket's expiry date in element `#updateform_input_expiry`
        self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
        self.click('#updateform_submit')
        self.assert_element("#welcome")
        print(datetime.now().strftime("%Y%m%d"))
        self.assert_text("Unable to update ticket: The name of the ticket is only allowed spaces if it is not the first or last character", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_name_space_last_char(self, *_):
        self.type("#updateform_input_name", "t1 ")
        self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
        # Enter the test_ticket's price in element `#updateform_input_price`
        self.type("#updateform_input_price", str(TEST_TICKET.price))
        self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
        # Click element `input[type = "updateform_submit"]`
        self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_error` element shows an error message stating “Unable to update ticket: The name of the ticket has to be alphanumeric only”.
self.assert_text("Unable to update ticket: The name of the ticket is only allowed spaces if it is not the first or last character", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_name_space_in_middle(self, *_):
# Login and user mocking is handled with the common login decorator
# Enter a string that is less than 60 characters, containing only alphanumeric symbols that
# contains spaces that are not the first and last character (ex. "ticket 1") in the element `#updateform_input_name`
self.type("#updateform_input_name", "ticket 1")
        # Enter the test_ticket's quantity in element `updateform_input_quantity`
        self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
        self.type("#updateform_input_price", str(TEST_TICKET.price))
        # Enter the test_ticket's expiry date in element `#updateform_input_expiry`
        self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
        self.click('#updateform_submit')
        self.assert_element("#welcome")
        self.assert_text("Ticket was updated successfully", selector = '.message_info')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_valid_name(self, *_):
self.type("#updateform_input_name", TEST_TICKET.name)
        # Enter the test_ticket's quantity in element `updateform_input_quantity`
        self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
        self.type("#updateform_input_price", str(TEST_TICKET.price))
        # Enter the test_ticket's expiry date in element `#updateform_input_expiry`
        self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
        self.click('#updateform_submit')
        self.assert_element("#welcome")
        self.assert_text("Ticket was updated successfully", selector = '.message_info')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_long_name(self, *_):
        self.type("#updateform_input_name", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
        self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
        # Enter the test_ticket's price in element `#updateform_input_price`
        self.type("#updateform_input_price", str(TEST_TICKET.price))
        self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
        # Click element `input[type = "updateform_submit"]`
        self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_error` element shows an error message stating
# “Unable to update ticket: The name of the ticket should be no longer than 60 characters”.
self.assert_text("Unable to update ticket: The name of the ticket should be no longer than 60 characters", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_low_quantity(self, *_):
# Login and user mocking is handled with the common login decorator
        # Enter the test_ticket's name in element `#updateform_input_name`
        self.type("#updateform_input_name", TEST_TICKET.name)
        self.type("#updateform_input_quantity", "0")
        self.type("#updateform_input_price", str(TEST_TICKET.price))
        # Enter the test_ticket's expiry date in element `#updateform_input_expiry`
        self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
        self.click('#updateform_submit')
        self.assert_element("#welcome")
        self.assert_text("Unable to update ticket: The quantity of the ticket must be between 1 and 100", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_high_quantity(self, *_):
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter a number greater than 100 (ex. 101) into the element `#updateform_input_quantity`
self.type("#updateform_input_quantity", "101")
        # Enter the test_ticket's price in element `#updateform_input_price`
        self.type("#updateform_input_price", str(TEST_TICKET.price))
        self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
        # Click element `input[type = "updateform_submit"]`
        self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_error` element shows an error message stating “Unable to update ticket:
# The quantity of the ticket must be between 1 and 100”.
self.assert_text("Unable to update ticket: The quantity of the ticket must be between 1 and 100", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_valid_quantity(self, *_):
# Login and user mocking is handled with the common login decorator
        # Enter test ticket's name into the element `#updateform_input_name`
        self.type("#updateform_input_name", TEST_TICKET.name)
        self.type("#updateform_input_quantity", "50")
        self.type("#updateform_input_price", str(TEST_TICKET.price))
        # Enter the test_ticket's expiry date in element `#updateform_input_expiry`
        self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
        self.click('#updateform_submit')
        self.assert_element("#welcome")
        self.assert_text("Ticket was updated successfully", selector = '.message_info')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_low_price(self, *_):
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the test_ticket's quantity in element `updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
        self.type("#updateform_input_price", "9")
        self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
        # Click element `input[type = "updateform_submit"]`
        self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_error` element shows an error message stating
# “Unable to update ticket: The price of the ticket must be between 10 and 100”.
self.assert_text("Unable to update ticket: The price of the ticket must be between 10 and 100", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_high_price(self, *_):
# Login and user mocking is handled with the common login decorator
        # Enter the test_ticket's name in element `#updateform_input_name`
        self.type("#updateform_input_name", TEST_TICKET.name)
        self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
        # Enter a number above 100 (ex. 101) into the element `#updateform_input_price`
        self.type("#updateform_input_price", "101")
        # Enter the test_ticket's expiry date in element `#updateform_input_expiry`
        self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
        self.click('#updateform_submit')
        self.assert_element("#welcome")
        self.assert_text("Unable to update ticket: The price of the ticket must be between 10 and 100", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_valid_price(self, *_):
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the test_ticket's quantity in element `updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
        self.type("#updateform_input_price", "50")
        self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
        # Click element `input[type = "updateform_submit"]`
        self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_info` element shows "Ticket was updated successfully"
self.assert_text("Ticket was updated successfully", selector = '.message_info')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_incorrect_date_format(self, *_):
# Login and user mocking is handled with the common login decorator
        # Enter the test_ticket's name in element `#updateform_input_name`
        self.type("#updateform_input_name", TEST_TICKET.name)
        self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
        # Enter the test_ticket's price in element `#updateform_input_price`
        self.type("#updateform_input_price", str(TEST_TICKET.price))
        self.type("#updateform_input_expiry", "20201331")
        self.click('#updateform_submit')
        self.assert_element("#welcome")
        self.assert_text("Unable to update ticket: Date must be given in the format YYYYMMDD (e.g. 20200901)", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_valid_date(self, *_):
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the test_ticket's quantity in element `updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Call function to get todays date and enter date into the element
# `#updateform_input_expiry`. Todays date is used so that the date is never in the past.
self.type("#updateform_input_expiry", datetime.now().strftime("%Y%m%d"))
# Click element `input[type = "updateform_submit"]`
        self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `#message_info` element shows "Ticket was updated successfully"
self.assert_text("Ticket was updated successfully", selector = '.message_info')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@auto_login(TEST_USER)
def test_update_ticket_non_existent(self, *_):
# Login and user mocking is handled with the common login decorator
# Enter "nonExistentTicket" in element `#updateform_input_name`
self.type("#updateform_input_name", "nonExistentTicket")
# Enter the test_ticket's quantity in element `updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
self.type("#updateform_input_price", str(TEST_TICKET.price))
        # Enter the test_ticket's expiry date in element `#updateform_input_expiry`
        self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
        self.click('#updateform_submit')
        self.assert_element("#welcome")
        self.assert_text("Unable to update ticket: The ticket of the given name must exist.", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_error_redirect(self, *_):
# Login and user mocking is handled with the common login decorator
# Enter " no!tATicket " in element `#updateform_input_name`
        self.type("#updateform_input_name", " no!tATicket ")
        # Enter the test_ticket's quantity in element `updateform_input_quantity`
        self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
        # Enter the test_ticket's price in element `#updateform_input_price`
        self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click element `input[type = "updateform_submit"]`
self.click('#updateform_submit')
# Validate that the page has been redirected to '/'
self.assert_equal(self.get_current_url(), base_url + '/')
        # Validate that the `#message_error` element is shown.
self.assert_element(".message_error")
| true
| true
|
f7186fc46e644f635442d789ab4d0f721f98e6ea
| 1,072
|
py
|
Python
|
pythonLib/Evaluate_tool/test.py
|
BK-bokai/TW-SIM
|
4e0bce9949919463af9d475b719a579b5e17c343
|
[
"MIT"
] | null | null | null |
pythonLib/Evaluate_tool/test.py
|
BK-bokai/TW-SIM
|
4e0bce9949919463af9d475b719a579b5e17c343
|
[
"MIT"
] | 4
|
2020-09-07T20:55:40.000Z
|
2021-10-06T13:08:51.000Z
|
pythonLib/Evaluate_tool/test.py
|
BK-bokai/BK_WEB
|
8c730e7fff54353768e19eeecb296c5435fd63b3
|
[
"MIT"
] | null | null | null |
import MySQLdb
import re, time
# connect() opens the database connection; arguments such as user name, password and host can be passed in.
# This only connects to the database; a cursor has to be created in order to operate on it.
conn= MySQLdb.connect(
host='localhost',
port = 3306,
user='bokai',
passwd='2841p4204',
db ='tw_sim_evaluate',
)
# Create a cursor through the cursor() method of the connection object conn.
cur = conn.cursor()
start = '2016-01-01'
end = '2016-01-31'
# Update the rows that match the query condition
# cur.execute("update evaluate_tasks set Finish='%d' where Time_Period = '%s'" % (now+"_"+start+"-"+end,True,start+'_'+end))
cur.execute("update met_evaluates set Finish='%d' where Time_Period = '%s'" % (True,start+'_'+end))
conn.commit()
# # Connect MySQL
# import mysql.connector
# start = '2016-06-01'
# end = '2016-06-300'
# conn = mysql.connector.connect(
# host = "127.0.0.1",
# user = "bokai",
# password = "2841p4204",
# database = "tw_sim_evaluate",
# )
# cursor=conn.cursor()
# update_users = "UPDATE test SET Finish='%s' where Time_Period = '%s'" % ('test',start+'_'+end)
# cursor.execute(update_users)
# conn.commit()
print(time.perf_counter())
print(time.clock())
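# Editorial note (a hedged sketch, not part of the original script): building SQL with
# Python string formatting, as above, is fragile and injection-prone. MySQLdb also
# accepts parameterized queries with %s placeholders; assuming the same table and
# column names, the update could be written as:
#
#   cur.execute("UPDATE met_evaluates SET Finish=%s WHERE Time_Period = %s",
#               (True, start + '_' + end))
#   conn.commit()
#
# Note also that time.clock() was removed in Python 3.8; time.perf_counter() above is
# its replacement.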
| 26.146341
| 124
| 0.646455
|
import MySQLdb
import re, time
conn= MySQLdb.connect(
host='localhost',
port = 3306,
user='bokai',
passwd='2841p4204',
db ='tw_sim_evaluate',
)
cur = conn.cursor()
start = '2016-01-01'
end = '2016-01-31'
cur.execute("update met_evaluates set Finish='%d' where Time_Period = '%s'" % (True,start+'_'+end))
conn.commit()
print(time.perf_counter())
print(time.clock())
| true
| true
|
f7186ff31a042b4df78abd1eb0d664d671177e3a
| 2,843
|
py
|
Python
|
delete_empty_detection.py
|
ichiro-its/detection-utilities
|
2c9ceba1a8e7e91e3c3c098dcf7bdf38f6d916b1
|
[
"MIT"
] | null | null | null |
delete_empty_detection.py
|
ichiro-its/detection-utilities
|
2c9ceba1a8e7e91e3c3c098dcf7bdf38f6d916b1
|
[
"MIT"
] | 1
|
2022-03-17T07:03:56.000Z
|
2022-03-17T07:03:56.000Z
|
delete_empty_detection.py
|
ichiro-its/detection-utilities
|
2c9ceba1a8e7e91e3c3c098dcf7bdf38f6d916b1
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 Ichiro ITS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import os
from tqdm import tqdm
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', help='path for image data',
type=str, default="data")
arg = parser.parse_args()
data_path = arg.data_path
# loop every directory in <data_path> directory
for name in os.listdir(data_path):
target_dir = os.path.join(data_path, name)
if os.path.isdir(target_dir):
print(f"\nRunning on directory {target_dir}...")
with open(f"{target_dir}.txt", "r") as file_list_text:
for file in tqdm(os.listdir(target_dir)):
full_path = os.path.join(target_dir, file)
file_name = full_path.split('.')[0]
extension = full_path.split('.')[-1]
if os.path.isfile(full_path) and extension == 'txt':
line_in_file_list = file_list_text.readline().strip()
# check if file txt is empty -> no detection
if os.stat(full_path).st_size == 0:
# delete txt file
os.remove(full_path)
# delete image with different extension
for image_extension in ['jpg', 'png', 'jpeg', 'tiff', 'bmp', 'gif']:
try:
os.remove(file_name + '.' + image_extension)
break
except:
print('file extensions do not match')
print(f"deleted ({file_name})")
else:
with open(f"{target_dir}_revision.txt", "a") as file:
# only write line when the data is not deleted
file.write(line_in_file_list + "\n")
os.remove(f"{target_dir}.txt")
os.rename(f"{target_dir}_revision.txt", f"{target_dir}.txt")
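# Usage sketch (hedged): the command below only assumes the argparse flag defined above;
# the expected layout -- "data/<set>/" image and label folders next to a "data/<set>.txt"
# list file -- is inferred from the loop logic and may differ in a real dataset.
#
#   python delete_empty_detection.py --data_path data
#
# For every zero-byte label .txt the script deletes the label, tries the listed image
# extensions until one unlink succeeds, and rewrites "<set>.txt" without the deleted entries.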
| 41.808824
| 82
| 0.653887
|
import argparse
import os
from tqdm import tqdm
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', help='path for image data',
type=str, default="data")
arg = parser.parse_args()
data_path = arg.data_path
for name in os.listdir(data_path):
target_dir = os.path.join(data_path, name)
if os.path.isdir(target_dir):
print(f"\nRunning on directory {target_dir}...")
with open(f"{target_dir}.txt", "r") as file_list_text:
for file in tqdm(os.listdir(target_dir)):
full_path = os.path.join(target_dir, file)
file_name = full_path.split('.')[0]
extension = full_path.split('.')[-1]
if os.path.isfile(full_path) and extension == 'txt':
line_in_file_list = file_list_text.readline().strip()
if os.stat(full_path).st_size == 0:
os.remove(full_path)
for image_extension in ['jpg', 'png', 'jpeg', 'tiff', 'bmp', 'gif']:
try:
os.remove(file_name + '.' + image_extension)
break
except:
print('file extensions do not match')
print(f"deleted ({file_name})")
else:
with open(f"{target_dir}_revision.txt", "a") as file:
file.write(line_in_file_list + "\n")
os.remove(f"{target_dir}.txt")
os.rename(f"{target_dir}_revision.txt", f"{target_dir}.txt")
| true
| true
|
f71870bca248e095ee9b2d951727e9f1124651d1
| 599
|
py
|
Python
|
concoord/openreplica/testdnsport.py
|
liranz/concoord
|
bdb3798bf200d1cbd04bc50260cddaec6ba2a763
|
[
"BSD-3-Clause"
] | 1
|
2016-04-07T11:28:55.000Z
|
2016-04-07T11:28:55.000Z
|
concoord/openreplica/testdnsport.py
|
liranz/concoord
|
bdb3798bf200d1cbd04bc50260cddaec6ba2a763
|
[
"BSD-3-Clause"
] | null | null | null |
concoord/openreplica/testdnsport.py
|
liranz/concoord
|
bdb3798bf200d1cbd04bc50260cddaec6ba2a763
|
[
"BSD-3-Clause"
] | null | null | null |
'''
@author: Deniz Altinbuken, Emin Gun Sirer
@note: Script to check DNS Port bindings
@copyright: See LICENSE
'''
import socket
import sys
def testdnsport():
addr = 'localhost'
port = 53
thesocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
thesocket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
thesocket.setsockopt(socket.IPPROTO_TCP,socket.TCP_NODELAY,1)
thesocket.setblocking(0)
try:
thesocket.bind((addr,port))
except socket.error:
return 1
thesocket.close()
return 0
if __name__=='__main__':
sys.exit(testdnsport())
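# Hedged usage note: testdnsport() returns 0 when TCP port 53 on localhost could be bound
# (the port is free) and 1 when the bind fails, so callers can branch on the result or on
# the process exit status. A minimal sketch, not part of the original script:
#
#   if testdnsport() == 0:
#       print("DNS port 53 is available")
#   else:
#       print("DNS port 53 is already bound or needs elevated privileges")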
| 23.96
| 65
| 0.706177
|
import socket
import sys
def testdnsport():
addr = 'localhost'
port = 53
thesocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
thesocket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
thesocket.setsockopt(socket.IPPROTO_TCP,socket.TCP_NODELAY,1)
thesocket.setblocking(0)
try:
thesocket.bind((addr,port))
except socket.error:
return 1
thesocket.close()
return 0
if __name__=='__main__':
sys.exit(testdnsport())
| true
| true
|
f71871ac74a5b36090da32aa41ca195d4a98b8ac
| 4,274
|
py
|
Python
|
wavepytools/optics/fourierOptics/exampleCircularLens2Steps.py
|
APS-XSD-OPT-Group/wavepytools
|
25397c099e86a8939cc4ee3a2d266e4f809a1d18
|
[
"MIT"
] | 3
|
2019-04-12T18:28:00.000Z
|
2020-11-17T18:33:01.000Z
|
wavepytools/optics/fourierOptics/exampleCircularLens2Steps.py
|
APS-XSD-OPT-Group/wavepytools
|
25397c099e86a8939cc4ee3a2d266e4f809a1d18
|
[
"MIT"
] | null | null | null |
wavepytools/optics/fourierOptics/exampleCircularLens2Steps.py
|
APS-XSD-OPT-Group/wavepytools
|
25397c099e86a8939cc4ee3a2d266e4f809a1d18
|
[
"MIT"
] | 3
|
2019-04-19T16:46:54.000Z
|
2021-02-10T18:49:06.000Z
|
# -*- coding: utf-8 -*- #
"""
Created on Tue Mar 3 11:18:30 2015
@author: wcgrizolli
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from myFourierLib import *
sys.path.append('/home/wcgrizolli/pythonWorkspace/wgTools')
import wgTools as wgt
sys.path.append('/home/wcgrizolli/pythonWorkspace/srw/wgTools4srw')
from wgTools4srw import *
##=========================================================#
# %% sampling definition
##=========================================================#
wavelength = 1.2398e-9 # 1KeV
[Lx, Ly] = [2.5e-3, 2.5e-3]
# Mx = Lx^2/wavelength/z
[Mx, My] = [1001, 1001]
dx = Lx/Mx
dy = Ly/My
#zz = 1.00 # XXX: dist to propag
#Lx2 = Lx
zz = .00322808 # XXX: dist to propag
Lx2 = Lx/2500.0
print('WG: sampling x=' + str(Mx))
print('WG: sampling y=' + str(My))
# %%
if Mx > 1001 or My > 1001:
wgt.color_print('WG: Sampling bigger than 1001^2, stoping the program')
# sys.exit()
##=========================================================#
# %% 2D u1 function
##=========================================================#
def circ(X, Y, wx, wy, Xo=0.0, Yo=0.0): # circular
out = X*0.0
out[abs(((X-Xo)/wx)**2 + ((Y-Yo)/wy)**2) < 0.5**2] = 1.0
out[abs(((X-Xo)/wx)**2 + ((Y-Yo)/wy)**2) == 0.5**2] = .50
return out
def tFuncLens(X, Y, wavelength, fx=1e23, fy=1e23):
return np.exp(-1j*2*np.pi/wavelength/2/fx*(X**2+Y**2))
def tFuncZP(X, Y, wavelength, fx=1e23, fy=1e23):
return .5*(1.0 + np.sign(np.cos(np.pi/wavelength/fx*(X**2 + Y**2))))
wx = 200e-6
wy = 200e-6
X, Y = np.meshgrid(np.linspace(-Lx/2, Lx/2, Mx), np.linspace(-Ly/2, Ly/2, My))
print('WG: Creating Source Wave u1...')
#u1_xy = circ(X, Y, wx, wy)*tFuncZP(X, Y, wavelength, fx=zz)
u1_xy = circ(X, Y, wx, wy)*tFuncLens(X, Y, wavelength, fx=zz)
#u1_xy = circ(X, Y, wx, wy, 0, 80e-6) + circ(X, Y, wx, wy, 0,-80e-6) # double slit
print('WG: Creating Source Wave u1: DONE!')
##=========================================================#
# %% Propagation
##=========================================================#
print('WG: Propagation...')
if Lx == Lx2:
u2_xy = propTForIR(u1_xy, Lx, Ly, wavelength, zz)
X2, Y2 = X, Y
else:
u2_xy = prop2step(u1_xy, Lx, Lx2, wavelength, zz)
X2, Y2 = np.meshgrid(np.linspace(-Lx2/2, Lx2/2, Mx),
np.linspace(-Lx2/2, Lx2/2, My))
print('WG: Propagation: DONE!')
##=========================================================#
# %% Plot u1
##=========================================================#
saveFigure = 0
print('WG: Plot u1...')
factorX, unitStrX = wgt.chooseUnit(X)
factorY, unitStrY = wgt.chooseUnit(Y)
unitStrX = unitStrX + ' m'
unitStrY = unitStrY + ' m'
# %% U1
wgt.plotProfile(X*factorX, Y*factorY, np.abs(u1_xy),
r'$x [' + unitStrX +']$',
r'$y [' + unitStrY + ']$',
r'Intensity [a.u.]',
xo=0.0, yo=0.0,
unitX=unitStrX, unitY=unitStrY)
# %% U1
#wgt.plotProfile(X*factorX, Y*factorY, np.abs(u1_xy),
# r'$x [' + unitStrX +']$',
# r'$y [' + unitStrY + ']$',
# r'Intensity [a.u.]',
# xo=0.0, yo=0.0,
# unitX=unitStrX, unitY=unitStrY)
if saveFigure:
outputFigureName = wgt.datetimeNowStr() + '_u1.png'
plt.savefig(outputFigureName)
print('WG: Figure saved at %s!\n' % (outputFigureName))
plt.close()
else:
plt.show(block=True)
print('WG: Plot u1: DONE!')
##=========================================================#
# %% Plot u2
##=========================================================#
print('WG: Plot u2...')
factorX2, unitStrX2 = wgt.chooseUnit(X2)
factorY2, unitStrY2 = wgt.chooseUnit(Y2)
unitStrX2 = unitStrX2 + ' m'
unitStrY2 = unitStrY2 + ' m'
## U1
wgt.plotProfile(X2*factorX2, Y2*factorY2, np.abs(u2_xy),
r'$x [' + unitStrX2 + ']$',
r'$y [' + unitStrY2 + ']$',
r'Intensity [a.u.]',
unitX=unitStrX2, unitY=unitStrY2)
if saveFigure:
outputFigureName = wgt.datetimeNowStr() + '_u2.png'
plt.savefig(outputFigureName)
print('WG: Figure saved at %s!\n' % (outputFigureName))
plt.close()
else:
plt.show(block=True)
print('WG: Plot u2: DONE!')
# %%
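# Editorial note (derived only from the values above, so treat it as a hedged sketch):
# tFuncLens is built with fx=zz, i.e. the wave is propagated exactly to the lens focus.
# There the field shrinks to roughly wavelength*zz/wx ~ 2e-8 m, far smaller than the
# 2.5 mm input window, which is why the two-step propagator prop2step is used with the
# reduced output window Lx2 = Lx/2500 instead of the single-grid propTForIR branch.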
| 24.146893
| 83
| 0.494151
|
import sys
import numpy as np
import matplotlib.pyplot as plt
from myFourierLib import *
sys.path.append('/home/wcgrizolli/pythonWorkspace/wgTools')
import wgTools as wgt
sys.path.append('/home/wcgrizolli/pythonWorkspace/srw/wgTools4srw')
from wgTools4srw import *
wavelength = 1.2398e-9
[Lx, Ly] = [2.5e-3, 2.5e-3]
[Mx, My] = [1001, 1001]
dx = Lx/Mx
dy = Ly/My
zz = .00322808
Lx2 = Lx/2500.0
print('WG: sampling x=' + str(Mx))
print('WG: sampling y=' + str(My))
if Mx > 1001 or My > 1001:
    wgt.color_print('WG: Sampling bigger than 1001^2, stoping the program')
def circ(X, Y, wx, wy, Xo=0.0, Yo=0.0):
    out = X*0.0
    out[abs(((X-Xo)/wx)**2 + ((Y-Yo)/wy)**2) < 0.5**2] = 1.0
    out[abs(((X-Xo)/wx)**2 + ((Y-Yo)/wy)**2) == 0.5**2] = .50
    return out
def tFuncLens(X, Y, wavelength, fx=1e23, fy=1e23):
    return np.exp(-1j*2*np.pi/wavelength/2/fx*(X**2+Y**2))
def tFuncZP(X, Y, wavelength, fx=1e23, fy=1e23):
    return .5*(1.0 + np.sign(np.cos(np.pi/wavelength/fx*(X**2 + Y**2))))
wx = 200e-6
wy = 200e-6
X, Y = np.meshgrid(np.linspace(-Lx/2, Lx/2, Mx), np.linspace(-Ly/2, Ly/2, My))
print('WG: Creating Source Wave u1...')
u1_xy = circ(X, Y, wx, wy)*tFuncLens(X, Y, wavelength, fx=zz)
print('WG: Creating Source Wave u1: DONE!')
print('WG: Propagation...')
if Lx == Lx2:
    u2_xy = propTForIR(u1_xy, Lx, Ly, wavelength, zz)
    X2, Y2 = X, Y
else:
    u2_xy = prop2step(u1_xy, Lx, Lx2, wavelength, zz)
    X2, Y2 = np.meshgrid(np.linspace(-Lx2/2, Lx2/2, Mx),
                         np.linspace(-Lx2/2, Lx2/2, My))
print('WG: Propagation: DONE!')
saveFigure = 0
print('WG: Plot u1...')
factorX, unitStrX = wgt.chooseUnit(X)
factorY, unitStrY = wgt.chooseUnit(Y)
unitStrX = unitStrX + ' m'
unitStrY = unitStrY + ' m'
wgt.plotProfile(X*factorX, Y*factorY, np.abs(u1_xy),
                r'$x [' + unitStrX +']$',
                r'$y [' + unitStrY + ']$',
                r'Intensity [a.u.]',
                xo=0.0, yo=0.0,
                unitX=unitStrX, unitY=unitStrY)
if saveFigure:
    outputFigureName = wgt.datetimeNowStr() + '_u1.png'
    plt.savefig(outputFigureName)
    print('WG: Figure saved at %s!\n' % (outputFigureName))
    plt.close()
else:
    plt.show(block=True)
print('WG: Plot u1: DONE!')
print('WG: Plot u2...')
factorX2, unitStrX2 = wgt.chooseUnit(X2)
factorY2, unitStrY2 = wgt.chooseUnit(Y2)
unitStrX2 = unitStrX2 + ' m'
unitStrY2 = unitStrY2 + ' m'
wgt.plotProfile(X2*factorX2, Y2*factorY2, np.abs(u2_xy),
                r'$x [' + unitStrX2 + ']$',
                r'$y [' + unitStrY2 + ']$',
                r'Intensity [a.u.]',
                unitX=unitStrX2, unitY=unitStrY2)
if saveFigure:
    outputFigureName = wgt.datetimeNowStr() + '_u2.png'
    plt.savefig(outputFigureName)
    print('WG: Figure saved at %s!\n' % (outputFigureName))
    plt.close()
else:
    plt.show(block=True)
print('WG: Plot u2: DONE!')
| true
| true
|
f71871b675be1af1cbe9148c90930e299463b8f0
| 10,314
|
py
|
Python
|
src/busio.py
|
theacodes/Adafruit_Blinka
|
e89ea27c8b1db795949b3538dc10bec6117399ad
|
[
"MIT"
] | null | null | null |
src/busio.py
|
theacodes/Adafruit_Blinka
|
e89ea27c8b1db795949b3538dc10bec6117399ad
|
[
"MIT"
] | null | null | null |
src/busio.py
|
theacodes/Adafruit_Blinka
|
e89ea27c8b1db795949b3538dc10bec6117399ad
|
[
"MIT"
] | null | null | null |
"""
`busio` - Bus protocol support like I2C and SPI
=================================================
See `CircuitPython:busio` in CircuitPython for more details.
* Author(s): cefn
"""
import threading
from adafruit_blinka import Enum, Lockable, agnostic
from adafruit_blinka.agnostic import board_id, detector
import adafruit_platformdetect.board as ap_board
class I2C(Lockable):
def __init__(self, scl, sda, frequency=400000):
self.init(scl, sda, frequency)
def init(self, scl, sda, frequency):
self.deinit()
if detector.board.ftdi_ft232h:
from adafruit_blinka.microcontroller.ft232h.i2c import I2C
self._i2c = I2C()
return
elif detector.board.any_embedded_linux:
from adafruit_blinka.microcontroller.generic_linux.i2c import I2C as _I2C
else:
from machine import I2C as _I2C
from microcontroller.pin import i2cPorts
for portId, portScl, portSda in i2cPorts:
if scl == portScl and sda == portSda:
self._i2c = _I2C(portId, mode=_I2C.MASTER, baudrate=frequency)
break
else:
raise ValueError(
"No Hardware I2C on (scl,sda)={}\nValid I2C ports: {}".format((scl, sda), i2cPorts)
)
self._lock = threading.RLock()
def deinit(self):
try:
del self._i2c
except AttributeError:
pass
def __enter__(self):
self._lock.acquire()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._lock.release()
self.deinit()
def scan(self):
return self._i2c.scan()
def readfrom_into(self, address, buffer, *, start=0, end=None):
if start is not 0 or end is not None:
if end is None:
end = len(buffer)
buffer = memoryview(buffer)[start:end]
stop = True # remove for efficiency later
return self._i2c.readfrom_into(address, buffer, stop=stop)
def writeto(self, address, buffer, *, start=0, end=None, stop=True):
if isinstance(buffer, str):
buffer = bytes([ord(x) for x in buffer])
if start is not 0 or end is not None:
if end is None:
return self._i2c.writeto(address, memoryview(buffer)[start:], stop=stop)
else:
return self._i2c.writeto(address, memoryview(buffer)[start:end], stop=stop)
return self._i2c.writeto(address, buffer, stop=stop)
def writeto_then_readfrom(self, address, buffer_out, buffer_in, *, out_start=0, out_end=None, in_start=0, in_end=None, stop=False):
return self._i2c.writeto_then_readfrom(address, buffer_out, buffer_in,
out_start=out_start, out_end=out_end,
in_start=in_start, in_end=in_end, stop=stop)
class SPI(Lockable):
def __init__(self, clock, MOSI=None, MISO=None):
self.deinit()
if detector.board.ftdi_ft232h:
from adafruit_blinka.microcontroller.ft232h.spi import SPI as _SPI
from adafruit_blinka.microcontroller.ft232h.pin import SCK, MOSI, MISO
self._spi = _SPI()
self._pins = (SCK, MOSI, MISO)
return
elif detector.board.any_embedded_linux:
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
else:
from machine import SPI as _SPI
from microcontroller.pin import spiPorts
for portId, portSck, portMosi, portMiso in spiPorts:
if ((clock == portSck) and # Clock is required!
(MOSI == portMosi or MOSI == None) and # But can do with just output
(MISO == portMiso or MISO == None)): # Or just input
self._spi = _SPI(portId)
self._pins = (portSck, portMosi, portMiso)
break
else:
raise ValueError(
"No Hardware SPI on (SCLK, MOSI, MISO)={}\nValid SPI ports:{}".
format((clock, MOSI, MISO), spiPorts))
def configure(self, baudrate=100000, polarity=0, phase=0, bits=8):
if detector.board.any_raspberry_pi or detector.board.any_raspberry_pi_40_pin:
from adafruit_blinka.microcontroller.bcm283x.pin import Pin
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
elif detector.board.any_beaglebone:
from adafruit_blinka.microcontroller.am335x.pin import Pin
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
elif board_id == ap_board.ORANGE_PI_PC or board_id == ap_board.ORANGE_PI_R1 or board_id == ap_board.ORANGE_PI_ZERO:
from adafruit_blinka.microcontroller.allwinner_h3.pin import Pin
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
elif board_id == ap_board.GIANT_BOARD:
from adafruit_blinka.microcontroller.sama5.pin import Pin
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
elif board_id == ap_board.CORAL_EDGE_TPU_DEV:
from adafruit_blinka.microcontroller.nxp_imx8m.pin import Pin
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
elif board_id == ap_board.ODROID_C2:
from adafruit_blinka.microcontroller.amlogic.s905.pin import Pin
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
elif board_id == ap_board.DRAGONBOARD_410C:
from adafruit_blinka.microcontroller.snapdragon.apq8016.pin import Pin
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
elif board_id == ap_board.JETSON_NANO:
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
from adafruit_blinka.microcontroller.tegra.t210.pin import Pin
elif board_id == ap_board.JETSON_TX1:
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
from adafruit_blinka.microcontroller.tegra.t210.pin import Pin
elif board_id == ap_board.JETSON_TX2:
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
from adafruit_blinka.microcontroller.tegra.t186.pin import Pin
elif board_id == ap_board.JETSON_XAVIER:
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
from adafruit_blinka.microcontroller.tegra.t194.pin import Pin
elif detector.board.ftdi_ft232h:
from adafruit_blinka.microcontroller.ft232h.spi import SPI as _SPI
from adafruit_blinka.microcontroller.ft232h.pin import Pin
else:
from machine import SPI as _SPI
from machine import Pin
if self._locked:
# TODO check if #init ignores MOSI=None rather than unsetting, to save _pinIds attribute
self._spi.init(
baudrate=baudrate,
polarity=polarity,
phase=phase,
bits=bits,
firstbit=_SPI.MSB,
sck=Pin(self._pins[0].id),
mosi=Pin(self._pins[1].id),
miso=Pin(self._pins[2].id)
)
else:
raise RuntimeError("First call try_lock()")
def deinit(self):
self._spi = None
self._pinIds = None
@property
def frequency(self):
try:
return self._spi.frequency
except AttributeError:
raise NotImplementedError("Frequency attribute not implemented for this platform")
def write(self, buf, start=0, end=None):
return self._spi.write(buf, start, end)
def readinto(self, buf, start=0, end=None, write_value=0):
return self._spi.readinto(buf, start, end, write_value=write_value)
def write_readinto(self, buffer_out, buffer_in, out_start=0, out_end=None, in_start=0, in_end=None):
return self._spi.write_readinto(buffer_out, buffer_in, out_start, out_end, in_start, in_end)
class UART(Lockable):
class Parity(Enum):
pass
Parity.ODD = Parity()
Parity.EVEN = Parity()
def __init__(self,
tx,
rx,
baudrate=9600,
bits=8,
parity=None,
stop=1,
timeout=1000,
receiver_buffer_size=64,
flow=None):
if detector.board.any_embedded_linux:
raise RuntimeError('busio.UART not supported on this platform. Please use pyserial instead.')
else:
from machine import UART as _UART
from microcontroller.pin import uartPorts
self.baudrate = baudrate
if flow is not None: # default 0
raise NotImplementedError(
"Parameter '{}' unsupported on {}".format(
"flow", agnostic.board_id))
# translate parity flag for Micropython
if parity is UART.Parity.ODD:
parity = 1
elif parity is UART.Parity.EVEN:
parity = 0
elif parity is None:
pass
else:
raise ValueError("Invalid parity")
# check tx and rx have hardware support
for portId, portTx, portRx in uartPorts: #
if portTx == tx and portRx == rx:
self._uart = _UART(
portId,
baudrate,
bits=bits,
parity=parity,
stop=stop,
timeout=timeout,
read_buf_len=receiver_buffer_size
)
break
else:
raise ValueError(
"No Hardware UART on (tx,rx)={}\nValid UART ports: {}".format((tx, rx), uartPorts)
)
def deinit(self):
self._uart = None
def read(self, nbytes=None):
return self._uart.read(nbytes)
def readinto(self, buf, nbytes=None):
return self._uart.readinto(buf, nbytes)
def readline(self):
return self._uart.readline()
def write(self, buf):
return self._uart.write(buf)
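# A minimal usage sketch (hedged): it assumes a supported board so that board.SCL and
# board.SDA resolve to hardware pins, and it follows the usual CircuitPython locking
# protocol inherited from Lockable; none of that is checked here.
#
#   import board
#   import busio
#   i2c = busio.I2C(board.SCL, board.SDA, frequency=400000)
#   while not i2c.try_lock():
#       pass
#   try:
#       print("I2C devices found:", [hex(addr) for addr in i2c.scan()])
#   finally:
#       i2c.unlock()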
| 40.132296
| 135
| 0.610432
|
import threading
from adafruit_blinka import Enum, Lockable, agnostic
from adafruit_blinka.agnostic import board_id, detector
import adafruit_platformdetect.board as ap_board
class I2C(Lockable):
def __init__(self, scl, sda, frequency=400000):
self.init(scl, sda, frequency)
def init(self, scl, sda, frequency):
self.deinit()
if detector.board.ftdi_ft232h:
from adafruit_blinka.microcontroller.ft232h.i2c import I2C
self._i2c = I2C()
return
elif detector.board.any_embedded_linux:
from adafruit_blinka.microcontroller.generic_linux.i2c import I2C as _I2C
else:
from machine import I2C as _I2C
from microcontroller.pin import i2cPorts
for portId, portScl, portSda in i2cPorts:
if scl == portScl and sda == portSda:
self._i2c = _I2C(portId, mode=_I2C.MASTER, baudrate=frequency)
break
else:
raise ValueError(
"No Hardware I2C on (scl,sda)={}\nValid I2C ports: {}".format((scl, sda), i2cPorts)
)
self._lock = threading.RLock()
def deinit(self):
try:
del self._i2c
except AttributeError:
pass
def __enter__(self):
self._lock.acquire()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._lock.release()
self.deinit()
def scan(self):
return self._i2c.scan()
def readfrom_into(self, address, buffer, *, start=0, end=None):
if start is not 0 or end is not None:
if end is None:
end = len(buffer)
buffer = memoryview(buffer)[start:end]
stop = True
return self._i2c.readfrom_into(address, buffer, stop=stop)
def writeto(self, address, buffer, *, start=0, end=None, stop=True):
if isinstance(buffer, str):
buffer = bytes([ord(x) for x in buffer])
if start is not 0 or end is not None:
if end is None:
return self._i2c.writeto(address, memoryview(buffer)[start:], stop=stop)
else:
return self._i2c.writeto(address, memoryview(buffer)[start:end], stop=stop)
return self._i2c.writeto(address, buffer, stop=stop)
def writeto_then_readfrom(self, address, buffer_out, buffer_in, *, out_start=0, out_end=None, in_start=0, in_end=None, stop=False):
return self._i2c.writeto_then_readfrom(address, buffer_out, buffer_in,
out_start=out_start, out_end=out_end,
in_start=in_start, in_end=in_end, stop=stop)
class SPI(Lockable):
def __init__(self, clock, MOSI=None, MISO=None):
self.deinit()
if detector.board.ftdi_ft232h:
from adafruit_blinka.microcontroller.ft232h.spi import SPI as _SPI
from adafruit_blinka.microcontroller.ft232h.pin import SCK, MOSI, MISO
self._spi = _SPI()
self._pins = (SCK, MOSI, MISO)
return
elif detector.board.any_embedded_linux:
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
else:
from machine import SPI as _SPI
from microcontroller.pin import spiPorts
for portId, portSck, portMosi, portMiso in spiPorts:
if ((clock == portSck) and
(MOSI == portMosi or MOSI == None) and
(MISO == portMiso or MISO == None)):
self._spi = _SPI(portId)
self._pins = (portSck, portMosi, portMiso)
break
else:
raise ValueError(
"No Hardware SPI on (SCLK, MOSI, MISO)={}\nValid SPI ports:{}".
format((clock, MOSI, MISO), spiPorts))
def configure(self, baudrate=100000, polarity=0, phase=0, bits=8):
if detector.board.any_raspberry_pi or detector.board.any_raspberry_pi_40_pin:
from adafruit_blinka.microcontroller.bcm283x.pin import Pin
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
elif detector.board.any_beaglebone:
from adafruit_blinka.microcontroller.am335x.pin import Pin
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
elif board_id == ap_board.ORANGE_PI_PC or board_id == ap_board.ORANGE_PI_R1 or board_id == ap_board.ORANGE_PI_ZERO:
from adafruit_blinka.microcontroller.allwinner_h3.pin import Pin
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
elif board_id == ap_board.GIANT_BOARD:
from adafruit_blinka.microcontroller.sama5.pin import Pin
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
elif board_id == ap_board.CORAL_EDGE_TPU_DEV:
from adafruit_blinka.microcontroller.nxp_imx8m.pin import Pin
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
elif board_id == ap_board.ODROID_C2:
from adafruit_blinka.microcontroller.amlogic.s905.pin import Pin
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
elif board_id == ap_board.DRAGONBOARD_410C:
from adafruit_blinka.microcontroller.snapdragon.apq8016.pin import Pin
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
elif board_id == ap_board.JETSON_NANO:
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
from adafruit_blinka.microcontroller.tegra.t210.pin import Pin
elif board_id == ap_board.JETSON_TX1:
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
from adafruit_blinka.microcontroller.tegra.t210.pin import Pin
elif board_id == ap_board.JETSON_TX2:
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
from adafruit_blinka.microcontroller.tegra.t186.pin import Pin
elif board_id == ap_board.JETSON_XAVIER:
from adafruit_blinka.microcontroller.generic_linux.spi import SPI as _SPI
from adafruit_blinka.microcontroller.tegra.t194.pin import Pin
elif detector.board.ftdi_ft232h:
from adafruit_blinka.microcontroller.ft232h.spi import SPI as _SPI
from adafruit_blinka.microcontroller.ft232h.pin import Pin
else:
from machine import SPI as _SPI
from machine import Pin
if self._locked:
            self._spi.init(
                baudrate=baudrate,
                polarity=polarity,
phase=phase,
bits=bits,
firstbit=_SPI.MSB,
sck=Pin(self._pins[0].id),
mosi=Pin(self._pins[1].id),
miso=Pin(self._pins[2].id)
)
else:
raise RuntimeError("First call try_lock()")
def deinit(self):
self._spi = None
self._pinIds = None
@property
def frequency(self):
try:
return self._spi.frequency
except AttributeError:
raise NotImplementedError("Frequency attribute not implemented for this platform")
def write(self, buf, start=0, end=None):
return self._spi.write(buf, start, end)
def readinto(self, buf, start=0, end=None, write_value=0):
return self._spi.readinto(buf, start, end, write_value=write_value)
def write_readinto(self, buffer_out, buffer_in, out_start=0, out_end=None, in_start=0, in_end=None):
return self._spi.write_readinto(buffer_out, buffer_in, out_start, out_end, in_start, in_end)
class UART(Lockable):
class Parity(Enum):
pass
Parity.ODD = Parity()
Parity.EVEN = Parity()
def __init__(self,
tx,
rx,
baudrate=9600,
bits=8,
parity=None,
stop=1,
timeout=1000,
receiver_buffer_size=64,
flow=None):
if detector.board.any_embedded_linux:
raise RuntimeError('busio.UART not supported on this platform. Please use pyserial instead.')
else:
from machine import UART as _UART
from microcontroller.pin import uartPorts
self.baudrate = baudrate
if flow is not None:
raise NotImplementedError(
"Parameter '{}' unsupported on {}".format(
"flow", agnostic.board_id))
if parity is UART.Parity.ODD:
parity = 1
elif parity is UART.Parity.EVEN:
parity = 0
elif parity is None:
pass
else:
raise ValueError("Invalid parity")
for portId, portTx, portRx in uartPorts:
if portTx == tx and portRx == rx:
self._uart = _UART(
portId,
baudrate,
bits=bits,
parity=parity,
stop=stop,
timeout=timeout,
read_buf_len=receiver_buffer_size
)
break
else:
raise ValueError(
"No Hardware UART on (tx,rx)={}\nValid UART ports: {}".format((tx, rx), uartPorts)
)
def deinit(self):
self._uart = None
def read(self, nbytes=None):
return self._uart.read(nbytes)
def readinto(self, buf, nbytes=None):
return self._uart.readinto(buf, nbytes)
def readline(self):
return self._uart.readline()
def write(self, buf):
return self._uart.write(buf)
| true
| true
|
f71874666f031e2f40b1def6bb96f40e949bdb3b
| 2,497
|
py
|
Python
|
pytorch_toolbelt/modules/encoders/timm/common.py
|
George-Jiao/pytorch-toolbelt
|
920e03876805351ed5645e439a64074cb4f37589
|
[
"MIT"
] | 1
|
2021-08-18T07:05:50.000Z
|
2021-08-18T07:05:50.000Z
|
pytorch_toolbelt/modules/encoders/timm/common.py
|
George-Jiao/pytorch-toolbelt
|
920e03876805351ed5645e439a64074cb4f37589
|
[
"MIT"
] | null | null | null |
pytorch_toolbelt/modules/encoders/timm/common.py
|
George-Jiao/pytorch-toolbelt
|
920e03876805351ed5645e439a64074cb4f37589
|
[
"MIT"
] | null | null | null |
import math
import warnings
import torch
from typing import List, Union
from torch import Tensor, nn
from ..common import EncoderModule, _take
__all__ = ["GenericTimmEncoder", "make_n_channel_input_std_conv"]
class GenericTimmEncoder(EncoderModule):
def __init__(self, timm_encoder: Union[nn.Module, str], layers: List[int] = None):
strides = []
channels = []
default_layers = []
if isinstance(timm_encoder, str):
import timm.models.factory
timm_encoder = timm.models.factory.create_model(timm_encoder, pretrained=True)
for i, oi in enumerate(timm_encoder.feature_info.out_indices):
fi = timm_encoder.feature_info.info[i]
strides.append(fi["reduction"])
channels.append(fi["num_chs"])
default_layers.append(i)
if layers is None:
layers = default_layers
super().__init__(channels, strides, layers)
self.encoder = timm_encoder
def forward(self, x: Tensor) -> List[Tensor]:
return _take(self.encoder(x), self._layers)
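# Hedged usage sketch (assumes the timm package is installed and that the chosen model
# exposes feature_info with out_indices, e.g. one built with features_only=True;
# "resnet34" is only an illustrative name):
#
#   import timm, torch
#   backbone = timm.create_model("resnet34", pretrained=False, features_only=True)
#   encoder = GenericTimmEncoder(backbone)
#   feature_maps = encoder(torch.randn(1, 3, 224, 224))  # one tensor per selected stage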
def make_n_channel_input_std_conv(conv: nn.Module, in_channels: int, mode="auto", **kwargs) -> nn.Module:
"""
Return the same convolution class but with desired number of channels
Args:
conv: Input nn.Conv2D object to copy settings/weights from
in_channels: Desired number of input channels
mode:
**kwargs: Optional overrides for Conv2D parameters
"""
conv_cls = conv.__class__
if conv.in_channels == in_channels:
warnings.warn("make_n_channel_input call is spurious")
return conv
new_conv = conv_cls(
in_channels,
out_channels=conv.out_channels,
kernel_size=kwargs.get("kernel_size", conv.kernel_size),
stride=kwargs.get("stride", conv.stride),
padding=kwargs.get("padding", conv.padding),
dilation=kwargs.get("dilation", conv.dilation),
groups=kwargs.get("groups", conv.groups),
bias=kwargs.get("bias", conv.bias is not None),
eps=kwargs.get("eps", conv.eps),
)
w = conv.weight
if in_channels > conv.in_channels:
n = math.ceil(in_channels / float(conv.in_channels))
w = torch.cat([w] * n, dim=1)
w = w[:, :in_channels, ...]
new_conv.weight = nn.Parameter(w, requires_grad=True)
else:
w = w[:, 0:in_channels, ...]
new_conv.weight = nn.Parameter(w, requires_grad=True)
return new_conv
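# A hedged example of re-channeling a weight-standardized conv. ScaledStdConv2d is used
# purely as an illustration of a conv class that carries the eps attribute this helper
# copies; any std-conv variant with the same constructor arguments would do:
#
#   from timm.models.layers import ScaledStdConv2d
#   conv = ScaledStdConv2d(3, 64, kernel_size=7, stride=2, padding=3)
#   conv6 = make_n_channel_input_std_conv(conv, in_channels=6)
#   # weights for the extra channels are tiled copies of the original three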
| 32.012821
| 105
| 0.647177
|
import math
import warnings
import torch
from typing import List, Union
from torch import Tensor, nn
from ..common import EncoderModule, _take
__all__ = ["GenericTimmEncoder", "make_n_channel_input_std_conv"]
class GenericTimmEncoder(EncoderModule):
def __init__(self, timm_encoder: Union[nn.Module, str], layers: List[int] = None):
strides = []
channels = []
default_layers = []
if isinstance(timm_encoder, str):
import timm.models.factory
timm_encoder = timm.models.factory.create_model(timm_encoder, pretrained=True)
for i, oi in enumerate(timm_encoder.feature_info.out_indices):
fi = timm_encoder.feature_info.info[i]
strides.append(fi["reduction"])
channels.append(fi["num_chs"])
default_layers.append(i)
if layers is None:
layers = default_layers
super().__init__(channels, strides, layers)
self.encoder = timm_encoder
def forward(self, x: Tensor) -> List[Tensor]:
return _take(self.encoder(x), self._layers)
def make_n_channel_input_std_conv(conv: nn.Module, in_channels: int, mode="auto", **kwargs) -> nn.Module:
conv_cls = conv.__class__
if conv.in_channels == in_channels:
warnings.warn("make_n_channel_input call is spurious")
return conv
new_conv = conv_cls(
in_channels,
out_channels=conv.out_channels,
kernel_size=kwargs.get("kernel_size", conv.kernel_size),
stride=kwargs.get("stride", conv.stride),
padding=kwargs.get("padding", conv.padding),
dilation=kwargs.get("dilation", conv.dilation),
groups=kwargs.get("groups", conv.groups),
bias=kwargs.get("bias", conv.bias is not None),
eps=kwargs.get("eps", conv.eps),
)
w = conv.weight
if in_channels > conv.in_channels:
n = math.ceil(in_channels / float(conv.in_channels))
w = torch.cat([w] * n, dim=1)
w = w[:, :in_channels, ...]
new_conv.weight = nn.Parameter(w, requires_grad=True)
else:
w = w[:, 0:in_channels, ...]
new_conv.weight = nn.Parameter(w, requires_grad=True)
return new_conv
| true
| true
|
f71874cba60460437882bdc45ed1583f94262ca5
| 1,184
|
py
|
Python
|
go.py
|
goldtime1987/pyQTGraph
|
0ab1e341907f791c21980dbf3ea79b15977a0e33
|
[
"MIT"
] | 238
|
2016-07-31T16:11:22.000Z
|
2022-03-25T19:20:56.000Z
|
2016-07-31_qt_PyQtGraph_sine_scroll/go.py
|
jradler-wassoc/Python-GUI-examples
|
97193758d9f8f57f304f95959403f1db84c3c0b0
|
[
"MIT"
] | 12
|
2016-11-07T17:22:50.000Z
|
2020-07-09T14:39:48.000Z
|
2016-07-31_qt_PyQtGraph_sine_scroll/go.py
|
jradler-wassoc/Python-GUI-examples
|
97193758d9f8f57f304f95959403f1db84c3c0b0
|
[
"MIT"
] | 191
|
2016-08-10T01:44:51.000Z
|
2022-01-03T01:39:08.000Z
|
from PyQt4 import QtGui,QtCore
import sys
import ui_main
import numpy as np
import pylab
import time
import pyqtgraph
class ExampleApp(QtGui.QMainWindow, ui_main.Ui_MainWindow):
def __init__(self, parent=None):
pyqtgraph.setConfigOption('background', 'w') #before loading widget
super(ExampleApp, self).__init__(parent)
self.setupUi(self)
self.btnAdd.clicked.connect(self.update)
self.grPlot.plotItem.showGrid(True, True, 0.7)
def update(self):
t1=time.clock()
points=100 #number of data points
X=np.arange(points)
Y=np.sin(np.arange(points)/points*3*np.pi+time.time())
C=pyqtgraph.hsvColor(time.time()/5%1,alpha=.5)
pen=pyqtgraph.mkPen(color=C,width=10)
self.grPlot.plot(X,Y,pen=pen,clear=True)
print("update took %.02f ms"%((time.clock()-t1)*1000))
if self.chkMore.isChecked():
QtCore.QTimer.singleShot(1, self.update) # QUICKLY repeat
if __name__=="__main__":
app = QtGui.QApplication(sys.argv)
form = ExampleApp()
form.show()
form.update() #start with something
app.exec_()
print("DONE")
| 33.828571
| 76
| 0.643581
|
from PyQt4 import QtGui,QtCore
import sys
import ui_main
import numpy as np
import pylab
import time
import pyqtgraph
class ExampleApp(QtGui.QMainWindow, ui_main.Ui_MainWindow):
def __init__(self, parent=None):
pyqtgraph.setConfigOption('background', 'w')
super(ExampleApp, self).__init__(parent)
self.setupUi(self)
self.btnAdd.clicked.connect(self.update)
self.grPlot.plotItem.showGrid(True, True, 0.7)
def update(self):
t1=time.clock()
points=100
X=np.arange(points)
Y=np.sin(np.arange(points)/points*3*np.pi+time.time())
C=pyqtgraph.hsvColor(time.time()/5%1,alpha=.5)
pen=pyqtgraph.mkPen(color=C,width=10)
self.grPlot.plot(X,Y,pen=pen,clear=True)
print("update took %.02f ms"%((time.clock()-t1)*1000))
if self.chkMore.isChecked():
QtCore.QTimer.singleShot(1, self.update)
if __name__=="__main__":
app = QtGui.QApplication(sys.argv)
form = ExampleApp()
form.show()
form.update()
app.exec_()
print("DONE")
| true
| true
|
f71874e9bbb685b97aee7b1ea9ac4bc50e8a3bfc
| 11,660
|
py
|
Python
|
cinder/tests/unit/api/contrib/test_volume_replication.py
|
rackerlabs/cinder
|
4295ff0a64f781c3546f6c6e0816dbb8100133cb
|
[
"Apache-2.0"
] | 1
|
2019-02-08T05:24:58.000Z
|
2019-02-08T05:24:58.000Z
|
cinder/tests/unit/api/contrib/test_volume_replication.py
|
rackerlabs/cinder
|
4295ff0a64f781c3546f6c6e0816dbb8100133cb
|
[
"Apache-2.0"
] | 1
|
2021-03-21T11:38:29.000Z
|
2021-03-21T11:38:29.000Z
|
cinder/tests/unit/api/contrib/test_volume_replication.py
|
rackerlabs/cinder
|
4295ff0a64f781c3546f6c6e0816dbb8100133cb
|
[
"Apache-2.0"
] | 15
|
2017-01-12T10:35:10.000Z
|
2019-04-19T08:22:10.000Z
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for volume replication API code.
"""
import json
import mock
from oslo_config import cfg
import webob
from cinder import context
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import utils as tests_utils
CONF = cfg.CONF
def app():
# no auth, just let environ['cinder.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
class VolumeReplicationAPITestCase(test.TestCase):
"""Test Cases for replication API."""
def setUp(self):
super(VolumeReplicationAPITestCase, self).setUp()
self.ctxt = context.RequestContext('admin', 'fake', True)
self.volume_params = {
'host': CONF.host,
'size': 1}
def _get_resp(self, operation, volume_id, xml=False):
"""Helper for a replication action req for the specified volume_id."""
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume_id)
req.method = 'POST'
if xml:
body = '<os-%s-replica/>' % operation
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
req.body = body
else:
body = {'os-%s-replica' % operation: ''}
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
req.environ['cinder.context'] = context.RequestContext('admin',
'fake',
True)
res = req.get_response(app())
return req, res
def test_promote_bad_id(self):
(req, res) = self._get_resp('promote', 'fake')
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def test_promote_bad_id_xml(self):
(req, res) = self._get_resp('promote', 'fake', xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def test_promote_volume_not_replicated(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
def test_promote_volume_not_replicated_xml(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_volume_status(self,
_rpcapi_promote):
for status in ['error', 'in-use']:
volume = tests_utils.create_volume(self.ctxt,
status = status,
replication_status = 'active',
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
for status in ['available']:
volume = tests_utils.create_volume(self.ctxt,
status = status,
replication_status = 'active',
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_volume_status_xml(self,
_rpcapi_promote):
for status in ['error', 'in-use']:
volume = tests_utils.create_volume(self.ctxt,
status = status,
replication_status = 'active',
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
for status in ['available']:
volume = tests_utils.create_volume(self.ctxt,
status = status,
replication_status = 'active',
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_replication_status(self,
_rpcapi_promote):
        for status in ['error', 'copying', 'inactive']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('promote', volume['id'])
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(400, res.status_int, msg)
        for status in ['active', 'active-stopped']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('promote', volume['id'])
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_replication_status_xml(self,
_rpcapi_promote):
        for status in ['error', 'copying', 'inactive']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('promote', volume['id'], xml=True)
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(400, res.status_int, msg)
        for status in ['active', 'active-stopped']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('promote', volume['id'], xml=True)
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(202, res.status_int, msg)
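    # Re-enable requests follow the same pattern: unknown volume ids return
    # 404 and volumes that are not replicated return 400.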
def test_reenable_bad_id(self):
(req, res) = self._get_resp('reenable', 'fake')
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def test_reenable_bad_id_xml(self):
(req, res) = self._get_resp('reenable', 'fake', xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def test_reenable_volume_not_replicated(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
def test_reenable_volume_not_replicated_xml(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
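    # Re-enabling replication is only accepted (202) when replication_status
    # is 'inactive', 'active-stopped' or 'error'; 'active' and 'copying' are
    # rejected with 400.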
@mock.patch('cinder.volume.rpcapi.VolumeAPI.reenable_replication')
def test_reenable_replication_replication_status(self,
_rpcapi_promote):
        for status in ['active', 'copying']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('reenable', volume['id'])
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(400, res.status_int, msg)
        for status in ['inactive', 'active-stopped', 'error']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('reenable', volume['id'])
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.reenable_replication')
def test_reenable_replication_replication_status_xml(self,
_rpcapi_promote):
        for status in ['active', 'copying']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('reenable', volume['id'], xml=True)
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(400, res.status_int, msg)
        for status in ['inactive', 'active-stopped', 'error']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('reenable', volume['id'], xml=True)
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(202, res.status_int, msg)
| 47.206478
| 78
| 0.513722
|
import json
import mock
from oslo_config import cfg
import webob
from cinder import context
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import utils as tests_utils
CONF = cfg.CONF
def app():
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
class VolumeReplicationAPITestCase(test.TestCase):
def setUp(self):
super(VolumeReplicationAPITestCase, self).setUp()
self.ctxt = context.RequestContext('admin', 'fake', True)
self.volume_params = {
'host': CONF.host,
'size': 1}
def _get_resp(self, operation, volume_id, xml=False):
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume_id)
req.method = 'POST'
if xml:
body = '<os-%s-replica/>' % operation
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
req.body = body
else:
body = {'os-%s-replica' % operation: ''}
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
req.environ['cinder.context'] = context.RequestContext('admin',
'fake',
True)
res = req.get_response(app())
return req, res
def test_promote_bad_id(self):
(req, res) = self._get_resp('promote', 'fake')
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def test_promote_bad_id_xml(self):
(req, res) = self._get_resp('promote', 'fake', xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def test_promote_volume_not_replicated(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
def test_promote_volume_not_replicated_xml(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_volume_status(self,
_rpcapi_promote):
        for status in ['error', 'in-use']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status=status,
                                               replication_status='active',
                                               **self.volume_params)
            (req, res) = self._get_resp('promote', volume['id'])
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(400, res.status_int, msg)
        for status in ['available']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status=status,
                                               replication_status='active',
                                               **self.volume_params)
            (req, res) = self._get_resp('promote', volume['id'])
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_volume_status_xml(self,
_rpcapi_promote):
        for status in ['error', 'in-use']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status=status,
                                               replication_status='active',
                                               **self.volume_params)
            (req, res) = self._get_resp('promote', volume['id'], xml=True)
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(400, res.status_int, msg)
        for status in ['available']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status=status,
                                               replication_status='active',
                                               **self.volume_params)
            (req, res) = self._get_resp('promote', volume['id'], xml=True)
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_replication_status(self,
_rpcapi_promote):
        for status in ['error', 'copying', 'inactive']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('promote', volume['id'])
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(400, res.status_int, msg)
        for status in ['active', 'active-stopped']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('promote', volume['id'])
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_replication_status_xml(self,
_rpcapi_promote):
        for status in ['error', 'copying', 'inactive']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('promote', volume['id'], xml=True)
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(400, res.status_int, msg)
        for status in ['active', 'active-stopped']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('promote', volume['id'], xml=True)
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(202, res.status_int, msg)
def test_reenable_bad_id(self):
(req, res) = self._get_resp('reenable', 'fake')
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def test_reenable_bad_id_xml(self):
(req, res) = self._get_resp('reenable', 'fake', xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def test_reenable_volume_not_replicated(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
def test_reenable_volume_not_replicated_xml(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.reenable_replication')
def test_reenable_replication_replication_status(self,
_rpcapi_promote):
        for status in ['active', 'copying']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('reenable', volume['id'])
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(400, res.status_int, msg)
        for status in ['inactive', 'active-stopped', 'error']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('reenable', volume['id'])
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.reenable_replication')
def test_reenable_replication_replication_status_xml(self,
_rpcapi_promote):
        for status in ['active', 'copying']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('reenable', volume['id'], xml=True)
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(400, res.status_int, msg)
        for status in ['inactive', 'active-stopped', 'error']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
            (req, res) = self._get_resp('reenable', volume['id'], xml=True)
            msg = ("request: %s\nresult: %s" % (req, res))
            self.assertEqual(202, res.status_int, msg)
| true
| true
|