code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from flask import Blueprint, jsonify, session, request, redirect, url_for, render_template
from flask_dance.consumer import OAuth2ConsumerBlueprint
import logging
import json
import oauthlib
import datetime
import traceback
import urllib.parse
from datetime import timezone
from activity.activity import activity
from config import Config
import os
log = logging.getLogger(__name__)
conf = Config().data
client_id = conf['oauth']['client_id']
client_secret = conf['oauth']['client_secret']
oauth_url = conf['oauth']['url']
oauth_realm = conf['oauth']['realm']
hub_redirect_url = conf['hub_redirect_url']
selfserve = OAuth2ConsumerBlueprint(
"kc", 'selfserve',
client_id=client_id,
client_secret=client_secret,
base_url="%s/auth/realms/%s/protocol/openid-connect/" % (oauth_url, oauth_realm),
token_url="%s/auth/realms/%s/protocol/openid-connect/token" % (oauth_url, oauth_realm),
authorization_url="%s/auth/realms/%s/protocol/openid-connect/auth" % (oauth_url, oauth_realm),
redirect_to="kc._selfserve"
)
@selfserve.route("/logout")
def logout():
session.clear()
return redirect(url_for("kc.login"))
@selfserve.route("/")
def _selfserve():
try:
if not selfserve.session.authorized:
return redirect(url_for("kc.login"))
resp = selfserve.session.get("/auth/realms/%s/protocol/openid-connect/userinfo" % oauth_realm)
assert resp.ok
js = resp.json()
if 'groups' in js:
groups = js['groups']
else:
groups = []
session['groups'] = groups
session['policy'] = resp.json()['policy']
session['username'] = resp.json()['preferred_username']
session['jwt_info'] = json.dumps(resp.json(), indent=4, sort_keys=True)
activity ('access', '', '', session['username'], True, "Access Granted")
return redirect(url_for("kc.main"))
except oauthlib.oauth2.rfc6749.errors.TokenExpiredError as ex:
return redirect(url_for("kc.login"))
@selfserve.route("/main")
def main():
if not selfserve.session.authorized:
return redirect(url_for("kc.login"))
if not 'groups' in session:
return render_template('error.html', message = "Access Denied", logout_url=logout_url())
message = None
if len(session['groups']) == 0:
message = "You currently do not have any projects assigned to your account. Contact your project administrator to get your account added to the project."
is_project_assigned = session['policy'] != 'no-access'
return render_template('selfserve/index.html', hub_redirect_url=hub_redirect_url, is_project_assigned=is_project_assigned, message=message, logout_url=logout_url(), jwt_info=session['jwt_info'], username=session['username'], tab={"registration":"show active"})
@selfserve.route('/groups',
methods=['GET'], strict_slashes=False)
def view_groups() -> object:
if not 'groups' in session:
return render_template('error.html', message = "Access Denied")
return json.dumps(session['groups'])
def do_render_template(**args):
if 'repository' in args['data']:
team = get_sae_project(session['groups'])
actor = session['username']
activity (args['action'], args['data']['repository'], team, actor, args['success'], args['message'])
linked_repos = get_linked_repos()
return render_template('index.html', **args, repo_list=linked_repos, unlinked_repo_list=get_unlinked_repos(), noshares_repo_list=get_noshares_repos(linked_repos), groups=session['groups'], project=get_sae_project(session['groups']))
def logout_url():
return "%s/auth/realms/%s/protocol/openid-connect/logout?%s" % (oauth_url, oauth_realm, urllib.parse.urlencode({'redirect_uri':url_for("kc.logout", _external=True)}) ) | project-api/server/routes/selfserve.py | from flask import Blueprint, jsonify, session, request, redirect, url_for, render_template
from flask_dance.consumer import OAuth2ConsumerBlueprint
import logging
import json
import oauthlib
import datetime
import traceback
import urllib.parse
from datetime import timezone
from activity.activity import activity
from config import Config
import os
log = logging.getLogger(__name__)
conf = Config().data
client_id = conf['oauth']['client_id']
client_secret = conf['oauth']['client_secret']
oauth_url = conf['oauth']['url']
oauth_realm = conf['oauth']['realm']
hub_redirect_url = conf['hub_redirect_url']
selfserve = OAuth2ConsumerBlueprint(
"kc", 'selfserve',
client_id=client_id,
client_secret=client_secret,
base_url="%s/auth/realms/%s/protocol/openid-connect/" % (oauth_url, oauth_realm),
token_url="%s/auth/realms/%s/protocol/openid-connect/token" % (oauth_url, oauth_realm),
authorization_url="%s/auth/realms/%s/protocol/openid-connect/auth" % (oauth_url, oauth_realm),
redirect_to="kc._selfserve"
)
@selfserve.route("/logout")
def logout():
session.clear()
return redirect(url_for("kc.login"))
@selfserve.route("/")
def _selfserve():
try:
if not selfserve.session.authorized:
return redirect(url_for("kc.login"))
resp = selfserve.session.get("/auth/realms/%s/protocol/openid-connect/userinfo" % oauth_realm)
assert resp.ok
js = resp.json()
if 'groups' in js:
groups = js['groups']
else:
groups = []
session['groups'] = groups
session['policy'] = resp.json()['policy']
session['username'] = resp.json()['preferred_username']
session['jwt_info'] = json.dumps(resp.json(), indent=4, sort_keys=True)
activity ('access', '', '', session['username'], True, "Access Granted")
return redirect(url_for("kc.main"))
except oauthlib.oauth2.rfc6749.errors.TokenExpiredError as ex:
return redirect(url_for("kc.login"))
@selfserve.route("/main")
def main():
if not selfserve.session.authorized:
return redirect(url_for("kc.login"))
if not 'groups' in session:
return render_template('error.html', message = "Access Denied", logout_url=logout_url())
message = None
if len(session['groups']) == 0:
message = "You currently do not have any projects assigned to your account. Contact your project administrator to get your account added to the project."
is_project_assigned = session['policy'] != 'no-access'
return render_template('selfserve/index.html', hub_redirect_url=hub_redirect_url, is_project_assigned=is_project_assigned, message=message, logout_url=logout_url(), jwt_info=session['jwt_info'], username=session['username'], tab={"registration":"show active"})
@selfserve.route('/groups',
methods=['GET'], strict_slashes=False)
def view_groups() -> object:
if not 'groups' in session:
return render_template('error.html', message = "Access Denied")
return json.dumps(session['groups'])
def do_render_template(**args):
if 'repository' in args['data']:
team = get_sae_project(session['groups'])
actor = session['username']
activity (args['action'], args['data']['repository'], team, actor, args['success'], args['message'])
linked_repos = get_linked_repos()
return render_template('index.html', **args, repo_list=linked_repos, unlinked_repo_list=get_unlinked_repos(), noshares_repo_list=get_noshares_repos(linked_repos), groups=session['groups'], project=get_sae_project(session['groups']))
def logout_url():
return "%s/auth/realms/%s/protocol/openid-connect/logout?%s" % (oauth_url, oauth_realm, urllib.parse.urlencode({'redirect_uri':url_for("kc.logout", _external=True)}) ) | 0.290176 | 0.031232 |
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.models import User
from django.utils.crypto import get_random_string
from todo.models import Lst
from hier.models import Lst
app_name = 'store'
#----------------------------------
# deprecated
class Group(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=_('user'))
code = models.CharField(_('code'), max_length=100, blank = True)
name = models.CharField(_('name'), max_length=300)
uuid = models.CharField(_('UUID'), max_length=100, blank = True)
creation = models.DateTimeField(_('creation time'), null = True, auto_now_add = True)
last_mod = models.DateTimeField(_('last modification time'), null = True, auto_now = True)
#----------------------------------
class Entry(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=_('user'))
title = models.CharField(_('title'), max_length=500)
username = models.CharField(_('username'), max_length=150, blank=True)
value = models.CharField(_('value'), max_length=128)
url = models.CharField(_('URL'), max_length=2000, blank = True)
notes = models.TextField(_('notes'), blank = True, null = True)
uuid = models.CharField(_('UUID'), max_length=100, blank = True)
created = models.DateTimeField(_('creation time'), auto_now_add = True)
last_mod = models.DateTimeField(_('last modification time'), blank = True, auto_now = True, null = True)
# group - deprecated
group = models.ForeignKey(Group, verbose_name = _('group'), on_delete = models.CASCADE, null = True)
actual = models.IntegerField(_('actual'), default = 1)
categories = models.CharField(_('categories'), max_length = 2000, blank = True, default = '', null = True)
params = models.IntegerField(_('generator parameters used'), default = 0, null = True)
lst = models.ForeignKey(Lst, on_delete = models.CASCADE, verbose_name = _('list'), blank = True, null = True)
@classmethod
def get_new_value(cls, user):
if (len(Params.objects.filter(user = user.id)) > 0):
params = Params.objects.filter(user = user.id)[0]
else:
params = Params.objects.create(user = user)
allowed_chars = ''
if params.uc:
allowed_chars = allowed_chars + 'ABCDEFGHJKLMNPQRSTUVWXYZ'
if not params.ac:
allowed_chars = allowed_chars + 'IO'
if params.lc:
allowed_chars = allowed_chars + 'abcdefghjkmnpqrstuvwxyz'
if not params.ac:
allowed_chars = allowed_chars + 'io'
if params.dg:
allowed_chars = allowed_chars + '23456789'
if not params.ac:
allowed_chars = allowed_chars + '10'
if params.sp:
allowed_chars = allowed_chars + '!@#$%^&*=+'
if params.br:
allowed_chars = allowed_chars + '()[]{}<>'
if params.mi:
allowed_chars = allowed_chars + '-'
if params.ul:
allowed_chars = allowed_chars + '_'
if (allowed_chars == ''):
allowed_chars = 'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789!@#$%^&*(-_=+)'
ret_params = 0
if params.uc:
ret_params += 1
if params.lc:
ret_params += 2
if params.dg:
ret_params += 4
if params.sp:
ret_params += 8
if params.br:
ret_params += 16
if params.mi:
ret_params += 32
if params.ul:
ret_params += 64
if params.ac:
ret_params += 128
ret_value = get_random_string(params.ln, allowed_chars)
return ret_params, params.un, ret_value
#----------------------------------
# deprecated
class History(models.Model):
node = models.ForeignKey(Entry, verbose_name = _('node'), on_delete = models.CASCADE, related_name='node')
data = models.ForeignKey(Entry, verbose_name = _('entry'), on_delete = models.CASCADE, related_name='data')
#----------------------------------
class Params(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=_('user'), related_name='store_user')
ln = models.IntegerField(_('length').capitalize(), default = 20)
uc = models.BooleanField(_('upper case').capitalize(), default = True)
lc = models.BooleanField(_('lower case').capitalize(), default = True)
dg = models.BooleanField(_('digits').capitalize(), default = True)
sp = models.BooleanField(_('special symbols').capitalize(), default = True)
br = models.BooleanField(_('brackets').capitalize(), default = True)
mi = models.BooleanField(_('minus').capitalize(), default = True)
ul = models.BooleanField(_('underline').capitalize(), default = True)
ac = models.BooleanField(_('avoid confusion').capitalize(), default = True)
un = models.CharField(_('default username'), max_length=160, blank=True, default='') | store/models.py | from django.db import models
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.models import User
from django.utils.crypto import get_random_string
from todo.models import Lst
from hier.models import Lst
app_name = 'store'
#----------------------------------
# deprecated
class Group(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=_('user'))
code = models.CharField(_('code'), max_length=100, blank = True)
name = models.CharField(_('name'), max_length=300)
uuid = models.CharField(_('UUID'), max_length=100, blank = True)
creation = models.DateTimeField(_('creation time'), null = True, auto_now_add = True)
last_mod = models.DateTimeField(_('last modification time'), null = True, auto_now = True)
#----------------------------------
class Entry(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=_('user'))
title = models.CharField(_('title'), max_length=500)
username = models.CharField(_('username'), max_length=150, blank=True)
value = models.CharField(_('value'), max_length=128)
url = models.CharField(_('URL'), max_length=2000, blank = True)
notes = models.TextField(_('notes'), blank = True, null = True)
uuid = models.CharField(_('UUID'), max_length=100, blank = True)
created = models.DateTimeField(_('creation time'), auto_now_add = True)
last_mod = models.DateTimeField(_('last modification time'), blank = True, auto_now = True, null = True)
# group - deprecated
group = models.ForeignKey(Group, verbose_name = _('group'), on_delete = models.CASCADE, null = True)
actual = models.IntegerField(_('actual'), default = 1)
categories = models.CharField(_('categories'), max_length = 2000, blank = True, default = '', null = True)
params = models.IntegerField(_('generator parameters used'), default = 0, null = True)
lst = models.ForeignKey(Lst, on_delete = models.CASCADE, verbose_name = _('list'), blank = True, null = True)
@classmethod
def get_new_value(cls, user):
if (len(Params.objects.filter(user = user.id)) > 0):
params = Params.objects.filter(user = user.id)[0]
else:
params = Params.objects.create(user = user)
allowed_chars = ''
if params.uc:
allowed_chars = allowed_chars + 'ABCDEFGHJKLMNPQRSTUVWXYZ'
if not params.ac:
allowed_chars = allowed_chars + 'IO'
if params.lc:
allowed_chars = allowed_chars + 'abcdefghjkmnpqrstuvwxyz'
if not params.ac:
allowed_chars = allowed_chars + 'io'
if params.dg:
allowed_chars = allowed_chars + '23456789'
if not params.ac:
allowed_chars = allowed_chars + '10'
if params.sp:
allowed_chars = allowed_chars + '!@#$%^&*=+'
if params.br:
allowed_chars = allowed_chars + '()[]{}<>'
if params.mi:
allowed_chars = allowed_chars + '-'
if params.ul:
allowed_chars = allowed_chars + '_'
if (allowed_chars == ''):
allowed_chars = 'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789!@#$%^&*(-_=+)'
ret_params = 0
if params.uc:
ret_params += 1
if params.lc:
ret_params += 2
if params.dg:
ret_params += 4
if params.sp:
ret_params += 8
if params.br:
ret_params += 16
if params.mi:
ret_params += 32
if params.ul:
ret_params += 64
if params.ac:
ret_params += 128
ret_value = get_random_string(params.ln, allowed_chars)
return ret_params, params.un, ret_value
#----------------------------------
# deprecated
class History(models.Model):
node = models.ForeignKey(Entry, verbose_name = _('node'), on_delete = models.CASCADE, related_name='node')
data = models.ForeignKey(Entry, verbose_name = _('entry'), on_delete = models.CASCADE, related_name='data')
#----------------------------------
class Params(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=_('user'), related_name='store_user')
ln = models.IntegerField(_('length').capitalize(), default = 20)
uc = models.BooleanField(_('upper case').capitalize(), default = True)
lc = models.BooleanField(_('lower case').capitalize(), default = True)
dg = models.BooleanField(_('digits').capitalize(), default = True)
sp = models.BooleanField(_('special symbols').capitalize(), default = True)
br = models.BooleanField(_('brackets').capitalize(), default = True)
mi = models.BooleanField(_('minus').capitalize(), default = True)
ul = models.BooleanField(_('underline').capitalize(), default = True)
ac = models.BooleanField(_('avoid confusion').capitalize(), default = True)
un = models.CharField(_('default username'), max_length=160, blank=True, default='') | 0.326271 | 0.055592 |
CHARMAP = {
#0x0000:"亜", # dqviewer では「亜」だが、うまくいかない
0x0000:" ",
0x0001:"園",
0x0002:"馬",
0x0003:"平",
0x0004:"閉",
0x0005:"辺",
0x0006:"飛",
0x0007:"匹",
0x0008:"ぷ",
0x0009:"亡",
0x000A:"不",
0x000B:"風",
0x000C:"聞",
0x000D:"囲",
0x000E:"因",
0x000F:"院",
0x0010:"下",
0x0011:"化",
0x0012:"可",
0x0013:"果",
0x0014:"回",
0x0015:"灰",
0x0016:"開",
0x0017:"階",
0x0018:"間",
0x0019:"ガ",
0x001A:"グ",
0x001B:"サ",
0x001C:"ネ",
0x001D:"ピ",
0x001E:"ホ",
0x001F:"ヤ",
0x0020:"ヴ",
0x0021:"兄",
0x0022:"穴",
0x0023:"元",
0x0024:"幻",
0x0025:"器",
0x0026:"帰",
0x0027:"記",
0x0028:"起",
0x0029:"局",
0x002A:"故",
0x002B:"語",
0x002C:"国",
0x002D:"困",
0x002E:"苦",
0x002F:"具",
0x0030:"空",
0x0031:"面",
0x0032:"恩",
0x0033:"両",
0x0034:"再",
0x0035:"山",
0x0036:"世",
0x0037:"星",
0x0038:"生",
0x0039:"声",
0x003A:"石",
0x003B:"仕",
0x003C:"士",
0x003D:"思",
0x003E:"指",
0x003F:"止",
0x0040:"死",
0x0041:"車",
0x0042:"囚",
0x0043:"上",
0x0044:"冗",
0x0045:"心",
0x0046:"図",
0x0047:"天",
0x0048:"田",
0x0049:"同",
0x004A:"要",
0x004B:"愛",
0x004C:"悪",
0x004D:"安",
0x004E:"暗",
0x004F:"案",
0x0050:"闇",
0x0051:"影",
0x0052:"栄",
0x0053:"永",
0x0054:"英",
0x0055:"宴",
0x0056:"演",
0x0057:"炎",
0x0058:"煙",
0x0059:"遠",
0x005A:"派",
0x005B:"破",
0x005C:"敗",
0x005D:"杯",
0x005E:"背",
0x005F:"配",
0x0060:"倍",
0x0061:"売",
0x0062:"漠",
0x0063:"箱",
0x0064:"畑",
0x0065:"発",
0x0066:"抜",
0x0067:"判",
0x0068:"半",
0x0069:"反",
0x006A:"帆",
0x006B:"板",
0x006C:"犯",
0x006D:"晩",
0x006E:"番",
0x006F:"兵",
0x0070:"別",
0x0071:"変",
0x0072:"返",
0x0073:"便",
0x0074:"勉",
0x0075:"妃",
0x0076:"彼",
0x0077:"悲",
0x0078:"扉",
0x0079:"疲",
0x007A:"秘",
0x007B:"非",
0x007C:"備",
0x007D:"美",
0x007E:"必",
0x007F:"姫",
0x0080:"百",
0x0081:"氷",
0x0082:"表",
0x0083:"評",
0x0084:"描",
0x0085:"病",
0x0086:"品",
0x0087:"貧",
0x0088:"歩",
0x0089:"募",
0x008A:"墓",
0x008B:"母",
0x008C:"報",
0x008D:"宝",
0x008E:"抱",
0x008F:"放",
0x0090:"方",
0x0091:"法",
0x0092:"訪",
0x0093:"豊",
0x0094:"坊",
0x0095:"忘",
0x0096:"房",
0x0097:"望",
0x0098:"防",
0x0099:"北",
0x009A:"本",
0x009B:"付",
0x009C:"夫",
0x009D:"婦",
0x009E:"敷",
0x009F:"普",
0x00A0:"浮",
0x00A1:"父",
0x00A2:"負",
0x00A3:"附",
0x00A4:"武",
0x00A5:"舞",
0x00A6:"部",
0x00A7:"封",
0x00A8:"復",
0x00A9:"服",
0x00AA:"福",
0x00AB:"腹",
0x00AC:"払",
0x00AD:"物",
0x00AE:"分",
0x00AF:"文",
0x00B0:"以",
0x00B1:"位",
0x00B2:"偉",
0x00B3:"意",
0x00B4:"移",
0x00B5:"違",
0x00B6:"井",
0x00B7:"育",
0x00B8:"印",
0x00B9:"飲",
0x00BA:"何",
0x00BB:"夏",
0x00BC:"嫁",
0x00BD:"家",
0x00BE:"歌",
0x00BF:"火",
0x00C0:"花",
0x00C1:"苛",
0x00C2:"荷",
0x00C3:"華",
0x00C4:"我",
0x00C5:"会",
0x00C6:"解",
0x00C7:"快",
0x00C8:"怪",
0x00C9:"悔",
0x00CA:"改",
0x00CB:"海",
0x00CC:"界",
0x00CD:"皆",
0x00CE:"絵",
0x00CF:"外",
0x00D0:"格",
0x00D1:"確",
0x00D2:"覚",
0x00D3:"学",
0x00D4:"楽",
0x00D5:"恰",
0x00D6:"活",
0x00D7:"寒",
0x00D8:"完",
0x00D9:"官",
0x00DA:"感",
0x00DB:"換",
0x00DC:"汗",
0x00DD:"甘",
0x00DE:"看",
0x00DF:"肝",
0x00E0:"館",
0x00E1:"丸",
0x00E2:"岩",
0x00E3:"顔",
0x00E4:"願",
0x00E5:"刑",
0x00E6:"形",
0x00E7:"恵",
0x00E8:"敬",
0x00E9:"景",
0x00EA:"系",
0x00EB:"経",
0x00EC:"継",
0x00ED:"計",
0x00EE:"軽",
0x00EF:"決",
0x00F0:"結",
0x00F1:"件",
0x00F2:"剣",
0x00F3:"堅",
0x00F4:"建",
0x00F5:"権",
0x00F6:"犬",
0x00F7:"研",
0x00F8:"見",
0x00F9:"険",
0x00FA:"験",
0x00FB:"原",
0x00FC:"現",
0x00FD:"言",
0x00FE:"限",
0x00FF:"危",
0x0100:"奇",
0x0101:"寄",
0x0102:"希",
0x0103:"忌",
0x0104:"期",
0x0105:"機",
0x0106:"気",
0x0107:"祈",
0x0108:"季",
0x0109:"貴",
0x010A:"儀",
0x010B:"技",
0x010C:"犠",
0x010D:"議",
0x010E:"客",
0x010F:"久",
0x0110:"休",
0x0111:"急",
0x0112:"救",
0x0113:"求",
0x0114:"泣",
0x0115:"究",
0x0116:"去",
0x0117:"許",
0x0118:"供",
0x0119:"競",
0x011A:"共",
0x011B:"叫",
0x011C:"強",
0x011D:"教",
0x011E:"橋",
0x011F:"胸",
0x0120:"鏡",
0x0121:"業",
0x0122:"曲",
0x0123:"禁",
0x0124:"筋",
0x0125:"近",
0x0126:"金",
0x0127:"古",
0x0128:"呼",
0x0129:"庫",
0x012A:"湖",
0x012B:"雇",
0x012C:"後",
0x012D:"御",
0x012E:"護",
0x012F:"交",
0x0130:"光",
0x0131:"功",
0x0132:"効",
0x0133:"向",
0x0134:"好",
0x0135:"幸",
0x0136:"広",
0x0137:"抗",
0x0138:"攻",
0x0139:"港",
0x013A:"考",
0x013B:"航",
0x013C:"荒",
0x013D:"行",
0x013E:"降",
0x013F:"高",
0x0140:"合",
0x0141:"豪",
0x0142:"告",
0x0143:"酷",
0x0144:"黒",
0x0145:"獄",
0x0146:"腰",
0x0147:"骨",
0x0148:"頃",
0x0149:"今",
0x014A:"婚",
0x014B:"根",
0x014C:"句",
0x014D:"屈",
0x014E:"君",
0x014F:"訓",
0x0150:"魔",
0x0151:"埋",
0x0152:"妹",
0x0153:"枚",
0x0154:"毎",
0x0155:"末",
0x0156:"満",
0x0157:"命",
0x0158:"迷",
0x0159:"鳴",
0x015A:"味",
0x015B:"未",
0x015C:"密",
0x015D:"妙",
0x015E:"民",
0x015F:"眠",
0x0160:"戻",
0x0161:"問",
0x0162:"紋",
0x0163:"門",
0x0164:"夢",
0x0165:"無",
0x0166:"娘",
0x0167:"内",
0x0168:"謎",
0x0169:"南",
0x016A:"熱",
0x016B:"年",
0x016C:"念",
0x016D:"尼",
0x016E:"任",
0x016F:"認",
0x0170:"悩",
0x0171:"能",
0x0172:"農",
0x0173:"奥",
0x0174:"押",
0x0175:"横",
0x0176:"黄",
0x0177:"屋",
0x0178:"音",
0x0179:"来",
0x017A:"頼",
0x017B:"絡",
0x017C:"落",
0x017D:"令",
0x017E:"冷",
0x017F:"礼",
0x0180:"歴",
0x0181:"列",
0x0182:"恋",
0x0183:"練",
0x0184:"連",
0x0185:"利",
0x0186:"理",
0x0187:"陸",
0x0188:"率",
0x0189:"立",
0x018A:"流",
0x018B:"留",
0x018C:"竜",
0x018D:"旅",
0x018E:"料",
0x018F:"良",
0x0190:"量",
0x0191:"淋",
0x0192:"輪",
0x0193:"路",
0x0194:"労",
0x0195:"牢",
0x0196:"老",
0x0197:"録",
0x0198:"涙",
0x0199:"砂",
0x019A:"座",
0x019B:"最",
0x019C:"妻",
0x019D:"才",
0x019E:"細",
0x019F:"在",
0x01A0:"材",
0x01A1:"罪",
0x01A2:"財",
0x01A3:"作",
0x01A4:"昨",
0x01A5:"札",
0x01A6:"殺",
0x01A7:"参",
0x01A8:"散",
0x01A9:"産",
0x01AA:"残",
0x01AB:"征",
0x01AC:"性",
0x01AD:"成",
0x01AE:"晴",
0x01AF:"清",
0x01B0:"牲",
0x01B1:"精",
0x01B2:"聖",
0x01B3:"誓",
0x01B4:"青",
0x01B5:"静",
0x01B6:"税",
0x01B7:"昔",
0x01B8:"責",
0x01B9:"赤",
0x01BA:"切",
0x01BB:"節",
0x01BC:"説",
0x01BD:"雪",
0x01BE:"絶",
0x01BF:"先",
0x01C0:"専",
0x01C1:"川",
0x01C2:"戦",
0x01C3:"泉",
0x01C4:"潜",
0x01C5:"船",
0x01C6:"選",
0x01C7:"前",
0x01C8:"善",
0x01C9:"然",
0x01CA:"全",
0x01CB:"使",
0x01CC:"史",
0x01CD:"始",
0x01CE:"姉",
0x01CF:"姿",
0x01D0:"子",
0x01D1:"志",
0x01D2:"私",
0x01D3:"糸",
0x01D4:"紙",
0x01D5:"至",
0x01D6:"詩",
0x01D7:"試",
0x01D8:"飼",
0x01D9:"事",
0x01DA:"似",
0x01DB:"字",
0x01DC:"持",
0x01DD:"時",
0x01DE:"次",
0x01DF:"治",
0x01E0:"耳",
0x01E1:"式",
0x01E2:"失",
0x01E3:"室",
0x01E4:"実",
0x01E5:"舎",
0x01E6:"捨",
0x01E7:"者",
0x01E8:"謝",
0x01E9:"邪",
0x01EA:"若",
0x01EB:"弱",
0x01EC:"主",
0x01ED:"取",
0x01EE:"守",
0x01EF:"手",
0x01F0:"酒",
0x01F1:"受",
0x01F2:"呪",
0x01F3:"樹",
0x01F4:"収",
0x01F5:"修",
0x01F6:"拾",
0x01F7:"終",
0x01F8:"習",
0x01F9:"舟",
0x01FA:"集",
0x01FB:"住",
0x01FC:"十",
0x01FD:"重",
0x01FE:"宿",
0x01FF:"祝",
0x0200:"出",
0x0201:"春",
0x0202:"準",
0x0203:"盾",
0x0204:"純",
0x0205:"処",
0x0206:"初",
0x0207:"所",
0x0208:"暑",
0x0209:"緒",
0x020A:"書",
0x020B:"助",
0x020C:"女",
0x020D:"勝",
0x020E:"商",
0x020F:"小",
0x0210:"少",
0x0211:"承",
0x0212:"招",
0x0213:"焼",
0x0214:"照",
0x0215:"章",
0x0216:"笑",
0x0217:"粧",
0x0218:"証",
0x0219:"丈",
0x021A:"乗",
0x021B:"城",
0x021C:"場",
0x021D:"嬢",
0x021E:"情",
0x021F:"条",
0x0220:"杖",
0x0221:"状",
0x0222:"色",
0x0223:"食",
0x0224:"信",
0x0225:"寝",
0x0226:"新",
0x0227:"森",
0x0228:"深",
0x0229:"真",
0x022A:"神",
0x022B:"親",
0x022C:"身",
0x022D:"進",
0x022E:"祖",
0x022F:"素",
0x0230:"僧",
0x0231:"倉",
0x0232:"秦",
0x0233:"捜",
0x0234:"早",
0x0235:"争",
0x0236:"相",
0x0237:"草",
0x0238:"荘",
0x0239:"装",
0x023A:"走",
0x023B:"送",
0x023C:"騒",
0x023D:"像",
0x023E:"憎",
0x023F:"側",
0x0240:"即",
0x0241:"息",
0x0242:"束",
0x0243:"足",
0x0244:"族",
0x0245:"続",
0x0246:"存",
0x0247:"孫",
0x0248:"尊",
0x0249:"村",
0x024A:"吹",
0x024B:"水",
0x024C:"酔",
0x024D:"数",
0x024E:"他",
0x024F:"多",
0x0250:"太",
0x0251:"打",
0x0252:"体",
0x0253:"対",
0x0254:"待",
0x0255:"泰",
0x0256:"袋",
0x0257:"貸",
0x0258:"退",
0x0259:"代",
0x025A:"台",
0x025B:"大",
0x025C:"第",
0x025D:"題",
0x025E:"滝",
0x025F:"誰",
0x0260:"単",
0x0261:"探",
0x0262:"誕",
0x0263:"団",
0x0264:"断",
0x0265:"段",
0x0266:"男",
0x0267:"談",
0x0268:"亭",
0x0269:"帝",
0x026A:"底",
0x026B:"庭",
0x026C:"弟",
0x026D:"敵",
0x026E:"的",
0x026F:"鉄",
0x0270:"店",
0x0271:"点",
0x0272:"伝",
0x0273:"殿",
0x0274:"値",
0x0275:"知",
0x0276:"地",
0x0277:"置",
0x0278:"茶",
0x0279:"着",
0x027A:"中",
0x027B:"仲",
0x027C:"忠",
0x027D:"昼",
0x027E:"注",
0x027F:"虫",
0x0280:"兆",
0x0281:"帳",
0x0282:"張",
0x0283:"挑",
0x0284:"朝",
0x0285:"町",
0x0286:"調",
0x0287:"長",
0x0288:"頂",
0x0289:"鳥",
0x028A:"直",
0x028B:"沈",
0x028C:"渡",
0x028D:"途",
0x028E:"都",
0x028F:"度",
0x0290:"土",
0x0291:"奴",
0x0292:"倒",
0x0293:"冬",
0x0294:"凍",
0x0295:"塔",
0x0296:"島",
0x0297:"投",
0x0298:"東",
0x0299:"盗",
0x029A:"湯",
0x029B:"灯",
0x029C:"等",
0x029D:"答",
0x029E:"統",
0x029F:"逃",
0x02A0:"頭",
0x02A1:"闘",
0x02A2:"働",
0x02A3:"動",
0x02A4:"導",
0x02A5:"洞",
0x02A6:"道",
0x02A7:"得",
0x02A8:"特",
0x02A9:"毒",
0x02AA:"読",
0x02AB:"突",
0x02AC:"追",
0x02AD:"通",
0x02AE:"右",
0x02AF:"運",
0x02B0:"雲",
0x02B1:"和",
0x02B2:"話",
0x02B3:"惑",
0x02B4:"腕",
0x02B5:"夜",
0x02B6:"野",
0x02B7:"役",
0x02B8:"約",
0x02B9:"薬",
0x02BA:"予",
0x02BB:"余",
0x02BC:"与",
0x02BD:"預",
0x02BE:"幼",
0x02BF:"妖",
0x02C0:"様",
0x02C1:"溶",
0x02C2:"用",
0x02C3:"羊",
0x02C4:"葉",
0x02C5:"踊",
0x02C6:"養",
0x02C7:"欲",
0x02C8:"油",
0x02C9:"勇",
0x02CA:"友",
0x02CB:"有",
0x02CC:"由",
0x02CD:"裕",
0x02CE:"遊",
0x02CF:"雄",
0x02D0:"夕",
0x02D1:"0",
0x02D2:"2",
0x02D3:"3",
0x02D4:"4",
0x02D5:"5",
0x02D6:"6",
0x02D7:"7",
0x02D8:"8",
0x02D9:"9",
0x02DA:"$",
0x02DB:"ト",
0x02DC:"リ",
0x02DD:"1",
0x02DE:"A",
0x02DF:"H",
0x02E0:"K",
0x02E1:"Q",
0x02E2:"T",
0x02E3:"V",
0x02E4:"h",
0x02E5:"き",
0x02E6:"そ",
0x02E7:"ち",
0x02E8:"は",
0x02E9:"ま",
0x02EA:"も",
0x02EB:"ら",
0x02EC:"る",
0x02ED:"ろ",
0x02EE:"を",
0x02EF:"ん",
0x02F0:"ノ",
0x02F1:"ヒ",
0x02F2:"レ",
0x02F3:"ワ",
0x02F4:"B",
0x02F5:"E",
0x02F6:"F",
0x02F7:"L",
0x02F8:"O",
0x02F9:"P",
0x02FA:"S",
0x02FB:"b",
0x02FC:"d",
0x02FD:"う",
0x02FE:"く",
0x02FF:"さ",
0x0300:"し",
0x0301:"じ",
0x0302:"と",
0x0303:"よ",
0x0304:"り",
0x0305:"ミ",
0x0306:"ヲ",
0x0307:"C",
0x0308:"D",
0x0309:"G",
0x030A:"*",
0x030B:"メ",
0x030C:"M",
0x030D:"W",
0x030E:"X",
0x030F:"Y",
0x0310:"+",
0x0311:"ざ",
0x0312:"ぢ",
0x0313:"ど",
0x0314:"ね",
0x0315:"ひ",
0x0316:"ふ",
0x0317:"ぶ",
0x0318:"み",
0x0319:"め",
0x031A:"れ",
0x031B:"わ",
0x031C:"セ",
0x031D:"ム",
0x031E:"ル",
0x031F:"血",
0x0320:"玉",
0x0321:"口",
0x0322:"王",
0x0323:"皿",
0x0324:"正",
0x0325:"西",
0x0326:"U",
0x0327:"あ",
0x0328:"え",
0x0329:"お",
0x032A:"か",
0x032B:"け",
0x032C:"す",
0x032D:"せ",
0x032E:"た",
0x032F:"だ",
0x0330:"な",
0x0331:"べ",
0x0332:"む",
0x0333:"や",
0x0334:"ゆ",
0x0335:"ア",
0x0336:"シ",
0x0337:"ツ",
0x0338:"テ",
0x0339:"ラ",
0x033A:"日",
0x033B:"a",
0x033C:"o",
0x033D:"っ",
0x033E:"ゃ",
0x033F:"ゅ",
0x0340:"ァ",
0x0341:"c",
0x0342:"e",
0x0343:"i",
0x0344:"l",
0x0345:"j",
0x0346:"“",
0x0347:"”",
0x0348:"m",
0x0349:"n",
0x034A:"u",
0x034B:"p",
0x034C:"こ",
0x034D:"r",
0x034E:"s",
0x034F:"t",
0x0350:"々",
0x0351:"v",
0x0352:"ヨ",
0x0353:"w",
0x0354:"つ",
0x0355:"エ",
0x0356:"。",
0x0357:",",
0x0358:"・",
0x0359:"?",
0x035A:"!",
0x035B:"#",
0x035C:"@",
0x035D:"ウ",
0x035E:"カ",
0x035F:"@", # 要チェック
0x0360:"ー",
0x0361:"{",
0x0362:"}",
0x0363:"「",
0x0364:"―",
0x0365:"ヘ",
0x0366:"…",
0x0367:"<",
0x0368:">",
0x0369:"%",
0x036A:"ぺ",
0x036B:"医",
0x036C:"オ",
0x036D:"キ",
0x036E:"ケ",
0x036F:"チ",
0x0370:"ナ",
0x0371:"月",
0x0372:"巨",
0x0373:"入",
0x0374:"乙",
0x0375:"臣",
0x0376:"人",
0x0377:"&",
0x0378:"イ",
0x0379:"ク",
0x037A:"タ",
0x037B:"ド",
0x037C:"買",
0x037D:"泊",
0x037E:"罰",
0x037F:"冒",
0x0380:"員",
0x0381:"引",
0x0382:"加",
0x0383:"賀",
0x0384:"枯",
0x0385:"后",
0x0386:"名",
0x0387:"明",
0x0388:"力",
0x0389:"師",
0x038A:"消",
0x038B:"刃",
0x038C:"当",
0x038D:"届",
0x038E:"白",
0x038F:"目",
0x0390:"呂",
0x0391:"占",
0x0392:"自",
0x0393:"申",
0x0394:"ぁ",
0x0395:"ォ",
0x0396:"ッ",
0x0397:"ャ",
0x0398:"い",
0x0399:"の",
0x039A:"ハ",
0x039B:"モ",
0x039C:"ニ",
0x039D:"が",
0x039E:"ぱ",
0x039F:"ぴ",
0x03A0:"ぽ",
0x03A1:"ギ",
0x03A2:"ゲ",
0x03A3:"ゴ",
0x03A4:"ザ",
0x03A5:"ズ",
0x03A6:"ゾ",
0x03A7:"ダ",
0x03A8:"ヂ",
0x03A9:"ヅ",
0x03AA:"バ",
0x03AB:"パ",
0x03AC:"ビ",
0x03AD:"ブ",
0x03AE:"プ",
0x03AF:"ベ",
0x03B0:"ボ",
0x03B1:"ポ",
0x03B2:"温",
0x03B3:"ぎ",
0x03B4:"ぐ",
0x03B5:"ご",
0x03B6:"ず",
0x03B7:"ぞ",
0x03B8:"づ",
0x03B9:"ぬ",
0x03BA:"ば",
0x03BB:"び",
0x03BC:"ぼ",
0x03BD:"ジ",
0x03BE:"ゼ",
0x03BF:"デ",
0x03C0:"げ",
0x03C1:"ぜ",
0x03C2:"で",
0x03C3:"て",
0x03C4:"マ",
0x03C5:"ン",
0x03C6:"に",
0x03C7:"ほ",
0x03C8:"コ",
0x03C9:"ス",
0x03CA:"ソ",
0x03CB:"ヌ",
0x03CC:"フ",
0x03CD:"ロ",
0x03CE:"ょ",
0x03CF:"ィ",
0x03D0:"暮",
0x03D1:"一",
0x03D2:"ェ",
0x03D3:"ニ",
0x03D4:"ユ",
0x03D5:"ヘ",
0x03D6:"ペ",
0x03D7:"滅",
0x03D8:"猛",
0x03D9:"ュ",
0x03DA:"ョ",
} | dqutils/dq5/charlarge.py | CHARMAP = {
#0x0000:"亜", # dqviewer では「亜」だが、うまくいかない
0x0000:" ",
0x0001:"園",
0x0002:"馬",
0x0003:"平",
0x0004:"閉",
0x0005:"辺",
0x0006:"飛",
0x0007:"匹",
0x0008:"ぷ",
0x0009:"亡",
0x000A:"不",
0x000B:"風",
0x000C:"聞",
0x000D:"囲",
0x000E:"因",
0x000F:"院",
0x0010:"下",
0x0011:"化",
0x0012:"可",
0x0013:"果",
0x0014:"回",
0x0015:"灰",
0x0016:"開",
0x0017:"階",
0x0018:"間",
0x0019:"ガ",
0x001A:"グ",
0x001B:"サ",
0x001C:"ネ",
0x001D:"ピ",
0x001E:"ホ",
0x001F:"ヤ",
0x0020:"ヴ",
0x0021:"兄",
0x0022:"穴",
0x0023:"元",
0x0024:"幻",
0x0025:"器",
0x0026:"帰",
0x0027:"記",
0x0028:"起",
0x0029:"局",
0x002A:"故",
0x002B:"語",
0x002C:"国",
0x002D:"困",
0x002E:"苦",
0x002F:"具",
0x0030:"空",
0x0031:"面",
0x0032:"恩",
0x0033:"両",
0x0034:"再",
0x0035:"山",
0x0036:"世",
0x0037:"星",
0x0038:"生",
0x0039:"声",
0x003A:"石",
0x003B:"仕",
0x003C:"士",
0x003D:"思",
0x003E:"指",
0x003F:"止",
0x0040:"死",
0x0041:"車",
0x0042:"囚",
0x0043:"上",
0x0044:"冗",
0x0045:"心",
0x0046:"図",
0x0047:"天",
0x0048:"田",
0x0049:"同",
0x004A:"要",
0x004B:"愛",
0x004C:"悪",
0x004D:"安",
0x004E:"暗",
0x004F:"案",
0x0050:"闇",
0x0051:"影",
0x0052:"栄",
0x0053:"永",
0x0054:"英",
0x0055:"宴",
0x0056:"演",
0x0057:"炎",
0x0058:"煙",
0x0059:"遠",
0x005A:"派",
0x005B:"破",
0x005C:"敗",
0x005D:"杯",
0x005E:"背",
0x005F:"配",
0x0060:"倍",
0x0061:"売",
0x0062:"漠",
0x0063:"箱",
0x0064:"畑",
0x0065:"発",
0x0066:"抜",
0x0067:"判",
0x0068:"半",
0x0069:"反",
0x006A:"帆",
0x006B:"板",
0x006C:"犯",
0x006D:"晩",
0x006E:"番",
0x006F:"兵",
0x0070:"別",
0x0071:"変",
0x0072:"返",
0x0073:"便",
0x0074:"勉",
0x0075:"妃",
0x0076:"彼",
0x0077:"悲",
0x0078:"扉",
0x0079:"疲",
0x007A:"秘",
0x007B:"非",
0x007C:"備",
0x007D:"美",
0x007E:"必",
0x007F:"姫",
0x0080:"百",
0x0081:"氷",
0x0082:"表",
0x0083:"評",
0x0084:"描",
0x0085:"病",
0x0086:"品",
0x0087:"貧",
0x0088:"歩",
0x0089:"募",
0x008A:"墓",
0x008B:"母",
0x008C:"報",
0x008D:"宝",
0x008E:"抱",
0x008F:"放",
0x0090:"方",
0x0091:"法",
0x0092:"訪",
0x0093:"豊",
0x0094:"坊",
0x0095:"忘",
0x0096:"房",
0x0097:"望",
0x0098:"防",
0x0099:"北",
0x009A:"本",
0x009B:"付",
0x009C:"夫",
0x009D:"婦",
0x009E:"敷",
0x009F:"普",
0x00A0:"浮",
0x00A1:"父",
0x00A2:"負",
0x00A3:"附",
0x00A4:"武",
0x00A5:"舞",
0x00A6:"部",
0x00A7:"封",
0x00A8:"復",
0x00A9:"服",
0x00AA:"福",
0x00AB:"腹",
0x00AC:"払",
0x00AD:"物",
0x00AE:"分",
0x00AF:"文",
0x00B0:"以",
0x00B1:"位",
0x00B2:"偉",
0x00B3:"意",
0x00B4:"移",
0x00B5:"違",
0x00B6:"井",
0x00B7:"育",
0x00B8:"印",
0x00B9:"飲",
0x00BA:"何",
0x00BB:"夏",
0x00BC:"嫁",
0x00BD:"家",
0x00BE:"歌",
0x00BF:"火",
0x00C0:"花",
0x00C1:"苛",
0x00C2:"荷",
0x00C3:"華",
0x00C4:"我",
0x00C5:"会",
0x00C6:"解",
0x00C7:"快",
0x00C8:"怪",
0x00C9:"悔",
0x00CA:"改",
0x00CB:"海",
0x00CC:"界",
0x00CD:"皆",
0x00CE:"絵",
0x00CF:"外",
0x00D0:"格",
0x00D1:"確",
0x00D2:"覚",
0x00D3:"学",
0x00D4:"楽",
0x00D5:"恰",
0x00D6:"活",
0x00D7:"寒",
0x00D8:"完",
0x00D9:"官",
0x00DA:"感",
0x00DB:"換",
0x00DC:"汗",
0x00DD:"甘",
0x00DE:"看",
0x00DF:"肝",
0x00E0:"館",
0x00E1:"丸",
0x00E2:"岩",
0x00E3:"顔",
0x00E4:"願",
0x00E5:"刑",
0x00E6:"形",
0x00E7:"恵",
0x00E8:"敬",
0x00E9:"景",
0x00EA:"系",
0x00EB:"経",
0x00EC:"継",
0x00ED:"計",
0x00EE:"軽",
0x00EF:"決",
0x00F0:"結",
0x00F1:"件",
0x00F2:"剣",
0x00F3:"堅",
0x00F4:"建",
0x00F5:"権",
0x00F6:"犬",
0x00F7:"研",
0x00F8:"見",
0x00F9:"険",
0x00FA:"験",
0x00FB:"原",
0x00FC:"現",
0x00FD:"言",
0x00FE:"限",
0x00FF:"危",
0x0100:"奇",
0x0101:"寄",
0x0102:"希",
0x0103:"忌",
0x0104:"期",
0x0105:"機",
0x0106:"気",
0x0107:"祈",
0x0108:"季",
0x0109:"貴",
0x010A:"儀",
0x010B:"技",
0x010C:"犠",
0x010D:"議",
0x010E:"客",
0x010F:"久",
0x0110:"休",
0x0111:"急",
0x0112:"救",
0x0113:"求",
0x0114:"泣",
0x0115:"究",
0x0116:"去",
0x0117:"許",
0x0118:"供",
0x0119:"競",
0x011A:"共",
0x011B:"叫",
0x011C:"強",
0x011D:"教",
0x011E:"橋",
0x011F:"胸",
0x0120:"鏡",
0x0121:"業",
0x0122:"曲",
0x0123:"禁",
0x0124:"筋",
0x0125:"近",
0x0126:"金",
0x0127:"古",
0x0128:"呼",
0x0129:"庫",
0x012A:"湖",
0x012B:"雇",
0x012C:"後",
0x012D:"御",
0x012E:"護",
0x012F:"交",
0x0130:"光",
0x0131:"功",
0x0132:"効",
0x0133:"向",
0x0134:"好",
0x0135:"幸",
0x0136:"広",
0x0137:"抗",
0x0138:"攻",
0x0139:"港",
0x013A:"考",
0x013B:"航",
0x013C:"荒",
0x013D:"行",
0x013E:"降",
0x013F:"高",
0x0140:"合",
0x0141:"豪",
0x0142:"告",
0x0143:"酷",
0x0144:"黒",
0x0145:"獄",
0x0146:"腰",
0x0147:"骨",
0x0148:"頃",
0x0149:"今",
0x014A:"婚",
0x014B:"根",
0x014C:"句",
0x014D:"屈",
0x014E:"君",
0x014F:"訓",
0x0150:"魔",
0x0151:"埋",
0x0152:"妹",
0x0153:"枚",
0x0154:"毎",
0x0155:"末",
0x0156:"満",
0x0157:"命",
0x0158:"迷",
0x0159:"鳴",
0x015A:"味",
0x015B:"未",
0x015C:"密",
0x015D:"妙",
0x015E:"民",
0x015F:"眠",
0x0160:"戻",
0x0161:"問",
0x0162:"紋",
0x0163:"門",
0x0164:"夢",
0x0165:"無",
0x0166:"娘",
0x0167:"内",
0x0168:"謎",
0x0169:"南",
0x016A:"熱",
0x016B:"年",
0x016C:"念",
0x016D:"尼",
0x016E:"任",
0x016F:"認",
0x0170:"悩",
0x0171:"能",
0x0172:"農",
0x0173:"奥",
0x0174:"押",
0x0175:"横",
0x0176:"黄",
0x0177:"屋",
0x0178:"音",
0x0179:"来",
0x017A:"頼",
0x017B:"絡",
0x017C:"落",
0x017D:"令",
0x017E:"冷",
0x017F:"礼",
0x0180:"歴",
0x0181:"列",
0x0182:"恋",
0x0183:"練",
0x0184:"連",
0x0185:"利",
0x0186:"理",
0x0187:"陸",
0x0188:"率",
0x0189:"立",
0x018A:"流",
0x018B:"留",
0x018C:"竜",
0x018D:"旅",
0x018E:"料",
0x018F:"良",
0x0190:"量",
0x0191:"淋",
0x0192:"輪",
0x0193:"路",
0x0194:"労",
0x0195:"牢",
0x0196:"老",
0x0197:"録",
0x0198:"涙",
0x0199:"砂",
0x019A:"座",
0x019B:"最",
0x019C:"妻",
0x019D:"才",
0x019E:"細",
0x019F:"在",
0x01A0:"材",
0x01A1:"罪",
0x01A2:"財",
0x01A3:"作",
0x01A4:"昨",
0x01A5:"札",
0x01A6:"殺",
0x01A7:"参",
0x01A8:"散",
0x01A9:"産",
0x01AA:"残",
0x01AB:"征",
0x01AC:"性",
0x01AD:"成",
0x01AE:"晴",
0x01AF:"清",
0x01B0:"牲",
0x01B1:"精",
0x01B2:"聖",
0x01B3:"誓",
0x01B4:"青",
0x01B5:"静",
0x01B6:"税",
0x01B7:"昔",
0x01B8:"責",
0x01B9:"赤",
0x01BA:"切",
0x01BB:"節",
0x01BC:"説",
0x01BD:"雪",
0x01BE:"絶",
0x01BF:"先",
0x01C0:"専",
0x01C1:"川",
0x01C2:"戦",
0x01C3:"泉",
0x01C4:"潜",
0x01C5:"船",
0x01C6:"選",
0x01C7:"前",
0x01C8:"善",
0x01C9:"然",
0x01CA:"全",
0x01CB:"使",
0x01CC:"史",
0x01CD:"始",
0x01CE:"姉",
0x01CF:"姿",
0x01D0:"子",
0x01D1:"志",
0x01D2:"私",
0x01D3:"糸",
0x01D4:"紙",
0x01D5:"至",
0x01D6:"詩",
0x01D7:"試",
0x01D8:"飼",
0x01D9:"事",
0x01DA:"似",
0x01DB:"字",
0x01DC:"持",
0x01DD:"時",
0x01DE:"次",
0x01DF:"治",
0x01E0:"耳",
0x01E1:"式",
0x01E2:"失",
0x01E3:"室",
0x01E4:"実",
0x01E5:"舎",
0x01E6:"捨",
0x01E7:"者",
0x01E8:"謝",
0x01E9:"邪",
0x01EA:"若",
0x01EB:"弱",
0x01EC:"主",
0x01ED:"取",
0x01EE:"守",
0x01EF:"手",
0x01F0:"酒",
0x01F1:"受",
0x01F2:"呪",
0x01F3:"樹",
0x01F4:"収",
0x01F5:"修",
0x01F6:"拾",
0x01F7:"終",
0x01F8:"習",
0x01F9:"舟",
0x01FA:"集",
0x01FB:"住",
0x01FC:"十",
0x01FD:"重",
0x01FE:"宿",
0x01FF:"祝",
0x0200:"出",
0x0201:"春",
0x0202:"準",
0x0203:"盾",
0x0204:"純",
0x0205:"処",
0x0206:"初",
0x0207:"所",
0x0208:"暑",
0x0209:"緒",
0x020A:"書",
0x020B:"助",
0x020C:"女",
0x020D:"勝",
0x020E:"商",
0x020F:"小",
0x0210:"少",
0x0211:"承",
0x0212:"招",
0x0213:"焼",
0x0214:"照",
0x0215:"章",
0x0216:"笑",
0x0217:"粧",
0x0218:"証",
0x0219:"丈",
0x021A:"乗",
0x021B:"城",
0x021C:"場",
0x021D:"嬢",
0x021E:"情",
0x021F:"条",
0x0220:"杖",
0x0221:"状",
0x0222:"色",
0x0223:"食",
0x0224:"信",
0x0225:"寝",
0x0226:"新",
0x0227:"森",
0x0228:"深",
0x0229:"真",
0x022A:"神",
0x022B:"親",
0x022C:"身",
0x022D:"進",
0x022E:"祖",
0x022F:"素",
0x0230:"僧",
0x0231:"倉",
0x0232:"秦",
0x0233:"捜",
0x0234:"早",
0x0235:"争",
0x0236:"相",
0x0237:"草",
0x0238:"荘",
0x0239:"装",
0x023A:"走",
0x023B:"送",
0x023C:"騒",
0x023D:"像",
0x023E:"憎",
0x023F:"側",
0x0240:"即",
0x0241:"息",
0x0242:"束",
0x0243:"足",
0x0244:"族",
0x0245:"続",
0x0246:"存",
0x0247:"孫",
0x0248:"尊",
0x0249:"村",
0x024A:"吹",
0x024B:"水",
0x024C:"酔",
0x024D:"数",
0x024E:"他",
0x024F:"多",
0x0250:"太",
0x0251:"打",
0x0252:"体",
0x0253:"対",
0x0254:"待",
0x0255:"泰",
0x0256:"袋",
0x0257:"貸",
0x0258:"退",
0x0259:"代",
0x025A:"台",
0x025B:"大",
0x025C:"第",
0x025D:"題",
0x025E:"滝",
0x025F:"誰",
0x0260:"単",
0x0261:"探",
0x0262:"誕",
0x0263:"団",
0x0264:"断",
0x0265:"段",
0x0266:"男",
0x0267:"談",
0x0268:"亭",
0x0269:"帝",
0x026A:"底",
0x026B:"庭",
0x026C:"弟",
0x026D:"敵",
0x026E:"的",
0x026F:"鉄",
0x0270:"店",
0x0271:"点",
0x0272:"伝",
0x0273:"殿",
0x0274:"値",
0x0275:"知",
0x0276:"地",
0x0277:"置",
0x0278:"茶",
0x0279:"着",
0x027A:"中",
0x027B:"仲",
0x027C:"忠",
0x027D:"昼",
0x027E:"注",
0x027F:"虫",
0x0280:"兆",
0x0281:"帳",
0x0282:"張",
0x0283:"挑",
0x0284:"朝",
0x0285:"町",
0x0286:"調",
0x0287:"長",
0x0288:"頂",
0x0289:"鳥",
0x028A:"直",
0x028B:"沈",
0x028C:"渡",
0x028D:"途",
0x028E:"都",
0x028F:"度",
0x0290:"土",
0x0291:"奴",
0x0292:"倒",
0x0293:"冬",
0x0294:"凍",
0x0295:"塔",
0x0296:"島",
0x0297:"投",
0x0298:"東",
0x0299:"盗",
0x029A:"湯",
0x029B:"灯",
0x029C:"等",
0x029D:"答",
0x029E:"統",
0x029F:"逃",
0x02A0:"頭",
0x02A1:"闘",
0x02A2:"働",
0x02A3:"動",
0x02A4:"導",
0x02A5:"洞",
0x02A6:"道",
0x02A7:"得",
0x02A8:"特",
0x02A9:"毒",
0x02AA:"読",
0x02AB:"突",
0x02AC:"追",
0x02AD:"通",
0x02AE:"右",
0x02AF:"運",
0x02B0:"雲",
0x02B1:"和",
0x02B2:"話",
0x02B3:"惑",
0x02B4:"腕",
0x02B5:"夜",
0x02B6:"野",
0x02B7:"役",
0x02B8:"約",
0x02B9:"薬",
0x02BA:"予",
0x02BB:"余",
0x02BC:"与",
0x02BD:"預",
0x02BE:"幼",
0x02BF:"妖",
0x02C0:"様",
0x02C1:"溶",
0x02C2:"用",
0x02C3:"羊",
0x02C4:"葉",
0x02C5:"踊",
0x02C6:"養",
0x02C7:"欲",
0x02C8:"油",
0x02C9:"勇",
0x02CA:"友",
0x02CB:"有",
0x02CC:"由",
0x02CD:"裕",
0x02CE:"遊",
0x02CF:"雄",
0x02D0:"夕",
0x02D1:"0",
0x02D2:"2",
0x02D3:"3",
0x02D4:"4",
0x02D5:"5",
0x02D6:"6",
0x02D7:"7",
0x02D8:"8",
0x02D9:"9",
0x02DA:"$",
0x02DB:"ト",
0x02DC:"リ",
0x02DD:"1",
0x02DE:"A",
0x02DF:"H",
0x02E0:"K",
0x02E1:"Q",
0x02E2:"T",
0x02E3:"V",
0x02E4:"h",
0x02E5:"き",
0x02E6:"そ",
0x02E7:"ち",
0x02E8:"は",
0x02E9:"ま",
0x02EA:"も",
0x02EB:"ら",
0x02EC:"る",
0x02ED:"ろ",
0x02EE:"を",
0x02EF:"ん",
0x02F0:"ノ",
0x02F1:"ヒ",
0x02F2:"レ",
0x02F3:"ワ",
0x02F4:"B",
0x02F5:"E",
0x02F6:"F",
0x02F7:"L",
0x02F8:"O",
0x02F9:"P",
0x02FA:"S",
0x02FB:"b",
0x02FC:"d",
0x02FD:"う",
0x02FE:"く",
0x02FF:"さ",
0x0300:"し",
0x0301:"じ",
0x0302:"と",
0x0303:"よ",
0x0304:"り",
0x0305:"ミ",
0x0306:"ヲ",
0x0307:"C",
0x0308:"D",
0x0309:"G",
0x030A:"*",
0x030B:"メ",
0x030C:"M",
0x030D:"W",
0x030E:"X",
0x030F:"Y",
0x0310:"+",
0x0311:"ざ",
0x0312:"ぢ",
0x0313:"ど",
0x0314:"ね",
0x0315:"ひ",
0x0316:"ふ",
0x0317:"ぶ",
0x0318:"み",
0x0319:"め",
0x031A:"れ",
0x031B:"わ",
0x031C:"セ",
0x031D:"ム",
0x031E:"ル",
0x031F:"血",
0x0320:"玉",
0x0321:"口",
0x0322:"王",
0x0323:"皿",
0x0324:"正",
0x0325:"西",
0x0326:"U",
0x0327:"あ",
0x0328:"え",
0x0329:"お",
0x032A:"か",
0x032B:"け",
0x032C:"す",
0x032D:"せ",
0x032E:"た",
0x032F:"だ",
0x0330:"な",
0x0331:"べ",
0x0332:"む",
0x0333:"や",
0x0334:"ゆ",
0x0335:"ア",
0x0336:"シ",
0x0337:"ツ",
0x0338:"テ",
0x0339:"ラ",
0x033A:"日",
0x033B:"a",
0x033C:"o",
0x033D:"っ",
0x033E:"ゃ",
0x033F:"ゅ",
0x0340:"ァ",
0x0341:"c",
0x0342:"e",
0x0343:"i",
0x0344:"l",
0x0345:"j",
0x0346:"“",
0x0347:"”",
0x0348:"m",
0x0349:"n",
0x034A:"u",
0x034B:"p",
0x034C:"こ",
0x034D:"r",
0x034E:"s",
0x034F:"t",
0x0350:"々",
0x0351:"v",
0x0352:"ヨ",
0x0353:"w",
0x0354:"つ",
0x0355:"エ",
0x0356:"。",
0x0357:",",
0x0358:"・",
0x0359:"?",
0x035A:"!",
0x035B:"#",
0x035C:"@",
0x035D:"ウ",
0x035E:"カ",
0x035F:"@", # 要チェック
0x0360:"ー",
0x0361:"{",
0x0362:"}",
0x0363:"「",
0x0364:"―",
0x0365:"ヘ",
0x0366:"…",
0x0367:"<",
0x0368:">",
0x0369:"%",
0x036A:"ぺ",
0x036B:"医",
0x036C:"オ",
0x036D:"キ",
0x036E:"ケ",
0x036F:"チ",
0x0370:"ナ",
0x0371:"月",
0x0372:"巨",
0x0373:"入",
0x0374:"乙",
0x0375:"臣",
0x0376:"人",
0x0377:"&",
0x0378:"イ",
0x0379:"ク",
0x037A:"タ",
0x037B:"ド",
0x037C:"買",
0x037D:"泊",
0x037E:"罰",
0x037F:"冒",
0x0380:"員",
0x0381:"引",
0x0382:"加",
0x0383:"賀",
0x0384:"枯",
0x0385:"后",
0x0386:"名",
0x0387:"明",
0x0388:"力",
0x0389:"師",
0x038A:"消",
0x038B:"刃",
0x038C:"当",
0x038D:"届",
0x038E:"白",
0x038F:"目",
0x0390:"呂",
0x0391:"占",
0x0392:"自",
0x0393:"申",
0x0394:"ぁ",
0x0395:"ォ",
0x0396:"ッ",
0x0397:"ャ",
0x0398:"い",
0x0399:"の",
0x039A:"ハ",
0x039B:"モ",
0x039C:"ニ",
0x039D:"が",
0x039E:"ぱ",
0x039F:"ぴ",
0x03A0:"ぽ",
0x03A1:"ギ",
0x03A2:"ゲ",
0x03A3:"ゴ",
0x03A4:"ザ",
0x03A5:"ズ",
0x03A6:"ゾ",
0x03A7:"ダ",
0x03A8:"ヂ",
0x03A9:"ヅ",
0x03AA:"バ",
0x03AB:"パ",
0x03AC:"ビ",
0x03AD:"ブ",
0x03AE:"プ",
0x03AF:"ベ",
0x03B0:"ボ",
0x03B1:"ポ",
0x03B2:"温",
0x03B3:"ぎ",
0x03B4:"ぐ",
0x03B5:"ご",
0x03B6:"ず",
0x03B7:"ぞ",
0x03B8:"づ",
0x03B9:"ぬ",
0x03BA:"ば",
0x03BB:"び",
0x03BC:"ぼ",
0x03BD:"ジ",
0x03BE:"ゼ",
0x03BF:"デ",
0x03C0:"げ",
0x03C1:"ぜ",
0x03C2:"で",
0x03C3:"て",
0x03C4:"マ",
0x03C5:"ン",
0x03C6:"に",
0x03C7:"ほ",
0x03C8:"コ",
0x03C9:"ス",
0x03CA:"ソ",
0x03CB:"ヌ",
0x03CC:"フ",
0x03CD:"ロ",
0x03CE:"ょ",
0x03CF:"ィ",
0x03D0:"暮",
0x03D1:"一",
0x03D2:"ェ",
0x03D3:"ニ",
0x03D4:"ユ",
0x03D5:"ヘ",
0x03D6:"ペ",
0x03D7:"滅",
0x03D8:"猛",
0x03D9:"ュ",
0x03DA:"ョ",
} | 0.132248 | 0.361897 |
import face_recognition
from cv2 import cv2
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import traceback
import os
def SearchPersonFR(image, database, model):
'''
Function to find who the person is
Arguments:
image {cv2 image} -- image of the person
database {dict} -- encodings of all know ppl
model {FRmodel} -- Face Recognition model
returns:
status {bool} -- status of detection
identity {name} -- name of the person
'''
status = False
encodings = fr.face_encodings(image)
identity = "unknown"
for enc in encodings:
for name, known_enc in database.items():
match = fr.compare_faces([known_enc], enc)
if match[0].any():
identity = name
status = True
return status, identity
def LoadDatabaseFR(imageFolderPath:str, model) -> dict:
'''
Function that loads all recognised personals
Arguments:
imageFolderPath {str} -- Path of all registered user images
Returns:
database -- user name to encodings dictionary
'''
database = dict()
for imagePath in os.listdir(imageFolderPath):
name = imagePath.split(".")[0]
image = fr.load_image_file(os.path.join(imageFolderPath + "/" + imagePath))
database[name] = fr.face_encodings(image)
for name, known_enc in database.items():
print(name, known_enc)
return database
camera = cv2.VideoCapture(cv2.CAP_DSHOW)
try:
while(True):
# Exiting mechanism
if cv2.waitKey(1) & 0xFF == ord('q'):
raise KeyboardInterrupt
# Read video frame by frame
status, image = camera.read()
if not status:
raise IOError
# Detect face location
faceLocations = face_recognition.face_locations(image)
# Convert image
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
label = "Face Detected"
for (top, right, bottom, left) in faceLocations:
draw = ImageDraw.Draw(image)
labelSize = draw.textsize(label, font)
top = max(0, np.floor(top + 0.5).astype('int32')) # To handle upper bound
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32')) # To handle lower bound
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - labelSize[1] >= 0:
textOrgin = np.array([left, top - labelSize[1]])
else:
textOrgin = np.array([left, top + 1])
for i in range(thickness):
draw.rectangle([left + i, top + i, right - i, bottom - i], outline = (255, 0, 0))
draw.rectangle([tuple(textOrgin), tuple(textOrgin + labelSize)], fill = (255, 0, 0))
draw.text(textOrgin, label, fill = (0, 0, 0), font = font)
del draw
# Convert back to cv2 image
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
cv2.imshow("output", image)
except KeyboardInterrupt:
print("[+] Releasing camera and shuting it down")
except IOError:
print("[+] Read Camera error")
except Exception as err:
print("[+] This is bad, we don't what error is this?!!")
print("[+] Send us a mail to check it out")
print("[+] You Faced the following error: ", err)
check = str(input("[+] Do you want to print the traceback error? (Y/N): ")).lower()
if check == "y":
traceback.print_exc()
finally:
camera.release()
cv2.destroyAllWindows() | ObjectDetection/faceTest.py | import face_recognition
from cv2 import cv2
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import traceback
import os
def SearchPersonFR(image, database, model):
'''
Function to find who the person is
Arguments:
image {cv2 image} -- image of the person
database {dict} -- encodings of all know ppl
model {FRmodel} -- Face Recognition model
returns:
status {bool} -- status of detection
identity {name} -- name of the person
'''
status = False
encodings = fr.face_encodings(image)
identity = "unknown"
for enc in encodings:
for name, known_enc in database.items():
match = fr.compare_faces([known_enc], enc)
if match[0].any():
identity = name
status = True
return status, identity
def LoadDatabaseFR(imageFolderPath:str, model) -> dict:
'''
Function that loads all recognised personals
Arguments:
imageFolderPath {str} -- Path of all registered user images
Returns:
database -- user name to encodings dictionary
'''
database = dict()
for imagePath in os.listdir(imageFolderPath):
name = imagePath.split(".")[0]
image = fr.load_image_file(os.path.join(imageFolderPath + "/" + imagePath))
database[name] = fr.face_encodings(image)
for name, known_enc in database.items():
print(name, known_enc)
return database
camera = cv2.VideoCapture(cv2.CAP_DSHOW)
try:
while(True):
# Exiting mechanism
if cv2.waitKey(1) & 0xFF == ord('q'):
raise KeyboardInterrupt
# Read video frame by frame
status, image = camera.read()
if not status:
raise IOError
# Detect face location
faceLocations = face_recognition.face_locations(image)
# Convert image
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
label = "Face Detected"
for (top, right, bottom, left) in faceLocations:
draw = ImageDraw.Draw(image)
labelSize = draw.textsize(label, font)
top = max(0, np.floor(top + 0.5).astype('int32')) # To handle upper bound
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32')) # To handle lower bound
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - labelSize[1] >= 0:
textOrgin = np.array([left, top - labelSize[1]])
else:
textOrgin = np.array([left, top + 1])
for i in range(thickness):
draw.rectangle([left + i, top + i, right - i, bottom - i], outline = (255, 0, 0))
draw.rectangle([tuple(textOrgin), tuple(textOrgin + labelSize)], fill = (255, 0, 0))
draw.text(textOrgin, label, fill = (0, 0, 0), font = font)
del draw
# Convert back to cv2 image
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
cv2.imshow("output", image)
except KeyboardInterrupt:
print("[+] Releasing camera and shuting it down")
except IOError:
print("[+] Read Camera error")
except Exception as err:
print("[+] This is bad, we don't what error is this?!!")
print("[+] Send us a mail to check it out")
print("[+] You Faced the following error: ", err)
check = str(input("[+] Do you want to print the traceback error? (Y/N): ")).lower()
if check == "y":
traceback.print_exc()
finally:
camera.release()
cv2.destroyAllWindows() | 0.514156 | 0.312108 |
from helper import utility as util
import os
import configparser
import numpy as np
from scipy import misc
import random
import logging
INPUT_IMAGE_DIR = "input"
INTERPOLATED_IMAGE_DIR = "interpolated"
TRUE_IMAGE_DIR = "true"
def convert_to_multi_channel_image(multi_channel_image, image, scale):
height = multi_channel_image.shape[0]
width = multi_channel_image.shape[1]
for y in range(height):
for x in range(width):
for y2 in range(scale):
for x2 in range(scale):
multi_channel_image[y, x, y2 * scale + x2] = image[y * scale + y2, x * scale + x2, 0]
def convert_from_multi_channel_image(image, multi_channel_image, scale):
height = multi_channel_image.shape[0]
width = multi_channel_image.shape[1]
for y in range(height):
for x in range(width):
for y2 in range(scale):
for x2 in range(scale):
image[y * scale + y2, x * scale + x2, 0] = multi_channel_image[y, x, y2 * scale + x2]
def load_input_image(filename, width=0, height=0, channels=1, scale=1, alignment=0, convert_ycbcr=True,
jpeg_mode=False, print_console=True):
image = util.load_image(filename, print_console=print_console)
return build_input_image(image, width, height, channels, scale, alignment, convert_ycbcr, jpeg_mode)
def build_input_image(image, width=0, height=0, channels=1, scale=1, alignment=0, convert_ycbcr=True, jpeg_mode=False):
"""
build input image from file.
crop, adjust the image alignment for the scale factor, resize, convert color space.
"""
if width != 0 and height != 0:
if image.shape[0] != height or image.shape[1] != width:
x = (image.shape[1] - width) // 2
y = (image.shape[0] - height) // 2
image = image[y: y + height, x: x + width, :]
if image.shape[2] >= 4:
image = image[:, :, 0:3]
if alignment > 1:
image = util.set_image_alignment(image, alignment)
if scale != 1:
image = util.resize_image_by_pil(image, 1.0 / scale)
if channels == 1 and image.shape[2] == 3:
if convert_ycbcr:
image = util.convert_rgb_to_y(image, jpeg_mode=jpeg_mode)
else:
if convert_ycbcr:
image = util.convert_rgb_to_ycbcr(image, jpeg_mode=jpeg_mode)
return image
class DataSet:
def __init__(self, batch_image_size, channels=1, scale=1, max_value=255.0, alignment=0, jpeg_mode=False):
self.batch_image_size = batch_image_size
self.max_value = max_value
self.channels = channels
self.scale = scale
self.max_value = max_value
self.alignment = alignment
self.jpeg_mode = jpeg_mode
self.count = 0
self.images = None
self.quad_images = None
def load_test_image(self, filename):
image = load_input_image(filename, channels=self.channels, scale=1, alignment=self.alignment,
jpeg_mode=self.jpeg_mode, print_console=False)
if self.max_value != 255.0:
image = np.multiply(image, self.max_value / 255.0)
return image
def load_input_image(self, filename, rescale=False, resampling_method="bicubic"):
image = load_input_image(filename, channels=self.channels, scale=self.scale, alignment=self.alignment,
jpeg_mode=self.jpeg_mode, print_console=True)
if self.max_value != 255.0:
image = np.multiply(image, self.max_value / 255.0)
if rescale:
rescaled_image = util.resize_image_by_pil(image, self.scale, resampling_method=resampling_method)
return image, rescaled_image
else:
return image
def load_batch_images(self, batch_dir, input_batch, count):
print("Loading %d batch images from %s for [%s]" % (count, batch_dir, "input" if input_batch else "true"))
self.count = count
if input_batch:
self.images = np.zeros(shape=[count, self.batch_image_size, self.batch_image_size, 1]) # type: np.ndarray
else:
self.images = None
self.quad_images = np.zeros(
shape=[count, self.batch_image_size, self.batch_image_size, self.scale * self.scale]) # type: np.ndarray
for i in range(count):
if input_batch:
self.images[i] = util.load_image(batch_dir + "/" + INPUT_IMAGE_DIR + "/%06d.bmp" % i,
print_console=False)
quad_image = util.load_image(batch_dir + "/" + INTERPOLATED_IMAGE_DIR + "/%06d.bmp" % i,
print_console=False)
else:
quad_image = util.load_image(batch_dir + "/" + TRUE_IMAGE_DIR + "/%06d.bmp" % i, print_console=False)
convert_to_multi_channel_image(self.quad_images[i], quad_image, self.scale)
if i % 1000 == 0:
print('.', end='', flush=True)
print("Finished")
class DataSets:
def __init__(self, scale, batch_image_size, stride_size, channels=1,
jpeg_mode=False, max_value=255.0, resampling_method="nearest"):
self.scale = scale
self.batch_image_size = batch_image_size
self.stride = stride_size
self.channels = channels
self.jpeg_mode = jpeg_mode
self.max_value = max_value
self.resampling_method = resampling_method
self.input = DataSet(batch_image_size, channels=channels, scale=scale, alignment=scale, jpeg_mode=jpeg_mode,
max_value=max_value)
self.true = DataSet(batch_image_size, channels=channels, scale=scale, alignment=scale, jpeg_mode=jpeg_mode,
max_value=max_value)
def build_batch(self, data_dir, batch_dir):
""" load from input files. Then save batch images on file to reduce memory consumption. """
print("Building batch images for %s..." % batch_dir)
filenames = util.get_files_in_directory(data_dir)
images_count = 0
util.make_dir(batch_dir)
util.clean_dir(batch_dir)
util.make_dir(batch_dir + "/" + INPUT_IMAGE_DIR)
util.make_dir(batch_dir + "/" + INTERPOLATED_IMAGE_DIR)
util.make_dir(batch_dir + "/" + TRUE_IMAGE_DIR)
for filename in filenames:
output_window_size = self.batch_image_size * self.scale
output_window_stride = self.stride * self.scale
input_image, input_bicubic_image = self.input.load_input_image(filename, rescale=True,
resampling_method=self.resampling_method)
test_image = self.true.load_test_image(filename)
# split into batch images
input_batch_images = util.get_split_images(input_image, self.batch_image_size, stride=self.stride)
input_bicubic_batch_images = util.get_split_images(input_bicubic_image, output_window_size,
stride=output_window_stride)
if input_batch_images is None or input_bicubic_batch_images is None:
continue
input_count = input_batch_images.shape[0]
test_batch_images = util.get_split_images(test_image, output_window_size, stride=output_window_stride)
for i in range(input_count):
# util.save_image_data(batch_dir + "/" + INPUT_IMAGE_DIR + "/%06d.npy" % images_count,
# input_batch_images[i])
# util.save_image_data(batch_dir + "/" + INTERPOLATED_IMAGE_DIR + "/%06d.npy" % images_count,
# input_bicubic_batch_images[i])
# util.save_image_data(batch_dir + "/" + TRUE_IMAGE_DIR + "/%06d.npy" % images_count,
# test_batch_images[i])
util.save_image(batch_dir + "/" + INPUT_IMAGE_DIR + "/%06d.bmp" % images_count, input_batch_images[i])
util.save_image(batch_dir + "/" + INTERPOLATED_IMAGE_DIR + "/%06d.bmp" % images_count,
input_bicubic_batch_images[i])
util.save_image(batch_dir + "/" + TRUE_IMAGE_DIR + "/%06d.bmp" % images_count, test_batch_images[i])
images_count += 1
print("%d mini-batch images are built(saved)." % images_count)
config = configparser.ConfigParser()
config.add_section("batch")
config.set("batch", "count", str(images_count))
config.set("batch", "scale", str(self.scale))
config.set("batch", "batch_image_size", str(self.batch_image_size))
config.set("batch", "stride", str(self.stride))
config.set("batch", "channels", str(self.channels))
config.set("batch", "jpeg_mode", str(self.jpeg_mode))
config.set("batch", "max_value", str(self.max_value))
with open(batch_dir + "/batch_images.ini", "w") as configfile:
config.write(configfile)
def load_batch_train(self, batch_dir):
""" load already built batch images. """
config = configparser.ConfigParser()
config.read(batch_dir + "/batch_images.ini")
count = config.getint("batch", "count")
self.input.count = count
self.true.count = count
def load_batch_test(self, batch_dir):
""" load already built batch images. """
config = configparser.ConfigParser()
config.read(batch_dir + "/batch_images.ini")
count = config.getint("batch", "count")
self.input.load_batch_images(batch_dir, True, count)
self.true.load_batch_images(batch_dir, False, count)
def is_batch_exist(self, batch_dir):
if not os.path.isdir(batch_dir):
return False
config = configparser.ConfigParser()
try:
with open(batch_dir + "/batch_images.ini") as f:
config.read_file(f)
if config.getint("batch", "count") <= 0:
return False
if config.getint("batch", "scale") != self.scale:
return False
if config.getint("batch", "batch_image_size") != self.batch_image_size:
return False
if config.getint("batch", "stride") != self.stride:
return False
if config.getint("batch", "channels") != self.channels:
return False
if config.getboolean("batch", "jpeg_mode") != self.jpeg_mode:
return False
if config.getfloat("batch", "max_value") != self.max_value:
return False
return True
except IOError:
return False | helper/loader.py | from helper import utility as util
import os
import configparser
import numpy as np
from scipy import misc
import random
import logging
INPUT_IMAGE_DIR = "input"
INTERPOLATED_IMAGE_DIR = "interpolated"
TRUE_IMAGE_DIR = "true"
def convert_to_multi_channel_image(multi_channel_image, image, scale):
height = multi_channel_image.shape[0]
width = multi_channel_image.shape[1]
for y in range(height):
for x in range(width):
for y2 in range(scale):
for x2 in range(scale):
multi_channel_image[y, x, y2 * scale + x2] = image[y * scale + y2, x * scale + x2, 0]
def convert_from_multi_channel_image(image, multi_channel_image, scale):
height = multi_channel_image.shape[0]
width = multi_channel_image.shape[1]
for y in range(height):
for x in range(width):
for y2 in range(scale):
for x2 in range(scale):
image[y * scale + y2, x * scale + x2, 0] = multi_channel_image[y, x, y2 * scale + x2]
def load_input_image(filename, width=0, height=0, channels=1, scale=1, alignment=0, convert_ycbcr=True,
jpeg_mode=False, print_console=True):
image = util.load_image(filename, print_console=print_console)
return build_input_image(image, width, height, channels, scale, alignment, convert_ycbcr, jpeg_mode)
def build_input_image(image, width=0, height=0, channels=1, scale=1, alignment=0, convert_ycbcr=True, jpeg_mode=False):
"""
build input image from file.
crop, adjust the image alignment for the scale factor, resize, convert color space.
"""
if width != 0 and height != 0:
if image.shape[0] != height or image.shape[1] != width:
x = (image.shape[1] - width) // 2
y = (image.shape[0] - height) // 2
image = image[y: y + height, x: x + width, :]
if image.shape[2] >= 4:
image = image[:, :, 0:3]
if alignment > 1:
image = util.set_image_alignment(image, alignment)
if scale != 1:
image = util.resize_image_by_pil(image, 1.0 / scale)
if channels == 1 and image.shape[2] == 3:
if convert_ycbcr:
image = util.convert_rgb_to_y(image, jpeg_mode=jpeg_mode)
else:
if convert_ycbcr:
image = util.convert_rgb_to_ycbcr(image, jpeg_mode=jpeg_mode)
return image
class DataSet:
def __init__(self, batch_image_size, channels=1, scale=1, max_value=255.0, alignment=0, jpeg_mode=False):
self.batch_image_size = batch_image_size
self.max_value = max_value
self.channels = channels
self.scale = scale
self.max_value = max_value
self.alignment = alignment
self.jpeg_mode = jpeg_mode
self.count = 0
self.images = None
self.quad_images = None
def load_test_image(self, filename):
image = load_input_image(filename, channels=self.channels, scale=1, alignment=self.alignment,
jpeg_mode=self.jpeg_mode, print_console=False)
if self.max_value != 255.0:
image = np.multiply(image, self.max_value / 255.0)
return image
def load_input_image(self, filename, rescale=False, resampling_method="bicubic"):
image = load_input_image(filename, channels=self.channels, scale=self.scale, alignment=self.alignment,
jpeg_mode=self.jpeg_mode, print_console=True)
if self.max_value != 255.0:
image = np.multiply(image, self.max_value / 255.0)
if rescale:
rescaled_image = util.resize_image_by_pil(image, self.scale, resampling_method=resampling_method)
return image, rescaled_image
else:
return image
def load_batch_images(self, batch_dir, input_batch, count):
print("Loading %d batch images from %s for [%s]" % (count, batch_dir, "input" if input_batch else "true"))
self.count = count
if input_batch:
self.images = np.zeros(shape=[count, self.batch_image_size, self.batch_image_size, 1]) # type: np.ndarray
else:
self.images = None
self.quad_images = np.zeros(
shape=[count, self.batch_image_size, self.batch_image_size, self.scale * self.scale]) # type: np.ndarray
for i in range(count):
if input_batch:
self.images[i] = util.load_image(batch_dir + "/" + INPUT_IMAGE_DIR + "/%06d.bmp" % i,
print_console=False)
quad_image = util.load_image(batch_dir + "/" + INTERPOLATED_IMAGE_DIR + "/%06d.bmp" % i,
print_console=False)
else:
quad_image = util.load_image(batch_dir + "/" + TRUE_IMAGE_DIR + "/%06d.bmp" % i, print_console=False)
convert_to_multi_channel_image(self.quad_images[i], quad_image, self.scale)
if i % 1000 == 0:
print('.', end='', flush=True)
print("Finished")
class DataSets:
def __init__(self, scale, batch_image_size, stride_size, channels=1,
jpeg_mode=False, max_value=255.0, resampling_method="nearest"):
self.scale = scale
self.batch_image_size = batch_image_size
self.stride = stride_size
self.channels = channels
self.jpeg_mode = jpeg_mode
self.max_value = max_value
self.resampling_method = resampling_method
self.input = DataSet(batch_image_size, channels=channels, scale=scale, alignment=scale, jpeg_mode=jpeg_mode,
max_value=max_value)
self.true = DataSet(batch_image_size, channels=channels, scale=scale, alignment=scale, jpeg_mode=jpeg_mode,
max_value=max_value)
def build_batch(self, data_dir, batch_dir):
""" load from input files. Then save batch images on file to reduce memory consumption. """
print("Building batch images for %s..." % batch_dir)
filenames = util.get_files_in_directory(data_dir)
images_count = 0
util.make_dir(batch_dir)
util.clean_dir(batch_dir)
util.make_dir(batch_dir + "/" + INPUT_IMAGE_DIR)
util.make_dir(batch_dir + "/" + INTERPOLATED_IMAGE_DIR)
util.make_dir(batch_dir + "/" + TRUE_IMAGE_DIR)
for filename in filenames:
output_window_size = self.batch_image_size * self.scale
output_window_stride = self.stride * self.scale
input_image, input_bicubic_image = self.input.load_input_image(filename, rescale=True,
resampling_method=self.resampling_method)
test_image = self.true.load_test_image(filename)
# split into batch images
input_batch_images = util.get_split_images(input_image, self.batch_image_size, stride=self.stride)
input_bicubic_batch_images = util.get_split_images(input_bicubic_image, output_window_size,
stride=output_window_stride)
if input_batch_images is None or input_bicubic_batch_images is None:
continue
input_count = input_batch_images.shape[0]
test_batch_images = util.get_split_images(test_image, output_window_size, stride=output_window_stride)
for i in range(input_count):
# util.save_image_data(batch_dir + "/" + INPUT_IMAGE_DIR + "/%06d.npy" % images_count,
# input_batch_images[i])
# util.save_image_data(batch_dir + "/" + INTERPOLATED_IMAGE_DIR + "/%06d.npy" % images_count,
# input_bicubic_batch_images[i])
# util.save_image_data(batch_dir + "/" + TRUE_IMAGE_DIR + "/%06d.npy" % images_count,
# test_batch_images[i])
util.save_image(batch_dir + "/" + INPUT_IMAGE_DIR + "/%06d.bmp" % images_count, input_batch_images[i])
util.save_image(batch_dir + "/" + INTERPOLATED_IMAGE_DIR + "/%06d.bmp" % images_count,
input_bicubic_batch_images[i])
util.save_image(batch_dir + "/" + TRUE_IMAGE_DIR + "/%06d.bmp" % images_count, test_batch_images[i])
images_count += 1
print("%d mini-batch images are built(saved)." % images_count)
config = configparser.ConfigParser()
config.add_section("batch")
config.set("batch", "count", str(images_count))
config.set("batch", "scale", str(self.scale))
config.set("batch", "batch_image_size", str(self.batch_image_size))
config.set("batch", "stride", str(self.stride))
config.set("batch", "channels", str(self.channels))
config.set("batch", "jpeg_mode", str(self.jpeg_mode))
config.set("batch", "max_value", str(self.max_value))
with open(batch_dir + "/batch_images.ini", "w") as configfile:
config.write(configfile)
def load_batch_train(self, batch_dir):
""" load already built batch images. """
config = configparser.ConfigParser()
config.read(batch_dir + "/batch_images.ini")
count = config.getint("batch", "count")
self.input.count = count
self.true.count = count
def load_batch_test(self, batch_dir):
""" load already built batch images. """
config = configparser.ConfigParser()
config.read(batch_dir + "/batch_images.ini")
count = config.getint("batch", "count")
self.input.load_batch_images(batch_dir, True, count)
self.true.load_batch_images(batch_dir, False, count)
def is_batch_exist(self, batch_dir):
if not os.path.isdir(batch_dir):
return False
config = configparser.ConfigParser()
try:
with open(batch_dir + "/batch_images.ini") as f:
config.read_file(f)
if config.getint("batch", "count") <= 0:
return False
if config.getint("batch", "scale") != self.scale:
return False
if config.getint("batch", "batch_image_size") != self.batch_image_size:
return False
if config.getint("batch", "stride") != self.stride:
return False
if config.getint("batch", "channels") != self.channels:
return False
if config.getboolean("batch", "jpeg_mode") != self.jpeg_mode:
return False
if config.getfloat("batch", "max_value") != self.max_value:
return False
return True
except IOError:
return False | 0.546496 | 0.266612 |
import sys
import argparse
OFFSET = 0xc0e0
# Not at all sure about the max length of the binary path to be started but
LENGTH = 32
CONTENT = b'/\x00b\x00i\x00n\x00/\x00b\x00a\x00s\x00h'
def find_offset(binary):
for i in range(0, len(binary)):
if binary[i:i+len(CONTENT)] == CONTENT:
return i
return -1
def check_offset(binary):
orig = binary[OFFSET:OFFSET+len(CONTENT)]
return orig == CONTENT
def stob(val):
val = val.encode('ascii')
ret = bytearray()
for i in range(0, LENGTH):
if (i != 0 and i % 2 == 1) or i >= 2 * len(val):
ret += b'\x00'
continue
if i % 2 == 0:
ret += val[i // 2].to_bytes(1, byteorder=sys.byteorder)
assert len(ret) == LENGTH
return ret
def parse_args(argv):
parser = argparse.ArgumentParser(description='''Patch Windows Subsystem for Linux\'s bash.exe to be able run any
Linux executable file present in the WSL container.''')
parser.add_argument('binary', type=argparse.FileType('rb'),
help='Path to the original WSL bash.exe')
parser.add_argument('path', type=str,
help='New path to be applied to the binary')
parser.add_argument('-o', '--output', type=str, default='launcher.exe',
help='Where to output the newly created binary')
return parser.parse_args()
def main(argv, argc):
args = parse_args(argv)
print("Opened '%s' for reading" % args.binary.name)
content = bytearray(args.binary.read())
offset = OFFSET if check_offset(content) else find_offset(content)
if offset != -1:
print('Found valid char sequence at %s' % hex(offset))
else:
print('ERROR: Unable to find valid char sequence. Cannot continue!')
exit(1)
if 2 * len(args.path) >= LENGTH:
print('ERROR: value to be patched in is too long. must not exceed %d characters' % (LENGTH // 2))
exit(1)
print('Patching file and writing to \'%s\'... ' % args.output, end='')
content[offset:offset+LENGTH] = stob(args.path)
with open(args.output, 'wb') as f:
f.write(content)
print('wrote %d bytes' % f.tell())
if __name__ == '__main__':
main(sys.argv, len(sys.argv)) | wsl-bashexe-patcher.py | import sys
import argparse
OFFSET = 0xc0e0
# Not at all sure about the max length of the binary path to be started but
LENGTH = 32
CONTENT = b'/\x00b\x00i\x00n\x00/\x00b\x00a\x00s\x00h'
def find_offset(binary):
for i in range(0, len(binary)):
if binary[i:i+len(CONTENT)] == CONTENT:
return i
return -1
def check_offset(binary):
orig = binary[OFFSET:OFFSET+len(CONTENT)]
return orig == CONTENT
def stob(val):
val = val.encode('ascii')
ret = bytearray()
for i in range(0, LENGTH):
if (i != 0 and i % 2 == 1) or i >= 2 * len(val):
ret += b'\x00'
continue
if i % 2 == 0:
ret += val[i // 2].to_bytes(1, byteorder=sys.byteorder)
assert len(ret) == LENGTH
return ret
def parse_args(argv):
parser = argparse.ArgumentParser(description='''Patch Windows Subsystem for Linux\'s bash.exe to be able run any
Linux executable file present in the WSL container.''')
parser.add_argument('binary', type=argparse.FileType('rb'),
help='Path to the original WSL bash.exe')
parser.add_argument('path', type=str,
help='New path to be applied to the binary')
parser.add_argument('-o', '--output', type=str, default='launcher.exe',
help='Where to output the newly created binary')
return parser.parse_args()
def main(argv, argc):
args = parse_args(argv)
print("Opened '%s' for reading" % args.binary.name)
content = bytearray(args.binary.read())
offset = OFFSET if check_offset(content) else find_offset(content)
if offset != -1:
print('Found valid char sequence at %s' % hex(offset))
else:
print('ERROR: Unable to find valid char sequence. Cannot continue!')
exit(1)
if 2 * len(args.path) >= LENGTH:
print('ERROR: value to be patched in is too long. must not exceed %d characters' % (LENGTH // 2))
exit(1)
print('Patching file and writing to \'%s\'... ' % args.output, end='')
content[offset:offset+LENGTH] = stob(args.path)
with open(args.output, 'wb') as f:
f.write(content)
print('wrote %d bytes' % f.tell())
if __name__ == '__main__':
main(sys.argv, len(sys.argv)) | 0.21626 | 0.152663 |
import numpy as np
import pandas as pd
import cv2
import os
import imageio
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
import pickle
import itertools
import json
import glob
import collections
import shutil
import pickle
import re
# --- module-wide constants ----------------------------------------------
DTYPE = "float32"  # NumPy dtype used for all temperature arrays
PD_SEP = ","  # CSV field separator
PD_NAN = np.inf  # placeholder written to the PTAT column when no value is available
PD_DTYPE = np.float32  # dtype applied to CSV rows before writing
READ_CSV_ARGS = {"skiprows": 1}  # skip the legacy "HTPA 32x32d" first row when reading CSV
PD_TIME_COL = "Time (sec)"  # CSV column holding frame timestamps
PD_PTAT_COL = "PTAT"  # CSV column holding the sensor PTAT reading
HTPA_UDP_MODULE_WEBCAM_IMG_EXT = "jpg"  # extension used for webcam frames by the UDP module
# maps a file extension to the reader/writer family that handles it
READERS_EXTENSIONS_DICT = {
    "txt": "txt",
    "csv": "csv",
    "pickle": "pickle",
    "pkl": "pickle",
    "p": "pickle",
}
# extensions accepted by read_tpa_file()/write_tpa_file()
SUPPORTED_EXTENSIONS = list(READERS_EXTENSIONS_DICT.keys())
def remove_extension(filepath):
    """Return *filepath* with its last extension stripped.

    Uses os.path.splitext so paths whose directories contain dots
    (e.g. "./run.1/file.txt") are handled correctly; the previous
    naive split(".")[0] returned garbage for such paths.
    """
    return os.path.splitext(filepath)[0]
def get_extension(filepath):
    """Return the last extension of *filepath* without the leading dot.

    Uses os.path.splitext so dotted directory names don't confuse the
    result; the previous split(".")[1] broke on e.g. "./run.1/f.txt".
    """
    return os.path.splitext(filepath)[1][1:]
def ensure_path_exists(path):
    """Create directory *path* (and any missing parents) if it does not exist."""
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs
    os.makedirs(path, exist_ok=True)
def ensure_parent_exists(path):
    """Make sure the directory that will contain *path* exists, creating it if needed."""
    parent_dir = os.path.dirname(path)
    ensure_path_exists(parent_dir)
def read_tpa_file(filepath: str, array_size: int = 32):
    """
    Read a Heimann HTPA file into a NumPy array shaped [frames, height, width].

    Supported formats: see SUPPORTED_EXTENSIONS.

    Parameters
    ----------
    filepath : str
    array_size : int, optional
        Sensor edge length (txt files only; currently not forwarded).

    Returns
    -------
    np.array
        Temperature sequence shaped [frames, height, width].
    list
        Timestamps of the corresponding frames.
    """
    ext = get_extension(filepath).lower()
    assert ext in SUPPORTED_EXTENSIONS
    # dispatch on the reader family associated with this extension
    dispatch = {"txt": txt2np, "csv": csv2np, "pickle": pickle2np}
    reader = dispatch[READERS_EXTENSIONS_DICT[ext]]
    return reader(filepath)
def write_tpa_file(filepath: str, array, timestamps: list, header=None) -> bool:
    """
    Save a Heimann HTPA sequence [frames, height, width] to file.

    The output format is chosen from the file extension
    (see SUPPORTED_EXTENSIONS). The optional *header* is supported
    by the txt writer only.

    Parameters
    ----------
    filepath : str
        Destination filepath, including the file name.
    array : np.array
        Temperature sequence shaped [frames, height, width].
    timestamps : list
        Timestamps of the corresponding frames.
    header : str, optional
        Header line (txt output only).
    """
    ext = get_extension(filepath).lower()
    assert ext in SUPPORTED_EXTENSIONS
    fmt = READERS_EXTENSIONS_DICT[ext]
    if fmt == 'txt':
        return write_np2txt(filepath, array, timestamps, header=header)
    assert not header  # csv/pickle writers don't support a header
    if fmt == 'csv':
        return write_np2csv(filepath, array, timestamps)
    return write_np2pickle(filepath, array, timestamps)
def modify_txt_header(filepath : str, new_header):
    """Replace the first line (header) of a TPA .txt file in place.

    Parameters
    ----------
    filepath : str
        Path to an existing TPA .txt file.
    new_header : str
        New header text; exactly one trailing newline is enforced.
    """
    header = new_header.rstrip() + "\n"
    with open(filepath) as f:
        lines = f.readlines()
    if lines:
        lines[0] = header
    else:
        # previous code raised IndexError on an empty file; just write the header
        lines = [header]
    with open(filepath, "w") as f:
        f.writelines(lines)
def read_txt_header(filepath: str):
    """
    Return the first line (header) of a Heimann HTPA .txt file,
    stripped of trailing whitespace.

    Parameters
    ----------
    filepath : str

    Returns
    -------
    str
        TPA file header.
    """
    with open(filepath) as src:
        first_line = src.readline()
    return first_line.rstrip()
def txt2np(filepath: str, array_size: int = 32):
    """
    Parse a Heimann HTPA .txt file into a NumPy array [frames, height, width].

    Parameters
    ----------
    filepath : str
    array_size : int, optional
        Sensor array edge length (default 32).

    Returns
    -------
    np.array
        Temperature sequence in deg C, shaped [frames, height, width].
    list
        Frame timestamps.
    """
    frames = []
    timestamps = []
    pixels = array_size ** 2
    with open(filepath) as f:
        f.readline()  # discard the header line
        for line in f:
            if not line:
                continue
            tokens = line.split(" ")
            # first `pixels` tokens are raw temperatures in centi-degrees,
            # stored column-major; last token is the timestamp
            raw = np.array([int(T) for T in tokens[0:pixels]], dtype=DTYPE)
            frame = raw.reshape([array_size, array_size], order="F")
            frame *= 1e-2
            frames.append(frame)
            timestamps.append(float(tokens[-1]))
    stacked = np.array(frames)
    # the sensor data is stored rotated: rotate 90 deg CW to upright
    stacked = np.rot90(stacked, k=-1, axes=(1, 2))
    return stacked, timestamps
def write_np2txt(output_fp: str, array, timestamps: list, header: str = None) -> bool:
    """
    Save a Heimann HTPA sequence [frames, height, width] as a .txt file.

    Parameters
    ----------
    output_fp : str
        Destination filepath, including the file name.
    array : np.array
        Temperature sequence shaped [frames, height, width].
    timestamps : list
        Timestamps matching the frames.
    header : str, optional
        Header line; defaults to "HTPA32x32d".
    """
    ensure_parent_exists(output_fp)
    # undo the 90 deg CW rotation applied when reading
    rotated = np.rot90(array, k=1, axes=(1, 2))
    header_line = (header.rstrip() + "\n") if header else "HTPA32x32d\n"
    with open(output_fp, 'w') as out:
        out.write(header_line)
        for frame, timestamp in zip(rotated, timestamps):
            # each value becomes a 4-char centi-degree token, e.g. 25.31 -> "2531"
            tokens = [("%02.2f" % v).replace(".", "")[:4] for v in frame.flatten("F")]
            payload = "".join(token + " " for token in tokens)
            out.write("{}t: {}\n".format(payload, timestamp))
def write_np2pickle(output_fp: str, array, timestamps: list) -> bool:
    """
    Pickle a Heimann HTPA sequence [frames, height, width] with its timestamps.

    Parameters
    ----------
    output_fp : str
        Destination filepath, including the file name.
    array : np.array
        Temperature sequence shaped [frames, height, width].
    timestamps : list
        Timestamps matching the frames.
    """
    ensure_parent_exists(output_fp)
    payload = (array, timestamps)
    with open(output_fp, "wb") as out:
        pickle.dump(payload, out)
    return True
def pickle2np(filepath: str):
    """
    Load a pickled Heimann HTPA sequence (as written by write_np2pickle).

    Parameters
    ----------
    filepath : str

    Returns
    -------
    np.array
        Temperature sequence shaped [frames, height, width].
    list
        Frame timestamps.
    """
    with open(filepath, "rb") as src:
        payload = pickle.load(src)
    frames, timestamps = payload
    return frames, timestamps
def write_np2csv(output_fp: str, array, timestamps: list) -> bool:
    """
    Convert and save Heimann HTPA NumPy array shaped [frames, height, width] to .CSV dataframe.
    CSV should preferably represent the data collected without preprocessing, cropping or any data manipulation.
    Parameters
    ----------
    output_fp : str
        Filepath to destination file, including the file name.
    array : np.array
        Temperatue distribution sequence, shaped [frames, height, width].
    timestamps : list
        List of timestamps of corresponding array frames.
    """
    ensure_parent_exists(output_fp)
    # initialize csv template (and append frames later)
    # prepend first row for compability with legacy format
    first_row = pd.DataFrame({"HTPA 32x32d": []})
    first_row.to_csv(output_fp, index=False, sep=PD_SEP)
    # column header row: timestamp, PTAT, then one "P%04d" column per pixel
    headers = {PD_TIME_COL: [], PD_PTAT_COL: []}
    df = pd.DataFrame(headers)
    for idx in range(np.prod(array.shape[1:])):
        df.insert(len(df.columns), "P%04d" % idx, [])
    df.to_csv(output_fp, mode="a", index=False, sep=PD_SEP)
    # append one data row per frame: [timestamp, PTAT placeholder, flattened pixels]
    for idx in range(array.shape[0]):
        frame = array[idx, ...]
        timestamp = timestamps[idx]
        temps = list(frame.flatten())
        row_data = [timestamp, PD_NAN]  # PD_NAN: no PTAT value available here
        row_data.extend(temps)
        row = pd.DataFrame([row_data])
        row = row.astype(PD_DTYPE)
        row.to_csv(output_fp, mode="a", header=False, sep=PD_SEP, index=False)
    return True
def csv2np(csv_fp: str):
    """
    Load a legacy-format .CSV file into a Heimann HTPA array.

    Parameters
    ----------
    csv_fp : str
        Filepath of the csv file to read.

    Returns
    -------
    array : np.array
        Temperature sequence shaped [frames, height, width].
    timestamps : list
        Frame timestamps.
    """
    df = pd.read_csv(csv_fp, **READ_CSV_ARGS)
    timestamps = df[PD_TIME_COL]
    # drop metadata columns; the rest are the flattened pixel values
    flat = df.drop([PD_TIME_COL, PD_PTAT_COL], axis=1)
    array = reshape_flattened_frames(flat.to_numpy(dtype=DTYPE))
    return array, timestamps
def apply_heatmap(array, cv_colormap: int = cv2.COLORMAP_JET) -> np.ndarray:
    """
    Apply pseudocoloring (heatmap) to a thermal sequence. Same as np2pc().
    np2pc() is preferred.

    Parameters
    ----------
    array : np.array
        (frames, height, width)
    cv_colormap : int, optional
        OpenCV colormap id.

    Returns
    -------
    np.array
        (frames, height, width, 3) uint8 pseudocolored frames (BGR).
    """
    lo, hi = array.min(), array.max()  # renamed: min/max shadowed builtins
    # guard against a constant-valued array (previously divided by zero -> NaN cast)
    spread = (hi - lo) if hi > lo else 1
    shape = array.shape
    array_normalized = (255 * ((array - lo) / spread)).astype(np.uint8)
    heatmap_flat = cv2.applyColorMap(array_normalized.flatten(), cv_colormap)
    return heatmap_flat.reshape([shape[0], shape[1], shape[2], 3])
def np2pc(array, cv_colormap: int = cv2.COLORMAP_JET) -> np.ndarray:
    """
    Pseudocolor (heatmap) a thermal sequence; preferred alias of apply_heatmap().

    Parameters
    ----------
    array : np.array
        (frames, height, width)
    cv_colormap : int, optional

    Returns
    -------
    np.array
        (frames, height, width, channels)
    """
    return apply_heatmap(array, cv_colormap)
def save_frames(array, dir_name: str, extension: str = ".bmp") -> bool:
    """
    Save each frame of a sequence as an image "<idx><extension>" in dir_name.

    Parameters
    ----------
    array : np.array
        (frames, height, width, channels)
    dir_name : str
        Output directory, created (including parents) if missing.
    extension : str, optional
        Image file extension, default ".bmp".

    Returns
    -------
    bool
        True if success
    """
    # makedirs(exist_ok=True) also creates missing parents (os.mkdir did not)
    os.makedirs(dir_name, exist_ok=True)
    for idx, frame in enumerate(array):
        cv2.imwrite(os.path.join(dir_name, "%d" % idx + extension), frame)
    return True
def flatten_frames(array):
    """
    Flatten [frames, height, width] into [frames, height*width].

    Parameters
    ----------
    array : np.array
        (frames, height, width)

    Returns
    -------
    np.array
        flattened array (frames, height*width)
    """
    frame_count, height, width = array.shape
    return array.reshape((frame_count, height * width))
def write_pc2gif(array, fp: str, fps=10, loop: int = 0, duration=None):
    """
    Write pseudocolored frames (frames, height, width, channels) to a .gif file.

    Parameters
    ----------
    array : np.array
        Pseudocolored data (frames, height, width, channels), BGR order.
    fp : str
        The filepath to write to.
    fps : float, optional
        Used to derive a per-frame duration when *duration* is not given;
        default 10, approx. a typical thermopile sensor array FPS value.
    loop : int, optional
        Number of iterations; 0 means loop indefinitely.
    duration : float, list, optional
        Seconds per frame — one value for all frames or one per frame.
        GIF stores delays in hundredths of a second, limiting precision.

    Returns
    -------
    bool
        True if success.
    """
    ensure_parent_exists(fp)
    frame_duration = duration if duration else 1 / fps
    with imageio.get_writer(fp, mode="I", duration=frame_duration, loop=loop) as gif:
        for frame in array:
            # imageio expects RGB; flip the BGR channel order
            gif.append_data(frame[:, :, ::-1])
    return True
def timestamps2frame_durations(timestamps: list, last_frame_duration=None) -> list:
    """
    Convert N timestamps into N frame durations for write_pc2gif().

    Parameters
    ----------
    timestamps : list
        Frame timestamps.
    last_frame_duration : float, optional
        Duration of the final frame (N timestamps only define N-1 gaps);
        defaults to duplicating the last computed gap.

    Returns
    -------
    list
        Frame durations, same length as *timestamps*.
    """
    durations = [t2 - t1 for t1, t2 in zip(timestamps, timestamps[1:])]
    # compare against None so an explicit 0 duration is honored
    # (previously `if not last_frame_duration` silently replaced 0)
    if last_frame_duration is None:
        last_frame_duration = durations[-1]
    durations.append(last_frame_duration)
    return durations
def reshape_flattened_frames(array):
    """
    Reshape [frames, height*width] back into [frames, height, width],
    assuming square frames (height == width).

    Parameters
    ----------
    array : np.array
        flattened array (frames, height*width)

    Returns
    -------
    np.array
        reshaped array (frames, height, width)
    """
    _, pixel_count = array.shape
    edge = int(pixel_count ** (1 / 2))  # square frames
    return array.reshape((-1, edge, edge))
def crop_center(array, crop_height=None, crop_width=None):
    """
    Crop the center patch out of an infrared image sequence.

    Parameters
    ---------
    array : np.array
        (frames, height, width) or (frames, height, width, channel)
    crop_height : int, optional
        Patch height; -1 keeps the full height. If both sizes are
        omitted, a centered square over the smaller dimension is cropped.
    crop_width : int, optional
        Patch width; -1 keeps the full width. If both sizes are
        omitted, a centered square over the smaller dimension is cropped.

    Returns
    -------
    np.array
        cropped array (frames, crop_height, crop_width)
    """
    _, height, width = array.shape[:3]
    if not crop_height and not crop_width:
        # neither size given: centered square over the smaller dimension
        crop_height = crop_width = height if (height < width) else width
    elif not crop_width:
        crop_width = crop_height
    elif not crop_height:
        crop_height = crop_width
    if crop_height == -1:
        crop_height = height
    if crop_width == -1:
        crop_width = width
    start_y = height // 2 - crop_height // 2
    start_x = width // 2 - crop_width // 2
    return array[:, start_y:start_y + crop_height, start_x:start_x + crop_width]
def match_timesteps(*timestamps_lists):
    """
    Align several timestamp lists to the shortest one.

    Parameters
    ---------
    *timestamps_lists : list, np.array
        List-likes containing timestamps.

    Returns
    -------
    list
        Per-input index lists; indexing each input with its index list
        yields sequences aligned in time.

    Example:
        ts1 = [1, 2, 3, 4, 5]
        ts2 = [1.1, 2.1, 2.9, 3.6, 5.1, 6, 6.1]
        ts3 = [0.9, 1.2, 2, 3, 4.1, 4.2, 4.3, 4.9]
        idx1, idx2, idx3 = match_timesteps(ts1, ts2, ts3)
        now ts1[idx1], ts2[idx2] and ts3[idx3] will be aligned
    """
    columns = [np.array(ts).reshape(-1, 1) for ts in timestamps_lists]
    ref_idx = np.array([len(col) for col in columns]).argmin()
    reference = columns[ref_idx]  # shortest list is the alignment reference
    result = []
    for pos, col in enumerate(columns):
        if pos == ref_idx:
            result.append(list(range(len(reference))))
        else:
            # nearest-neighbour match of every reference timestamp
            result.append(list(cdist(reference, col).argmin(axis=-1)))
    return result
def match_timesteps2(*timestamps_lists):
    #XXX Not finished
    """
    Aligns timesteps of given timestamps.
    Parameters
    ---------
    *timestamps_list : list, np.array
        lists-like data containing timestamps
    Returns
    -------
    list
        list of indices of timesteps corresponding to input lists so that input lists are aligned
    Example:
        ts1 = [1, 2, 3, 4, 5]
        ts2 = [1.1, 2.1, 2.9, 3.6, 5.1, 6, 6.1]
        ts3 = [0.9, 1.2, 2, 3, 4.1, 4.2, 4.3, 4.9]
        idx1, idx2, idx3 = match_timesteps(ts1, ts2, ts3)
        now ts1[idx1], ts2[idx2] and ts3[idx3] will be aligned
    """
    ts_list = [np.array(ts).reshape(-1, 1) for ts in timestamps_lists]
    #min_len_idx = np.array([len(ts) for ts in ts_list]).argmin()
    #min_len_ts = ts_list[min_len_idx]
    # For every list, find its worst matching error against each other list.
    max_error_list = [0] * len(ts_list)
    for idx, ts in enumerate(ts_list):
        for idx2, ts2 in enumerate(ts_list):
            if (idx == idx2):
                continue
            # nearest-neighbour match of ts against ts2
            tmp_indexes = list(cdist(ts, ts2).argmin(axis=-1))
            diff = ts - ts2[tmp_indexes]
            # NOTE(review): np.abs(np.max(diff)) looks like it was meant to be
            # np.max(np.abs(diff)) — as written, a large negative error can be
            # masked by a smaller positive one. Function is marked unfinished.
            max_error = np.abs(np.max(diff))
            current_max = max_error_list[idx]
            if (max_error > current_max):
                max_error_list[idx] = max_error
    # Use the list with the smallest worst-case error as the alignment reference.
    min_error_idx = np.argmin(max_error_list)
    indices_list = [None] * len(ts_list)
    min_error_ts = ts_list[min_error_idx]
    for idx, ts in enumerate(ts_list):
        if (idx == min_error_idx):
            indices_list[idx] = list(range(len(min_error_ts)))
        else:
            indices_list[idx] = list(cdist(min_error_ts, ts).argmin(axis=-1))
    return indices_list
def resample_np_tuples(arrays, indices=None, step=None):
    """
    Resample a collection of 3D arrays.

    Parameters
    ---------
    arrays : list
        Arrays to resample.
    indices : list, optional
        Per-array index lists to select; takes precedence over *step*.
    step : int, optional
        Strided resampling, used only when *indices* is not given.

    Returns
    -------
    list
        List of resampled arrays (the input unchanged if neither option given).
    """
    if indices:
        if len(arrays) != len(indices):
            raise ValueError('Iterables have different lengths')
        return [arr[sel] for arr, sel in zip(arrays, indices)]
    if step:
        return [arr[range(0, len(arr), step)] for arr in arrays]
    return arrays
def save_temperature_histogram(array, fp="histogram.png", bins=None, xlabel='Temperature grad. C', ylabel='Number of pixels', title='Histogram of temperature', grid=True, mu=False, sigma=False):
    """
    Plot and save a histogram of measured temperatures.

    Parameters
    ---------
    array : np.array
        (frames, height, width)
    fp : str
        filepath to save plotted histogram to
    bins, xlabel, ylabel, title, grid
        as in pyplot
    mu, sigma : bool
        append the mean / standard deviation to the title

    Returns
    -------
    bool
        True if success.
    """
    data = array.flatten()
    plt.hist(data, bins=bins)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # raw strings: '\m', '\d', '\s' are invalid escape sequences otherwise
    # (SyntaxWarning on recent Pythons); the rendered text is unchanged.
    mu_text = r'$\mu={0:.2f} \degree C$'.format(data.mean()) if mu else ''
    separator = ', ' if (mu and sigma) else ''
    sigma_text = r'$\sigma={0:.2f} \degree C$'.format(data.std()) if sigma else ''
    plt.title("{} {}".format(title, mu_text + separator + sigma_text))
    plt.grid(grid)
    plt.savefig(fp)
    plt.close('all')
    return True
def resample_timestamps(timestamps, indices=None, step=None):
    """
    Resample timestamp lists, mirroring resample_np_tuples().

    Parameters
    ---------
    timestamps : list
        Lists of timestamps to resample.
    indices : list, optional
        Per-list index selections; takes precedence over *step*.
    step : int, optional
        Strided resampling, used only when *indices* is not given.

    Returns
    -------
    list
        List of resampled timestamp lists.
    """
    as_arrays = [np.array(ts) for ts in timestamps]
    resampled = resample_np_tuples(as_arrays, indices, step)
    return [list(ts) for ts in resampled]
def debug_HTPA32x32d_txt(filepath: str, array_size=32):
    """
    Debug a Heimann HTPA .txt file by attempting the same conversion as
    txt2np() and reporting where parsing fails.

    Parameters
    ----------
    filepath : str
    array_size : int, optional

    Returns
    -------
    int
        Number of the first line that raises an error, -1 if no error.
    """
    with open(filepath) as f:
        line_n = 1
        _ = f.readline()  # skip header
        line = "dummy line"
        frames = []
        timestamps = []
        while line:
            line_n += 1
            line = f.readline()
            if line:
                try:
                    split = line.split(" ")
                    frame = split[0: array_size ** 2]
                    timestamp = split[-1]
                    frame = np.array([int(T) for T in frame], dtype=DTYPE)
                    frame = frame.reshape([array_size, array_size], order="F")
                    frame *= 1e-2
                    frames.append(frame)
                    timestamps.append(float(timestamp))
                except Exception:  # narrowed from bare except
                    # re-parse token by token to locate the offending value
                    split = line.split(" ")
                    frame = split[0: array_size ** 2]
                    timestamp = split[-1]
                    T_idx = 0
                    for T in frame:
                        try:
                            _ = int(T)
                        except Exception:
                            break
                        T_idx += 1
                    print("{} caused error at line {} (t: {}), bit {} (= {})".format(
                        filepath, line_n, timestamp, T_idx, frame[T_idx]))
                    # print neighbouring tokens for context
                    for idx in range(-3, 3 + 1):
                        try:
                            print("bit {}: {}".format(
                                T_idx-idx, frame[T_idx-idx]))
                        except Exception:
                            pass
                    return line_n
    # same post-processing as txt2np (results are discarded; kept so a
    # conversion error here would also be surfaced while debugging)
    frames = np.array(frames)
    # the array needs rotating 90 CW
    frames = np.rot90(frames, k=-1, axes=(1, 2))
    return -1
import pandas as pd
import cv2
import os
import imageio
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
import pickle
import itertools
import json
import glob
import collections
import shutil
import pickle
import re
# NOTE(review): everything from here on duplicates definitions that already
# appear earlier in this file (apparent dataset-join residue). These
# redefinitions silently shadow the originals; consider deleting the block.
DTYPE = "float32"
PD_SEP = ","
PD_NAN = np.inf
PD_DTYPE = np.float32
READ_CSV_ARGS = {"skiprows": 1}
PD_TIME_COL = "Time (sec)"
PD_PTAT_COL = "PTAT"
HTPA_UDP_MODULE_WEBCAM_IMG_EXT = "jpg"
READERS_EXTENSIONS_DICT = {
    "txt": "txt",
    "csv": "csv",
    "pickle": "pickle",
    "pkl": "pickle",
    "p": "pickle",
}
SUPPORTED_EXTENSIONS = list(READERS_EXTENSIONS_DICT.keys())
# NOTE(review): duplicate of remove_extension defined earlier in this file.
def remove_extension(filepath):
    # strips everything after the first "." — breaks on dotted directory names
    return filepath.split(".")[0]
# NOTE(review): duplicate of get_extension defined earlier in this file.
def get_extension(filepath):
    # returns the token after the first "." — breaks on dotted directory names
    return filepath.split(".")[1]
# NOTE(review): duplicate of ensure_path_exists defined earlier in this file.
def ensure_path_exists(path):
    if not os.path.exists(path):
        os.makedirs(path)
# NOTE(review): duplicate of ensure_parent_exists defined earlier in this file.
def ensure_parent_exists(path):
    ensure_path_exists(os.path.dirname(path))
# NOTE(review): duplicate of read_tpa_file defined earlier in this file.
def read_tpa_file(filepath: str, array_size: int = 32):
    """
    Convert Heimann HTPA file to NumPy array shaped [frames, height, width].
    Currently supported: see SUPPORTED_EXTENSIONS flag
    Parameters
    ----------
    filepath : str
    array_size : int, optional (for txt files only)
    Returns
    -------
    np.array
        3D array of temperature distribution sequence, shaped [frames, height, width].
    list
        list of timestamps
    """
    extension_lowercase = get_extension(filepath).lower()
    assert (extension_lowercase in SUPPORTED_EXTENSIONS)
    reader = READERS_EXTENSIONS_DICT[extension_lowercase]
    if reader == 'txt':
        return txt2np(filepath)
    if reader == 'csv':
        return csv2np(filepath)
    if reader == 'pickle':
        return pickle2np(filepath)
# NOTE(review): duplicate of write_tpa_file defined earlier in this file.
def write_tpa_file(filepath: str, array, timestamps: list, header=None) -> bool:
    """
    Convert and save Heimann HTPA NumPy array shaped [frames, height, width] to a txt file.
    Currently supported: see SUPPORTED_EXTENSIONS flag
    Parameters
    ----------
    filepath : str
        Filepath to destination file, including the file name.
    array : np.array
        Temperatue distribution sequence, shaped [frames, height, width].
    timestamps : list
        List of timestamps of corresponding array frames.
    """
    extension_lowercase = get_extension(filepath).lower()
    assert (extension_lowercase in SUPPORTED_EXTENSIONS)
    writer = READERS_EXTENSIONS_DICT[extension_lowercase]
    if writer == 'txt':
        return write_np2txt(filepath, array, timestamps, header=header)
    if writer == 'csv':
        assert not header
        return write_np2csv(filepath, array, timestamps)
    if writer == 'pickle':
        assert not header
        return write_np2pickle(filepath, array, timestamps)
# NOTE(review): duplicate of modify_txt_header defined earlier in this file.
def modify_txt_header(filepath : str, new_header):
    header = new_header.rstrip()
    header += "\n"
    with open(filepath) as f:
        lines = f.readlines()
    lines[0] = header
    with open(filepath, "w") as f:
        f.writelines(lines)
# NOTE(review): duplicate of read_txt_header defined earlier in this file.
def read_txt_header(filepath: str):
    """
    Read Heimann HTPA .txt header.
    Parameters
    ----------
    filepath : str
    Returns
    -------
    str
        TPA file header
    """
    with open(filepath) as f:
        header = f.readline().rstrip()
    return header
# NOTE(review): duplicate of txt2np defined earlier in this file.
def txt2np(filepath: str, array_size: int = 32):
    """
    Convert Heimann HTPA .txt to NumPy array shaped [frames, height, width].
    Parameters
    ----------
    filepath : str
    array_size : int, optional
    Returns
    -------
    np.array
        3D array of temperature distribution sequence, shaped [frames, height, width].
    list
        list of timestamps
    """
    with open(filepath) as f:
        # discard the first line
        _ = f.readline()
        # read line by line now
        line = "dummy line"
        frames = []
        timestamps = []
        while line:
            line = f.readline()
            if line:
                split = line.split(" ")
                frame = split[0: array_size ** 2]
                timestamp = split[-1]
                frame = np.array([int(T) for T in frame], dtype=DTYPE)
                frame = frame.reshape([array_size, array_size], order="F")
                frame *= 1e-2
                frames.append(frame)
                timestamps.append(float(timestamp))
        frames = np.array(frames)
        # the array needs rotating 90 CW
        frames = np.rot90(frames, k=-1, axes=(1, 2))
    return frames, timestamps
# NOTE(review): duplicate of write_np2txt defined earlier in this file.
def write_np2txt(output_fp: str, array, timestamps: list, header: str = None) -> bool:
    """
    Convert and save Heimann HTPA NumPy array shaped [frames, height, width] to a txt file.
    Parameters
    ----------
    output_fp : str
        Filepath to destination file, including the file name.
    array : np.array
        Temperatue distribution sequence, shaped [frames, height, width].
    timestamps : list
        List of timestamps of corresponding array frames.
    header : str, optional
        TXT header
    """
    ensure_parent_exists(output_fp)
    frames = np.rot90(array, k=1, axes=(1, 2))
    if header:
        header = header.rstrip()
        header += "\n"
    else:
        header = "HTPA32x32d\n"
    with open(output_fp, 'w') as file:
        file.write(header)
        for step, t in zip(frames, timestamps):
            line = ""
            for val in step.flatten("F"):
                line += ("%02.2f" % val).replace(".", "")[:4] + " "
            file.write("{}t: {}\n".format(line, t))
# NOTE(review): duplicate of write_np2pickle defined earlier in this file.
def write_np2pickle(output_fp: str, array, timestamps: list) -> bool:
    """
    Convert and save Heimann HTPA NumPy array shaped [frames, height, width] to a pickle file.
    Parameters
    ----------
    output_fp : str
        Filepath to destination file, including the file name.
    array : np.array
        Temperatue distribution sequence, shaped [frames, height, width].
    timestamps : list
        List of timestamps of corresponding array frames.
    """
    ensure_parent_exists(output_fp)
    with open(output_fp, "wb") as f:
        pickle.dump((array, timestamps), f)
    return True
# NOTE(review): duplicate of pickle2np defined earlier in this file.
def pickle2np(filepath: str):
    """
    Convert Heimann HTPA .txt to NumPy array shaped [frames, height, width].
    Parameters
    ----------
    filepath : str
    Returns
    -------
    np.array
        3D array of temperature distribution sequence, shaped [frames, height, width].
    list
        list of timestamps
    """
    with open(filepath, "rb") as f:
        frames, timestamps = pickle.load(f)
    return frames, timestamps
# NOTE(review): duplicate of write_np2csv defined earlier in this file.
def write_np2csv(output_fp: str, array, timestamps: list) -> bool:
    """
    Convert and save Heimann HTPA NumPy array shaped [frames, height, width] to .CSV dataframe.
    CSV should preferably represent the data collected without preprocessing, cropping or any data manipulation.
    Parameters
    ----------
    output_fp : str
        Filepath to destination file, including the file name.
    array : np.array
        Temperatue distribution sequence, shaped [frames, height, width].
    timestamps : list
        List of timestamps of corresponding array frames.
    """
    ensure_parent_exists(output_fp)
    # initialize csv template (and append frames later)
    # prepend first row for compability with legacy format
    first_row = pd.DataFrame({"HTPA 32x32d": []})
    first_row.to_csv(output_fp, index=False, sep=PD_SEP)
    headers = {PD_TIME_COL: [], PD_PTAT_COL: []}
    df = pd.DataFrame(headers)
    for idx in range(np.prod(array.shape[1:])):
        df.insert(len(df.columns), "P%04d" % idx, [])
    df.to_csv(output_fp, mode="a", index=False, sep=PD_SEP)
    for idx in range(array.shape[0]):
        frame = array[idx, ...]
        timestamp = timestamps[idx]
        temps = list(frame.flatten())
        row_data = [timestamp, PD_NAN]
        row_data.extend(temps)
        row = pd.DataFrame([row_data])
        row = row.astype(PD_DTYPE)
        row.to_csv(output_fp, mode="a", header=False, sep=PD_SEP, index=False)
    return True
# NOTE(review): duplicate of csv2np defined earlier in this file.
def csv2np(csv_fp: str):
    """
    Read and convert .CSV dataframe to a Heimann HTPA NumPy array shaped [frames, height, width]
    Parameters
    ----------
    csv_fp : str
        Filepath to the csv file tor read.
    Returns
    -------
    array : np.array
        Temperatue distribution sequence, shape [frames, height, width].
    timestamps : list
        List of timestamps of corresponding array frames.
    """
    df = pd.read_csv(csv_fp, **READ_CSV_ARGS)
    timestamps = df[PD_TIME_COL]
    array = df.drop([PD_TIME_COL, PD_PTAT_COL], axis=1).to_numpy(dtype=DTYPE)
    array = reshape_flattened_frames(array)
    return array, timestamps
# NOTE(review): duplicate of apply_heatmap defined earlier in this file.
def apply_heatmap(array, cv_colormap: int = cv2.COLORMAP_JET) -> np.ndarray:
    """
    Applies pseudocoloring (heatmap) to a sequence of thermal distribution. Same as np2pc().
    np2pc() is preffered.
    Parameters
    ----------
    array : np.array
        (frames, height, width)
    cv_colormap : int, optional
    Returns
    -------
    np.array
        (frames, height, width, channels)
    """
    # shadows the min/max builtins; divides by zero for a constant array
    min, max = array.min(), array.max()
    shape = array.shape
    array_normalized = (255 * ((array - min) / (max - min))).astype(np.uint8)
    heatmap_flat = cv2.applyColorMap(array_normalized.flatten(), cv_colormap)
    return heatmap_flat.reshape([shape[0], shape[1], shape[2], 3])
# NOTE(review): duplicate of np2pc defined earlier in this file.
def np2pc(array, cv_colormap: int = cv2.COLORMAP_JET) -> np.ndarray:
    """
    Applies pseudocoloring (heatmap) to a sequence of thermal distribution. Same as apply_heatmap().
    np2pc() is preffered.
    Parameters
    ----------
    array : np.array
        (frames, height, width)
    cv_colormap : int, optional
    Returns
    -------
    np.array
        (frames, height, width, channels)
    """
    return apply_heatmap(array, cv_colormap)
# NOTE(review): duplicate of save_frames defined earlier in this file.
def save_frames(array, dir_name: str, extension: str = ".bmp") -> bool:
    """
    Exctracts and saves frames from a sequence array into a folder dir_name
    Parameters
    ----------
    array : np.array
        (frames, height, width, channels)
    Returns
    -------
    bool
        True if success
    """
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)
    for idx, frame in enumerate(array):
        cv2.imwrite(os.path.join(dir_name, "%d" % idx + extension), frame)
    return True
# NOTE(review): duplicate of flatten_frames defined earlier in this file.
def flatten_frames(array):
    """
    Flattens array of shape [frames, height, width] into array of shape [frames, height*width]
    Parameters
    ----------
    array : np.array
        (frames, height, width)
    Returns
    -------
    np.array
        flattened array (frames, height, width)
    """
    _, height, width = array.shape
    return array.reshape((-1, height * width))
# NOTE(review): duplicate of write_pc2gif defined earlier in this file.
def write_pc2gif(array, fp: str, fps=10, loop: int = 0, duration=None):
    """
    Converts and saves NumPy array of pseudocolored thermopile sensor array data, shaped [frames, height, width, channels], into a .gif file
    Parameters
    ----------
    array : np.array
        Pseudocolored data (frames, height, width, channels).
    fp : str
        The filepath to write to.
    fps : float, optional
        Default 10, approx. equal to a typical thermopile sensor array FPS value.
    loop : int, optional
        The number of iterations. Default 0 (meaning loop indefinitely).
    duration : float, list, optional
        The duration (in seconds) of each frame. Either specify one value
        that is used for all frames, or one value for each frame.
        Note that in the GIF format the duration/delay is expressed in
        hundredths of a second, which limits the precision of the duration. (from imageio doc)
    Returns
    -------
    bool
        True if success.
    """
    ensure_parent_exists(fp)
    if not duration:
        duration = 1 / fps
    with imageio.get_writer(fp, mode="I", duration=duration, loop=loop) as writer:
        for frame in array:
            writer.append_data(frame[:, :, ::-1])
    return True
def timestamps2frame_durations(timestamps: list, last_frame_duration=None) -> list:
    """
    Produces frame durations list to make gifs produced with write_pc2gif() more accurate temporally.
    Parameters
    ----------
    timestamps : list
        List of timestamps of corresponding array frames.
    last_frame_duration : float, optional
        List of N timestamps gives information about durations of N-1 initial frames;
        if not given, the function will duplicate the last computed duration to make
        up for the missing final frame duration.
    Returns
    -------
    list
        List of frame durations, one per input timestamp.
    """
    # No timestamps -> no frames -> no durations. The previous version
    # raised IndexError (or returned a spurious duration) for len < 2.
    if not timestamps:
        return []
    frame_durations = [x_t2 - x_t1 for x_t1,
                       x_t2 in zip(timestamps, timestamps[1:])]
    if not last_frame_duration:
        # A single timestamp yields no pairwise difference to duplicate;
        # fall back to a zero-length final frame.
        last_frame_duration = frame_durations[-1] if frame_durations else 0
    frame_durations.append(last_frame_duration)
    return frame_durations
def reshape_flattened_frames(array):
    """
    Reshapes array shaped [frames, height*width] into array of shape [frames, height, width].
    Frames are assumed to be square (height == width).
    Parameters
    ----------
    array : np.array
        flattened array (frames, height*width)
    Returns
    -------
    np.array
        reshaped array (frames, height, width)
    Raises
    ------
    ValueError
        If the per-frame element count is not a perfect square.
    """
    _, elements = array.shape
    # round() is safer than int() here: int(x ** 0.5) can truncate below
    # the true root for large perfect squares due to float error.
    height = int(round(elements ** 0.5))
    width = height
    if height * width != elements:
        # Previously a non-square frame size could silently reshape into a
        # wrong number of frames whenever it happened to divide evenly.
        raise ValueError("frame size %d is not a perfect square" % elements)
    return array.reshape((-1, height, width))
def crop_center(array, crop_height=None, crop_width=None):
    """
    Crops the center portion of an infrared sensor array image sequence.
    Parameters
    ---------
    array : np.array
        (frames, height, width) or (frames, height, width, channel)
    crop_height : int, optional
        Height of the cropped patch; -1 means the full input height.
        If both crop_height and crop_width are None the image is cropped
        to a centered square sized by the smaller spatial dimension.
    crop_width : int, optional
        Width of the cropped patch; -1 means the full input width.
        If both crop_height and crop_width are None the image is cropped
        to a centered square sized by the smaller spatial dimension.
    Returns
    -------
    np.array
        cropped array (frames, crop_height, crop_width)
    """
    height, width = array.shape[1:3]
    # Neither dimension requested: centered square over the smaller side.
    if not (crop_width or crop_height):
        crop_width = crop_height = min(height, width)
    # Exactly one given: mirror it onto the missing dimension.
    crop_width = crop_width or crop_height
    crop_height = crop_height or crop_width
    # -1 acts as "keep the full extent" along that axis.
    if crop_height == -1:
        crop_height = height
    if crop_width == -1:
        crop_width = width
    top = height // 2 - crop_height // 2
    left = width // 2 - crop_width // 2
    return array[:, top:top + crop_height, left:left + crop_width]
def match_timesteps(*timestamps_lists):
    """
    Aligns timesteps of given timestamps.
    Parameters
    ---------
    *timestamps_lists : list, np.array
        list-like data containing timestamps
    Returns
    -------
    list
        list of index lists, one per input, so that indexing each input
        with its list yields sequences aligned by nearest timestamp
    Example:
        ts1 = [1, 2, 3, 4, 5]
        ts2 = [1.1, 2.1, 2.9, 3.6, 5.1, 6, 6.1]
        ts3 = [0.9, 1.2, 2, 3, 4.1, 4.2, 4.3, 4.9]
        idx1, idx2, idx3 = match_timesteps(ts1, ts2, ts3)
        now ts1[idx1], ts2[idx2] and ts3[idx3] will be aligned
    """
    columns = [np.array(ts).reshape(-1, 1) for ts in timestamps_lists]
    # The shortest sequence becomes the reference; every other sequence is
    # matched to it by nearest timestamp (first match wins on ties).
    ref_idx = min(range(len(columns)), key=lambda i: len(columns[i]))
    reference = columns[ref_idx]
    matched = []
    for i, col in enumerate(columns):
        if i == ref_idx:
            matched.append(list(range(len(reference))))
        else:
            matched.append(list(cdist(reference, col).argmin(axis=-1)))
    return matched
def match_timesteps2(*timestamps_lists):
    #XXX Not finished
    """
    Aligns timesteps of given timestamps.
    Experimental variant of match_timesteps(): instead of always taking the
    shortest list as the reference, it tries to pick the list whose
    worst-case nearest-neighbour error against all the others is smallest.
    Parameters
    ---------
    *timestamps_list : list, np.array
        lists-like data containing timestamps
    Returns
    -------
    list
        list of indices of timesteps corresponding to input lists so that input lists are aligned
    Example:
    ts1 = [1, 2, 3, 4, 5]
    ts2 = [1.1, 2.1, 2.9, 3.6, 5.1, 6, 6.1]
    ts3 = [0.9, 1.2, 2, 3, 4.1, 4.2, 4.3, 4.9]
    idx1, idx2, idx3 = match_timesteps(ts1, ts2, ts3)
    now ts1[idx1], ts2[idx2] and ts3[idx3] will be aligned
    """
    ts_list = [np.array(ts).reshape(-1, 1) for ts in timestamps_lists]
    #min_len_idx = np.array([len(ts) for ts in ts_list]).argmin()
    #min_len_ts = ts_list[min_len_idx]
    # max_error_list[i]: worst nearest-match error when list i is reference
    max_error_list = [0] * len(ts_list)
    for idx, ts in enumerate(ts_list):
        for idx2, ts2 in enumerate(ts_list):
            if (idx == idx2):
                continue
            tmp_indexes = list(cdist(ts, ts2).argmin(axis=-1))
            diff = ts - ts2[tmp_indexes]
            # NOTE(review): abs(max(diff)) ignores large *negative* errors;
            # np.max(np.abs(diff)) was probably intended — confirm before use.
            max_error = np.abs(np.max(diff))
            current_max = max_error_list[idx]
            if (max_error > current_max):
                max_error_list[idx] = max_error
    # Use the list with the smallest worst-case error as the reference.
    min_error_idx = np.argmin(max_error_list)
    indices_list = [None] * len(ts_list)
    min_error_ts = ts_list[min_error_idx]
    for idx, ts in enumerate(ts_list):
        if (idx == min_error_idx):
            indices_list[idx] = list(range(len(min_error_ts)))
        else:
            indices_list[idx] = list(cdist(min_error_ts, ts).argmin(axis=-1))
    return indices_list
def resample_np_tuples(arrays, indices=None, step=None):
    """
    Resampling for 3D arrays.
    Parameters
    ---------
    arrays : list
        arrays to resample
    indices : list, optional
        list of index lists applied to the corresponding arrays
    step : int, optional
        resampling with a step; when indices are given they take precedence
        and step is ignored
    Returns
    -------
    list
        list of resampled arrays (inputs returned unchanged when neither
        indices nor step is given)
    """
    if indices:
        if len(arrays) != len(indices):
            raise ValueError('Iterables have different lengths')
        return [source[ids] for source, ids in zip(arrays, indices)]
    if step:
        # Fancy indexing (not a slice) so each result is a fresh copy,
        # matching the indices-based branch above.
        return [source[range(0, len(source), step)] for source in arrays]
    return arrays
def save_temperature_histogram(array, fp="histogram.png", bins=None, xlabel='Temperature grad. C', ylabel='Number of pixels', title='Histogram of temperature', grid=True, mu=False, sigma=False):
    """
    Saves a histogram of measured temperatures
    Parameters
    ---------
    array : np.array
        (frames, height, width)
    fp : str
        filepath to save plotted histogram to
    bins, xlabel, ylabel, title, grid
        as in pyplot
    mu : bool, optional
        If True, annotate the title with the mean temperature.
    sigma : bool, optional
        If True, annotate the title with the standard deviation.
    Returns
    -------
    bool
        True if success
    """
    data = array.flatten()
    hist = plt.hist(data, bins=bins)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # Raw strings: \m and \d are invalid Python escapes (DeprecationWarning,
    # a SyntaxError in future versions); the backslashes belong to the
    # matplotlib mathtext markup, not to Python. Output text is unchanged.
    text = r'{}{}{}'.format(r'$\mu={0:.2f} \degree C$'.format(data.mean()) if mu else '', ', ' if (
        mu and sigma) else '', r'$\sigma={0:.2f} \degree C$'.format(data.std()) if sigma else '')
    plt.title("{} {}".format(title, text))
    plt.grid(grid)
    plt.savefig(fp)
    # close('all') frees figure memory when called repeatedly in batch runs
    plt.close('all')
    return True
def resample_timestamps(timestamps, indices=None, step=None):
    """
    Resampling for lists of timestamps.
    Parameters
    ---------
    timestamps : list
        list of timestamp lists to resample
    indices : list, optional
        list of index lists applied to the corresponding timestamp lists
    step : int, optional
        resampling with a step, if given indices will be ignored
    Returns
    -------
    list
        list of resampled timestamp lists
    """
    # Docstring fix: this resamples timestamp lists, not 3-D arrays (the
    # previous text was copy-pasted from resample_np_tuples). Reuse the
    # array resampler, then convert the results back to plain lists.
    ts_array = [np.array(ts) for ts in timestamps]
    return [list(ts) for ts in resample_np_tuples(ts_array, indices, step)]
def debug_HTPA32x32d_txt(filepath: str, array_size=32):
    """
    Debug Heimann HTPA .txt by attempting to convert to NumPy array shaped [frames, height, width].
    Parameters
    ----------
    filepath : str
        Path to the .txt capture; the first line is a header and is skipped.
    array_size : int, optional
        Sensor edge length in pixels; each data line is expected to carry
        array_size**2 integer readings followed by a timestamp.
    Returns
    -------
    int
        line that raises error, -1 if no error
    """
    with open(filepath) as f:
        line_n = 1
        # discard the header line
        _ = f.readline()
        line = "dummy line"
        frames = []
        timestamps = []
        while line:
            line_n += 1
            line = f.readline()
            if line:
                try:
                    # Happy path: parse readings (stored as centi-units,
                    # hence the 1e-2 scale) and the trailing timestamp.
                    split = line.split(" ")
                    frame = split[0: array_size ** 2]
                    timestamp = split[-1]
                    frame = np.array([int(T) for T in frame], dtype=DTYPE)
                    frame = frame.reshape([array_size, array_size], order="F")
                    frame *= 1e-2
                    frames.append(frame)
                    timestamps.append(float(timestamp))
                except:
                    # Parsing failed: re-split the line, walk forward to the
                    # first token that is not an integer and report it.
                    split = line.split(" ")
                    frame = split[0: array_size ** 2]
                    timestamp = split[-1]
                    T_idx = 0
                    for T in frame:
                        try:
                            _ = int(T)
                        except:
                            break
                        T_idx += 1
                    print("{} caused error at line {} (t: {}), bit {} (= {})".format(
                        filepath, line_n, timestamp, T_idx, frame[T_idx]))
                    # print a little context around the offending token;
                    # out-of-range neighbours are silently skipped
                    for idx in range(-3, 3 + 1):
                        try:
                            print("bit {}: {}".format(
                                T_idx-idx, frame[T_idx-idx]))
                        except:
                            pass
                    return line_n
        frames = np.array(frames)
        # the array needs rotating 90 CW
        frames = np.rot90(frames, k=-1, axes=(1, 2))
        return -1 | 0.745584 | 0.316303
from branje_strani import shrani_stran, nalozi_stran_iz_datoteke
MAPA_KATALOGA = 'katalog'
def dobi_ime_strani_indeks(indeks):
    """Return the file path under MAPA_KATALOGA for catalogue page *indeks*."""
    # Forward slash works on both Windows and POSIX. The previous '\s' was
    # an invalid string escape that kept a literal backslash, producing a
    # Windows-only path (on POSIX it became a single odd file name).
    return f'{MAPA_KATALOGA}/stran_{indeks}.html'
# Substitute the page index into the URL template, then save the page
# under its index-based file name.
OSNOVA_SPAR_STRANI = 'https://www.spar.si'
VZOREC_STRANI = OSNOVA_SPAR_STRANI + '/online/c/root/?_=1635264522253&callback=parseResponse&category=root&i=1&m_sortProdResults_egisp=a&page={stevilka_strani}&pos=81701&q=*&sort=product-ecr-sortlev&sp_cs=UTF-8&sp_q_12=81701&sp_q_exact_14=root&sp_x_12=product-visible-pos'
def shrani_stran_indeks(indeks):
    """Download catalogue page number *indeks* and store it on disk."""
    url = VZOREC_STRANI.format(stevilka_strani=indeks)
    cilj = dobi_ime_strani_indeks(indeks)
    shrani_stran(url, cilj)
# Fetches and saves all catalogue pages (255 at the time this was written).
STEVILO_VSEH_STRANI_SPAR=255
def shrani_vse_strani_kataloga(stevilo_strani=STEVILO_VSEH_STRANI_SPAR):
    """Download catalogue pages 1..stevilo_strani and save each to disk."""
    stran = 1
    while stran <= stevilo_strani:
        shrani_stran_indeks(stran)
        stran += 1
def nalozi_vse_strani_kataloga(stevilo_strani=STEVILO_VSEH_STRANI_SPAR):
    """Load all saved catalogue pages from disk and return them as a list."""
    return [nalozi_stran_iz_datoteke(dobi_ime_strani_indeks(i))
            for i in range(1, stevilo_strani + 1)]
# A product link is recognised by a '/p' path segment right after the
# product name, e.g. '/online/<izdelek>/p/...'.
import re
def je_povezava_do_produkta(povezava):
    """Return True when *povezava* points to a single product page."""
    # Raw string avoids invalid-escape warnings. The previous character
    # class [\/|$] matched the literal characters '/', '|' and '$' — not
    # "slash or end of string"; (?:/|$) expresses the intended alternation.
    return re.search(r'/online/[\w\-]+/p(?:/|$)', povezava) is not None
# From a requests_html HTML object, collect the absolute URLs of all links
# on the page that represent individual products.
def poberi_povezave_do_produkta(html_objekt):
    """Return absolute product-page links found in *html_objekt*."""
    return [OSNOVA_SPAR_STRANI + povezava
            for povezava in html_objekt.links
            if je_povezava_do_produkta(povezava)]
def zdruzi_sezname(seznami):
    """Merge a list of lists into one list without duplicates (order unspecified)."""
    brez_ponovitev = set()
    for seznam in seznami:
        brez_ponovitev.update(seznam)
    return list(brez_ponovitev)
def poberi_povezave_seznam(seznam_html_objektov):
    """Collect product links from every HTML object and merge them without duplicates."""
    na_strani = [poberi_povezave_do_produkta(obj) for obj in seznam_html_objektov]
    return zdruzi_sezname(na_strani)
# Reads all links from the files saved in the catalogue folder and merges
# them without duplicates.
def obdelaj_vse_strani_kataloga():
    """Load every saved catalogue page and return the merged, de-duplicated product links."""
    vse_strani = nalozi_vse_strani_kataloga()
    return poberi_povezave_seznam(vse_strani)
import csv
# Output file for the collected product links.
DATOTEKA_VSEH_POVEZAV_KATALOGA = 'vse_povezave_do_produkta.csv'
def shrani_povezave_kataloga(nalozi_strani_iz_interneta=False):
    """Collect all product links and write them to DATOTEKA_VSEH_POVEZAV_KATALOGA.

    When nalozi_strani_iz_interneta is True the catalogue pages are first
    re-downloaded; otherwise the copies already on disk are parsed.
    """
    if nalozi_strani_iz_interneta:
        shrani_vse_strani_kataloga()
    vse_povezave = obdelaj_vse_strani_kataloga()
    with open(DATOTEKA_VSEH_POVEZAV_KATALOGA, 'w') as datoteka:
        # NOTE(review): delimiter='\n' abuses the csv field separator so a
        # single writerow() emits one link per line; plain writelines()
        # would be clearer — verify what the consumer of this file expects.
        zapis = csv.writer(datoteka, delimiter='\n')
        zapis.writerow(vse_povezave) | branje_kataloga.py | from branje_strani import shrani_stran, nalozi_stran_iz_datoteke
MAPA_KATALOGA = 'katalog'
def dobi_ime_strani_indeks(indeks):
return f'{MAPA_KATALOGA}\stran_{indeks}.html'
# v vzorec strani vstavimo indeks nato pa shranimo stran s tem url-jem
OSNOVA_SPAR_STRANI = 'https://www.spar.si'
VZOREC_STRANI = OSNOVA_SPAR_STRANI + '/online/c/root/?_=1635264522253&callback=parseResponse&category=root&i=1&m_sortProdResults_egisp=a&page={stevilka_strani}&pos=81701&q=*&sort=product-ecr-sortlev&sp_cs=UTF-8&sp_q_12=81701&sp_q_exact_14=root&sp_x_12=product-visible-pos'
def shrani_stran_indeks(indeks):
shrani_stran(VZOREC_STRANI.format(stevilka_strani=indeks), dobi_ime_strani_indeks(indeks))
# pobere in shrani vseh 255 strani (toliko jih je v času programiranja te naloge).
STEVILO_VSEH_STRANI_SPAR=255
def shrani_vse_strani_kataloga(stevilo_strani=STEVILO_VSEH_STRANI_SPAR):
for i in range(1, stevilo_strani + 1):
shrani_stran_indeks(i)
def nalozi_vse_strani_kataloga(stevilo_strani=STEVILO_VSEH_STRANI_SPAR):
vse_strani = []
for i in range(1, stevilo_strani+1):
vse_strani += [nalozi_stran_iz_datoteke(dobi_ime_strani_indeks(i))]
return vse_strani
# preveri ali je povezava res povezava do produkta (link se loči po tem da vsebuje: /p/)
import re
def je_povezava_do_produkta(povezava):
return re.search('\/online\/[\w\-]+\/p[\/|$]', povezava) is not None
# iz objekta HTML knjižnice requests_html prebere vse povezave na strani, ki predstavljajo posamezen izdelek
def poberi_povezave_do_produkta(html_objekt):
vse_povezave = html_objekt.links
povezave_do_produkta = []
for povezava in vse_povezave:
if je_povezava_do_produkta(povezava):
povezave_do_produkta+=[OSNOVA_SPAR_STRANI + povezava]
return povezave_do_produkta
def zdruzi_sezname(seznami):
zdruzen = []
for seznam in seznami:
for element in seznam:
zdruzen += [element]
return list(set(zdruzen))
def poberi_povezave_seznam(seznam_html_objektov):
seznam_seznamov = []
for html_objekt in seznam_html_objektov:
seznam_seznamov += [poberi_povezave_do_produkta(html_objekt)]
return zdruzi_sezname(seznam_seznamov)
# iz shranjenih datotek v mapi katalog prebere vse povezave in jih nato združi brez ponavljanja,
def obdelaj_vse_strani_kataloga():
vse_strani = nalozi_vse_strani_kataloga()
return poberi_povezave_seznam(vse_strani)
import csv
DATOTEKA_VSEH_POVEZAV_KATALOGA = 'vse_povezave_do_produkta.csv'
def shrani_povezave_kataloga(nalozi_strani_iz_interneta=False):
if nalozi_strani_iz_interneta:
shrani_vse_strani_kataloga()
vse_povezave = obdelaj_vse_strani_kataloga()
with open(DATOTEKA_VSEH_POVEZAV_KATALOGA, 'w') as datoteka:
zapis = csv.writer(datoteka, delimiter='\n')
zapis.writerow(vse_povezave) | 0.257952 | 0.14885 |
import os
import shutil
import sys
from PIL import Image, ImageChops, ImageDraw
from photoshoppy.models.blend_mode.model import BlendMode, ALL_BLEND_MODES
from photoshoppy.psd_file import PSDFile
from photoshoppy.psd_render import render_utils
# Folder layout for the comparison renders: Photoshop reference images,
# photoshoppy's own renders, and the generated side-by-side sheets.
THIS_DIR = os.path.dirname(__file__)
BLENDING_MODES_DIR = os.path.join(THIS_DIR, "renders", "blending_modes")
FROM_PHOTOSHOP_DIR = os.path.join(BLENDING_MODES_DIR, "from_photoshop")
FROM_PHOTOSHOPPY_DIR = os.path.join(BLENDING_MODES_DIR, "from_photoshoppy")
SIDE_BY_SIDE_DIR = os.path.join(BLENDING_MODES_DIR, "side_by_side")
PSD_FILE_PATH = os.path.join(THIS_DIR, "psd_files", "lena.psd")
# Shared fixture: the PSD whose layers are blended in every render below.
psd = PSDFile(PSD_FILE_PATH)
def clean_folder(path: str):
    """Delete every file and subdirectory inside *path*, keeping the folder itself.

    Does nothing when *path* is not an existing directory.
    """
    if not os.path.isdir(path):
        return
    print(f"cleaning {path}")
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        if os.path.isdir(entry_path):
            shutil.rmtree(entry_path)
        elif os.path.isfile(entry_path):
            os.unlink(entry_path)
def render_all_blending_modes():
    """Render every known blend mode into FROM_PHOTOSHOPPY_DIR, one PNG per mode.

    Modes whose blend function raises NotImplementedError are reported
    and skipped.
    """
    # exist_ok replaces the try/except-OSError idiom, which also swallowed
    # real failures such as permission errors.
    os.makedirs(FROM_PHOTOSHOPPY_DIR, exist_ok=True)
    for blend in ALL_BLEND_MODES:
        # file name is the mode name with spaces replaced, e.g. Linear_Burn.png
        file_name = blend.name.replace(" ", "_") + ".png"
        output_path = os.path.join(FROM_PHOTOSHOPPY_DIR, file_name)
        try:
            render_blending_mode(output_path, blend)
        except NotImplementedError:
            print(f"{blend.name} not implemented")
def render_single_blending_mode(name):
    """Render the blend mode called *name* into FROM_PHOTOSHOPPY_DIR as a PNG.

    Reports and skips the mode when its blend function raises
    NotImplementedError.
    """
    # exist_ok replaces the try/except-OSError idiom, which also swallowed
    # real failures such as permission errors.
    os.makedirs(FROM_PHOTOSHOPPY_DIR, exist_ok=True)
    blend = BlendMode.from_name(name)
    file_name = blend.name.replace(" ", "_") + ".png"
    output_path = os.path.join(FROM_PHOTOSHOPPY_DIR, file_name)
    try:
        render_blending_mode(output_path, blend)
    except NotImplementedError:
        print(f"{blend.name} not implemented")
def render_blending_mode(file_path: str, blend: BlendMode):
    """Blend the test PSD's "colors" layer over its "lena" layer and save the result.

    file_path : destination image path
    blend : the BlendMode whose blend_fn is exercised
    """
    print(f"rendering {file_path}")
    fg = render_utils.layer_to_screen_space(psd.layer("colors"), psd)
    bg = render_utils.layer_to_screen_space(psd.layer("lena"), psd)
    # Full opacity and no mask: this exercises the raw blend function only.
    image_data = blend.blend_fn(fg=fg, bg=bg, fg_opacity=1.0, mask=None)
    image = Image.fromarray(image_data, mode="RGBA")
    image.save(file_path)
def render_comparisons():
    """For every rendered PNG that also has a Photoshop reference, write a side-by-side sheet."""
    for file_name in os.listdir(FROM_PHOTOSHOPPY_DIR):
        photoshop_image = os.path.join(FROM_PHOTOSHOP_DIR, file_name)
        if not os.path.isfile(photoshop_image):
            continue
        render_sbs(
            os.path.join(SIDE_BY_SIDE_DIR, file_name),
            left_image=photoshop_image,
            right_image=os.path.join(FROM_PHOTOSHOPPY_DIR, file_name),
        )
def render_sbs(file_path: str, left_image: str, right_image: str):
    """Write a three-panel comparison image: left | right | per-pixel difference.

    Both inputs must have the same width (RuntimeError otherwise); a text
    header labels the panels and white vertical rules separate them.
    """
    print(f"Rendering side-by-side: {file_path} ...")
    img_l = Image.open(left_image)
    img_r = Image.open(right_image)
    # Difference is computed in RGB so alpha does not mask colour errors.
    img_diff = ImageChops.difference(img_l.convert("RGB"), img_r.convert("RGB"))
    left_w, left_h = img_l.size
    right_w, right_h = img_r.size
    if left_w != right_w:
        raise RuntimeError("Images are not the same size")
    text_margin = 50
    header = text_margin * 2
    # three panels plus two one-pixel separator columns
    width = left_w * 3 + 2
    height = left_h + text_margin * 2
    img = Image.new(mode="RGB", size=(width, height))
    draw = ImageDraw.Draw(img)
    img.paste(img_l, (0, header))
    img.paste(img_r, (left_w + 1, header))
    img.paste(img_diff, (left_w * 2 + 2, header))
    draw.text(((left_w * 0) + text_margin, text_margin), "From Photoshop")
    draw.text(((left_w * 1) + text_margin, text_margin), "From PhotoshopPy")
    draw.text(((left_w * 2) + text_margin, text_margin), "Difference")
    # white vertical rules between the panels
    draw.line((left_w * 1 + 0, 0, left_w * 1 + 0, height), fill=(255, 255, 255))
    draw.line((left_w * 2 + 1, 0, left_w * 2 + 1, height), fill=(255, 255, 255))
    img.save(file_path)
def main(args):
    """Clean the output folders, render the requested (or all) blend modes, then build comparisons.

    args : list of blend-mode names; empty means render every mode.
    """
    clean_folder(FROM_PHOTOSHOPPY_DIR)
    clean_folder(SIDE_BY_SIDE_DIR)
    if args:
        for mode_name in args:
            render_single_blending_mode(name=mode_name)
    else:
        render_all_blending_modes()
    render_comparisons()
if __name__ == "__main__":
    """ Given a list of blending mode names as arguments, render a comparison of Photoshop vs photoshoppy.
    For example, to render normal, screen, and multiply:
        test_blending_modes.py normal screen multiply
    If no arguments are listed, render all blending modes.
    """
    # NOTE(review): the string above is a no-op expression statement, not a
    # real docstring; consider moving it to the module docstring.
    main(sys.argv[1:]) | tests/test_blending_modes.py | import os
import shutil
import sys
from PIL import Image, ImageChops, ImageDraw
from photoshoppy.models.blend_mode.model import BlendMode, ALL_BLEND_MODES
from photoshoppy.psd_file import PSDFile
from photoshoppy.psd_render import render_utils
THIS_DIR = os.path.dirname(__file__)
BLENDING_MODES_DIR = os.path.join(THIS_DIR, "renders", "blending_modes")
FROM_PHOTOSHOP_DIR = os.path.join(BLENDING_MODES_DIR, "from_photoshop")
FROM_PHOTOSHOPPY_DIR = os.path.join(BLENDING_MODES_DIR, "from_photoshoppy")
SIDE_BY_SIDE_DIR = os.path.join(BLENDING_MODES_DIR, "side_by_side")
PSD_FILE_PATH = os.path.join(THIS_DIR, "psd_files", "lena.psd")
psd = PSDFile(PSD_FILE_PATH)
def clean_folder(path: str):
if not os.path.isdir(path):
return
print(f"cleaning {path}")
for item in os.listdir(path):
full_path = os.path.join(path, item)
if os.path.isfile(full_path):
os.unlink(full_path)
elif os.path.isdir(full_path):
shutil.rmtree(full_path)
def render_all_blending_modes():
try:
os.makedirs(FROM_PHOTOSHOPPY_DIR)
except OSError:
pass
for blend in ALL_BLEND_MODES:
file_name = blend.name.replace(" ", "_") + ".png"
output_path = os.path.join(FROM_PHOTOSHOPPY_DIR, file_name)
try:
render_blending_mode(output_path, blend)
except NotImplementedError:
print(f"{blend.name} not implemented")
def render_single_blending_mode(name):
try:
os.makedirs(FROM_PHOTOSHOPPY_DIR)
except OSError:
pass
blend = BlendMode.from_name(name)
file_name = blend.name.replace(" ", "_") + ".png"
output_path = os.path.join(FROM_PHOTOSHOPPY_DIR, file_name)
try:
render_blending_mode(output_path, blend)
except NotImplementedError:
print(f"{blend.name} not implemented")
def render_blending_mode(file_path: str, blend: BlendMode):
print(f"rendering {file_path}")
fg = render_utils.layer_to_screen_space(psd.layer("colors"), psd)
bg = render_utils.layer_to_screen_space(psd.layer("lena"), psd)
image_data = blend.blend_fn(fg=fg, bg=bg, fg_opacity=1.0, mask=None)
image = Image.fromarray(image_data, mode="RGBA")
image.save(file_path)
def render_comparisons():
for file in os.listdir(FROM_PHOTOSHOPPY_DIR):
output_image = os.path.join(FROM_PHOTOSHOPPY_DIR, file)
photoshop_image = os.path.join(FROM_PHOTOSHOP_DIR, file)
if os.path.isfile(photoshop_image):
sbs_path = os.path.join(SIDE_BY_SIDE_DIR, file)
render_sbs(sbs_path, left_image=photoshop_image, right_image=output_image)
def render_sbs(file_path: str, left_image: str, right_image: str):
print(f"Rendering side-by-side: {file_path} ...")
img_l = Image.open(left_image)
img_r = Image.open(right_image)
img_diff = ImageChops.difference(img_l.convert("RGB"), img_r.convert("RGB"))
left_w, left_h = img_l.size
right_w, right_h = img_r.size
if left_w != right_w:
raise RuntimeError("Images are not the same size")
text_margin = 50
header = text_margin * 2
width = left_w * 3 + 2
height = left_h + text_margin * 2
img = Image.new(mode="RGB", size=(width, height))
draw = ImageDraw.Draw(img)
img.paste(img_l, (0, header))
img.paste(img_r, (left_w + 1, header))
img.paste(img_diff, (left_w * 2 + 2, header))
draw.text(((left_w * 0) + text_margin, text_margin), "From Photoshop")
draw.text(((left_w * 1) + text_margin, text_margin), "From PhotoshopPy")
draw.text(((left_w * 2) + text_margin, text_margin), "Difference")
draw.line((left_w * 1 + 0, 0, left_w * 1 + 0, height), fill=(255, 255, 255))
draw.line((left_w * 2 + 1, 0, left_w * 2 + 1, height), fill=(255, 255, 255))
img.save(file_path)
def main(args):
clean_folder(FROM_PHOTOSHOPPY_DIR)
clean_folder(SIDE_BY_SIDE_DIR)
if len(args):
for arg in args:
render_single_blending_mode(name=arg)
else:
render_all_blending_modes()
render_comparisons()
pass
if __name__ == "__main__":
""" Given a list of blending mode names as arguments, render a comparison of Photoshop vs photoshoppy.
For example, to render normal, screen, and multiply:
test_blending_modes.py normal screen multiply
If no arguments are listed, render all blending modes.
"""
main(sys.argv[1:]) | 0.26693 | 0.143998 |
#Initiate
import pygame
import Tile
#PlayerClass
class player(pygame.sprite.Sprite):
    """Grid-walking, sheet-animated player sprite.

    Moves on a tile grid (Tile.tile.grid) of 32x32-pixel cells drawn with a
    64-pixel vertical offset. Keeps references to the current tile
    (location) and to the one/two tiles ahead in the facing direction
    (radar/radar2) for collision and interaction checks.
    """
    def __init__(self,x=7,y=8):
        """Create the player at grid cell (x, y); defaults to (7, 8)."""
        # movement speed in pixels per move() call; must divide 32 so the
        # sprite re-aligns with the tile grid
        self.speed = 2
        #Image variables
        # i = animation frame (column), j = facing-direction row in the sheet
        self.i=1
        self.j=0
        #Awareness
        # current tile plus the next two tiles below (initial facing: down)
        self.location=Tile.tile.grid[x][y]
        self.radar=Tile.tile.grid[x][y+1]
        self.radar2=Tile.tile.grid[x][y+2]
        #Directions
        self.move_dir = ''
        self.move_Q = ''
        self.face_dir = 'down'
        self.stopping = False
        self.opening = False
        #Initialize image
        # 32x32 clip selects one cell of the sprite sheet
        self.playerSheet = pygame.image.load('Graphics/player_sheet.png')
        self.playerSheet.set_clip(pygame.Rect(0,0, 32,32))
        self.playerImg = self.playerSheet.subsurface(self.playerSheet.get_clip())
        self.rect = self.playerImg.get_rect()
        pygame.sprite.Sprite.__init__(self)
        #Place rect object at image location
        # grid -> pixels: 32 px per tile, +64 px vertical screen offset
        self.rect.topleft = (x*32,y*32+64)
        self.x = self.rect.x
        self.y = self.rect.y
        #Set timing
        # animation timer in milliseconds (pygame ticks)
        self.last = pygame.time.get_ticks()
        self.cooldown = 250
    def x2grid(self,x):
        """Convert a pixel x coordinate to a grid column."""
        xgrid = int(x/32)
        return xgrid
    def y2grid(self,y):
        """Convert a pixel y coordinate to a grid row (undoing the 64 px offset)."""
        ygrid = int((y-64)/32)
        return ygrid
    def scanTiles(self):
        """Refresh self.location and the tiles ahead in the facing direction.

        radar2 becomes None when the second tile ahead would fall outside
        the grid (the bounds checks use column 14 / row 16 as the last
        valid indices — presumably a 15x17 grid; confirm against Tile).
        """
        #Empty old location
        self.location.empty()
        #Update new location
        self.location = Tile.tile.grid[self.x2grid(self.x)][self.y2grid(self.y)]
        #Fill new location
        self.location.fill(self)
        #Left
        if self.face_dir=='left':
            self.radar=Tile.tile.grid[self.x2grid(self.x)-1][self.y2grid(self.y)]
            if (self.x2grid(self.x)-2)<0:
                self.radar2=None
            else:
                self.radar2=Tile.tile.grid[self.x2grid(self.x)-2][self.y2grid(self.y)]
        #Right
        elif self.face_dir=='right':
            self.radar=Tile.tile.grid[self.x2grid(self.x)+1][self.y2grid(self.y)]
            if (self.x2grid(self.x)+2)>14:
                self.radar2=None
            else:
                self.radar2=Tile.tile.grid[self.x2grid(self.x)+2][self.y2grid(self.y)]
        #Up
        elif self.face_dir=='up':
            self.radar=Tile.tile.grid[self.x2grid(self.x)][self.y2grid(self.y)-1]
            if (self.y2grid(self.y)-2)<0:
                self.radar2=None
            else:
                self.radar2=Tile.tile.grid[self.x2grid(self.x)][self.y2grid(self.y)-2]
        #Down
        elif self.face_dir=='down':
            self.radar=Tile.tile.grid[self.x2grid(self.x)][self.y2grid(self.y)+1]
            if (self.y2grid(self.y)+2)>16:
                self.radar2=None
            else:
                self.radar2=Tile.tile.grid[self.x2grid(self.x)][self.y2grid(self.y)+2]
    def move(self):
        """Advance position by one step in move_dir and tick the walk animation.

        Movement is blocked when the tile ahead (radar) is solid; the tile
        scan only happens when the sprite is exactly aligned on the grid.
        """
        #Scan Player Vision
        if self.x%32==0 and self.y%32==0:
            self.scanTiles()
        if self.move_dir == 'left':
            self.j = 1
            if self.radar.solid==True:
                # blocked by a solid tile: no-op
                None
            else:
                self.x -= self.speed
            # advance the animation frame every (cooldown-200) ms
            now = pygame.time.get_ticks()
            if now - self.last >= self.cooldown-200:
                self.last = now
                self.i+=1
                if self.i > 2:
                    self.i=0
        elif self.move_dir == 'right':
            self.j = 2
            if self.radar.solid==True:
                None
            else:
                self.x += self.speed
            now = pygame.time.get_ticks()
            if now - self.last >= self.cooldown-200:
                self.last = now
                self.i+=1
                if self.i > 2:
                    self.i=0
        elif self.move_dir == 'up':
            self.j = 3
            if self.radar.solid==True:
                None
            else:
                self.y -= self.speed
            # vertical walk animates slightly slower ((cooldown-100) ms)
            now = pygame.time.get_ticks()
            if now - self.last >= self.cooldown-100:
                self.last = now
                self.i+=1
                if self.i > 2:
                    self.i=1
        elif self.move_dir == 'down':
            self.j = 0
            if self.radar.solid==True:
                None
            else:
                self.y += self.speed
            now = pygame.time.get_ticks()
            if now - self.last >= self.cooldown-100:
                self.last = now
                self.i+=1
                if self.i > 2:
                    self.i=1
        else:
            # not moving: rest on the idle frame
            self.i = 0
        #Object Location Update
        self.rect.topleft = (self.x,self.y)
    def update(self, surface):
        """Select the current sheet cell (column i, row j) and blit it onto *surface*."""
        #Current Frame Update
        self.playerSheet.set_clip(pygame.Rect(self.i*32,self.j*32, 32,32))
        self.playerImg = self.playerSheet.subsurface(self.playerSheet.get_clip())
        #Image Location Update
        surface.blit(self.playerImg,(self.x,self.y)) | PokePengo/Player.py |
#Initiate
import pygame
import Tile
#PlayerClass
class player(pygame.sprite.Sprite):
def __init__(self,x=7,y=8):
self.speed = 2
#Image variables
self.i=1
self.j=0
#Awareness
self.location=Tile.tile.grid[x][y]
self.radar=Tile.tile.grid[x][y+1]
self.radar2=Tile.tile.grid[x][y+2]
#Directions
self.move_dir = ''
self.move_Q = ''
self.face_dir = 'down'
self.stopping = False
self.opening = False
#Initialize image
self.playerSheet = pygame.image.load('Graphics/player_sheet.png')
self.playerSheet.set_clip(pygame.Rect(0,0, 32,32))
self.playerImg = self.playerSheet.subsurface(self.playerSheet.get_clip())
self.rect = self.playerImg.get_rect()
pygame.sprite.Sprite.__init__(self)
#Place rect object at image location
self.rect.topleft = (x*32,y*32+64)
self.x = self.rect.x
self.y = self.rect.y
#Set timing
self.last = pygame.time.get_ticks()
self.cooldown = 250
def x2grid(self,x):
xgrid = int(x/32)
return xgrid
def y2grid(self,y):
ygrid = int((y-64)/32)
return ygrid
def scanTiles(self):
#Empty old location
self.location.empty()
#Update new location
self.location = Tile.tile.grid[self.x2grid(self.x)][self.y2grid(self.y)]
#Fill new location
self.location.fill(self)
#Left
if self.face_dir=='left':
self.radar=Tile.tile.grid[self.x2grid(self.x)-1][self.y2grid(self.y)]
if (self.x2grid(self.x)-2)<0:
self.radar2=None
else:
self.radar2=Tile.tile.grid[self.x2grid(self.x)-2][self.y2grid(self.y)]
#Right
elif self.face_dir=='right':
self.radar=Tile.tile.grid[self.x2grid(self.x)+1][self.y2grid(self.y)]
if (self.x2grid(self.x)+2)>14:
self.radar2=None
else:
self.radar2=Tile.tile.grid[self.x2grid(self.x)+2][self.y2grid(self.y)]
#Up
elif self.face_dir=='up':
self.radar=Tile.tile.grid[self.x2grid(self.x)][self.y2grid(self.y)-1]
if (self.y2grid(self.y)-2)<0:
self.radar2=None
else:
self.radar2=Tile.tile.grid[self.x2grid(self.x)][self.y2grid(self.y)-2]
#Down
elif self.face_dir=='down':
self.radar=Tile.tile.grid[self.x2grid(self.x)][self.y2grid(self.y)+1]
if (self.y2grid(self.y)+2)>16:
self.radar2=None
else:
self.radar2=Tile.tile.grid[self.x2grid(self.x)][self.y2grid(self.y)+2]
def move(self):
#Scan Player Vision
if self.x%32==0 and self.y%32==0:
self.scanTiles()
if self.move_dir == 'left':
self.j = 1
if self.radar.solid==True:
None
else:
self.x -= self.speed
now = pygame.time.get_ticks()
if now - self.last >= self.cooldown-200:
self.last = now
self.i+=1
if self.i > 2:
self.i=0
elif self.move_dir == 'right':
self.j = 2
if self.radar.solid==True:
None
else:
self.x += self.speed
now = pygame.time.get_ticks()
if now - self.last >= self.cooldown-200:
self.last = now
self.i+=1
if self.i > 2:
self.i=0
elif self.move_dir == 'up':
self.j = 3
if self.radar.solid==True:
None
else:
self.y -= self.speed
now = pygame.time.get_ticks()
if now - self.last >= self.cooldown-100:
self.last = now
self.i+=1
if self.i > 2:
self.i=1
elif self.move_dir == 'down':
self.j = 0
if self.radar.solid==True:
None
else:
self.y += self.speed
now = pygame.time.get_ticks()
if now - self.last >= self.cooldown-100:
self.last = now
self.i+=1
if self.i > 2:
self.i=1
else:
self.i = 0
#Object Location Update
self.rect.topleft = (self.x,self.y)
def update(self, surface):
#Current Frame Update
self.playerSheet.set_clip(pygame.Rect(self.i*32,self.j*32, 32,32))
self.playerImg = self.playerSheet.subsurface(self.playerSheet.get_clip())
#Image Location Update
surface.blit(self.playerImg,(self.x,self.y)) | 0.095102 | 0.096025 |
from fastapi import APIRouter
from starlette.requests import Request
from app.api.utils.responseCode import resp_200, resp_400, resp_500
from app.supervisor_.core.clogger import ActivityLog
# Module-level FastAPI router and the shared activity-log singleton.
router = APIRouter()
activity = ActivityLog.getInstance()
def get_nodes(*, request: Request, ):
    """Return every configured supervisor node (node metadata only, no processes)."""
    serialized = request.app.state.cesi.serialize_nodes()
    payload = {"items": serialized, "total": len(serialized)}
    return resp_200(data=payload)
def get_node(*, request: Request, node_name: str):
    """Return one node's serialized state, including its process list.

    Unknown node names are rejected by cesi's get_node_or_400 hook.
    """
    node = request.app.state.cesi.get_node_or_400(node_name)
    # Serialize once: the previous code called serialize_node() twice per
    # request (once for "items", once just to count processes).
    serialized = node.serialize_node()
    data = {"items": serialized, "total": len(serialized.get("processes"))}
    return resp_200(data=data)
def get_node_processes(*, request: Request, node_name: str):
    """Return the serialized process list of one node; 400 when the node is offline."""
    node = request.app.state.cesi.get_node_or_400(node_name)
    if not node.is_connected:
        return resp_400()
    return resp_200(data={"items": node.serialize_processes()})
def get_process(*, request: Request, node_name, unique_process_name):
    """Return one process's serialized state; 400 when the node is offline or the process unknown."""
    node = request.app.state.cesi.get_node_or_400(node_name)
    if not node.is_connected:
        return resp_400()
    process = node.get_process_or_400(unique_process_name)
    return resp_200(data={"items": process.serialize()})
def start_process(*, request: Request, node_name, unique_process_name):
    """Start one process on a node.

    Returns 400 when the node is offline and 500 when supervisor reports
    the start failed; the outcome is written to the activity log.
    """
    node = request.app.state.cesi.get_node_or_400(node_name)
    if not node.is_connected:
        return resp_400()
    status, msg = node.start_process(unique_process_name)
    if not status:
        activity.logger.info(
            f"不知道他娘的谁 unsuccessful start event {node_name} node's {unique_process_name} process.")
        return resp_500()
    activity.logger.info(
        f"不知道他娘的谁 started {node_name} node's {unique_process_name} process.")
    return resp_200(message=f"{node.name} {unique_process_name} start event successful")
def stop_process(*, request: Request, node_name, unique_process_name):
    """Stop one process on a node.

    Returns 400 when the node is offline and 500 when supervisor reports
    the stop failed; the outcome is written to the activity log.
    """
    node = request.app.state.cesi.get_node_or_400(node_name)
    if not node.is_connected:
        return resp_400()
    # msg (supervisor's reason text) is currently discarded; only the
    # success/failure flag drives the response and the log line.
    status, msg = node.stop_process(unique_process_name)
    if status:
        activity.logger.info(
            "{} stopped {} node's {} process.".format(
                "不知道他娘的谁", node_name, unique_process_name
            )
        )
        return resp_200(message=f"{node.name} {unique_process_name} stop event successful")
    activity.logger.info(
        "{} unsuccessful stop event {} node's {} process.".format(
            "不知道他娘的谁", node_name, unique_process_name
        )
    )
    return resp_500()
def restart_process(*, request: Request, node_name, unique_process_name):
    """Restart one process on a node.

    Returns 400 when the node is offline and 500 when supervisor reports
    the restart failed; the outcome is written to the activity log.
    """
    node = request.app.state.cesi.get_node_or_400(node_name)
    if not node.is_connected:
        return resp_400()
    # msg (supervisor's reason text) is currently discarded; only the
    # success/failure flag drives the response and the log line.
    status, msg = node.restart_process(unique_process_name)
    if status:
        activity.logger.info(
            "{} restarted {} node's {} process.".format(
                "不知道他娘的谁", node_name, unique_process_name
            )
        )
        return resp_200(message=f"{node.name} {unique_process_name} restart event successful")
    activity.logger.info(
        "{} unsuccessful restart event {} node's {} process.".format(
            "不知道他娘的谁", node_name, unique_process_name
        )
    )
    return resp_500()
def read_process_log(*, request: Request, node_name, unique_process_name):
    """Return the log output of one process; 400 when the node is offline."""
    node = request.app.state.cesi.get_node_or_400(node_name)
    if not node.is_connected:
        return resp_400()
    return resp_200(data={"log": node.get_process_logs(unique_process_name)})
# TODO: supervisor's XML-RPC interface also offers bulk operations
# (e.g. startAllProcesses) that could replace these per-process loops.
def start_all_process(*, request: Request, node_name):
    """Start every process on a node that is not already running.

    Per-process failures are only logged; the route answers 200 unless
    the node itself is offline (400).
    """
    node = request.app.state.cesi.get_node_or_400(node_name)
    if not node.is_connected:
        return resp_400()
    for process in node.processes:
        # 20 is supervisor's RUNNING state code — skip running processes
        if not process.state == 20:
            status, msg = node.start_process(process.group + ":" + process.name)
            if status:
                activity.logger.info(
                    "{} started {} node's {} process.".format(
                        "不知道他娘的谁", node_name, process.name
                    )
                )
            else:
                activity.logger.info(
                    "{} unsuccessful start event {} node's {} process.".format(
                        "不知道他娘的谁", node_name, process.name
                    )
                )
    return resp_200()
def stop_all_process(*, request: Request, node_name):
    """Stop every process on a node that is not already stopped.

    Per-process failures are only logged; the route answers 200 unless
    the node itself is offline (400).
    """
    node = request.app.state.cesi.get_node_or_400(node_name)
    if not node.is_connected:
        return resp_400()
    for process in node.processes:
        # 0 is supervisor's STOPPED state code — skip stopped processes
        if not process.state == 0:
            status, msg = node.stop_process(process.group + ":" + process.name)
            if status:
                activity.logger.info(
                    "{} stopped {} node's {} process.".format(
                        "不知道他娘的谁", node_name, process.name
                    )
                )
            else:
                activity.logger.info(
                    "{} unsuccessful stop event {} node's {} process.".format(
                        "不知道他娘的谁", node_name, process.name
                    )
                )
    return resp_200()
def restart_all_process(*, request: Request, node_name):
    """Restart every currently-running process on the node.

    Processes already in state 0 (STOPPED) are skipped, mirroring
    stop_all_process.  Each remaining process is stopped and then started
    again.  Fixes: the original printed stop failures to stdout with
    print() instead of logging them, and contained no-op ``...``
    statements; failures now go through the activity logger and the loop
    continues so one broken process does not abort the batch.
    """
    node = request.app.state.cesi.get_node_or_400(node_name)
    if not node.is_connected:
        return resp_400()
    for process in node.processes:
        if process.state == 0:
            continue
        unique_name = process.group + ":" + process.name
        status, msg = node.stop_process(unique_name)
        if not status:
            # Record the stop failure; still attempt the start below.
            activity.logger.info(
                "{} unsuccessful stop event {} node's {} process.".format(
                    "不知道他娘的谁", node_name, process.name
                )
            )
        status, msg = node.start_process(unique_name)
        if status:
            activity.logger.info(
                "{} restarted {} node's {} process.".format(
                    "不知道他娘的谁", node_name, process.name
                )
            )
        else:
            activity.logger.info(
                "{} unsuccessful restart event {} node's {} process.".format(
                    "不知道他娘的谁", node_name, process.name
                )
            )
    return resp_200()
# ------------------------------- Route registration --------------------------------
# Routes are registered imperatively (table-driven) so the handlers above
# remain plain callables; order and summaries are unchanged.
_ROUTES = [
    ('GET', "/nodes", get_nodes, "supervisor 获取所有 node, 仅 node 信息而不是 processes"),
    ('GET', "/nodes/{node_name}", get_node, "supervisor 获取单个 node"),
    ('GET', "/nodes/{node_name}/processes", get_node_processes, "supervisor 获取 node processes"),
    ('GET', "/nodes/{node_name}/process/{unique_process_name}", get_process, "supervisor 获取 process"),
    ('PUT', "/nodes/{node_name}/process/{unique_process_name}/start", start_process, "supervisor 开启 process"),
    ('PUT', "/nodes/{node_name}/process/{unique_process_name}/stop", stop_process, "supervisor 停止 process"),
    ('PUT', "/nodes/{node_name}/process/{unique_process_name}/restart", restart_process, "supervisor 重启 process"),
    ('GET', "/nodes/{node_name}/process/{unique_process_name}/log", read_process_log, "supervisor process 日志"),
    ('PUT', "/nodes/{node_name}/all-processes/start", start_all_process, "supervisor 启动所有 processes"),
    ('PUT', "/nodes/{node_name}/all-processes/stop", stop_all_process, "supervisor 停止所有 processes"),
]
for _method, _path, _endpoint, _summary in _ROUTES:
    router.add_api_route(methods=[_method], path=_path, endpoint=_endpoint, summary=_summary)
router.add_api_route(methods=['PUT'], path="/nodes/{node_name}/all-processes/restart",
endpoint=restart_all_process, summary="supervisor 重启所有 processes") | backend/app/app/api/api_v1/router/supervisord/nodes.py | from fastapi import APIRouter
from starlette.requests import Request
from app.api.utils.responseCode import resp_200, resp_400, resp_500
from app.supervisor_.core.clogger import ActivityLog
# Router instance for the supervisord node endpoints; routes are attached
# at the bottom of the module via router.add_api_route().
router = APIRouter()
# Singleton activity logger used to record start/stop/restart events.
activity = ActivityLog.getInstance()
def get_nodes(*, request: Request, ):
    """List every configured supervisord node (node info only, no processes)."""
    serialized = request.app.state.cesi.serialize_nodes()
    return resp_200(data={"items": serialized, "total": len(serialized)})
def get_node(*, request: Request, node_name: str):
    """Return one node's full serialization, including its processes.

    ``serialize_node()`` is called once and reused; the original invoked
    it twice per request (once for the payload, once for the process
    count), serializing the node redundantly.
    """
    node = request.app.state.cesi.get_node_or_400(node_name)
    serialized = node.serialize_node()
    data = {"items": serialized, "total": len(serialized.get("processes"))}
    return resp_200(data=data)
def get_node_processes(*, request: Request, node_name: str):
    """List the serialized processes of one connected node."""
    target = request.app.state.cesi.get_node_or_400(node_name)
    if not target.is_connected:
        return resp_400()
    return resp_200(data={"items": target.serialize_processes()})
def get_process(*, request: Request, node_name, unique_process_name):
    """Return the serialization of a single process on a node."""
    target = request.app.state.cesi.get_node_or_400(node_name)
    if not target.is_connected:
        return resp_400()
    proc = target.get_process_or_400(unique_process_name)
    return resp_200(data={"items": proc.serialize()})
def start_process(*, request: Request, node_name, unique_process_name):
    """Start one supervisord process on the given node.

    400 when the node is unreachable, 500 when the start RPC fails,
    200 on success; the outcome is recorded via the activity logger.
    """
    target = request.app.state.cesi.get_node_or_400(node_name)
    if not target.is_connected:
        return resp_400()
    ok, _msg = target.start_process(unique_process_name)
    if not ok:
        activity.logger.info(
            "{} unsuccessful start event {} node's {} process.".format("不知道他娘的谁", node_name, unique_process_name))
        return resp_500()
    activity.logger.info(
        "{} started {} node's {} process.".format("不知道他娘的谁", node_name, unique_process_name))
    return resp_200(message=f"{target.name} {unique_process_name} start event successful")
def stop_process(*, request: Request, node_name, unique_process_name):
    """Stop one supervisord process on the given node.

    400 when the node is unreachable, 500 when the stop RPC fails,
    200 on success; the outcome is recorded via the activity logger.
    """
    target = request.app.state.cesi.get_node_or_400(node_name)
    if not target.is_connected:
        return resp_400()
    ok, _msg = target.stop_process(unique_process_name)
    if not ok:
        activity.logger.info(
            "{} unsuccessful stop event {} node's {} process.".format("不知道他娘的谁", node_name, unique_process_name))
        return resp_500()
    activity.logger.info(
        "{} stopped {} node's {} process.".format("不知道他娘的谁", node_name, unique_process_name))
    return resp_200(message=f"{target.name} {unique_process_name} stop event successful")
def restart_process(*, request: Request, node_name, unique_process_name):
    """Restart one supervisord process on the given node.

    400 when the node is unreachable, 500 when the restart RPC fails,
    200 on success; the outcome is recorded via the activity logger.
    """
    target = request.app.state.cesi.get_node_or_400(node_name)
    if not target.is_connected:
        return resp_400()
    ok, _msg = target.restart_process(unique_process_name)
    if not ok:
        activity.logger.info(
            "{} unsuccessful restart event {} node's {} process.".format("不知道他娘的谁", node_name, unique_process_name))
        return resp_500()
    activity.logger.info(
        "{} restarted {} node's {} process.".format("不知道他娘的谁", node_name, unique_process_name))
    return resp_200(message=f"{target.name} {unique_process_name} restart event successful")
def read_process_log(*, request: Request, node_name, unique_process_name):
    """Return the supervisord log tail of one process on a node."""
    target = request.app.state.cesi.get_node_or_400(node_name)
    if not target.is_connected:
        return resp_400()
    return resp_200(data={"log": target.get_process_logs(unique_process_name)})
# todo 这里 supervisor 实际上提供了批量操作的语句
def start_all_process(*, request: Request, node_name):
    """Start every process on the node that is not already in state 20 (RUNNING)."""
    target = request.app.state.cesi.get_node_or_400(node_name)
    if not target.is_connected:
        return resp_400()
    for proc in target.processes:
        if proc.state == 20:  # already running, nothing to do
            continue
        ok, _msg = target.start_process(proc.group + ":" + proc.name)
        if ok:
            activity.logger.info(
                "{} started {} node's {} process.".format("不知道他娘的谁", node_name, proc.name))
        else:
            activity.logger.info(
                "{} unsuccessful start event {} node's {} process.".format("不知道他娘的谁", node_name, proc.name))
    return resp_200()
def stop_all_process(*, request: Request, node_name):
    """Stop every process on the node that is not already in state 0 (STOPPED)."""
    target = request.app.state.cesi.get_node_or_400(node_name)
    if not target.is_connected:
        return resp_400()
    for proc in target.processes:
        if proc.state == 0:  # already stopped, nothing to do
            continue
        ok, _msg = target.stop_process(proc.group + ":" + proc.name)
        if ok:
            activity.logger.info(
                "{} stopped {} node's {} process.".format("不知道他娘的谁", node_name, proc.name))
        else:
            activity.logger.info(
                "{} unsuccessful stop event {} node's {} process.".format("不知道他娘的谁", node_name, proc.name))
    return resp_200()
def restart_all_process(*, request: Request, node_name):
    """Restart every currently-running process on the node.

    Processes already in state 0 (STOPPED) are skipped, mirroring
    stop_all_process.  Each remaining process is stopped and then started
    again.  Fixes: the original printed stop failures to stdout with
    print() instead of logging them, and contained no-op ``...``
    statements; failures now go through the activity logger and the loop
    continues so one broken process does not abort the batch.
    """
    node = request.app.state.cesi.get_node_or_400(node_name)
    if not node.is_connected:
        return resp_400()
    for process in node.processes:
        if process.state == 0:
            continue
        unique_name = process.group + ":" + process.name
        status, msg = node.stop_process(unique_name)
        if not status:
            # Record the stop failure; still attempt the start below.
            activity.logger.info(
                "{} unsuccessful stop event {} node's {} process.".format(
                    "不知道他娘的谁", node_name, process.name
                )
            )
        status, msg = node.start_process(unique_name)
        if status:
            activity.logger.info(
                "{} restarted {} node's {} process.".format(
                    "不知道他娘的谁", node_name, process.name
                )
            )
        else:
            activity.logger.info(
                "{} unsuccessful restart event {} node's {} process.".format(
                    "不知道他娘的谁", node_name, process.name
                )
            )
    return resp_200()
# ------------------------------- Route registration --------------------------------
# Routes are registered imperatively (table-driven) so the handlers above
# remain plain callables; order and summaries are unchanged.
_ROUTE_TABLE = [
    ('GET', "/nodes", get_nodes, "supervisor 获取所有 node, 仅 node 信息而不是 processes"),
    ('GET', "/nodes/{node_name}", get_node, "supervisor 获取单个 node"),
    ('GET', "/nodes/{node_name}/processes", get_node_processes, "supervisor 获取 node processes"),
    ('GET', "/nodes/{node_name}/process/{unique_process_name}", get_process, "supervisor 获取 process"),
    ('PUT', "/nodes/{node_name}/process/{unique_process_name}/start", start_process, "supervisor 开启 process"),
    ('PUT', "/nodes/{node_name}/process/{unique_process_name}/stop", stop_process, "supervisor 停止 process"),
    ('PUT', "/nodes/{node_name}/process/{unique_process_name}/restart", restart_process, "supervisor 重启 process"),
    ('GET', "/nodes/{node_name}/process/{unique_process_name}/log", read_process_log, "supervisor process 日志"),
    ('PUT', "/nodes/{node_name}/all-processes/start", start_all_process, "supervisor 启动所有 processes"),
    ('PUT', "/nodes/{node_name}/all-processes/stop", stop_all_process, "supervisor 停止所有 processes"),
]
for _method, _path, _endpoint, _summary in _ROUTE_TABLE:
    router.add_api_route(methods=[_method], path=_path, endpoint=_endpoint, summary=_summary)
router.add_api_route(methods=['PUT'], path="/nodes/{node_name}/all-processes/restart",
endpoint=restart_all_process, summary="supervisor 重启所有 processes") | 0.192312 | 0.07836 |
import sqlite3
def __sqlite(query: str, params: tuple = ()):
    """Execute one SQL statement against ttbm.db and return all rows.

    Values are bound through sqlite3 ``?`` placeholders (``params``) so
    caller-supplied data can never alter the SQL text — the previous
    f-string interpolation was SQL-injectable.  The connection is closed
    even when execution raises (the original leaked it on error).
    """
    con = sqlite3.connect("../resources_manager/ttbm.db")
    try:
        cur = con.cursor()
        cur.execute(query, params)
        result = cur.fetchall()
        con.commit()
    finally:
        con.close()
    return result
def __user_key(name: str):
    """Return the primary key of the first user named *name* (IndexError if absent)."""
    return __sqlite("SELECT key FROM users WHERE name = ?", (name,))[0][0]
def sqlite_3_add_user(name: str, password: str, id: int):
    """Insert a new user row.  NOTE(review): the password is stored in plain text."""
    __sqlite("INSERT INTO users(name, password, id) VALUES(?, ?, ?)", (name, password, id))
def sqlite_3_select_identity_name(name: str):
    """Return the first user row matching *name*, or [] when no such user exists."""
    try:
        return __sqlite("SELECT * FROM users WHERE name = ?", (name,))[0]
    except IndexError:
        return []
def sqlite_3_create_statistic(name: str, hours: float, win_rates: float, count_of_wins: int, count_of_plays: int):
    """Create the statistic row attached to user *name*."""
    __sqlite("INSERT INTO statistic(hours, win_rates, count_of_wins, count_of_plays, key) "
             "VALUES(?, ?, ?, ?, ?);",
             (hours, win_rates, count_of_wins, count_of_plays, __user_key(name)))
def sqlite_3_update_statistic(name: str, hours: float, win_rates: float, count_of_wins: int, count_of_plays: int):
    """Overwrite the statistic row attached to user *name*."""
    __sqlite("UPDATE statistic SET hours = ?, win_rates = ?, count_of_wins = ?, "
             "count_of_plays = ? WHERE key = ?",
             (hours, win_rates, count_of_wins, count_of_plays, __user_key(name)))
def sqlite_3_get_statistic(name: str):
    """Return (hours, win_rates, count_of_wins, count_of_plays) for user *name*."""
    return __sqlite("SELECT hours, win_rates, count_of_wins, count_of_plays FROM statistic WHERE key = ?",
                    (__user_key(name),))[0]
def sqlite_3_create_info(name: str, date: str, gender: str, description: str):
    """Create the info row attached to user *name*."""
    __sqlite("INSERT INTO info(date, gender, description, key) VALUES(?, ?, ?, ?)",
             (date, gender, description, __user_key(name)))
def sqlite_3_get_info(name: str):
    """Return (date, gender, description) for user *name*."""
    return __sqlite("SELECT date, gender, description FROM info WHERE key = ?", (__user_key(name),))[0]
def sqlite_3_create_view(table: str):
    """Create a users+info join view named *table*.

    Identifiers cannot be bound as parameters, so the view name is
    validated before being quoted with [] to stop it escaping the quoting.
    """
    if "]" in table:
        raise ValueError("invalid view name: %r" % table)
    __sqlite(f"CREATE VIEW [{table}] AS SELECT users.key, users.name, users.id, users.password, info.date, "
             f"info.description FROM users INNER JOIN info ON users.key=info.key ORDER BY users.key;")
def sqlite_3_get_view(table: str):
    """Return every row of the view named *table*."""
    if "]" in table:
        raise ValueError("invalid view name: %r" % table)
    return __sqlite(f"SELECT * FROM [{table}]")
def sqlite_3_drop_view(table: str):
    """Drop the view named *table*."""
    if "]" in table:
        raise ValueError("invalid view name: %r" % table)
    __sqlite(f"DROP VIEW [{table}]")
# print(sqlite_3_select_identity_name('Leshqa_Random'))
# sqlite_3_create_statistic('Leshqa_Random', 0, 0, 0, 0)
# sqlite_3_update_statistic('Leshqa_Random', 0, 50, 1, 2)
# print(sqlite_3_get_statistic('Leshqa_Random'))
# sqlite_3_create_info('Leshqa_Random', '2001-10-18', 'male', 'NULL')
# print(sqlite_3_get_info('Leshqa_Random'))
# sqlite_3_create_view("test")
# print(sqlite_3_get_view("test"))
# sqlite_3_drop_view("test") | bot/bot/resources_manager/sql.py | import sqlite3
def __sqlite(query: str, params: tuple = ()):
    """Execute one SQL statement against ttbm.db and return all rows.

    Values are bound through sqlite3 ``?`` placeholders (``params``) so
    caller-supplied data can never alter the SQL text — the previous
    f-string interpolation was SQL-injectable.  The connection is closed
    even when execution raises (the original leaked it on error).
    """
    con = sqlite3.connect("../resources_manager/ttbm.db")
    try:
        cur = con.cursor()
        cur.execute(query, params)
        result = cur.fetchall()
        con.commit()
    finally:
        con.close()
    return result
def __user_key(name: str):
    """Return the primary key of the first user named *name* (IndexError if absent)."""
    return __sqlite("SELECT key FROM users WHERE name = ?", (name,))[0][0]
def sqlite_3_add_user(name: str, password: str, id: int):
    """Insert a new user row.  NOTE(review): the password is stored in plain text."""
    __sqlite("INSERT INTO users(name, password, id) VALUES(?, ?, ?)", (name, password, id))
def sqlite_3_select_identity_name(name: str):
    """Return the first user row matching *name*, or [] when no such user exists."""
    try:
        return __sqlite("SELECT * FROM users WHERE name = ?", (name,))[0]
    except IndexError:
        return []
def sqlite_3_create_statistic(name: str, hours: float, win_rates: float, count_of_wins: int, count_of_plays: int):
    """Create the statistic row attached to user *name*."""
    __sqlite("INSERT INTO statistic(hours, win_rates, count_of_wins, count_of_plays, key) "
             "VALUES(?, ?, ?, ?, ?);",
             (hours, win_rates, count_of_wins, count_of_plays, __user_key(name)))
def sqlite_3_update_statistic(name: str, hours: float, win_rates: float, count_of_wins: int, count_of_plays: int):
    """Overwrite the statistic row attached to user *name*."""
    __sqlite("UPDATE statistic SET hours = ?, win_rates = ?, count_of_wins = ?, "
             "count_of_plays = ? WHERE key = ?",
             (hours, win_rates, count_of_wins, count_of_plays, __user_key(name)))
def sqlite_3_get_statistic(name: str):
    """Return (hours, win_rates, count_of_wins, count_of_plays) for user *name*."""
    return __sqlite("SELECT hours, win_rates, count_of_wins, count_of_plays FROM statistic WHERE key = ?",
                    (__user_key(name),))[0]
def sqlite_3_create_info(name: str, date: str, gender: str, description: str):
    """Create the info row attached to user *name*."""
    __sqlite("INSERT INTO info(date, gender, description, key) VALUES(?, ?, ?, ?)",
             (date, gender, description, __user_key(name)))
def sqlite_3_get_info(name: str):
    """Return (date, gender, description) for user *name*."""
    return __sqlite("SELECT date, gender, description FROM info WHERE key = ?", (__user_key(name),))[0]
def sqlite_3_create_view(table: str):
    """Create a users+info join view named *table*.

    Identifiers cannot be bound as parameters, so the view name is
    validated before being quoted with [] to stop it escaping the quoting.
    """
    if "]" in table:
        raise ValueError("invalid view name: %r" % table)
    __sqlite(f"CREATE VIEW [{table}] AS SELECT users.key, users.name, users.id, users.password, info.date, "
             f"info.description FROM users INNER JOIN info ON users.key=info.key ORDER BY users.key;")
def sqlite_3_get_view(table: str):
    """Return every row of the view named *table*."""
    if "]" in table:
        raise ValueError("invalid view name: %r" % table)
    return __sqlite(f"SELECT * FROM [{table}]")
def sqlite_3_drop_view(table: str):
    """Drop the view named *table*."""
    if "]" in table:
        raise ValueError("invalid view name: %r" % table)
    __sqlite(f"DROP VIEW [{table}]")
# print(sqlite_3_select_identity_name('Leshqa_Random'))
# sqlite_3_create_statistic('Leshqa_Random', 0, 0, 0, 0)
# sqlite_3_update_statistic('Leshqa_Random', 0, 50, 1, 2)
# print(sqlite_3_get_statistic('Leshqa_Random'))
# sqlite_3_create_info('Leshqa_Random', '2001-10-18', 'male', 'NULL')
# print(sqlite_3_get_info('Leshqa_Random'))
# sqlite_3_create_view("test")
# print(sqlite_3_get_view("test"))
# sqlite_3_drop_view("test")
import os
import glob
import shutil
import argparse
import subprocess
# convert a .mid file to a .omd file using the omdconvert.exe program by OneTesla
def convert_midi_to_omd(file_path: str):
    """Convert a .mid file to .omd with OneTesla's omdconvert.exe.

    The converter's normal output is suppressed; errors still reach stderr.
    """
    target = os.path.abspath(file_path)
    subprocess.call(["omdconvert.exe", target], stdout=subprocess.DEVNULL)
# fix file paths to work (theoretically) on both windows and linux machines
def fix_file_path(path: str) -> str:
    """Collapse doubled backslashes and turn forward slashes into backslashes."""
    collapsed = path.replace("\\\\", "\\")
    return collapsed.replace("/", "\\")
# get the name of the last directory in a path from a path
def get_path_token(path: str, index: int = 1):
    """Return the *index*-th path component counting from the end (1 = last)."""
    components = fix_file_path(path).split("\\")
    return components[-index]
# get the relative path from a given path & root
def get_origin_relative_path(path: str, root: str):
    """Strip *root* out of *path*, leaving the origin-relative remainder.

    NOTE(review): str.replace removes EVERY occurrence of *root*, not only
    a leading one — behaviour preserved as-is.
    """
    relative = path.replace(root, "")
    return relative
# move a file from the origin path to the destination dir, keeping the same parent directory structure as the origin
def move_file(file_path: str, destination: str, origin: str):
    """Move *file_path* into *destination*, keeping its path relative to *origin*."""
    relative = file_path.replace(origin, "")
    os.rename(file_path, destination + relative)
# takes glob string, find .mid files, converts to .omd files, moves .omd files to destination
def glob_and_convert(glob_string: str, origin: str, destination: str, verbose: bool = False) -> int:
    """Convert every .mid matched by *glob_string* and move the resulting
    .omd files into *destination*, mirroring the *origin* layout.

    Returns the number of files converted.
    """
    converted = 0
    for midi_file in glob.glob(glob_string):
        midi_file = fix_file_path(midi_file)
        if verbose:
            print("Converting .mid file '" + midi_file + "'...")
        convert_midi_to_omd(midi_file)
        # The converter writes the .omd next to the source file.
        omd_file = midi_file.replace(".mid", ".omd").replace(".midi", ".omd")
        if verbose:
            print("Moving generated .omd file '" + omd_file + "'...")
        move_file(omd_file, destination, origin)
        converted += 1
    return converted
# generate new destination folders
def generate_new_folders(origin: str, destination: str, verbose: bool = False):
    """Recreate each top-level directory of *origin* inside *destination*."""
    for entry in os.listdir(origin):
        if os.path.isfile(origin + "\\" + entry):
            continue  # only directories are mirrored
        mirrored = destination + "\\" + get_path_token(fix_file_path(entry))
        if verbose:
            print("Creating OMD Output Directory '" + mirrored + "'...")
        os.mkdir(mirrored)
# delete the old destination OMD folders
def delete_old_omd(destination: str, verbose: bool = False):
    """Remove every file and directory currently inside *destination*."""
    for stale in glob.glob(destination + "/*"):
        if os.path.isfile(stale):
            if verbose:
                print("Removing Old .omd File '" + stale + "'...")
            os.remove(stale)
        else:
            if verbose:
                print("Removing Old .omd Directory '" + stale + "'...")
            shutil.rmtree(stale)
if __name__ == "__main__":
    # argument parser
    parser = argparse.ArgumentParser(description='A tool for mass converting midi files into .omd files for use in '
                                                 'the OneTesla interrupter.')
    # Single-dash long options ("-output", "-source") kept for backwards compatibility.
    parser.add_argument('-o', "-output", dest='destination', action='store', default="./omd",
                        help="Path to directory where .omd files and subdirectories will be populated into.")
    parser.add_argument('-s', "-source", dest='origin', action='store', default="./midi",
                        help="Path to directory where midi files and subdirectories will be converted and copied from.")
    parser.add_argument('-v', "--verbose", dest='verbose', action='store_const', default=False, const=True,
                        help="If provided will enable verbose logging.")
    args = parser.parse_args()
    # normalized destination / source file paths
    dest = fix_file_path(args.destination)
    orig = fix_file_path(args.origin)
    verb = args.verbose
    converted_count = 0
    # wipe stale output, recreate the directory layout, then convert
    delete_old_omd(dest, verb)
    generate_new_folders(orig, dest, verb)
    # Raw strings: "\*" in a normal literal is an invalid escape sequence
    # (SyntaxWarning on modern Python) even though it happens to keep the
    # backslash; r"..." makes the intent explicit without changing the value.
    converted_count += glob_and_convert(orig + r"\**\*.mid", orig, dest, verb)
    converted_count += glob_and_convert(orig + r"\*.mid", orig, dest, verb)
    print("Converted " + str(converted_count) + " midi files to .omd files successfully.")
import glob
import shutil
import argparse
import subprocess
# convert a .mid file to a .omd file using the omdconvert.exe program by OneTesla
def convert_midi_to_omd(file_path: str):
    """Convert a .mid file to .omd with OneTesla's omdconvert.exe.

    The converter's normal output is suppressed; errors still reach stderr.
    """
    target = os.path.abspath(file_path)
    subprocess.call(["omdconvert.exe", target], stdout=subprocess.DEVNULL)
# fix file paths to work (theoretically) on both windows and linux machines
def fix_file_path(path: str) -> str:
    """Collapse doubled backslashes and turn forward slashes into backslashes."""
    collapsed = path.replace("\\\\", "\\")
    return collapsed.replace("/", "\\")
# get the name of the last directory in a path from a path
def get_path_token(path: str, index: int = 1):
    """Return the *index*-th path component counting from the end (1 = last)."""
    components = fix_file_path(path).split("\\")
    return components[-index]
# get the relative path from a given path & root
def get_origin_relative_path(path: str, root: str):
    """Strip *root* out of *path*, leaving the origin-relative remainder.

    NOTE(review): str.replace removes EVERY occurrence of *root*, not only
    a leading one — behaviour preserved as-is.
    """
    relative = path.replace(root, "")
    return relative
# move a file from the origin path to the destination dir, keeping the same parent directory structure as the origin
def move_file(file_path: str, destination: str, origin: str):
    """Move *file_path* into *destination*, keeping its path relative to *origin*."""
    relative = file_path.replace(origin, "")
    os.rename(file_path, destination + relative)
# takes glob string, find .mid files, converts to .omd files, moves .omd files to destination
def glob_and_convert(glob_string: str, origin: str, destination: str, verbose: bool = False) -> int:
    """Convert every .mid matched by *glob_string* and move the resulting
    .omd files into *destination*, mirroring the *origin* layout.

    Returns the number of files converted.
    """
    converted = 0
    for midi_file in glob.glob(glob_string):
        midi_file = fix_file_path(midi_file)
        if verbose:
            print("Converting .mid file '" + midi_file + "'...")
        convert_midi_to_omd(midi_file)
        # The converter writes the .omd next to the source file.
        omd_file = midi_file.replace(".mid", ".omd").replace(".midi", ".omd")
        if verbose:
            print("Moving generated .omd file '" + omd_file + "'...")
        move_file(omd_file, destination, origin)
        converted += 1
    return converted
# generate new destination folders
def generate_new_folders(origin: str, destination: str, verbose: bool = False):
    """Recreate each top-level directory of *origin* inside *destination*."""
    for entry in os.listdir(origin):
        if os.path.isfile(origin + "\\" + entry):
            continue  # only directories are mirrored
        mirrored = destination + "\\" + get_path_token(fix_file_path(entry))
        if verbose:
            print("Creating OMD Output Directory '" + mirrored + "'...")
        os.mkdir(mirrored)
# delete the old destination OMD folders
def delete_old_omd(destination: str, verbose: bool = False):
    """Remove every file and directory currently inside *destination*."""
    for stale in glob.glob(destination + "/*"):
        if os.path.isfile(stale):
            if verbose:
                print("Removing Old .omd File '" + stale + "'...")
            os.remove(stale)
        else:
            if verbose:
                print("Removing Old .omd Directory '" + stale + "'...")
            shutil.rmtree(stale)
if __name__ == "__main__":
    # argument parser
    parser = argparse.ArgumentParser(description='A tool for mass converting midi files into .omd files for use in '
                                                 'the OneTesla interrupter.')
    # Single-dash long options ("-output", "-source") kept for backwards compatibility.
    parser.add_argument('-o', "-output", dest='destination', action='store', default="./omd",
                        help="Path to directory where .omd files and subdirectories will be populated into.")
    parser.add_argument('-s', "-source", dest='origin', action='store', default="./midi",
                        help="Path to directory where midi files and subdirectories will be converted and copied from.")
    parser.add_argument('-v', "--verbose", dest='verbose', action='store_const', default=False, const=True,
                        help="If provided will enable verbose logging.")
    args = parser.parse_args()
    # normalized destination / source file paths
    dest = fix_file_path(args.destination)
    orig = fix_file_path(args.origin)
    verb = args.verbose
    converted_count = 0
    # wipe stale output, recreate the directory layout, then convert
    delete_old_omd(dest, verb)
    generate_new_folders(orig, dest, verb)
    # Raw strings: "\*" in a normal literal is an invalid escape sequence
    # (SyntaxWarning on modern Python) even though it happens to keep the
    # backslash; r"..." makes the intent explicit without changing the value.
    converted_count += glob_and_convert(orig + r"\**\*.mid", orig, dest, verb)
    converted_count += glob_and_convert(orig + r"\*.mid", orig, dest, verb)
    print("Converted " + str(converted_count) + " midi files to .omd files successfully.")
import unittest
from unittest import mock
import jinja2
from pythonforandroid.build import run_pymodules_install
from pythonforandroid.archs import ArchARMv7_a, ArchAarch_64
class TestBuildBasic(unittest.TestCase):
    """Unit tests for pythonforandroid.build.run_pymodules_install."""
    def test_run_pymodules_install_optional_project_dir(self):
        """
        Makes sure the `run_pymodules_install()` doesn't crash when the
        `project_dir` optional parameter is None, refs #1898
        """
        ctx = mock.Mock()
        ctx.archs = [ArchARMv7_a(ctx), ArchAarch_64(ctx)]
        modules = []
        project_dir = None
        with mock.patch('pythonforandroid.build.info') as m_info:
            assert run_pymodules_install(ctx, ctx.archs[0], modules, project_dir) is None
            # With no modules and no setup.py the function should log and bail out.
            assert m_info.call_args_list[-1] == mock.call(
                'No Python modules and no setup.py to process, skipping')
    def test_strip_if_with_debug_symbols(self):
        """Object files must be stripped only when debug symbols are disabled."""
        ctx = mock.Mock()
        ctx.python_recipe.major_minor_version_string = "python3.6"
        ctx.get_site_packages_dir.return_value = "test-doesntexist"
        ctx.build_dir = "nonexistant_directory"
        ctx.archs = ["arm64"]
        modules = ["mymodule"]
        project_dir = None
        # Patch out filesystem/shell access so only the strip decision is exercised.
        with mock.patch('pythonforandroid.build.info'), \
             mock.patch('sh.Command'),\
             mock.patch('pythonforandroid.build.open'),\
             mock.patch('pythonforandroid.build.shprint'),\
             mock.patch('pythonforandroid.build.current_directory'),\
             mock.patch('pythonforandroid.build.CythonRecipe') as m_CythonRecipe, \
             mock.patch('pythonforandroid.build.project_has_setup_py') as m_project_has_setup_py, \
             mock.patch('pythonforandroid.build.run_setuppy_install'):
            m_project_has_setup_py.return_value = False
            # Make sure it is NOT called when `with_debug_symbols` is true:
            ctx.with_debug_symbols = True
            assert run_pymodules_install(ctx, ctx.archs[0], modules, project_dir) is None
            assert m_CythonRecipe().strip_object_files.called is False
            # Make sure strip object files IS called when
            # `with_debug_symbols` is false:
            ctx.with_debug_symbols = False
            assert run_pymodules_install(ctx, ctx.archs[0], modules, project_dir) is None
            assert m_CythonRecipe().strip_object_files.called is True
class TestTemplates(unittest.TestCase):
    """Render checks for the sdl2 bootstrap AndroidManifest template."""
    def test_android_manifest_xml(self):
        """Render AndroidManifest.tmpl.xml and spot-check the generated XML."""
        args = mock.Mock()
        args.min_sdk_version = 12
        args.build_mode = 'debug'
        args.native_services = ['abcd', ]
        args.permissions = []
        args.add_activity = []
        args.android_used_libs = []
        args.meta_data = []
        args.extra_manifest_xml = '<tag-a><tag-b></tag-b></tag-a>'
        args.extra_manifest_application_arguments = 'android:someParameter="true" android:anotherParameter="false"'
        render_args = {
            "args": args,
            "service": False,
            "service_names": [],
            "android_api": 1234,
            "debug": "debug" in args.build_mode,
            "native_services": args.native_services
        }
        environment = jinja2.Environment(
            loader=jinja2.FileSystemLoader('pythonforandroid/bootstraps/sdl2/build/templates/')
        )
        template = environment.get_template('AndroidManifest.tmpl.xml')
        xml = template.render(**render_args)
        # Each expected attribute/element must appear the exact number of times.
        assert xml.count('android:minSdkVersion="12"') == 1
        assert xml.count('android:anotherParameter="false"') == 1
        assert xml.count('android:someParameter="true"') == 1
        assert xml.count('<tag-a><tag-b></tag-b></tag-a>') == 1
        assert xml.count('android:process=":service_') == 0
        assert xml.count('targetSdkVersion="1234"') == 1
        assert xml.count('android:debuggable="true"') == 1
        assert xml.count('<service android:name="abcd" />') == 1
# TODO: potentially some other checks to be added here to cover other "logic" (flags and loops) in the template | tests/test_build.py | import unittest
from unittest import mock
import jinja2
from pythonforandroid.build import run_pymodules_install
from pythonforandroid.archs import ArchARMv7_a, ArchAarch_64
class TestBuildBasic(unittest.TestCase):
    """Unit tests for pythonforandroid.build.run_pymodules_install."""
    def test_run_pymodules_install_optional_project_dir(self):
        """
        Makes sure the `run_pymodules_install()` doesn't crash when the
        `project_dir` optional parameter is None, refs #1898
        """
        ctx = mock.Mock()
        ctx.archs = [ArchARMv7_a(ctx), ArchAarch_64(ctx)]
        modules = []
        project_dir = None
        with mock.patch('pythonforandroid.build.info') as m_info:
            assert run_pymodules_install(ctx, ctx.archs[0], modules, project_dir) is None
            # With no modules and no setup.py the function should log and bail out.
            assert m_info.call_args_list[-1] == mock.call(
                'No Python modules and no setup.py to process, skipping')
    def test_strip_if_with_debug_symbols(self):
        """Object files must be stripped only when debug symbols are disabled."""
        ctx = mock.Mock()
        ctx.python_recipe.major_minor_version_string = "python3.6"
        ctx.get_site_packages_dir.return_value = "test-doesntexist"
        ctx.build_dir = "nonexistant_directory"
        ctx.archs = ["arm64"]
        modules = ["mymodule"]
        project_dir = None
        # Patch out filesystem/shell access so only the strip decision is exercised.
        with mock.patch('pythonforandroid.build.info'), \
             mock.patch('sh.Command'),\
             mock.patch('pythonforandroid.build.open'),\
             mock.patch('pythonforandroid.build.shprint'),\
             mock.patch('pythonforandroid.build.current_directory'),\
             mock.patch('pythonforandroid.build.CythonRecipe') as m_CythonRecipe, \
             mock.patch('pythonforandroid.build.project_has_setup_py') as m_project_has_setup_py, \
             mock.patch('pythonforandroid.build.run_setuppy_install'):
            m_project_has_setup_py.return_value = False
            # Make sure it is NOT called when `with_debug_symbols` is true:
            ctx.with_debug_symbols = True
            assert run_pymodules_install(ctx, ctx.archs[0], modules, project_dir) is None
            assert m_CythonRecipe().strip_object_files.called is False
            # Make sure strip object files IS called when
            # `with_debug_symbols` is false:
            ctx.with_debug_symbols = False
            assert run_pymodules_install(ctx, ctx.archs[0], modules, project_dir) is None
            assert m_CythonRecipe().strip_object_files.called is True
class TestTemplates(unittest.TestCase):
    """Render checks for the sdl2 bootstrap AndroidManifest template."""
    def test_android_manifest_xml(self):
        """Render AndroidManifest.tmpl.xml and spot-check the generated XML."""
        args = mock.Mock()
        args.min_sdk_version = 12
        args.build_mode = 'debug'
        args.native_services = ['abcd', ]
        args.permissions = []
        args.add_activity = []
        args.android_used_libs = []
        args.meta_data = []
        args.extra_manifest_xml = '<tag-a><tag-b></tag-b></tag-a>'
        args.extra_manifest_application_arguments = 'android:someParameter="true" android:anotherParameter="false"'
        render_args = {
            "args": args,
            "service": False,
            "service_names": [],
            "android_api": 1234,
            "debug": "debug" in args.build_mode,
            "native_services": args.native_services
        }
        environment = jinja2.Environment(
            loader=jinja2.FileSystemLoader('pythonforandroid/bootstraps/sdl2/build/templates/')
        )
        template = environment.get_template('AndroidManifest.tmpl.xml')
        xml = template.render(**render_args)
        # Each expected attribute/element must appear the exact number of times.
        assert xml.count('android:minSdkVersion="12"') == 1
        assert xml.count('android:anotherParameter="false"') == 1
        assert xml.count('android:someParameter="true"') == 1
        assert xml.count('<tag-a><tag-b></tag-b></tag-a>') == 1
        assert xml.count('android:process=":service_') == 0
        assert xml.count('targetSdkVersion="1234"') == 1
        assert xml.count('android:debuggable="true"') == 1
        assert xml.count('<service android:name="abcd" />') == 1
        # TODO: potentially some other checks to be added here to cover other "logic" (flags and loops) in the template
from flask import render_template, url_for, request, flash, redirect
# importation de render_template (relie les templates aux routes), url_for (permet de construire des url vers les
# fonctions et les pages html), request (permet d'importer types d'objets et de les utiliser comme instance),
# flash (envoie des messages flash) et redirect (permet de rediriger vers l'url d'une autre route) depuis le module flask
from flask_login import current_user, login_user, logout_user, login_required
# importation de current_user (utilisateur courant), login_user (connexion), logout_user (déconnexion) et login_required
# (accès limité) pour gérer les sessions utilisateur·rice·s
from sqlalchemy import or_
# importation de l'opérateur OR depuis SQLAlchemy pour faire du requêtage
from ..app import app, db, login
# importation de la variable app, de la BDD et de login pour gérer les utilisateur·rice·s
from ..constantes import RESULTATS_PAR_PAGE
# importation de la variable RESULTATS_PAR_PAGE utilisée pour les routes recherche et index
from ..modeles.donnees import Collection, Work, Mediums
# importation des classes Collection, Work et Mediums du fichier données.py
from ..modeles.utilisateurs import User
# importation de la classe User du fichier utilisateurs.py
# | ROUTES GENERALES |
@app.route("/")
def accueil():
"""
Route permettant d'afficher la page d'accueil
:return: template accueil.html
:rtype: template
"""
collections = Collection.query.all()
return render_template("pages/accueil.html", nom="CollectArt", collections=collections)
# La fonction render_template prend comme premier argument le chemin du template et en deuxième des arguments nommés, qui
# peuvent ensuite être réutilisés en tant que variables dans les templates.
@app.route("/collections")
def collections():
"""
Route permettant d'afficher les différentes collections de la base de données
:return: template collections.html
:rtype: template
"""
collections = Collection.query.order_by(Collection.collection_name.desc())
return render_template("pages/collections.html", nom="CollectArt", collections=collections)
@app.route("/collection/<int:collection_id>")
def collection(collection_id):
"""
Route permettant d'afficher les données d'une collection et les oeuvres qui y sont associées
:param collection_id: clé primaire d'une collection (int)
:return: template collection.html
:rtype: template
"""
unique_collection = Collection.query.get(collection_id)
work = unique_collection.work
return render_template("pages/collection.html", nom="CollectArt", collection=unique_collection, work=work)
@app.route("/collection/oeuvre/<int:work_id>")
def oeuvre(work_id):
"""
Route permettant d'afficher la notice d'une oeuvre
:param work_id: clé primaire d'une oeuvre (int)
:return: template oeuvre.html
:rtype: template
"""
unique_work = Work.query.get(work_id)
return render_template("pages/oeuvre.html", nom="CollectArt", work=unique_work)
@app.route("/recherche")
def recherche():
"""
Route permettant de faire de la recherche plein-texte et d'afficher une liste de résultats
:return: template resultats.html
:rtype: template
"""
keyword = request.args.get("keyword", None)
# stockage dans la variable keywork une liste contenant la valeur du mot-clé rentré par l'utilisateur·rice
page = request.args.get("page", 1)
if isinstance(page, str) and page.isdigit():
page = int(page)
else:
page = 1
# si le numéro de la page est une chaîne de caractères composée uniquement de chiffres, on la recaste en integer
# sinon, le numéro de la page est égal à 1
results = []
# On crée une liste vide de résultats
title = "Recherche"
if keyword :
# Si un mot-clé est rentré dans la barre de recherche, on requête les tables de la BDD pour vérifier s'il y a des
# correspondances. Le résultat est stocké dans la liste résults = []
results = Collection.query.filter(
or_(
Collection.collection_name.like("%{}%".format(keyword)),
Collection.collection_collector_name.like("%{}%".format(keyword)),
Collection.collection_collector_firstname.like("%{}%".format(keyword)),
Collection.collection_collector_date.like("%{}%".format(keyword)),
Collection.collection_collector_bio.like("%{}%".format(keyword)),
Collection.work.any((Work.work_title).like("%{}%".format(keyword))),
Collection.work.any((Work.work_author).like("%{}%".format(keyword))),
Collection.work.any((Work.work_date).like("%{}%".format(keyword))),
Collection.work.any((Work.work_medium).like("%{}%".format(keyword))),
)
# on requête la table collection et la table work grâce à la commande any (au moins un des critères est true)
).order_by(Collection.collection_name.asc()).paginate(page=page, per_page=RESULTATS_PAR_PAGE)
# création de la pagination avec la méthode .paginate qui remplace le .all dans la requête sur la base
title = "Résultat(s) de la recherche : " + keyword + "."
return render_template("pages/resultats.html", nom="CollectArt", results=results, title=title, keyword=keyword)
@app.route("/index")
def index():
"""
Route qui affiche la liste des collectionneur·euse·s (ordonnée par nom) de la base
:return: template index.html
:rtype: template
"""
title="Index"
collector = Collection.query.all()
if len(collector) == 0:
return render_template("pages/index.html", nom="CollectArt", collector=collector, title=title)
else:
page = request.args.get("page", 1)
if isinstance(page, str) and page.isdigit():
page = int(page)
else:
page = 1
collector = Collection.query.order_by(
Collection.collection_collector_name
).paginate(page=page, per_page=RESULTATS_PAR_PAGE)
return render_template("pages/index.html", nom="CollectArt", collector=collector, title=title)
# | ROUTES INTERFACE UTILISATEUR·RICE |
@app.route("/edit-collection", methods=["GET", "POST"])
@login_required
def edit_collection():
"""
Route permettant à un·e utilisateur·rice de créer une nouvelle collection
:return: redirection ou template edit_collection.html
:rtype: template
"""
if request.method == "POST":
# si le formulaire est envoyé, on passe en méthode POST
status, data = Collection.add_collection(
# on applique la fonction add_collection définie dans le fichier données.py
name=request.form.get("name", None),
collector_name=request.form.get("collector_name", None),
collector_firstname=request.form.get("collector_firstname", None),
collector_date=request.form.get("collector_date", None),
collector_bio=request.form.get("collector_bio", None)
)
if status is True:
flash("Création d'une nouvelle collection réussie !", "success")
return redirect("/collections")
else:
flash("La création d'une nouvelle collection a échoué pour les raisons suivantes : " + ", ".join(data), "error")
return render_template("pages/edit-collection.html", nom="CollectArt")
else:
return render_template("pages/edit-collection.html", nom="CollectArt")
@app.route("/update-collection/<int:collection_id>", methods=["POST", "GET"])
@login_required
def update_collection(collection_id):
"""
Route permettant de modifier les données d'une collection
:param collection_id: ID de la collection récupérée depuis la page collection
:return: redirection ou template update-collection.html
:rtype: template
"""
if request.method == "GET":
updateCollection = Collection.query.get(collection_id)
return render_template("pages/update-collection.html", nom="CollectArt", updateCollection=updateCollection)
# si on est en méthode GET, on renvoie sur la page html les éléments de l'objet collection correspondant à l'id
# de la route
else:
status, data = Collection.update_collection(
collection_id=collection_id,
name=request.form.get("name", None),
collector_name=request.form.get("collector_name", None),
collector_firstname=request.form.get("collector_firstname", None),
collector_date=request.form.get("collector_date", None),
collector_bio=request.form.get("collector_bio", None)
)
# sinon, on récupère les données du formulaire à modifier et on les modifie grâce à la fonction update_collection
if status is True:
flash("Modification réussie !", "success")
return redirect("/collections")
else:
flash("Les erreurs suivantes ont été rencontrées : " + ", ".join(data), "danger")
updateCollection = Collection.query.get(collection_id)
return render_template("pages/update-collection.html", nom="CollectArt", updateCollection=updateCollection)
@app.route("/delete-collection/<int:collection_id>", methods=["POST", "GET"])
@login_required
def delete_collection(collection_id):
"""
Route permettant de supprimer une collection et ses données
:param collection_id : ID de la collection
:return: redirection ou template delete-collection.html
:rtype: template
"""
deleteCollection = Collection.query.get(collection_id)
works = deleteCollection.work
# on cherche les oeuvres liées à la collection
if request.method == "POST":
status = Collection.delete_collection(
collection_id=collection_id
)
# si le formulaire a été envoyé, on passe en méthode POST et on récupère la notice puis on applique la méthode
# delete_collection
if status is True:
flash("Suppression réussie !", "success")
return redirect("/collections")
else:
flash("La suppression a échouée...", "error")
return redirect("/collections")
else:
return render_template("pages/delete-collection.html", nom="CollectArt", deleteCollection=deleteCollection)
@app.route("/collection/<int:collection_id>/edit-work", methods=["GET", "POST"])
@login_required
def edit_work(collection_id):
"""
Route permettant à un·e utilisateur·rice de créer la notice d'une nouvelle oeuvre et de l'ajouter à une collection
:param collection_id: ID de la collection récupérée depuis la page collection
:return: redirection ou template edit-work.html
:rtype: template
"""
mediums = Mediums.query.all()
unique_collection = Collection.query.get(collection_id)
if request.method == "POST":
status, data = Work.add_work(
title=request.form.get("title", None),
author=request.form.get("author", None),
date=request.form.get("date", None),
medium=request.form.get("medium", None),
dimensions=request.form.get("dimensions", None),
image=request.form.get("image", None),
collection_id=collection_id
)
if status is True:
flash("Vous venez d'ajouter une nouvelle oeuvre à votre collection !", "success")
return redirect("/collections")
else:
flash("L'ajout d'une nouvelle oeuvre a échoué pour les raisons suivantes : " + ", ".join(data), "error")
return render_template("pages/edit-work.html", nom="CollectArt", collection=unique_collection, mediums=mediums)
else:
return render_template("pages/edit-work.html", nom="CollectArt", collection=unique_collection, mediums=mediums)
@app.route("/update-work/<int:work_id>", methods=["POST", "GET"])
@login_required
def update_work(work_id):
"""
Route permettant de modifier les données d'une collection
:param work_id: ID de l'oeuvre récupérée depuis la page oeuvre
:return: redirection ou template update-work.html
:rtype: template
"""
if request.method == "GET":
updateWork = Work.query.get(work_id)
return render_template("pages/update-work.html", updateWork=updateWork)
else:
status, data = Work.update_work(
work_id=work_id,
title=request.form.get("title", None),
author=request.form.get("author", None),
date=request.form.get("date", None),
medium=request.form.get("medium", None),
dimensions=request.form.get("dimensions", None),
image=request.form.get("image", None)
)
if status is True:
flash("Modification réussie !", "success")
return redirect("/collections")
else:
flash("Les erreurs suivantes ont été rencontrées : " + ", ".join(data), "danger")
updateWork = Work.query.get(work_id)
return render_template("pages/update-work.html", nom="CollectArt", updateWork=updateWork)
@app.route("/delete-work/<int:work_id>", methods=["POST", "GET"])
@login_required
def delete_work(work_id):
"""
Route pour supprimer une oeuvre et ses données dans la base
:param work_id : ID de l'oeuvre
:return: redirection ou template delete-work.html
:rtype: template
"""
deleteWork = Work.query.get(work_id)
if request.method == "POST":
status = Work.delete_work(
work_id=work_id
)
if status is True:
flash("Suppression réussie !", "success")
return redirect("/collections")
else:
flash("La suppresion a échoué...", "error")
return redirect("/collections")
else:
return render_template("pages/delete-work.html", deleteWork=deleteWork)
# | ROUTES POUR LA GESTION DES UTILISATEUR·RICE·S |
@app.route("/inscription", methods=["GET", "POST"])
def inscription():
"""
Route permettant de gérer les inscriptions utilisateur·rice·s
:return: redirection ou template inscription.html
:rtype: template
"""
if request.method == "POST":
status, data = User.creer(
login=request.form.get("login", None),
email=request.form.get("email", None),
name=request.form.get("name", None),
password=request.form.get("password", None)
)
if status is True:
flash("Inscription réussie ! Vous pouvez désormais vous connecter", "success")
return redirect("/")
else:
flash("Les erreurs suivantes ont été rencontrées dans les champs suivants : " + ", ".join(data), "error")
return render_template("pages/inscription.html", nom="CollectArt")
else:
return render_template("pages/inscription.html", nom="CollectArt")
@app.route("/connexion", methods=["POST", "GET"])
def connexion():
"""
Route permettant de gérer les connexions
:return: reidrection ou template connexion.html
:rtype: template
"""
if current_user.is_authenticated is True:
flash("Vous êtes déjà connecté·e", "info")
return redirect("/")
# si l'utilisateur·rice est déjà connecté·e, il/elle est redirigé·e vers la page d'accueil
if request.method == "POST":
user = User.identification(
login=request.form.get("login", None),
password=request.form.get("password", None)
)
if user:
flash("Connexion réussie !", "success")
login_user(user)
return redirect("/")
else:
flash("Nom d'utilisateur·rice ou mot de passe incorrect", "error")
return render_template("pages/connexion.html", nom="CollectArt")
login.login_view = "connexion"
@app.route("/deconnexion")
def deconnexion():
"""
Route permettant de gérer les déconnexions
:return: redirection vers l'accueil
:rtype: template
"""
if current_user.is_authenticated is True:
logout_user()
flash("Vous êtes déconnecté·e", "info")
return redirect("/") | app/routes/generic.py | from flask import render_template, url_for, request, flash, redirect
# importation de render_template (relie les templates aux routes), url_for (permet de construire des url vers les
# fonctions et les pages html), request (permet d'importer types d'objets et de les utiliser comme insinstance),
# flash (envoie des messages flash) et redirect (permet de rediriger vers l'url d'une autre route) depuis le module flask
from flask_login import current_user, login_user, logout_user, login_required
# importation de current_user (utilisateur courant), login_user (connexion), logout_user (déconnexion) et login_required
# (accès limité) pour gérer les sessions utilisateur·rice·s
from sqlalchemy import or_
# importation de l'opérateur OR depuis SQLAlchemy pour faire du requêtage
from ..app import app, db, login
# importation de la variable app, de la BDD et de login pour gérer les utilisateur·rice·s
from ..constantes import RESULTATS_PAR_PAGE
# importation de la variable RESULTATS_PAR_PAGE utilisée pour les routes recherche et index
from ..modeles.donnees import Collection, Work, Mediums
# importation des classes Collection, Work et Mediums du fichier données.py
from ..modeles.utilisateurs import User
# importation de la classe User du fichier utilisateurs.py
# | ROUTES GENERALES |
@app.route("/")
def accueil():
"""
Route permettant d'afficher la page d'accueil
:return: template accueil.html
:rtype: template
"""
collections = Collection.query.all()
return render_template("pages/accueil.html", nom="CollectArt", collections=collections)
# La fonction render_template prend comme premier argument le chemin du template et en deuxième des arguments nommés, qui
# peuvent ensuite être réutilisés en tant que variables dans les templates.
@app.route("/collections")
def collections():
"""
Route permettant d'afficher les différentes collections de la base de données
:return: template collections.html
:rtype: template
"""
collections = Collection.query.order_by(Collection.collection_name.desc())
return render_template("pages/collections.html", nom="CollectArt", collections=collections)
@app.route("/collection/<int:collection_id>")
def collection(collection_id):
"""
Route permettant d'afficher les données d'une collection et les oeuvres qui y sont associées
:param collection_id: clé primaire d'une collection (int)
:return: template collection.html
:rtype: template
"""
unique_collection = Collection.query.get(collection_id)
work = unique_collection.work
return render_template("pages/collection.html", nom="CollectArt", collection=unique_collection, work=work)
@app.route("/collection/oeuvre/<int:work_id>")
def oeuvre(work_id):
"""
Route permettant d'afficher la notice d'une oeuvre
:param work_id: clé primaire d'une oeuvre (int)
:return: template oeuvre.html
:rtype: template
"""
unique_work = Work.query.get(work_id)
return render_template("pages/oeuvre.html", nom="CollectArt", work=unique_work)
@app.route("/recherche")
def recherche():
"""
Route permettant de faire de la recherche plein-texte et d'afficher une liste de résultats
:return: template resultats.html
:rtype: template
"""
keyword = request.args.get("keyword", None)
# stockage dans la variable keywork une liste contenant la valeur du mot-clé rentré par l'utilisateur·rice
page = request.args.get("page", 1)
if isinstance(page, str) and page.isdigit():
page = int(page)
else:
page = 1
# si le numéro de la page est une chaîne de caractères composée uniquement de chiffres, on la recaste en integer
# sinon, le numéro de la page est égal à 1
results = []
# On crée une liste vide de résultats
title = "Recherche"
if keyword :
# Si un mot-clé est rentré dans la barre de recherche, on requête les tables de la BDD pour vérifier s'il y a des
# correspondances. Le résultat est stocké dans la liste résults = []
results = Collection.query.filter(
or_(
Collection.collection_name.like("%{}%".format(keyword)),
Collection.collection_collector_name.like("%{}%".format(keyword)),
Collection.collection_collector_firstname.like("%{}%".format(keyword)),
Collection.collection_collector_date.like("%{}%".format(keyword)),
Collection.collection_collector_bio.like("%{}%".format(keyword)),
Collection.work.any((Work.work_title).like("%{}%".format(keyword))),
Collection.work.any((Work.work_author).like("%{}%".format(keyword))),
Collection.work.any((Work.work_date).like("%{}%".format(keyword))),
Collection.work.any((Work.work_medium).like("%{}%".format(keyword))),
)
# on requête la table collection et la table work grâce à la commande any (au moins un des critères est true)
).order_by(Collection.collection_name.asc()).paginate(page=page, per_page=RESULTATS_PAR_PAGE)
# création de la pagination avec la méthode .paginate qui remplace le .all dans la requête sur la base
title = "Résultat(s) de la recherche : " + keyword + "."
return render_template("pages/resultats.html", nom="CollectArt", results=results, title=title, keyword=keyword)
@app.route("/index")
def index():
"""
Route qui affiche la liste des collectionneur·euse·s (ordonnée par nom) de la base
:return: template index.html
:rtype: template
"""
title="Index"
collector = Collection.query.all()
if len(collector) == 0:
return render_template("pages/index.html", nom="CollectArt", collector=collector, title=title)
else:
page = request.args.get("page", 1)
if isinstance(page, str) and page.isdigit():
page = int(page)
else:
page = 1
collector = Collection.query.order_by(
Collection.collection_collector_name
).paginate(page=page, per_page=RESULTATS_PAR_PAGE)
return render_template("pages/index.html", nom="CollectArt", collector=collector, title=title)
# | ROUTES INTERFACE UTILISATEUR·RICE |
@app.route("/edit-collection", methods=["GET", "POST"])
@login_required
def edit_collection():
"""
Route permettant à un·e utilisateur·rice de créer une nouvelle collection
:return: redirection ou template edit_collection.html
:rtype: template
"""
if request.method == "POST":
# si le formulaire est envoyé, on passe en méthode POST
status, data = Collection.add_collection(
# on applique la fonction add_collection définie dans le fichier données.py
name=request.form.get("name", None),
collector_name=request.form.get("collector_name", None),
collector_firstname=request.form.get("collector_firstname", None),
collector_date=request.form.get("collector_date", None),
collector_bio=request.form.get("collector_bio", None)
)
if status is True:
flash("Création d'une nouvelle collection réussie !", "success")
return redirect("/collections")
else:
flash("La création d'une nouvelle collection a échoué pour les raisons suivantes : " + ", ".join(data), "error")
return render_template("pages/edit-collection.html", nom="CollectArt")
else:
return render_template("pages/edit-collection.html", nom="CollectArt")
@app.route("/update-collection/<int:collection_id>", methods=["POST", "GET"])
@login_required
def update_collection(collection_id):
"""
Route permettant de modifier les données d'une collection
:param collection_id: ID de la collection récupérée depuis la page collection
:return: redirection ou template update-collection.html
:rtype: template
"""
if request.method == "GET":
updateCollection = Collection.query.get(collection_id)
return render_template("pages/update-collection.html", nom="CollectArt", updateCollection=updateCollection)
# si on est en méthode GET, on renvoie sur la page html les éléments de l'objet collection correspondant à l'id
# de la route
else:
status, data = Collection.update_collection(
collection_id=collection_id,
name=request.form.get("name", None),
collector_name=request.form.get("collector_name", None),
collector_firstname=request.form.get("collector_firstname", None),
collector_date=request.form.get("collector_date", None),
collector_bio=request.form.get("collector_bio", None)
)
# sinon, on récupère les données du formulaire à modifier et on les modifie grâce à la fonction update_collection
if status is True:
flash("Modification réussie !", "success")
return redirect("/collections")
else:
flash("Les erreurs suivantes ont été rencontrées : " + ", ".join(data), "danger")
updateCollection = Collection.query.get(collection_id)
return render_template("pages/update-collection.html", nom="CollectArt", updateCollection=updateCollection)
@app.route("/delete-collection/<int:collection_id>", methods=["POST", "GET"])
@login_required
def delete_collection(collection_id):
"""
Route permettant de supprimer une collection et ses données
:param collection_id : ID de la collection
:return: redirection ou template delete-collection.html
:rtype: template
"""
deleteCollection = Collection.query.get(collection_id)
works = deleteCollection.work
# on cherche les oeuvres liées à la collection
if request.method == "POST":
status = Collection.delete_collection(
collection_id=collection_id
)
# si le formulaire a été envoyé, on passe en méthode POST et on récupère la notice puis on applique la méthode
# delete_collection
if status is True:
flash("Suppression réussie !", "success")
return redirect("/collections")
else:
flash("La suppression a échouée...", "error")
return redirect("/collections")
else:
return render_template("pages/delete-collection.html", nom="CollectArt", deleteCollection=deleteCollection)
@app.route("/collection/<int:collection_id>/edit-work", methods=["GET", "POST"])
@login_required
def edit_work(collection_id):
"""
Route permettant à un·e utilisateur·rice de créer la notice d'une nouvelle oeuvre et de l'ajouter à une collection
:param collection_id: ID de la collection récupérée depuis la page collection
:return: redirection ou template edit-work.html
:rtype: template
"""
mediums = Mediums.query.all()
unique_collection = Collection.query.get(collection_id)
if request.method == "POST":
status, data = Work.add_work(
title=request.form.get("title", None),
author=request.form.get("author", None),
date=request.form.get("date", None),
medium=request.form.get("medium", None),
dimensions=request.form.get("dimensions", None),
image=request.form.get("image", None),
collection_id=collection_id
)
if status is True:
flash("Vous venez d'ajouter une nouvelle oeuvre à votre collection !", "success")
return redirect("/collections")
else:
flash("L'ajout d'une nouvelle oeuvre a échoué pour les raisons suivantes : " + ", ".join(data), "error")
return render_template("pages/edit-work.html", nom="CollectArt", collection=unique_collection, mediums=mediums)
else:
return render_template("pages/edit-work.html", nom="CollectArt", collection=unique_collection, mediums=mediums)
@app.route("/update-work/<int:work_id>", methods=["POST", "GET"])
@login_required
def update_work(work_id):
"""
Route permettant de modifier les données d'une collection
:param work_id: ID de l'oeuvre récupérée depuis la page oeuvre
:return: redirection ou template update-work.html
:rtype: template
"""
if request.method == "GET":
updateWork = Work.query.get(work_id)
return render_template("pages/update-work.html", updateWork=updateWork)
else:
status, data = Work.update_work(
work_id=work_id,
title=request.form.get("title", None),
author=request.form.get("author", None),
date=request.form.get("date", None),
medium=request.form.get("medium", None),
dimensions=request.form.get("dimensions", None),
image=request.form.get("image", None)
)
if status is True:
flash("Modification réussie !", "success")
return redirect("/collections")
else:
flash("Les erreurs suivantes ont été rencontrées : " + ", ".join(data), "danger")
updateWork = Work.query.get(work_id)
return render_template("pages/update-work.html", nom="CollectArt", updateWork=updateWork)
@app.route("/delete-work/<int:work_id>", methods=["POST", "GET"])
@login_required
def delete_work(work_id):
"""
Route pour supprimer une oeuvre et ses données dans la base
:param work_id : ID de l'oeuvre
:return: redirection ou template delete-work.html
:rtype: template
"""
deleteWork = Work.query.get(work_id)
if request.method == "POST":
status = Work.delete_work(
work_id=work_id
)
if status is True:
flash("Suppression réussie !", "success")
return redirect("/collections")
else:
flash("La suppresion a échoué...", "error")
return redirect("/collections")
else:
return render_template("pages/delete-work.html", deleteWork=deleteWork)
# | ROUTES POUR LA GESTION DES UTILISATEUR·RICE·S |
@app.route("/inscription", methods=["GET", "POST"])
def inscription():
"""
Route permettant de gérer les inscriptions utilisateur·rice·s
:return: redirection ou template inscription.html
:rtype: template
"""
if request.method == "POST":
status, data = User.creer(
login=request.form.get("login", None),
email=request.form.get("email", None),
name=request.form.get("name", None),
password=request.form.get("password", None)
)
if status is True:
flash("Inscription réussie ! Vous pouvez désormais vous connecter", "success")
return redirect("/")
else:
flash("Les erreurs suivantes ont été rencontrées dans les champs suivants : " + ", ".join(data), "error")
return render_template("pages/inscription.html", nom="CollectArt")
else:
return render_template("pages/inscription.html", nom="CollectArt")
@app.route("/connexion", methods=["POST", "GET"])
def connexion():
"""
Route permettant de gérer les connexions
:return: reidrection ou template connexion.html
:rtype: template
"""
if current_user.is_authenticated is True:
flash("Vous êtes déjà connecté·e", "info")
return redirect("/")
# si l'utilisateur·rice est déjà connecté·e, il/elle est redirigé·e vers la page d'accueil
if request.method == "POST":
user = User.identification(
login=request.form.get("login", None),
password=request.form.get("password", None)
)
if user:
flash("Connexion réussie !", "success")
login_user(user)
return redirect("/")
else:
flash("Nom d'utilisateur·rice ou mot de passe incorrect", "error")
return render_template("pages/connexion.html", nom="CollectArt")
login.login_view = "connexion"
@app.route("/deconnexion")
def deconnexion():
"""
Route permettant de gérer les déconnexions
:return: redirection vers l'accueil
:rtype: template
"""
if current_user.is_authenticated is True:
logout_user()
flash("Vous êtes déconnecté·e", "info")
return redirect("/") | 0.291182 | 0.320383 |
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from ..io.x_input_stream import XInputStream as XInputStream_98d40ab4
from ..io.x_output_stream import XOutputStream as XOutputStream_a4e00b35
class XBinaryStreamResolver(XInterface_8f010a43):
    """
    This interface encapsulates functionality to get/resolve binary data streams. It is used to transform binary data to a URL or to transform a URL to binary data. The binary data is represented through input and output streams.
    In the case of transforming a URL to binary data, the getInputStream method is used. This returns a com.sun.star.io.XInputStream from which the binary data, transformed from the given URL, can be read.
    In the case of transforming binary data to a URL, a com.sun.star.io.XOutputStream is created first to write the binary data to. After this, the resolveOutputStream method can be used to transform the binary data, represented through the com.sun.star.io.XOutputStream interface, to a URL.
    See Also:
        `API XBinaryStreamResolver <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1document_1_1XBinaryStreamResolver.html>`_
    """
    # UNO type metadata consumed by the ooobuild/pyuno bridge: these map
    # this abstract class onto the corresponding IDL interface type.
    __ooo_ns__: str = 'com.sun.star.document'
    __ooo_full_ns__: str = 'com.sun.star.document.XBinaryStreamResolver'
    __ooo_type_name__: str = 'interface'
    __pyunointerface__: str = 'com.sun.star.document.XBinaryStreamResolver'
    # Abstract stubs only — concrete behavior is supplied by the UNO runtime.
    @abstractmethod
    def createOutputStream(self) -> 'XOutputStream_a4e00b35':
        """
        creates an output stream, to which binary data can be written.
        After writing, a URL can be retrieved by a call to XBinaryStreamResolver.resolveOutputStream().
        """
    @abstractmethod
    def getInputStream(self, aURL: str) -> 'XInputStream_98d40ab4':
        """
        converts the given URL from the source URL namespace to an input stream, from which binary data can be read
        """
    @abstractmethod
    def resolveOutputStream(self, aBinaryStream: 'XOutputStream_a4e00b35') -> str:
        """
        converts the output stream, data has been written to, to a URL in source URL namespace.
        """
__all__ = ['XBinaryStreamResolver'] | ooobuild/lo/document/x_binary_stream_resolver.py | import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from ..io.x_input_stream import XInputStream as XInputStream_98d40ab4
from ..io.x_output_stream import XOutputStream as XOutputStream_a4e00b35
class XBinaryStreamResolver(XInterface_8f010a43):
"""
This interface encapsulates functionality to get/resolve binary data streams. It is used to transform binary data to a URL or to transform a URL to binary data. The binary data is represented through input and output streams.
In the case of transforming a URL to binary data, the getInputStream method is used. This returns a com.sun.star.io.XInputStream from which the binary data, transformed from the given URL, can be read.
In the case of transforming binary data to a URL, a com.sun.star.io.XOutputStream is created first to write the binary data to. After this, the resolveOutputStream method can be used to transform the binary data, represented through the com.sun.star.io.XOutputStream interface, to a URL.
See Also:
`API XBinaryStreamResolver <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1document_1_1XBinaryStreamResolver.html>`_
"""
__ooo_ns__: str = 'com.sun.star.document'
__ooo_full_ns__: str = 'com.sun.star.document.XBinaryStreamResolver'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.document.XBinaryStreamResolver'
@abstractmethod
def createOutputStream(self) -> 'XOutputStream_a4e00b35':
"""
creates an output stream, to which binary data can be written.
After writing, a URL can be retrieved by a call to XBinaryStreamResolver.resolveOutputStream().
"""
@abstractmethod
def getInputStream(self, aURL: str) -> 'XInputStream_98d40ab4':
"""
converts the given URL from the source URL namespace to an input stream, from which binary data can be read
"""
@abstractmethod
def resolveOutputStream(self, aBinaryStream: 'XOutputStream_a4e00b35') -> str:
"""
converts the output stream, data has been written to, to a URL in source URL namespace.
"""
__all__ = ['XBinaryStreamResolver'] | 0.733547 | 0.463869 |
import copy
class ChangeCheckerMixin(object):
    """Mixin that lets an instance report whether its state has changed
    since the last call to snapshot().

    Containers (dict, list) are copied semi-shallowly and compared
    recursively; non-container instances held as values are expected to
    implement their own snapshot()/isChanged() pair.
    """

    # Maps container type -> callable yielding (key, value) pairs.
    # dict.items called as an unbound method (dict.items(d)) works on both
    # Python 2 and 3; the original used dict.iteritems, which does not
    # exist on Python 3 and broke the class at import time.
    containerItems = {dict: dict.items, list: enumerate}
    immutable = False

    def snapshot(self):
        ''' create a snapshot of self's state -- like a shallow copy, but
            recursing over container types (not over general instances:
            instances must keep track of their own changes if needed). '''
        if self.immutable:
            return
        # Exclude any previous snapshot from the new one: copying it in
        # would make isChanged() report True forever after a second
        # snapshot() (the lengths of __dict__ and the snapshot differed).
        state = dict(self.__dict__)
        state.pop('_snapshot', None)
        self._snapshot = self._copy_container(state)

    def makeImmutable(self):
        ''' the instance state can't change any more, set .immutable '''
        self.immutable = True
        try:
            del self._snapshot
        except AttributeError:
            pass

    def _copy_container(self, container):
        ''' semi-shallow copy, recursing on container types only '''
        new_container = copy.copy(container)
        for k, v in self.containerItems[type(new_container)](new_container):
            if type(v) in self.containerItems:
                new_container[k] = self._copy_container(v)
            elif hasattr(v, 'snapshot'):
                # Non-container values are asked to track their own state.
                v.snapshot()
        return new_container

    def isChanged(self):
        ''' True if self's state is changed since the last snapshot '''
        if self.immutable:
            return False
        # remove snapshot from self.__dict__, put it back at the end
        snap = self.__dict__.pop('_snapshot', None)
        if snap is None:
            return True
        try:
            return self._checkContainer(self.__dict__, snap)
        finally:
            self._snapshot = snap

    def _checkContainer(self, container, snapshot):
        ''' return True if the container and its snapshot differ '''
        if len(container) != len(snapshot):
            return True
        for k, v in self.containerItems[type(container)](container):
            try:
                ov = snapshot[k]
            except LookupError:
                return True
            if self._checkItem(v, ov):
                return True
        return False

    def _checkItem(self, newitem, olditem):
        ''' compare newitem and olditem. If they are containers, call
            self._checkContainer recursively. If they're an instance with
            an 'isChanged' method, delegate to that method. Otherwise,
            return True if the items differ. '''
        if type(newitem) != type(olditem):
            return True
        if type(newitem) in self.containerItems:
            return self._checkContainer(newitem, olditem)
        if newitem is olditem:
            # Identical object: only its own change tracking can tell.
            method_isChanged = getattr(newitem, 'isChanged', None)
            if method_isChanged is None:
                return False
            return method_isChanged()
        return newitem != olditem
class ChangeCheckerMixin(object):
    """Mixin that lets an instance report whether its state has changed
    since the last call to snapshot().

    Containers (dict, list) are copied semi-shallowly and compared
    recursively; non-container instances held as values are expected to
    implement their own snapshot()/isChanged() pair.
    """

    # Maps container type -> callable yielding (key, value) pairs.
    # dict.items called as an unbound method (dict.items(d)) works on both
    # Python 2 and 3; the original used dict.iteritems, which does not
    # exist on Python 3 and broke the class at import time.
    containerItems = {dict: dict.items, list: enumerate}
    immutable = False

    def snapshot(self):
        ''' create a snapshot of self's state -- like a shallow copy, but
            recursing over container types (not over general instances:
            instances must keep track of their own changes if needed). '''
        if self.immutable:
            return
        # Exclude any previous snapshot from the new one: copying it in
        # would make isChanged() report True forever after a second
        # snapshot() (the lengths of __dict__ and the snapshot differed).
        state = dict(self.__dict__)
        state.pop('_snapshot', None)
        self._snapshot = self._copy_container(state)

    def makeImmutable(self):
        ''' the instance state can't change any more, set .immutable '''
        self.immutable = True
        try:
            del self._snapshot
        except AttributeError:
            pass

    def _copy_container(self, container):
        ''' semi-shallow copy, recursing on container types only '''
        new_container = copy.copy(container)
        for k, v in self.containerItems[type(new_container)](new_container):
            if type(v) in self.containerItems:
                new_container[k] = self._copy_container(v)
            elif hasattr(v, 'snapshot'):
                # Non-container values are asked to track their own state.
                v.snapshot()
        return new_container

    def isChanged(self):
        ''' True if self's state is changed since the last snapshot '''
        if self.immutable:
            return False
        # remove snapshot from self.__dict__, put it back at the end
        snap = self.__dict__.pop('_snapshot', None)
        if snap is None:
            return True
        try:
            return self._checkContainer(self.__dict__, snap)
        finally:
            self._snapshot = snap

    def _checkContainer(self, container, snapshot):
        ''' return True if the container and its snapshot differ '''
        if len(container) != len(snapshot):
            return True
        for k, v in self.containerItems[type(container)](container):
            try:
                ov = snapshot[k]
            except LookupError:
                return True
            if self._checkItem(v, ov):
                return True
        return False

    def _checkItem(self, newitem, olditem):
        ''' compare newitem and olditem. If they are containers, call
            self._checkContainer recursively. If they're an instance with
            an 'isChanged' method, delegate to that method. Otherwise,
            return True if the items differ. '''
        if type(newitem) != type(olditem):
            return True
        if type(newitem) in self.containerItems:
            return self._checkContainer(newitem, olditem)
        if newitem is olditem:
            # Identical object: only its own change tracking can tell.
            method_isChanged = getattr(newitem, 'isChanged', None)
            if method_isChanged is None:
                return False
            return method_isChanged()
        return newitem != olditem
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional, Union
from discord import ButtonStyle, Embed
from discord.ui import View, button as button_decorator
from discord.utils import maybe_coroutine
if TYPE_CHECKING:
from typing_extensions import Self
from discord import Interaction, InteractionMessage, Message, WebhookMessage
from discord.ui.button import Button
from discord.ui.item import Item
from discord.ext.commands.context import Context
ValidPage = Union[str, Embed]
PossibleMessage = Union[InteractionMessage, Message, WebhookMessage]
else:
Interaction = Any
Button = Any
Context = Any
__all__: tuple[str, ...] = ("SimplePaginator",)
class SimplePaginator(View):
    """A button-based paginator over a list of pages (str or discord.Embed).

    Shows previous/next/stop buttons; with at most one page only the stop
    button is kept. Start it with either a commands Context or an
    Interaction via :meth:`start`.
    """

    def __init__(
        self,
        pages: list[ValidPage],
        *,
        delete_message_after: bool = False,
    ):
        # pages must be assigned before View.__init__ runs, because
        # _init_children (called from there) inspects len(self.pages).
        self.pages = pages
        super().__init__()
        self.delete_message_after = delete_message_after
        self.message: Optional[PossibleMessage] = None
        self.current_page: int = 0

    def _init_children(self) -> list[Item[Self]]:
        """Drop the navigation buttons when there is nothing to page through."""
        org_children = super()._init_children()
        # only show stop button if there is only 1 page.
        if len(self.pages) <= 1:
            return [item for item in org_children if item.callback.callback.__name__ == "stop_button"]
        return org_children

    def format_page(self, page: ValidPage) -> ValidPage:
        """Subclass hook; may be sync or async (called via maybe_coroutine)."""
        return page

    async def get_page_kwargs(self, page_number: int) -> dict[str, Any]:
        """Build the send/edit kwargs (content/embeds/view) for one page."""
        page = await maybe_coroutine(self.format_page, self.pages[page_number])
        base_kwargs: dict[str, Any] = {"content": None, "embeds": [], "view": self}
        if isinstance(page, Embed):
            base_kwargs["embeds"].append(page)
        elif isinstance(page, str):
            base_kwargs["content"] = page
        elif isinstance(page, dict):
            # format_page may return ready-made message kwargs directly.
            return page
        return base_kwargs

    async def update(self, interaction: Interaction) -> None:
        """Refresh the button states and edit the message to current_page."""
        if hasattr(self, "right_button") and hasattr(self, "left_button"):
            if self.current_page >= len(self.pages) - 1:
                self.right_button.disabled = True
                self.left_button.disabled = False
            elif self.current_page == 0:
                self.right_button.disabled = False
                self.left_button.disabled = True
        # Wrap forward when running off the end. The original check used a
        # strict ">" which let current_page == len(pages) through and
        # raised IndexError inside get_page_kwargs.
        if self.current_page >= len(self.pages):
            self.current_page = 0
        kwargs = await self.get_page_kwargs(self.current_page)
        if not interaction.response.is_done():
            await interaction.response.edit_message(**kwargs)
            if not self.message:
                self.message = await interaction.original_message()
        else:
            if self.message:
                await self.message.edit(**kwargs)
            else:
                await interaction.message.edit(**kwargs)  # type: ignore
                self.message = interaction.message

    async def start(
        self, ctx: Optional[Context] = None, interaction: Optional[Interaction] = None, **kwargs
    ) -> Optional[PossibleMessage]:
        """Send (or re-edit) the current page and return the sent message.

        NOTE(review): any **kwargs passed by the caller are discarded and
        replaced by the page kwargs — kept as-is for compatibility.
        """
        kwargs = await self.get_page_kwargs(self.current_page)
        if self.message:
            await self.message.edit(**kwargs)
            return self.message
        if ctx:
            self.message = await ctx.send(**kwargs)
        elif interaction:
            if not interaction.response.is_done():
                await interaction.response.send_message(**kwargs)
                self.message = await interaction.original_message()
            else:
                self.message = await interaction.followup.send(wait=True, **kwargs)
        return self.message

    @button_decorator(emoji="⬅️", style=ButtonStyle.secondary, custom_id="left")
    async def left_button(self, interaction: Interaction, button: Button) -> None:
        """Go one page back (negative indexing wraps to the last page)."""
        self.current_page -= 1
        await self.update(interaction)

    @button_decorator(label="Stop", style=ButtonStyle.red, custom_id="stop")
    async def stop_button(self, interaction: Interaction, button: Button) -> None:
        """Stop listening for clicks; optionally delete the message."""
        self.stop()
        if self.delete_message_after:
            await self.message.delete()  # type: ignore

    @button_decorator(emoji="➡️", style=ButtonStyle.secondary, custom_id="right")
    async def right_button(self, interaction: Interaction, button: Button) -> None:
        """Advance one page."""
        self.current_page += 1
        await self.update(interaction)
from typing import TYPE_CHECKING, Any, Optional, Union
from discord import ButtonStyle, Embed
from discord.ui import View, button as button_decorator
from discord.utils import maybe_coroutine
if TYPE_CHECKING:
from typing_extensions import Self
from discord import Interaction, InteractionMessage, Message, WebhookMessage
from discord.ui.button import Button
from discord.ui.item import Item
from discord.ext.commands.context import Context
ValidPage = Union[str, Embed]
PossibleMessage = Union[InteractionMessage, Message, WebhookMessage]
else:
Interaction = Any
Button = Any
Context = Any
__all__: tuple[str, ...] = ("SimplePaginator",)
class SimplePaginator(View):
    """A button-based paginator over a list of pages (str or discord.Embed).

    Shows previous/next/stop buttons; with at most one page only the stop
    button is kept. Start it with either a commands Context or an
    Interaction via :meth:`start`.
    """

    def __init__(
        self,
        pages: list[ValidPage],
        *,
        delete_message_after: bool = False,
    ):
        # pages must be assigned before View.__init__ runs, because
        # _init_children (called from there) inspects len(self.pages).
        self.pages = pages
        super().__init__()
        self.delete_message_after = delete_message_after
        self.message: Optional[PossibleMessage] = None
        self.current_page: int = 0

    def _init_children(self) -> list[Item[Self]]:
        """Drop the navigation buttons when there is nothing to page through."""
        org_children = super()._init_children()
        # only show stop button if there is only 1 page.
        if len(self.pages) <= 1:
            return [item for item in org_children if item.callback.callback.__name__ == "stop_button"]
        return org_children

    def format_page(self, page: ValidPage) -> ValidPage:
        """Subclass hook; may be sync or async (called via maybe_coroutine)."""
        return page

    async def get_page_kwargs(self, page_number: int) -> dict[str, Any]:
        """Build the send/edit kwargs (content/embeds/view) for one page."""
        page = await maybe_coroutine(self.format_page, self.pages[page_number])
        base_kwargs: dict[str, Any] = {"content": None, "embeds": [], "view": self}
        if isinstance(page, Embed):
            base_kwargs["embeds"].append(page)
        elif isinstance(page, str):
            base_kwargs["content"] = page
        elif isinstance(page, dict):
            # format_page may return ready-made message kwargs directly.
            return page
        return base_kwargs

    async def update(self, interaction: Interaction) -> None:
        """Refresh the button states and edit the message to current_page."""
        if hasattr(self, "right_button") and hasattr(self, "left_button"):
            if self.current_page >= len(self.pages) - 1:
                self.right_button.disabled = True
                self.left_button.disabled = False
            elif self.current_page == 0:
                self.right_button.disabled = False
                self.left_button.disabled = True
        # Wrap forward when running off the end. The original check used a
        # strict ">" which let current_page == len(pages) through and
        # raised IndexError inside get_page_kwargs.
        if self.current_page >= len(self.pages):
            self.current_page = 0
        kwargs = await self.get_page_kwargs(self.current_page)
        if not interaction.response.is_done():
            await interaction.response.edit_message(**kwargs)
            if not self.message:
                self.message = await interaction.original_message()
        else:
            if self.message:
                await self.message.edit(**kwargs)
            else:
                await interaction.message.edit(**kwargs)  # type: ignore
                self.message = interaction.message

    async def start(
        self, ctx: Optional[Context] = None, interaction: Optional[Interaction] = None, **kwargs
    ) -> Optional[PossibleMessage]:
        """Send (or re-edit) the current page and return the sent message.

        NOTE(review): any **kwargs passed by the caller are discarded and
        replaced by the page kwargs — kept as-is for compatibility.
        """
        kwargs = await self.get_page_kwargs(self.current_page)
        if self.message:
            await self.message.edit(**kwargs)
            return self.message
        if ctx:
            self.message = await ctx.send(**kwargs)
        elif interaction:
            if not interaction.response.is_done():
                await interaction.response.send_message(**kwargs)
                self.message = await interaction.original_message()
            else:
                self.message = await interaction.followup.send(wait=True, **kwargs)
        return self.message

    @button_decorator(emoji="⬅️", style=ButtonStyle.secondary, custom_id="left")
    async def left_button(self, interaction: Interaction, button: Button) -> None:
        """Go one page back (negative indexing wraps to the last page)."""
        self.current_page -= 1
        await self.update(interaction)

    @button_decorator(label="Stop", style=ButtonStyle.red, custom_id="stop")
    async def stop_button(self, interaction: Interaction, button: Button) -> None:
        """Stop listening for clicks; optionally delete the message."""
        self.stop()
        if self.delete_message_after:
            await self.message.delete()  # type: ignore

    @button_decorator(emoji="➡️", style=ButtonStyle.secondary, custom_id="right")
    async def right_button(self, interaction: Interaction, button: Button) -> None:
        """Advance one page."""
        self.current_page += 1
        await self.update(interaction)
import json
from typing import Any, Dict, List
import requests
from ...shared.exceptions import PermissionException
from ...shared.interfaces.logging import LoggingModule
from .interface import NotAllowed, PermissionService
class PermissionHTTPAdapter(PermissionService):
    """
    Adapter to connect to permission service.

    Sends is_allowed queries over HTTP and translates the service's JSON
    responses into return values or exceptions.
    """

    def __init__(self, permission_url: str, logging: LoggingModule) -> None:
        self.endpoint = permission_url + "/is_allowed"
        self.logger = logging.getLogger(__name__)

    def is_allowed(
        self, name: str, user_id: int, data_list: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Ask the permission service whether *user_id* may perform *name*.

        Returns one "additions" dict per item in data_list.
        Raises NotAllowed when the service denies the request, and
        PermissionException on transport or protocol errors.
        """
        payload = json.dumps(
            {"name": name, "user_id": user_id, "data": data_list}, separators=(",", ":")
        )
        try:
            response = requests.post(
                url=self.endpoint,
                data=payload,
                headers={"Content-Type": "application/json"},
            )
        except requests.exceptions.ConnectionError as e:
            raise PermissionException(
                f"Cannot reach the permission service on {self.endpoint}. Error: {e}"
            )
        content = response.json()
        self.logger.debug(f"Permission service response: {str(content)}")
        if "error" in content:
            # renamed from `type`, which shadowed the builtin
            error_type = content["error"]["type"]
            msg = content["error"]["msg"]
            raise PermissionException(f"Error in permission service. {error_type}: {msg}")
        allowed = content.get("allowed", False)
        if not allowed:
            reason = content.get("reason")
            error_index = content.get("error_index")
            # error_index may be absent (None); guard before the numeric
            # comparison — `None < 0` raises TypeError on Python 3.
            if error_index is not None and error_index < 0:
                error_index = None
            # TODO: dev only. Log about missing perms check
            if reason and "no such query" in reason:
                self.logger.warning(
                    f"Action {name} has no permission check. Return a default-true."
                )
                return [{} for _ in data_list]
            raise NotAllowed(reason, error_index)
        additions = content.get("additions")
        if not isinstance(additions, list):
            raise PermissionException("additions must be a list")
        for i, addition in enumerate(additions):
            if addition is None:
                additions[i] = {}
            elif not isinstance(addition, dict):
                # Was `raise PermissionError` — the builtin OSError subclass;
                # clearly meant to be this module's PermissionException.
                raise PermissionException(f"Addition {i} is not a dict")
        return additions
from typing import Any, Dict, List
import requests
from ...shared.exceptions import PermissionException
from ...shared.interfaces.logging import LoggingModule
from .interface import NotAllowed, PermissionService
class PermissionHTTPAdapter(PermissionService):
    """
    Adapter to connect to permission service.

    Sends is_allowed queries over HTTP and translates the service's JSON
    responses into return values or exceptions.
    """

    def __init__(self, permission_url: str, logging: LoggingModule) -> None:
        self.endpoint = permission_url + "/is_allowed"
        self.logger = logging.getLogger(__name__)

    def is_allowed(
        self, name: str, user_id: int, data_list: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Ask the permission service whether *user_id* may perform *name*.

        Returns one "additions" dict per item in data_list.
        Raises NotAllowed when the service denies the request, and
        PermissionException on transport or protocol errors.
        """
        payload = json.dumps(
            {"name": name, "user_id": user_id, "data": data_list}, separators=(",", ":")
        )
        try:
            response = requests.post(
                url=self.endpoint,
                data=payload,
                headers={"Content-Type": "application/json"},
            )
        except requests.exceptions.ConnectionError as e:
            raise PermissionException(
                f"Cannot reach the permission service on {self.endpoint}. Error: {e}"
            )
        content = response.json()
        self.logger.debug(f"Permission service response: {str(content)}")
        if "error" in content:
            # renamed from `type`, which shadowed the builtin
            error_type = content["error"]["type"]
            msg = content["error"]["msg"]
            raise PermissionException(f"Error in permission service. {error_type}: {msg}")
        allowed = content.get("allowed", False)
        if not allowed:
            reason = content.get("reason")
            error_index = content.get("error_index")
            # error_index may be absent (None); guard before the numeric
            # comparison — `None < 0` raises TypeError on Python 3.
            if error_index is not None and error_index < 0:
                error_index = None
            # TODO: dev only. Log about missing perms check
            if reason and "no such query" in reason:
                self.logger.warning(
                    f"Action {name} has no permission check. Return a default-true."
                )
                return [{} for _ in data_list]
            raise NotAllowed(reason, error_index)
        additions = content.get("additions")
        if not isinstance(additions, list):
            raise PermissionException("additions must be a list")
        for i, addition in enumerate(additions):
            if addition is None:
                additions[i] = {}
            elif not isinstance(addition, dict):
                # Was `raise PermissionError` — the builtin OSError subclass;
                # clearly meant to be this module's PermissionException.
                raise PermissionException(f"Addition {i} is not a dict")
        return additions
# Author: <NAME> (<EMAIL>)
# modules
import string, re, collections
import os, sys, subprocess
from optparse import OptionParser
# BioPython modules for reading and writing sequences
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
def main():
    """Build the command-line option parser for the script and parse sys.argv.

    Returns the (options, args) pair produced by optparse.
    """
    parser = OptionParser(usage="usage: %prog [options]")
    # (short flag, long flag, dest, help) — all options are plain string
    # stores defaulting to the empty string.
    option_specs = (
        ("-t", "--table", "table_file", "table to read (csv)"),
        ("-o", "--out", "output_file", "output file (fasta)"),
        ("-s", "--seq_col", "seq_col", "column number containing sequences"),
        ("-f", "--fasta", "fasta_file",
         "fasta file to read sequences from (must specify which column in the table contains the sequence names that match the fasta file headers)"),
        ("-c", "--headers_col", "headers_col",
         "column number that contains the sequence names that match the fasta file headers"),
    )
    for short_flag, long_flag, dest, help_text in option_specs:
        parser.add_option(short_flag, long_flag, action="store", dest=dest,
                          help=help_text, default="")
    return parser.parse_args()
if __name__ == "__main__":
    (options, args) = main()

    seqid_col = False
    seqs_file_col = False
    input_seqs = {}

    # --- validate options -------------------------------------------------
    if options.table_file == "":
        DoError("Please specify input table using -t")
    if options.output_file == "":
        DoError("Please specify output fasta file using -o")

    if options.seq_col != "":
        print("Reading DNA sequences from table, column " + options.seq_col)
        seqid_col = int(options.seq_col)
    elif options.fasta_file != "":
        if options.headers_col == "":
            DoError("Please specify which column of the table contains identifiers that match the headers in the fasta file")
        seqs_file_col = int(options.headers_col)
        print("Reading DNA sequences from fasta file: " + options.fasta_file)
        for record in SeqIO.parse(open(options.fasta_file, "r"), "fasta"):
            input_seqs[record.id] = record.seq
    else:
        # Fixed: the message used to say "-h" (which is optparse's help
        # flag); the headers-column option is actually -c.
        DoError("Where are the sequences? If they are in the table, specify which column using -s. Otherwise provide a fasta file of sequence using -f and specify which column contains sequence identifiers that match the fasta headers, using -c")

    # --- read table rows and emit fasta records ---------------------------
    # file() is Python-2-only; open() works on both.
    table_in = open(options.table_file, "r")
    fasta_out = open(options.output_file, "w")
    header = []
    for line in table_in:
        fields = line.rstrip().split(",")
        if not header:
            header = fields  # first row is the column header
            continue
        seqID = fields[0]
        cluster = fields[1]
        gene = fields[2]
        allele = fields[3]
        db_id = "__".join([cluster, gene, allele, seqID])  ## this is the format for SRST2 detection and typing
        record = None
        if seqid_col:
            seq = fields.pop(seqid_col - 1)
            record = SeqRecord(Seq(seq, IUPAC.unambiguous_dna),
                               id=db_id,
                               description=db_id)
        elif seqs_file_col:
            seqs_file_id = fields.pop(seqs_file_col - 1)
            if seqs_file_id in input_seqs:
                record = SeqRecord(input_seqs[seqs_file_id], id=db_id, description=db_id)
            else:
                print("Warning, couldn't find a sequence in the fasta file matching this id: " + seqs_file_id)
        if record is None:
            # Fixed: previously the stale `record` from the last iteration
            # was re-written for rows with no matching sequence.
            continue
        # add annotation from other columns
        if len(fields) > 4:
            record.description = ";".join(fields[4:])
        SeqIO.write(record, fasta_out, "fasta")
    table_in.close()
    fasta_out.close()
# Author: <NAME> (<EMAIL>)
# modules
import string, re, collections
import os, sys, subprocess
from optparse import OptionParser
# BioPython modules for reading and writing sequences
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
def main():
    """Construct the script's option parser and parse sys.argv.

    Returns the (options, args) pair produced by optparse.
    """
    parser = OptionParser(usage="usage: %prog [options]")
    # Each entry: (short flag, long flag, dest, help). All options are
    # stored as strings and default to "".
    for short_flag, long_flag, dest, help_text in (
        ("-t", "--table", "table_file", "table to read (csv)"),
        ("-o", "--out", "output_file", "output file (fasta)"),
        ("-s", "--seq_col", "seq_col", "column number containing sequences"),
        ("-f", "--fasta", "fasta_file",
         "fasta file to read sequences from (must specify which column in the table contains the sequence names that match the fasta file headers)"),
        ("-c", "--headers_col", "headers_col",
         "column number that contains the sequence names that match the fasta file headers"),
    ):
        parser.add_option(short_flag, long_flag, action="store", dest=dest,
                          help=help_text, default="")
    return parser.parse_args()
if __name__ == "__main__":
    (options, args) = main()

    seqid_col = False
    seqs_file_col = False
    input_seqs = {}

    # --- validate options -------------------------------------------------
    if options.table_file == "":
        DoError("Please specify input table using -t")
    if options.output_file == "":
        DoError("Please specify output fasta file using -o")

    if options.seq_col != "":
        print("Reading DNA sequences from table, column " + options.seq_col)
        seqid_col = int(options.seq_col)
    elif options.fasta_file != "":
        if options.headers_col == "":
            DoError("Please specify which column of the table contains identifiers that match the headers in the fasta file")
        seqs_file_col = int(options.headers_col)
        print("Reading DNA sequences from fasta file: " + options.fasta_file)
        for record in SeqIO.parse(open(options.fasta_file, "r"), "fasta"):
            input_seqs[record.id] = record.seq
    else:
        # Fixed: the message used to say "-h" (which is optparse's help
        # flag); the headers-column option is actually -c.
        DoError("Where are the sequences? If they are in the table, specify which column using -s. Otherwise provide a fasta file of sequence using -f and specify which column contains sequence identifiers that match the fasta headers, using -c")

    # --- read table rows and emit fasta records ---------------------------
    # file() is Python-2-only; open() works on both.
    table_in = open(options.table_file, "r")
    fasta_out = open(options.output_file, "w")
    header = []
    for line in table_in:
        fields = line.rstrip().split(",")
        if not header:
            header = fields  # first row is the column header
            continue
        seqID = fields[0]
        cluster = fields[1]
        gene = fields[2]
        allele = fields[3]
        db_id = "__".join([cluster, gene, allele, seqID])  ## this is the format for SRST2 detection and typing
        record = None
        if seqid_col:
            seq = fields.pop(seqid_col - 1)
            record = SeqRecord(Seq(seq, IUPAC.unambiguous_dna),
                               id=db_id,
                               description=db_id)
        elif seqs_file_col:
            seqs_file_id = fields.pop(seqs_file_col - 1)
            if seqs_file_id in input_seqs:
                record = SeqRecord(input_seqs[seqs_file_id], id=db_id, description=db_id)
            else:
                print("Warning, couldn't find a sequence in the fasta file matching this id: " + seqs_file_id)
        if record is None:
            # Fixed: previously the stale `record` from the last iteration
            # was re-written for rows with no matching sequence.
            continue
        # add annotation from other columns
        if len(fields) > 4:
            record.description = ";".join(fields[4:])
        SeqIO.write(record, fasta_out, "fasta")
    table_in.close()
    fasta_out.close()
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
""" Decreases the time needed to build large galleries (e.g.: 25k images in
2.5s instead of 30s)
This plugin allows extended caching, which is useful for large galleries. Once
a gallery has been built it caches all metadata for all media (markdown, exif,
itpc) in the gallery target folder. Before the next run it restores them so
that the image and metadata files do not have to be parsed again. For large
galleries this can speed up the creation of index files dramatically.
"""
import logging
import os
import pickle
from .. import signals
from ..utils import get_mod_date
logger = logging.getLogger(__name__)
def load_metadata(album):
    """Populate *album* and its media with metadata restored from the cache.

    Lazily loads the gallery-wide cache on first use. A cached entry is
    applied only when its recorded modification time is at least as new as
    the corresponding file on disk, so stale cache entries are ignored.
    """
    if not hasattr(album.gallery, "metadataCache"):
        _restore_cache(album.gallery)
    cache = album.gallery.metadataCache

    # load album metadata
    key = os.path.join(album.path, '_index')
    if key in cache:
        data = cache[key]
        # check if file has changed
        try:
            mod_date = int(get_mod_date(album.markdown_metadata_filepath))
        except FileNotFoundError:
            # no markdown file on disk; keep whatever the album already has
            pass
        else:
            if data.get('mod_date', -1) >= mod_date:
                # cache is good
                if 'markdown_metadata' in data:
                    album.markdown_metadata = data['markdown_metadata']

    # load media metadata
    for media in album.medias:
        key = os.path.join(media.path, media.dst_filename)
        if key in cache:
            data = cache[key]
            # check if files have changed
            try:
                mod_date = int(get_mod_date(media.src_path))
            except FileNotFoundError:
                # source file is gone; nothing can be validated for it
                continue
            if data.get('mod_date', -1) < mod_date:
                continue  # file_metadata needs updating
            if 'file_metadata' in data:
                media.file_metadata = data['file_metadata']
            if 'exif' in data:
                media.exif = data['exif']
            # a second, independent freshness check for the markdown sidecar;
            # note file_metadata/exif above stay applied even if this skips
            try:
                mod_date = int(get_mod_date(media.markdown_metadata_filepath))
            except FileNotFoundError:
                continue
            if data.get('meta_mod_date', -1) < mod_date:
                continue  # markdown_metadata needs updating
            if 'markdown_metadata' in data:
                media.markdown_metadata = data['markdown_metadata']
def _restore_cache(gallery):
    """Load the pickled metadata cache from the destination directory.

    On any failure (a missing file is normal; a corrupt pickle is logged)
    the gallery ends up with a usable — possibly empty — cache dict.
    """
    cache_file = os.path.join(gallery.settings["destination"], ".metadata_cache")
    gallery.metadataCache = {}
    try:
        if os.path.exists(cache_file):
            with open(cache_file, "rb") as fh:
                gallery.metadataCache = pickle.load(fh)
            logger.debug("Loaded cache with %d entries", len(gallery.metadataCache))
    except Exception as err:
        logger.warning("Could not load cache: %s", err)
        gallery.metadataCache = {}
def save_cache(gallery):
    """Store album/media metadata in ".metadata_cache" in the destination dir.

    Each entry records modification times alongside the metadata so
    load_metadata can skip stale values on the next run. An empty cache
    removes any existing cache file instead of writing one.
    """
    if hasattr(gallery, "metadataCache"):
        cache = gallery.metadataCache
    else:
        cache = gallery.metadataCache = {}

    for album in gallery.albums.values():
        try:
            data = {
                'mod_date': int(get_mod_date(album.markdown_metadata_filepath)),
                'markdown_metadata': album.markdown_metadata,
            }
            cache[os.path.join(album.path, '_index')] = data
        except FileNotFoundError:
            pass  # album has no markdown file; nothing to cache for it

        for media in album.medias:
            data = {}
            try:
                mod_date = int(get_mod_date(media.src_path))
            except FileNotFoundError:
                continue  # source file missing; skip this media entirely
            data['mod_date'] = mod_date
            data['file_metadata'] = media.file_metadata
            if hasattr(media, 'exif'):
                data['exif'] = media.exif
            try:
                meta_mod_date = int(get_mod_date(media.markdown_metadata_filepath))
            except FileNotFoundError:
                pass  # no markdown sidecar; cache the file metadata only
            else:
                data['meta_mod_date'] = meta_mod_date
                data['markdown_metadata'] = media.markdown_metadata
            cache[os.path.join(media.path, media.dst_filename)] = data

    cachePath = os.path.join(gallery.settings["destination"], ".metadata_cache")
    if len(cache) == 0:
        if os.path.exists(cachePath):
            os.remove(cachePath)
        return
    try:
        with open(cachePath, "wb") as cacheFile:
            pickle.dump(cache, cacheFile)
        logger.debug("Stored cache with %d entries", len(gallery.metadataCache))
    except Exception as e:
        # Fixed: logger.warn is a deprecated alias of logger.warning, and
        # the unconditional os.remove could raise FileNotFoundError (the
        # dump may fail before the file exists), masking the real error.
        logger.warning("Could not store cache: %s", e)
        if os.path.exists(cachePath):
            os.remove(cachePath)
def register(settings):
    """Plugin entry point: hook the metadata cache into sigal's signals."""
    signals.gallery_build.connect(save_cache)
    signals.album_initialized.connect(load_metadata)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
""" Decreases the time needed to build large galleries (e.g.: 25k images in
2.5s instead of 30s)
This plugin allows extended caching, which is useful for large galleries. Once
a gallery has been built it caches all metadata for all media (markdown, exif,
itpc) in the gallery target folder. Before the next run it restores them so
that the image and metadata files do not have to be parsed again. For large
galleries this can speed up the creation of index files dramatically.
"""
import logging
import os
import pickle
from .. import signals
from ..utils import get_mod_date
logger = logging.getLogger(__name__)
def load_metadata(album):
    """Restore cached metadata onto *album* and each of its media.

    The gallery-wide cache is loaded lazily on first use. Cached values
    are only applied when they are no older than the file on disk
    (compared via modification time), so edits invalidate the cache.
    """
    if not hasattr(album.gallery, "metadataCache"):
        _restore_cache(album.gallery)
    cache = album.gallery.metadataCache

    # load album metadata
    key = os.path.join(album.path, '_index')
    if key in cache:
        data = cache[key]
        # check if file has changed
        try:
            mod_date = int(get_mod_date(album.markdown_metadata_filepath))
        except FileNotFoundError:
            # markdown file missing; leave the album untouched
            pass
        else:
            if data.get('mod_date', -1) >= mod_date:
                # cache is good
                if 'markdown_metadata' in data:
                    album.markdown_metadata = data['markdown_metadata']

    # load media metadata
    for media in album.medias:
        key = os.path.join(media.path, media.dst_filename)
        if key in cache:
            data = cache[key]
            # check if files have changed
            try:
                mod_date = int(get_mod_date(media.src_path))
            except FileNotFoundError:
                # source file is missing; nothing can be validated
                continue
            if data.get('mod_date', -1) < mod_date:
                continue  # file_metadata needs updating
            if 'file_metadata' in data:
                media.file_metadata = data['file_metadata']
            if 'exif' in data:
                media.exif = data['exif']
            # separate freshness check for the markdown sidecar; the
            # file_metadata/exif applied above remain even if this skips
            try:
                mod_date = int(get_mod_date(media.markdown_metadata_filepath))
            except FileNotFoundError:
                continue
            if data.get('meta_mod_date', -1) < mod_date:
                continue  # markdown_metadata needs updating
            if 'markdown_metadata' in data:
                media.markdown_metadata = data['markdown_metadata']
def _restore_cache(gallery):
    """Rebuild gallery.metadataCache from the on-disk pickle, if present."""
    path = os.path.join(gallery.settings["destination"], ".metadata_cache")
    if not os.path.exists(path):
        gallery.metadataCache = {}
        return
    try:
        with open(path, "rb") as stream:
            gallery.metadataCache = pickle.load(stream)
    except Exception as exc:
        logger.warning("Could not load cache: %s", exc)
        gallery.metadataCache = {}
    else:
        logger.debug("Loaded cache with %d entries", len(gallery.metadataCache))
def save_cache(gallery):
    """Persist album/media metadata to ".metadata_cache" in the destination.

    Modification times are stored next to the metadata so load_metadata
    can detect staleness on the next run. With nothing to store, any
    existing cache file is removed instead.
    """
    if hasattr(gallery, "metadataCache"):
        cache = gallery.metadataCache
    else:
        cache = gallery.metadataCache = {}

    for album in gallery.albums.values():
        try:
            data = {
                'mod_date': int(get_mod_date(album.markdown_metadata_filepath)),
                'markdown_metadata': album.markdown_metadata,
            }
            cache[os.path.join(album.path, '_index')] = data
        except FileNotFoundError:
            pass  # album without a markdown file; nothing to cache

        for media in album.medias:
            data = {}
            try:
                mod_date = int(get_mod_date(media.src_path))
            except FileNotFoundError:
                continue  # missing source file; skip this media
            data['mod_date'] = mod_date
            data['file_metadata'] = media.file_metadata
            if hasattr(media, 'exif'):
                data['exif'] = media.exif
            try:
                meta_mod_date = int(get_mod_date(media.markdown_metadata_filepath))
            except FileNotFoundError:
                pass  # no markdown sidecar; cache file metadata only
            else:
                data['meta_mod_date'] = meta_mod_date
                data['markdown_metadata'] = media.markdown_metadata
            cache[os.path.join(media.path, media.dst_filename)] = data

    cachePath = os.path.join(gallery.settings["destination"], ".metadata_cache")
    if len(cache) == 0:
        if os.path.exists(cachePath):
            os.remove(cachePath)
        return
    try:
        with open(cachePath, "wb") as cacheFile:
            pickle.dump(cache, cacheFile)
        logger.debug("Stored cache with %d entries", len(gallery.metadataCache))
    except Exception as e:
        # Fixed: logger.warn is a deprecated alias of logger.warning, and
        # the unconditional os.remove could raise FileNotFoundError when
        # the dump failed before creating the file, hiding the real error.
        logger.warning("Could not store cache: %s", e)
        if os.path.exists(cachePath):
            os.remove(cachePath)
def register(settings):
    """Register this plugin's handlers on sigal's signal pipeline."""
    signals.gallery_build.connect(save_cache)
    signals.album_initialized.connect(load_metadata)
# The MIT License (MIT)
# Copyright (c) 2018 AndyTempel
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from .errors import RequireFormatting
class Route(object):
    """A single API endpoint: URL template, HTTP method, and whether the
    template still contains ``{}`` placeholders that must be filled in."""

    def __init__(self, url: str, method: str, require_format: bool = False):
        self.url = url
        self.method = method
        self.require_format = require_format

    def _ready_url(self) -> str:
        # Shared guard: a template that still needs formatting must not be
        # handed out as a finished URL.
        if self.require_format:
            raise RequireFormatting
        return self.url

    def __str__(self) -> str:
        return self._ready_url()

    def __get__(self, instance, owner) -> str:
        # Descriptor access behaves exactly like str(): yield the ready URL.
        return self._ready_url()

    def format_url(self, *args) -> str:
        """Substitute *args into the URL template and return the result."""
        return self.url.format(*args)
class Router(object):
    """Builds the set of DBL API Route objects rooted at *base_url*.

    A trailing slash is appended to *base_url* if missing so the derived
    ``users/`` and ``widget/`` prefixes join cleanly.
    """

    def __init__(self, base_url: str):
        if not base_url.endswith("/"):
            base_url += "/"
        self.base_url = base_url
        self.base_bot = base_url + "bots"
        self.base_usr = base_url + "users/"
        self.base_wig = base_url + "widget/"
        self.bot_search = Route(self.base_bot, "GET")
        self.bot_get = Route(self.base_bot + "/{}", "GET", True)
        self.bot_votes = Route(self.base_bot + "/{}/votes", "GET", True)
        self.bot_stats = Route(self.base_bot + "/{}/stats", "GET", True)
        # BUG FIX: the upload-stats route was missing the "/" separator,
        # yielding ".../bots<id>/stats" instead of ".../bots/<id>/stats".
        self.bot_ul_stats = Route(self.base_bot + "/{}/stats", "POST", True)
        self.user_get = Route(self.base_usr + "{}", "GET", True)
        self.widget_get = Route(self.base_wig + "{}.svg", "GET", True)
        self.widget_owner = Route(self.base_wig + "owner/{}.svg", "GET", True)
# The MIT License (MIT)
# Copyright (c) 2018 AndyTempel
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from .errors import RequireFormatting
class Route(object):
    """A single API endpoint: URL template, HTTP method, and whether the
    template still contains ``{}`` placeholders that must be filled in."""

    def __init__(self, url: str, method: str, require_format: bool = False):
        self.url = url
        self.method = method
        self.require_format = require_format

    def _ready_url(self) -> str:
        # Shared guard: a template that still needs formatting must not be
        # handed out as a finished URL.
        if self.require_format:
            raise RequireFormatting
        return self.url

    def __str__(self) -> str:
        return self._ready_url()

    def __get__(self, instance, owner) -> str:
        # Descriptor access behaves exactly like str(): yield the ready URL.
        return self._ready_url()

    def format_url(self, *args) -> str:
        """Substitute *args into the URL template and return the result."""
        return self.url.format(*args)
class Router(object):
    """Builds the set of DBL API Route objects rooted at *base_url*.

    A trailing slash is appended to *base_url* if missing so the derived
    ``users/`` and ``widget/`` prefixes join cleanly.
    """

    def __init__(self, base_url: str):
        if not base_url.endswith("/"):
            base_url += "/"
        self.base_url = base_url
        self.base_bot = base_url + "bots"
        self.base_usr = base_url + "users/"
        self.base_wig = base_url + "widget/"
        self.bot_search = Route(self.base_bot, "GET")
        self.bot_get = Route(self.base_bot + "/{}", "GET", True)
        self.bot_votes = Route(self.base_bot + "/{}/votes", "GET", True)
        self.bot_stats = Route(self.base_bot + "/{}/stats", "GET", True)
        # BUG FIX: the upload-stats route was missing the "/" separator,
        # yielding ".../bots<id>/stats" instead of ".../bots/<id>/stats".
        self.bot_ul_stats = Route(self.base_bot + "/{}/stats", "POST", True)
        self.user_get = Route(self.base_usr + "{}", "GET", True)
        self.widget_get = Route(self.base_wig + "{}.svg", "GET", True)
        self.widget_owner = Route(self.base_wig + "owner/{}.svg", "GET", True)
"""This model adds noise/rir to signal."""
import delta.compat as tf
from delta.utils.hparam import HParams
from core.ops import py_x_ops
from delta.data.frontend.base_frontend import BaseFrontend
class Add_rir_noise_aecres(BaseFrontend):
    """
    Add a random signal-to-noise ratio noise or impulse response to clean speech.
    """

    def __init__(self, config: dict):
        super().__init__(config)

    @classmethod
    def params(cls, config=None):
        """
        Set params.
        :param config: contains nine optional parameters:
            --sample_rate     : Sample frequency of waveform data. (int, default = 16000)
            --if_add_rir      : If true, add rir to audio data. (bool, default = False)
            --rir_filelist    : FileList path of rir. (string, default = 'rirlist.scp')
            --if_add_noise    : If true, add random noise to audio data. (bool, default = False)
            --snr_min         : Minimum SNR adds to signal. (float, default = 0)
            --snr_max         : Maximum SNR adds to signal. (float, default = 30)
            --noise_filelist  : FileList path of noise. (string, default = 'noiselist.scp')
            --if_add_aecres   : If true, add aecres to audio data. (bool, default = False)
            --aecres_filelist : FileList path of aecres. (string, default = 'aecreslist.scp')
        :return: An object of class HParams, which is a set of hyperparameters as name-value pairs.
        """
        # Defaults registered in the same order as the original add_hparam calls.
        defaults = {
            'sample_rate': 16000,
            'if_add_rir': False,
            'if_add_noise': False,
            'rir_filelist': 'rirlist.scp',
            'noise_filelist': 'noiselist.scp',
            'snr_min': 0,
            'snr_max': 30,
            'if_add_aecres': False,
            'aecres_filelist': 'aecreslist.scp',
        }
        hparams = HParams(cls=cls)
        for name, value in defaults.items():
            hparams.add_hparam(name, value)
        if config is not None:
            hparams.override_from_dict(config)
        return hparams

    def call(self, audio_data, sample_rate=None):
        """
        Add rir/noise/aecres augmentation to the input waveform.
        (Docstring fixed: the original claimed to compute a power spectrum,
        which was a copy-paste from another frontend.)
        :param audio_data: the audio signal to augment. Should be an (1, N) tensor.
        :param sample_rate: [option] the sample rate of the signal we are
            working with, default is 16kHz.
        :return: A float tensor of size N containing the augmented audio.
        """
        p = self.config
        with tf.name_scope('add_rir_noise_aecres'):
            if sample_rate is None:  # idiom fix: compare with None via `is`
                sample_rate = tf.constant(p.sample_rate, dtype=tf.int32)
            # Fail fast if the caller's rate disagrees with the configured one.
            assert_op = tf.assert_equal(
                tf.constant(p.sample_rate), tf.cast(sample_rate, dtype=tf.int32))
            with tf.control_dependencies([assert_op]):
                sample_rate = tf.cast(sample_rate, dtype=float)
                add_rir_noise_aecres_out = py_x_ops.add_rir_noise_aecres(
                    audio_data,
                    sample_rate,
                    if_add_rir=p.if_add_rir,
                    rir_filelist=p.rir_filelist,
                    if_add_noise=p.if_add_noise,
                    snr_min=p.snr_min,
                    snr_max=p.snr_max,
                    noise_filelist=p.noise_filelist,
                    if_add_aecres=p.if_add_aecres,
                    aecres_filelist=p.aecres_filelist)
                return tf.squeeze(add_rir_noise_aecres_out)
import delta.compat as tf
from delta.utils.hparam import HParams
from core.ops import py_x_ops
from delta.data.frontend.base_frontend import BaseFrontend
class Add_rir_noise_aecres(BaseFrontend):
    """
    Add a random signal-to-noise ratio noise or impulse response to clean speech.
    """

    def __init__(self, config: dict):
        super().__init__(config)

    @classmethod
    def params(cls, config=None):
        """
        Set params.
        :param config: contains nine optional parameters:
            --sample_rate     : Sample frequency of waveform data. (int, default = 16000)
            --if_add_rir      : If true, add rir to audio data. (bool, default = False)
            --rir_filelist    : FileList path of rir. (string, default = 'rirlist.scp')
            --if_add_noise    : If true, add random noise to audio data. (bool, default = False)
            --snr_min         : Minimum SNR adds to signal. (float, default = 0)
            --snr_max         : Maximum SNR adds to signal. (float, default = 30)
            --noise_filelist  : FileList path of noise. (string, default = 'noiselist.scp')
            --if_add_aecres   : If true, add aecres to audio data. (bool, default = False)
            --aecres_filelist : FileList path of aecres. (string, default = 'aecreslist.scp')
        :return: An object of class HParams, which is a set of hyperparameters as name-value pairs.
        """
        # Defaults registered in the same order as the original add_hparam calls.
        defaults = {
            'sample_rate': 16000,
            'if_add_rir': False,
            'if_add_noise': False,
            'rir_filelist': 'rirlist.scp',
            'noise_filelist': 'noiselist.scp',
            'snr_min': 0,
            'snr_max': 30,
            'if_add_aecres': False,
            'aecres_filelist': 'aecreslist.scp',
        }
        hparams = HParams(cls=cls)
        for name, value in defaults.items():
            hparams.add_hparam(name, value)
        if config is not None:
            hparams.override_from_dict(config)
        return hparams

    def call(self, audio_data, sample_rate=None):
        """
        Add rir/noise/aecres augmentation to the input waveform.
        (Docstring fixed: the original claimed to compute a power spectrum,
        which was a copy-paste from another frontend.)
        :param audio_data: the audio signal to augment. Should be an (1, N) tensor.
        :param sample_rate: [option] the sample rate of the signal we are
            working with, default is 16kHz.
        :return: A float tensor of size N containing the augmented audio.
        """
        p = self.config
        with tf.name_scope('add_rir_noise_aecres'):
            if sample_rate is None:  # idiom fix: compare with None via `is`
                sample_rate = tf.constant(p.sample_rate, dtype=tf.int32)
            # Fail fast if the caller's rate disagrees with the configured one.
            assert_op = tf.assert_equal(
                tf.constant(p.sample_rate), tf.cast(sample_rate, dtype=tf.int32))
            with tf.control_dependencies([assert_op]):
                sample_rate = tf.cast(sample_rate, dtype=float)
                add_rir_noise_aecres_out = py_x_ops.add_rir_noise_aecres(
                    audio_data,
                    sample_rate,
                    if_add_rir=p.if_add_rir,
                    rir_filelist=p.rir_filelist,
                    if_add_noise=p.if_add_noise,
                    snr_min=p.snr_min,
                    snr_max=p.snr_max,
                    noise_filelist=p.noise_filelist,
                    if_add_aecres=p.if_add_aecres,
                    aecres_filelist=p.aecres_filelist)
                return tf.squeeze(add_rir_noise_aecres_out)
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_lightning.metrics import MulticlassROC, MulticlassPrecisionRecall
from pytorch_lightning.metrics.functional import auc, precision, recall
class MultiAUPRC(nn.Module):
    """Macro-averaged area under the precision-recall curve for
    multi-class logits."""

    def __init__(self, num_classes: int):
        super(MultiAUPRC, self).__init__()
        self.num_classes = num_classes
        self.multi_prc = MulticlassPrecisionRecall(num_classes=num_classes)

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor):
        # One (precision, recall, thresholds) curve per class.
        curves = self.multi_prc(
            pred=logits.softmax(dim=1),
            target=labels,
            sample_weight=None
        )
        total = sum(auc(x=prec, y=rec, reorder=True) for prec, rec, _ in curves)
        return torch.Tensor([total / self.num_classes])
class MultiAUROC(nn.Module):
    """Macro-averaged area under the ROC curve for multi-class logits."""

    def __init__(self, num_classes: int):
        super(MultiAUROC, self).__init__()
        self.num_classes = num_classes
        self.multi_roc = MulticlassROC(num_classes=num_classes)

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor):
        # One (fpr, tpr, thresholds) curve per class.
        curves = self.multi_roc(
            pred=logits.softmax(dim=1),
            target=labels,
            sample_weight=None
        )
        total = sum(auc(x=fpr, y=tpr, reorder=True) for fpr, tpr, _ in curves)
        return torch.Tensor([total / self.num_classes])
class MultiAccuracy(nn.Module):
    """Top-1 accuracy over a batch of multi-class logits."""

    def __init__(self, num_classes: int):
        super(MultiAccuracy, self).__init__()
        self.num_classes = num_classes

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor):
        assert logits.ndim == 2
        assert labels.ndim == 1
        assert len(logits) == len(labels)
        with torch.no_grad():
            hits = logits.argmax(dim=1).eq(labels)
            return hits.float().mean()
class TopKAccuracy(nn.Module):
    """Fraction of samples whose true label appears among the top-k
    predictions with probability at least ``threshold``."""

    def __init__(self, num_classes: int, k: int, threshold: float = 0.):
        super(TopKAccuracy, self).__init__()
        self.num_classes = num_classes
        self.k = k
        self.threshold = threshold

    def forward(self, logits: torch.Tensor, labels: torch.Tensor):
        assert logits.ndim == 2
        assert labels.ndim == 1
        assert len(logits) == len(labels)
        with torch.no_grad():
            probs = F.softmax(logits, dim=1)
            top_p, top_i = probs.topk(self.k, dim=1)
            expanded = labels.unsqueeze(1).expand_as(top_i)           # (B, k)
            matched = expanded.eq(top_i) & (top_p >= self.threshold)  # (B, k)
            per_sample = matched.any(dim=1).float()                   # (B,) in {0, 1}
            return per_sample.mean()
class MultiPrecision(nn.Module):
    """Macro-averaged precision over multi-class logits.

    Only ``average='macro'`` is implemented; 'micro' and 'weighted' raise
    NotImplementedError.
    """

    def __init__(self, num_classes: int, average='macro'):
        super(MultiPrecision, self).__init__()
        self.num_classes = num_classes
        assert average in ['macro', 'micro', 'weighted']
        self.average = average

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor):
        assert logits.ndim == 2
        assert labels.ndim == 1
        with torch.no_grad():
            if self.average != 'macro':
                raise NotImplementedError
            return precision(
                pred=nn.functional.softmax(logits, dim=1),
                target=labels,
                num_classes=self.num_classes,
                reduction='elementwise_mean'
            )
class MultiRecall(nn.Module):
    """Macro-averaged recall over multi-class logits.

    Only ``average='macro'`` is implemented; 'micro' and 'weighted' raise
    NotImplementedError.
    """

    def __init__(self, num_classes: int, average='macro'):
        super(MultiRecall, self).__init__()
        self.num_classes = num_classes
        assert average in ['macro', 'micro', 'weighted']
        self.average = average

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor):
        assert logits.ndim == 2
        assert labels.ndim == 1
        with torch.no_grad():
            if self.average != 'macro':
                raise NotImplementedError
            return recall(
                pred=nn.functional.softmax(logits, dim=1),
                target=labels,
                num_classes=self.num_classes,
                reduction='elementwise_mean',
            )
class MultiF1Score(nn.Module):
    """Macro-averaged F1 computed one-vs-rest per class.

    Only ``average='macro'`` is implemented; 'micro' and 'weighted' raise
    NotImplementedError.
    """

    def __init__(self, num_classes: int, average: str = 'macro'):
        super(MultiF1Score, self).__init__()
        self.num_classes = num_classes
        assert average in ['macro', 'micro', 'weighted']
        self.average = average

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor):
        assert logits.ndim == 2
        assert labels.ndim == 1
        with torch.no_grad():
            if self.average != 'macro':
                raise NotImplementedError
            predictions = logits.argmax(dim=1)
            scores = torch.zeros(self.num_classes, device=logits.device)
            for cls_idx in range(self.num_classes):
                # One-vs-rest binary F1 for this class.
                scores[cls_idx] = BinaryFBetaScore.macro_f_beta_score(
                    predictions == cls_idx, labels == cls_idx, beta=1)
            return scores.mean()
class BinaryFBetaScore(nn.Module):
    """F-beta score for binary predictions given as logits.

    Logits are passed through a sigmoid and thresholded at ``threshold``
    (labels are thresholded directly) before the score is computed.
    Only ``average='macro'`` is implemented.
    """

    def __init__(self, beta=1, threshold=.5, average='macro'):
        super(BinaryFBetaScore, self).__init__()
        self.beta = beta            # weight of recall relative to precision
        self.threshold = threshold  # applied to sigmoid(logit) and to label
        self.average = average

    def forward(self, logit: torch.Tensor, label: torch.Tensor):
        assert logit.ndim == 1
        assert label.ndim == 1
        with torch.no_grad():
            pred = torch.sigmoid(logit)
            pred = pred > self.threshold  # boolean
            true = label > self.threshold  # boolean
            if self.average == 'macro':
                return self.macro_f_beta_score(pred, true, self.beta)
            elif self.average == 'micro':
                return self.micro_f_beta_score(pred, true, self.beta)
            elif self.average == 'weighted':
                return self.weighted_f_beta_score(pred, true, self.beta)
            else:
                raise NotImplementedError

    @staticmethod
    def macro_f_beta_score(pred: torch.Tensor, true: torch.Tensor, beta=1):
        """F-beta from boolean (or 0/1 float) tensors ``pred`` and ``true``.

        A small epsilon keeps the divisions finite when a class is empty.
        BUG FIX: the original also computed the true-negative count into a
        throwaway variable; F-beta never uses it, so the dead work is gone.
        """
        assert true.ndim == 1
        assert pred.ndim == 1
        pred = pred.float()  # inputs could be boolean values
        true = true.float()  # inputs could be boolean values
        tp = (pred * true).sum().float()        # true positives
        fp = (pred * (1 - true)).sum().float()  # false positives
        fn = ((1 - pred) * true).sum().float()  # false negatives
        precision_ = tp / (tp + fp + 1e-7)
        recall_ = tp / (tp + fn + 1e-7)
        f_beta = (1 + beta ** 2) * precision_ * recall_ / (beta ** 2 * precision_ + recall_ + 1e-7)
        return f_beta

    @staticmethod
    def micro_f_beta_score(pred: torch.Tensor, true: torch.Tensor, beta=1):
        raise NotImplementedError

    @staticmethod
    def weighted_f_beta_score(pred: torch.Tensor, true: torch.Tensor, beta=1):
        raise NotImplementedError
class BinaryF1Score(BinaryFBetaScore):
    """Binary F1 score: BinaryFBetaScore with beta fixed at 1."""
    def __init__(self, threshold=.5, average='macro'):
        super(BinaryF1Score, self).__init__(beta=1, threshold=threshold, average=average)
if __name__ == '__main__':
    # Smoke test: three-class logits; rows 3, 6 and 7 disagree with targets.
    # (Also strips a dataset-extraction artifact fused onto the last line.)
    targets = torch.LongTensor([2, 2, 0, 2, 1, 1, 1])
    predictions = torch.FloatTensor(
        [
            [1, 2, 7],  # argmax -> 2
            [1, 3, 7],  # argmax -> 2
            [3, 9, 0],  # argmax -> 1
            [1, 2, 3],  # argmax -> 2
            [3, 7, 0],  # argmax -> 1
            [8, 1, 1],  # argmax -> 0
            [9, 1, 1],  # argmax -> 0
        ]
    )
    f1_function = MultiF1Score(num_classes=3, average='macro')
    f1_val = f1_function(logits=predictions, labels=targets)
    print(f1_val)
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_lightning.metrics import MulticlassROC, MulticlassPrecisionRecall
from pytorch_lightning.metrics.functional import auc, precision, recall
class MultiAUPRC(nn.Module):
    """Macro-averaged area under the precision-recall curve for
    multi-class logits."""

    def __init__(self, num_classes: int):
        super(MultiAUPRC, self).__init__()
        self.num_classes = num_classes
        self.multi_prc = MulticlassPrecisionRecall(num_classes=num_classes)

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor):
        # One (precision, recall, thresholds) curve per class.
        curves = self.multi_prc(
            pred=logits.softmax(dim=1),
            target=labels,
            sample_weight=None
        )
        total = sum(auc(x=prec, y=rec, reorder=True) for prec, rec, _ in curves)
        return torch.Tensor([total / self.num_classes])
class MultiAUROC(nn.Module):
    """Macro-averaged area under the ROC curve for multi-class logits."""

    def __init__(self, num_classes: int):
        super(MultiAUROC, self).__init__()
        self.num_classes = num_classes
        self.multi_roc = MulticlassROC(num_classes=num_classes)

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor):
        # One (fpr, tpr, thresholds) curve per class.
        curves = self.multi_roc(
            pred=logits.softmax(dim=1),
            target=labels,
            sample_weight=None
        )
        total = sum(auc(x=fpr, y=tpr, reorder=True) for fpr, tpr, _ in curves)
        return torch.Tensor([total / self.num_classes])
class MultiAccuracy(nn.Module):
    """Top-1 accuracy over a batch of multi-class logits."""

    def __init__(self, num_classes: int):
        super(MultiAccuracy, self).__init__()
        self.num_classes = num_classes

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor):
        assert logits.ndim == 2
        assert labels.ndim == 1
        assert len(logits) == len(labels)
        with torch.no_grad():
            hits = logits.argmax(dim=1).eq(labels)
            return hits.float().mean()
class TopKAccuracy(nn.Module):
    """Fraction of samples whose true label appears among the top-k
    predictions with probability at least ``threshold``."""

    def __init__(self, num_classes: int, k: int, threshold: float = 0.):
        super(TopKAccuracy, self).__init__()
        self.num_classes = num_classes
        self.k = k
        self.threshold = threshold

    def forward(self, logits: torch.Tensor, labels: torch.Tensor):
        assert logits.ndim == 2
        assert labels.ndim == 1
        assert len(logits) == len(labels)
        with torch.no_grad():
            probs = F.softmax(logits, dim=1)
            top_p, top_i = probs.topk(self.k, dim=1)
            expanded = labels.unsqueeze(1).expand_as(top_i)           # (B, k)
            matched = expanded.eq(top_i) & (top_p >= self.threshold)  # (B, k)
            per_sample = matched.any(dim=1).float()                   # (B,) in {0, 1}
            return per_sample.mean()
class MultiPrecision(nn.Module):
    """Macro-averaged precision over multi-class logits.

    Only ``average='macro'`` is implemented; 'micro' and 'weighted' raise
    NotImplementedError.
    """

    def __init__(self, num_classes: int, average='macro'):
        super(MultiPrecision, self).__init__()
        self.num_classes = num_classes
        assert average in ['macro', 'micro', 'weighted']
        self.average = average

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor):
        assert logits.ndim == 2
        assert labels.ndim == 1
        with torch.no_grad():
            if self.average != 'macro':
                raise NotImplementedError
            return precision(
                pred=nn.functional.softmax(logits, dim=1),
                target=labels,
                num_classes=self.num_classes,
                reduction='elementwise_mean'
            )
class MultiRecall(nn.Module):
    """Macro-averaged recall over multi-class logits.

    Only ``average='macro'`` is implemented; 'micro' and 'weighted' raise
    NotImplementedError.
    """

    def __init__(self, num_classes: int, average='macro'):
        super(MultiRecall, self).__init__()
        self.num_classes = num_classes
        assert average in ['macro', 'micro', 'weighted']
        self.average = average

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor):
        assert logits.ndim == 2
        assert labels.ndim == 1
        with torch.no_grad():
            if self.average != 'macro':
                raise NotImplementedError
            return recall(
                pred=nn.functional.softmax(logits, dim=1),
                target=labels,
                num_classes=self.num_classes,
                reduction='elementwise_mean',
            )
class MultiF1Score(nn.Module):
    """Macro-averaged F1 computed one-vs-rest per class.

    Only ``average='macro'`` is implemented; 'micro' and 'weighted' raise
    NotImplementedError.
    """

    def __init__(self, num_classes: int, average: str = 'macro'):
        super(MultiF1Score, self).__init__()
        self.num_classes = num_classes
        assert average in ['macro', 'micro', 'weighted']
        self.average = average

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor):
        assert logits.ndim == 2
        assert labels.ndim == 1
        with torch.no_grad():
            if self.average != 'macro':
                raise NotImplementedError
            predictions = logits.argmax(dim=1)
            scores = torch.zeros(self.num_classes, device=logits.device)
            for cls_idx in range(self.num_classes):
                # One-vs-rest binary F1 for this class.
                scores[cls_idx] = BinaryFBetaScore.macro_f_beta_score(
                    predictions == cls_idx, labels == cls_idx, beta=1)
            return scores.mean()
class BinaryFBetaScore(nn.Module):
    """F-beta score for binary predictions given as logits.

    Logits are passed through a sigmoid and thresholded at ``threshold``
    (labels are thresholded directly) before the score is computed.
    Only ``average='macro'`` is implemented.
    """

    def __init__(self, beta=1, threshold=.5, average='macro'):
        super(BinaryFBetaScore, self).__init__()
        self.beta = beta            # weight of recall relative to precision
        self.threshold = threshold  # applied to sigmoid(logit) and to label
        self.average = average

    def forward(self, logit: torch.Tensor, label: torch.Tensor):
        assert logit.ndim == 1
        assert label.ndim == 1
        with torch.no_grad():
            pred = torch.sigmoid(logit)
            pred = pred > self.threshold  # boolean
            true = label > self.threshold  # boolean
            if self.average == 'macro':
                return self.macro_f_beta_score(pred, true, self.beta)
            elif self.average == 'micro':
                return self.micro_f_beta_score(pred, true, self.beta)
            elif self.average == 'weighted':
                return self.weighted_f_beta_score(pred, true, self.beta)
            else:
                raise NotImplementedError

    @staticmethod
    def macro_f_beta_score(pred: torch.Tensor, true: torch.Tensor, beta=1):
        """F-beta from boolean (or 0/1 float) tensors ``pred`` and ``true``.

        A small epsilon keeps the divisions finite when a class is empty.
        BUG FIX: the original also computed the true-negative count into a
        throwaway variable; F-beta never uses it, so the dead work is gone.
        """
        assert true.ndim == 1
        assert pred.ndim == 1
        pred = pred.float()  # inputs could be boolean values
        true = true.float()  # inputs could be boolean values
        tp = (pred * true).sum().float()        # true positives
        fp = (pred * (1 - true)).sum().float()  # false positives
        fn = ((1 - pred) * true).sum().float()  # false negatives
        precision_ = tp / (tp + fp + 1e-7)
        recall_ = tp / (tp + fn + 1e-7)
        f_beta = (1 + beta ** 2) * precision_ * recall_ / (beta ** 2 * precision_ + recall_ + 1e-7)
        return f_beta

    @staticmethod
    def micro_f_beta_score(pred: torch.Tensor, true: torch.Tensor, beta=1):
        raise NotImplementedError

    @staticmethod
    def weighted_f_beta_score(pred: torch.Tensor, true: torch.Tensor, beta=1):
        raise NotImplementedError
class BinaryF1Score(BinaryFBetaScore):
    """Binary F1 score: BinaryFBetaScore with beta fixed at 1."""
    def __init__(self, threshold=.5, average='macro'):
        super(BinaryF1Score, self).__init__(beta=1, threshold=threshold, average=average)
if __name__ == '__main__':
    # Smoke test: three-class logits; rows 3, 6 and 7 disagree with targets.
    # (Also strips a dataset-extraction artifact fused onto the last line.)
    targets = torch.LongTensor([2, 2, 0, 2, 1, 1, 1])
    predictions = torch.FloatTensor(
        [
            [1, 2, 7],  # argmax -> 2
            [1, 3, 7],  # argmax -> 2
            [3, 9, 0],  # argmax -> 1
            [1, 2, 3],  # argmax -> 2
            [3, 7, 0],  # argmax -> 1
            [8, 1, 1],  # argmax -> 0
            [9, 1, 1],  # argmax -> 0
        ]
    )
    f1_function = MultiF1Score(num_classes=3, average='macro')
    f1_val = f1_function(logits=predictions, labels=targets)
    print(f1_val)
import sys
class Tokenizer:
    """Splits input text into tokens: single symbols ('[', ',', ']') and
    decimal integer literals. Whitespace is skipped; any other character
    aborts the program."""

    CTX_NO = 'NO'
    CTX_NUMBER = 'NUMBER'
    SINGLE_SYMBOLS = ['[', ',', ']']
    SPACE_SYMBOLS = [' ', '\t', '\r', '\n']
    DEC_NUMBER_SYMBOLS = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

    def __init__(self):
        self.tokens = []

    def consume(self, line: str):
        """Tokenize one line of input, appending tokens to self.tokens."""
        self.__context = self.CTX_NO
        number = ''
        for c in line:
            if self.__context is self.CTX_NUMBER:
                if c in self.DEC_NUMBER_SYMBOLS:
                    number += c
                    continue
                else:
                    # A non-digit terminates the current number literal.
                    self.tokens.append(number)
                    self.__context = self.CTX_NO
                    number = ''
            if c in self.SINGLE_SYMBOLS:
                self.tokens.append(c)
            elif c in self.SPACE_SYMBOLS:
                continue
            elif c in self.DEC_NUMBER_SYMBOLS:
                number += c
                self.__context = self.CTX_NUMBER
            else:
                print('Unexpected symbol; \'{}\''.format(c))
                sys.exit(1)
        # BUG FIX: a number at the very end of the line (no trailing
        # newline or delimiter) used to be silently dropped; flush it here.
        if self.__context is self.CTX_NUMBER and number:
            self.tokens.append(number)
            self.__context = self.CTX_NO
class Parser:
    """Parses the token stream from Tokenizer into nested Python lists.

    Grammar: a comma-separated sequence of items, where each item is an
    integer or a '['...']' list of items. parse() returns the top-level
    sequence as a Python list.
    """

    EXPECT_NUMBER_OR_LIST = 'number or \'[\''
    EXPECT_NUMBER_OR_LIST_OR_CLOSE = 'number or \'[\' or \']\''
    EXPECT_DIVIDER_OR_CLOSE = '\',\' or \']\''
    EXPECT_DIVIDER = '\',\''

    def __init__(self):
        # BUG FIX: these four were class attributes, so the mutable list
        # state was shared between Parser instances and a second Parser
        # saw leftovers from the first. They are now per-instance.
        self.__level = 0             # current bracket nesting depth
        self.__stack = []            # parent lists of the currently open brackets
        self.__current_list = []     # list being filled right now
        self.__context = self.EXPECT_NUMBER_OR_LIST

    def __is_number(self, t: str):
        # Tokenizer only emits digit runs or single symbols, so checking
        # the first character is sufficient.
        c = ord(t[0])
        return c >= 0x30 and c <= 0x39

    def __is_open_bracket(self, t: str):
        return t == '['

    def __is_close_bracket(self, t: str):
        return t == ']'

    def __is_divider(self, t: str):
        return t == ','

    def __accept_number(self, t: str):
        self.__current_list.append(int(t))
        if self.__level == 0:
            self.__context = self.EXPECT_DIVIDER
        else:
            self.__context = self.EXPECT_DIVIDER_OR_CLOSE

    def __accept_open_bracket(self, t: str):
        # Start a new nested list; remember its parent on the stack.
        self.__level += 1
        self.__stack.insert(0, self.__current_list)
        self.__current_list = []
        self.__context = self.EXPECT_NUMBER_OR_LIST_OR_CLOSE

    def __accept_close_bracket(self, t: str):
        # Attach the finished list to its parent and resume filling it.
        parent_list = self.__stack.pop(0)
        parent_list.append(self.__current_list)
        self.__current_list = parent_list
        self.__level -= 1
        if self.__level == 0:
            self.__context = self.EXPECT_DIVIDER
        else:
            self.__context = self.EXPECT_DIVIDER_OR_CLOSE

    def __accept_divider(self, t: str):
        if self.__level == 0:
            self.__context = self.EXPECT_NUMBER_OR_LIST
        else:
            self.__context = self.EXPECT_NUMBER_OR_LIST_OR_CLOSE

    def __unexpected(self, t: str, expected: str):
        print('Unexpected token; \'{}\'. Expected {}.'.format(t, expected))
        sys.exit(1)

    def parse(self, tokens: list):
        """Consume *tokens* and return the parsed top-level list."""
        for t in tokens:
            if self.__context == self.EXPECT_DIVIDER:
                if self.__is_divider(t):
                    self.__accept_divider(t)
                else:
                    self.__unexpected(t, self.__context)
            elif self.__context == self.EXPECT_DIVIDER_OR_CLOSE:
                if self.__is_divider(t):
                    self.__accept_divider(t)
                elif self.__is_close_bracket(t):
                    self.__accept_close_bracket(t)
                else:
                    self.__unexpected(t, self.__context)
            elif self.__context == self.EXPECT_NUMBER_OR_LIST:
                if self.__is_number(t):
                    self.__accept_number(t)
                elif self.__is_open_bracket(t):
                    self.__accept_open_bracket(t)
                else:
                    self.__unexpected(t, self.__context)
            elif self.__context == self.EXPECT_NUMBER_OR_LIST_OR_CLOSE:
                if self.__is_number(t):
                    self.__accept_number(t)
                elif self.__is_open_bracket(t):
                    self.__accept_open_bracket(t)
                elif self.__is_close_bracket(t):
                    self.__accept_close_bracket(t)
                else:
                    self.__unexpected(t, self.__context)
            elif self.__level != 0:
                print('Invalid self.__context ' + self.__context)
                sys.exit(1)
        if self.__stack:
            # An unclosed '[' remains: the input ended mid-list.
            print('Unexpected end of tokens. Expected ' + self.__context + '.')
            sys.exit(1)
        return self.__current_list
if __name__ == "__main__":
    # Tokenize all of stdin, then parse the accumulated token stream.
    # (Also strips a dataset-extraction artifact fused onto the last line.)
    t = Tokenizer()
    for line in sys.stdin:
        t.consume(line)
    p = Parser()
    ast = p.parse(t.tokens)
    print(ast)
class Tokenizer:
    """Splits input text into tokens: single symbols ('[', ',', ']') and
    decimal integer literals. Whitespace is skipped; any other character
    aborts the program."""

    CTX_NO = 'NO'
    CTX_NUMBER = 'NUMBER'
    SINGLE_SYMBOLS = ['[', ',', ']']
    SPACE_SYMBOLS = [' ', '\t', '\r', '\n']
    DEC_NUMBER_SYMBOLS = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

    def __init__(self):
        self.tokens = []

    def consume(self, line: str):
        """Tokenize one line of input, appending tokens to self.tokens."""
        self.__context = self.CTX_NO
        number = ''
        for c in line:
            if self.__context is self.CTX_NUMBER:
                if c in self.DEC_NUMBER_SYMBOLS:
                    number += c
                    continue
                else:
                    # A non-digit terminates the current number literal.
                    self.tokens.append(number)
                    self.__context = self.CTX_NO
                    number = ''
            if c in self.SINGLE_SYMBOLS:
                self.tokens.append(c)
            elif c in self.SPACE_SYMBOLS:
                continue
            elif c in self.DEC_NUMBER_SYMBOLS:
                number += c
                self.__context = self.CTX_NUMBER
            else:
                print('Unexpected symbol; \'{}\''.format(c))
                sys.exit(1)
        # BUG FIX: a number at the very end of the line (no trailing
        # newline or delimiter) used to be silently dropped; flush it here.
        if self.__context is self.CTX_NUMBER and number:
            self.tokens.append(number)
            self.__context = self.CTX_NO
class Parser:
    """Parses the token stream from Tokenizer into nested Python lists.

    Grammar: a comma-separated sequence of items, where each item is an
    integer or a '['...']' list of items. parse() returns the top-level
    sequence as a Python list.
    """

    EXPECT_NUMBER_OR_LIST = 'number or \'[\''
    EXPECT_NUMBER_OR_LIST_OR_CLOSE = 'number or \'[\' or \']\''
    EXPECT_DIVIDER_OR_CLOSE = '\',\' or \']\''
    EXPECT_DIVIDER = '\',\''

    def __init__(self):
        # BUG FIX: these four were class attributes, so the mutable list
        # state was shared between Parser instances and a second Parser
        # saw leftovers from the first. They are now per-instance.
        self.__level = 0             # current bracket nesting depth
        self.__stack = []            # parent lists of the currently open brackets
        self.__current_list = []     # list being filled right now
        self.__context = self.EXPECT_NUMBER_OR_LIST

    def __is_number(self, t: str):
        # Tokenizer only emits digit runs or single symbols, so checking
        # the first character is sufficient.
        c = ord(t[0])
        return c >= 0x30 and c <= 0x39

    def __is_open_bracket(self, t: str):
        return t == '['

    def __is_close_bracket(self, t: str):
        return t == ']'

    def __is_divider(self, t: str):
        return t == ','

    def __accept_number(self, t: str):
        self.__current_list.append(int(t))
        if self.__level == 0:
            self.__context = self.EXPECT_DIVIDER
        else:
            self.__context = self.EXPECT_DIVIDER_OR_CLOSE

    def __accept_open_bracket(self, t: str):
        # Start a new nested list; remember its parent on the stack.
        self.__level += 1
        self.__stack.insert(0, self.__current_list)
        self.__current_list = []
        self.__context = self.EXPECT_NUMBER_OR_LIST_OR_CLOSE

    def __accept_close_bracket(self, t: str):
        # Attach the finished list to its parent and resume filling it.
        parent_list = self.__stack.pop(0)
        parent_list.append(self.__current_list)
        self.__current_list = parent_list
        self.__level -= 1
        if self.__level == 0:
            self.__context = self.EXPECT_DIVIDER
        else:
            self.__context = self.EXPECT_DIVIDER_OR_CLOSE

    def __accept_divider(self, t: str):
        if self.__level == 0:
            self.__context = self.EXPECT_NUMBER_OR_LIST
        else:
            self.__context = self.EXPECT_NUMBER_OR_LIST_OR_CLOSE

    def __unexpected(self, t: str, expected: str):
        print('Unexpected token; \'{}\'. Expected {}.'.format(t, expected))
        sys.exit(1)

    def parse(self, tokens: list):
        """Consume *tokens* and return the parsed top-level list."""
        for t in tokens:
            if self.__context == self.EXPECT_DIVIDER:
                if self.__is_divider(t):
                    self.__accept_divider(t)
                else:
                    self.__unexpected(t, self.__context)
            elif self.__context == self.EXPECT_DIVIDER_OR_CLOSE:
                if self.__is_divider(t):
                    self.__accept_divider(t)
                elif self.__is_close_bracket(t):
                    self.__accept_close_bracket(t)
                else:
                    self.__unexpected(t, self.__context)
            elif self.__context == self.EXPECT_NUMBER_OR_LIST:
                if self.__is_number(t):
                    self.__accept_number(t)
                elif self.__is_open_bracket(t):
                    self.__accept_open_bracket(t)
                else:
                    self.__unexpected(t, self.__context)
            elif self.__context == self.EXPECT_NUMBER_OR_LIST_OR_CLOSE:
                if self.__is_number(t):
                    self.__accept_number(t)
                elif self.__is_open_bracket(t):
                    self.__accept_open_bracket(t)
                elif self.__is_close_bracket(t):
                    self.__accept_close_bracket(t)
                else:
                    self.__unexpected(t, self.__context)
            elif self.__level != 0:
                print('Invalid self.__context ' + self.__context)
                sys.exit(1)
        if self.__stack:
            # An unclosed '[' remains: the input ended mid-list.
            print('Unexpected end of tokens. Expected ' + self.__context + '.')
            sys.exit(1)
        return self.__current_list
if __name__ == "__main__":
    # Tokenize all of stdin, then parse the accumulated token stream.
    # (Also strips a dataset-extraction artifact fused onto the last line.)
    t = Tokenizer()
    for line in sys.stdin:
        t.consume(line)
    p = Parser()
    ast = p.parse(t.tokens)
    print(ast)
import logging
from sys import exc_info
LOGGER = logging.getLogger('anthem.hook')
class Hook(object):
    """A Request Hook callback pointer and its metadata: failsafe, priority, and kwargs."""

    # The bare callable this Hook wraps; invoked when the Hook is called.
    callback = None
    # If True, the callback runs even when earlier callbacks at the same
    # call point raised exceptions.
    failsafe = False
    # Execution order within a call point; nominally within [0, 100].
    priority = 50

    def __init__(self, callback, failsafe=None, priority=None):
        self.callback = callback
        # When not given explicitly, fall back to attributes stamped on
        # the callable itself, then to the class defaults.
        self.failsafe = getattr(callback, "failsafe", False) if failsafe is None else failsafe
        self.priority = getattr(callback, "priority", 50) if priority is None else priority

    def __lt__(self, other):
        # Python 3 ordering: hooks sort by priority.
        return self.priority < other.priority

    def __cmp__(self, other):
        # Python 2 ordering (cmp() does not exist on Python 3).
        return cmp(self.priority, other.priority)

    def __call__(self, *args, **kw):
        """Run self.callback(*args, **kw)."""
        return self.callback(*args, **kw)

    def __repr__(self):
        cls = self.__class__
        return ("%s.%s(callback=%r, failsafe=%r, priority=%r)"
                % (cls.__module__, cls.__name__, self.callback,
                   self.failsafe, self.priority))
class HookMap(dict):
    """A Manager of Request call points to lists of callbacks (Hook objects).

    (Also strips a dataset-extraction artifact fused onto the last line of
    the original.)
    """

    def __new__(cls, points=None):
        # Pre-seed an empty hook list for each requested call point.
        mapping = dict.__new__(cls)
        for point in points or []:
            mapping[point] = []
        return mapping

    def __init__(self, *a, **kw):
        """Init (all construction happens in __new__)."""
        pass

    def attach(self, point, callback, failsafe=None, priority=None, **kwargs):
        """Append a new Hook made from the supplied arguments."""
        self.setdefault(point, []).append(Hook(callback, failsafe, priority, **kwargs))
        self[point].sort()

    def run(self, point, *args, **kw):
        """Execute all registered Hooks (callbacks) for the given point.

        If a hook raises, only failsafe hooks keep running; the pending
        exception (the most recent one raised) is re-raised at the end.
        """
        pending_exc = None
        for hook in self.get(point, []):
            if pending_exc is not None and not hook.failsafe:
                continue
            try:
                hook(*args, **kw)
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception:
                pending_exc = exc_info()[1]
                LOGGER.exception("Hook Error: %s", pending_exc)
        if pending_exc:
            raise pending_exc

    def __copy__(self):
        # Copy each hook list too, so hooks attached to the copy do not
        # mutate the original map.
        duplicate = self.__class__()
        for point, hooks in self.items():
            duplicate[point] = hooks[:]
        return duplicate

    copy = __copy__

    def __repr__(self):
        cls = self.__class__
        return "%s.%s(points=%r)" % (
            cls.__module__,
            cls.__name__,
            self.keys()
        )
from sys import exc_info
LOGGER = logging.getLogger('anthem.hook')
class Hook(object):
    """A Request Hook callback pointer and its metadata: failsafe, priority, and kwargs."""

    # The bare callable wrapped by this Hook; invoked when the Hook is called.
    callback = None

    # If True, the callback is guaranteed to run even if other callbacks
    # from the same call point raise exceptions.
    failsafe = False

    # Execution order for a list of Hooks. Priorities should normally fall
    # in the closed interval [0, 100]; values outside that range, as well as
    # fractional values, are acceptable.
    priority = 50

    def __init__(self, callback, failsafe=None, priority=None, **kwargs):
        """Wrap *callback*.

        failsafe/priority fall back to same-named attributes on the callback,
        then to the class defaults. Extra keyword arguments are stored on the
        Hook and merged into every invocation (call-time kwargs win).
        """
        self.callback = callback
        if failsafe is None:
            # Allow the callback itself to declare failsafe behaviour.
            failsafe = getattr(callback, "failsafe", False)
        self.failsafe = failsafe
        if priority is None:
            priority = getattr(callback, "priority", 50)
        self.priority = priority
        # Fix: HookMap.attach forwards **kwargs to this constructor, but the
        # previous signature rejected them with TypeError. Store them here so
        # attach(point, cb, some_option=...) works as its signature implies.
        self.kwargs = kwargs

    def __lt__(self, other):
        # Python 3: list.sort() orders Hooks by ascending priority.
        return self.priority < other.priority

    def __cmp__(self, other):
        # Python 2 only; ``cmp`` does not exist on Python 3.
        return cmp(self.priority, other.priority)

    def __call__(self, *args, **kw):
        """Run self.callback(*args, **kw), merged over any stored kwargs."""
        return self.callback(*args, **dict(self.kwargs, **kw))

    def __repr__(self):
        """Return an unambiguous, constructor-like representation."""
        cls = self.__class__
        return ("%s.%s(callback=%r, failsafe=%r, priority=%r)"
                % (cls.__module__, cls.__name__, self.callback,
                   self.failsafe, self.priority))
class HookMap(dict):
"""A Manager of Request call points to lists of callbacks (Hook objects)."""
def __new__(cls, points=None):
d = dict.__new__(cls)
for p in points or []:
d[p] = []
return d
def __init__(self, *a, **kw):
"""Init
"""
pass
def attach(self, point, callback, failsafe=None, priority=None, **kwargs):
"""Append a new Hook made from the supplied arguments."""
if point not in self:
self[point] = []
self[point].append(Hook(callback, failsafe, priority, **kwargs))
self[point].sort()
def run(self, point, *args, **kw):
"""Execute all registered Hooks (callbacks) for the given point."""
exc = None
hooks = self.get(point, [])
for hook in hooks:
# Running the hook pointer, if fails, keep the exception info in exc,
# then raises it, when all hook pointer finished.
if exc is None or hook.failsafe:
try:
hook(*args, **kw)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
exc = exc_info()[1]
LOGGER.exception("Hook Error: %s", exc)
if exc:
raise exc
def __copy__(self):
newmap = self.__class__()
# We can't just use 'update' because we want copies of the
# mutable values (each is a list) as well.
for k, v in self.items():
newmap[k] = v[:]
return newmap
copy = __copy__
def __repr__(self):
cls = self.__class__
return "%s.%s(points=%r)" % (
cls.__module__,
cls.__name__,
self.keys()
) | 0.635449 | 0.193967 |
import torch
import torch.nn as nn
from torch.nn import functional as F
class ColorConstancyLoss(nn.Module):
    """Color Constancy Loss.

    Measures how far the per-channel mean intensities of an image drift
    apart, following the gray-world assumption that the average scene
    color should be achromatic.
    """

    def __init__(self):
        super(ColorConstancyLoss, self).__init__()

    def forward(self, x):
        # Per-channel spatial means, kept as (N, 3, 1, 1).
        channel_means = torch.mean(x, [2, 3], keepdim=True)
        mean_r, mean_g, mean_b = torch.split(channel_means, 1, dim=1)
        # Squared pairwise differences between the channel means.
        diff_rg = torch.pow(mean_r - mean_g, 2)
        diff_rb = torch.pow(mean_r - mean_b, 2)
        diff_gb = torch.pow(mean_b - mean_g, 2)
        # Euclidean norm over the squared pairwise differences.
        return torch.pow(
            torch.pow(diff_rg, 2) + torch.pow(diff_rb, 2) + torch.pow(diff_gb, 2),
            0.5,
        )
class ExposureLoss(nn.Module):
    """Exposure Loss.

    Penalizes the squared distance between the average local (patch)
    intensity of the image and a target well-exposedness level.

    Args:
        patch_size: side length of the non-overlapping averaging patches.
        mean_val: target mean intensity for a well-exposed patch.
    """

    def __init__(self, patch_size, mean_val):
        super(ExposureLoss, self).__init__()
        self.pool = nn.AvgPool2d(patch_size)
        self.mean_val = mean_val

    def forward(self, x):
        # Collapse channels to a single-channel intensity map.
        x = torch.mean(x, 1, keepdim=True)
        # Mean intensity per patch.
        mean = self.pool(x)
        # Fix: the original built torch.FloatTensor([mean_val]).cuda() on
        # every call, which crashed on CPU-only machines and allocated a
        # fresh tensor per forward. Subtracting the Python scalar broadcasts
        # on any device and yields identical results.
        return torch.mean(torch.pow(mean - self.mean_val, 2))
class IlluminationSmoothnessLoss(nn.Module):
    """Illumination Smoothing Loss.

    Total-variation penalty encouraging neighbouring pixels of the input
    map to change smoothly.
    """

    def __init__(self, loss_weight=1):
        super(IlluminationSmoothnessLoss, self).__init__()
        self.loss_weight = loss_weight

    def forward(self, x):
        batch = x.size()[0]
        height = x.size()[2]
        width = x.size()[3]
        # Number of vertical / horizontal neighbour pairs used to normalize.
        n_pairs_h = (height - 1) * width
        n_pairs_w = height * (width - 1)
        # Summed squared differences of vertically / horizontally adjacent pixels.
        tv_h = torch.pow(x[:, :, 1:, :] - x[:, :, :height - 1, :], 2).sum()
        tv_w = torch.pow(x[:, :, :, 1:] - x[:, :, :, :width - 1], 2).sum()
        return self.loss_weight * 2 * (tv_h / n_pairs_h + tv_w / n_pairs_w) / batch
class SpatialConsistancyLoss(nn.Module):
"""Spatial Consistancy Loss"""
def __init__(self):
super(SpatialConsistancyLoss, self).__init__()
# Fixed 3x3 directional difference kernels (center minus one neighbour)
# applied to the pooled intensity maps below.
# NOTE(review): .cuda() is hard-coded here, so constructing this module
# fails on CPU-only machines — consider building on CPU and relying on
# module.to(device)/module.cuda() to move the parameters instead.
kernel_left = torch.FloatTensor(
[[0, 0, 0], [-1, 1, 0], [0, 0, 0]]).cuda().unsqueeze(0).unsqueeze(0)
kernel_right = torch.FloatTensor(
[[0, 0, 0], [0, 1, -1], [0, 0, 0]]).cuda().unsqueeze(0).unsqueeze(0)
kernel_up = torch.FloatTensor(
[[0, -1, 0], [0, 1, 0], [0, 0, 0]]).cuda().unsqueeze(0).unsqueeze(0)
kernel_down = torch.FloatTensor(
[[0, 0, 0], [0, 1, 0], [0, -1, 0]]).cuda().unsqueeze(0).unsqueeze(0)
# Parameters with requires_grad=False: they travel with the module but
# are never updated by the optimizer.
self.weight_left = nn.Parameter(data=kernel_left, requires_grad=False)
self.weight_right = nn.Parameter(data=kernel_right, requires_grad=False)
self.weight_up = nn.Parameter(data=kernel_up, requires_grad=False)
self.weight_down = nn.Parameter(data=kernel_down, requires_grad=False)
self.pool = nn.AvgPool2d(4)
def forward(self, org, enhance):
# Collapse channels to intensity maps, then average over 4x4 patches.
org_mean = torch.mean(org, 1, keepdim=True)
enhance_mean = torch.mean(enhance, 1, keepdim=True)
org_pool = self.pool(org_mean)
enhance_pool = self.pool(enhance_mean)
# Directional gradients of the pooled original image.
d_org_left = F.conv2d(org_pool, self.weight_left, padding=1)
d_org_right = F.conv2d(org_pool, self.weight_right, padding=1)
d_org_up = F.conv2d(org_pool, self.weight_up, padding=1)
d_org_down = F.conv2d(org_pool, self.weight_down, padding=1)
# Directional gradients of the pooled enhanced image.
d_enhance_left = F.conv2d(enhance_pool, self.weight_left, padding=1)
d_enhance_right = F.conv2d(enhance_pool, self.weight_right, padding=1)
d_enhance_up = F.conv2d(enhance_pool, self.weight_up, padding=1)
d_enhance_down = F.conv2d(enhance_pool, self.weight_down, padding=1)
# Penalize per-direction differences in local contrast between the
# original and the enhanced image (returned per-location, not reduced).
d_left = torch.pow(d_org_left - d_enhance_left, 2)
d_right = torch.pow(d_org_right - d_enhance_right, 2)
d_up = torch.pow(d_org_up - d_enhance_up, 2)
d_down = torch.pow(d_org_down - d_enhance_down, 2)
return d_left + d_right + d_up + d_down | zero_dce/losses.py | import torch
import torch.nn as nn
from torch.nn import functional as F
class ColorConstancyLoss(nn.Module):
"""Color Constancy Loss"""
def __init__(self):
super(ColorConstancyLoss, self).__init__()
def forward(self, x):
mean_rgb = torch.mean(x, [2, 3], keepdim=True)
mr, mg, mb = torch.split(mean_rgb, 1, dim=1)
drg = torch.pow(mr - mg, 2)
drb = torch.pow(mr - mb, 2)
dgb = torch.pow(mb - mg, 2)
k = torch.pow(
torch.pow(drg, 2) + torch.pow(drb, 2) + torch.pow(dgb, 2), 0.5)
return k
class ExposureLoss(nn.Module):
"""Exposure Loss"""
def __init__(self, patch_size, mean_val):
super(ExposureLoss, self).__init__()
self.pool = nn.AvgPool2d(patch_size)
self.mean_val = mean_val
def forward(self, x):
x = torch.mean(x, 1, keepdim=True)
mean = self.pool(x)
return torch.mean(torch.pow(
mean - torch.FloatTensor([self.mean_val]).cuda(), 2
))
class IlluminationSmoothnessLoss(nn.Module):
"""Illumination Smoothing Loss"""
def __init__(self, loss_weight=1):
super(IlluminationSmoothnessLoss, self).__init__()
self.loss_weight = loss_weight
def forward(self, x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = (x.size()[2] - 1) * x.size()[3]
count_w = x.size()[2] * (x.size()[3] - 1)
h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()
w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()
return self.loss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size
class SpatialConsistancyLoss(nn.Module):
"""Spatial Consistancy Loss"""
def __init__(self):
super(SpatialConsistancyLoss, self).__init__()
kernel_left = torch.FloatTensor(
[[0, 0, 0], [-1, 1, 0], [0, 0, 0]]).cuda().unsqueeze(0).unsqueeze(0)
kernel_right = torch.FloatTensor(
[[0, 0, 0], [0, 1, -1], [0, 0, 0]]).cuda().unsqueeze(0).unsqueeze(0)
kernel_up = torch.FloatTensor(
[[0, -1, 0], [0, 1, 0], [0, 0, 0]]).cuda().unsqueeze(0).unsqueeze(0)
kernel_down = torch.FloatTensor(
[[0, 0, 0], [0, 1, 0], [0, -1, 0]]).cuda().unsqueeze(0).unsqueeze(0)
self.weight_left = nn.Parameter(data=kernel_left, requires_grad=False)
self.weight_right = nn.Parameter(data=kernel_right, requires_grad=False)
self.weight_up = nn.Parameter(data=kernel_up, requires_grad=False)
self.weight_down = nn.Parameter(data=kernel_down, requires_grad=False)
self.pool = nn.AvgPool2d(4)
def forward(self, org, enhance):
org_mean = torch.mean(org, 1, keepdim=True)
enhance_mean = torch.mean(enhance, 1, keepdim=True)
org_pool = self.pool(org_mean)
enhance_pool = self.pool(enhance_mean)
d_org_left = F.conv2d(org_pool, self.weight_left, padding=1)
d_org_right = F.conv2d(org_pool, self.weight_right, padding=1)
d_org_up = F.conv2d(org_pool, self.weight_up, padding=1)
d_org_down = F.conv2d(org_pool, self.weight_down, padding=1)
d_enhance_left = F.conv2d(enhance_pool, self.weight_left, padding=1)
d_enhance_right = F.conv2d(enhance_pool, self.weight_right, padding=1)
d_enhance_up = F.conv2d(enhance_pool, self.weight_up, padding=1)
d_enhance_down = F.conv2d(enhance_pool, self.weight_down, padding=1)
d_left = torch.pow(d_org_left - d_enhance_left, 2)
d_right = torch.pow(d_org_right - d_enhance_right, 2)
d_up = torch.pow(d_org_up - d_enhance_up, 2)
d_down = torch.pow(d_org_down - d_enhance_down, 2)
return d_left + d_right + d_up + d_down | 0.964539 | 0.677247 |
from unittest import TestCase
from unittest.mock import patch, ANY
import responses
import azkaban_cli.azkaban
from azkaban_cli.exceptions import FetchFlowExecutionUpdatesError, SessionError
class AzkabanFetchFlowExecutionTest(TestCase):
def setUp(self):
"""
Creates an Azkaban instance and set a logged session for all upload tests
"""
self.azk = azkaban_cli.azkaban.Azkaban()
# Fake host and pre-authorized session shared by every test below.
self.host = 'http://azkaban-mock.com'
self.user = 'username'
self.session_id = 'aebe406b-d5e6-4056-add6-bf41091e42c6'
self.azk.set_logged_session(self.host, self.user, self.session_id)
# Execution id and "last update" timestamp passed to the API under test.
self.exec_id = '1234'
self.lastUpdateTime = '1407778382894'
def tearDown(self):
# Nothing to clean up; kept for symmetry with setUp.
pass
@responses.activate
def test_fetch_flow_execution_updates(self):
"""
Test fetch flow execution updates method from Azkaban class
"""
# Stub GET /executor with a realistic execution-status payload taken
# from the Azkaban API docs (one flow with four job nodes).
responses.add(
responses.GET,
self.host + "/executor",
json={
"id" : "test",
"startTime" : 1407778382894,
"attempt" : 0,
"status" : "FAILED",
"updateTime" : 1407778404708,
"nodes" : [ {
"attempt" : 0,
"startTime" : 1407778404683,
"id" : "test",
"updateTime" : 1407778404683,
"status" : "CANCELLED",
"endTime" : 1407778404683
}, {
"attempt" : 0,
"startTime" : 1407778382913,
"id" : "test-job-1",
"updateTime" : 1407778393850,
"status" : "SUCCEEDED",
"endTime" : 1407778393845
}, {
"attempt" : 0,
"startTime" : 1407778393849,
"id" : "test-job-2",
"updateTime" : 1407778404679,
"status" : "FAILED",
"endTime" : 1407778404675
}, {
"attempt" : 0,
"startTime" : 1407778404675,
"id" : "test-job-3",
"updateTime" : 1407778404675,
"status" : "CANCELLED",
"endTime" : 1407778404675
} ],
"flow" : "test",
"endTime" : 1407778404705
},
status=200
)
# Should complete without raising when the server returns a valid payload.
self.azk.fetch_flow_execution_updates(self.exec_id, self.lastUpdateTime)
@patch('azkaban_cli.azkaban.api.fetch_flow_execution_updates_request')
def test_fetch_flow_execution_updates_called(self, mock_fetch_flow_execution_updates):
"""
Test if fetch flow execution updates method from Azkaban class is calling fetch flow execution
updates request with expected arguments
"""
self.azk.fetch_flow_execution_updates(self.exec_id, self.lastUpdateTime)
# ANY stands in for the internally created requests session argument.
mock_fetch_flow_execution_updates.assert_called_with(
ANY, self.host, self.session_id, self.exec_id, self.lastUpdateTime)
@responses.activate
def test_execution_cannot_be_found_fetch_flow_execution_updates(self):
"""
Test if fetch flow execution updates method from Azkaban class raises FetchFlowExecutionUpdatesError
if request returns error caused by execution not be found
"""
# Azkaban reports "not found" via an error field in a 200 response.
responses.add(
responses.GET,
self.host + "/executor",
json={
'error': "Cannot find execution '0'"
},
status=200
)
with self.assertRaises(FetchFlowExecutionUpdatesError):
self.azk.fetch_flow_execution_updates(self.exec_id, self.lastUpdateTime)
@responses.activate
def test_error_session_expired_fetch_flow_execution_updates(self):
"""
Test if fetch flow execution updates method from Azkaban class raises SessionError if request
returns error caused by session expired
"""
# An {"error": "session"} body signals an expired/invalid session id.
responses.add(responses.GET, self.host + "/executor", json={"error": "session"}, status=200)
with self.assertRaises(SessionError):
self.azk.fetch_flow_execution_updates(self.exec_id, self.lastUpdateTime) | azkaban_cli/tests/test_azkaban/test_fetch_flow_execution_updates.py | from unittest import TestCase
from unittest.mock import patch, ANY
import responses
import azkaban_cli.azkaban
from azkaban_cli.exceptions import FetchFlowExecutionUpdatesError, SessionError
class AzkabanFetchFlowExecutionTest(TestCase):
def setUp(self):
"""
Creates an Azkaban instance and set a logged session for all upload tests
"""
self.azk = azkaban_cli.azkaban.Azkaban()
self.host = 'http://azkaban-mock.com'
self.user = 'username'
self.session_id = 'aebe406b-d5e6-4056-add6-bf41091e42c6'
self.azk.set_logged_session(self.host, self.user, self.session_id)
self.exec_id = '1234'
self.lastUpdateTime = '1407778382894'
def tearDown(self):
pass
@responses.activate
def test_fetch_flow_execution_updates(self):
"""
Test fetch flow execution updates method from Azkaban class
"""
responses.add(
responses.GET,
self.host + "/executor",
json={
"id" : "test",
"startTime" : 1407778382894,
"attempt" : 0,
"status" : "FAILED",
"updateTime" : 1407778404708,
"nodes" : [ {
"attempt" : 0,
"startTime" : 1407778404683,
"id" : "test",
"updateTime" : 1407778404683,
"status" : "CANCELLED",
"endTime" : 1407778404683
}, {
"attempt" : 0,
"startTime" : 1407778382913,
"id" : "test-job-1",
"updateTime" : 1407778393850,
"status" : "SUCCEEDED",
"endTime" : 1407778393845
}, {
"attempt" : 0,
"startTime" : 1407778393849,
"id" : "test-job-2",
"updateTime" : 1407778404679,
"status" : "FAILED",
"endTime" : 1407778404675
}, {
"attempt" : 0,
"startTime" : 1407778404675,
"id" : "test-job-3",
"updateTime" : 1407778404675,
"status" : "CANCELLED",
"endTime" : 1407778404675
} ],
"flow" : "test",
"endTime" : 1407778404705
},
status=200
)
self.azk.fetch_flow_execution_updates(self.exec_id, self.lastUpdateTime)
@patch('azkaban_cli.azkaban.api.fetch_flow_execution_updates_request')
def test_fetch_flow_execution_updates_called(self, mock_fetch_flow_execution_updates):
"""
Test if fetch flow execution updates method from Azkaban class is calling fetch flow execution
updates request with expected arguments
"""
self.azk.fetch_flow_execution_updates(self.exec_id, self.lastUpdateTime)
mock_fetch_flow_execution_updates.assert_called_with(
ANY, self.host, self.session_id, self.exec_id, self.lastUpdateTime)
@responses.activate
def test_execution_cannot_be_found_fetch_flow_execution_updates(self):
"""
Test if fetch flow execution updates method from Azkaban class raises FetchFlowExecutionUpdatesError
if request returns error caused by execution not be found
"""
responses.add(
responses.GET,
self.host + "/executor",
json={
'error': "Cannot find execution '0'"
},
status=200
)
with self.assertRaises(FetchFlowExecutionUpdatesError):
self.azk.fetch_flow_execution_updates(self.exec_id, self.lastUpdateTime)
@responses.activate
def test_error_session_expired_fetch_flow_execution_updates(self):
"""
Test if fetch flow execution updates method from Azkaban class raises SessionError if request
returns error caused by session expired
"""
responses.add(responses.GET, self.host + "/executor", json={"error": "session"}, status=200)
with self.assertRaises(SessionError):
self.azk.fetch_flow_execution_updates(self.exec_id, self.lastUpdateTime) | 0.668123 | 0.374333 |
import plot_class
import random
class Minesweeper:
    """Console Minesweeper board backed by a 2-D grid of plot_class.Plot cells."""

    def __init__(self, lines=10, cols=10):
        """Create a lines x cols board of fresh plots."""
        self._lines = lines
        self._cols = cols
        self._map = [[plot_class.Plot() for i in range(cols)] for j in range(lines)]

    def _neighbors(self, x, y):
        """Yield in-bounds coordinates of the (up to 8) cells around (x, y).

        Order matches the original hand-unrolled checks: up-left, up,
        up-right, left, right, down-left, down, down-right. Extracting this
        removes the duplicated bounds logic from scanMap/propagateDiscovery.
        """
        for dx, dy in ((-1, -1), (-1, 0), (-1, 1),
                       (0, -1), (0, 1),
                       (1, -1), (1, 0), (1, 1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < self._lines and 0 <= ny < self._cols:
                yield nx, ny

    def getCell(self, x, y):
        """Return the true (unhidden) indicator of the cell at (x, y)."""
        return self._map[x][y].getIndicator(trueSight=True)

    def displayMap(self, trueSight=False):
        """Print the board; trueSight=True also shows unrevealed cells."""
        count = 0
        for line in self._map:
            print(' ', sep='', end='')
            for col in line:
                # Count mines from the true view regardless of display mode.
                if col.getIndicator(trueSight=True) == plot_class.c_mine:
                    count += 1
                print(col.getIndicator(trueSight=trueSight), sep='', end='')
                print(' ', sep='', end='')
            print()
        print('Total : ' + str(count) + ' mines' + ' - Format: ' + str(self._cols) + 'x' + str(self._lines) + '\n')

    def randomBomb(self):
        """Place one mine on a uniformly random cell that is not already mined.

        Fix: the original retried via unbounded recursion, which can raise
        RecursionError on dense boards; a loop retries safely with the same
        sampling behaviour.
        """
        while True:
            x = random.randrange(self._lines)
            y = random.randrange(self._cols)
            if self.getCell(x, y) != plot_class.c_mine:
                self._map[x][y].setMine()
                return

    def carpetBomb(self, n=10):
        """Place n mines (n must not exceed the number of cells)."""
        for i in range(n):
            self.randomBomb()

    def scanMap(self):
        """Set each non-mine cell's indicator to its count of adjacent mines.

        Run this only once, after carpetBomb().
        """
        for i, line in enumerate(self._map):
            for j, p in enumerate(line):
                if p.getIndicator(trueSight=True) == plot_class.c_mine:
                    continue
                count = 0
                for ni, nj in self._neighbors(i, j):
                    if self.getCell(ni, nj) == plot_class.c_mine:
                        count += 1
                p.setIndicator(str(count))

    def showClue(self):
        """Reveal a random empty cell (and its region) as the player's start.

        Fix: retry with a loop instead of unbounded recursion.
        """
        while True:
            x = random.randrange(self._lines)
            y = random.randrange(self._cols)
            if self.getCell(x, y) == plot_class.c_empty:
                self._map[x][y].revealPlot()
                self.propagateDiscovery(x, y)
                return

    def propagateDiscovery(self, x, y):
        """Reveal (x, y); if it is empty, flood-reveal its unrevealed neighbors."""
        if self.getCell(x, y) == plot_class.c_empty:
            self._map[x][y].revealPlot()
            for nx, ny in self._neighbors(x, y):
                if self._map[nx][ny].revealed == False:
                    self.propagateDiscovery(nx, ny)
        else:
            # A non-empty cell (number or mine) is revealed but not expanded.
            self._map[x][y].revealPlot()

    def findUnsolvable(self):
        """Flood-reveal every empty region that is still fully hidden."""
        for i, line in enumerate(self._map):
            for j, p in enumerate(line):
                if self.getCell(i, j) == plot_class.c_empty and self._map[i][j].revealed == False:
                    self.propagateDiscovery(i, j)
#----------------------
# Creating the application
# Demo driver: build a 16x30 board with 50 mines, print it fully revealed
# for debugging, then start a flood reveal from the top-left corner.
program = Minesweeper( lines = 16, cols = 30 )
program.carpetBomb(50)
program.scanMap()
program.displayMap( trueSight = True )
#program.findUnsolvable()
program.propagateDiscovery( 0, 0)
program.displayMap() | main.py | import plot_class
import random
class Minesweeper:
''' Constructor of the class: start the game for you '''
def __init__( self, lines = 10, cols = 10 ):
self._lines = lines
self._cols = cols
self._map = [ [plot_class.Plot() for i in range(cols) ] for j in range(lines) ]
''' Returns the display of the cell '''
def getCell( self, x, y ):
var = self._map[x][y]
return var.getIndicator( trueSight = True )
''' Display the whole map for the player '''
def displayMap( self, trueSight = False ):
count = 0
for line in self._map:
print( ' ', sep = '', end = '' )
for col in line:
if col.getIndicator(trueSight = True) == plot_class.c_mine :
count += 1
print( col.getIndicator( trueSight = trueSight ), sep = '', end = '' )
print( ' ', sep = '', end = '' )
print( )
print( 'Total : ' + str(count) + ' mines' + ' - Format: ' + str(self._cols) + 'x' + str(self._lines) + '\n' )
''' Add a random bomb to the map '''
def randomBomb( self ):
x = random.randrange( self._lines )
y = random.randrange( self._cols )
if self.getCell( x, y ) == plot_class.c_mine :
self.randomBomb()
else :
self._map[x][y].setMine()
''' Generate as much bombs as specified '''
def carpetBomb( self, n = 10 ):
for i in range(n):
self.randomBomb()
''' Pass through every plot to determine its indicator value '''
''' Run this only once after doing the carpet bomb'''
def scanMap( self ):
for i, line in enumerate( self._map ) :
for j, p in enumerate( line ) :
count = 0
if p.getIndicator(trueSight = True) == plot_class.c_mine :
continue
else :
# up left
if i-1 >= 0 and j-1 >= 0 :
if self.getCell( i-1, j-1 ) == plot_class.c_mine :
count += 1
# up top
if i-1 >= 0 :
if self.getCell( i-1, j ) == plot_class.c_mine :
count += 1
# up right
if i-1 >= 0 and j+1 < self._cols :
if self.getCell( i-1, j+1 ) == plot_class.c_mine :
count += 1
# left
if j-1 >= 0 :
if self.getCell( i, j-1 ) == plot_class.c_mine :
count += 1
# right
if j+1 < self._cols :
if self.getCell( i, j+1 ) == plot_class.c_mine :
count += 1
# down left
if i+1 < self._lines and j-1 >= 0 :
if self.getCell( i+1, j-1 ) == plot_class.c_mine :
count += 1
# down bottom
if i+1 < self._lines :
if self.getCell( i+1, j ) == plot_class.c_mine :
count += 1
# down right
if i+1 < self._lines and j+1 < self._cols :
if self.getCell( i+1, j+1 ) == plot_class.c_mine :
count += 1
p.setIndicator( str(count) )
''' Give the player the first start into the game '''
def showClue( self ):
x = random.randrange( self._lines )
y = random.randrange( self._cols )
if self.getCell( x, y ) != plot_class.c_empty :
self.showClue()
else :
self._map[x][y].revealPlot()
self.propagateDiscovery(x, y)
''' When a empty plot is found, we look for other similar neighbor '''
def propagateDiscovery( self, x, y ):
if self.getCell(x, y) == plot_class.c_empty :
# Reveal the plot and propagate to the neighbors
self._map[x][y].revealPlot()
# up left
if x-1 >= 0 and y-1 >= 0 and self._map[x-1][y-1].revealed == False :
self.propagateDiscovery(x-1, y-1)
# up top
if x-1 >= 0 and self._map[x-1][y].revealed == False :
self.propagateDiscovery(x-1, y)
# up right
if x-1 >= 0 and y+1 < self._cols and self._map[x-1][y+1].revealed == False :
self.propagateDiscovery(x-1, y+1)
# left
if y-1 >= 0 and self._map[x][y-1].revealed == False :
self.propagateDiscovery(x, y-1)
# right
if y+1 < self._cols and self._map[x][y+1].revealed == False :
self.propagateDiscovery(x, y+1)
# down left
if x+1 < self._lines and y-1 >= 0 and self._map[x+1][y-1].revealed == False :
self.propagateDiscovery(x+1, y-1)
# down bottom
if x+1 < self._lines and self._map[x+1][y].revealed == False :
self.propagateDiscovery(x+1, y)
# down right
if x+1 < self._lines and y+1 < self._cols and self._map[x+1][y+1].revealed == False :
self.propagateDiscovery(x+1, y+1)
else :
# just reveat the plot
self._map[x][y].revealPlot()
''' '''
def findUnsolvable( self ):
for i, line in enumerate( self._map ) :
for j, p in enumerate( line ) :
if self.getCell(i, j) == plot_class.c_empty and self._map[i][j].revealed == False :
self.propagateDiscovery(i, j)
#----------------------
# Creating the application
program = Minesweeper( lines = 16, cols = 30 )
program.carpetBomb(50)
program.scanMap()
program.displayMap( trueSight = True )
#program.findUnsolvable()
program.propagateDiscovery( 0, 0)
program.displayMap() | 0.495606 | 0.375621 |
import json
import logging
import unittest
from pathlib import Path
import numpy as np
from sira.configuration import Configuration
from sira.model_ingest import ingest_model
from sira.modelling.hazard import HazardsContainer
from sira.scenario import Scenario
from sira.simulation import calculate_response
rootLogger = logging.getLogger(__name__)
rootLogger.setLevel(logging.CRITICAL)
class TestSystemSanity(unittest.TestCase):
    """
    Sets up and runs tests to compare against results from pre-run and checked
    simulations to check that code is producing the expected results.
    """

    def setUp(self):
        # Fixture locations, resolved relative to this test file.
        self.root_dir = Path(__file__).resolve().parent
        self.models_dir = Path(self.root_dir, 'models')
        self.comparison_data_dir = Path(self.root_dir, 'historical_data')

    # -------------------------------------------------------------------------
    def _run_simulation(self, conf_file_path, model_file_path):
        """Run one full simulation; return (config, response_list).

        Extracted helper: this config/scenario/hazards/model boilerplate was
        previously duplicated verbatim in every test method.
        """
        config = Configuration(conf_file_path, model_file_path)
        scenario = Scenario(config)
        hazards = HazardsContainer(config, model_file_path)
        infrastructure = ingest_model(config)
        response_list = calculate_response(hazards, scenario, infrastructure)
        return config, response_list

    def _run_model_dir(self, model_name):
        """Locate config/model JSON under models/<model_name>/input and run.

        Returns (model_file_path, config, response_list).
        """
        input_dir = Path(self.models_dir, model_name, "input")
        conf_file_path = [d for d in input_dir.glob('*config*.json')].pop()
        model_file_path = [d for d in input_dir.glob('*model*.json')].pop()
        config, response_list = self._run_simulation(
            conf_file_path, model_file_path)
        return model_file_path, config, response_list

    # -------------------------------------------------------------------------
    def test_economic_loss_comparison_for_system_sanity(self):
        _, _, response_list = self._run_model_dir("powerstation_coal_A")
        # response_list[5] holds the economic loss array for the run.
        economic_loss_array = response_list[5]
        test_file_path = Path(
            self.comparison_data_dir,
            "economic_loss_for_system_sanity_testing.npy")
        historical_economic_loss_array = np.load(test_file_path)
        self.assertTrue(
            np.array_equal(economic_loss_array, historical_economic_loss_array),
            f"{len(economic_loss_array)} '\n'{len(historical_economic_loss_array)}"
        )

    # -------------------------------------------------------------------------
    def test_run_scenario_lower_limit(self):
        model_file_path, _, response_list = self._run_model_dir(
            "test_structure__limit_lower")
        with open(model_file_path, 'r') as mdl:
            json_infra_model = json.load(mdl)
        output_node_capacity = \
            json_infra_model["sysout_setup"]["output_node"]["output_node_capacity"]
        # At the lower hazard limit the system should retain full capacity.
        self.assertTrue(
            int(response_list[4][0][0]) == int(output_node_capacity)
        )

    # -------------------------------------------------------------------------
    def test_run_scenario_upper_limit(self):
        _, _, response_list = self._run_model_dir("test_structure__limit_upper")
        # At the upper hazard limit the system should produce no output.
        self.assertTrue(int(response_list[4][0][0]) == int(0))

    # -------------------------------------------------------------------------
    def test_compare_economic_loss_for_existing_models(self):
        print("\n{}\n>>> Initiating sanity check aganist pre-run models...".
              format('-' * 70))
        conf_file_paths = [
            d for d in self.models_dir.rglob('input/*config_testmdl*.json')]
        model_file_paths = [
            d for d in self.models_dir.rglob('input/*model_testmdl*.json')]
        for conf_file_path, model_file_path in \
                zip(conf_file_paths, model_file_paths):
            if conf_file_path.is_file():
                print("\nMatching results for: " + Path(conf_file_path).name)
                config, response_list = self._run_simulation(
                    conf_file_path, model_file_path)
                econ_loss_calculated = response_list[5]
                stored_data_file = Path(
                    self.comparison_data_dir,
                    "economic_loss_for_" + config.SCENARIO_NAME + '.npy')
                econ_loss_historic = np.load(stored_data_file)
                self.assertTrue(
                    np.array_equal(econ_loss_calculated, econ_loss_historic),
                    conf_file_path
                )
                print("OK")
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
unittest.main() | tests/test_output_sanity_check.py | import json
import logging
import unittest
from pathlib import Path
import numpy as np
from sira.configuration import Configuration
from sira.model_ingest import ingest_model
from sira.modelling.hazard import HazardsContainer
from sira.scenario import Scenario
from sira.simulation import calculate_response
rootLogger = logging.getLogger(__name__)
rootLogger.setLevel(logging.CRITICAL)
class TestSystemSanity(unittest.TestCase):
"""
Sets up and runs tests to compare against results from pre-run and checked
simulations to check that code is producing the expected results.
"""
def setUp(self):
self.root_dir = Path(__file__).resolve().parent
self.models_dir = Path(self.root_dir, 'models')
self.comparison_data_dir = Path(self.root_dir, 'historical_data')
# -------------------------------------------------------------------------
def test_economic_loss_comparison_for_system_sanity(self):
input_dir = Path(
self.models_dir, "powerstation_coal_A", "input")
conf_file_path = [d for d in input_dir.glob('*config*.json')].pop()
model_file_path = [d for d in input_dir.glob('*model*.json')].pop()
config = Configuration(conf_file_path, model_file_path)
scenario = Scenario(config)
hazards = HazardsContainer(config, model_file_path)
infrastructure = ingest_model(config)
response_list = calculate_response(hazards, scenario, infrastructure)
economic_loss_array = response_list[5]
test_file_path = Path(
self.comparison_data_dir,
"economic_loss_for_system_sanity_testing.npy")
historical_economic_loss_array = np.load(test_file_path)
self.assertTrue(
np.array_equal(economic_loss_array, historical_economic_loss_array),
f"{len(economic_loss_array)} '\n'{len(historical_economic_loss_array)}"
)
# -------------------------------------------------------------------------
def test_run_scenario_lower_limit(self):
input_dir = Path(
self.models_dir, "test_structure__limit_lower", "input"
)
conf_file_path = [d for d in input_dir.glob('*config*.json')].pop()
model_file_path = [d for d in input_dir.glob('*model*.json')].pop()
config = Configuration(conf_file_path, model_file_path)
scenario = Scenario(config)
hazards = HazardsContainer(config, model_file_path)
infrastructure = ingest_model(config)
response_list = calculate_response(hazards, scenario, infrastructure)
output_node_capacity = 0
with open(model_file_path, 'r') as mdl:
json_infra_model = json.load(mdl)
output_node_capacity =\
json_infra_model["sysout_setup"]["output_node"]["output_node_capacity"]
self.assertTrue(
int(response_list[4][0][0]) == int(output_node_capacity)
)
# -------------------------------------------------------------------------
def test_run_scenario_upper_limit(self):
input_dir = Path(
self.models_dir, "test_structure__limit_upper", "input"
)
conf_file_path = [d for d in input_dir.glob('*config*.json')].pop()
model_file_path = [d for d in input_dir.glob('*model*.json')].pop()
config = Configuration(conf_file_path, model_file_path)
scenario = Scenario(config)
hazards = HazardsContainer(config, model_file_path)
infrastructure = ingest_model(config)
response_list = calculate_response(hazards, scenario, infrastructure)
self.assertTrue(int(response_list[4][0][0]) == int(0))
# -------------------------------------------------------------------------
def test_compare_economic_loss_for_existing_models(self):
print("\n{}\n>>> Initiating sanity check aganist pre-run models...".
format('-' * 70))
conf_file_paths = [
d for d in self.models_dir.rglob('input/*config_testmdl*.json')]
model_file_paths = [
d for d in self.models_dir.rglob('input/*model_testmdl*.json')]
for conf_file_path, model_file_path in \
zip(conf_file_paths, model_file_paths):
if conf_file_path.is_file():
print("\nMatching results for: " + Path(conf_file_path).name)
config = Configuration(conf_file_path, model_file_path)
scenario = Scenario(config)
hazards = HazardsContainer(config, model_file_path)
infrastructure = ingest_model(config)
response_list = calculate_response(
hazards, scenario, infrastructure)
econ_loss_calculated = response_list[5]
stored_data_file = Path(
self.comparison_data_dir,
"economic_loss_for_" + config.SCENARIO_NAME + '.npy')
econ_loss_historic = np.load(stored_data_file)
self.assertTrue(
np.array_equal(econ_loss_calculated, econ_loss_historic),
conf_file_path
)
print("OK")
if __name__ == '__main__':
unittest.main() | 0.562657 | 0.409634 |
class ASTNode:
    """Base class for every node in the abstract syntax tree.

    Traversal uses the visitor pattern: each subclass overrides accept()
    to dispatch to the correspondingly named visit* method on the visitor.
    """
    def __init__(self):
        pass
    def accept(self, visitor):
        # Generic fallback dispatch; concrete subclasses override this.
        return visitor.visitASTNode(self)
class StmtNode(ASTNode):
    """A statement node that records where it appears in the source.

    stmtcol = statement column: the index of this statement within its
    line, alongside the raw statement text in `content`.
    """
    def __init__(self, content, line_index, stmtcol_index):
        super().__init__()
        self.content = content              # raw text of the statement
        self.line_index = line_index        # line the statement occurs on
        self.stmtcol_index = stmtcol_index  # statement slot within that line
    def accept(self, visitor):
        return visitor.visitStmtNode(self)
class GroupStmtNode(StmtNode):
    """Statement carrying a group name, its child nodes, and a key."""
    def __init__(self, content, line_index, stmtcol_index, name, children, key):
        super().__init__(content, line_index, stmtcol_index)
        self.name = name          # group identifier
        self.children = children  # nested statement nodes
        self.key = key            # key associated with the group — semantics defined by the interpreter
    def accept(self, visitor):
        return visitor.visitGroupStmtNode(self)
class ScopeStmtNode(StmtNode):
    """Statement introducing a named scope with child statements."""
    def __init__(self, content, line_index, stmtcol_index, name, children):
        super().__init__(content, line_index, stmtcol_index)
        self.name = name
        self.children = children
    def accept(self, visitor):
        return visitor.visitScopeStmtNode(self)
class ShowStmtNode(StmtNode):
    """`show` statement; `body` holds its expression/operand node."""
    def __init__(self, content, line_index, stmtcol_index, body):
        super().__init__(content, line_index, stmtcol_index)
        self.body = body
    def accept(self, visitor):
        return visitor.visitShowStmtNode(self)
class UnzipStmtNode(StmtNode):
    """`unzip` statement; `body` holds its expression/operand node."""
    def __init__(self, content, line_index, stmtcol_index, body):
        super().__init__(content, line_index, stmtcol_index)
        self.body = body
    def accept(self, visitor):
        return visitor.visitUnzipStmtNode(self)
class UseStmtNode(StmtNode):
    """`use` statement; `body` holds its expression/operand node."""
    def __init__(self, content, line_index, stmtcol_index, body):
        super().__init__(content, line_index, stmtcol_index)
        self.body = body
    def accept(self, visitor):
        return visitor.visitUseStmtNode(self)
class ValidateStmtNode(StmtNode):
    """`validate` statement; carries no payload beyond its position."""
    def __init__(self, content, line_index, stmtcol_index):
        super().__init__(content, line_index, stmtcol_index)
    def accept(self, visitor):
        return visitor.visitValidateStmtNode(self)
class InvalidateStmtNode(StmtNode):
    """`invalidate` statement; carries no payload beyond its position."""
    def __init__(self, content, line_index, stmtcol_index):
        super().__init__(content, line_index, stmtcol_index)
    def accept(self, visitor):
        return visitor.visitInvalidateStmtNode(self)
class SetStmtNode(StmtNode):
    """`set` statement; `body` holds its expression/operand node."""
    def __init__(self, content, line_index, stmtcol_index, body):
        super().__init__(content, line_index, stmtcol_index)
        self.body = body
    def accept(self, visitor):
        return visitor.visitSetStmtNode(self)
class AssignStmtNode(StmtNode):
    """Assignment statement with `left` (target) and `right` (value) nodes."""
    def __init__(self, content, line_index, stmtcol_index, left, right):
        super().__init__(content, line_index, stmtcol_index)
        self.left = left
        self.right = right
    def accept(self, visitor):
        return visitor.visitAssignStmtNode(self)
class CollapseStmtNode(StmtNode):
    """`collapse` statement carrying a sequence of pairs."""
    def __init__(self, content, line_index, stmtcol_index, pairs):
        super().__init__(content, line_index, stmtcol_index)
        self.pairs = pairs
    def accept(self, visitor):
        return visitor.visitCollapseStmtNode(self)
class ExprNode(ASTNode):
    """Base class for expression nodes."""
    def __init__(self):
        super().__init__()
    def accept(self, visitor):
        return visitor.visitExprNode(self)
class UnionExprNode(ExprNode):
    """Union of several child expressions; `keeps` parallels `children`."""
    def __init__(self, children, keeps):
        super().__init__()
        self.children = children
        self.keeps = keeps  # presumably flags which children to keep — confirm in evaluator
    def accept(self, visitor):
        return visitor.visitUnionExprNode(self)
class ConcatExprNode(ExprNode):
    """Concatenation of `left` and `right` via `connection`.

    `reverse` and `choices` modify how the concatenation is produced —
    exact semantics are defined by the evaluator, not visible here.
    """
    def __init__(self, left, right, connection, reverse, choices):
        super().__init__()
        self.left = left
        self.right = right
        self.connection = connection
        self.reverse = reverse
        self.choices = choices
    def accept(self, visitor):
        return visitor.visitConcatExprNode(self)
class FilterExprNode(ExprNode):
    """Filter expression: a `body` refined by a `trailer`."""
    def __init__(self, body, trailer):
        super().__init__()
        self.body = body
        self.trailer = trailer
    def accept(self, visitor):
        return visitor.visitFilterExprNode(self)
class FilterScriptNode(ASTNode):
    """Stand-alone filter script: a `body` plus its `trailer`."""
    def __init__(self, body, trailer):
        super().__init__()
        self.body = body
        self.trailer = trailer
    def accept(self, visitor):
        return visitor.visitFilterScriptNode(self)
class FilterTrailerNode(ASTNode):
    """Trailer of a filter: child nodes plus `common` and `out` parts."""
    def __init__(self, children, common, out):
        super().__init__()
        self.children = children
        self.common = common
        self.out = out
    def accept(self, visitor):
        return visitor.visitFilterTrailerNode(self)
class AtomExprNode(ExprNode):
    """An atom followed by zero or more trailers."""
    def __init__(self, body, trailers):
        super().__init__()
        self.body = body
        self.trailers = trailers
    def accept(self, visitor):
        return visitor.visitAtomExprNode(self)
class AtomNode(ExprNode):
    """Base class for atomic expression nodes."""
    def __init__(self):
        super().__init__()
    def accept(self, visitor):
        return visitor.visitAtomNode(self)
class SubscriptAtomNode(AtomNode):
    """Atom wrapping a single subscript node."""
    def __init__(self, subscript):
        super().__init__()
        self.subscript = subscript
    def accept(self, visitor):
        return visitor.visitSubscriptAtomNode(self)
class IndividualAtomNode(AtomNode):
    """Atom carrying a sequence of pairs (individual form)."""
    def __init__(self, pairs):
        super().__init__()
        self.pairs = pairs
    def accept(self, visitor):
        return visitor.visitIndividualAtomNode(self)
class ListAtomNode(AtomNode):
    """Atom describing a list, identified only by its `length`."""
    def __init__(self, length):
        super().__init__()
        self.length = length
    def accept(self, visitor):
        return visitor.visitListAtomNode(self)
class GroupAtomNode(AtomNode):
    """Atom carrying a sequence of pairs (group form)."""
    def __init__(self, pairs):
        super().__init__()
        self.pairs = pairs
    def accept(self, visitor):
        return visitor.visitGroupAtomNode(self)
class NameAtomNode(AtomNode):
    """Atom referring to something by `name`."""
    def __init__(self, name):
        super().__init__()
        self.name = name
    def accept(self, visitor):
        return visitor.visitNameAtomNode(self)
class ContentAtomNode(AtomNode):
    """Atom holding literal `content`."""
    def __init__(self, content):
        super().__init__()
        self.content = content
    def accept(self, visitor):
        return visitor.visitContentAtomNode(self)
class SubscriptNode(ASTNode):
    """Base class for subscript nodes."""
    def __init__(self):
        super().__init__()
    def accept(self, visitor):
        return visitor.visitSubscriptNode(self)
class NameSubscriptNode(SubscriptNode):
    """Subscript by `name`."""
    def __init__(self, name):
        super().__init__()
        self.name = name
    def accept(self, visitor):
        return visitor.visitNameSubscriptNode(self)
class IntegerSubscriptNode(SubscriptNode):
    """Subscript by integer `index`."""
    def __init__(self, index):
        super().__init__()
        self.index = index
    def accept(self, visitor):
        return visitor.visitIntegerSubscriptNode(self)
class StringSubscriptNode(SubscriptNode):
def __init__(self, key):
super().__init__()
self.key = key
def accept(self, visitor):
return visitor.visitStringSubscriptNode(self) | naming-protocol/ast/ast.py | class ASTNode:
def __init__(self):
pass
def accept(self, visitor):
return visitor.visitASTNode(self)
class StmtNode(ASTNode):
def __init__(self, content, line_index, stmtcol_index):
"""
stmtcol = statement column
"""
super().__init__()
self.content = content
self.line_index = line_index
self.stmtcol_index = stmtcol_index
def accept(self, visitor):
return visitor.visitStmtNode(self)
class GroupStmtNode(StmtNode):
def __init__(self, content, line_index, stmtcol_index, name, children, key):
super().__init__(content, line_index, stmtcol_index)
self.name = name
self.children = children
self.key = key
def accept(self, visitor):
return visitor.visitGroupStmtNode(self)
class ScopeStmtNode(StmtNode):
def __init__(self, content, line_index, stmtcol_index, name, children):
super().__init__(content, line_index, stmtcol_index)
self.name = name
self.children = children
def accept(self, visitor):
return visitor.visitScopeStmtNode(self)
class ShowStmtNode(StmtNode):
def __init__(self, content, line_index, stmtcol_index, body):
super().__init__(content, line_index, stmtcol_index)
self.body = body
def accept(self, visitor):
return visitor.visitShowStmtNode(self)
class UnzipStmtNode(StmtNode):
def __init__(self, content, line_index, stmtcol_index, body):
super().__init__(content, line_index, stmtcol_index)
self.body = body
def accept(self, visitor):
return visitor.visitUnzipStmtNode(self)
class UseStmtNode(StmtNode):
def __init__(self, content, line_index, stmtcol_index, body):
super().__init__(content, line_index, stmtcol_index)
self.body = body
def accept(self, visitor):
return visitor.visitUseStmtNode(self)
class ValidateStmtNode(StmtNode):
def __init__(self, content, line_index, stmtcol_index):
super().__init__(content, line_index, stmtcol_index)
def accept(self, visitor):
return visitor.visitValidateStmtNode(self)
class InvalidateStmtNode(StmtNode):
def __init__(self, content, line_index, stmtcol_index):
super().__init__(content, line_index, stmtcol_index)
def accept(self, visitor):
return visitor.visitInvalidateStmtNode(self)
class SetStmtNode(StmtNode):
def __init__(self, content, line_index, stmtcol_index, body):
super().__init__(content, line_index, stmtcol_index)
self.body = body
def accept(self, visitor):
return visitor.visitSetStmtNode(self)
class AssignStmtNode(StmtNode):
def __init__(self, content, line_index, stmtcol_index, left, right):
super().__init__(content, line_index, stmtcol_index)
self.left = left
self.right = right
def accept(self, visitor):
return visitor.visitAssignStmtNode(self)
class CollapseStmtNode(StmtNode):
def __init__(self, content, line_index, stmtcol_index, pairs):
super().__init__(content, line_index, stmtcol_index)
self.pairs = pairs
def accept(self, visitor):
return visitor.visitCollapseStmtNode(self)
class ExprNode(ASTNode):
def __init__(self):
super().__init__()
def accept(self, visitor):
return visitor.visitExprNode(self)
class UnionExprNode(ExprNode):
def __init__(self, children, keeps):
super().__init__()
self.children = children
self.keeps = keeps
def accept(self, visitor):
return visitor.visitUnionExprNode(self)
class ConcatExprNode(ExprNode):
def __init__(self, left, right, connection, reverse, choices):
super().__init__()
self.left = left
self.right = right
self.connection = connection
self.reverse = reverse
self.choices = choices
def accept(self, visitor):
return visitor.visitConcatExprNode(self)
class FilterExprNode(ExprNode):
def __init__(self, body, trailer):
super().__init__()
self.body = body
self.trailer = trailer
def accept(self, visitor):
return visitor.visitFilterExprNode(self)
class FilterScriptNode(ASTNode):
def __init__(self, body, trailer):
super().__init__()
self.body = body
self.trailer = trailer
def accept(self, visitor):
return visitor.visitFilterScriptNode(self)
class FilterTrailerNode(ASTNode):
def __init__(self, children, common, out):
super().__init__()
self.children = children
self.common = common
self.out = out
def accept(self, visitor):
return visitor.visitFilterTrailerNode(self)
class AtomExprNode(ExprNode):
def __init__(self, body, trailers):
super().__init__()
self.body = body
self.trailers = trailers
def accept(self, visitor):
return visitor.visitAtomExprNode(self)
class AtomNode(ExprNode):
def __init__(self):
super().__init__()
def accept(self, visitor):
return visitor.visitAtomNode(self)
class SubscriptAtomNode(AtomNode):
def __init__(self, subscript):
super().__init__()
self.subscript = subscript
def accept(self, visitor):
return visitor.visitSubscriptAtomNode(self)
class IndividualAtomNode(AtomNode):
def __init__(self, pairs):
super().__init__()
self.pairs = pairs
def accept(self, visitor):
return visitor.visitIndividualAtomNode(self)
class ListAtomNode(AtomNode):
def __init__(self, length):
super().__init__()
self.length = length
def accept(self, visitor):
return visitor.visitListAtomNode(self)
class GroupAtomNode(AtomNode):
def __init__(self, pairs):
super().__init__()
self.pairs = pairs
def accept(self, visitor):
return visitor.visitGroupAtomNode(self)
class NameAtomNode(AtomNode):
def __init__(self, name):
super().__init__()
self.name = name
def accept(self, visitor):
return visitor.visitNameAtomNode(self)
class ContentAtomNode(AtomNode):
def __init__(self, content):
super().__init__()
self.content = content
def accept(self, visitor):
return visitor.visitContentAtomNode(self)
class SubscriptNode(ASTNode):
def __init__(self):
super().__init__()
def accept(self, visitor):
return visitor.visitSubscriptNode(self)
class NameSubscriptNode(SubscriptNode):
def __init__(self, name):
super().__init__()
self.name = name
def accept(self, visitor):
return visitor.visitNameSubscriptNode(self)
class IntegerSubscriptNode(SubscriptNode):
def __init__(self, index):
super().__init__()
self.index = index
def accept(self, visitor):
return visitor.visitIntegerSubscriptNode(self)
class StringSubscriptNode(SubscriptNode):
def __init__(self, key):
super().__init__()
self.key = key
def accept(self, visitor):
return visitor.visitStringSubscriptNode(self) | 0.736211 | 0.310838 |
usage = """
Usage:
moses-detokenizer [options] <lang> [<inputfile> [<outputfile>]]
moses-detokenizer --selftest [--verbose]
Options:
--selftest, -t Run selftests.
--verbose, -v Be more verbose.
2017, <NAME> <<EMAIL>>
"""
from docopt import docopt
from openfile import openfile
from os import path
from toolwrapper import ToolWrapper
import sys
class MosesDetokenizer(ToolWrapper):
    """A module for interfacing with ``detokenizer.perl`` from Moses.

    This class communicates with detokenizer.perl process via pipes. When the
    MosesDetokenizer object is no longer needed, the close() method should be
    called to free system resources. The class supports the context manager
    interface. If used in a with statement, the close() method is invoked
    automatically.

    >>> detokenize = MosesDetokenizer('en')
    >>> detokenize(['Hello', 'World', '!'])
    'Hello World!'
    """
    def __init__(self, lang="en"):
        self.lang = lang
        # The perl script is expected to live next to this module.
        program = path.join(path.dirname(__file__), "detokenizer.perl")
        # -q = quiet
        # -b = disable output buffering
        argv = ["perl", program, "-q", "-b", "-l", self.lang]
        super().__init__(argv)
    def __str__(self):
        return "MosesDetokenizer(lang=\"{lang}\")".format(lang=self.lang)
    def __call__(self, sentence):
        """Detokenizes a single sentence.

        `sentence` is a list or tuple of token strings; the joined tokens
        are sent to the perl subprocess and the detokenized line is
        returned. Newline characters are not allowed in tokens.
        """
        assert isinstance(sentence, (list, tuple))
        assert all(isinstance(token, str) for token in sentence)
        assert all("\n" not in token for token in sentence)
        # An empty sentence never touches the subprocess.
        if not sentence:
            return ""
        self.writeline(" ".join(sentence))
        return self.readline()
def main():
    """Command-line entry point: detokenize input lines to the output file."""
    opts = docopt(usage)
    if opts["--selftest"]:
        import doctest
        import mosestokenizer.detokenizer
        doctest.testmod(mosestokenizer.detokenizer)
        # Selftest-only invocation: nothing further to do.
        if not opts["<lang>"]:
            sys.exit(0)
    detokenize = MosesDetokenizer(opts["<lang>"])
    infile = openfile(opts["<inputfile>"])
    outfile = openfile(opts["<outputfile>"], "wt")
    with infile, outfile:
        for line in infile:
            print(detokenize(line.split()), file=outfile)
if __name__ == "__main__":
main() | github/preprocess/sockeye/code/MOSES/scripts/tokenizer/mosestokenizer/detokenizer.py | usage = """
Usage:
moses-detokenizer [options] <lang> [<inputfile> [<outputfile>]]
moses-detokenizer --selftest [--verbose]
Options:
--selftest, -t Run selftests.
--verbose, -v Be more verbose.
2017, <NAME> <<EMAIL>>
"""
from docopt import docopt
from openfile import openfile
from os import path
from toolwrapper import ToolWrapper
import sys
class MosesDetokenizer(ToolWrapper):
"""A module for interfacing with ``detokenizer.perl`` from Moses.
This class communicates with detokenizer.perl process via pipes. When the
MosesDetokenizer object is no longer needed, the close() method should be
called to free system resources. The class supports the context manager
interface. If used in a with statement, the close() method is invoked
automatically.
>>> detokenize = MosesDetokenizer('en')
>>> detokenize('Hello', 'World', '!')
'Hello World!'
"""
def __init__(self, lang="en"):
self.lang = lang
program = path.join(path.dirname(__file__), "detokenizer.perl")
# -q = quiet
# -b = disable output buffering
argv = ["perl", program, "-q", "-b", "-l", self.lang]
super().__init__(argv)
def __str__(self):
return "MosesDetokenizer(lang=\"{lang}\")".format(lang=self.lang)
def __call__(self, sentence):
"""Detokenizes a single sentence.
Newline characters are not allowed in tokens.
"""
assert isinstance(sentence, (list, tuple))
assert all(isinstance(token, str) for token in sentence)
assert all("\n" not in token for token in sentence)
if not sentence:
return ""
self.writeline(" ".join(sentence))
return self.readline()
def main():
args = docopt(usage)
if args["--selftest"]:
import doctest
import mosestokenizer.detokenizer
doctest.testmod(mosestokenizer.detokenizer)
if not args["<lang>"]:
sys.exit(0)
detokenize = MosesDetokenizer(args["<lang>"])
inputfile = openfile(args["<inputfile>"])
outputfile = openfile(args["<outputfile>"], "wt")
with inputfile, outputfile:
for line in inputfile:
print(detokenize(line.split()), file=outputfile)
if __name__ == "__main__":
main() | 0.469277 | 0.24135 |
from typing import Callable, Dict, List, Tuple, Type, Union, Text
import cv2
import numpy as np
from matplotlib import pyplot as plt
from functools import reduce
Image = Type[np.ndarray]
FnWithArgs = Tuple[Callable[..., Image], Dict[Text, int]]
FnWithoutArgs = Tuple[Callable[[Image], Image]]
FunctionList = List[Union[FnWithArgs, FnWithoutArgs]]
class Preprocessor:
    """Collection of OpenCV-based image preprocessing operations.

    All methods take and return numpy arrays; `apply` runs a pipeline of
    (callable, kwargs) steps over one or more images.
    """

    @staticmethod
    def apply(pipeline: FunctionList, images: Union[Image, List[Image]]) -> List[Image]:
        """Applies a preprocessing pipeline to a list of images.

        Each pipeline entry is either (fn,) or (fn, kwargs). A single
        image is accepted and wrapped into a one-element list.
        """
        if isinstance(images, np.ndarray):
            images = [images]

        def apply_fn(obj, fun):
            # BUG FIX: FnWithoutArgs entries are 1-tuples, so indexing
            # fun[1] unconditionally raised IndexError for them.
            if len(fun) > 1 and fun[1]:
                return fun[0](obj, **fun[1])
            return fun[0](obj)

        return [reduce(apply_fn, pipeline, image) for image in images]

    @staticmethod
    def bilateral(image, diameter=9, sigma_color=150, sigma_space=150, times=1):
        """Apply a bilateral filter `times` times in succession."""
        filtered = image
        for _ in range(times):
            # BUG FIX: filter the running result rather than the original
            # image, so `times` > 1 actually compounds the filtering.
            filtered = cv2.bilateralFilter(
                filtered, d=diameter, sigmaColor=sigma_color, sigmaSpace=sigma_space
            )
        return filtered

    @classmethod
    def median_filter(cls, img, ksize, times):
        """Apply a median blur `times` times in succession."""
        filtered = img
        for _ in range(times):
            # BUG FIX: blur the running result instead of the input each pass.
            filtered = cv2.medianBlur(filtered, ksize=ksize)
        return filtered

    @classmethod
    def errosion(cls, img, ksize):
        """Erode with a ksize x ksize rectangular structuring element."""
        # BUG FIX: cv2.erode expects a kernel array, not an integer size.
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (ksize, ksize))
        return cv2.erode(img, kernel=kernel)

    @classmethod
    def dilatation(cls, img, ksize):
        """Dilate with a ksize x ksize rectangular structuring element."""
        # BUG FIX: cv2.dilate expects a kernel array, not an integer size.
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (ksize, ksize))
        return cv2.dilate(img, kernel=kernel)

    @classmethod
    def top_hat_processing(cls, img, ksize):
        """Morphological top-hat transform with a rectangular kernel."""
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, ksize=(ksize, ksize))
        return cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel=kernel)

    @classmethod
    def laplacian(cls, img):
        """Laplacian edge response (64-bit float output preserves sign)."""
        return cv2.Laplacian(img, ddepth=cv2.CV_64F)

    @classmethod
    def show_image(cls, img):
        """Display the image in grayscale via matplotlib (blocking)."""
        plt.imshow(img, cmap="gray")
        plt.show()

    @staticmethod
    def sobel(img, scale=1, delta=0):
        """Combined Sobel gradient magnitude (x and y, equally weighted)."""
        ddepth = cv2.CV_16S
        grad_x = cv2.Sobel(img, ddepth, 1, 0, ksize=3, scale=scale, delta=delta)
        grad_y = cv2.Sobel(img, ddepth, 0, 1, ksize=3, scale=scale, delta=delta)
        abs_grad_x = cv2.convertScaleAbs(grad_x)
        abs_grad_y = cv2.convertScaleAbs(grad_y)
        return cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)

    @classmethod
    def apply_scharr(cls, img, scale, delta):
        """Combined Scharr gradient magnitude (x and y, equally weighted)."""
        ddepth = cv2.CV_16S
        grad_x = cv2.Scharr(
            img, ddepth, 1, 0, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT
        )
        grad_y = cv2.Scharr(
            img, ddepth, 0, 1, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT
        )
        abs_grad_x = cv2.convertScaleAbs(grad_x)
        abs_grad_y = cv2.convertScaleAbs(grad_y)
        return cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
if __name__ == "__main__":
    img = cv2.imread("../data/Radiographs/01.tif", flags=cv2.IMREAD_GRAYSCALE)
    # BUG FIX: the class defines `bilateral` and `sobel`; the previously
    # referenced `bilateral_filter` / `apply_sobel` do not exist and
    # raised AttributeError when the script was run.
    img = Preprocessor.bilateral(
        img, diameter=9, sigma_color=150, sigma_space=150, times=1
    )
    # img = Preprocessor.median_filter(img, ksize=5, times=5)
    # img = Preprocessor.top_hat_processing(img, ksize=150)
    img = Preprocessor.sobel(img, scale=1, delta=0)
Preprocessor.show_image(img) | src/data_preprocessing.py | from typing import Callable, Dict, List, Tuple, Type, Union, Text
import cv2
import numpy as np
from matplotlib import pyplot as plt
from functools import reduce
Image = Type[np.ndarray]
FnWithArgs = Tuple[Callable[..., Image], Dict[Text, int]]
FnWithoutArgs = Tuple[Callable[[Image], Image]]
FunctionList = List[Union[FnWithArgs, FnWithoutArgs]]
class Preprocessor:
@staticmethod
def apply(pipeline: FunctionList, images: Union[Image, List[Image]]) -> List[Image]:
"""Applies a preprocessing function to a list of images"""
if isinstance(images, np.ndarray):
images = [images]
def apply_fn(obj, fun):
if fun[1]:
return fun[0](obj, **fun[1])
return fun[0](obj)
return [reduce(apply_fn, pipeline, image) for image in images]
@staticmethod
def bilateral(image, diameter=9, sigma_color=150, sigma_space=150, times=1):
filtered = image
for _ in range(times):
filtered = cv2.bilateralFilter(
image, d=diameter, sigmaColor=sigma_color, sigmaSpace=sigma_space
)
return filtered
@classmethod
def median_filter(cls, img, ksize, times):
filtered = img
for i in range(times):
filtered = cv2.medianBlur(img, ksize=ksize)
return filtered
@classmethod
def errosion(cls, img, ksize):
return cv2.erode(img, kernel=ksize)
@classmethod
def dilatation(cls, img, ksize):
return cv2.dilate(img, kernel=ksize)
@classmethod
def top_hat_processing(cls, img, ksize):
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, ksize=(ksize, ksize))
return cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel=kernel)
@classmethod
def laplacian(cls, img):
return cv2.Laplacian(img, ddepth=cv2.CV_64F)
@classmethod
def show_image(cls, img):
plt.imshow(img, cmap="gray")
plt.show()
@staticmethod
def sobel(img, scale=1, delta=0):
ddepth = cv2.CV_16S
grad_x = cv2.Sobel(img, ddepth, 1, 0, ksize=3, scale=scale, delta=delta)
grad_y = cv2.Sobel(img, ddepth, 0, 1, ksize=3, scale=scale, delta=delta)
abs_grad_x = cv2.convertScaleAbs(grad_x)
abs_grad_y = cv2.convertScaleAbs(grad_y)
return cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
@classmethod
def apply_scharr(cls, img, scale, delta):
ddepth = cv2.CV_16S
grad_x = cv2.Scharr(
img, ddepth, 1, 0, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT
)
grad_y = cv2.Scharr(
img, ddepth, 0, 1, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT
)
abs_grad_x = cv2.convertScaleAbs(grad_x)
abs_grad_y = cv2.convertScaleAbs(grad_y)
return cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
if __name__ == "__main__":
img = cv2.imread("../data/Radiographs/01.tif", flags=cv2.IMREAD_GRAYSCALE)
img = Preprocessor.bilateral_filter(
img, diameter=9, sigma_color=150, sigma_space=150, times=1
)
# img = Preprocessor.median_filter(img, ksize=5, times=5)
# img = Preprocessor.top_hat_processing(img, ksize=150)
img = Preprocessor.apply_sobel(img, scale=1, delta=0)
Preprocessor.show_image(img) | 0.860823 | 0.544801 |
import os
import sys
import subprocess
from git import GitCommandError
from optparse import OptionParser
from sh import mktemp, cd, rm # pylint: disable=E0611
from functools import partial
try:
from sh import git_dch as gbp_dch # pylint: disable=E0611
gbp_buildpackage = ['git-buildpackage']
except ImportError:
# In newer versions of git-buildpackage the executables have changed.
# Instead of having various git-* executables, there is only a gbp one,
# which expects the command (dch, buildpackage, etc) as the first argument.
from sh import gbp # pylint: disable=E0611
gbp_dch = partial(gbp, 'dch')
gbp_buildpackage = ['gbp', 'buildpackage']
from devflow import versioning
from devflow import utils
from devflow import BRANCH_TYPES
AVAILABLE_MODES = ["release", "snapshot"]
DESCRIPTION = """Tool for automatic build of Debian packages.
%(prog)s is a helper script for automatic build of Debian packages from
repositories that follow the `git flow` development model
<http://nvie.com/posts/a-successful-git-branching-model/>.
This script must run from inside a clean git repository and will perform the
following steps:
* Clone your repository to a temporary directory
* Merge the current branch with the corresponding debian branch
* Compute the version of the new package and update the python
version files
* Create a new entry in debian/changelog, using `git-dch`
* Create the Debian packages, using `git-buildpackage`
* Tag the appropriate branches if in `release` mode
%(prog)s will work with the packages that are declared in `devflow.conf'
file, which must exist in the top-level directory of the git repository.
"""
def print_help(prog):
    """Print the long tool description with the program name substituted."""
    # Parenthesised single-argument form prints identically under both
    # Python 2 and Python 3, unlike the bare print statement it replaces.
    print(DESCRIPTION % {"prog": prog})
def main():
    """Drive an automatic Debian package build (see DESCRIPTION).

    Parses command-line options, clones the current git repository to a
    scratch directory, merges the working branch into its debian branch,
    bumps versions and the changelog, builds packages with
    git-buildpackage, and in `release` mode tags the branches.
    """
    from devflow.version import __version__  # pylint: disable=E0611,F0401
    parser = OptionParser(usage="usage: %prog [options] mode",
                          version="devflow %s" % __version__,
                          add_help_option=False)
    parser.add_option("-h", "--help",
                      action="store_true",
                      default=False,
                      help="show this help message")
    parser.add_option("-k", "--keep-repo",
                      action="store_true",
                      dest="keep_repo",
                      default=False,
                      help="Do not delete the cloned repository")
    parser.add_option("-b", "--build-dir",
                      dest="build_dir",
                      default=None,
                      help="Directory to store created packages")
    parser.add_option("-r", "--repo-dir",
                      dest="repo_dir",
                      default=None,
                      help="Directory to clone repository")
    parser.add_option("-d", "--dirty",
                      dest="force_dirty",
                      default=False,
                      action="store_true",
                      help="Do not check if working directory is dirty")
    parser.add_option("-c", "--config-file",
                      dest="config_file",
                      help="Override default configuration file")
    parser.add_option("--no-sign",
                      dest="sign",
                      action="store_false",
                      default=True,
                      help="Do not sign the packages")
    parser.add_option("--key-id",
                      dest="keyid",
                      help="Use this keyid for gpg signing")
    parser.add_option("--dist",
                      dest="dist",
                      default=None,
                      help="Force distribution in Debian changelog")
    parser.add_option("-S", "--source-only",
                      dest="source_only",
                      default=False,
                      action="store_true",
                      help="Specifies a source-only build, no binary packages"
                           " need to be made.")
    parser.add_option("--debian-branch",
                      dest="debian_branch",
                      default=None,
                      help="Use this debian branch, instead of"
                           "auto-discovering the debian branch to use")
    parser.add_option("--push-back",
                      dest="push_back",
                      default=False,
                      action="store_true",
                      help="Automatically push branches and tags to repo.")
    parser.add_option("--color",
                      dest="color_output",
                      default="auto",
                      help="Enable/disable colored output. Default mode is"
                           " auto, available options are yes/no")
    (options, args) = parser.parse_args()

    # Decide whether to colorize output; "auto" means only when stdout
    # is a terminal.
    if options.color_output == "yes":
        use_colors = True
    elif options.color_output == "no":
        use_colors = False
    else:
        use_colors = sys.stdout.isatty()

    red = lambda x: x
    green = lambda x: x

    if use_colors:
        try:
            import colors
            red = colors.red
            green = colors.green
        # NOTE(review): if the `colors` package is absent, `import colors`
        # raises ImportError, which this handler does not catch — confirm
        # whether ImportError should be included here.
        except AttributeError:
            pass

    # NOTE(review): print_red is defined but never used in this function.
    print_red = lambda x: sys.stdout.write(red(x) + "\n")
    print_green = lambda x: sys.stdout.write(green(x) + "\n")

    if options.help:
        print_help(parser.get_prog_name())
        parser.print_help()
        return

    # Get build mode
    try:
        mode = args[0]
    except IndexError:
        mode = utils.get_build_mode()
    if mode not in AVAILABLE_MODES:
        raise ValueError(red("Invalid argument! Mode must be one: %s" %
                         ", ".join(AVAILABLE_MODES)))

    # Load the repository
    original_repo = utils.get_repository()

    # Check that repository is clean
    toplevel = original_repo.working_dir
    if original_repo.is_dirty() and not options.force_dirty:
        raise RuntimeError(red("Repository %s is dirty." % toplevel))

    # Get packages from configuration file
    config = utils.get_config(options.config_file)
    packages = config['packages'].keys()
    print_green("Will build the following packages:\n" + "\n".join(packages))

    # Get current branch name and type and check if it is a valid one
    branch = original_repo.head.reference.name
    branch = utils.undebianize(branch)
    branch_type_str = utils.get_branch_type(branch)

    if branch_type_str not in BRANCH_TYPES.keys():
        allowed_branches = ", ".join(BRANCH_TYPES.keys())
        raise ValueError("Malformed branch name '%s', cannot classify as"
                         " one of %s" % (branch, allowed_branches))

    # Fix needed environment variables
    v = utils.get_vcs_info()
    os.environ["DEVFLOW_BUILD_MODE"] = mode
    os.environ["DEBFULLNAME"] = v.name
    os.environ["DEBEMAIL"] = v.email

    # Check that base version file and branch are correct
    versioning.get_python_version()

    # Get the debian branch
    if options.debian_branch:
        debian_branch = options.debian_branch
    else:
        debian_branch = utils.get_debian_branch(branch)
    origin_debian = "origin/" + debian_branch

    # Clone the repo
    repo_dir = options.repo_dir or create_temp_directory("df-repo")
    repo_dir = os.path.abspath(repo_dir)
    repo = original_repo.clone(repo_dir, branch=branch)
    print_green("Cloned repository to '%s'." % repo_dir)

    build_dir = options.build_dir or create_temp_directory("df-build")
    build_dir = os.path.abspath(build_dir)
    print_green("Build directory: '%s'" % build_dir)

    # Create the debian branch
    repo.git.branch(debian_branch, origin_debian)
    print_green("Created branch '%s' to track '%s'" %
                (debian_branch, origin_debian))

    # Go to debian branch
    repo.git.checkout(debian_branch)
    print_green("Changed to branch '%s'" % debian_branch)

    # Merge with starting branch
    repo.git.merge(branch)
    print_green("Merged branch '%s' into '%s'" % (branch, debian_branch))

    # Compute python and debian version
    cd(repo_dir)
    python_version = versioning.get_python_version()
    debian_version = versioning.\
        debian_version_from_python_version(python_version)
    print_green("The new debian version will be: '%s'" % debian_version)

    # Update the version files
    versioning.update_version()

    # Decide how (and whether) to sign git tags.
    if not options.sign:
        sign_tag_opt = None
    elif options.keyid:
        sign_tag_opt = "-u=%s" % options.keyid
    elif mode == "release":
        sign_tag_opt = "-s"
    else:
        sign_tag_opt = None

    # Tag branch with python version
    branch_tag = python_version
    tag_message = "%s version %s" % (mode.capitalize(), python_version)
    try:
        repo.git.tag(branch_tag, branch, sign_tag_opt, "-m %s" % tag_message)
    except GitCommandError:
        # Tag may already exist, if only the debian branch has changed
        pass
    upstream_tag = "upstream/" + branch_tag
    repo.git.tag(upstream_tag, branch)

    # Update changelog
    dch = gbp_dch("--debian-branch=%s" % debian_branch,
                  "--git-author",
                  "--ignore-regex=\".*\"",
                  "--multimaint-merge",
                  "--since=HEAD",
                  "--new-version=%s" % debian_version)
    print_green("Successfully ran '%s'" % " ".join(dch.cmd))

    if options.dist is not None:
        distribution = options.dist
    elif mode == "release":
        distribution = utils.get_distribution_codename()
    else:
        distribution = "unstable"

    # Stamp the distribution and the build mode into the fresh
    # changelog entry written by git-dch.
    f = open("debian/changelog", 'r+')
    lines = f.readlines()
    lines[0] = lines[0].replace("UNRELEASED", distribution)
    lines[2] = lines[2].replace("UNRELEASED", "%s build" % mode)
    f.seek(0)
    f.writelines(lines)
    f.close()

    if mode == "release":
        subprocess.check_call(['editor', "debian/changelog"])

    # Add changelog to INDEX
    repo.git.add("debian/changelog")
    # Commit Changes
    repo.git.commit("-s", "debian/changelog",
                    m="Bump version to %s" % debian_version)

    # Tag debian branch
    debian_branch_tag = "debian/" + utils.version_to_tag(debian_version)
    tag_message = "%s version %s" % (mode.capitalize(), debian_version)
    if mode == "release":
        repo.git.tag(debian_branch_tag, sign_tag_opt, "-m %s" % tag_message)

    # Create debian packages
    cd(repo_dir)
    version_files = []
    for _, pkg_info in config['packages'].items():
        if pkg_info.get("version_file"):
            version_files.extend(pkg_info.as_list('version_file'))
    # Add version.py files to repo
    repo.git.add("-f", *version_files)

    # Export version info to debuild environment
    os.environ["DEB_DEVFLOW_DEBIAN_VERSION"] = debian_version
    os.environ["DEB_DEVFLOW_VERSION"] = python_version
    args = list(gbp_buildpackage)
    args.extend(["--git-export-dir=%s" % build_dir,
                 "--git-upstream-branch=%s" % branch,
                 "--git-debian-branch=%s" % debian_branch,
                 "--git-export=INDEX",
                 "--git-ignore-new",
                 "-sa",
                 "--source-option=--auto-commit",
                 "--git-upstream-tag=%s" % upstream_tag])
    if options.source_only:
        args.append("-S")
    if not options.sign:
        args.extend(["-uc", "-us"])
    elif options.keyid:
        args.append("-k\"'%s'\"" % options.keyid)
    subprocess.check_call(args)

    # Remove cloned repo
    if mode != 'release' and not options.keep_repo:
        print_green("Removing cloned repo '%s'." % repo_dir)
        rm("-r", repo_dir)

    # Print final info
    info = (("Version", debian_version),
            ("Upstream branch", branch),
            ("Upstream tag", branch_tag),
            ("Debian branch", debian_branch),
            ("Debian tag", debian_branch_tag),
            ("Repository directory", repo_dir),
            ("Packages directory", build_dir))
    print_green("\n".join(["%s: %s" % (name, val) for name, val in info]))

    # Print help message
    if mode == "release":
        origin = original_repo.remote().url
        repo.create_remote("original_origin", origin)
        print_green("Created remote 'original_origin' for the repository '%s'"
                    % origin)
        print_green("To update repositories '%s' and '%s' go to '%s' and run:"
                    % (toplevel, origin, repo_dir))
        for remote in ['origin', 'original_origin']:
            objects = [debian_branch, branch_tag, debian_branch_tag]
            print_green("git push %s %s" % (remote, " ".join(objects)))
        if options.push_back:
            objects = [debian_branch, branch_tag, debian_branch_tag]
            repo.git.push("origin", *objects)
            print_green("Automatically updated origin repo.")
def create_temp_directory(suffix):
    """Create and return the path of a fresh temporary directory.

    The directory name starts with ``suffix`` followed by a random part.
    Callers are responsible for removing the directory.
    """
    # tempfile.mkdtemp is race-free, portable and needs no external
    # mktemp(1) binary (the sh-based version also hard-coded /tmp).
    import tempfile
    return tempfile.mkdtemp(prefix=suffix + "-")
# Script entry point: exit with main()'s return value.
# (The original line was corrupted by fused dataset metadata.)
if __name__ == "__main__":
    sys.exit(main())
import sys
import subprocess
from git import GitCommandError
from optparse import OptionParser
from sh import mktemp, cd, rm # pylint: disable=E0611
from functools import partial
try:
from sh import git_dch as gbp_dch # pylint: disable=E0611
gbp_buildpackage = ['git-buildpackage']
except ImportError:
# In newer versions of git-buildpackage the executables have changed.
# Instead of having various git-* executables, there is only a gbp one,
# which expects the command (dch, buildpackage, etc) as the first argument.
from sh import gbp # pylint: disable=E0611
gbp_dch = partial(gbp, 'dch')
gbp_buildpackage = ['gbp', 'buildpackage']
from devflow import versioning
from devflow import utils
from devflow import BRANCH_TYPES
# Build modes accepted on the command line.
AVAILABLE_MODES = ["release", "snapshot"]
DESCRIPTION = """Tool for automatic build of Debian packages.
%(prog)s is a helper script for automatic build of Debian packages from
repositories that follow the `git flow` development model
<http://nvie.com/posts/a-successful-git-branching-model/>.
This script must run from inside a clean git repository and will perform the
following steps:
* Clone your repository to a temporary directory
* Merge the current branch with the corresponding debian branch
* Compute the version of the new package and update the python
version files
* Create a new entry in debian/changelog, using `git-dch`
* Create the Debian packages, using `git-buildpackage`
* Tag the appropriate branches if in `release` mode
%(prog)s will work with the packages that are declared in `devflow.conf'
file, which must exist in the top-level directory of the git repository.
"""
def print_help(prog):
    """Print the tool description with *prog* substituted for %(prog)s."""
    # print() works on both Python 2 and 3; the original Python-2-only
    # `print X` statement is a SyntaxError under Python 3.
    print(DESCRIPTION % {"prog": prog})
def main():
    """Build Debian packages for the current git-flow branch.

    Parses command-line options, clones the working repository to a
    scratch directory, merges the upstream branch into its debian
    branch, bumps version files and debian/changelog, and runs
    git-buildpackage.  In 'release' mode the debian branch and tags are
    signed/created and can optionally be pushed back to origin.
    Returns None on success; raises ValueError/RuntimeError on bad
    input or a dirty repository.
    """
    from devflow.version import __version__  # pylint: disable=E0611,F0401
    parser = OptionParser(usage="usage: %prog [options] mode",
                          version="devflow %s" % __version__,
                          add_help_option=False)
    parser.add_option("-h", "--help",
                      action="store_true",
                      default=False,
                      help="show this help message")
    parser.add_option("-k", "--keep-repo",
                      action="store_true",
                      dest="keep_repo",
                      default=False,
                      help="Do not delete the cloned repository")
    parser.add_option("-b", "--build-dir",
                      dest="build_dir",
                      default=None,
                      help="Directory to store created packages")
    parser.add_option("-r", "--repo-dir",
                      dest="repo_dir",
                      default=None,
                      help="Directory to clone repository")
    parser.add_option("-d", "--dirty",
                      dest="force_dirty",
                      default=False,
                      action="store_true",
                      help="Do not check if working directory is dirty")
    parser.add_option("-c", "--config-file",
                      dest="config_file",
                      help="Override default configuration file")
    parser.add_option("--no-sign",
                      dest="sign",
                      action="store_false",
                      default=True,
                      help="Do not sign the packages")
    parser.add_option("--key-id",
                      dest="keyid",
                      help="Use this keyid for gpg signing")
    parser.add_option("--dist",
                      dest="dist",
                      default=None,
                      help="Force distribution in Debian changelog")
    parser.add_option("-S", "--source-only",
                      dest="source_only",
                      default=False,
                      action="store_true",
                      help="Specifies a source-only build, no binary packages"
                      " need to be made.")
    parser.add_option("--debian-branch",
                      dest="debian_branch",
                      default=None,
                      help="Use this debian branch, instead of"
                      "auto-discovering the debian branch to use")
    parser.add_option("--push-back",
                      dest="push_back",
                      default=False,
                      action="store_true",
                      help="Automatically push branches and tags to repo.")
    parser.add_option("--color",
                      dest="color_output",
                      default="auto",
                      help="Enable/disable colored output. Default mode is"
                      " auto, available options are yes/no")
    (options, args) = parser.parse_args()
    # Colored output: "auto" enables colors only on a TTY.
    if options.color_output == "yes":
        use_colors = True
    elif options.color_output == "no":
        use_colors = False
    else:
        use_colors = sys.stdout.isatty()
    red = lambda x: x
    green = lambda x: x
    if use_colors:
        try:
            import colors
            red = colors.red
            green = colors.green
        # NOTE(review): an absent 'colors' package raises ImportError,
        # which is NOT caught here -- confirm whether AttributeError is
        # intentional (e.g. a stub module without red/green).
        except AttributeError:
            pass
    # print_red is currently unused but kept for symmetry with print_green.
    print_red = lambda x: sys.stdout.write(red(x) + "\n")
    print_green = lambda x: sys.stdout.write(green(x) + "\n")
    if options.help:
        print_help(parser.get_prog_name())
        parser.print_help()
        return
    # Get build mode
    try:
        mode = args[0]
    except IndexError:
        mode = utils.get_build_mode()
    if mode not in AVAILABLE_MODES:
        raise ValueError(red("Invalid argument! Mode must be one: %s" %
                         ", ".join(AVAILABLE_MODES)))
    # Load the repository
    original_repo = utils.get_repository()
    # Check that repository is clean
    toplevel = original_repo.working_dir
    if original_repo.is_dirty() and not options.force_dirty:
        raise RuntimeError(red("Repository %s is dirty." % toplevel))
    # Get packages from configuration file
    config = utils.get_config(options.config_file)
    packages = config['packages'].keys()
    print_green("Will build the following packages:\n" + "\n".join(packages))
    # Get current branch name and type and check if it is a valid one
    branch = original_repo.head.reference.name
    branch = utils.undebianize(branch)
    branch_type_str = utils.get_branch_type(branch)
    if branch_type_str not in BRANCH_TYPES.keys():
        allowed_branches = ", ".join(BRANCH_TYPES.keys())
        raise ValueError("Malformed branch name '%s', cannot classify as"
                         " one of %s" % (branch, allowed_branches))
    # Fix needed environment variables (consumed by debian tooling)
    v = utils.get_vcs_info()
    os.environ["DEVFLOW_BUILD_MODE"] = mode
    os.environ["DEBFULLNAME"] = v.name
    os.environ["DEBEMAIL"] = v.email
    # Check that base version file and branch are correct
    versioning.get_python_version()
    # Get the debian branch
    if options.debian_branch:
        debian_branch = options.debian_branch
    else:
        debian_branch = utils.get_debian_branch(branch)
    origin_debian = "origin/" + debian_branch
    # Clone the repo (all build steps run on the clone, never in-place)
    repo_dir = options.repo_dir or create_temp_directory("df-repo")
    repo_dir = os.path.abspath(repo_dir)
    repo = original_repo.clone(repo_dir, branch=branch)
    print_green("Cloned repository to '%s'." % repo_dir)
    build_dir = options.build_dir or create_temp_directory("df-build")
    build_dir = os.path.abspath(build_dir)
    print_green("Build directory: '%s'" % build_dir)
    # Create the debian branch
    repo.git.branch(debian_branch, origin_debian)
    print_green("Created branch '%s' to track '%s'" %
                (debian_branch, origin_debian))
    # Go to debian branch
    repo.git.checkout(debian_branch)
    print_green("Changed to branch '%s'" % debian_branch)
    # Merge with starting branch
    repo.git.merge(branch)
    print_green("Merged branch '%s' into '%s'" % (branch, debian_branch))
    # Compute python and debian version
    cd(repo_dir)
    python_version = versioning.get_python_version()
    debian_version = versioning.\
        debian_version_from_python_version(python_version)
    print_green("The new debian version will be: '%s'" % debian_version)
    # Update the version files
    versioning.update_version()
    # Decide how (and whether) to sign tags: explicit keyid wins, releases
    # use the default key, everything else stays unsigned.
    if not options.sign:
        sign_tag_opt = None
    elif options.keyid:
        sign_tag_opt = "-u=%s" % options.keyid
    elif mode == "release":
        sign_tag_opt = "-s"
    else:
        sign_tag_opt = None
    # Tag branch with python version
    branch_tag = python_version
    tag_message = "%s version %s" % (mode.capitalize(), python_version)
    try:
        repo.git.tag(branch_tag, branch, sign_tag_opt, "-m %s" % tag_message)
    except GitCommandError:
        # Tag may already exist, if only the debian branch has changed
        pass
    upstream_tag = "upstream/" + branch_tag
    repo.git.tag(upstream_tag, branch)
    # Update changelog
    dch = gbp_dch("--debian-branch=%s" % debian_branch,
                  "--git-author",
                  "--ignore-regex=\".*\"",
                  "--multimaint-merge",
                  "--since=HEAD",
                  "--new-version=%s" % debian_version)
    print_green("Successfully ran '%s'" % " ".join(dch.cmd))
    if options.dist is not None:
        distribution = options.dist
    elif mode == "release":
        distribution = utils.get_distribution_codename()
    else:
        distribution = "unstable"
    # Rewrite the changelog header/footer that git-dch left as UNRELEASED.
    f = open("debian/changelog", 'r+')
    lines = f.readlines()
    lines[0] = lines[0].replace("UNRELEASED", distribution)
    lines[2] = lines[2].replace("UNRELEASED", "%s build" % mode)
    f.seek(0)
    f.writelines(lines)
    f.close()
    if mode == "release":
        subprocess.check_call(['editor', "debian/changelog"])
    # Add changelog to INDEX
    repo.git.add("debian/changelog")
    # Commit Changes
    repo.git.commit("-s", "debian/changelog",
                    m="Bump version to %s" % debian_version)
    # Tag debian branch
    debian_branch_tag = "debian/" + utils.version_to_tag(debian_version)
    tag_message = "%s version %s" % (mode.capitalize(), debian_version)
    if mode == "release":
        repo.git.tag(debian_branch_tag, sign_tag_opt, "-m %s" % tag_message)
    # Create debian packages
    cd(repo_dir)
    version_files = []
    for _, pkg_info in config['packages'].items():
        if pkg_info.get("version_file"):
            version_files.extend(pkg_info.as_list('version_file'))
    # Add version.py files to repo
    repo.git.add("-f", *version_files)
    # Export version info to debuild environment
    os.environ["DEB_DEVFLOW_DEBIAN_VERSION"] = debian_version
    os.environ["DEB_DEVFLOW_VERSION"] = python_version
    args = list(gbp_buildpackage)
    args.extend(["--git-export-dir=%s" % build_dir,
                 "--git-upstream-branch=%s" % branch,
                 "--git-debian-branch=%s" % debian_branch,
                 "--git-export=INDEX",
                 "--git-ignore-new",
                 "-sa",
                 "--source-option=--auto-commit",
                 "--git-upstream-tag=%s" % upstream_tag])
    if options.source_only:
        args.append("-S")
    if not options.sign:
        args.extend(["-uc", "-us"])
    elif options.keyid:
        args.append("-k\"'%s'\"" % options.keyid)
    subprocess.check_call(args)
    # Remove cloned repo
    if mode != 'release' and not options.keep_repo:
        print_green("Removing cloned repo '%s'." % repo_dir)
        rm("-r", repo_dir)
    # Print final info
    info = (("Version", debian_version),
            ("Upstream branch", branch),
            ("Upstream tag", branch_tag),
            ("Debian branch", debian_branch),
            ("Debian tag", debian_branch_tag),
            ("Repository directory", repo_dir),
            ("Packages directory", build_dir))
    print_green("\n".join(["%s: %s" % (name, val) for name, val in info]))
    # Print help message
    if mode == "release":
        origin = original_repo.remote().url
        repo.create_remote("original_origin", origin)
        print_green("Created remote 'original_origin' for the repository '%s'"
                    % origin)
        print_green("To update repositories '%s' and '%s' go to '%s' and run:"
                    % (toplevel, origin, repo_dir))
        for remote in ['origin', 'original_origin']:
            objects = [debian_branch, branch_tag, debian_branch_tag]
            print_green("git push %s %s" % (remote, " ".join(objects)))
        if options.push_back:
            objects = [debian_branch, branch_tag, debian_branch_tag]
            repo.git.push("origin", *objects)
            print_green("Automatically updated origin repo.")
def create_temp_directory(suffix):
    """Create and return the path of a fresh temporary directory.

    The directory name starts with ``suffix`` followed by a random part.
    Callers are responsible for removing the directory.
    """
    # tempfile.mkdtemp is race-free, portable and needs no external
    # mktemp(1) binary (the sh-based version also hard-coded /tmp).
    import tempfile
    return tempfile.mkdtemp(prefix=suffix + "-")
# Script entry point: exit with main()'s return value.
# (The original line was corrupted by fused dataset metadata.)
if __name__ == "__main__":
    sys.exit(main())
import os
import tempfile
import unittest
from copy import deepcopy
from pathlib import Path
from varats.tools.bb_config import generate_benchbuild_config
from varats.utils.settings import vara_cfg, bb_cfg
class BenchBuildConfig(unittest.TestCase):
    """Test BenchBuild config."""

    @classmethod
    def setUpClass(cls):
        """Generate the benchbuild config into a temp file and load it."""
        cls.tmp_file = tempfile.NamedTemporaryFile()
        generate_benchbuild_config(vara_cfg(), cls.tmp_file.name)
        cls.bb_cfg = deepcopy(bb_cfg())
        cls.bb_cfg.load(cls.tmp_file.name)

    @classmethod
    def tearDownClass(cls):
        """Close (and thereby delete) the generated temp config file."""
        cls.tmp_file.close()

    def check_all_files_in_config_list(
        self, folder, config_list, exclude_list=None
    ):
        """Check if all python files in a folder are added to the benchbuild
        project config.

        Args:
            folder: project-relative folder path (with trailing slash).
            config_list: plugin module paths loaded from the config.
            exclude_list: file names to skip.
        """
        if exclude_list is None:
            exclude_list = []
        for plugin_file in os.listdir(Path("varats") / folder):
            if plugin_file in exclude_list:
                continue
            if os.path.isfile(folder + plugin_file) and\
                    plugin_file.endswith(".py") and\
                    plugin_file != "__init__.py":
                # Strip only the trailing ".py": str.replace(".py", "")
                # would also mangle a ".py" occurring elsewhere in the path.
                plugin_python_path = (folder + plugin_file[:-len(".py")])\
                    .replace("/", ".")
                # assertIn gives a clearer failure message than assertTrue.
                self.assertIn(
                    plugin_python_path, config_list,
                    "Missing: " + plugin_python_path
                )

    def test_if_all_nodes_have_been_created(self):
        """Test if all the benchbuild config was created with all expected
        nodes."""
        # `in` invokes the config node's __contains__; no need to call the
        # dunder directly.
        self.assertIn("outfile", self.bb_cfg["varats"])
        self.assertIn("result", self.bb_cfg["varats"])

    def test_if_slurm_config_was_added(self):
        """Test if all the benchbuild slurm config was created."""
        self.assertIn("account", self.bb_cfg["slurm"])
        self.assertIn("partition", self.bb_cfg["slurm"])

    def test_if_projects_were_added(self):
        """Test if all projects were added to the benchbuild config."""
        excluded_projects = [
            "llvm-all.py", "llvm-min.py", "llvm.py", "glibc.py"
        ]
        loaded_plugins = self.bb_cfg["plugins"]["projects"].value
        self.check_all_files_in_config_list(
            "varats/projects/c_projects/", loaded_plugins, excluded_projects
        )
        self.check_all_files_in_config_list(
            "varats/projects/cpp_projects/", loaded_plugins, excluded_projects
        )

    def test_if_experiments_were_added(self):
        """Test if all experiments were added to the benchbuild config."""
        excluded_experiments = [
            "wllvm.py", "phasar.py", "region_instrumentation.py",
            "commit_annotation_report.py", "blame_experiment.py"
        ]
        loaded_plugins = self.bb_cfg["plugins"]["experiments"].value
        self.check_all_files_in_config_list(
            "varats/experiments/", loaded_plugins, excluded_experiments
        )
import tempfile
import unittest
from copy import deepcopy
from pathlib import Path
from varats.tools.bb_config import generate_benchbuild_config
from varats.utils.settings import vara_cfg, bb_cfg
class BenchBuildConfig(unittest.TestCase):
    """Test BenchBuild config."""

    @classmethod
    def setUpClass(cls):
        """Generate the benchbuild config into a temp file and load it."""
        cls.tmp_file = tempfile.NamedTemporaryFile()
        generate_benchbuild_config(vara_cfg(), cls.tmp_file.name)
        cls.bb_cfg = deepcopy(bb_cfg())
        cls.bb_cfg.load(cls.tmp_file.name)

    @classmethod
    def tearDownClass(cls):
        """Close (and thereby delete) the generated temp config file."""
        cls.tmp_file.close()

    def check_all_files_in_config_list(
        self, folder, config_list, exclude_list=None
    ):
        """Check if all python files in a folder are added to the benchbuild
        project config.

        Args:
            folder: project-relative folder path (with trailing slash).
            config_list: plugin module paths loaded from the config.
            exclude_list: file names to skip.
        """
        if exclude_list is None:
            exclude_list = []
        for plugin_file in os.listdir(Path("varats") / folder):
            if plugin_file in exclude_list:
                continue
            if os.path.isfile(folder + plugin_file) and\
                    plugin_file.endswith(".py") and\
                    plugin_file != "__init__.py":
                # Strip only the trailing ".py": str.replace(".py", "")
                # would also mangle a ".py" occurring elsewhere in the path.
                plugin_python_path = (folder + plugin_file[:-len(".py")])\
                    .replace("/", ".")
                # assertIn gives a clearer failure message than assertTrue.
                self.assertIn(
                    plugin_python_path, config_list,
                    "Missing: " + plugin_python_path
                )

    def test_if_all_nodes_have_been_created(self):
        """Test if all the benchbuild config was created with all expected
        nodes."""
        # `in` invokes the config node's __contains__; no need to call the
        # dunder directly.
        self.assertIn("outfile", self.bb_cfg["varats"])
        self.assertIn("result", self.bb_cfg["varats"])

    def test_if_slurm_config_was_added(self):
        """Test if all the benchbuild slurm config was created."""
        self.assertIn("account", self.bb_cfg["slurm"])
        self.assertIn("partition", self.bb_cfg["slurm"])

    def test_if_projects_were_added(self):
        """Test if all projects were added to the benchbuild config."""
        excluded_projects = [
            "llvm-all.py", "llvm-min.py", "llvm.py", "glibc.py"
        ]
        loaded_plugins = self.bb_cfg["plugins"]["projects"].value
        self.check_all_files_in_config_list(
            "varats/projects/c_projects/", loaded_plugins, excluded_projects
        )
        self.check_all_files_in_config_list(
            "varats/projects/cpp_projects/", loaded_plugins, excluded_projects
        )

    def test_if_experiments_were_added(self):
        """Test if all experiments were added to the benchbuild config."""
        excluded_experiments = [
            "wllvm.py", "phasar.py", "region_instrumentation.py",
            "commit_annotation_report.py", "blame_experiment.py"
        ]
        loaded_plugins = self.bb_cfg["plugins"]["experiments"].value
        self.check_all_files_in_config_list(
            "varats/experiments/", loaded_plugins, excluded_experiments
        )
from django.contrib.auth import login, authenticate
from django.shortcuts import render,redirect
from .models import *
from .forms import *
from django.views import generic
# Create your views here.
def condition(request):
    """Render the static 'condition' page (no dynamic context)."""
    return render(request, 'condition.html', context={})
def index(request):
    """Render the home page.

    Collects news, slides, contact data, suggestions, organization info,
    project lists and complaints for the template, and handles POST
    submissions of the suggestion and complaint forms (both forms are bound
    to the same POST data, mirroring the original behavior).
    """
    all_news = News.objects.all()
    all_news2 = News2.objects.all()
    all_slide = Slide.objects.all()
    location = Location.objects.all()
    phone = Phone.objects.all()
    # The template exposes the same Suggestion queryset under five
    # section-specific keys; build the queryset once instead of five times.
    suggestions = Suggestion.objects.all()
    organization_task = Organization_Task.objects.all()
    organization_leader = Organization_Leader.objects.all()
    organization_council = Organization_Governing_Council.objects.all()
    organization_law = Organization_Law.objects.all()
    project_ended = Project_Ended.objects.all()
    project_processing = Project_UnderProcessing.objects.all()
    project_planned = Project_Planned.objects.all()
    complaint_follow = Complaint.objects.all()
    page_path = request.get_full_path()
    if request.method == 'POST':
        suggestion_form = SuggestionForm(request.POST)
        if suggestion_form.is_valid():
            suggestion_form.save()
            return redirect('index')
    else:
        suggestion_form = SuggestionForm()
    if request.method == 'POST':
        complaint_form = ComplaintForm(request.POST)
        if complaint_form.is_valid():
            complaint_form.save()
            return redirect('index')
    else:
        complaint_form = ComplaintForm()
    context = {
        'all_news': all_news,
        'all_news2': all_news2,
        'all_slide': all_slide,
        'suggestion_form': suggestion_form,
        'complaint_form': complaint_form,
        'page_path': page_path,
        'location': location,
        'phone': phone,
        'agriculture_suggestion': suggestions,
        'industrial_suggestion': suggestions,
        'development_suggestion': suggestions,
        'service_suggestion': suggestions,
        'tourist_suggestion': suggestions,
        'organization_task': organization_task,
        'organization_leader': organization_leader,
        'organization_council': organization_council,
        'organization_law': organization_law,
        'project_ended': project_ended,
        'project_processing': project_processing,
        'project_planned': project_planned,
        'complaint_follow': complaint_follow,
    }
    return render(request, 'index.html', context=context)
class NewsListView(generic.ListView):
    """Paginated list of News items, 4 per page."""
    model = News
    paginate_by = 4
class NewsDetailView(generic.DetailView):
    """Detail page for a single News item."""
    model = News
class News2DetailView(generic.DetailView):
    """Detail page for a single News2 item."""
    model = News2
def signup(request):
    """Register a new user, log them in, and redirect to the home page.

    On GET (or invalid POST) the (re)bound form is rendered again.  The
    original source was corrupted with '<PASSWORD>' placeholders; the field
    name 'password1' follows the Django UserCreationForm convention --
    confirm against SignUpForm's actual field names.
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            # NOTE(review): authenticate() may return None on failure, which
            # would make login() raise -- unchanged from the original flow.
            user = authenticate(username=username, password=raw_password)
            login(request, user)
            return redirect('index')
    else:
        form = SignUpForm()
    return render(request, 'signup.html', {'form': form})
from django.shortcuts import render,redirect
from .models import *
from .forms import *
from django.views import generic
# Create your views here.
def condition(request):
    """Render the static 'condition' page (no dynamic context)."""
    return render(request, 'condition.html', context={})
def index(request):
    """Render the home page.

    Collects news, slides, contact data, suggestions, organization info,
    project lists and complaints for the template, and handles POST
    submissions of the suggestion and complaint forms (both forms are bound
    to the same POST data, mirroring the original behavior).
    """
    all_news = News.objects.all()
    all_news2 = News2.objects.all()
    all_slide = Slide.objects.all()
    location = Location.objects.all()
    phone = Phone.objects.all()
    # The template exposes the same Suggestion queryset under five
    # section-specific keys; build the queryset once instead of five times.
    suggestions = Suggestion.objects.all()
    organization_task = Organization_Task.objects.all()
    organization_leader = Organization_Leader.objects.all()
    organization_council = Organization_Governing_Council.objects.all()
    organization_law = Organization_Law.objects.all()
    project_ended = Project_Ended.objects.all()
    project_processing = Project_UnderProcessing.objects.all()
    project_planned = Project_Planned.objects.all()
    complaint_follow = Complaint.objects.all()
    page_path = request.get_full_path()
    if request.method == 'POST':
        suggestion_form = SuggestionForm(request.POST)
        if suggestion_form.is_valid():
            suggestion_form.save()
            return redirect('index')
    else:
        suggestion_form = SuggestionForm()
    if request.method == 'POST':
        complaint_form = ComplaintForm(request.POST)
        if complaint_form.is_valid():
            complaint_form.save()
            return redirect('index')
    else:
        complaint_form = ComplaintForm()
    context = {
        'all_news': all_news,
        'all_news2': all_news2,
        'all_slide': all_slide,
        'suggestion_form': suggestion_form,
        'complaint_form': complaint_form,
        'page_path': page_path,
        'location': location,
        'phone': phone,
        'agriculture_suggestion': suggestions,
        'industrial_suggestion': suggestions,
        'development_suggestion': suggestions,
        'service_suggestion': suggestions,
        'tourist_suggestion': suggestions,
        'organization_task': organization_task,
        'organization_leader': organization_leader,
        'organization_council': organization_council,
        'organization_law': organization_law,
        'project_ended': project_ended,
        'project_processing': project_processing,
        'project_planned': project_planned,
        'complaint_follow': complaint_follow,
    }
    return render(request, 'index.html', context=context)
class NewsListView(generic.ListView):
    """Paginated list of News items, 4 per page."""
    model = News
    paginate_by = 4
class NewsDetailView(generic.DetailView):
    """Detail page for a single News item."""
    model = News
class News2DetailView(generic.DetailView):
    """Detail page for a single News2 item."""
    model = News2
def signup(request):
    """Register a new user, log them in, and redirect to the home page.

    On GET (or invalid POST) the (re)bound form is rendered again.  The
    original source was corrupted with '<PASSWORD>' placeholders; the field
    name 'password1' follows the Django UserCreationForm convention --
    confirm against SignUpForm's actual field names.
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            # NOTE(review): authenticate() may return None on failure, which
            # would make login() raise -- unchanged from the original flow.
            user = authenticate(username=username, password=raw_password)
            login(request, user)
            return redirect('index')
    else:
        form = SignUpForm()
    return render(request, 'signup.html', {'form': form})
import chain
from behave import given, when, then
from itertools import count
from unittest.mock import MagicMock
from chain.core.domains.state import State
@given("a random number of static chains")
def step_create_random_static_chains(context: dict) -> None:
    """Populate ``context.chain`` with a random number of static chains."""
    links = []
    for _ in range(context.fake.pyint()):
        links.append(chain(context.dummy_function))
    context.chain = links
@given("an odd random number of static chains")
def step_create_odd_random_static_chains(context: dict) -> None:
    """Populate ``context.chain`` with an odd number of static chains."""
    def dummy(context: State) -> None:
        pass
    # pyint(min=1, step=2) only yields odd values, so the count is odd.
    odd_count = context.fake.pyint(min=1, step=2)
    context.chain = [chain(dummy) for _ in range(odd_count)]
@given("a single static chain")
def step_create_single_random_static_chain(context: dict) -> None:
    """Populate ``context.chain`` with exactly one static chain."""
    def dummy(context: State) -> None:
        pass
    single_link = chain(dummy)
    context.chain = [single_link]
@given("a new chain with mocked function")
def step_create_mocked_chain(context: dict) -> None:
    """Append a chain wrapping a fresh ``MagicMock`` to ``context.chain``."""
    if "chain" not in context:
        context.chain = []
    mock = MagicMock(return_value=None)
    context.mocked_function = mock
    context.chain.append(chain(mock))
@given("add a return value to the mocked function")
def step_add_return_value(context: dict) -> None:
    """Give the mocked chain function a random dict as return value."""
    output = context.fake.pydict()
    context.expected_output = output
    context.mocked_function.return_value = output
@given("add an arg return value to the mocked function")
def step_add_return_value_as_args(context: dict) -> None:
    """Make the mock return an (args, kwargs) pair for the next chain link."""
    args = context.fake.pytuple()
    kwargs = context.fake.pydict()
    context.expected_args = args
    context.expected_kwargs = kwargs
    context.mocked_function.return_value = (args, kwargs)
@given("a new chain returning random autoincremented data")
def step_create_autoincrementing_chain(context: dict) -> None:
    """Append a chain whose function emits successive counter values."""
    if "chain" not in context:
        context.chain = []
    context.initial_state.count = count()
    def autoincrement(context: State) -> tuple:
        # Each call consumes the next value from the shared counter.
        return (next(context.count),), {}
    context.chain.append(chain(autoincrement))
@given("a decorated chain function with output")
def step_create_decorated_function_with_output(context: dict) -> None:
    """Append a ``@chain``-decorated function returning a known dict."""
    expected_output = context.fake.pydict()
    @chain
    def dummy(context: State, expected_output=expected_output) -> None:
        # Default-arg binding captures expected_output at definition time.
        return expected_output
    if "chain" not in context:
        context.chain = []
    context.expected_output = expected_output
    context.chain.append(dummy)
@given("a decorated chain function without output")
def step_create_decorated_function_without_output(context: dict) -> None:
    """Append a ``@chain``-decorated function that returns nothing."""
    expected_output = context.fake.pydict()
    @chain
    def bar(context: State) -> None:
        # Side effect only: stores an attribute on the state.
        context.bar = "bar"
    if "chain" not in context:
        context.chain = []
    context.expected_output = expected_output
    context.chain.append(bar)
@when("I reverse the chain")
def step_revese_chain(context: dict) -> None:
    """Reverse the order of the chained functions."""
    context.chain = list(reversed(context.chain))
@when("I add a counter on the current state")
def step_add_counter_to_state(context: dict) -> None:
    """Attach a fresh itertools counter to the initial state."""
    context.initial_state.count = count()
@then("the mocked function should have been called with correct data")
def step_check_args_chain(context: dict) -> None:
    """Assert the mock's last call matches the expected args/kwargs/state."""
    last_args, last_kwargs = context.mocked_function.call_args_list[-1]
    # The runtime-injected state object cannot be predicted; copy it over.
    context.expected_kwargs.update({"context": last_kwargs["context"]})
    assert last_args == context.expected_args
    assert last_kwargs == context.expected_kwargs
    assert last_kwargs["context"].get_state() == context.initial_state.get_state()
@then("the context should not persist data")
def step_check_reversed_chain(context: dict) -> None:
    """Check the Result of the Reversed Chain.

    Asserts the reversed chain ran without inheriting earlier state:
    the counter restarted, so the first positional argument of the last
    call is 0.  (The original final line was corrupted by fused dataset
    metadata; the unused ``kwargs`` local was also dropped.)
    """
    calls = context.mocked_function.call_args_list
    last_call = calls[-1]
    args = last_call[0]
    assert args[0] == 0
from behave import given, when, then
from itertools import count
from unittest.mock import MagicMock
from chain.core.domains.state import State
@given("a random number of static chains")
def step_create_random_static_chains(context: dict) -> None:
    """Populate ``context.chain`` with a random number of static chains."""
    links = []
    for _ in range(context.fake.pyint()):
        links.append(chain(context.dummy_function))
    context.chain = links
@given("an odd random number of static chains")
def step_create_odd_random_static_chains(context: dict) -> None:
    """Populate ``context.chain`` with an odd number of static chains."""
    def dummy(context: State) -> None:
        pass
    # pyint(min=1, step=2) only yields odd values, so the count is odd.
    odd_count = context.fake.pyint(min=1, step=2)
    context.chain = [chain(dummy) for _ in range(odd_count)]
@given("a single static chain")
def step_create_single_random_static_chain(context: dict) -> None:
    """Populate ``context.chain`` with exactly one static chain."""
    def dummy(context: State) -> None:
        pass
    single_link = chain(dummy)
    context.chain = [single_link]
@given("a new chain with mocked function")
def step_create_mocked_chain(context: dict) -> None:
    """Append a chain wrapping a fresh ``MagicMock`` to ``context.chain``."""
    if "chain" not in context:
        context.chain = []
    mock = MagicMock(return_value=None)
    context.mocked_function = mock
    context.chain.append(chain(mock))
@given("add a return value to the mocked function")
def step_add_return_value(context: dict) -> None:
    """Give the mocked chain function a random dict as return value."""
    output = context.fake.pydict()
    context.expected_output = output
    context.mocked_function.return_value = output
@given("add an arg return value to the mocked function")
def step_add_return_value_as_args(context: dict) -> None:
    """Make the mock return an (args, kwargs) pair for the next chain link."""
    args = context.fake.pytuple()
    kwargs = context.fake.pydict()
    context.expected_args = args
    context.expected_kwargs = kwargs
    context.mocked_function.return_value = (args, kwargs)
@given("a new chain returning random autoincremented data")
def step_create_autoincrementing_chain(context: dict) -> None:
    """Append a chain whose function emits successive counter values."""
    if "chain" not in context:
        context.chain = []
    context.initial_state.count = count()
    def autoincrement(context: State) -> tuple:
        # Each call consumes the next value from the shared counter.
        return (next(context.count),), {}
    context.chain.append(chain(autoincrement))
@given("a decorated chain function with output")
def step_create_decorated_function_with_output(context: dict) -> None:
    """Append a ``@chain``-decorated function returning a known dict."""
    expected_output = context.fake.pydict()
    @chain
    def dummy(context: State, expected_output=expected_output) -> None:
        # Default-arg binding captures expected_output at definition time.
        return expected_output
    if "chain" not in context:
        context.chain = []
    context.expected_output = expected_output
    context.chain.append(dummy)
@given("a decorated chain function without output")
def step_create_decorated_function_without_output(context: dict) -> None:
    """Append a ``@chain``-decorated function that returns nothing."""
    expected_output = context.fake.pydict()
    @chain
    def bar(context: State) -> None:
        # Side effect only: stores an attribute on the state.
        context.bar = "bar"
    if "chain" not in context:
        context.chain = []
    context.expected_output = expected_output
    context.chain.append(bar)
@when("I reverse the chain")
def step_revese_chain(context: dict) -> None:
    """Reverse the order of the chained functions."""
    context.chain = list(reversed(context.chain))
@when("I add a counter on the current state")
def step_add_counter_to_state(context: dict) -> None:
    """Attach a fresh itertools counter to the initial state."""
    context.initial_state.count = count()
@then("the mocked function should have been called with correct data")
def step_check_args_chain(context: dict) -> None:
    """Assert the mock's last call matches the expected args/kwargs/state."""
    last_args, last_kwargs = context.mocked_function.call_args_list[-1]
    # The runtime-injected state object cannot be predicted; copy it over.
    context.expected_kwargs.update({"context": last_kwargs["context"]})
    assert last_args == context.expected_args
    assert last_kwargs == context.expected_kwargs
    assert last_kwargs["context"].get_state() == context.initial_state.get_state()
@then("the context should not persist data")
def step_check_reversed_chain(context: dict) -> None:
    """Check the Result of the Reversed Chain.

    Asserts the reversed chain ran without inheriting earlier state:
    the counter restarted, so the first positional argument of the last
    call is 0.  (The original final line was corrupted by fused dataset
    metadata; the unused ``kwargs`` local was also dropped.)
    """
    calls = context.mocked_function.call_args_list
    last_call = calls[-1]
    args = last_call[0]
    assert args[0] == 0
import torch
import torch.nn as nn
from transformers.generation_utils import GenerationMixin
from transformers.modeling_outputs import Seq2SeqLMOutput
from transformers.models.bert.modeling_bert import BertOnlyMLMHead
from unitorch.modules.prefix_model import (
PrefixConfig,
PrefixTextModel,
_reorder_buffer,
_reorder_buffer_v2,
)
from unitorch.models import GenericModel, GenericOutputs
class UnilmForGeneration(GenericModel, GenerationMixin):
    """UniLM model for sequence generation.

    Combines a prefix-style BERT backbone (``PrefixTextModel``) with a BERT
    MLM head and plugs into HuggingFace's ``GenerationMixin`` beam search,
    caching backbone states between decoding steps.
    """

    # Tensor name GenerationMixin treats as the primary model input.
    main_input_name = "input_ids"

    def __init__(self, config_path):
        """
        Args:
            config_path: config file path to unilm model
        """
        super().__init__()
        self.config = PrefixConfig.from_json_file(config_path)
        self.config.gradient_checkpointing = False
        self.bert = PrefixTextModel(self.config)
        self.cls = BertOnlyMLMHead(self.config)
        self.init_weights()
        # Index of the cached history states in the backbone's output tuple;
        # it shifts by one for each optional output that is enabled.
        self.hist_index = int(self.config.output_hidden_states) + int(self.config.output_attentions) + 2
        # Weight tying: share the input embedding matrix with the MLM decoder.
        self.bert.embeddings.word_embeddings.weight = self.cls.predictions.decoder.weight

    @property
    def device(self) -> torch.device:
        """
        `torch.device`: The device on which the module is (assuming that all the module parameters are on the same
        device).
        """
        return next(self.parameters()).device

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past=None,
        **kwargs,
    ):
        """
        Implement in subclasses of [`PreTrainedModel`] for custom behavior to prepare inputs in the generate method.

        On the first step (``past is None``) the source prefix stored in
        ``self.prefix_state`` (set by ``generate``) is encoded and the masks/
        positions are expanded to all beams; later steps reuse the cache.
        """
        if past is None:
            active_batch_size, _ = decoder_input_ids.size()
            prefix_token, prefix_seg, prefix_pos, prefix_mask = (
                self.prefix_state["prefix_token"],
                self.prefix_state["prefix_seg"],
                self.prefix_state["prefix_pos"],
                self.prefix_state["prefix_mask"],
            )
            prefix_len = self.prefix_state["prefix_len"]
            # Encode only the source-prefix part once.
            outputs = self.bert(
                prefix_token[:, :prefix_len],
                prefix_seg[:, :prefix_len],
                prefix_mask[:, :prefix_len, :prefix_len],
                prefix_pos[:, :prefix_len],
            )
            # Repeat positions per beam and keep only the decoder tail.
            token_pos = prefix_pos.repeat(1, self.num_beams).view(active_batch_size, prefix_pos.size(1))
            token_pos = token_pos[:, prefix_len:]
            token_mask = (
                prefix_mask.unsqueeze(1)
                .repeat(1, self.num_beams, 1, 1)
                .view(active_batch_size, prefix_mask.size(1), prefix_mask.size(1))
            )
            token_mask = token_mask[:, prefix_len:, :]
            # Cached backbone states reused by forward() on later steps.
            history_states = outputs[self.hist_index]
            decoder_mask_token = torch.ones(active_batch_size, 1).to(decoder_input_ids) * self.config.mask_token_id
            decoder_seg_ids = torch.ones(active_batch_size, 2).to(decoder_input_ids) * self.config.target_type_id
        else:
            # Subsequent steps: everything was packed into `past` by forward().
            (token_pos, token_mask, decoder_mask_token, decoder_seg_ids, history_states,) = (
                past[0],
                past[1],
                past[2],
                past[3],
                past[4:],
            )
        return {
            "decoder_input_ids": decoder_input_ids,
            "decoder_mask_ids": decoder_mask_token,
            "decoder_attn_mask": token_mask,
            "decoder_seg_ids": decoder_seg_ids,
            "decoder_pos_ids": token_pos,
            "past_key_values": history_states,
        }

    @staticmethod
    def _reorder_cache(past, beam_idx):
        """
        For beam search in huggingface generation mixin

        The first four entries of ``past`` are step-invariant tensors; the
        rest are per-layer history buffers that must follow the beams.
        """
        (pos_ids, token_mask, decoder_mask_token, decoder_seg, history_states,) = (
            past[0],
            past[1],
            past[2],
            past[3],
            past[4:],
        )
        reordered_past = []
        for layer_past in history_states:
            reordered_past.append(_reorder_buffer(layer_past, beam_idx))
        newpast = [
            pos_ids,
            token_mask,
            decoder_mask_token,
            decoder_seg,
        ] + reordered_past
        return newpast

    @staticmethod
    def _reorder_cache_v2(past, batch_idx, beam_idx):
        """
        For faster inference by optimized beam search in generation mixin v2

        Unlike ``_reorder_cache``, this also reindexes the leading tensors
        by ``beam_idx`` (active beams may shrink between steps).
        """
        (pos_ids, token_mask, decoder_mask_token, decoder_seg, history_states,) = (
            past[0],
            past[1],
            past[2],
            past[3],
            past[4:],
        )
        reordered_past = []
        for layer_past in history_states:
            reordered_past.append(_reorder_buffer_v2(layer_past, batch_idx, beam_idx))
        pos_ids = pos_ids[beam_idx]
        token_mask = token_mask[beam_idx]
        decoder_mask_token = decoder_mask_token[beam_idx]
        decoder_seg = decoder_seg[beam_idx]
        newpast = [
            pos_ids,
            token_mask,
            decoder_mask_token,
            decoder_seg,
        ] + reordered_past
        return newpast

    def forward(
        self,
        tokens_ids=None,
        attn_mask=None,
        seg_ids=None,
        pos_ids=None,
        decoder_input_ids=None,
        decoder_pos_ids=None,
        decoder_seg_ids=None,
        decoder_attn_mask=None,
        decoder_mask_ids=None,
        past_key_values=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """
        Args:
            tokens_ids: tokens of encode text & decode
            attn_mask: attention mask of tokens
            seg_ids: token type ids
            pos_ids: position ids
            others: used in beam search
        Returns: forward logits
        """
        if self.training:
            # Training path: one full forward over source + target tokens.
            outputs = self.bert(
                tokens_ids,
                seg_ids,
                attn_mask,
                pos_ids,
            )
            logits = self.cls(outputs[0])
            return logits
        # Inference path (one beam-search step): append the [MASK] slot and
        # feed only the last two positions; earlier states come from cache.
        decoder_token = torch.cat([decoder_input_ids, decoder_mask_ids], dim=1)
        decoder_len = decoder_token.size(1)
        decoder_token = decoder_token[:, -2:]
        decoder_mask = decoder_attn_mask[
            :,
            decoder_len - 2 : decoder_len,
            : self.prefix_state["prefix_len"] + decoder_len,
        ]
        decoder_pos = decoder_pos_ids[:, decoder_len - 2 : decoder_len]
        outputs = self.bert(
            decoder_token,
            decoder_seg_ids,
            decoder_mask,
            decoder_pos,
            history_states=past_key_values,
        )
        logits = self.cls(outputs[0])
        # Repack everything prepare_inputs_for_generation expects in `past`.
        state4cache = [
            decoder_pos_ids,
            decoder_attn_mask,
            decoder_mask_ids,
            decoder_seg_ids,
        ] + outputs[self.hist_index]
        return Seq2SeqLMOutput(logits=logits, past_key_values=state4cache)

    def generate(
        self,
        tokens_ids,
        num_beams=5,
        decoder_start_token_id=101,
        decoder_end_token_id=102,
        num_return_sequences=1,
        min_gen_seq_length=0,
        max_gen_seq_length=48,
        repetition_penalty=1.0,
        no_repeat_ngram_size=0,
        early_stopping=True,
        length_penalty=1.0,
        num_beam_groups=1,
        diversity_penalty=0.0,
        diverse_rate=0.0,
        do_sample=False,
        temperature=1.0,
        top_k=50,
        top_p=1.0,
    ):
        """
        Args:
            tokens_ids: tokens of encode text

        Returns:
            GenericOutputs with ``sequences`` zero-padded to
            ``max_gen_seq_length`` and their ``sequences_scores``.
        """
        # NOTE(review): `diverse_rate` is accepted but never used below.
        self.num_beams = num_beams
        prefix_token = tokens_ids
        prefix_mask1 = tokens_ids.ne(self.config.pad_token_id).long()
        batch_size, prefix_len = prefix_token.size()
        total_seq_length = max_gen_seq_length + prefix_len + 1
        # Full visibility over the non-pad source prefix...
        prefix_mask = prefix_mask1[:, None, :].repeat(1, total_seq_length, 1)
        # ...plus a lower-triangular (causal) mask over generated positions.
        new_mask = torch.zeros(batch_size, total_seq_length, max_gen_seq_length + 1).to(prefix_mask)
        tri_mask = torch.ones(batch_size, total_seq_length, max_gen_seq_length + 1).to(prefix_mask)
        new_mask[:, prefix_len:, :] = torch.tril(tri_mask[:, prefix_len:, :])
        new_mask[:, :, 0] = 0
        prefix_mask = torch.cat((prefix_mask, new_mask), dim=-1)
        prefix_seg = torch.tensor([self.config.source_type_id] * prefix_len).to(prefix_token)
        prefix_seg = prefix_seg[None, :].repeat(batch_size, 1)
        # Position ids: cumulative count of non-pad tokens, zero-based.
        prefix_pos0 = torch.ones(batch_size, max_gen_seq_length + 1).to(tokens_ids)
        prefix_pos0[:, 0] = 0
        prefix_pos = torch.cat((tokens_ids, prefix_pos0.to(tokens_ids)), dim=-1).ne(self.config.pad_token_id)
        prefix_pos = torch.cumsum(prefix_pos, dim=-1) - 1
        # Stash prefix tensors for prepare_inputs_for_generation/forward.
        self.prefix_state = dict(
            {
                "prefix_len": prefix_len,
                "prefix_token": prefix_token,
                "prefix_seg": prefix_seg,
                "prefix_mask": prefix_mask,
                "prefix_pos": prefix_pos,
            }
        )
        # NOTE(review): decoder_seg / decoder_mask_token are built here but
        # not referenced afterwards; equivalents are rebuilt per step in
        # prepare_inputs_for_generation.
        decoder_seg = (torch.ones(batch_size * self.num_beams, 1) * self.config.target_type_id).to(prefix_token)
        decoder_seg[:, 0] = self.config.source_type_id
        decoder_mask_token = torch.ones(batch_size * self.num_beams, 1).to(prefix_token) * self.config.mask_token_id
        if decoder_start_token_id is not None:
            self.config.bos_token_id = decoder_start_token_id
        decoder_input_ids = torch.ones(batch_size, 1).to(prefix_token) * self.config.bos_token_id
        outputs = super().generate(
            decoder_input_ids,
            max_length=max_gen_seq_length,
            min_length=min_gen_seq_length,
            num_beams=num_beams,
            do_sample=do_sample,
            decoder_start_token_id=decoder_start_token_id,
            no_repeat_ngram_size=no_repeat_ngram_size,
            early_stopping=early_stopping,
            length_penalty=length_penalty,
            repetition_penalty=repetition_penalty,
            num_return_sequences=num_return_sequences,
            bos_token_id=decoder_start_token_id,
            eos_token_id=decoder_end_token_id,
            num_beam_groups=num_beam_groups,
            diversity_penalty=diversity_penalty,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            return_dict_in_generate=True,
            output_scores=True,
        )
        # Pad/reshape to a fixed (batch, num_return, max_gen_seq_length)
        # layout, then squeeze when a single sequence per input is asked.
        sequences = outputs.sequences.reshape(-1, num_return_sequences, outputs.sequences.size(-1))
        outputs.sequences = torch.zeros(sequences.size(0), num_return_sequences, max_gen_seq_length).to(
            device=sequences.device
        )
        outputs.sequences[:, :, : sequences.size(-1)].copy_(sequences)
        if num_return_sequences == 1:
            outputs.sequences = outputs.sequences.reshape(-1, max_gen_seq_length)
        return GenericOutputs(sequences=outputs.sequences, sequences_scores=outputs.sequences_scores)
import torch
import torch.nn as nn
from transformers.generation_utils import GenerationMixin
from transformers.modeling_outputs import Seq2SeqLMOutput
from transformers.models.bert.modeling_bert import BertOnlyMLMHead
from unitorch.modules.prefix_model import (
PrefixConfig,
PrefixTextModel,
_reorder_buffer,
_reorder_buffer_v2,
)
from unitorch.models import GenericModel, GenericOutputs
class UnilmForGeneration(GenericModel, GenerationMixin):
    """UniLM model for sequence generation.

    Combines a prefix-style BERT backbone (``PrefixTextModel``) with a BERT
    MLM head and plugs into HuggingFace's ``GenerationMixin`` beam search,
    caching backbone states between decoding steps.
    """

    # Tensor name GenerationMixin treats as the primary model input.
    main_input_name = "input_ids"

    def __init__(self, config_path):
        """
        Args:
            config_path: config file path to unilm model
        """
        super().__init__()
        self.config = PrefixConfig.from_json_file(config_path)
        self.config.gradient_checkpointing = False
        self.bert = PrefixTextModel(self.config)
        self.cls = BertOnlyMLMHead(self.config)
        self.init_weights()
        # Index of the cached history states in the backbone's output tuple;
        # it shifts by one for each optional output that is enabled.
        self.hist_index = int(self.config.output_hidden_states) + int(self.config.output_attentions) + 2
        # Weight tying: share the input embedding matrix with the MLM decoder.
        self.bert.embeddings.word_embeddings.weight = self.cls.predictions.decoder.weight

    @property
    def device(self) -> torch.device:
        """
        `torch.device`: The device on which the module is (assuming that all the module parameters are on the same
        device).
        """
        return next(self.parameters()).device

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past=None,
        **kwargs,
    ):
        """
        Implement in subclasses of [`PreTrainedModel`] for custom behavior to prepare inputs in the generate method.

        On the first step (``past is None``) the source prefix stored in
        ``self.prefix_state`` (set by ``generate``) is encoded and the masks/
        positions are expanded to all beams; later steps reuse the cache.
        """
        if past is None:
            active_batch_size, _ = decoder_input_ids.size()
            prefix_token, prefix_seg, prefix_pos, prefix_mask = (
                self.prefix_state["prefix_token"],
                self.prefix_state["prefix_seg"],
                self.prefix_state["prefix_pos"],
                self.prefix_state["prefix_mask"],
            )
            prefix_len = self.prefix_state["prefix_len"]
            # Encode only the source-prefix part once.
            outputs = self.bert(
                prefix_token[:, :prefix_len],
                prefix_seg[:, :prefix_len],
                prefix_mask[:, :prefix_len, :prefix_len],
                prefix_pos[:, :prefix_len],
            )
            # Repeat positions per beam and keep only the decoder tail.
            token_pos = prefix_pos.repeat(1, self.num_beams).view(active_batch_size, prefix_pos.size(1))
            token_pos = token_pos[:, prefix_len:]
            token_mask = (
                prefix_mask.unsqueeze(1)
                .repeat(1, self.num_beams, 1, 1)
                .view(active_batch_size, prefix_mask.size(1), prefix_mask.size(1))
            )
            token_mask = token_mask[:, prefix_len:, :]
            # Cached backbone states reused by forward() on later steps.
            history_states = outputs[self.hist_index]
            decoder_mask_token = torch.ones(active_batch_size, 1).to(decoder_input_ids) * self.config.mask_token_id
            decoder_seg_ids = torch.ones(active_batch_size, 2).to(decoder_input_ids) * self.config.target_type_id
        else:
            # Subsequent steps: everything was packed into `past` by forward().
            (token_pos, token_mask, decoder_mask_token, decoder_seg_ids, history_states,) = (
                past[0],
                past[1],
                past[2],
                past[3],
                past[4:],
            )
        return {
            "decoder_input_ids": decoder_input_ids,
            "decoder_mask_ids": decoder_mask_token,
            "decoder_attn_mask": token_mask,
            "decoder_seg_ids": decoder_seg_ids,
            "decoder_pos_ids": token_pos,
            "past_key_values": history_states,
        }

    @staticmethod
    def _reorder_cache(past, beam_idx):
        """
        For beam search in huggingface generation mixin

        The first four entries of ``past`` are step-invariant tensors; the
        rest are per-layer history buffers that must follow the beams.
        """
        (pos_ids, token_mask, decoder_mask_token, decoder_seg, history_states,) = (
            past[0],
            past[1],
            past[2],
            past[3],
            past[4:],
        )
        reordered_past = []
        for layer_past in history_states:
            reordered_past.append(_reorder_buffer(layer_past, beam_idx))
        newpast = [
            pos_ids,
            token_mask,
            decoder_mask_token,
            decoder_seg,
        ] + reordered_past
        return newpast

    @staticmethod
    def _reorder_cache_v2(past, batch_idx, beam_idx):
        """
        For faster inference by optimized beam search in generation mixin v2

        Unlike ``_reorder_cache``, this also reindexes the leading tensors
        by ``beam_idx`` (active beams may shrink between steps).
        """
        (pos_ids, token_mask, decoder_mask_token, decoder_seg, history_states,) = (
            past[0],
            past[1],
            past[2],
            past[3],
            past[4:],
        )
        reordered_past = []
        for layer_past in history_states:
            reordered_past.append(_reorder_buffer_v2(layer_past, batch_idx, beam_idx))
        pos_ids = pos_ids[beam_idx]
        token_mask = token_mask[beam_idx]
        decoder_mask_token = decoder_mask_token[beam_idx]
        decoder_seg = decoder_seg[beam_idx]
        newpast = [
            pos_ids,
            token_mask,
            decoder_mask_token,
            decoder_seg,
        ] + reordered_past
        return newpast

    def forward(
        self,
        tokens_ids=None,
        attn_mask=None,
        seg_ids=None,
        pos_ids=None,
        decoder_input_ids=None,
        decoder_pos_ids=None,
        decoder_seg_ids=None,
        decoder_attn_mask=None,
        decoder_mask_ids=None,
        past_key_values=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """
        Args:
            tokens_ids: tokens of encode text & decode
            attn_mask: attention mask of tokens
            seg_ids: token type ids
            pos_ids: position ids
            others: used in beam search
        Returns: forward logits
        """
        if self.training:
            # Training path: one full forward over source + target tokens.
            outputs = self.bert(
                tokens_ids,
                seg_ids,
                attn_mask,
                pos_ids,
            )
            logits = self.cls(outputs[0])
            return logits
        # Inference path (one beam-search step): append the [MASK] slot and
        # feed only the last two positions; earlier states come from cache.
        decoder_token = torch.cat([decoder_input_ids, decoder_mask_ids], dim=1)
        decoder_len = decoder_token.size(1)
        decoder_token = decoder_token[:, -2:]
        decoder_mask = decoder_attn_mask[
            :,
            decoder_len - 2 : decoder_len,
            : self.prefix_state["prefix_len"] + decoder_len,
        ]
        decoder_pos = decoder_pos_ids[:, decoder_len - 2 : decoder_len]
        outputs = self.bert(
            decoder_token,
            decoder_seg_ids,
            decoder_mask,
            decoder_pos,
            history_states=past_key_values,
        )
        logits = self.cls(outputs[0])
        # Repack everything prepare_inputs_for_generation expects in `past`.
        state4cache = [
            decoder_pos_ids,
            decoder_attn_mask,
            decoder_mask_ids,
            decoder_seg_ids,
        ] + outputs[self.hist_index]
        return Seq2SeqLMOutput(logits=logits, past_key_values=state4cache)

    def generate(
        self,
        tokens_ids,
        num_beams=5,
        decoder_start_token_id=101,
        decoder_end_token_id=102,
        num_return_sequences=1,
        min_gen_seq_length=0,
        max_gen_seq_length=48,
        repetition_penalty=1.0,
        no_repeat_ngram_size=0,
        early_stopping=True,
        length_penalty=1.0,
        num_beam_groups=1,
        diversity_penalty=0.0,
        diverse_rate=0.0,
        do_sample=False,
        temperature=1.0,
        top_k=50,
        top_p=1.0,
    ):
        """
        Args:
            tokens_ids: tokens of encode text

        Returns:
            GenericOutputs with ``sequences`` zero-padded to
            ``max_gen_seq_length`` and their ``sequences_scores``.
        """
        # NOTE(review): `diverse_rate` is accepted but never used below.
        self.num_beams = num_beams
        prefix_token = tokens_ids
        prefix_mask1 = tokens_ids.ne(self.config.pad_token_id).long()
        batch_size, prefix_len = prefix_token.size()
        total_seq_length = max_gen_seq_length + prefix_len + 1
        # Full visibility over the non-pad source prefix...
        prefix_mask = prefix_mask1[:, None, :].repeat(1, total_seq_length, 1)
        # ...plus a lower-triangular (causal) mask over generated positions.
        new_mask = torch.zeros(batch_size, total_seq_length, max_gen_seq_length + 1).to(prefix_mask)
        tri_mask = torch.ones(batch_size, total_seq_length, max_gen_seq_length + 1).to(prefix_mask)
        new_mask[:, prefix_len:, :] = torch.tril(tri_mask[:, prefix_len:, :])
        new_mask[:, :, 0] = 0
        prefix_mask = torch.cat((prefix_mask, new_mask), dim=-1)
        prefix_seg = torch.tensor([self.config.source_type_id] * prefix_len).to(prefix_token)
        prefix_seg = prefix_seg[None, :].repeat(batch_size, 1)
        # Position ids: cumulative count of non-pad tokens, zero-based.
        prefix_pos0 = torch.ones(batch_size, max_gen_seq_length + 1).to(tokens_ids)
        prefix_pos0[:, 0] = 0
        prefix_pos = torch.cat((tokens_ids, prefix_pos0.to(tokens_ids)), dim=-1).ne(self.config.pad_token_id)
        prefix_pos = torch.cumsum(prefix_pos, dim=-1) - 1
        # Stash prefix tensors for prepare_inputs_for_generation/forward.
        self.prefix_state = dict(
            {
                "prefix_len": prefix_len,
                "prefix_token": prefix_token,
                "prefix_seg": prefix_seg,
                "prefix_mask": prefix_mask,
                "prefix_pos": prefix_pos,
            }
        )
        # NOTE(review): decoder_seg / decoder_mask_token are built here but
        # not referenced afterwards; equivalents are rebuilt per step in
        # prepare_inputs_for_generation.
        decoder_seg = (torch.ones(batch_size * self.num_beams, 1) * self.config.target_type_id).to(prefix_token)
        decoder_seg[:, 0] = self.config.source_type_id
        decoder_mask_token = torch.ones(batch_size * self.num_beams, 1).to(prefix_token) * self.config.mask_token_id
        if decoder_start_token_id is not None:
            self.config.bos_token_id = decoder_start_token_id
        decoder_input_ids = torch.ones(batch_size, 1).to(prefix_token) * self.config.bos_token_id
        outputs = super().generate(
            decoder_input_ids,
            max_length=max_gen_seq_length,
            min_length=min_gen_seq_length,
            num_beams=num_beams,
            do_sample=do_sample,
            decoder_start_token_id=decoder_start_token_id,
            no_repeat_ngram_size=no_repeat_ngram_size,
            early_stopping=early_stopping,
            length_penalty=length_penalty,
            repetition_penalty=repetition_penalty,
            num_return_sequences=num_return_sequences,
            bos_token_id=decoder_start_token_id,
            eos_token_id=decoder_end_token_id,
            num_beam_groups=num_beam_groups,
            diversity_penalty=diversity_penalty,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            return_dict_in_generate=True,
            output_scores=True,
        )
        # Pad/reshape to a fixed (batch, num_return, max_gen_seq_length)
        # layout, then squeeze when a single sequence per input is asked.
        sequences = outputs.sequences.reshape(-1, num_return_sequences, outputs.sequences.size(-1))
        outputs.sequences = torch.zeros(sequences.size(0), num_return_sequences, max_gen_seq_length).to(
            device=sequences.device
        )
        outputs.sequences[:, :, : sequences.size(-1)].copy_(sequences)
        if num_return_sequences == 1:
            outputs.sequences = outputs.sequences.reshape(-1, max_gen_seq_length)
        return GenericOutputs(sequences=outputs.sequences, sequences_scores=outputs.sequences_scores)
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
# Build a headless Chrome option set for scraping.
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
# Avoids crashes from the small /dev/shm in containerized environments.
chrome_options.add_argument('--disable-dev-shm-usage')
# NOTE(review): these options are never passed to webdriver.Chrome() in
# scrape_comments below — confirm whether headless mode is actually intended.
import pandas as pd
import time
import csv
from yt_requests import yt_search, yt_comments
def comments_to_csv(query, API_KEY, publishedBefore, publishedAfter, maxResults=49, driver_path="C:/WebDriver/bin/chromedriver.exe", csv_path="./youtube_comments.csv", useAPI=True):
    """
    Search YouTube videos for `query` and write video + comment data to a csv file.

    Parameters
    ----------
    query : string
        The query to search for on YouTube
    API_KEY : string
        The API key to authenticate requests to YouTube Data API v3
    maxResults : int, optional
        The maximum number of videos to scrape
    driver_path : string, optional
        The browser path for Selenium (the default is "C:/WebDriver/bin/chromedriver.exe", which is the typical location for Chrome drivers)
    csv_path : string, optional
        The file path to save csv file (the default is "./youtube_comments.csv")
    useAPI : boolean, optional
        If True (default), fetch comments through the YouTube Data API v3;
        if False, scrape them with Selenium instead.
    """
    videos = request_videos(query, API_KEY, publishedBefore, publishedAfter, maxResults=maxResults)
    if not useAPI:
        scrape_comments(videos, driver_path, csv_path)
    else:
        request_comments(videos, API_KEY, csv_path)
def request_videos(query, API_KEY, publishedBefore, publishedAfter, maxResults=49, driver_path="C:/WebDriver/bin/chromedriver.exe"):
    """
    Search YouTube videos based on the given query and return a list of dictionaries containing url, title, and search query.

    Parameters
    ----------
    query : string
        The query to search for on YouTube
    API_KEY : string
        The API key to authenticate requests to YouTube Data API v3
    driver_path : string, optional
        Unused here; kept for backward compatibility with existing callers.

    Returns
    ----------
    video_list : list of dict
        The list of collected video data, each dictionary with the video's
        url, title, and search query; empty list when the search returned
        no results.

    Notes
    ----------
    For more info on YouTube v3 API, please visit https://developers.google.com/youtube/v3
    """
    video_list = yt_search(query, API_KEY, publishedBefore, publishedAfter, maxResults)
    # Fixed: return an empty list instead of None when there are no results,
    # so callers (request_comments/scrape_comments) can iterate safely.
    if not video_list:
        return []
    # Tag every result with the originating query for the csv output.
    for video in video_list:
        video['query'] = query
    return video_list
def request_comments(video_list, API_KEY, csv_path="../comments.csv", as_df=False):
    """
    Request comment data using the YouTube v3 API, then write video and comment data to a csv file or return as a Pandas DataFrame if
    `as_df` is `True`

    Parameters
    ----------
    video_list : list of dict
        The list of videos to fetch comments
    API_KEY : string
        The API key to authenticate requests to YouTube Data API v3
    csv_path : string, optional
        The location to save the csv file containing comments data
    as_df : boolean, optional
        If True, return data as a Pandas Dataframe (default is False).
    """
    columns = ['query', 'url', 'title', 'upload_date', 'channel', 'views', 'likes', 'dislikes', 'comment_count', 'comment_text', 'comment_author', 'comment_date', 'comment_likes']
    # Fixed: collect rows in a plain list and build the DataFrame once at the
    # end. The previous per-row `df.append` was O(n^2) and DataFrame.append
    # was removed in pandas 2.0.
    rows = []
    for video in video_list:
        # Grab all comments for video
        comments = yt_comments(video['id'], API_KEY)
        # Skip video if comments are disabled
        if not comments:
            continue
        url = "https://www.youtube.com/watch?v=" + video['id']
        for comment in comments:
            rows.append({
                'query': video['query'],
                'url': url,
                'title': video['title'],
                'upload_date': video['date'],
                'channel': video['channel'],
                'views': video['views'],
                'likes': video['likes'],
                'dislikes': video['dislikes'],
                'comment_count': video['comment_count'],
                'comment_text': comment['text'],
                'comment_author': comment['author'],
                'comment_date': comment['date'],
                'comment_likes': comment['likes'],
            })
    df = pd.DataFrame(rows, columns=columns)
    if as_df:
        return df
    df.to_csv(csv_path, encoding="UTF-8", index=False)
    return
def scrape_comments(video_list, driver_path="C:/WebDriver/bin/chromedriver.exe", csv_path="../comments.csv"):
    """
    Scrape YouTube video and comment info using Selenium, then write data to a csv file.

    Parameters
    ----------
    video_list : list of dict
        The list of videos to scrape
    driver_path : string, optional
        The browser path for Selenium (the default is "C:/WebDriver/bin/chromedriver.exe", which is the typical location for Chrome drivers)
    csv_path : string, optional
        The location to save the csv file containing comments data

    Notes
    -----
    Uses the Selenium 3 ``find_elements_by_*`` / ``executable_path`` API,
    which was removed in Selenium 4 — confirm the pinned selenium version.
    """
    # NOTE(review): csv_file and driver are never explicitly closed; data is
    # flushed only on interpreter exit. Consider context managers.
    csv_file = open(csv_path,'w', encoding="UTF-8", newline="")
    writer = csv.writer(csv_file)
    writer.writerow(['query', 'url', 'title', 'upload_date', 'channel', 'no_of_views', 'likes', 'dislikes', 'comment', 'author', 'comment_date', 'no_of_replies','upvotes'])
    # NOTE(review): the module-level chrome_options are not passed here, so
    # the browser runs non-headless.
    driver = webdriver.Chrome(executable_path=driver_path)
    for video in video_list:
        url = video['url']
        title = video['title']
        upload_date = video['date']
        query = video['query']
        # Scrape basic video data
        print("=" * 40)
        print("video title : ", title)
        driver.get(url)
        v_channel = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,"div#upload-info yt-formatted-string"))).text
        print("channel : ",v_channel)
        v_views = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,"div#count span.view-count"))).text
        print("no. of views : ",v_views)
        v_timeUploaded = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,"div#date yt-formatted-string"))).text
        print("time uploaded : ",v_timeUploaded)
        # Wait for the like/dislike toolbar, then read both counters.
        w = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,"div#top-level-buttons yt-formatted-string")))
        w = driver.find_elements_by_css_selector("div#top-level-buttons yt-formatted-string")
        v_likes = w[0].text
        v_dislikes = w[1].text
        print("video has ", v_likes, "likes and ", v_dislikes, " dislikes")
        youtube_dict ={}
        print("+" * 40)
        print("Scraping child links ")
        # Load comments section
        driver.execute_script('window.scrollTo(0,390);')
        time.sleep(2)
        try:
            # Sort by top comments
            print("sorting by top comments")
            sort= WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,"div#icon-label")))
            sort.click()
            topcomments =driver.find_element_by_xpath("""//*[@id="menu"]/a[1]/paper-item/paper-item-body/div[1]""")
            topcomments.click()
            # Loads more comments
            for i in range(0,5):
                driver.execute_script("window.scrollTo(0,Math.max(document.documentElement.scrollHeight,document.body.scrollHeight,document.documentElement.clientHeight))")
                print("scrolling to load more comments")
                time.sleep(4)
            # Count total number of comments and set index to number of comments if less than 100 otherwise set as 100.
            totalcomments= len(driver.find_elements_by_xpath("""//*[@id="content-text"]"""))
            if totalcomments < 100:
                index= totalcomments
            else:
                index= 100
            # Loop through each comment and scrape info; each field falls
            # back to "" when the element is missing for this comment.
            print("scraping through comments")
            ccount = 0
            while ccount < index:
                try:
                    comment = driver.find_elements_by_xpath('//*[@id="content-text"]')[ccount].text
                except:
                    comment = ""
                try:
                    authors = driver.find_elements_by_xpath('//a[@id="author-text"]/span')[ccount].text
                except:
                    authors = ""
                try:
                    comment_date = driver.find_elements_by_xpath('//*[@id="published-time-text"]/a')[ccount].text
                except:
                    comment_date = ""
                try:
                    # Normalize "View N replies" / "View reply" to a count.
                    replies = driver.find_elements_by_xpath('//*[@id="more-text"]')[ccount].text
                    if replies =="View reply":
                        replies= 1
                    else:
                        replies =replies.replace("View ","")
                        replies =replies.replace(" replies","")
                except:
                    replies = ""
                try:
                    upvotes = str(driver.find_elements_by_xpath('//*[@id="vote-count-middle"]')[ccount].text)
                except:
                    upvotes = ""
                # Write scraped data to csv file
                youtube_dict['query'] = query
                youtube_dict['url'] = url
                youtube_dict['title'] = title
                youtube_dict['upload_date'] = upload_date
                youtube_dict['channel'] = v_channel
                youtube_dict['no_of_views'] = v_views
                youtube_dict['likes'] = v_likes
                youtube_dict['dislikes'] = v_dislikes
                youtube_dict['comment'] = comment
                youtube_dict['author'] = authors
                youtube_dict['comment_date'] = comment_date
                youtube_dict['no_of_replies'] = replies
                youtube_dict['upvotes'] = upvotes
                writer.writerow(youtube_dict.values())
                ccount = ccount + 1
        # If video errors out, move onto the next one
        except TimeoutException as e:
            print(title, " errored out: ",str(e))
print("moving onto next video") | youcos/scrape_youtube.py | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
# Build a headless Chrome option set for scraping.
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
# Avoids crashes from the small /dev/shm in containerized environments.
chrome_options.add_argument('--disable-dev-shm-usage')
# NOTE(review): these options are never passed to webdriver.Chrome() in
# scrape_comments below — confirm whether headless mode is actually intended.
import pandas as pd
import time
import csv
from yt_requests import yt_search, yt_comments
def comments_to_csv(query, API_KEY, publishedBefore, publishedAfter, maxResults=49, driver_path="C:/WebDriver/bin/chromedriver.exe", csv_path="./youtube_comments.csv", useAPI=True):
    """
    Search YouTube videos for `query` and write video + comment data to a csv file.

    Parameters
    ----------
    query : string
        The query to search for on YouTube
    API_KEY : string
        The API key to authenticate requests to YouTube Data API v3
    maxResults : int, optional
        The maximum number of videos to scrape
    driver_path : string, optional
        The browser path for Selenium (the default is "C:/WebDriver/bin/chromedriver.exe", which is the typical location for Chrome drivers)
    csv_path : string, optional
        The file path to save csv file (the default is "./youtube_comments.csv")
    useAPI : boolean, optional
        If True (default), fetch comments through the YouTube Data API v3;
        if False, scrape them with Selenium instead.
    """
    videos = request_videos(query, API_KEY, publishedBefore, publishedAfter, maxResults=maxResults)
    if not useAPI:
        scrape_comments(videos, driver_path, csv_path)
    else:
        request_comments(videos, API_KEY, csv_path)
def request_videos(query, API_KEY, publishedBefore, publishedAfter, maxResults=49, driver_path="C:/WebDriver/bin/chromedriver.exe"):
    """
    Search YouTube videos based on the given query and return a list of dictionaries containing url, title, and search query.

    Parameters
    ----------
    query : string
        The query to search for on YouTube
    API_KEY : string
        The API key to authenticate requests to YouTube Data API v3
    driver_path : string, optional
        Unused here; kept for backward compatibility with existing callers.

    Returns
    ----------
    video_list : list of dict
        The list of collected video data, each dictionary with the video's
        url, title, and search query; empty list when the search returned
        no results.

    Notes
    ----------
    For more info on YouTube v3 API, please visit https://developers.google.com/youtube/v3
    """
    video_list = yt_search(query, API_KEY, publishedBefore, publishedAfter, maxResults)
    # Fixed: return an empty list instead of None when there are no results,
    # so callers (request_comments/scrape_comments) can iterate safely.
    if not video_list:
        return []
    # Tag every result with the originating query for the csv output.
    for video in video_list:
        video['query'] = query
    return video_list
def request_comments(video_list, API_KEY, csv_path="../comments.csv", as_df=False):
    """
    Request comment data using the YouTube v3 API, then write video and comment data to a csv file or return as a Pandas DataFrame if
    `as_df` is `True`

    Parameters
    ----------
    video_list : list of dict
        The list of videos to fetch comments
    API_KEY : string
        The API key to authenticate requests to YouTube Data API v3
    csv_path : string, optional
        The location to save the csv file containing comments data
    as_df : boolean, optional
        If True, return data as a Pandas Dataframe (default is False).
    """
    columns = ['query', 'url', 'title', 'upload_date', 'channel', 'views', 'likes', 'dislikes', 'comment_count', 'comment_text', 'comment_author', 'comment_date', 'comment_likes']
    # Fixed: collect rows in a plain list and build the DataFrame once at the
    # end. The previous per-row `df.append` was O(n^2) and DataFrame.append
    # was removed in pandas 2.0.
    rows = []
    for video in video_list:
        # Grab all comments for video
        comments = yt_comments(video['id'], API_KEY)
        # Skip video if comments are disabled
        if not comments:
            continue
        url = "https://www.youtube.com/watch?v=" + video['id']
        for comment in comments:
            rows.append({
                'query': video['query'],
                'url': url,
                'title': video['title'],
                'upload_date': video['date'],
                'channel': video['channel'],
                'views': video['views'],
                'likes': video['likes'],
                'dislikes': video['dislikes'],
                'comment_count': video['comment_count'],
                'comment_text': comment['text'],
                'comment_author': comment['author'],
                'comment_date': comment['date'],
                'comment_likes': comment['likes'],
            })
    df = pd.DataFrame(rows, columns=columns)
    if as_df:
        return df
    df.to_csv(csv_path, encoding="UTF-8", index=False)
    return
def scrape_comments(video_list, driver_path="C:/WebDriver/bin/chromedriver.exe", csv_path="../comments.csv"):
    """
    Scrape YouTube video and comment info using Selenium, then write data to a csv file.
    Parameters
    ----------
    video_list : list of dict
        The list of videos to scrape
    driver_path : string, optional
        The browser path for Selenium (the default is "C:/WebDriver/bin/chromedriver.exe", which is the typical location for Chrome drivers)
    csv_path : string, optional
        The location to save the csv file containing comments data
    """
    # NOTE(review): csv_file and driver are never closed/quit on any path;
    # consider try/finally or context managers.
    csv_file = open(csv_path,'w', encoding="UTF-8", newline="")
    writer = csv.writer(csv_file)
    writer.writerow(['query', 'url', 'title', 'upload_date', 'channel', 'no_of_views', 'likes', 'dislikes', 'comment', 'author', 'comment_date', 'no_of_replies','upvotes'])
    # Selenium 3 style construction (executable_path is deprecated in Selenium 4).
    driver = webdriver.Chrome(executable_path=driver_path)
    for video in video_list:
        url = video['url']
        title = video['title']
        upload_date = video['date']
        query = video['query']
        # Scrape basic video data
        print("=" * 40)
        print("video title : ", title)
        driver.get(url)
        # Wait up to 10s each for channel name, view count and upload date to render.
        v_channel = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,"div#upload-info yt-formatted-string"))).text
        print("channel : ",v_channel)
        v_views = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,"div#count span.view-count"))).text
        print("no. of views : ",v_views)
        v_timeUploaded = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,"div#date yt-formatted-string"))).text
        print("time uploaded : ",v_timeUploaded)
        # Wait for the like/dislike buttons, then read both counts (w[0]=likes, w[1]=dislikes).
        w = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,"div#top-level-buttons yt-formatted-string")))
        w = driver.find_elements_by_css_selector("div#top-level-buttons yt-formatted-string")
        v_likes = w[0].text
        v_dislikes = w[1].text
        print("video has ", v_likes, "likes and ", v_dislikes, " dislikes")
        youtube_dict ={}
        print("+" * 40)
        print("Scraping child links ")
        # Load comments section
        driver.execute_script('window.scrollTo(0,390);')
        time.sleep(2)
        try:
            # Sort by top comments
            print("sorting by top comments")
            sort= WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,"div#icon-label")))
            sort.click()
            topcomments =driver.find_element_by_xpath("""//*[@id="menu"]/a[1]/paper-item/paper-item-body/div[1]""")
            topcomments.click()
            # Loads more comments
            for i in range(0,5):
                driver.execute_script("window.scrollTo(0,Math.max(document.documentElement.scrollHeight,document.body.scrollHeight,document.documentElement.clientHeight))")
                print("scrolling to load more comments")
                time.sleep(4)
            # Count total number of comments and set index to number of comments if less than 100 otherwise set as 100.
            totalcomments= len(driver.find_elements_by_xpath("""//*[@id="content-text"]"""))
            if totalcomments < 100:
                index= totalcomments
            else:
                index= 100
            # Loop through each comment and scrape info
            print("scraping through comments")
            ccount = 0
            while ccount < index:
                # Each field is fetched independently; a missing element yields "".
                try:
                    comment = driver.find_elements_by_xpath('//*[@id="content-text"]')[ccount].text
                except:
                    comment = ""
                try:
                    authors = driver.find_elements_by_xpath('//a[@id="author-text"]/span')[ccount].text
                except:
                    authors = ""
                try:
                    comment_date = driver.find_elements_by_xpath('//*[@id="published-time-text"]/a')[ccount].text
                except:
                    comment_date = ""
                try:
                    # "View reply" means exactly one reply; otherwise parse the
                    # count out of "View N replies".
                    replies = driver.find_elements_by_xpath('//*[@id="more-text"]')[ccount].text
                    if replies =="View reply":
                        replies= 1
                    else:
                        replies =replies.replace("View ","")
                        replies =replies.replace(" replies","")
                except:
                    replies = ""
                try:
                    upvotes = str(driver.find_elements_by_xpath('//*[@id="vote-count-middle"]')[ccount].text)
                except:
                    upvotes = ""
                # Write scraped data to csv file
                # NOTE(review): writerow(dict.values()) relies on insertion order
                # matching the header row (guaranteed on Python 3.7+).
                youtube_dict['query'] = query
                youtube_dict['url'] = url
                youtube_dict['title'] = title
                youtube_dict['upload_date'] = upload_date
                youtube_dict['channel'] = v_channel
                youtube_dict['no_of_views'] = v_views
                youtube_dict['likes'] = v_likes
                youtube_dict['dislikes'] = v_dislikes
                youtube_dict['comment'] = comment
                youtube_dict['author'] = authors
                youtube_dict['comment_date'] = comment_date
                youtube_dict['no_of_replies'] = replies
                youtube_dict['upvotes'] = upvotes
                writer.writerow(youtube_dict.values())
                ccount = ccount + 1
        # If video errors out, move onto the next one
        except TimeoutException as e:
            print(title, " errored out: ",str(e))
print("moving onto next video") | 0.736401 | 0.213603 |
# NOTE: Python 2 script (print statements below).
import numpy as np
import matplotlib.pyplot as plt
from gdal import Open as OpenGdal
#-------------------------------------------------------------------------------
# VIIRS nighttime-lights annual composite (2015); {} selects the cloud-mask
# variant listed below.
fn0 = '../data/SVDNB_npp_20150101-20151231_75N060W_{}_v10_c201701311200.avg_rade9.tif'
# vcm - viirs cloud mask
# vcm-orm = outlier removed
# vcm-ntl = background (non-lights) removed
# vcm-orm-ntl = both
gd = OpenGdal(fn0.format('vcm-orm-ntl'))
print gd.RasterXSize, gd.RasterYSize
gt = gd.GetGeoTransform()
#-------------------------------------------------------------------------------
# Convert between raster pixel (x, y) and geographic (lat f, lon l) using the
# GDAL geotransform g = (lon0, dlon, 0, lat0, 0, dlat); rotation terms g[2]/g[4]
# are ignored -- assumes a north-up raster.
xy2geo = lambda g,x,y: (g[3] + g[5] * y, g[0] + g[1] * x)
geo2xy = lambda g,f,l: ((l - g[0]) / g[1], (f - g[3]) / g[5])
#-------------------------------------------------------------------------------
# geo0 = 51.11, 17.03 # wroclaw
# geo0 = 52.22, 21.01 # warszawa
# geo0 = 50.816, 15.383 # Orle
# geo0 = 50.846015, 16.698650 # tapadla
geo0 = 50.995681, 16.901729
geor = 0.6
# Window corners: (f0, l0) = north-west, (f1, l1) = south-east.
f0, l0 = geo0[0] + geor, geo0[1] - geor
f1, l1 = geo0[0] - geor, geo0[1] + geor
#-------------------------------------------------------------------------------
x0,y0 = geo2xy(gt, f0, l0)
x1,y1 = geo2xy(gt, f1, l1)
x0,y0,x1,y1 = [ int(round(x)) for x in (x0,y0,x1,y1) ]
print x0, y0
print x1, y1
print x1-x0, y1-y0
#-------------------------------------------------------------------------------
fig, axes = plt.subplots(1, 2, figsize = (12,6))
#-------------------------------------------------------------------------------
# Left panel: sqrt-scaled radiance for contrast.
map_i = np.ndarray((x1-x0, y1-y0), order = 'F', dtype = np.float32)
print map_i.flags.f_contiguous
gd.ReadAsArray(x0, y0, x1-x0, y1-y0, buf_obj = map_i)
axes[0].imshow(np.sqrt(map_i),
    extent = [l0,l1,f1,f0],
    interpolation = 'none', cmap = 'hot')
#-------------------------------------------------------------------------------
# Right panel: EU-DEM elevation over the same geographic window.
gd1 = OpenGdal('../data/eudem_dem_5deg_n50e015.tif')
gt1 = gd1.GetGeoTransform()
x0,y0 = geo2xy(gt1, f0, l0)
x1,y1 = geo2xy(gt1, f1, l1)
x0,y0,x1,y1 = [ int(round(x)) for x in (x0,y0,x1,y1) ]
#-------------------------------------------------------------------------------
map_h = np.ndarray((x1-x0, y1-y0), order = 'F', dtype = np.float32)
gd1.ReadAsArray(x0, y0, x1-x0, y1-y0, buf_obj = map_h)
print map_h.flags.f_contiguous
axes[1].imshow(map_h,
    extent = [l0,l1,f1,f0],
    interpolation = 'none', cmap = 'BrBG_r')
#-------------------------------------------------------------------------------
plt.show() | sketches/examplemaps.py | import numpy as np
import matplotlib.pyplot as plt
from gdal import Open as OpenGdal
#-------------------------------------------------------------------------------
fn0 = '../data/SVDNB_npp_20150101-20151231_75N060W_{}_v10_c201701311200.avg_rade9.tif'
# vcm - viirs cloud mask
# vcm-orm = outlier removed
# vcm-ntl = background (non-lights) removed
# vcm-orm-ntl = both
gd = OpenGdal(fn0.format('vcm-orm-ntl'))
print gd.RasterXSize, gd.RasterYSize
gt = gd.GetGeoTransform()
#-------------------------------------------------------------------------------
xy2geo = lambda g,x,y: (g[3] + g[5] * y, g[0] + g[1] * x)
geo2xy = lambda g,f,l: ((l - g[0]) / g[1], (f - g[3]) / g[5])
#-------------------------------------------------------------------------------
# geo0 = 51.11, 17.03 # wroclaw
# geo0 = 52.22, 21.01 # warszawa
# geo0 = 50.816, 15.383 # Orle
# geo0 = 50.846015, 16.698650 # tapadla
geo0 = 50.995681, 16.901729
geor = 0.6
f0, l0 = geo0[0] + geor, geo0[1] - geor
f1, l1 = geo0[0] - geor, geo0[1] + geor
#-------------------------------------------------------------------------------
x0,y0 = geo2xy(gt, f0, l0)
x1,y1 = geo2xy(gt, f1, l1)
x0,y0,x1,y1 = [ int(round(x)) for x in (x0,y0,x1,y1) ]
print x0, y0
print x1, y1
print x1-x0, y1-y0
#-------------------------------------------------------------------------------
fig, axes = plt.subplots(1, 2, figsize = (12,6))
#-------------------------------------------------------------------------------
map_i = np.ndarray((x1-x0, y1-y0), order = 'F', dtype = np.float32)
print map_i.flags.f_contiguous
gd.ReadAsArray(x0, y0, x1-x0, y1-y0, buf_obj = map_i)
axes[0].imshow(np.sqrt(map_i),
extent = [l0,l1,f1,f0],
interpolation = 'none', cmap = 'hot')
#-------------------------------------------------------------------------------
gd1 = OpenGdal('../data/eudem_dem_5deg_n50e015.tif')
gt1 = gd1.GetGeoTransform()
x0,y0 = geo2xy(gt1, f0, l0)
x1,y1 = geo2xy(gt1, f1, l1)
x0,y0,x1,y1 = [ int(round(x)) for x in (x0,y0,x1,y1) ]
#-------------------------------------------------------------------------------
map_h = np.ndarray((x1-x0, y1-y0), order = 'F', dtype = np.float32)
gd1.ReadAsArray(x0, y0, x1-x0, y1-y0, buf_obj = map_h)
print map_h.flags.f_contiguous
axes[1].imshow(map_h,
extent = [l0,l1,f1,f0],
interpolation = 'none', cmap = 'BrBG_r')
#-------------------------------------------------------------------------------
plt.show() | 0.245447 | 0.28245 |
from requests import Session
if __name__ == '__main__':
    # Smoke-test for the bootstrap server: a few GETs, then register four
    # public keys twice each to exercise the duplicate-registration path.
    s = Session()
    s.trust_env = False  # ignore proxy-related environment variables
    base_url = 'http://127.0.0.1:8008/'
    for _ in range(3):
        resp = s.get(base_url)
        print(resp.text)
    pubkeys = ["0x01000000001", "0x01000000002", "0x01000000003", "0x01000000004"]
    # Same request sequence as before: keys 1-4, then keys 1-4 again.
    for pubkey in pubkeys * 2:
        resp = s.post(base_url, json={"networkid": 1, "publickey": pubkey, "partners": 4,
                                      "onode": "onode://d2429ee@192.168.3.11:8001"})
        print(resp.text)
print(resp.text) | scripts/boostrap_server/run_test.py | from requests import Session
if __name__ == '__main__':
s = Session()
s.trust_env = False
resp = s.get('http://127.0.0.1:8008/')
print(resp.text)
resp = s.get('http://127.0.0.1:8008/')
print(resp.text)
resp = s.get('http://127.0.0.1:8008/')
print(resp.text)
resp = s.post('http://127.0.0.1:8008/', json={"networkid": 1, "publickey": "0x01000000001", "partners": 4,
"onode": "onode://d2429ee@192.168.3.11:8001"})
print(resp.text)
resp = s.post('http://127.0.0.1:8008/', json={"networkid": 1, "publickey": "0x01000000002", "partners": 4,
"onode": "onode://d2429ee@192.168.3.11:8001"})
print(resp.text)
resp = s.post('http://127.0.0.1:8008/', json={"networkid": 1, "publickey": "0x01000000003", "partners": 4,
"onode": "onode://d2429ee@192.168.3.11:8001"})
print(resp.text)
resp = s.post('http://127.0.0.1:8008/', json={"networkid": 1, "publickey": "0x01000000004", "partners": 4,
"onode": "onode://d2429ee@192.168.3.11:8001"})
print(resp.text)
resp = s.post('http://127.0.0.1:8008/', json={"networkid": 1, "publickey": "0x01000000001", "partners": 4,
"onode": "onode://d2429ee@192.168.3.11:8001"})
print(resp.text)
resp = s.post('http://127.0.0.1:8008/', json={"networkid": 1, "publickey": "0x01000000002", "partners": 4,
"onode": "onode://d2429ee@192.168.3.11:8001"})
print(resp.text)
resp = s.post('http://127.0.0.1:8008/', json={"networkid": 1, "publickey": "0x01000000003", "partners": 4,
"onode": "onode://d2429ee@192.168.3.11:8001"})
print(resp.text)
resp = s.post('http://127.0.0.1:8008/', json={"networkid": 1, "publickey": "0x01000000004", "partners": 4,
"onode": "onode://d2429ee@192.168.3.11:8001"})
print(resp.text) | 0.278355 | 0.135747 |
import re
from ply.lex import lex
from errors import error
# Token names exported to the ply-generated lexer/parser.
tokens = [
    'ARROW', 'ASSIGN', 'COLON', 'COMMA', 'CONST', 'DEF', 'DIVIDE', 'ELSE', 'EQ',
    'FALSE', 'FLOAT', 'FOR', 'FOREIGN', 'GE', 'GT', 'ID', 'IF', 'IN', 'INTEGER',
    'LAND', 'LBRACE', 'LBRACKET', 'LE', 'LNOT', 'LOR', 'LPAREN', 'LT', 'MINUS',
    'NE', 'PLUS', 'PRINT', 'RANGE', 'RBRACE', 'RBRACKET', 'RETURN', 'RPAREN',
    'SEMI', 'STRING', 'TIMES', 'TRUE', 'VAR', 'WHILE'
]
# Keywords: t_ID reclassifies a matched identifier whose text is in this set.
reserved = {
    'False',
    'True',
    # --
    'const',
    'def',
    'else',
    'for',
    'foreign',
    'if',
    'in',
    'print',
    'range',
    'return',
    'var',
    'while'
}
# One escape sequence: either \bXX (two hex digits) or a backslash + one char.
_escapes_re = r'(\\b[0-9a-fA-F]{2})|(\\.)'
_escape_map = {
    r'\n' : '\n', # newline
    r'\t' : '\t', # tab
    r'\r' : '\r', # carriage return
    r'\\' : '\\', # backslash
    r'\"' : '"', # quote
}
_escape_pat = re.compile(_escapes_re)
# Characters silently skipped between tokens (newlines handled by t_newline).
t_ignore = ' \t\r'
# Simple string token rules. ply sorts these by decreasing pattern length when
# building the master regex, so '<=' wins over '<', '==' over '=', etc.
# NOTE(review): identifiers are matched by the t_ID function rule, which
# reclassifies reserved words itself -- t_TRUE/t_FALSE look redundant; confirm.
t_TRUE = r'True'
t_FALSE = r'False'
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_ASSIGN = r'='
t_SEMI = r'\;'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_COMMA = r','
t_COLON = r':'
t_ARROW = r'->'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
t_LAND = r'&&'
t_LOR = r'\|\|'
t_LNOT = r'!'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
def t_FLOAT(t):
    r'(([0-9]+(\.[0-9]*)?[eE][\+-]?[0-9]+)|(\.[0-9]+([eE][\+-]?[0-9]+)?)|([0-9]+\.[0-9]*))'
    # Accepts exponent forms (1e5, 1.5e-3), leading-dot (.5) and trailing-dot (1.).
    t.value = float(t.value)
    return t
def t_INTEGER(t):
    r'0[xX][0-9a-fA-F]+|0[0-7]*|[1-9]\d*'
    # Fixed pattern: the old regex (0|0x|0X)?\d+ could not match hex digits
    # a-f (so 0x1F lexed as 0x1 + ID F) and accepted digits 8/9 after a
    # leading 0, making int(value, 8) raise ValueError.
    if t.value.startswith(('0x','0X')):
        t.value = int(t.value,16)
    elif t.value.startswith('0'):
        # A lone '0' also lands here; int('0', 8) == 0, which is correct.
        t.value = int(t.value,8)
    else:
        t.value = int(t.value)
    return t
def t_STRING(t):
    r'\"((\\.)|[^\\\n])*?\"'
    # Strip the surrounding quotes, then expand escape sequences in place.
    t.value = t.value[1:-1]
    _escape_token(t)
    return t
def t_ID(t):
    r'[a-zA-Z_][a-zA-Z0-9_]*'
    # Keywords are lexed as identifiers and reclassified here (e.g. 'if' -> IF).
    if t.value in reserved:
        t.type = t.value.upper()
    return t
#------------------------------------------------------------------------
def t_newline(t):
    r'\n+'
    # Track line numbers for error reporting; no token is produced.
    t.lexer.lineno += len(t.value)
def t_COMMENT(t):
    r'\#.*'
    # '#' comments run to end of line and are discarded.
    # NOTE(review): the pattern cannot match '\n', so this count is always 0.
    t.lexer.lineno += t.value.count('\n')
def t_error(t):
    # Report an unrecognized character and resynchronize by skipping it.
    error(t.lexer.lineno,"Illegal character %r" % t.value[0])
    t.lexer.skip(1)
#------------------------------------------------------------------------
# String Escaping
#------------------------------------------------------------------------
class Unescaped(Exception):
    """Raised when a string literal contains an unknown escape sequence."""
    pass
def escape_token(m):
    """Translate one escape-sequence regex match into its character.

    Handles \\bXX hex-byte escapes and the fixed escapes in _escape_map;
    raises Unescaped for anything else.
    """
    escape_code = m.group()
    # \bXX: two hex digits encode an arbitrary byte value.
    if escape_code[0:2] == '\\b' and len(escape_code) == 4:
        return chr(int(escape_code[2:],16))
    if escape_code in _escape_map:
        return _escape_map[escape_code]
    else:
        # Was `raise Unescaped, escape_code` -- Python-2-only syntax; the
        # call form below is valid on both Python 2 and 3.
        raise Unescaped(escape_code)
def _escape_token(t):
    """Expand escape sequences in a STRING token's value, in place.

    On an unknown escape, reports a syntax error via error() and returns the
    offending escape code instead of propagating the exception.
    """
    try:
        t.value = _escape_pat.sub(escape_token, t.value)
    except Unescaped as e:
        escape_code = e.args[0]
        error(t.lexer.lineno,"Syntax Error: Unescaped sequence '%s'" % escape_code)
        return escape_code
#------------------------------------------------------------------------
# Toplevel
#------------------------------------------------------------------------
def make_lexer():
    '''
    Utility function for making the lexer object
    '''
    return lex()
#------------------------------------------------------------------------
# --ddump-lex
#------------------------------------------------------------------------
def ddump_lex(source):
    # Debug helper: tokenize `source` and print one token per line.
    import sys
    import errors
    lexer = make_lexer()
    lexer.input(source)
    with errors.listen():
        # lexer.token() returns None at end of input.
        for tok in iter(lexer.token,None):
            sys.stdout.write("%s\n" % tok)
#------------------------------------------------------------------------
if __name__ == '__main__':
    import sys
    if len(sys.argv) != 2:
        sys.stderr.write("Usage: %s filename\n" % sys.argv[0])
        raise SystemExit(1)
    source = open(sys.argv[1]).read()
ddump_lex(source) | blaze/blir/lexer.py | import re
from ply.lex import lex
from errors import error
tokens = [
'ARROW', 'ASSIGN', 'COLON', 'COMMA', 'CONST', 'DEF', 'DIVIDE', 'ELSE', 'EQ',
'FALSE', 'FLOAT', 'FOR', 'FOREIGN', 'GE', 'GT', 'ID', 'IF', 'IN', 'INTEGER',
'LAND', 'LBRACE', 'LBRACKET', 'LE', 'LNOT', 'LOR', 'LPAREN', 'LT', 'MINUS',
'NE', 'PLUS', 'PRINT', 'RANGE', 'RBRACE', 'RBRACKET', 'RETURN', 'RPAREN',
'SEMI', 'STRING', 'TIMES', 'TRUE', 'VAR', 'WHILE'
]
reserved = {
'False',
'True',
# --
'const',
'def',
'else',
'for',
'foreign',
'if',
'in',
'print',
'range',
'return',
'var',
'while'
}
_escapes_re = r'(\\b[0-9a-fA-F]{2})|(\\.)'
_escape_map = {
r'\n' : '\n', # newline
r'\t' : '\t', # tab
r'\r' : '\r', # carriage return
r'\\' : '\\', # backslash
r'\"' : '"', # quote
}
_escape_pat = re.compile(_escapes_re)
t_ignore = ' \t\r'
t_TRUE = r'True'
t_FALSE = r'False'
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_ASSIGN = r'='
t_SEMI = r'\;'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_COMMA = r','
t_COLON = r':'
t_ARROW = r'->'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
t_LAND = r'&&'
t_LOR = r'\|\|'
t_LNOT = r'!'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
def t_FLOAT(t):
r'(([0-9]+(\.[0-9]*)?[eE][\+-]?[0-9]+)|(\.[0-9]+([eE][\+-]?[0-9]+)?)|([0-9]+\.[0-9]*))'
t.value = float(t.value)
return t
def t_INTEGER(t):
    r'0[xX][0-9a-fA-F]+|0[0-7]*|[1-9]\d*'
    # Fixed pattern: the old regex (0|0x|0X)?\d+ could not match hex digits
    # a-f (so 0x1F lexed as 0x1 + ID F) and accepted digits 8/9 after a
    # leading 0, making int(value, 8) raise ValueError.
    if t.value.startswith(('0x','0X')):
        t.value = int(t.value,16)
    elif t.value.startswith('0'):
        # A lone '0' also lands here; int('0', 8) == 0, which is correct.
        t.value = int(t.value,8)
    else:
        t.value = int(t.value)
    return t
def t_STRING(t):
r'\"((\\.)|[^\\\n])*?\"'
t.value = t.value[1:-1]
_escape_token(t)
return t
def t_ID(t):
r'[a-zA-Z_][a-zA-Z0-9_]*'
if t.value in reserved:
t.type = t.value.upper()
return t
#------------------------------------------------------------------------
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_COMMENT(t):
r'\#.*'
t.lexer.lineno += t.value.count('\n')
def t_error(t):
error(t.lexer.lineno,"Illegal character %r" % t.value[0])
t.lexer.skip(1)
#------------------------------------------------------------------------
# String Escaping
#------------------------------------------------------------------------
class Unescaped(Exception):
    """Raised when a string literal contains an unknown escape sequence."""
    pass
def escape_token(m):
    """Translate one escape-sequence regex match into its character.

    Handles \\bXX hex-byte escapes and the fixed escapes in _escape_map;
    raises Unescaped for anything else.
    """
    escape_code = m.group()
    # \bXX: two hex digits encode an arbitrary byte value.
    if escape_code[0:2] == '\\b' and len(escape_code) == 4:
        return chr(int(escape_code[2:],16))
    if escape_code in _escape_map:
        return _escape_map[escape_code]
    else:
        # Was `raise Unescaped, escape_code` -- Python-2-only syntax; the
        # call form below is valid on both Python 2 and 3.
        raise Unescaped(escape_code)
def _escape_token(t):
    """Expand escape sequences in a STRING token's value, in place.

    On an unknown escape, reports a syntax error via error() and returns the
    offending escape code instead of propagating the exception.
    """
    try:
        t.value = _escape_pat.sub(escape_token, t.value)
    except Unescaped as e:
        escape_code = e.args[0]
        error(t.lexer.lineno,"Syntax Error: Unescaped sequence '%s'" % escape_code)
        return escape_code
#------------------------------------------------------------------------
# Toplevel
#------------------------------------------------------------------------
def make_lexer():
'''
Utility function for making the lexer object
'''
return lex()
#------------------------------------------------------------------------
# --ddump-lex
#------------------------------------------------------------------------
def ddump_lex(source):
import sys
import errors
lexer = make_lexer()
lexer.input(source)
with errors.listen():
for tok in iter(lexer.token,None):
sys.stdout.write("%s\n" % tok)
#------------------------------------------------------------------------
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
sys.stderr.write("Usage: %s filename\n" % sys.argv[0])
raise SystemExit(1)
source = open(sys.argv[1]).read()
ddump_lex(source) | 0.243193 | 0.219986 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from scipy import signal
from scipy import linalg as la
from src.models.sequence.rnns.cells.basic import RNNCell
from src.models.nn.components import LinearActivation, Activation # , get_initializer
from src.models.nn.gate import Gate
# Accepted spellings for each ODE-discretization scheme (used by LTICell/LSICell).
forward_aliases = ['euler', 'forward_euler', 'forward', 'forward_diff']
backward_aliases = ['backward', 'backward_diff', 'backward_euler']
bilinear_aliases = ['bilinear', 'tustin', 'trapezoidal', 'trapezoid']
zoh_aliases = ['zoh']
class MemoryCell(RNNCell):
    """ This class handles the general architectural wiring of the HiPPO-RNN, in particular the interaction between the hidden state and the linear memory state.
    Specific variants can be instantiated by subclassing this with an appropriately defined update_memory() method.
    """
    name = None
    # NOTE(review): this line was corrupted in the source ("['<KEY> 'uh', ..." --
    # a SyntaxError); restored to the plausible original -- confirm upstream.
    valid_keys = ['uxh', 'ux', 'uh', 'um', 'hxm', 'hx', 'hm', 'hh', 'bias', ]
    @property
    def default_initializers(self):
        # Default weight initializers per connection (keys from valid_keys).
        return {
            'uxh': 'uniform',
            'hxm': 'xavier',
            'um': 'zero',
            'hh': 'xavier',
        }
    @property
    def default_architecture(self):
        # Which connections exist: input->memory (ux), input->hidden (hx),
        # memory->hidden (hm), hidden->hidden (hh), plus bias terms.
        return {
            'ux': True,
            'hx': True,
            'hm': True,
            'hh': False,
            'bias': True,
        }
    def __init__(
        self, d_input, d_model, memory_size, memory_order,
        memory_activation='id',
        gate='G', # 'N' | 'G' | 'UR'
        **kwargs
    ):
        self.memory_size = memory_size
        self.memory_order = memory_order
        self.memory_activation = memory_activation
        self.gate = gate
        super(MemoryCell, self).__init__(d_input, d_model, **kwargs)
        # Effective input widths, zeroed out when the connection is disabled.
        self.input_to_d_model = self.d_input if self.architecture['hx'] else 0
        self.input_to_memory_size = self.d_input if self.architecture['ux'] else 0
        # Hidden to memory
        self.W_uxh = LinearActivation(
            self.input_to_memory_size + self.d_model,
            self.memory_size,
            bias=self.architecture['bias'],
            initializer=self.initializers['uxh'],
            activation=self.memory_activation,
            activate=True,
        )
        self.memory_to_d_model = self.memory_size * self.memory_order if self.architecture['hm'] else 0
        # Memory to hidden
        self.W_hxm = LinearActivation(
            self.input_to_d_model + self.memory_to_d_model,
            self.d_model,
            self.architecture['bias'],
            initializer=self.initializers['hxm'],
            activation=self.hidden_activation,
            activate=False,
        )
        if self.architecture['hh']:
            self.reset_hidden_to_hidden()
        else:
            self.W_hh = None
        # Construct gate with options
        if self.gate is not None:
            preact_ctor = LinearActivation
            preact_args = [
                self.input_to_d_model + self.memory_to_d_model,
                self.d_model,
                self.architecture['bias'],
            ]
            if self.architecture['hh']:
                print("input to hidden size, memory to hidden size, hidden size:", self.input_to_d_model, self.memory_to_d_model, self.d_model)
                preact_args[0] += self.d_model
            self.W_gxm = Gate(self.d_model, preact_ctor, preact_args, mechanism=self.gate)
    def reset_parameters(self):
        # super().reset_parameters() # TODO find a way to refactor to call super()
        self.activate = Activation(self.hidden_activation, self.d_model)
    def forward(self, input, state):
        h, m, time_step = state
        # Update the memory
        u = self.forward_memory(input, h, m)
        m = self.update_memory(m, u, time_step) # (batch, memory_size, memory_order)
        # Update hidden
        h = self.forward_hidden(input, h, m)
        next_state = (h, m, time_step + 1)
        output = self.state_to_tensor(next_state)
        return output, next_state
    def forward_memory(self, input, h, m):
        """ First part of forward pass to construct the memory state update """
        input_to_memory = input if self.architecture['ux'] else input.new_empty((0,))
        xh = torch.cat((input_to_memory, h), dim=-1)
        # Construct the update features
        u = self.W_uxh(xh)  # (batch, memory_size)
        return u
    def forward_hidden(self, input, h, m):
        """ Second part of forward pass: new hidden state from input + flattened memory. """
        input_to_hidden = input if self.architecture['hx'] else input.new_empty((0,))
        # Update hidden state from memory
        memory_to_hidden = m.view(input.shape[0], self.memory_size*self.memory_order)
        xm = torch.cat((input_to_hidden, memory_to_hidden), dim=-1)
        hidden_preact = self.W_hxm(xm)
        if self.architecture['hh']:
            hidden_preact = hidden_preact + self.W_hh(h)
        hidden = self.activate(hidden_preact)
        # Construct gate if necessary
        if self.gate is None:
            h = hidden
        else:
            if self.architecture['hh']:
                xm = torch.cat((xm, h), dim=-1)
            g = self.W_gxm(xm)
            # Convex combination of old and candidate hidden state.
            h = (1.-g) * h + g * hidden
        return h
    def update_memory(self, m, u, time_step):
        """
        m: (B, M, N) [batch size, memory size, memory order]
        u: (B, M)
        Output: (B, M, N)
        """
        raise NotImplementedError
    def default_state(self, *batch_shape, device=None):
        # (hidden, memory, time_step) -- all zeros at t=0.
        return (
            torch.zeros(*batch_shape, self.d_model, device=device, requires_grad=False),
            torch.zeros(*batch_shape, self.memory_size, self.memory_order, device=device, requires_grad=False),
            0,
        )
    @property
    def state_to_tensor(self):
        """ Converts a state into a single output (tensor) """
        def fn(state):
            h, m, time_step = state
            return h
        return fn
    @property
    def d_state(self):
        return self.d_model
    @property
    def d_output(self):
        return self.d_model
class LTICell(MemoryCell):
    """ A cell where the memory state follows Linear Time Invariant dynamics: c' = Ac + Bf. """
    def __init__(
        self, d_input, d_model, memory_size, memory_order,
        A, B,
        dt=0.01,
        discretization='zoh',
        **kwargs
    ):
        super().__init__(d_input, d_model, memory_size, memory_order, **kwargs)
        # C/D are dummies: cont2discrete expects a full state-space system but
        # only the discretized A and B are used here.
        C = np.ones((1, memory_order))
        D = np.zeros((1,))
        dA, dB, _, _, _ = signal.cont2discrete((A, B, C, D), dt=dt, method=discretization)
        dA = dA - np.eye(memory_order)  # puts into form: x += Ax
        self.register_buffer('A', torch.Tensor(dA))
        self.register_buffer('B', torch.Tensor(dB))
    def update_memory(self, m, u, time_step):
        # One discrete-time LTI step; time_step is unused (time-invariant).
        u = u.unsqueeze(-1) # (B, M, 1)
        return m + F.linear(m, self.A) + F.linear(u, self.B)
class LSICell(MemoryCell):
    """ A cell where the memory state Linear 'Scale' Invariant dynamics: c' = 1/t (Ac + Bf). """
    def __init__(
        self, d_input, d_model, memory_size, memory_order,
        A, B,
        init_t = 0, # 0 for special case at t=0 (new code), else old code without special case
        l_max=1024,
        discretization='bilinear',
        **kwargs
    ):
        """
        # TODO: make init_t start at arbitrary time (instead of 0 or 1)
        """
        # B should have shape (N, 1)
        assert len(B.shape) == 2 and B.shape[1] == 1
        super().__init__(d_input, d_model, memory_size, memory_order, **kwargs)
        assert isinstance(init_t, int)
        self.init_t = init_t
        self.l_max = l_max
        # The scale-invariant system (A/t, B/t) changes with t, so precompute a
        # discretized transition pair for every step up to the horizon l_max.
        A_stacked = np.empty((l_max, memory_order, memory_order), dtype=A.dtype)
        B_stacked = np.empty((l_max, memory_order), dtype=B.dtype)
        B = B[:,0]
        N = memory_order
        for t in range(1, l_max + 1):
            At = A / t
            Bt = B / t
            if discretization in forward_aliases:
                A_stacked[t - 1] = np.eye(N) + At
                B_stacked[t - 1] = Bt
            elif discretization in backward_aliases:
                A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, np.eye(N), lower=True)
                B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, Bt, lower=True)
            elif discretization in bilinear_aliases:
                A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, np.eye(N) + At / 2, lower=True)
                B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, Bt, lower=True)
            elif discretization in zoh_aliases:
                # Zero-order hold over the log-time interval [log t, log(t+1)].
                A_stacked[t - 1] = la.expm(A * (math.log(t + 1) - math.log(t)))
                B_stacked[t - 1] = la.solve_triangular(A, A_stacked[t - 1] @ B - B, lower=True)
        B_stacked = B_stacked[:, :, None]
        A_stacked -= np.eye(memory_order)  # puts into form: x += Ax
        self.register_buffer('A', torch.Tensor(A_stacked))
        self.register_buffer('B', torch.Tensor(B_stacked))
    def update_memory(self, m, u, time_step):
        u = u.unsqueeze(-1) # (B, M, 1)
        t = time_step - 1 + self.init_t
        if t < 0:
            # Special case t=0: memory is just the (zero-padded) input feature.
            return F.pad(u, (0, self.memory_order - 1))
        else:
            # Clamp to the precomputed horizon.
            if t >= self.l_max: t = self.l_max - 1
            return m + F.linear(m, self.A[t]) + F.linear(u, self.B[t])
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from scipy import signal
from scipy import linalg as la
from src.models.sequence.rnns.cells.basic import RNNCell
from src.models.nn.components import LinearActivation, Activation # , get_initializer
from src.models.nn.gate import Gate
forward_aliases = ['euler', 'forward_euler', 'forward', 'forward_diff']
backward_aliases = ['backward', 'backward_diff', 'backward_euler']
bilinear_aliases = ['bilinear', 'tustin', 'trapezoidal', 'trapezoid']
zoh_aliases = ['zoh']
class MemoryCell(RNNCell):
    """ This class handles the general architectural wiring of the HiPPO-RNN, in particular the interaction between the hidden state and the linear memory state.
    Specific variants can be instantiated by subclassing this with an appropriately defined update_memory() method.
    """
    name = None
    # NOTE(review): this line was corrupted in the source ("['<KEY> 'uh', ..." --
    # a SyntaxError); restored to the plausible original -- confirm upstream.
    valid_keys = ['uxh', 'ux', 'uh', 'um', 'hxm', 'hx', 'hm', 'hh', 'bias', ]
    @property
    def default_initializers(self):
        # Default weight initializers per connection (keys from valid_keys).
        return {
            'uxh': 'uniform',
            'hxm': 'xavier',
            'um': 'zero',
            'hh': 'xavier',
        }
    @property
    def default_architecture(self):
        # Which connections exist: input->memory (ux), input->hidden (hx),
        # memory->hidden (hm), hidden->hidden (hh), plus bias terms.
        return {
            'ux': True,
            'hx': True,
            'hm': True,
            'hh': False,
            'bias': True,
        }
    def __init__(
        self, d_input, d_model, memory_size, memory_order,
        memory_activation='id',
        gate='G', # 'N' | 'G' | 'UR'
        **kwargs
    ):
        self.memory_size = memory_size
        self.memory_order = memory_order
        self.memory_activation = memory_activation
        self.gate = gate
        super(MemoryCell, self).__init__(d_input, d_model, **kwargs)
        # Effective input widths, zeroed out when the connection is disabled.
        self.input_to_d_model = self.d_input if self.architecture['hx'] else 0
        self.input_to_memory_size = self.d_input if self.architecture['ux'] else 0
        # Hidden to memory
        self.W_uxh = LinearActivation(
            self.input_to_memory_size + self.d_model,
            self.memory_size,
            bias=self.architecture['bias'],
            initializer=self.initializers['uxh'],
            activation=self.memory_activation,
            activate=True,
        )
        self.memory_to_d_model = self.memory_size * self.memory_order if self.architecture['hm'] else 0
        # Memory to hidden
        self.W_hxm = LinearActivation(
            self.input_to_d_model + self.memory_to_d_model,
            self.d_model,
            self.architecture['bias'],
            initializer=self.initializers['hxm'],
            activation=self.hidden_activation,
            activate=False,
        )
        if self.architecture['hh']:
            self.reset_hidden_to_hidden()
        else:
            self.W_hh = None
        # Construct gate with options
        if self.gate is not None:
            preact_ctor = LinearActivation
            preact_args = [
                self.input_to_d_model + self.memory_to_d_model,
                self.d_model,
                self.architecture['bias'],
            ]
            if self.architecture['hh']:
                print("input to hidden size, memory to hidden size, hidden size:", self.input_to_d_model, self.memory_to_d_model, self.d_model)
                preact_args[0] += self.d_model
            self.W_gxm = Gate(self.d_model, preact_ctor, preact_args, mechanism=self.gate)
    def reset_parameters(self):
        # super().reset_parameters() # TODO find a way to refactor to call super()
        self.activate = Activation(self.hidden_activation, self.d_model)
    def forward(self, input, state):
        h, m, time_step = state
        # Update the memory
        u = self.forward_memory(input, h, m)
        m = self.update_memory(m, u, time_step) # (batch, memory_size, memory_order)
        # Update hidden
        h = self.forward_hidden(input, h, m)
        next_state = (h, m, time_step + 1)
        output = self.state_to_tensor(next_state)
        return output, next_state
    def forward_memory(self, input, h, m):
        """ First part of forward pass to construct the memory state update """
        input_to_memory = input if self.architecture['ux'] else input.new_empty((0,))
        xh = torch.cat((input_to_memory, h), dim=-1)
        # Construct the update features
        u = self.W_uxh(xh)  # (batch, memory_size)
        return u
    def forward_hidden(self, input, h, m):
        """ Second part of forward pass: new hidden state from input + flattened memory. """
        input_to_hidden = input if self.architecture['hx'] else input.new_empty((0,))
        # Update hidden state from memory
        memory_to_hidden = m.view(input.shape[0], self.memory_size*self.memory_order)
        xm = torch.cat((input_to_hidden, memory_to_hidden), dim=-1)
        hidden_preact = self.W_hxm(xm)
        if self.architecture['hh']:
            hidden_preact = hidden_preact + self.W_hh(h)
        hidden = self.activate(hidden_preact)
        # Construct gate if necessary
        if self.gate is None:
            h = hidden
        else:
            if self.architecture['hh']:
                xm = torch.cat((xm, h), dim=-1)
            g = self.W_gxm(xm)
            # Convex combination of old and candidate hidden state.
            h = (1.-g) * h + g * hidden
        return h
    def update_memory(self, m, u, time_step):
        """
        m: (B, M, N) [batch size, memory size, memory order]
        u: (B, M)
        Output: (B, M, N)
        """
        raise NotImplementedError
    def default_state(self, *batch_shape, device=None):
        # (hidden, memory, time_step) -- all zeros at t=0.
        return (
            torch.zeros(*batch_shape, self.d_model, device=device, requires_grad=False),
            torch.zeros(*batch_shape, self.memory_size, self.memory_order, device=device, requires_grad=False),
            0,
        )
    @property
    def state_to_tensor(self):
        """ Converts a state into a single output (tensor) """
        def fn(state):
            h, m, time_step = state
            return h
        return fn
    @property
    def d_state(self):
        return self.d_model
    @property
    def d_output(self):
        return self.d_model
class LTICell(MemoryCell):
""" A cell where the memory state follows Linear Time Invariant dynamics: c' = Ac + Bf. """
def __init__(
self, d_input, d_model, memory_size, memory_order,
A, B,
dt=0.01,
discretization='zoh',
**kwargs
):
super().__init__(d_input, d_model, memory_size, memory_order, **kwargs)
# C and D are dummies: cont2discrete expects a full (A, B, C, D) state-space
# system but only the discretized A and B are kept.
C = np.ones((1, memory_order))
D = np.zeros((1,))
dA, dB, _, _, _ = signal.cont2discrete((A, B, C, D), dt=dt, method=discretization)
dA = dA - np.eye(memory_order) # puts into form: x += Ax
# Buffers (not Parameters): fixed transition matrices saved with the module.
self.register_buffer('A', torch.Tensor(dA))
self.register_buffer('B', torch.Tensor(dB))
def update_memory(self, m, u, time_step):
"""One discrete LTI step: m <- m + A m + B u (time_step is ignored)."""
u = u.unsqueeze(-1) # (B, M, 1)
return m + F.linear(m, self.A) + F.linear(u, self.B)
class LSICell(MemoryCell):
""" A cell where the memory state Linear 'Scale' Invariant dynamics: c' = 1/t (Ac + Bf). """
def __init__(
self, d_input, d_model, memory_size, memory_order,
A, B,
init_t = 0, # 0 for special case at t=0 (new code), else old code without special case
l_max=1024,
discretization='bilinear',
**kwargs
):
"""
Precompute one discretized transition per time step 1..l_max, since the
dynamics are time-varying (scaled by 1/t).
# TODO: make init_t start at arbitrary time (instead of 0 or 1)
"""
# B should have shape (N, 1)
assert len(B.shape) == 2 and B.shape[1] == 1
super().__init__(d_input, d_model, memory_size, memory_order, **kwargs)
assert isinstance(init_t, int)
self.init_t = init_t
self.l_max = l_max
A_stacked = np.empty((l_max, memory_order, memory_order), dtype=A.dtype)
B_stacked = np.empty((l_max, memory_order), dtype=B.dtype)
B = B[:,0]
N = memory_order
for t in range(1, l_max + 1):
At = A / t
Bt = B / t
if discretization in forward_aliases:
# Forward Euler.
A_stacked[t - 1] = np.eye(N) + At
B_stacked[t - 1] = Bt
elif discretization in backward_aliases:
# Backward Euler via triangular solve.
# NOTE(review): solve_triangular(lower=True) assumes A is lower
# triangular (true for HiPPO matrices) -- confirm for other A.
A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, np.eye(N), lower=True)
B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, Bt, lower=True)
elif discretization in bilinear_aliases:
# Bilinear (Tustin) transform.
A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, np.eye(N) + At / 2, lower=True)
B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, Bt, lower=True)
elif discretization in zoh_aliases:
# Zero-order hold over the warped-time interval [log t, log(t+1)].
A_stacked[t - 1] = la.expm(A * (math.log(t + 1) - math.log(t)))
B_stacked[t - 1] = la.solve_triangular(A, A_stacked[t - 1] @ B - B, lower=True)
B_stacked = B_stacked[:, :, None]
A_stacked -= np.eye(memory_order) # puts into form: x += Ax
self.register_buffer('A', torch.Tensor(A_stacked))
self.register_buffer('B', torch.Tensor(B_stacked))
def update_memory(self, m, u, time_step):
    """Advance the scale-invariant memory one step.

    m: (B, M, N) memory, u: (B, M) update features; returns (B, M, N).
    """
    feat = u.unsqueeze(-1)  # (B, M, 1)
    step = time_step - 1 + self.init_t
    if step < 0:
        # Special case at t = 0: the memory is just the padded input feature.
        return F.pad(feat, (0, self.memory_order - 1))
    # Clamp to the last precomputed transition matrix.
    step = min(step, self.l_max - 1)
    return m + F.linear(m, self.A[step]) + F.linear(feat, self.B[step])
import re
from dataclasses import dataclass
from datetime import datetime
from typing import List, Callable
from pathlib import Path
from cryptysto.utils import read_csv, asset
from cryptysto.types import *
def load_bitfinex_ledger_file(path: Path) -> BitfinexLedger:
    """Parse a Bitfinex ledger CSV export into a list of ledger entries."""
    entries = []
    for row in read_csv(path):
        entries.append(
            BitfinexLedgerEntry(
                _id=row[0],
                desc=row[1],
                currency=row[2],
                amount=float(row[3]),
                balance=float(row[4]),
                date=datetime.strptime(row[5], "%d-%m-%y %H:%M:%S"),
                wallet=row[6],
            )
        )
    return entries
# Mapping of ledger-description patterns to generic operation types.  Raw
# strings fix the invalid "\(" / "\d" escape sequences of the original
# literals (a SyntaxWarning on modern Python).  The boolean flag says whether
# the amount is recorded as an absolute value.
_BITFINEX_RULES = (
    (re.compile(r"^Deposit \(.*"), Deposit, True),
    (re.compile(r"^Deposit Fee \(.*"), DepositFee, True),
    (re.compile(r"^.+ Withdrawal #\d+"), Withdrawal, True),
    (re.compile(r"^.+ Withdrawal fee"), WithdrawalFee, True),
    (re.compile(r"^Exchange .+"), Trade, False),
    (re.compile(r"^Trading fees for .+"), TradeFee, True),
)
def transform_bifinex_le_to_generic(le: BitfinexLedgerEntry) -> GenericOpTypes:
    """Convert one Bitfinex ledger entry into zero or more generic operations.

    Every rule whose pattern matches the entry description contributes one
    operation; an unrecognised description yields an empty list.
    """
    entry: GenericOpTypes = []
    for pattern, op_cls, use_abs in _BITFINEX_RULES:
        if pattern.match(le.desc):
            entry.append(
                op_cls(
                    exchange="Bitfinex",
                    date=le.date,
                    asset=asset(le.currency),
                    amount=abs(le.amount) if use_abs else le.amount,
                )
            )
    return entry
from dataclasses import dataclass
from datetime import datetime
from typing import List, Callable
from pathlib import Path
from cryptysto.utils import read_csv, asset
from cryptysto.types import *
def load_bitfinex_ledger_file(path: Path) -> BitfinexLedger:
    """Parse a Bitfinex ledger CSV export into a list of ledger entries."""
    entries = []
    for row in read_csv(path):
        entries.append(
            BitfinexLedgerEntry(
                _id=row[0],
                desc=row[1],
                currency=row[2],
                amount=float(row[3]),
                balance=float(row[4]),
                date=datetime.strptime(row[5], "%d-%m-%y %H:%M:%S"),
                wallet=row[6],
            )
        )
    return entries
# Mapping of ledger-description patterns to generic operation types.  Raw
# strings fix the invalid "\(" / "\d" escape sequences of the original
# literals (a SyntaxWarning on modern Python).  The boolean flag says whether
# the amount is recorded as an absolute value.
_BITFINEX_RULES = (
    (re.compile(r"^Deposit \(.*"), Deposit, True),
    (re.compile(r"^Deposit Fee \(.*"), DepositFee, True),
    (re.compile(r"^.+ Withdrawal #\d+"), Withdrawal, True),
    (re.compile(r"^.+ Withdrawal fee"), WithdrawalFee, True),
    (re.compile(r"^Exchange .+"), Trade, False),
    (re.compile(r"^Trading fees for .+"), TradeFee, True),
)
def transform_bifinex_le_to_generic(le: BitfinexLedgerEntry) -> GenericOpTypes:
    """Convert one Bitfinex ledger entry into zero or more generic operations.

    Every rule whose pattern matches the entry description contributes one
    operation; an unrecognised description yields an empty list.
    """
    entry: GenericOpTypes = []
    for pattern, op_cls, use_abs in _BITFINEX_RULES:
        if pattern.match(le.desc):
            entry.append(
                op_cls(
                    exchange="Bitfinex",
                    date=le.date,
                    asset=asset(le.currency),
                    amount=abs(le.amount) if use_abs else le.amount,
                )
            )
    return entry
from os import getcwd
from sys import platform
from csv import DictReader
from graphics import Entry, Image, GraphWin, Point, Rectangle, Text
# Purpose: To output the current working directory w/ the proper trailing delim
# Input: None
# Output: The CWD with the proper OS delim
def myCWD():
    """Return the current working directory with a trailing path separator.

    Backslash on Windows, forward slash elsewhere (Linux and Mac).  The
    original shadowed the function's own name with a local variable.
    """
    # 'win32' is sys.platform's value on every Windows build (32- and 64-bit).
    delim = '\\' if 'win32' in platform else '/'
    return getcwd() + delim
# Purpose: To reset a csv file to 0
# Input: The filename to reset (str)
# Output: None
def reset(fileName):
    """Overwrite <fileName>.csv (in the CWD) with the header row and all-zero counts."""
    # "with" guarantees the handle is closed even if a write fails
    # (the original relied on an explicit close()).
    with open(myCWD() + fileName + ".csv", "w") as f:
        print("water,gatorade,chips,popcorn,nuts,candy,cash", file=f)
        print("0,0,0,0,0,0,0", file=f)
    return
# Purpose: To open a csv file and read the contents into a dictionary
# Input: The filename (str)
# Output: a dictionary containing the csv contents
def open_csv(fileName):
    """Read <fileName>.csv from the CWD into a list of row dictionaries."""
    # The original handed an anonymous open() to DictReader and never closed
    # the handle; the with-block fixes that resource leak.
    with open(myCWD() + fileName + ".csv") as f:
        return list(DictReader(f))
# Purpose: To save a dictionary to a csv file
# Input: The file to save to (str) and the dictionary to save
# Output: None
def save_csv(fileName, sales):
    """Write the first record of *sales* back to <fileName>.csv.

    NOTE(review): unlike reset()/open_csv() this writes relative to the
    process CWD, not via myCWD() -- kept as-is for backward compatibility.
    """
    fields = ("water", "gatorade", "chips", "popcorn", "nuts", "candy", "cash")
    with open(fileName + ".csv", "w") as f:
        print(",".join(fields), file=f)
        print(",".join(sales[0][k] for k in fields), file=f)
    return
# Purpose: To determine whether a point is in a rectangle or not
# Input: A rectangle and a point
# Output: A True or False whether the point is in the rectangle or not
def isPtInRect(rectangle, point):
    """Return True when *point* lies inside (or on the edge of) *rectangle*."""
    corner_a = rectangle.getP1()
    corner_b = rectangle.getP2()
    x_lo, x_hi = sorted((corner_a.getX(), corner_b.getX()))
    y_lo, y_hi = sorted((corner_a.getY(), corner_b.getY()))
    # A point is inside iff each coordinate falls between the corner bounds
    # (equivalent to the original "distance from both corners" test).
    return x_lo <= point.getX() <= x_hi and y_lo <= point.getY() <= y_hi
# Purpose: to display a success window upon completion of an action
# Input: None
# Output: None
def confirmation():
"""Show a small modal "Success!" window until the exit button is clicked
or the window is closed externally."""
window0 = GraphWin("Success!", 200,100)
window0.setBackground("white")
text = Text(Point(100,20), "Success!")
text.setFace("courier")
text.draw(window0)
exitImage = Image(Point(100,65), "icons/exit.png")
exitImage.draw(window0)
# Invisible hit-box aligned with the drawn exit image.
exitButton = Rectangle(Point(60,48), Point(140,80))
while True:
try:
click = window0.getMouse()
except:
# getMouse() raises when the window has been closed externally.
window0.close()
break
if(isPtInRect(exitButton, click)):
window0.close()
break
return
# Purpose: To record the sale of an item
# Input: The item sold (key str), the amount sold, and the dictionary
# Output: The dictionary with the new values
def sell_item(key, quantity, dict):
    """Record the sale of *quantity* units of *key* in the sales record.

    Adds the sale price to "cash" and bumps the per-item counter.  Values
    are stored as strings, matching the CSV-backed data format.
    NOTE: the third parameter keeps its original (builtin-shadowing) name
    for backward compatibility with keyword callers.
    """
    # Price table replaces the original if/elif chain; unknown keys update
    # only the item counter, exactly as before.
    prices = {
        "water": 1,
        "chips": 1.25, "popcorn": 1.25,
        "gatorade": 1.5, "nuts": 1.5, "candy": 1.5,
    }
    price = prices.get(key)
    if price is not None:
        dict[0]["cash"] = str(float(dict[0]["cash"]) + price * quantity)
    dict[0][key] = str(int(dict[0][key]) + quantity)
    return dict
# Purpose: A popup window that shows the total sale and calculates change
# Input: The total amount of the sale (float)
# Output: None
def show_total(amount):
"""Modal transaction window: shows the sale total, reads the tendered
amount and displays the change, until the window is closed."""
totalWin = GraphWin("Transaction", 250,250)
totalWin.setBackground("Yellow")
amountText = Text(Point(125,50), amount)
amountText.setStyle("bold")
amountText.draw(totalWin)
amountLabel = Text(Point(50,50), "Total:")
amountLabel.draw(totalWin)
tenderedBox = Entry(Point(125,100), 5)
tenderedBox.setText("0")
tenderedBox.setFill("white")
tenderedBox.draw(totalWin)
label = Text(Point(50,100), "Given: ")
label.draw(totalWin)
button = Image(Point(125, 200), "icons/button.png")
button.draw(totalWin)
# Hit-box aligned with the drawn button image.
buttonRect = Rectangle(Point(50,184), Point(203,218))
# calcFlag: a change line is currently drawn and must be undrawn first.
calcFlag = False
while True:
errorFlag = False
try:
click = totalWin.getMouse()
except:
# getMouse() raises once the window has been closed.
totalWin.close()
break
if(isPtInRect(buttonRect, click)):
if(calcFlag):
change.undraw()
try:
tendered = tenderedBox.getText()
except:
# NOTE(review): if getText() ever raises, `tendered` stays unbound and
# the float() below would raise NameError -- confirm this is intended.
errorFlag = True
tenderedBox.setText("0")
# Not enough money tendered: reset the entry to the exact total.
if(float(tendered) < amount):
errorFlag = True
tenderedBox.setText(str(amount))
if(not errorFlag):
change = Text(Point(125, 150), "Change: "
+ str(float(tendered) - amount))
change.setStyle("bold")
change.draw(totalWin)
calcFlag = True
return
from sys import platform
from csv import DictReader
from graphics import Entry, Image, GraphWin, Point, Rectangle, Text
# Purpose: To output the current working directory w/ the proper trailing delim
# Input: None
# Output: The CWD with the proper OS delim
def myCWD():
    """Return the current working directory with a trailing path separator.

    Backslash on Windows, forward slash elsewhere (Linux and Mac).  The
    original shadowed the function's own name with a local variable.
    """
    # 'win32' is sys.platform's value on every Windows build (32- and 64-bit).
    delim = '\\' if 'win32' in platform else '/'
    return getcwd() + delim
# Purpose: To reset a csv file to 0
# Input: The filename to reset (str)
# Output: None
def reset(fileName):
    """Overwrite <fileName>.csv (in the CWD) with the header row and all-zero counts."""
    # "with" guarantees the handle is closed even if a write fails.
    with open(myCWD() + fileName + ".csv", "w") as f:
        print("water,gatorade,chips,popcorn,nuts,candy,cash", file=f)
        print("0,0,0,0,0,0,0", file=f)
    return
# Purpose: To open a csv file and read the contents into a dictionary
# Input: The filename (str)
# Output: a dictionary containing the csv contents
def open_csv(fileName):
    """Read <fileName>.csv from the CWD into a list of row dictionaries."""
    # The original handed an anonymous open() to DictReader and never closed
    # the handle; the with-block fixes that resource leak.
    with open(myCWD() + fileName + ".csv") as f:
        return list(DictReader(f))
# Purpose: To save a dictionary to a csv file
# Input: The file to save to (str) and the dictionary to save
# Output: None
def save_csv(fileName, sales):
    """Write the first record of *sales* back to <fileName>.csv.

    NOTE(review): unlike reset()/open_csv() this writes relative to the
    process CWD, not via myCWD() -- kept as-is for backward compatibility.
    """
    fields = ("water", "gatorade", "chips", "popcorn", "nuts", "candy", "cash")
    with open(fileName + ".csv", "w") as f:
        print(",".join(fields), file=f)
        print(",".join(sales[0][k] for k in fields), file=f)
    return
# Purpose: To determine whether a point is in a rectangle or not
# Input: A rectangle and a point
# Output: A True or False whether the point is in the rectangle or not
def isPtInRect(rectangle, point):
point1 = rectangle.getP1() # First rectangle point
point1X = point1.getX() # First rectangle point X coord
point1Y = point1.getY() # First rectangle point Y coord
point2 = rectangle.getP2() # Second rectangle point
point2X = point2.getX() # Second rectangle point X coord
point2Y = point2.getY() # Second rectangle point Y coord
sideOneLength = abs(point1X - point2X)
sideTwoLength = abs(point1Y - point2Y)
pointXvalue = point.getX() # Input point X coord
pointYvalue = point.getY() # Input point Y coord
if (abs(point1X - pointXvalue) <= sideOneLength and \
abs(point2X - pointXvalue) <= sideOneLength) and \
(abs(point1Y - pointYvalue) <= sideTwoLength and \
abs(point2Y - pointYvalue) <= sideTwoLength):
inFlag = True
else:
inFlag = False
return inFlag
# Purpose: to display a success window upon completion of an action
# Input: None
# Output: None
def confirmation():
window0 = GraphWin("Success!", 200,100)
window0.setBackground("white")
text = Text(Point(100,20), "Success!")
text.setFace("courier")
text.draw(window0)
exitImage = Image(Point(100,65), "icons/exit.png")
exitImage.draw(window0)
exitButton = Rectangle(Point(60,48), Point(140,80))
while True:
try:
click = window0.getMouse()
except:
window0.close()
break
if(isPtInRect(exitButton, click)):
window0.close()
break
return
# Purpose: To record the sale of an item
# Input: The item sold (key str), the amount sold, and the dictionary
# Output: The dictionary with the new values
def sell_item(key, quantity, dict):
    """Record the sale of *quantity* units of *key* in the sales record.

    Adds the sale price to "cash" and bumps the per-item counter.  Values
    are stored as strings, matching the CSV-backed data format.
    NOTE: the third parameter keeps its original (builtin-shadowing) name
    for backward compatibility with keyword callers.
    """
    prices = {
        "water": 1,
        "chips": 1.25, "popcorn": 1.25,
        "gatorade": 1.5, "nuts": 1.5, "candy": 1.5,
    }
    price = prices.get(key)
    if price is not None:
        dict[0]["cash"] = str(float(dict[0]["cash"]) + price * quantity)
    dict[0][key] = str(int(dict[0][key]) + quantity)
    return dict
# Purpose: A popup window that shows the total sale and calculates change
# Input: The total amount of the sale (float)
# Output: None
def show_total(amount):
totalWin = GraphWin("Transaction", 250,250)
totalWin.setBackground("Yellow")
amountText = Text(Point(125,50), amount)
amountText.setStyle("bold")
amountText.draw(totalWin)
amountLabel = Text(Point(50,50), "Total:")
amountLabel.draw(totalWin)
tenderedBox = Entry(Point(125,100), 5)
tenderedBox.setText("0")
tenderedBox.setFill("white")
tenderedBox.draw(totalWin)
label = Text(Point(50,100), "Given: ")
label.draw(totalWin)
button = Image(Point(125, 200), "icons/button.png")
button.draw(totalWin)
buttonRect = Rectangle(Point(50,184), Point(203,218))
calcFlag = False
while True:
errorFlag = False
try:
click = totalWin.getMouse()
except:
totalWin.close()
break
if(isPtInRect(buttonRect, click)):
if(calcFlag):
change.undraw()
try:
tendered = tenderedBox.getText()
except:
errorFlag = True
tenderedBox.setText("0")
if(float(tendered) < amount):
errorFlag = True
tenderedBox.setText(str(amount))
if(not errorFlag):
change = Text(Point(125, 150), "Change: "
+ str(float(tendered) - amount))
change.setStyle("bold")
change.draw(totalWin)
calcFlag = True
return | 0.422266 | 0.196036 |
from collections import OrderedDict
from django.utils import timezone
from django.utils.translation import ugettext as _
from rest_framework import serializers
from rest_framework.reverse import reverse
from timetracker.sheets.models import TimeSheet
class TimeSheetSerializer(serializers.ModelSerializer):
"""Serializes a TimeSheet plus a link to its hours-per-project statistics view."""
hours_per_project_visualisation_url = serializers.SerializerMethodField()
class Meta:
model = TimeSheet
fields = (
'id',
'title',
'hours_per_project_visualisation_url',
)
def get_hours_per_project_visualisation_url(self, obj):
"""Absolute URL of the hours-per-project statistics endpoint for this sheet."""
return reverse(
'api:sheets:hours-per-project-statistics',
kwargs={'sheet_pk': obj.pk},
request=self.context['request'])
class HoursPerProjectStatisticsSerializer(TimeSheetSerializer):
"""Per-sheet statistics: seconds spent per project per day of the current week."""
visualisation_title = serializers.SerializerMethodField()
start_date = serializers.SerializerMethodField()
end_date = serializers.SerializerMethodField()
projects = serializers.SerializerMethodField()
days = serializers.SerializerMethodField()
def get_start_date(self, obj):
"""Monday of the current week."""
# Get Monday
return timezone.now().date() - timezone.timedelta(
days=timezone.now().weekday())
def get_end_date(self, obj):
"""Sunday of the current week (Monday + 6 days)."""
return self.get_start_date(obj) + timezone.timedelta(days=6)
def get_days(self, obj):
"""Yield each date from Monday through Sunday inclusive.

NOTE(review): this SerializerMethodField returns a generator -- confirm
the renderer serializes it as intended (it can only be consumed once).
"""
date = self.get_start_date(obj)
while date <= self.get_end_date(obj):
yield date
date += timezone.timedelta(days=1)
def get_projects(self, obj):
"""Aggregate activity durations per project per day over the current week.

Activities are bucketed by the day their start_datetime falls in, and
every project gets a zero-initialised entry for each day of the week.
NOTE(review): `activity.duration.seconds` drops any whole-day component
of the timedelta -- confirm activities never span more than 24h.
"""
projects = {}
date = self.get_start_date(obj)
while date <= self.get_end_date(obj):
next_day = date + timezone.timedelta(days=1)
activities = obj.activities.filter(
start_datetime__gte=date,
start_datetime__lt=next_day,
).select_related('project')
for activity in activities:
if activity.project_id not in projects:
projects[activity.project_id] = {
'id': activity.project_id,
'title': activity.project.name,
'days': OrderedDict(),
}
for day in self.get_days(obj):
if day.isoformat() not in projects[
activity.project_id]['days']:
days_dict = projects[activity.project_id]['days']
days_dict[day.isoformat()] = {
'date': day.isoformat(),
'duration_seconds': 0
}
# Accumulate into the bucket of the day currently being processed;
# the queryset above is already restricted to that day.
projects[activity.project_id]['days'][date.isoformat(
)]['duration_seconds'] += (activity.duration.seconds)
date = next_day
# Expose the per-day buckets as a sequence for serialization.
for project in projects.values():
project['days'] = project['days'].values()
return projects.values()
def get_visualisation_title(self, obj):
"""Human-readable title shown above the visualisation."""
return _('Hours per project this week')
class Meta:
model = TimeSheet
fields = (
'id',
'visualisation_title',
'start_date',
'end_date',
'days',
'projects',
)
from django.utils import timezone
from django.utils.translation import ugettext as _
from rest_framework import serializers
from rest_framework.reverse import reverse
from timetracker.sheets.models import TimeSheet
class TimeSheetSerializer(serializers.ModelSerializer):
hours_per_project_visualisation_url = serializers.SerializerMethodField()
class Meta:
model = TimeSheet
fields = (
'id',
'title',
'hours_per_project_visualisation_url',
)
def get_hours_per_project_visualisation_url(self, obj):
return reverse(
'api:sheets:hours-per-project-statistics',
kwargs={'sheet_pk': obj.pk},
request=self.context['request'])
class HoursPerProjectStatisticsSerializer(TimeSheetSerializer):
visualisation_title = serializers.SerializerMethodField()
start_date = serializers.SerializerMethodField()
end_date = serializers.SerializerMethodField()
projects = serializers.SerializerMethodField()
days = serializers.SerializerMethodField()
def get_start_date(self, obj):
# Get Monday
return timezone.now().date() - timezone.timedelta(
days=timezone.now().weekday())
def get_end_date(self, obj):
return self.get_start_date(obj) + timezone.timedelta(days=6)
def get_days(self, obj):
date = self.get_start_date(obj)
while date <= self.get_end_date(obj):
yield date
date += timezone.timedelta(days=1)
def get_projects(self, obj):
projects = {}
date = self.get_start_date(obj)
while date <= self.get_end_date(obj):
next_day = date + timezone.timedelta(days=1)
activities = obj.activities.filter(
start_datetime__gte=date,
start_datetime__lt=next_day,
).select_related('project')
for activity in activities:
if activity.project_id not in projects:
projects[activity.project_id] = {
'id': activity.project_id,
'title': activity.project.name,
'days': OrderedDict(),
}
for day in self.get_days(obj):
if day.isoformat() not in projects[
activity.project_id]['days']:
days_dict = projects[activity.project_id]['days']
days_dict[day.isoformat()] = {
'date': day.isoformat(),
'duration_seconds': 0
}
projects[activity.project_id]['days'][date.isoformat(
)]['duration_seconds'] += (activity.duration.seconds)
date = next_day
for project in projects.values():
project['days'] = project['days'].values()
return projects.values()
def get_visualisation_title(self, obj):
return _('Hours per project this week')
class Meta:
model = TimeSheet
fields = (
'id',
'visualisation_title',
'start_date',
'end_date',
'days',
'projects',
) | 0.701509 | 0.120103 |
import csv
import json
import logging
import re
from pathlib import Path
from hanzipy.exceptions import NotAHanziCharacter
logging.basicConfig(level=logging.DEBUG)
# Single-stroke radicals excluded from the component index.
RADICAL_REGEX = r"[一丨丶⺀丿乙⺃乚⺄亅丷]"
# Directory holding the bundled data files.
CURRENT_DIR = BASE_DIR = Path(__file__).parent
class HanziDecomposer:
"""Decomposes Hanzi characters into components/radicals using bundled data files."""
def __init__(self):
"""Load the decomposition database and build the component index."""
# character -> {"decomposition_type", "components"}, from cjk_decomp.txt
self.characters = {}
# radical -> meaning, from radical_with_meanings.json
self.radicals = {}
# component -> list of characters containing it (built below)
self.characters_with_component = {}
self.noglyph = "No glyph available"
self.init_decomposition()
self.compile_all_components()
def init_decomposition(
self,
):
"""Load the decomposition database and the radical-meaning table."""
# Reading in cjk_decomp - Decomposition Database
# Each line has the form "<char>:<type>(<comp1>,<comp2>,...)".
decomp_filepath = "{}/data/cjk_decomp.txt".format(CURRENT_DIR)
with open(decomp_filepath) as decomp_file:
lines = decomp_file.readlines()
for line in lines:
colonsplit = line.split(":")
character = colonsplit[0]
decomposition = colonsplit[1]
openbracket = decomposition.index("(")
closebracket = decomposition.index(")")
decomposition_type = decomposition[0:openbracket]
components = decomposition[openbracket + 1 : closebracket].split(
","
) # noqa
self.characters[character] = {
"decomposition_type": decomposition_type,
"components": components,
}
# Reading in radical list
radical_filepath = "{}/data/radical_with_meanings.json".format(CURRENT_DIR)
with open(radical_filepath) as radicals_file:
self.radicals = json.load(radicals_file)
def compile_all_components(
self,
):
"""Build the component -> characters index from the frequency list.

Decomposes every character in the frequency CSV and records which
characters contain each component.
"""
filepath = "{}/data/chinese_charfreq_simpl_trad.csv".format(CURRENT_DIR)
with open(filepath) as freq_file:
csvreader = csv.reader(freq_file)
next(csvreader, None) # skip the headers
for row in csvreader:
character = row[1]
line_num = row[0]
decomposition = self.decompose(character)
# Index by single-level ("once") components; duplicates are allowed here.
for component in decomposition["once"]:
if component not in self.characters_with_component:
if component != self.noglyph:
self.characters_with_component.setdefault(component, [])
self.characters_with_component[component].append(character)
elif component != self.noglyph:
self.characters_with_component[component].append(character)
# Index by radical components; single-stroke radicals (RADICAL_REGEX)
# are excluded and duplicates filtered through is_unique().
for component in decomposition["radical"]:
if component not in self.characters_with_component:
if component != self.noglyph and not re.search(
RADICAL_REGEX, component
):
self.characters_with_component.setdefault(
component,
[],
)
if self.is_unique(
self.characters_with_component[component],
character,
): # noqa
self.characters_with_component[component].append(
character
) # noqa
elif component != self.noglyph and not re.search(
RADICAL_REGEX, component
):
if self.is_unique(
self.characters_with_component[component],
character,
): # noqa
self.characters_with_component[component].append(
character
) # noqa
# NOTE(review): line_num holds the last CSV row's index; on an empty file
# this would raise NameError -- confirm the data file is never empty.
logging.info("Done compiling {} characters".format(int(line_num) - 1))
return self.characters_with_component
def is_unique(self, array_list, token):
    """Return True when *token* does not already occur in *array_list*."""
    # Membership test replaces the original hand-rolled full-list scan
    # (which also kept iterating after a match was found).
    return token not in array_list
def decompose_many(self, characterstring, decomposition_type=None):
    """Decompose every distinct Hanzi character in *characterstring*.

    Returns a dict mapping each character to its decomposition.
    Raises NotAHanziCharacter when the input contains no CJK character,
    and ValueError for an effectively empty input.
    """
    characterstring = str(characterstring)
    # Not Hanzi
    if not re.search(u"[\u4e00-\u9fff]", characterstring):
        raise NotAHanziCharacter(characterstring)
    # Strip spaces.  The original's replace(r"/\s/g", "") was a JavaScript
    # regex left-over and removed nothing.
    characterstring = characterstring.replace(" ", "")
    if not characterstring:
        # The original raised a plain string, which is itself a TypeError
        # in Python 3; raise a proper exception instead.
        raise ValueError("Invalid input")
    decomposed_components = {}
    for one_character in characterstring:
        # Don't decompose the same character more than once.
        if one_character in decomposed_components:
            continue
        decomposed_components[one_character] = self.decompose(
            one_character, decomposition_type
        )
    return decomposed_components
def decompose(self, character, decomposition_type=None):
    """Decompose a single Hanzi character.

    Type of decomp:
    None = all three views,
    1 = Only 2 components,
    2 = Radical,
    3 = Graphical
    Returns a plain dict, the string "Invalid Input" for unusable input
    (kept for backward compatibility), or None for an unknown type.
    """
    # Strip spaces.  The original's replace(r"/\s/g", "") was a JavaScript
    # regex left-over and removed nothing.
    character = character.replace(" ", "")
    if self.is_messy(character):
        logging.error(self.is_messy(character))
        return "Invalid Input"
    # The original round-tripped the result through json.dumps/json.loads,
    # which is a no-op for these str/list payloads; return the dict directly.
    if not decomposition_type:
        return {
            "character": character,
            "once": self.once_decomposition(character),
            "radical": self.radical_decomposition(character),
            "graphical": self.graphical_decomposition(character),
        }
    if decomposition_type == 1:
        return {
            "character": character,
            "components": self.once_decomposition(character),
        }
    if decomposition_type == 2:
        return {
            "character": character,
            "components": self.radical_decomposition(character),
        }
    if decomposition_type == 3:
        return {
            "character": character,
            "components": self.graphical_decomposition(character),
        }
    return None
# Functions to help with Decomposition
def once_decomposition(self, character):
"""Single-level decomposition: direct components, numeric codes replaced."""
components = self.get_components(character)
return self.replace_numbers(components)
def radical_decomposition(self, character):
"""Recursively decompose *character* down to radicals.

Recursion stops at radicals and at characters that do not split into
exactly two components; numeric codes become the no-glyph placeholder.
"""
final_array = []
if self.is_radical(character):
final_array.append(character)
else:
components = self.get_components(character)
# Only binary decompositions recurse; anything else is terminal.
if len(components) == 2:
for j in range(2):
final_array.extend(self.radical_decomposition(components[j]))
else:
final_array.append(character)
return self.replace_numbers(final_array)
def graphical_decomposition(self, character):
"""Recursively decompose *character* into its lowest-level glyphs.

Unlike radical_decomposition, numeric intermediate codes are resolved
into their own sub-components rather than replaced by a placeholder.
"""
final_array = []
components = self.get_components(character)
if len(components) == 2:
for j in range(2):
final_array.extend(self.graphical_decomposition(components[j]))
else:
if not character.isdigit():
final_array.append(character)
else:
final_array.extend(self.resolve_number(character))
return final_array
def replace_numbers(self, characters):
    """Map intermediate numeric decomposition codes to a placeholder glyph."""
    placeholder = "No glyph available"
    return [placeholder if ch.isdigit() else ch for ch in characters]
def resolve_number(self, number):
"""Recursively expand a numeric decomposition code into real components."""
numbers_cleared = []
components = self.get_components(number)
for component in components:
if not component.isdigit():
numbers_cleared.append(component)
else:
numbers_cleared.extend(self.resolve_number(component))
return numbers_cleared
def get_characters_with_component(self, component):
"""Return all characters containing *component*.

For a radical, the characters of every same-meaning radical variant are
merged.  Returns None for an unknown component.
"""
if component in self.radicals.keys():
components = self.find_same_meaning_radicals(component)
characters = []
# NOTE(review): the loop variable shadows the *component* parameter, and
# an uncompiled variant would raise KeyError on the lookup below --
# confirm every radical variant appears in characters_with_component.
for component in components:
if self.characters_with_component[component]:
characters.extend(self.characters_with_component[component])
return characters
else:
if component in self.characters_with_component.keys():
return self.characters_with_component[component]
return
def find_same_meaning_radicals(self, radical):
    """Return every radical sharing *radical*'s meaning (including itself)."""
    meaning = self.radicals[radical]
    # The original shadowed the *radical* parameter with its loop variable and
    # re-tested dictionary membership on every iteration; both are unnecessary.
    return [rad for rad, rad_meaning in self.radicals.items()
            if rad_meaning == meaning]
def is_radical(self, character):
    """Return True when *character* has a (truthy) radical-meaning entry."""
    # Mirrors the original truthiness test: an empty meaning counts as
    # "not a radical".
    return bool(self.radicals.get(character))
def get_components(self, character):
"""Return the direct components of *character*.

For a primitive ("c"-type) entry or an unknown character the character
string itself is returned -- callers rely on iterating that string
yielding its individual characters.
"""
if self.component_exists(character):
if self.characters[character]["decomposition_type"] == "c":
return character
else:
return self.characters[character]["components"]
else:
return character
def get_radical_meaning(self, radical):
    """Return the meaning of *radical*, or None when it is not a radical."""
    return self.radicals[radical] if self.is_radical(radical) else None
def component_exists(self, component):
"""True when *component* has an entry in the decomposition database."""
return component in self.characters
def is_messy(self, character):
"""Return True for unusable input."""
# If no input is sent
if not character:
return True
# If it's not a Chinese character
# NOTE(review): get_components() returns the character itself for unknown
# input, which is truthy, so this check only rejects empty strings --
# confirm that matches the intended validation.
return not self.get_components(character)
if __name__ == "__main__":
logging.info("Compiling Hanzi characters data...")
# Compile Components into an object array for easy lookup
hanzi = HanziDecomposer()
# Smoke test: decompose a common character and print the result.
res = hanzi.decompose("是")
print(res)
import json
import logging
import re
from pathlib import Path
from hanzipy.exceptions import NotAHanziCharacter
logging.basicConfig(level=logging.DEBUG)
RADICAL_REGEX = r"[一丨丶⺀丿乙⺃乚⺄亅丷]"
CURRENT_DIR = BASE_DIR = Path(__file__).parent
class HanziDecomposer:
def __init__(self):
self.characters = {}
self.radicals = {}
self.characters_with_component = {}
self.noglyph = "No glyph available"
self.init_decomposition()
self.compile_all_components()
def init_decomposition(
self,
):
# Reading in cjk_decomp - Decomposition Database
decomp_filepath = "{}/data/cjk_decomp.txt".format(CURRENT_DIR)
with open(decomp_filepath) as decomp_file:
lines = decomp_file.readlines()
for line in lines:
colonsplit = line.split(":")
character = colonsplit[0]
decomposition = colonsplit[1]
openbracket = decomposition.index("(")
closebracket = decomposition.index(")")
decomposition_type = decomposition[0:openbracket]
components = decomposition[openbracket + 1 : closebracket].split(
","
) # noqa
self.characters[character] = {
"decomposition_type": decomposition_type,
"components": components,
}
# Reading in radical list
radical_filepath = "{}/data/radical_with_meanings.json".format(CURRENT_DIR)
with open(radical_filepath) as radicals_file:
self.radicals = json.load(radicals_file)
def compile_all_components(
self,
):
filepath = "{}/data/chinese_charfreq_simpl_trad.csv".format(CURRENT_DIR)
with open(filepath) as freq_file:
csvreader = csv.reader(freq_file)
next(csvreader, None) # skip the headers
for row in csvreader:
character = row[1]
line_num = row[0]
decomposition = self.decompose(character)
for component in decomposition["once"]:
if component not in self.characters_with_component:
if component != self.noglyph:
self.characters_with_component.setdefault(component, [])
self.characters_with_component[component].append(character)
elif component != self.noglyph:
self.characters_with_component[component].append(character)
for component in decomposition["radical"]:
if component not in self.characters_with_component:
if component != self.noglyph and not re.search(
RADICAL_REGEX, component
):
self.characters_with_component.setdefault(
component,
[],
)
if self.is_unique(
self.characters_with_component[component],
character,
): # noqa
self.characters_with_component[component].append(
character
) # noqa
elif component != self.noglyph and not re.search(
RADICAL_REGEX, component
):
if self.is_unique(
self.characters_with_component[component],
character,
): # noqa
self.characters_with_component[component].append(
character
) # noqa
logging.info("Done compiling {} characters".format(int(line_num) - 1))
return self.characters_with_component
def is_unique(self, array_list, token):
    """Return True when *token* does not already occur in *array_list*."""
    # Membership test replaces the original hand-rolled full-list scan.
    return token not in array_list
def decompose_many(self, characterstring, decomposition_type=None):
    """Decompose every distinct Hanzi character in *characterstring*.

    Returns a dict mapping each character to its decomposition.
    Raises NotAHanziCharacter when the input contains no CJK character,
    and ValueError for an effectively empty input.
    """
    characterstring = str(characterstring)
    # Not Hanzi
    if not re.search(u"[\u4e00-\u9fff]", characterstring):
        raise NotAHanziCharacter(characterstring)
    # Strip spaces.  The original's replace(r"/\s/g", "") was a JavaScript
    # regex left-over and removed nothing.
    characterstring = characterstring.replace(" ", "")
    if not characterstring:
        # The original raised a plain string, which is itself a TypeError
        # in Python 3; raise a proper exception instead.
        raise ValueError("Invalid input")
    decomposed_components = {}
    for one_character in characterstring:
        # Don't decompose the same character more than once.
        if one_character in decomposed_components:
            continue
        decomposed_components[one_character] = self.decompose(
            one_character, decomposition_type
        )
    return decomposed_components
def decompose(self, character, decomposition_type=None):
    """Decompose a single Hanzi character.

    Type of decomp:
    None = all three views,
    1 = Only 2 components,
    2 = Radical,
    3 = Graphical
    Returns a plain dict, the string "Invalid Input" for unusable input
    (kept for backward compatibility), or None for an unknown type.
    """
    # Strip spaces.  The original's replace(r"/\s/g", "") was a JavaScript
    # regex left-over and removed nothing.
    character = character.replace(" ", "")
    if self.is_messy(character):
        logging.error(self.is_messy(character))
        return "Invalid Input"
    # The original round-tripped the result through json.dumps/json.loads,
    # which is a no-op for these str/list payloads; return the dict directly.
    if not decomposition_type:
        return {
            "character": character,
            "once": self.once_decomposition(character),
            "radical": self.radical_decomposition(character),
            "graphical": self.graphical_decomposition(character),
        }
    if decomposition_type == 1:
        return {
            "character": character,
            "components": self.once_decomposition(character),
        }
    if decomposition_type == 2:
        return {
            "character": character,
            "components": self.radical_decomposition(character),
        }
    if decomposition_type == 3:
        return {
            "character": character,
            "components": self.graphical_decomposition(character),
        }
    return None
# Functions to help with Decomposition
def once_decomposition(self, character):
    """Return the first-level components of *character*, digits replaced."""
    return self.replace_numbers(self.get_components(character))
def radical_decomposition(self, character):
    """Recursively break *character* down to its radicals.

    Radicals and non-binary decompositions stop the recursion; numeric
    placeholders are replaced with a readable marker at the end.
    """
    if self.is_radical(character):
        parts = [character]
    else:
        components = self.get_components(character)
        if len(components) == 2:
            parts = []
            for component in components:
                parts.extend(self.radical_decomposition(component))
        else:
            parts = [character]
    return self.replace_numbers(parts)
def graphical_decomposition(self, character):
    """Recursively decompose *character* into its smallest graphical parts."""
    components = self.get_components(character)
    if len(components) == 2:
        parts = []
        for component in components:
            parts.extend(self.graphical_decomposition(component))
        return parts
    # Leaf: keep real glyphs, expand numeric placeholders.
    if character.isdigit():
        return self.resolve_number(character)
    return [character]
def replace_numbers(self, characters):
    """Replace numeric placeholder components with a readable marker.

    The manual append loop is replaced by an equivalent comprehension.
    """
    return [
        char if not char.isdigit() else "No glyph available"
        for char in characters
    ]
def resolve_number(self, number):
numbers_cleared = []
components = self.get_components(number)
for component in components:
if not component.isdigit():
numbers_cleared.append(component)
else:
numbers_cleared.extend(self.resolve_number(component))
return numbers_cleared
def get_characters_with_component(self, component):
    """Return all characters containing *component*.

    For radicals, characters of every same-meaning radical are included.
    Returns None when the component is unknown.
    """
    if component in self.radicals:
        characters = []
        # Loop variable renamed: the original shadowed the parameter.
        for radical in self.find_same_meaning_radicals(component):
            # .get avoids the KeyError the original raised for radicals
            # that have no indexed characters.
            found = self.characters_with_component.get(radical)
            if found:
                characters.extend(found)
        return characters
    if component in self.characters_with_component:
        return self.characters_with_component[component]
    return None
def find_same_meaning_radicals(self, radical):
    """Return every radical whose recorded meaning matches *radical*'s.

    The original re-tested `radical in self.radicals` inside a loop over
    self.radicals itself (always true) and shadowed the parameter with
    the loop variable; both are fixed.
    """
    meaning = self.radicals[radical]
    return [r for r in self.radicals if self.radicals[r] == meaning]
def is_radical(self, character):
    """Return True when *character* has a truthy entry in the radical table."""
    return bool(self.radicals.get(character))
def get_components(self, character):
    """Return *character*'s component list, or the character itself.

    Unknown characters and atomic ones (decomposition type "c") are
    returned unchanged.
    """
    if not self.component_exists(character):
        return character
    entry = self.characters[character]
    if entry["decomposition_type"] == "c":
        return character
    return entry["components"]
def get_radical_meaning(self, radical):
    """Return the meaning of *radical*, or None if it is not a radical."""
    if not self.is_radical(radical):
        return None
    return self.radicals[radical]
def component_exists(self, component):
    """Return True if the character table has an entry for *component*."""
    return component in self.characters
def is_messy(self, character):
    """Return True for input that cannot be decomposed at all."""
    # Empty/None input.
    if not character:
        return True
    # Unknown input yields no components.
    components = self.get_components(character)
    return not components
# Script entry: quick smoke test of the decomposer.
if __name__ == "__main__":
    logging.info("Compiling Hanzi characters data...")
    # Compile Components into an object array for easy lookup
    hanzi = HanziDecomposer()
    # Decompose a sample character as a sanity check.
    res = hanzi.decompose("是")
print(res) | 0.48121 | 0.215557 |
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import Clock
from kivy.properties import ObjectProperty
from kivy.uix.checkbox import CheckBox
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from threading import Thread
from customwidgets.text import TextWidget
from customwidgets.plot import PlotWidget
from subscribers.subscriber import Subscriber
class SignalDisplay(GridLayout):
    """Grid of live signal widgets, one per subscribed topic."""

    # Kivy properties must be declared at class level; the dicts are kept
    # here for backward compatibility, but each instance gets its OWN
    # dicts in __init__ -- mutable class attributes would otherwise be
    # shared by every SignalDisplay instance.
    plots_dict = {}    # topic name -> widget currently displayed
    topics_dict = {}   # local view of all available topics
    viewer_ref = ObjectProperty(None)
    display_ref = ObjectProperty(None)

    def __init__(self, **kwargs):
        super(SignalDisplay, self).__init__(**kwargs)
        self.plots_dict = {}
        self.topics_dict = {}

    def setreferences(self, viewer, selector):
        """Remember the owning viewer and the topic selector."""
        self.viewer_ref = viewer
        self.selector_ref = selector

    def build(self, topics_dict=None):
        """Adopt *topics_dict*, falling back to the viewer's dictionary."""
        if topics_dict is not None:
            self.topics_dict = topics_dict
        else:
            self.topics_dict = self.viewer_ref.topics_dict

    def add(self, topic_name, widget_class=TextWidget):
        """Create a widget for *topic_name* and insert it into the grid."""
        subs = self.topics_dict[topic_name]['subs']
        # Get subscriber channels:
        channels = subs.getChannels()
        newplot = widget_class(channels=channels, title=topic_name)
        container = GridLayout(cols=1, rows=1)
        container.add_widget(newplot)
        self.add_widget(container)
        self.plots_dict[topic_name] = newplot

    def remove(self, topic_name):
        """Remove the widget for *topic_name* from the grid."""
        plot = self.plots_dict[topic_name]
        container = plot.parent
        container.parent.remove_widget(container)
        self.plots_dict.pop(topic_name)

    def update(self):
        """Push the latest queued data from each subscriber to its widget."""
        for key in self.plots_dict:
            sub = self.topics_dict[key]['subs']
            plot = self.plots_dict[key]
            try:
                data = sub.getQueue()
            except Exception:
                # The original printed 'Error' and then used a possibly
                # unbound `data`; skip this topic instead.
                print('Error')
                continue
            # `data is not []` was always True (identity, not equality);
            # truthiness is what was intended.
            if data:
                plot.update(data)
pass | signalslayout/signaldisplay.py |
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import Clock
from kivy.properties import ObjectProperty
from kivy.uix.checkbox import CheckBox
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from threading import Thread
from customwidgets.text import TextWidget
from customwidgets.plot import PlotWidget
from subscribers.subscriber import Subscriber
class SignalDisplay(GridLayout):
    """Grid of live signal widgets, one per subscribed topic."""

    # Kivy properties must be declared at class level; the dicts are kept
    # here for backward compatibility, but each instance gets its OWN
    # dicts in __init__ -- mutable class attributes would otherwise be
    # shared by every SignalDisplay instance.
    plots_dict = {}    # topic name -> widget currently displayed
    topics_dict = {}   # local view of all available topics
    viewer_ref = ObjectProperty(None)
    display_ref = ObjectProperty(None)

    def __init__(self, **kwargs):
        super(SignalDisplay, self).__init__(**kwargs)
        self.plots_dict = {}
        self.topics_dict = {}

    def setreferences(self, viewer, selector):
        """Remember the owning viewer and the topic selector."""
        self.viewer_ref = viewer
        self.selector_ref = selector

    def build(self, topics_dict=None):
        """Adopt *topics_dict*, falling back to the viewer's dictionary."""
        if topics_dict is not None:
            self.topics_dict = topics_dict
        else:
            self.topics_dict = self.viewer_ref.topics_dict

    def add(self, topic_name, widget_class=TextWidget):
        """Create a widget for *topic_name* and insert it into the grid."""
        subs = self.topics_dict[topic_name]['subs']
        # Get subscriber channels:
        channels = subs.getChannels()
        newplot = widget_class(channels=channels, title=topic_name)
        container = GridLayout(cols=1, rows=1)
        container.add_widget(newplot)
        self.add_widget(container)
        self.plots_dict[topic_name] = newplot

    def remove(self, topic_name):
        """Remove the widget for *topic_name* from the grid."""
        plot = self.plots_dict[topic_name]
        container = plot.parent
        container.parent.remove_widget(container)
        self.plots_dict.pop(topic_name)

    def update(self):
        """Push the latest queued data from each subscriber to its widget."""
        for key in self.plots_dict:
            sub = self.topics_dict[key]['subs']
            plot = self.plots_dict[key]
            try:
                data = sub.getQueue()
            except Exception:
                # The original printed 'Error' and then used a possibly
                # unbound `data`; skip this topic instead.
                print('Error')
                continue
            # `data is not []` was always True (identity, not equality);
            # truthiness is what was intended.
            if data:
                plot.update(data)
pass | 0.51562 | 0.114121 |
import datetime
import gzip
import itertools
import json
import logging
import os
import shutil
import sys
import zipfile
from blsgov_api import load_db_list, get_loader
from config import WRK_DB_DIR, META_FILE_NAME, TMP_DB_DIR, DATA_PREFIX, ASPECT_PREFIX, \
SERIES_PREFIX, JSON_GZ_SUFFIX, JSON_SUFFIX, ZIP_SUFFIX, DB_LIST_FILE_NAME, MAX_SERIES_PER_BATCH, \
MAX_DATA_PER_BATCH, MODIFIED_LESS_THAN
from lock import exclusive_lock
TMP_PREFIX = 'tmp.'
logger = logging.getLogger(__name__)
def log(*args):
    """Join *args* with single spaces and emit the line at INFO level."""
    logger.log(logging.INFO, " ".join(str(arg) for arg in args))
def update_dbs(db_ids=None, force_all=False):
    """Update databases whose upstream 'modified' stamp is newer than ours.

    Args:
        db_ids: optional list of database ids to restrict the update to.
        force_all: when True, re-update matching databases even if the
            local copy looks current.
    """
    log('load db lists')
    new_db_list = load_db_list()
    cur_db_list = []
    try:
        with gzip.open(DB_LIST_FILE_NAME, 'rt') as f:
            cur_db_list = json.loads(f.read())
    except (OSError, EOFError, ValueError):
        # No local list yet, or it is truncated/corrupt: start empty.
        # (The original bare `except:` also swallowed KeyboardInterrupt.)
        pass
    new_db_list.sort(key=lambda d: d['modified'])
    # Only consider databases modified recently enough to matter.
    new_db_list = [d for d in new_db_list
                   if datetime.datetime.now() - datetime.datetime.fromisoformat(d['modified']) < MODIFIED_LESS_THAN]
    for ndb in new_db_list:
        if db_ids is not None and ndb['id'] not in db_ids:
            continue
        cdb = next((i for i in cur_db_list if i['id'] == ndb['id']), None)
        if cdb is None or cdb['modified'] < ndb['modified'] or force_all:  # check corrupted files
            if cdb is not None:
                cur_db_list.remove(cdb)
            cur_db_list.append(ndb)
            updater = Updater(ndb['id'])
            # The heavy download/rebuild happens outside the lock; only
            # the final directory swap is serialized.
            updater.prepare_update()
            with exclusive_lock():
                updater.update()
            # Persist progress after each database so a crash loses at
            # most the in-flight one.
            with gzip.open(DB_LIST_FILE_NAME, 'wt') as f:
                cur_db_list.sort(key=lambda d: d['modified'])
                f.write(json.dumps(cur_db_list, indent=1))
class Updater:
    """Rebuilds one BLS database's on-disk files in a temp dir, then swaps it in.

    prepare_update() downloads and writes everything under tmp_dir;
    update() (called under the exclusive lock) moves tmp_dir over wrk_dir.
    """

    def __init__(self, symbol):
        # Database id (e.g. "AP"); lowercased to form directory names.
        self.symbol = symbol
        self.loader = get_loader(symbol)
        self.tmp_dir = os.path.join(TMP_DB_DIR, self.symbol.lower())
        self.wrk_dir = os.path.join(WRK_DB_DIR, self.symbol.lower())
        # Series per output batch; recomputed in prepare_update().
        self.batch_size = 1

    def update(self):
        """Replace the working directory with the prepared temp directory."""
        log(self.symbol + ": update")
        try:
            shutil.rmtree(self.wrk_dir)
        except FileNotFoundError:
            pass
        os.makedirs(os.path.dirname(self.wrk_dir), exist_ok=True)
        shutil.move(self.tmp_dir, self.wrk_dir)

    def prepare_update(self):
        """Download the raw database and build all batch files under tmp_dir."""
        log(self.symbol + ": prepare update")
        try:
            shutil.rmtree(self.tmp_dir)
        except FileNotFoundError:
            pass
        os.makedirs(self.tmp_dir, exist_ok=True)
        self.loader.download()
        log(self.symbol + ": calc batch size")
        # Size batches so that neither the series-per-batch nor the
        # data-points-per-batch limit is exceeded.
        series_count = self.loader.approx_series_count()
        data_count = self.loader.approx_data_count()
        s_batch_count = series_count // MAX_SERIES_PER_BATCH + 1
        d_batch_count = data_count // MAX_DATA_PER_BATCH + 1
        batch_count = max(s_batch_count, d_batch_count, 1)
        self.batch_size = series_count//batch_count
        log(self.symbol + ":", "batch_size:", self.batch_size, "batch_count:", batch_count)
        self.update_meta()
        self.update_series_list()
        self.update_data_series(DATA_PREFIX, self.loader.parse_data())
        self.update_data_series(ASPECT_PREFIX, self.loader.parse_aspect())
        self.loader.clear()

    def update_meta(self):
        """Write the database metadata as a gzipped JSON file."""
        log(self.symbol + ": update meta")
        # load meta
        meta = self.loader.parse_meta()
        meta_fn = os.path.join(self.tmp_dir, META_FILE_NAME)
        with gzip.open(meta_fn, 'wt') as f:
            f.write(json.dumps(meta, indent=1))

    def update_series_list(self):
        """Write the series list as id-sorted, gzipped JSON batch files.

        Two passes: the raw series stream is chunked into individually
        sorted temp files, which are then k-way merged into the final
        globally-sorted batches.
        """
        log(self.symbol + ": update series")
        # load series
        batch = []
        batch_files = []
        i = 0

        # Sort the in-memory batch by id and flush it to a numbered temp file.
        def write_series_batch():
            fn = os.path.join(self.tmp_dir, TMP_PREFIX + SERIES_PREFIX + str(i) + JSON_GZ_SUFFIX)
            batch_files.append(fn)
            batch.sort(key=lambda b: b['id'])
            with gzip.open(fn, 'wt') as f:
                for b in batch:
                    f.write(json.dumps(b) + "\n")

        for s in self.loader.parse_series():
            batch.append(s)
            if len(batch) >= self.batch_size:
                write_series_batch()
                i += 1
                batch = []
        if len(batch) > 0:
            write_series_batch()
        log("build sorted index")

        # K-way merge over the per-file sorted streams, yielding series
        # in globally ascending id order.
        def sorted_series_generator():
            fds = [{"file": gzip.open(b, 'rt'), 'cur': None} for b in batch_files]
            while True:
                closed = False
                # Refill each stream's one-row lookahead buffer.
                for fd in fds:
                    if fd['cur'] is None and fd['file'] is not None:
                        row = fd['file'].readline()
                        if len(row) == 0:
                            fd['file'].close()
                            fd['file'] = None
                            closed = True
                        else:
                            fd['cur'] = json.loads(row)
                if closed:
                    # Drop exhausted streams; stop when none remain.
                    fds = [fd for fd in fds if fd['file'] is not None]
                    if len(fds) == 0:
                        break
                # Emit the smallest buffered id across all streams.
                mx = min(fds, key=lambda fd: fd['cur']['id'])
                yield mx['cur']
                mx['cur'] = None

        batch = []
        for s in sorted_series_generator():
            batch.append(s)
            if len(batch) >= self.batch_size:
                # The file name encodes the id range the batch covers.
                fn = os.path.join(self.tmp_dir, SERIES_PREFIX + batch[0]['id'] + '.' + batch[-1]['id'] + JSON_GZ_SUFFIX)
                with gzip.open(fn, 'wt') as f:
                    f.write(array_to_json(batch))
                i += 1
                batch = []
        if len(batch) > 0:
            fn = os.path.join(self.tmp_dir, SERIES_PREFIX + batch[0]['id'] + '.' + batch[-1]['id'] + JSON_GZ_SUFFIX)
            with gzip.open(fn, 'wt') as f:
                f.write(array_to_json(batch))
        # Remove the first-pass temp files.
        for bf in batch_files:
            os.remove(bf)

    def update_data_series(self, prefix, data_source_generator):
        """Partition data points by series-id range and write one zip per range.

        Each zip mirrors a series batch file's id range and contains one
        JSON file per series.
        """
        log(self.symbol + ":update data " + prefix)
        # Open one temp gz file per existing series batch (id range).
        batch_files = []
        for fn in os.listdir(self.tmp_dir):
            if fn.startswith(SERIES_PREFIX):
                nfp = fn.split('.')
                batch_fn = os.path.join(self.tmp_dir, TMP_PREFIX + prefix + nfp[1] + '.' + nfp[2] + JSON_GZ_SUFFIX)
                batch_files.append({
                    'from': nfp[1],
                    'to': nfp[2],
                    'path': batch_fn,
                    'fd': gzip.open(batch_fn, 'wt'),
                })
        # Route every data point to the batch covering its series id.
        for s in data_source_generator:
            bf = next((bf for bf in batch_files if bf['from'] <= s['series_id'] <= bf['to']))
            bf['fd'].write(json.dumps(s) + "\n")
        log("transform gz to zip")
        for bf in batch_files:
            bf['fd'].close()
            with gzip.open(bf['path'], 'rt') as f:
                data = f.read()
            # One JSON object per line; drop the trailing empty split entry.
            data = data.split('\n')[:-1]
            if len(data) > 0:
                data = '[\n' + ',\n'.join(data) + ']'
                data = json.loads(data)
            data.sort(key=lambda i: (i['series_id'], i['year'], i['period']))
            zip_file_name = os.path.join(self.tmp_dir, prefix + bf['from'] + '.' + bf['to'] + ZIP_SUFFIX)
            with zipfile.ZipFile(zip_file_name, 'w', compression=zipfile.ZIP_DEFLATED, compresslevel=9) as z:
                for s in itertools.groupby(data, key=lambda i:i['series_id']):
                    # rm duplicates
                    series = [next(i[1]) for i in itertools.groupby(s[1], key=lambda i: (i['year'], i['period']))]
                    for i in series:
                        del i['series_id']
                    series_fn = s[0] + JSON_SUFFIX
                    z.writestr(series_fn, array_to_json(series))
            os.remove(bf['path'])
def array_to_json(arr):
    """Serialize *arr* as a JSON array with one element per line."""
    body = ",\n".join(json.dumps(item) for item in arr)
    return "[\n" + body + "\n]"
if __name__ == '__main__':
    log(sys.argv)
    force = '-f' in sys.argv
    all = '-a' in sys.argv  # NOTE: shadows the builtin all(); name kept for compatibility
    # Skip argv[0]: the script path must never be treated as a database id.
    db_ids = [i for i in sys.argv[1:] if not i.startswith('-')]
update_dbs(db_ids if not all else None, force) | update.py | import datetime
import gzip
import itertools
import json
import logging
import os
import shutil
import sys
import zipfile
from blsgov_api import load_db_list, get_loader
from config import WRK_DB_DIR, META_FILE_NAME, TMP_DB_DIR, DATA_PREFIX, ASPECT_PREFIX, \
SERIES_PREFIX, JSON_GZ_SUFFIX, JSON_SUFFIX, ZIP_SUFFIX, DB_LIST_FILE_NAME, MAX_SERIES_PER_BATCH, \
MAX_DATA_PER_BATCH, MODIFIED_LESS_THAN
from lock import exclusive_lock
TMP_PREFIX = 'tmp.'
logger = logging.getLogger(__name__)
def log(*args):
    """Join *args* with single spaces and emit the line at INFO level."""
    logger.log(logging.INFO, " ".join(str(arg) for arg in args))
def update_dbs(db_ids=None, force_all=False):
    """Update databases whose upstream 'modified' stamp is newer than ours.

    Args:
        db_ids: optional list of database ids to restrict the update to.
        force_all: when True, re-update matching databases even if the
            local copy looks current.
    """
    log('load db lists')
    new_db_list = load_db_list()
    cur_db_list = []
    try:
        with gzip.open(DB_LIST_FILE_NAME, 'rt') as f:
            cur_db_list = json.loads(f.read())
    except (OSError, EOFError, ValueError):
        # No local list yet, or it is truncated/corrupt: start empty.
        # (The original bare `except:` also swallowed KeyboardInterrupt.)
        pass
    new_db_list.sort(key=lambda d: d['modified'])
    # Only consider databases modified recently enough to matter.
    new_db_list = [d for d in new_db_list
                   if datetime.datetime.now() - datetime.datetime.fromisoformat(d['modified']) < MODIFIED_LESS_THAN]
    for ndb in new_db_list:
        if db_ids is not None and ndb['id'] not in db_ids:
            continue
        cdb = next((i for i in cur_db_list if i['id'] == ndb['id']), None)
        if cdb is None or cdb['modified'] < ndb['modified'] or force_all:  # check corrupted files
            if cdb is not None:
                cur_db_list.remove(cdb)
            cur_db_list.append(ndb)
            updater = Updater(ndb['id'])
            # The heavy download/rebuild happens outside the lock; only
            # the final directory swap is serialized.
            updater.prepare_update()
            with exclusive_lock():
                updater.update()
            # Persist progress after each database so a crash loses at
            # most the in-flight one.
            with gzip.open(DB_LIST_FILE_NAME, 'wt') as f:
                cur_db_list.sort(key=lambda d: d['modified'])
                f.write(json.dumps(cur_db_list, indent=1))
class Updater:
    """Rebuilds one BLS database's on-disk files in a temp dir, then swaps it in.

    prepare_update() downloads and writes everything under tmp_dir;
    update() (called under the exclusive lock) moves tmp_dir over wrk_dir.
    """

    def __init__(self, symbol):
        # Database id (e.g. "AP"); lowercased to form directory names.
        self.symbol = symbol
        self.loader = get_loader(symbol)
        self.tmp_dir = os.path.join(TMP_DB_DIR, self.symbol.lower())
        self.wrk_dir = os.path.join(WRK_DB_DIR, self.symbol.lower())
        # Series per output batch; recomputed in prepare_update().
        self.batch_size = 1

    def update(self):
        """Replace the working directory with the prepared temp directory."""
        log(self.symbol + ": update")
        try:
            shutil.rmtree(self.wrk_dir)
        except FileNotFoundError:
            pass
        os.makedirs(os.path.dirname(self.wrk_dir), exist_ok=True)
        shutil.move(self.tmp_dir, self.wrk_dir)

    def prepare_update(self):
        """Download the raw database and build all batch files under tmp_dir."""
        log(self.symbol + ": prepare update")
        try:
            shutil.rmtree(self.tmp_dir)
        except FileNotFoundError:
            pass
        os.makedirs(self.tmp_dir, exist_ok=True)
        self.loader.download()
        log(self.symbol + ": calc batch size")
        # Size batches so that neither the series-per-batch nor the
        # data-points-per-batch limit is exceeded.
        series_count = self.loader.approx_series_count()
        data_count = self.loader.approx_data_count()
        s_batch_count = series_count // MAX_SERIES_PER_BATCH + 1
        d_batch_count = data_count // MAX_DATA_PER_BATCH + 1
        batch_count = max(s_batch_count, d_batch_count, 1)
        self.batch_size = series_count//batch_count
        log(self.symbol + ":", "batch_size:", self.batch_size, "batch_count:", batch_count)
        self.update_meta()
        self.update_series_list()
        self.update_data_series(DATA_PREFIX, self.loader.parse_data())
        self.update_data_series(ASPECT_PREFIX, self.loader.parse_aspect())
        self.loader.clear()

    def update_meta(self):
        """Write the database metadata as a gzipped JSON file."""
        log(self.symbol + ": update meta")
        # load meta
        meta = self.loader.parse_meta()
        meta_fn = os.path.join(self.tmp_dir, META_FILE_NAME)
        with gzip.open(meta_fn, 'wt') as f:
            f.write(json.dumps(meta, indent=1))

    def update_series_list(self):
        """Write the series list as id-sorted, gzipped JSON batch files.

        Two passes: the raw series stream is chunked into individually
        sorted temp files, which are then k-way merged into the final
        globally-sorted batches.
        """
        log(self.symbol + ": update series")
        # load series
        batch = []
        batch_files = []
        i = 0

        # Sort the in-memory batch by id and flush it to a numbered temp file.
        def write_series_batch():
            fn = os.path.join(self.tmp_dir, TMP_PREFIX + SERIES_PREFIX + str(i) + JSON_GZ_SUFFIX)
            batch_files.append(fn)
            batch.sort(key=lambda b: b['id'])
            with gzip.open(fn, 'wt') as f:
                for b in batch:
                    f.write(json.dumps(b) + "\n")

        for s in self.loader.parse_series():
            batch.append(s)
            if len(batch) >= self.batch_size:
                write_series_batch()
                i += 1
                batch = []
        if len(batch) > 0:
            write_series_batch()
        log("build sorted index")

        # K-way merge over the per-file sorted streams, yielding series
        # in globally ascending id order.
        def sorted_series_generator():
            fds = [{"file": gzip.open(b, 'rt'), 'cur': None} for b in batch_files]
            while True:
                closed = False
                # Refill each stream's one-row lookahead buffer.
                for fd in fds:
                    if fd['cur'] is None and fd['file'] is not None:
                        row = fd['file'].readline()
                        if len(row) == 0:
                            fd['file'].close()
                            fd['file'] = None
                            closed = True
                        else:
                            fd['cur'] = json.loads(row)
                if closed:
                    # Drop exhausted streams; stop when none remain.
                    fds = [fd for fd in fds if fd['file'] is not None]
                    if len(fds) == 0:
                        break
                # Emit the smallest buffered id across all streams.
                mx = min(fds, key=lambda fd: fd['cur']['id'])
                yield mx['cur']
                mx['cur'] = None

        batch = []
        for s in sorted_series_generator():
            batch.append(s)
            if len(batch) >= self.batch_size:
                # The file name encodes the id range the batch covers.
                fn = os.path.join(self.tmp_dir, SERIES_PREFIX + batch[0]['id'] + '.' + batch[-1]['id'] + JSON_GZ_SUFFIX)
                with gzip.open(fn, 'wt') as f:
                    f.write(array_to_json(batch))
                i += 1
                batch = []
        if len(batch) > 0:
            fn = os.path.join(self.tmp_dir, SERIES_PREFIX + batch[0]['id'] + '.' + batch[-1]['id'] + JSON_GZ_SUFFIX)
            with gzip.open(fn, 'wt') as f:
                f.write(array_to_json(batch))
        # Remove the first-pass temp files.
        for bf in batch_files:
            os.remove(bf)

    def update_data_series(self, prefix, data_source_generator):
        """Partition data points by series-id range and write one zip per range.

        Each zip mirrors a series batch file's id range and contains one
        JSON file per series.
        """
        log(self.symbol + ":update data " + prefix)
        # Open one temp gz file per existing series batch (id range).
        batch_files = []
        for fn in os.listdir(self.tmp_dir):
            if fn.startswith(SERIES_PREFIX):
                nfp = fn.split('.')
                batch_fn = os.path.join(self.tmp_dir, TMP_PREFIX + prefix + nfp[1] + '.' + nfp[2] + JSON_GZ_SUFFIX)
                batch_files.append({
                    'from': nfp[1],
                    'to': nfp[2],
                    'path': batch_fn,
                    'fd': gzip.open(batch_fn, 'wt'),
                })
        # Route every data point to the batch covering its series id.
        for s in data_source_generator:
            bf = next((bf for bf in batch_files if bf['from'] <= s['series_id'] <= bf['to']))
            bf['fd'].write(json.dumps(s) + "\n")
        log("transform gz to zip")
        for bf in batch_files:
            bf['fd'].close()
            with gzip.open(bf['path'], 'rt') as f:
                data = f.read()
            # One JSON object per line; drop the trailing empty split entry.
            data = data.split('\n')[:-1]
            if len(data) > 0:
                data = '[\n' + ',\n'.join(data) + ']'
                data = json.loads(data)
            data.sort(key=lambda i: (i['series_id'], i['year'], i['period']))
            zip_file_name = os.path.join(self.tmp_dir, prefix + bf['from'] + '.' + bf['to'] + ZIP_SUFFIX)
            with zipfile.ZipFile(zip_file_name, 'w', compression=zipfile.ZIP_DEFLATED, compresslevel=9) as z:
                for s in itertools.groupby(data, key=lambda i:i['series_id']):
                    # rm duplicates
                    series = [next(i[1]) for i in itertools.groupby(s[1], key=lambda i: (i['year'], i['period']))]
                    for i in series:
                        del i['series_id']
                    series_fn = s[0] + JSON_SUFFIX
                    z.writestr(series_fn, array_to_json(series))
            os.remove(bf['path'])
def array_to_json(arr):
    """Serialize *arr* as a JSON array with one element per line."""
    body = ",\n".join(json.dumps(item) for item in arr)
    return "[\n" + body + "\n]"
if __name__ == '__main__':
    log(sys.argv)
    force = '-f' in sys.argv
    all = '-a' in sys.argv  # NOTE: shadows the builtin all(); name kept for compatibility
    # Skip argv[0]: the script path must never be treated as a database id.
    db_ids = [i for i in sys.argv[1:] if not i.startswith('-')]
update_dbs(db_ids if not all else None, force) | 0.201499 | 0.071461 |
"""Classes to enumerate TPM data from WMI."""
import logging
from gwinpy.wmi import wmi_query
class TpmInfo(object):
  """Query TPM data in WMI."""

  def __init__(self):
    self.wmi = wmi_query.WMIQuery(namespace=r'root\cimv2\security\microsofttpm')

  def _QueryTpmField(self, field, strip=False):
    """Queries a single Win32_Tpm property (shared by the public getters).

    Args:
      field: the Win32_Tpm property name to select.
      strip: if True, strip whitespace from the returned string value.

    Returns:
      The property value from the first result; None for query failure.
    """
    query = 'Select %s from Win32_Tpm' % field
    results = self.wmi.Query(query)
    if results:
      value = getattr(results[0], field)
      if strip:
        value = value.strip()
      logging.debug('Win32_Tpm/%s: %s', field, str(value))
      return value
    logging.warning('No results for %s.', query)
    return None

  def IsActivated(self):
    """Whether the TPM is currently activated.

    Returns:
      True/False for TPM activated; None for query failure.
    """
    return self._QueryTpmField('IsActivated_InitialValue')

  def IsEnabled(self):
    """Whether the TPM is currently enabled.

    Returns:
      True/False for TPM enabled; None for query failure.
    """
    return self._QueryTpmField('IsEnabled_InitialValue')

  def IsOwned(self):
    """Whether the TPM is currently owned.

    Returns:
      True/False for TPM ownership; None for query failure.
    """
    return self._QueryTpmField('IsOwned_InitialValue')

  def TpmPresent(self):
    """Queries the local host for presence of a TPM device.

    Returns:
      True if device found, else False
    """
    results = self.wmi.Query('Select * from Win32_Tpm')
    return bool(len(results))  # pylint: disable=g-explicit-length-test

  def TpmSpec(self):
    """Queries the local TPM specification.

    Returns:
      The TPM SpecVersion string, or None.
    """
    return self._QueryTpmField('SpecVersion', strip=True)

  def TpmVersion(self):
    """Queries the local TPM device version.

    Returns:
      The TPM version string, or None.
    """
    return self._QueryTpmField('ManufacturerVersion', strip=True)
return None | gwinpy/wmi/tpm_info.py | """Classes to enumerate TPM data from WMI."""
import logging
from gwinpy.wmi import wmi_query
class TpmInfo(object):
  """Query TPM data in WMI."""

  def __init__(self):
    self.wmi = wmi_query.WMIQuery(namespace=r'root\cimv2\security\microsofttpm')

  def _QueryTpmField(self, field, strip=False):
    """Queries a single Win32_Tpm property (shared by the public getters).

    Args:
      field: the Win32_Tpm property name to select.
      strip: if True, strip whitespace from the returned string value.

    Returns:
      The property value from the first result; None for query failure.
    """
    query = 'Select %s from Win32_Tpm' % field
    results = self.wmi.Query(query)
    if results:
      value = getattr(results[0], field)
      if strip:
        value = value.strip()
      logging.debug('Win32_Tpm/%s: %s', field, str(value))
      return value
    logging.warning('No results for %s.', query)
    return None

  def IsActivated(self):
    """Whether the TPM is currently activated.

    Returns:
      True/False for TPM activated; None for query failure.
    """
    return self._QueryTpmField('IsActivated_InitialValue')

  def IsEnabled(self):
    """Whether the TPM is currently enabled.

    Returns:
      True/False for TPM enabled; None for query failure.
    """
    return self._QueryTpmField('IsEnabled_InitialValue')

  def IsOwned(self):
    """Whether the TPM is currently owned.

    Returns:
      True/False for TPM ownership; None for query failure.
    """
    return self._QueryTpmField('IsOwned_InitialValue')

  def TpmPresent(self):
    """Queries the local host for presence of a TPM device.

    Returns:
      True if device found, else False
    """
    results = self.wmi.Query('Select * from Win32_Tpm')
    return bool(len(results))  # pylint: disable=g-explicit-length-test

  def TpmSpec(self):
    """Queries the local TPM specification.

    Returns:
      The TPM SpecVersion string, or None.
    """
    return self._QueryTpmField('SpecVersion', strip=True)

  def TpmVersion(self):
    """Queries the local TPM device version.

    Returns:
      The TPM version string, or None.
    """
    return self._QueryTpmField('ManufacturerVersion', strip=True)
return None | 0.891052 | 0.250317 |
from arango import ArangoClient
import getpass
import sys
from mypy_extensions import TypedDict
# Result shape for analyze_host(): URL scheme, bare hostname, numeric port.
HostAnalysis = TypedDict(
    "HostAnalysis", {"protocol": str, "hostname": str, "port": int}
)
def analyze_host(host: str) -> HostAnalysis:
    """Split an ArangoDB host URL into protocol, hostname and port.

    Args:
        host: e.g. "http://db.example.com:8529". The port defaults to
            8529 when omitted.

    Raises:
        RuntimeError: if the protocol is not http(s) or the port is not
            an integer. The original raised a bare RuntimeError; a
            message is attached so callers/tracebacks keep the detail.
    """
    if host.startswith("https://"):
        protocol = "https"
    elif host.startswith("http://"):
        protocol = "http"
    else:
        print(f"bad protocol: {host}", file=sys.stderr)
        raise RuntimeError(f"bad protocol: {host}")
    parts = host[len(f"{protocol}://") :].split(":")
    hostname = parts[0]
    try:
        port = int(parts[1])
    except IndexError:
        # No explicit port: fall back to the ArangoDB default.
        port = 8529
    except ValueError:
        print(f"bad port: {parts[1]}", file=sys.stderr)
        raise RuntimeError(f"bad port: {parts[1]}")
    return {"protocol": protocol, "hostname": hostname, "port": port}
def main():
    """Backfill a missing "permissions" field on workspace_mapping docs.

    Returns:
        Process exit code: 1 on bad usage or a bad host argument.
    """
    if len(sys.argv) < 2:
        print("usage: ensure_workspace_metadata.py <arango-host>", file=sys.stderr)
        return 1
    # Split apart the host parameter into constituents.
    try:
        args = analyze_host(sys.argv[1])
    except RuntimeError:
        return 1
    # Create a connection to the database.
    client = ArangoClient(
        protocol=args["protocol"], host=args["hostname"], port=args["port"]
    )
    # Get a password from the user.
    password = getpass.getpass("Password: ")
    # Retrieve the workspace mapping collection from the system database.
    db = client.db(name="_system", password=password)
    coll = db.collection("workspace_mapping")
    # Loop through the documents and correct ones with a missing "permissions"
    # field.
    for doc in coll.all():
        if "permissions" not in doc:
            # Default: publicly visible, no owner/maintainers yet.
            doc["permissions"] = {
                "owner": "",
                "maintainers": [],
                "writers": [],
                "readers": [],
                "public": True,
            }
            print(f"updating {doc['name']}...", end="")
            db.update_document(doc)
            print("done")
if __name__ == "__main__":
sys.exit(main()) | devops/scripts/ensure_workspace_metadata.py | from arango import ArangoClient
import getpass
import sys
from mypy_extensions import TypedDict
# Result shape for analyze_host(): URL scheme, bare hostname, numeric port.
HostAnalysis = TypedDict(
    "HostAnalysis", {"protocol": str, "hostname": str, "port": int}
)
def analyze_host(host: str) -> HostAnalysis:
    """Split an ArangoDB host URL into protocol, hostname and port.

    Args:
        host: e.g. "http://db.example.com:8529". The port defaults to
            8529 when omitted.

    Raises:
        RuntimeError: if the protocol is not http(s) or the port is not
            an integer. The original raised a bare RuntimeError; a
            message is attached so callers/tracebacks keep the detail.
    """
    if host.startswith("https://"):
        protocol = "https"
    elif host.startswith("http://"):
        protocol = "http"
    else:
        print(f"bad protocol: {host}", file=sys.stderr)
        raise RuntimeError(f"bad protocol: {host}")
    parts = host[len(f"{protocol}://") :].split(":")
    hostname = parts[0]
    try:
        port = int(parts[1])
    except IndexError:
        # No explicit port: fall back to the ArangoDB default.
        port = 8529
    except ValueError:
        print(f"bad port: {parts[1]}", file=sys.stderr)
        raise RuntimeError(f"bad port: {parts[1]}")
    return {"protocol": protocol, "hostname": hostname, "port": port}
def main():
    """Backfill a missing "permissions" field on workspace_mapping docs.

    Returns:
        Process exit code: 1 on bad usage or a bad host argument.
    """
    if len(sys.argv) < 2:
        print("usage: ensure_workspace_metadata.py <arango-host>", file=sys.stderr)
        return 1
    # Split apart the host parameter into constituents.
    try:
        args = analyze_host(sys.argv[1])
    except RuntimeError:
        return 1
    # Create a connection to the database.
    client = ArangoClient(
        protocol=args["protocol"], host=args["hostname"], port=args["port"]
    )
    # Get a password from the user.
    password = getpass.getpass("Password: ")
    # Retrieve the workspace mapping collection from the system database.
    db = client.db(name="_system", password=password)
    coll = db.collection("workspace_mapping")
    # Loop through the documents and correct ones with a missing "permissions"
    # field.
    for doc in coll.all():
        if "permissions" not in doc:
            # Default: publicly visible, no owner/maintainers yet.
            doc["permissions"] = {
                "owner": "",
                "maintainers": [],
                "writers": [],
                "readers": [],
                "public": True,
            }
            print(f"updating {doc['name']}...", end="")
            db.update_document(doc)
            print("done")
if __name__ == "__main__":
sys.exit(main()) | 0.291989 | 0.223652 |
import os
import re
from selenium.webdriver.remote.webelement import WebElement
from utils.global_holder import GlobalHolder
class ElementAccessor(object):
    """Static helpers for reading and writing values on Selenium WebElements."""

    @staticmethod
    def __set_value(element: WebElement, value: str):
        # Assign the element's value attribute directly via JavaScript.
        if element is None:
            return
        GlobalHolder.Browser.execute_script(
            'arguments[0].value = arguments[1]',
            element,
            value)

    @staticmethod
    def __set_input_value(element: WebElement, value: str):
        # Input-like elements: clear, then type or JS-assign the value.
        element.clear()
        if re.search('[ア-ン]', value) is not None:
            # Katakana present: assign via JavaScript because send_keys can
            # mangle it. NOTE(review): the original comment said half-width
            # kana, but the pattern matches full-width katakana -- confirm.
            ElementAccessor.__set_value(element, value)
        else:
            element.send_keys(value)

    @staticmethod
    def set(element: WebElement, value: str):
        """Set *value* on the given form element, dispatching on tag type."""
        if element is None:
            return
        value = str(value)
        tag_name = element.tag_name
        if tag_name == 'input':
            input_type = element.get_attribute('type')
            if input_type in ('radio', 'checkbox'):
                low = value.lower()
                # false/0/no/null/undefined all mean "unchecked".
                is_false = low in ('false', '0', 'no', 'null', 'undefined')
                if is_false:
                    element.clear()
                else:
                    element.click()
            else:
                ElementAccessor.__set_input_value(element, value)
        # BUG FIX: the original used an independent `if` here, so the
        # trailing `else` ALSO ran for input tags, overwriting the value
        # via JavaScript after send_keys (and clobbering the `value`
        # attribute of checkboxes). `elif` restores one-branch dispatch.
        elif tag_name in ('button', 'option', 'data', 'meter', 'progress'):
            ElementAccessor.__set_input_value(element, value)
        elif tag_name == 'textarea':
            # textarea needs focus before typing.
            element.click()
            ElementAccessor.__set_input_value(element, value)
        elif tag_name == 'select':
            ElementAccessor.__set_value(element, value)
        else:
            ElementAccessor.__set_value(element, value)

    @staticmethod
    def set_file(element: WebElement, file: str):
        """Send the absolute path of *file* (under ../input) to a file input."""
        if element is None:
            return
        # Ensure a leading slash.
        file = str(file)
        if not file.startswith('/'):
            file = '/' + file
        # Send the file path to the input.
        element.clear()
        # Absolute path.
        path = os.path.abspath('../input' + file)
        element.send_keys(path)

    @staticmethod
    def get(element: WebElement) -> str:
        """Return the element's current value or text ('' for None)."""
        if element is None:
            return ''
        if element.tag_name in ('input', 'select', 'button', 'option',
                                'data', 'meter', 'progress'):
            # Input-like elements expose their value attribute.
            return element.get_attribute('value')
        elif element.tag_name == 'textarea':
            # textarea special case (same result as the default branch).
            return element.text
        else:
            return element.text

    @staticmethod
    def check(element: WebElement):
        """Force the element's checked state to true via JavaScript."""
        if element is None:
            return
        return GlobalHolder.Browser.execute_script(
            'arguments[0].checked = true',
            element)

    @staticmethod
    def uncheck(element: WebElement):
        """Force the element's checked state to false via JavaScript."""
        if element is None:
            return
        return GlobalHolder.Browser.execute_script(
            'arguments[0].checked = false',
            element)

    @staticmethod
    def is_checked(element: WebElement) -> bool:
        """Return the element's checked state (False for None)."""
        if element is None:
            return False
        return GlobalHolder.Browser.execute_script(
            'return arguments[0].checked',
            element)

    @staticmethod
    def inner_text(element: WebElement) -> str:
        """Return the element's textContent ('' for None)."""
        if element is None:
            return ''
        return GlobalHolder.Browser.execute_script(
            'return arguments[0].textContent',
            element)

    @staticmethod
    def inner_html(element: WebElement) -> str:
        """Return the element's innerHTML ('' for None)."""
        if element is None:
            return ''
        return GlobalHolder.Browser.execute_script(
            'return arguments[0].innerHTML',
            element)

    @staticmethod
    def set_inner_text(element: WebElement, text: str) -> str:
        """Set and return the element's textContent ('' for None)."""
        if element is None:
            return ''
        return GlobalHolder.Browser.execute_script(
            'return arguments[0].textContent = arguments[1]',
            element,
            text)

    @staticmethod
    def set_inner_html(element: WebElement, html: str) -> str:
        """Set and return the element's innerHTML ('' for None)."""
        if element is None:
            return ''
        return GlobalHolder.Browser.execute_script(
            'return arguments[0].innerHTML = arguments[1]',
            element,
            html)
html) | tests/utils/element_accessor.py |
import os
import re
from selenium.webdriver.remote.webelement import WebElement
from utils.global_holder import GlobalHolder
class ElementAccessor(object):
@staticmethod
def __set_value(element: WebElement, value: str):
    # Assign the element's value attribute directly via JavaScript.
    if element is None:
        return
    GlobalHolder.Browser.execute_script(
        'arguments[0].value = arguments[1]',
        element,
        value)
@staticmethod
def __set_input_value(element: WebElement, value: str):
    # Input-like elements: clear, then type or JS-assign the value.
    element.clear()
    if re.search('[ア-ン]', value) is not None:
        # Katakana present: assign via JavaScript because send_keys can
        # mangle it. NOTE(review): the original comment said half-width
        # kana, but the pattern matches full-width katakana -- confirm.
        ElementAccessor.__set_value(element, value)
    else:
        element.send_keys(value)
@staticmethod
def set(element: WebElement, value: str):
    """Set *value* on the given form element, dispatching on tag type."""
    if element is None:
        return
    value = str(value)
    tag_name = element.tag_name
    if tag_name == 'input':
        input_type = element.get_attribute('type')
        if input_type in ('radio', 'checkbox'):
            low = value.lower()
            # false/0/no/null/undefined all mean "unchecked".
            is_false = low in ('false', '0', 'no', 'null', 'undefined')
            if is_false:
                element.clear()
            else:
                element.click()
        else:
            ElementAccessor.__set_input_value(element, value)
    # BUG FIX: the original used an independent `if` here, so the
    # trailing `else` ALSO ran for input tags, overwriting the value via
    # JavaScript after send_keys (and clobbering the `value` attribute
    # of checkboxes). `elif` restores one-branch dispatch.
    elif tag_name in ('button', 'option', 'data', 'meter', 'progress'):
        ElementAccessor.__set_input_value(element, value)
    elif tag_name == 'textarea':
        # textarea needs focus before typing.
        element.click()
        ElementAccessor.__set_input_value(element, value)
    elif tag_name == 'select':
        ElementAccessor.__set_value(element, value)
    else:
        ElementAccessor.__set_value(element, value)
@staticmethod
def set_file(element: WebElement, file: str):
if element is None:
return
# /を頭につける
file = str(file)
if not file.startswith('/'):
file = '/' + file
# inputにファイルパスを送る
element.clear()
# 絶対パス
path = os.path.abspath('../input' + file)
element.send_keys(path)
@staticmethod
def get(element: WebElement) -> str:
if element is None:
return ''
""" 対象の要素の値を取得 """
if element.tag_name == 'input' or \
element.tag_name == 'select' or \
element.tag_name == 'button' or \
element.tag_name == 'option' or \
element.tag_name == 'data' or \
element.tag_name == 'meter' or \
element.tag_name == 'progress':
# input系はvalueを取得
return element.get_attribute('value')
elif element.tag_name == 'textarea':
# textareaは特殊
return element.text
else:
return element.text
@staticmethod
def check(element: WebElement):
if element is None:
return
return GlobalHolder.Browser.execute_script(
'arguments[0].checked = true',
element)
@staticmethod
def uncheck(element: WebElement):
if element is None:
return
return GlobalHolder.Browser.execute_script(
'arguments[0].checked = false',
element)
@staticmethod
def is_checked(element: WebElement) -> bool:
if element is None:
return False
return GlobalHolder.Browser.execute_script(
'return arguments[0].checked',
element)
@staticmethod
def inner_text(element: WebElement) -> str:
if element is None:
return ''
return GlobalHolder.Browser.execute_script(
'return arguments[0].textContent',
element)
@staticmethod
def inner_html(element: WebElement) -> str:
if element is None:
return ''
return GlobalHolder.Browser.execute_script(
'return arguments[0].innerHTML',
element)
@staticmethod
def set_inner_text(element: WebElement, text: str) -> str:
if element is None:
return ''
return GlobalHolder.Browser.execute_script(
'return arguments[0].textContent = arguments[1]',
element,
text)
@staticmethod
def set_inner_html(element: WebElement, html: str) -> str:
if element is None:
return ''
return GlobalHolder.Browser.execute_script(
'return arguments[0].innerHTML = arguments[1]',
element,
html) | 0.363986 | 0.157687 |
import contextlib
import io
from itertools import zip_longest
from test import QiskitNatureTestCase
import numpy as np
from qiskit_nature.results import ElectronicStructureResult
class TestElectronicStructureResult(QiskitNatureTestCase):
# pylint: disable=attribute-defined-outside-init
"""Additional tests asserting some edge cases of the ElectronicStructureResult."""
def _assert_printed_result(self, result):
with contextlib.redirect_stdout(io.StringIO()) as out:
print(result)
for truth, expected in zip_longest(out.getvalue().split("\n"), self.expected.split("\n")):
if expected is None:
return
assert truth.strip().startswith(expected.strip())
    def test_print_empty(self):
        """Test printing an empty result.

        With no computed energies, only the section header is printed.
        """
        res = ElectronicStructureResult()
        self.expected = """\
        === GROUND STATE ENERGY ===
        """
        self._assert_printed_result(res)
    def test_print_complex(self):
        """Test printing complex numbers.

        A purely imaginary energy must be rendered as ``0.0+1.j``.
        """
        res = ElectronicStructureResult()
        res.computed_energies = np.asarray([1.0j])
        self.expected = """\
        === GROUND STATE ENERGY ===
        * Electronic ground state energy (Hartree): 0.0+1.j
        - computed part: 0.0+1.j
        """
        self._assert_printed_result(res)
def test_print_complex_dipole(self):
"""Test printing complex dipoles."""
res = ElectronicStructureResult()
res.computed_energies = np.asarray([1.0])
res.nuclear_dipole_moment = (0.0, 0.0, 1.0)
res.computed_dipole_moment = [(0.0, 0.0, 1.0j)]
res.extracted_transformer_dipoles = [{}]
self.expected = """\
=== GROUND STATE ENERGY ===
* Electronic ground state energy (Hartree): 1.
- computed part: 1.
=== DIPOLE MOMENTS ===
~ Nuclear dipole moment (a.u.): [0.0 0.0 1.]
0:
* Electronic dipole moment (a.u.): [0.0 0.0 0.0+1.j]
- computed part: [0.0 0.0 0.0+1.j]
> Dipole moment (a.u.): [0.0 0.0 1.+1.j] Total: 1.+1.j
(debye): [0.0 0.0 2.54174623+2.54174623j] Total: 2.54174623+2.54174623j
"""
self._assert_printed_result(res) | test/results/test_electronic_structure_result.py | import contextlib
import io
from itertools import zip_longest
from test import QiskitNatureTestCase
import numpy as np
from qiskit_nature.results import ElectronicStructureResult
class TestElectronicStructureResult(QiskitNatureTestCase):
# pylint: disable=attribute-defined-outside-init
"""Additional tests asserting some edge cases of the ElectronicStructureResult."""
def _assert_printed_result(self, result):
with contextlib.redirect_stdout(io.StringIO()) as out:
print(result)
for truth, expected in zip_longest(out.getvalue().split("\n"), self.expected.split("\n")):
if expected is None:
return
assert truth.strip().startswith(expected.strip())
def test_print_empty(self):
"""Test printing an empty result."""
res = ElectronicStructureResult()
self.expected = """\
=== GROUND STATE ENERGY ===
"""
self._assert_printed_result(res)
def test_print_complex(self):
"""Test printing complex numbers."""
res = ElectronicStructureResult()
res.computed_energies = np.asarray([1.0j])
self.expected = """\
=== GROUND STATE ENERGY ===
* Electronic ground state energy (Hartree): 0.0+1.j
- computed part: 0.0+1.j
"""
self._assert_printed_result(res)
def test_print_complex_dipole(self):
"""Test printing complex dipoles."""
res = ElectronicStructureResult()
res.computed_energies = np.asarray([1.0])
res.nuclear_dipole_moment = (0.0, 0.0, 1.0)
res.computed_dipole_moment = [(0.0, 0.0, 1.0j)]
res.extracted_transformer_dipoles = [{}]
self.expected = """\
=== GROUND STATE ENERGY ===
* Electronic ground state energy (Hartree): 1.
- computed part: 1.
=== DIPOLE MOMENTS ===
~ Nuclear dipole moment (a.u.): [0.0 0.0 1.]
0:
* Electronic dipole moment (a.u.): [0.0 0.0 0.0+1.j]
- computed part: [0.0 0.0 0.0+1.j]
> Dipole moment (a.u.): [0.0 0.0 1.+1.j] Total: 1.+1.j
(debye): [0.0 0.0 2.54174623+2.54174623j] Total: 2.54174623+2.54174623j
"""
self._assert_printed_result(res) | 0.692954 | 0.458046 |
import os
import os.path as osp
import numpy as np
from scipy.integrate import odeint
import moviepy.editor as mpy
from qtpy.QtCore import Qt
from qtpy.QtCore import QPointF
from qtpy.QtGui import QColor
from nezzle.graphics import EllipseNode
from nezzle.graphics import TextLabel
from nezzle.graphics import CurvedEdge
from nezzle.graphics import Triangle, Hammer
from nezzle.graphics import Network
from nezzle.io import write_image
def create_network(pos_x, pos_y, state, norm_abs_state):
    """Build the three-node Lorenz network for a single time point.

    Args:
        pos_x: x-coordinates of the X, Y, Z nodes.
        pos_y: y-coordinates of the X, Y, Z nodes.
        state: current (x, y, z) state; the sign selects red (positive)
            or blue (negative) node coloring.
        norm_abs_state: |state| normalized to [0, 1]; drives color
            intensity, node size and label size.

    Returns:
        Network: the populated nezzle network.
    """
    color_white = np.array([255, 255, 255, 0])
    color_up = np.array([255, 0, 0, 0])
    color_dn = np.array([0, 0, 255, 0])

    net = Network('Lorenz network')

    x = EllipseNode('X', 40, 40, pos=QPointF(pos_x[0], pos_y[0]))
    y = EllipseNode('Y', 40, 40, pos=QPointF(pos_x[1], pos_y[1]))
    z = EllipseNode('Z', 40, 40, pos=QPointF(pos_x[2], pos_y[2]))
    net.add_node(x)
    net.add_node(y)
    net.add_node(z)

    # (identifier, source, target, arrow head, control-point x, y)
    # BUG FIX: edges 4 and 5 were also named "EDGE3" (copy-paste); each
    # edge now gets a unique identifier.
    edge_specs = [
        ('EDGE1', x, y, Triangle(width=10, height=10, offset=4), -10, -50),
        ('EDGE2', y, x, Triangle(width=10, height=10, offset=4), 10, 40),
        ('EDGE3', y, z, Triangle(width=10, height=10, offset=4), -28, -28),
        ('EDGE4', z, y, Hammer(width=14, height=4, offset=4), 45, 40),
        ('EDGE5', z, x, Triangle(width=10, height=10, offset=4), -45, 40),
    ]
    for iden, src, trg, head, cp_x, cp_y in edge_specs:
        edge = CurvedEdge(iden, src, trg, width=4, head=head)
        edge["FILL_COLOR"] = Qt.black
        edge["CP_POS_X"] = cp_x
        edge["CP_POS_Y"] = cp_y
        net.add_edge(edge)

    for i, node in enumerate([x, y, z]):
        # Interpolate between white and red/blue by normalized magnitude.
        if state[i] > 0.0:
            color = color_white + norm_abs_state[i] * (color_up - color_white)
        else:
            color = color_white + norm_abs_state[i] * (color_dn - color_white)
        color[3] = 255  # fully opaque

        node["FILL_COLOR"] = QColor(*color)
        node["BORDER_COLOR"] = Qt.black
        node["BORDER_WIDTH"] = 2
        node["WIDTH"] = node["HEIGHT"] = 20 + 50 * norm_abs_state[i]

        label_name = TextLabel(node, node.iden)
        label_name["FONT_SIZE"] = 10 + 30 * norm_abs_state[i]
        label_name["TEXT_COLOR"] = Qt.white
        label_name.align()

        # Keep the label readable against the node's fill color.
        lightness = QColor(node["FILL_COLOR"]).lightness()
        if lightness < 200:
            label_name["TEXT_COLOR"] = Qt.white
            label_name["FONT_BOLD"] = True
        else:
            label_name["TEXT_COLOR"] = Qt.black
            label_name["FONT_BOLD"] = False
        net.add_label(label_name)
    # end of for

    return net
def create_movie(fpaths, fout):
    """Concatenate the frame images in *fpaths* into an animated GIF."""
    clips = [mpy.ImageClip(p).set_duration(0.2) for p in fpaths]
    movie = mpy.concatenate_videoclips(clips,
                                       bg_color=(255, 255, 255),
                                       method="compose")
    movie.write_gif(fout, fps=10)
def update(nav, net):
# Solve the ODE of Lorenz system
def ode(s, t):
sigma = 10
beta = 2.667
rho = 28
x, y, z = s
return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
t = np.arange(0, 20, 0.1)
y0 = np.array([0, 1, 1.05])
s = odeint(ode, y0, t)
abs_s = np.abs(s)
norm_abs_s = abs_s / abs_s.max(axis=0)
pos_x = np.array([-100.0, 100.0, 0.0])
pos_y = np.array([0.0, 0.0, 120.0])
dpath = osp.join(osp.dirname(__file__), "lorenz-dynamics-results")
os.makedirs(dpath, exist_ok=True)
fpaths = []
for i, (state, norm_abs_state) in enumerate(zip(s, norm_abs_s)):
net = create_network(pos_x, pos_y, state, norm_abs_state)
fpath = osp.join(dpath, "lorenz-dynamics-%03d.png"%(i))
fpaths.append(fpath)
write_image(net, fpath, scale_width=200, scale_height=200)
# end of for
create_movie(fpaths, osp.join(dpath, "lorenz-dynamics.gif")) | examples/codes/visualize_ode_lorenz.py | import os
import os.path as osp
import numpy as np
from scipy.integrate import odeint
import moviepy.editor as mpy
from qtpy.QtCore import Qt
from qtpy.QtCore import QPointF
from qtpy.QtGui import QColor
from nezzle.graphics import EllipseNode
from nezzle.graphics import TextLabel
from nezzle.graphics import CurvedEdge
from nezzle.graphics import Triangle, Hammer
from nezzle.graphics import Network
from nezzle.io import write_image
def create_network(pos_x, pos_y, state, norm_abs_state):
color_white = np.array([255, 255, 255, 0])
color_up = np.array([255, 0, 0, 0])
color_dn = np.array([0, 0, 255, 0])
net = Network('Lorenz network')
x = EllipseNode('X', 40, 40, pos=QPointF(pos_x[0], pos_y[0]))
y = EllipseNode('Y', 40, 40, pos=QPointF(pos_x[1], pos_y[1]))
z = EllipseNode('Z', 40, 40, pos=QPointF(pos_x[2], pos_y[2]))
net.add_node(x)
net.add_node(y)
net.add_node(z)
head = Triangle(width=10, height=10, offset=4)
edge1 = CurvedEdge("EDGE1", x, y, width=4, head=head)
edge1["FILL_COLOR"] = Qt.black
edge1["CP_POS_X"] = -10
edge1["CP_POS_Y"] = -50
head = Triangle(width=10, height=10, offset=4)
edge2 = CurvedEdge("EDGE2", y, x, width=4, head=head)
edge2["FILL_COLOR"] = Qt.black
edge2["CP_POS_X"] = 10
edge2["CP_POS_Y"] = 40
head = Triangle(width=10, height=10, offset=4)
edge3 = CurvedEdge("EDGE3", y, z, width=4, head=head)
edge3["FILL_COLOR"] = Qt.black
edge3["CP_POS_X"] = -28
edge3["CP_POS_Y"] = -28
head = Hammer(width=14, height=4, offset=4)
edge4 = CurvedEdge("EDGE3", z, y, width=4, head=head)
edge4["FILL_COLOR"] = Qt.black
edge4["CP_POS_X"] = 45
edge4["CP_POS_Y"] = 40
head = Triangle(width=10, height=10, offset=4)
edge5 = CurvedEdge("EDGE3", z, x, width=4, head=head)
edge5["FILL_COLOR"] = Qt.black
edge5["CP_POS_X"] = -45
edge5["CP_POS_Y"] = 40
net.add_edge(edge1)
net.add_edge(edge2)
net.add_edge(edge3)
net.add_edge(edge4)
net.add_edge(edge5)
for i, node in enumerate([x, y, z]):
if state[i] > 0.0:
color = color_white + norm_abs_state[i] * (color_up - color_white)
else:
color = color_white + norm_abs_state[i] * (color_dn - color_white)
color[3] = 255
node["FILL_COLOR"] = QColor(*color)
node["BORDER_COLOR"] = Qt.black
node["BORDER_WIDTH"] = 2
node["WIDTH"] = node["HEIGHT"] = 20 + 50 * norm_abs_state[i]
label_name = TextLabel(node, node.iden)
label_name["FONT_SIZE"] = 10 + 30 * norm_abs_state[i]
label_name["TEXT_COLOR"] = Qt.white
label_name.align()
lightness = QColor(node["FILL_COLOR"]).lightness()
if lightness < 200:
label_name["TEXT_COLOR"] = Qt.white
label_name["FONT_BOLD"] = True
else:
label_name["TEXT_COLOR"] = Qt.black
label_name["FONT_BOLD"] = False
net.add_label(label_name)
# end of for
return net
def create_movie(fpaths, fout):
clips = []
for fpath in fpaths:
img = mpy.ImageClip(fpath).set_duration(0.2)
clips.append(img)
concat_clip = mpy.concatenate_videoclips(clips,
bg_color=(255, 255, 255),
method="compose")
concat_clip.write_gif(fout, fps=10)
def update(nav, net):
# Solve the ODE of Lorenz system
def ode(s, t):
sigma = 10
beta = 2.667
rho = 28
x, y, z = s
return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
t = np.arange(0, 20, 0.1)
y0 = np.array([0, 1, 1.05])
s = odeint(ode, y0, t)
abs_s = np.abs(s)
norm_abs_s = abs_s / abs_s.max(axis=0)
pos_x = np.array([-100.0, 100.0, 0.0])
pos_y = np.array([0.0, 0.0, 120.0])
dpath = osp.join(osp.dirname(__file__), "lorenz-dynamics-results")
os.makedirs(dpath, exist_ok=True)
fpaths = []
for i, (state, norm_abs_state) in enumerate(zip(s, norm_abs_s)):
net = create_network(pos_x, pos_y, state, norm_abs_state)
fpath = osp.join(dpath, "lorenz-dynamics-%03d.png"%(i))
fpaths.append(fpath)
write_image(net, fpath, scale_width=200, scale_height=200)
# end of for
create_movie(fpaths, osp.join(dpath, "lorenz-dynamics.gif")) | 0.29381 | 0.404802 |
import math
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _as_mat(x):
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
class Linear(function.Function):

    r"""Linear function (a.k.a. fully-connected layer or affine transformation).

    This function holds a weight matrix ``W`` and a bias vector ``b``.

    The weight matrix ``W`` has shape ``(out_size, in_size)``.
    This matrix is initialized with i.i.d. Gaussian samples, each of which has
    zero mean and deviation :math:`\sqrt{1/\text{in_size}}`.
    The deviation is scaled by factor ``wscale`` if specified.

    The bias vector ``b`` is of size ``out_size``.
    Each element is initialized with the ``bias`` value.
    If ``nobias`` argument is set to True, then this function does not hold a
    bias vector.

    Let :math:`X` be an input matrix, and :math:`W, b` the weight matrix and
    the bias vector, respectively.
    Then, the output matrix :math:`Y` is computed by :math:`Y = XW^\top + b`,
    where the addition by :math:`b` is broadcasted across the minibatch.

    Args:
        in_size (int): Dimension of input vectors.
        out_size (int): Dimension of output vectors.
        wscale (float): Scaling factor of the weight matrix.
        bias (float): Initial bias value.
        nobias (bool): If True, then this function does not use the bias.
        initialW (2-D array): Initial weight value. If ``None``, then
            ``wscale`` is used to initialize.
        initial_bias (1-D array): Initial bias value. If ``None``, then
            ``bias`` is used to initialize.

    .. note::

       This function accepts an input variable of a non-matrix array.
       In this case, the leading dimension is treated as the batch dimension,
       and the other dimensions are reduced to one dimension.

    """

    def __init__(self, in_size, out_size, wscale=1, bias=0, nobias=False,
                 initialW=None, initial_bias=None):
        self.W = None
        self.gW = None
        self.b = None
        self.gb = None
        if initialW is not None:
            assert initialW.shape == (out_size, in_size)
            self.W = initialW
        else:
            # Default init: zero-mean Gaussian scaled by wscale/sqrt(in_size).
            self.W = numpy.random.normal(
                0, wscale * math.sqrt(1. / in_size),
                (out_size, in_size)).astype(numpy.float32)
        xp = cuda.get_array_module(self.W)
        # Gradients start as NaN sentinels; zero_grads() must run before
        # backward() accumulates into them.
        self.gW = xp.full_like(self.W, numpy.nan)
        if initial_bias is not None:
            assert initial_bias.shape == (out_size,)
            self.b = initial_bias
        elif not nobias:
            self.b = numpy.repeat(numpy.float32(bias), out_size)
        if self.b is not None:
            self.gb = xp.full_like(self.b, numpy.nan)

    @property
    def parameter_names(self):
        # The bias is a parameter only when it exists.
        if self.b is None:
            return 'W',
        return 'W', 'b'

    @property
    def gradient_names(self):
        if self.gb is None:
            return 'gW',
        return 'gW', 'gb'

    def check_type_forward(self, in_types):
        # Single float32 input whose flattened trailing dims match in_size.
        type_check.expect(in_types.size() == 1)
        x_type, = in_types
        type_check.expect(
            x_type.dtype == numpy.float32,
            x_type.ndim >= 2,
            (type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==
             type_check.Variable(self.W.shape[1], 'W.shape[1]')),
        )

    def zero_grads(self):
        """Reset accumulated gradients to zero."""
        self.gW.fill(0)
        if self.gb is not None:
            self.gb.fill(0)

    def forward(self, x):
        # y = x W^T (+ b), with x flattened to a matrix first.
        x = _as_mat(x[0])
        Wx = x.dot(self.W.T)
        if self.b is not None:
            Wx += self.b
        return Wx,

    def backward(self, x, gy):
        _x = _as_mat(x[0])
        # Accumulate (not overwrite) the parameter gradients.
        self.gW += gy[0].T.dot(_x)
        if self.gb is not None:
            self.gb += gy[0].sum(0)
        # Gradient w.r.t. the input, restored to the original input shape.
        return gy[0].dot(self.W).reshape(x[0].shape),
class NonparameterizedLinear(function.Function):

    """Nonparameterized linear class.

    Applies a linear transformation whose weight (and optional bias) are
    passed in as inputs instead of being held as parameters.

    .. seealso:: :class:`Linear`

    """

    def check_type_forward(self, in_types):
        # Inputs are (x, W) or (x, W, b).
        type_check.expect(
            2 <= in_types.size(),
            in_types.size() <= 3,
        )
        x_type = in_types[0]
        w_type = in_types[1]
        prod = type_check.Variable(numpy.prod, 'prod')
        type_check.expect(
            x_type.dtype == numpy.float32,
            w_type.dtype == numpy.float32,
            x_type.ndim >= 2,
            w_type.ndim == 2,
            prod(x_type.shape[1:]) == w_type.shape[1],
        )
        if in_types.size().eval() == 3:
            b_type = in_types[2]
            type_check.expect(
                b_type.ndim == 1,
                b_type.shape[0] == w_type.shape[0],
            )

    def forward(self, x):
        # Wrap the given W (and optional b) in a parameterized Linear and
        # delegate; the instance is kept for backward().
        W = x[1]
        out_size, in_size = W.shape
        if len(x) == 3:
            func = Linear(
                in_size, out_size, initialW=W, initial_bias=x[2])
        else:
            func = Linear(
                in_size, out_size, initialW=W, nobias=True)
        self.func = func
        if any(isinstance(i, cuda.ndarray) for i in x):
            func.to_gpu()
        return func.forward(x[:1])

    def backward(self, x, gy):
        func = self.func
        func.zero_grads()
        gx = func.backward(x[:1], gy)
        # Return gradients for every input: (x, W[, b]).
        if func.gb is None:
            return (gx[0], func.gW)
        return (gx[0], func.gW, func.gb)
def linear(x, W, b=None):
"""Nonparameterized linear function.
Args:
x (~chainer.Variable): Input variable.
W (~chainer.Variable): Weight variable.
b (~chainer.Variable): Bias variable (optional).
Returns:
~chainer.Variable: Output variable.
.. seealso:: :class:`Linear`
"""
if b is None:
return NonparameterizedLinear()(x, W)
else:
return NonparameterizedLinear()(x, W, b) | chainer/functions/connection/linear.py | import math
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _as_mat(x):
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
class Linear(function.Function):
"""Linear function (a.k.a. fully-connected layer or affine transformation).
This function holds a weight matrix ``W`` and a bias vector ``b``.
The weight matrix ``W`` has shape ``(out_size, in_size)``.
This matrix is initialized with i.i.d. Gaussian samples, each of which has
zero mean and deviation :math:`\sqrt{1/\\text{in_size}}`.
The deviation is scaled by factor ``wscale`` if specified.
The bias vector ``b`` is of size ``out_size``.
Each element is initialized with the ``bias`` value.
If ``nobias`` argument is set to True, then this function does not hold a
bias vector.
Let :math:`X` be an input matrix, and :math:`W, b` the weight matrix and
the bias vector, respectively.
Then, the output matrix :math:`Y` is computed by :math:`Y = XW^\\top + b`,
where the addition by :math:`b` is broadcasted across the minibatch.
Args:
in_size (int): Dimension of input vectors.
out_size (int): Dimension of output vectors.
wscale (float): Scaling factor of the weight matrix.
bias (float): Initial bias value.
nobias (bool): If True, then this function does not use the bias.
initialW (2-D array): Initial weight value. If ``None``, then this
function uses to initialize ``wscale``.
initial_bias (1-D array): Initial bias value. If ``None``, then this
function uses to initialize ``bias``.
.. note::
This function accepts an input variable of a non-matrix array.
In this case, the leading dimension is treated as the batch dimension,
and the other dimensions are reduced to one dimension.
"""
def __init__(self, in_size, out_size, wscale=1, bias=0, nobias=False,
initialW=None, initial_bias=None):
self.W = None
self.gW = None
self.b = None
self.gb = None
if initialW is not None:
assert initialW.shape == (out_size, in_size)
self.W = initialW
else:
self.W = numpy.random.normal(
0, wscale * math.sqrt(1. / in_size),
(out_size, in_size)).astype(numpy.float32)
xp = cuda.get_array_module(self.W)
self.gW = xp.full_like(self.W, numpy.nan)
if initial_bias is not None:
assert initial_bias.shape == (out_size,)
self.b = initial_bias
elif not nobias:
self.b = numpy.repeat(numpy.float32(bias), out_size)
if self.b is not None:
self.gb = xp.full_like(self.b, numpy.nan)
@property
def parameter_names(self):
if self.b is None:
return 'W',
return 'W', 'b'
@property
def gradient_names(self):
if self.gb is None:
return 'gW',
return 'gW', 'gb'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim >= 2,
(type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==
type_check.Variable(self.W.shape[1], 'W.shape[1]')),
)
def zero_grads(self):
self.gW.fill(0)
if self.gb is not None:
self.gb.fill(0)
def forward(self, x):
x = _as_mat(x[0])
Wx = x.dot(self.W.T)
if self.b is not None:
Wx += self.b
return Wx,
def backward(self, x, gy):
_x = _as_mat(x[0])
self.gW += gy[0].T.dot(_x)
if self.gb is not None:
self.gb += gy[0].sum(0)
return gy[0].dot(self.W).reshape(x[0].shape),
class NonparameterizedLinear(function.Function):
"""Nonparameterized linear class.
.. seealso:: :class:`Linear`
"""
def check_type_forward(self, in_types):
type_check.expect(
2 <= in_types.size(),
in_types.size() <= 3,
)
x_type = in_types[0]
w_type = in_types[1]
prod = type_check.Variable(numpy.prod, 'prod')
type_check.expect(
x_type.dtype == numpy.float32,
w_type.dtype == numpy.float32,
x_type.ndim >= 2,
w_type.ndim == 2,
prod(x_type.shape[1:]) == w_type.shape[1],
)
if in_types.size().eval() == 3:
b_type = in_types[2]
type_check.expect(
b_type.ndim == 1,
b_type.shape[0] == w_type.shape[0],
)
def forward(self, x):
W = x[1]
out_size, in_size = W.shape
if len(x) == 3:
func = Linear(
in_size, out_size, initialW=W, initial_bias=x[2])
else:
func = Linear(
in_size, out_size, initialW=W, nobias=True)
self.func = func
if any(isinstance(i, cuda.ndarray) for i in x):
func.to_gpu()
return func.forward(x[:1])
def backward(self, x, gy):
func = self.func
func.zero_grads()
gx = func.backward(x[:1], gy)
if func.gb is None:
return (gx[0], func.gW)
return (gx[0], func.gW, func.gb)
def linear(x, W, b=None):
"""Nonparameterized linear function.
Args:
x (~chainer.Variable): Input variable.
W (~chainer.Variable): Weight variable.
b (~chainer.Variable): Bias variable (optional).
Returns:
~chainer.Variable: Output variable.
.. seealso:: :class:`Linear`
"""
if b is None:
return NonparameterizedLinear()(x, W)
else:
return NonparameterizedLinear()(x, W, b) | 0.888257 | 0.716975 |
try:
from django.utils.unittest import TestCase
except ImportError:
from unittest import TestCase
from django.core.exceptions import ValidationError
from menuhin.models import MenuItem, is_valid_uri, MenuItemGroup, URI
class IsValidUriTestCase(TestCase):
    """Scheme handling of the ``is_valid_uri`` validator."""

    def test_is_valid_scheme(self):
        self.assertTrue(is_valid_uri('http://'))
        self.assertTrue(is_valid_uri('https://'))
        # protocol-relative URIs are allowed too
        self.assertTrue(is_valid_uri('//'))

    def test_invalid_scheme(self):
        # non-HTTP schemes are rejected with a ValidationError
        with self.assertRaises(ValidationError):
            is_valid_uri('ftp://')

    def test_is_valid_other(self):
        # plain absolute paths are valid
        self.assertTrue(is_valid_uri('/a/agd/'))
class BalancingTitlesTestCase(TestCase):
    """Placeholder detection and substitution in ``MenuItem.title``."""

    def test_balanced_template(self):
        # Django-template style placeholder: {{ var }}
        obj = MenuItem(title='{{ a }}')
        self.assertTrue(obj.title_has_balanced_template_params())

    def test_balanced_format(self):
        # str.format style placeholder: {var}
        obj = MenuItem(title='{a}')
        self.assertTrue(obj.title_has_balanced_format_params())

    def test_title_needs_parsing(self):
        obj = MenuItem(title='{a}')
        self.assertTrue(obj.title_needs_parsing())
        obj2 = MenuItem(title='{{a}}')
        self.assertTrue(obj2.title_needs_parsing())

    def test_title_doesnt_need_parsing(self):
        # unbalanced braces mean the title is treated as literal text
        obj = MenuItem(title='yay, :}}}')
        self.assertFalse(obj.title_needs_parsing())

    def test_parsed_format(self):
        # {a!s} is str.format syntax; the context dict supplies the value
        obj = MenuItem(title='yay, {a!s}!')
        self.assertEqual('yay, 1!', obj.parsed_title({'a': 1}))

    def test_parsed_template(self):
        obj = MenuItem(title='yay, {{ a }}!')
        self.assertEqual('yay, 2!', obj.parsed_title({'a': 2}))

    def test_parsed_nothing_to_do(self):
        # no placeholders: the title passes through unchanged
        obj = MenuItem(title='yay, 3!')
        self.assertEqual('yay, 3!', obj.parsed_title({'a': 1}))

    def test_parsed_unbalanced(self):
        # unbalanced braces: parsing is skipped, title returned verbatim
        obj = MenuItem(title='{ yay, :}}}')
        self.assertEqual('{ yay, :}}}', obj.parsed_title({'a': 4}))
class MenuItemBasicTestCase(TestCase):
    """Basic ``MenuItem`` cleaning and URL behavior."""

    def test_cleaning(self):
        # clean() derives menu_slug from the URI
        obj = MenuItem(title='x', uri='/a/b/c/')
        self.assertEqual(obj.menu_slug, '')
        obj.clean()
        self.assertEqual(obj.menu_slug, 'a-b-c')

    def test_get_absolute_url(self):
        # the absolute URL is simply the stored URI
        obj = MenuItem(title='x', uri='/a/b/c/')
        self.assertEqual(obj.get_absolute_url(), '/a/b/c/')
class MyMenuIsNeat(MenuItemGroup):
    """Fixture menu group yielding three identical URIs (the count is
    asserted by MenuItemGroupTestCase)."""

    def get_urls(self, *a, **kw):
        yield URI(title='a', path='/a/')
        yield URI(title='a', path='/a/')
        yield URI(title='a', path='/a/')
class MenuItemGroupTestCase(TestCase):
def test_needs_implementing(self):
with self.assertRaises(NotImplementedError):
MenuItemGroup().get_urls()
def test_implementation_name(self):
x = MyMenuIsNeat()
self.assertEqual(x.title, 'my menu is neat')
def test_calling_urls(self):
menu = MyMenuIsNeat()
menu_urls = tuple(menu.get_urls())
self.assertEqual(len(menu_urls), 3) | menuhin/tests/models.py | try:
from django.utils.unittest import TestCase
except ImportError:
from unittest import TestCase
from django.core.exceptions import ValidationError
from menuhin.models import MenuItem, is_valid_uri, MenuItemGroup, URI
class IsValidUriTestCase(TestCase):
def test_is_valid_scheme(self):
self.assertTrue(is_valid_uri('http://'))
self.assertTrue(is_valid_uri('https://'))
self.assertTrue(is_valid_uri('//'))
def test_invalid_scheme(self):
with self.assertRaises(ValidationError):
is_valid_uri('ftp://')
def test_is_valid_other(self):
self.assertTrue(is_valid_uri('/a/agd/'))
class BalancingTitlesTestCase(TestCase):
def test_balanced_template(self):
obj = MenuItem(title='{{ a }}')
self.assertTrue(obj.title_has_balanced_template_params())
def test_balanced_format(self):
obj = MenuItem(title='{a}')
self.assertTrue(obj.title_has_balanced_format_params())
def test_title_needs_parsing(self):
obj = MenuItem(title='{a}')
self.assertTrue(obj.title_needs_parsing())
obj2 = MenuItem(title='{{a}}')
self.assertTrue(obj2.title_needs_parsing())
def test_title_doesnt_need_parsing(self):
obj = MenuItem(title='yay, :}}}')
self.assertFalse(obj.title_needs_parsing())
def test_parsed_format(self):
obj = MenuItem(title='yay, {a!s}!')
self.assertEqual('yay, 1!', obj.parsed_title({'a': 1}))
def test_parsed_template(self):
obj = MenuItem(title='yay, {{ a }}!')
self.assertEqual('yay, 2!', obj.parsed_title({'a': 2}))
def test_parsed_nothing_to_do(self):
obj = MenuItem(title='yay, 3!')
self.assertEqual('yay, 3!', obj.parsed_title({'a': 1}))
def test_parsed_unbalanced(self):
obj = MenuItem(title='{ yay, :}}}')
self.assertEqual('{ yay, :}}}', obj.parsed_title({'a': 4}))
class MenuItemBasicTestCase(TestCase):
def test_cleaning(self):
obj = MenuItem(title='x', uri='/a/b/c/')
self.assertEqual(obj.menu_slug, '')
obj.clean()
self.assertEqual(obj.menu_slug, 'a-b-c')
def test_get_absolute_url(self):
obj = MenuItem(title='x', uri='/a/b/c/')
self.assertEqual(obj.get_absolute_url(), '/a/b/c/')
class MyMenuIsNeat(MenuItemGroup):
def get_urls(self, *a, **kw):
yield URI(title='a', path='/a/')
yield URI(title='a', path='/a/')
yield URI(title='a', path='/a/')
class MenuItemGroupTestCase(TestCase):
def test_needs_implementing(self):
with self.assertRaises(NotImplementedError):
MenuItemGroup().get_urls()
def test_implementation_name(self):
x = MyMenuIsNeat()
self.assertEqual(x.title, 'my menu is neat')
def test_calling_urls(self):
menu = MyMenuIsNeat()
menu_urls = tuple(menu.get_urls())
self.assertEqual(len(menu_urls), 3) | 0.510008 | 0.405213 |
from api import Base, DBEngine
from datetime import datetime
from fastapi.encoders import jsonable_encoder
from sqlalchemy import Column, ForeignKey, Integer, Float, String, Text, Date, DateTime
from sqlalchemy.orm import relationship
from sqlalchemy.types import ARRAY
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects import postgresql
class CapsBrand(Base):
    """Cap brand; rows are referenced by ``Cap.caps_brand_id``."""

    __tablename__ = 'cap_brands'
    # BUG FIX: SQLAlchemy only recognizes ``__table_args__`` (with the
    # underscore); the original ``__tableargs__`` was silently ignored, so
    # the table comment never reached the database. Also fixed the missing
    # space and the "relationshep" typo in the comment text.
    __table_args__ = {
        'comment': 'Storage Cap\'s brand. '
                   'It\'s relationship from Cap.brand, '
                   'where brand is ID in this table.'
    }

    id = Column(Integer, index=True, primary_key=True, autoincrement=True)
    name = Column(String(64))       # display name
    description = Column(Text)
    image = Column(String(128))     # path/URL of the brand image

    # caps = relationship("Cap", back_populates="child")

    def __repr__(self):
        return '<CapsBrand id: {id}, name: {name}>'.format(id=self.id, name=self.name)

    def get_dict_repr(self):
        """Return a JSON-serializable dict of this row."""
        return jsonable_encoder(self)

    def display(self):
        """Print the JSON representation (debug helper)."""
        print(self.get_dict_repr())
class Cap(Base):
    """A single cap product: image, description, pricing, brand and sizes."""

    __tablename__ = 'caps'
    # BUG FIX: must be ``__table_args__`` — SQLAlchemy ignored the original
    # ``__tableargs__``, so the table comment was never applied.
    __table_args__ = {
        'comment': 'Storage ONE CAP. Image representation, description, '
                   'price, sell price, date of create and update, '
                   'brand in name and index, size.'
    }

    id = Column(Integer, index=True, primary_key=True, autoincrement=True)
    name = Column(String(64))
    image = Column(String(128))
    description = Column(Text)
    price = Column(Float, index=True)
    created = Column(DateTime(timezone=True), index=True, default=datetime.utcnow)
    # NOTE(review): 'updated' only sets a creation-time default; presumably
    # onupdate=datetime.utcnow was intended — confirm.
    updated = Column(DateTime(timezone=True), index=True, default=datetime.utcnow)
    new_price = Column(Float, index=True)   # discounted/sale price

    # Relationship with the CapsBrand table.
    # TODO(annad): https://vk.com/wall-201010673_1026
    caps_brand_id = Column(Integer, ForeignKey('cap_brands.id'))
    caps_brand = relationship('CapsBrand', backref='parents', lazy='joined')
    # caps_brand = relationship('CapBrand', back_populates='parents')

    size = Column(postgresql.ARRAY(postgresql.INTEGER))   # available sizes

    def __repr__(self):
        return "<Caps id: {id}, name: {name}>".format(id=self.id, name=self.name)

    def get_dict_repr(self):
        """Return a JSON-serializable dict, without the joined brand row."""
        res: dict = jsonable_encoder(self)
        res.pop('caps_brand', None)
        return res

    def display(self):
        """Print the JSON representation (debug helper)."""
        print(self.get_dict_repr())
class User(Base):
    """Application user authenticated via VK."""

    __tablename__ = 'User'
    # BUG FIX: the original attribute was ``__tableargs`` (missing the
    # surrounding underscores), which SQLAlchemy silently ignored;
    # ``__table_args__`` is the recognized name.
    __table_args__ = {
        'comment': 'The table stores names, e-mails, vk-id, avatar and tokens of users'
    }

    ## TODO(annad): More fields? creation datetime? or...?
    id = Column(Integer, index=True, primary_key=True, autoincrement=True)
    name = Column(String(64))
    email = Column(String(254), index=True)
    vk_id = Column(Integer, index=True)
    avatar = Column(String(256))
    # We don't validate the token here; it is checked by the VK app.
    token = Column(String(256))

    def __repr__(self):
        return f'<VKUser id: {self.id}, vk-id: {self.vk_id}>'

    def get_dict_repr(self):
        """Return a JSON-serializable dict of this row."""
        return jsonable_encoder(self)

    def display(self):
        """Print the JSON representation (debug helper)."""
        print(self.get_dict_repr())
## NOTE(annad): We must check how work with this.
# Base.metadata.create_all(DBEngine, checkfirst=True) | api/models.py | from api import Base, DBEngine
from datetime import datetime
from fastapi.encoders import jsonable_encoder
from sqlalchemy import Column, ForeignKey, Integer, Float, String, Text, Date, DateTime
from sqlalchemy.orm import relationship
from sqlalchemy.types import ARRAY
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects import postgresql
class CapsBrand(Base):
    """ORM model for a cap brand.

    Referenced from ``Cap.caps_brand_id`` (FK to ``cap_brands.id``).
    """
    __tablename__ = 'cap_brands'
    # FIX: was misspelled ``__tableargs__`` and therefore silently ignored
    # by SQLAlchemy; the correct declarative dunder is ``__table_args__``.
    # Also fixed the missing space and typo in the concatenated comment.
    __table_args__ = {
        'comment': 'Storage Cap\'s brand. '
                   'It\'s a relationship from Cap.brand, '
                   'where brand is ID in this table.'
    }
    id = Column(Integer, index=True, primary_key=True, autoincrement=True)
    name = Column(String(64))
    description = Column(Text)
    image = Column(String(128))

    def __repr__(self):
        return '<CapsBrand id: {id}, name: {name}>'.format(id=self.id, name=self.name)

    def get_dict_repr(self):
        """Return a JSON-serializable dict of this row."""
        return jsonable_encoder(self)

    def display(self):
        print(self.get_dict_repr())
class Cap(Base):
    """ORM model for a single cap (product): image, description, prices,
    timestamps, brand link and sizes."""
    __tablename__ = 'caps'
    # FIX: was ``__tableargs__`` (typo), silently ignored by SQLAlchemy;
    # the correct declarative dunder is ``__table_args__``.
    __table_args__ = {
        'comment': 'Storage ONE CAP. Image representation, description, '
                   'price, sell price, date of create and update, '
                   'brand in name and index, size.'
    }
    id = Column(Integer, index=True, primary_key=True, autoincrement=True)
    name = Column(String(64))
    image = Column(String(128))  # path/URL of the product image
    description = Column(Text)
    price = Column(Float, index=True)
    created = Column(DateTime(timezone=True), index=True, default=datetime.utcnow)
    # FIX: ``updated`` previously only had a creation default and never
    # changed on UPDATE; onupdate keeps it current automatically.
    updated = Column(DateTime(timezone=True), index=True,
                     default=datetime.utcnow, onupdate=datetime.utcnow)
    new_price = Column(Float, index=True)  # discounted ("sell") price
    # Relationship with the CapsBrand table.
    # TODO(annad): https://vk.com/wall-201010673_1026
    caps_brand_id = Column(Integer, ForeignKey('cap_brands.id'))
    caps_brand = relationship('CapsBrand', backref='parents', lazy='joined')
    size = Column(postgresql.ARRAY(postgresql.INTEGER))

    def __repr__(self):
        return "<Caps id: {id}, name: {name}>".format(id=self.id, name=self.name)

    def get_dict_repr(self):
        """Return a JSON-serializable dict, excluding the joined brand row."""
        res: dict = jsonable_encoder(self)
        res.pop('caps_brand', None)
        return res

    def display(self):
        print(self.get_dict_repr())
class User(Base):
    """ORM model for a VK user: name, e-mail, vk id, avatar and token."""
    __tablename__ = 'User'
    # FIX: was ``__tableargs`` (missing underscores), which SQLAlchemy
    # silently ignores; the correct declarative dunder is ``__table_args__``.
    __table_args__ = {
        'comment': 'The table stores names, e-mails, vk-id, avatar and tokens of users'
    }
    ## TODO(annad): More fields? creation datetime? or...?
    id = Column(Integer, index=True, primary_key=True, autoincrement=True)
    name = Column(String(64))
    email = Column(String(254), index=True)
    vk_id = Column(Integer, index=True)
    avatar = Column(String(256))
    # The token is not treated as the source of truth: it is re-checked by
    # the vk_app on use.
    token = Column(String(256))

    def __repr__(self):
        return f'<VKUser id: {self.id}, vk-id: {self.vk_id}>'

    def get_dict_repr(self):
        """Return a JSON-serializable dict of this row."""
        return jsonable_encoder(self)

    def display(self):
        print(self.get_dict_repr())
## NOTE(annad): We must check how work with this.
# Base.metadata.create_all(DBEngine, checkfirst=True) | 0.461988 | 0.184363 |
import sys
import click
import os
import glob
from flask import Flask, Markup, Response, render_template, render_template_string, send_from_directory, current_app, safe_join
from flask_flatpages import FlatPages, pygmented_markdown, pygments_style_defs
from flask_frozen import Freezer
app = Flask(__name__)
app.config.from_object('settings')
pages = FlatPages(app)
freezer = Freezer(app=app, log_url_for=True, with_static_files=True)
def get_pages(**kwargs):
"""
Convenience function to get one or more pages by one or more of its
metadata items.
"""
pass
def get_pages_by_slug(slug):
for p in pages:
if p.meta.get('slug', None) == slug:
return p
def get_pages_by_tags(*args):
tag_set = set(args)
pages_ = (p for p in pages if tag_set & set(p.meta.get('tags','')))
return sorted(pages_, reverse=True, key=lambda p: p.meta['date'])
def get_pages_by_missing_tags(*args):
tag_set = set(args)
pages_ = (p for p in pages if tag_set - set(p.meta.get('tags','')))
return sorted(pages_, reverse=True, key=lambda p: p.meta['date'])
def get_pages_sorted(sort_by='date', reverse=True, page_type='article'):
pages_ = (p for p in pages if p.meta.get('status','') == 'published' and p.meta.get('type','') == page_type)
return sorted(pages_, reverse=reverse, key=lambda p: p.meta[sort_by])
def get_related_pages(page):
"""
Get related pages by using overlapping tags.
"""
pass
@app.route('/')
def index():
index = get_pages_by_slug('index')
articles = get_pages_by_tags('geo')
other_articles = get_pages_by_tags('other')
return render_template('index.html', **locals())
@app.route('/articles/<slug>/')
def article(slug):
article = get_pages_by_slug(slug)
article_html = article.html.replace("%%THANKS%%", '<p class="thanks">Thanks for reading! Get in touch via <a href="https://twitter.com/kokoalberti">@kokoalberti</a> for any questions or comments. I also post new articles there when they are first published.</p>')
return render_template('article.html', **locals())
@app.route('/articles/<slug>/<path:filename>')
def article_static(slug, filename):
article = get_pages_by_slug(slug)
directory = os.path.dirname(safe_join(current_app.root_path, current_app.config.get("FLATPAGES_ROOT"), article.path))
return send_from_directory(directory, filename)
@app.route('/pages/<slug>/')
def page(slug):
page = get_pages_by_slug(slug)
return render_template('page.html', **locals())
@app.route('/tag/<tag>/')
def tag(tag):
articles = get_pages_by_tags(tag)
article = ''
return render_template('tag.html', **locals())
@app.route('/sitemap.xml')
def sitemap():
server_name = current_app.config.get("SITEMAP_SERVER_NAME")
articles = get_pages_sorted()
pages = get_pages_sorted(page_type='page')
index = get_pages_by_slug('index')
tags = set()
for article in articles:
for tag in article.meta.get("tags",[]):
tags.add(tag)
return Response(render_template('sitemap.xml', **locals()), mimetype='application/xml')
@app.route('/robots.txt')
def robots():
server_name = current_app.config.get("SITEMAP_SERVER_NAME")
return Response(render_template('robots.txt', **locals()), mimetype='text/plain')
@app.route('/google0e9a29b6ad0a512a.html')
def google_verification():
return render_template('google0e9a29b6ad0a512a.html')
@freezer.register_generator
def other_static_files():
"""
Register the URLs for the robots and sitemap routes to frozen flask
"""
yield 'robots', {}
yield 'sitemap', {}
yield 'google_verification', {}
@freezer.register_generator
def article_static_files():
"""
Register the URLS for article's static files (PNG images only for now) to
frozen flask.
"""
static_patterns = ("*.png", "*.jpg", "*.zip")
for p in pages:
directory = os.path.dirname(safe_join(current_app.root_path, current_app.config.get("FLATPAGES_ROOT"), p.path))
files = []
for pattern in static_patterns:
files.extend(glob.glob(os.path.join(directory, "**", pattern), recursive=True))
for static_file in files:
filename = static_file.replace(directory+'/', "")
yield 'article_static', {'slug':p.meta.get('slug'), 'filename':filename}
@app.cli.command()
def freeze():
print("Freezing...")
freezer.freeze() | application.py | import sys
import click
import os
import glob
from flask import Flask, Markup, Response, render_template, render_template_string, send_from_directory, current_app, safe_join
from flask_flatpages import FlatPages, pygmented_markdown, pygments_style_defs
from flask_frozen import Freezer
app = Flask(__name__)
app.config.from_object('settings')
pages = FlatPages(app)
freezer = Freezer(app=app, log_url_for=True, with_static_files=True)
def get_pages(**kwargs):
"""
Convenience function to get one or more pages by one or more of its
metadata items.
"""
pass
def get_pages_by_slug(slug):
    """Return the first page whose 'slug' metadata equals *slug*, else None."""
    return next((pg for pg in pages if pg.meta.get('slug') == slug), None)
def get_pages_by_tags(*args):
    """Return pages sharing at least one tag with *args*, newest first."""
    wanted = set(args)
    matching = [pg for pg in pages if wanted & set(pg.meta.get('tags', ''))]
    matching.sort(key=lambda pg: pg.meta['date'], reverse=True)
    return matching
def get_pages_by_missing_tags(*args):
tag_set = set(args)
pages_ = (p for p in pages if tag_set - set(p.meta.get('tags','')))
return sorted(pages_, reverse=True, key=lambda p: p.meta['date'])
def get_pages_sorted(sort_by='date', reverse=True, page_type='article'):
    """Return published pages of *page_type*, sorted by *sort_by* metadata."""
    published = [
        pg for pg in pages
        if pg.meta.get('status', '') == 'published'
        and pg.meta.get('type', '') == page_type
    ]
    return sorted(published, key=lambda pg: pg.meta[sort_by], reverse=reverse)
def get_related_pages(page):
"""
Get related pages by using overlapping tags.
"""
pass
@app.route('/')
def index():
index = get_pages_by_slug('index')
articles = get_pages_by_tags('geo')
other_articles = get_pages_by_tags('other')
return render_template('index.html', **locals())
@app.route('/articles/<slug>/')
def article(slug):
article = get_pages_by_slug(slug)
article_html = article.html.replace("%%THANKS%%", '<p class="thanks">Thanks for reading! Get in touch via <a href="https://twitter.com/kokoalberti">@kokoalberti</a> for any questions or comments. I also post new articles there when they are first published.</p>')
return render_template('article.html', **locals())
@app.route('/articles/<slug>/<path:filename>')
def article_static(slug, filename):
article = get_pages_by_slug(slug)
directory = os.path.dirname(safe_join(current_app.root_path, current_app.config.get("FLATPAGES_ROOT"), article.path))
return send_from_directory(directory, filename)
@app.route('/pages/<slug>/')
def page(slug):
page = get_pages_by_slug(slug)
return render_template('page.html', **locals())
@app.route('/tag/<tag>/')
def tag(tag):
articles = get_pages_by_tags(tag)
article = ''
return render_template('tag.html', **locals())
@app.route('/sitemap.xml')
def sitemap():
    """Render sitemap.xml listing the index, all published articles/pages, and tags."""
    server_name = current_app.config.get("SITEMAP_SERVER_NAME")
    articles = get_pages_sorted()
    pages = get_pages_sorted(page_type='page')
    index = get_pages_by_slug('index')
    # Collect the set of distinct tags across all published articles.
    tags = set()
    for article in articles:
        for tag in article.meta.get("tags",[]):
            tags.add(tag)
    # **locals() deliberately hands every local (including the loop variables)
    # to the template, so do not rename or remove locals casually.
    return Response(render_template('sitemap.xml', **locals()), mimetype='application/xml')
@app.route('/robots.txt')
def robots():
server_name = current_app.config.get("SITEMAP_SERVER_NAME")
return Response(render_template('robots.txt', **locals()), mimetype='text/plain')
@app.route('/google0e9a29b6ad0a512a.html')
def google_verification():
return render_template('google0e9a29b6ad0a512a.html')
@freezer.register_generator
def other_static_files():
"""
Register the URLs for the robots and sitemap routes to frozen flask
"""
yield 'robots', {}
yield 'sitemap', {}
yield 'google_verification', {}
@freezer.register_generator
def article_static_files():
    """
    Register the URLS for article's static files (PNG images only for now) to
    frozen flask.
    """
    # Asset patterns copied alongside the rendered article (recursively).
    static_patterns = ("*.png", "*.jpg", "*.zip")
    for p in pages:
        # Absolute directory containing this page's source file.
        directory = os.path.dirname(safe_join(current_app.root_path, current_app.config.get("FLATPAGES_ROOT"), p.path))
        files = []
        for pattern in static_patterns:
            files.extend(glob.glob(os.path.join(directory, "**", pattern), recursive=True))
        for static_file in files:
            # Path relative to the article directory, used as the URL filename.
            filename = static_file.replace(directory+'/', "")
            yield 'article_static', {'slug':p.meta.get('slug'), 'filename':filename}
@app.cli.command()
def freeze():
print("Freezing...")
freezer.freeze() | 0.339061 | 0.076304 |
from nethack_raph.Findable import *
from nethack_raph.glossaries import MONSTERS_GLOSSARY, ITEMS_TO_THROW, LAUNCHERS, MISSILES
class Item(Findable):
    """A single NetHack item decoded from its glyph id.

    The glyph determines the broad item type (weapon, potion, corpse, ...)
    and, for corpses, which monster the corpse came from.
    """

    # Beatitude (blessed/uncursed/cursed) states; UNKNOWNBUC until identified.
    CURSED = 0
    UNCURSED = 1
    BLESSED = 2
    UNKNOWNBUC = 3

    # Corpse effects that make eating the corpse harmful.
    bad_effects = ['mimic', 'poisonous', 'hallucination', 'stun', 'die', 'acidic', 'lycanthropy', 'slime',
                   'petrify', 'aggravate']
    ambivalent_effects = ['speed toggle']  # can be either good or bad, depending on the circumstances
    # Beneficial corpse effects (currently informational only).
    good_effects = ['cure stoning', 'reduce confusion', 'reduce stunning',
                    'heal', 'cold resistance', 'disintegration resistance', 'fire resistance',
                    'poison resistance', 'shock resistance', 'sleep resistance', 'gain level',
                    'teleport control', 'gain telepathy', 'increase intelligence', 'polymorphing',
                    'increase strength', 'increase energy', 'teleportitis', 'invisibility'
                    ]
    # Inclusive glyph-id ranges per item category.  TODO: refactor
    item_glyp_ranges = {
        'corpse': (1144, 1524),
        'weapon': (1907, 1976),
        'armor': (1977, 2055),
        'ring': (2056, 2083),
        'amulet': (2084, 2094),
        'tool': (2095, 2144),
        'food': (2145, 2177),
        'potion': (2178, 2203),
        'scroll': (2204, 2245),
        'spell_book': (2246, 2288),
        'wand': (2289, 2315),
        'gold_piece': (2316, 2316),
        'gem': (2317, 2352),
    }

    def __init__(self, name, char, glyph, kernel):
        """*kernel* is a callable returning the game kernel (used as self.kernel())."""
        Findable.__init__(self)
        self.name = name
        self.qty = 1
        self.enchants = 0
        self.buc = Item.UNKNOWNBUC
        self.char = char
        self.glyph = glyph
        self.kernel = kernel
        self.corpse = False          # monster name when this item is a corpse
        self.turn_of_death = -1000   # far past, so unknown corpses count as tainted
        self.is_food = self.check_if_food()
        # Classify by glyph range first, then override for throwables/missiles.
        self.item_type = None
        for category, (lo, hi) in Item.item_glyp_ranges.items():
            if lo <= self.glyph <= hi:
                self.item_type = category
                break
        if glyph in ITEMS_TO_THROW:
            self.item_type = 'projective'
        if glyph in LAUNCHERS or glyph in MISSILES:
            self.item_type = 'missile'

    def __str__(self):
        return "?:%s, ch:%s, g:%s" % tuple(map(str, (self.name, self.char, self.glyph)))

    def identified(self, id):
        """Record the identified name of this item."""
        self.name = id

    def check_if_food(self):
        """Return True if this item is safe to eat.

        Corpses are edible unless eating them would be cannibalism or the
        monster carries a bad/ambivalent effect; acidic globs are never
        edible; any other '%' item counts as food.
        """
        if self.char != '%':
            return False
        if 1144 <= self.glyph <= 1524:  # corpse glyph range
            self.corpse = MONSTERS_GLOSSARY[self.glyph - 1144]['name']
            monster_corpse = MONSTERS_GLOSSARY[self.glyph - 1144]['corpse']
            if monster_corpse['cannibal'] and self.kernel().hero.race in (None, monster_corpse['cannibal']):
                # Cannibalism.  If the hero's race is unknown, treat any
                # cannibalisable corpse as cannibalism to stay safe.
                self.kernel().log("%s is not an edible corpse." % self)
                return False
            if any(key in monster_corpse for key in Item.bad_effects + Item.ambivalent_effects):
                self.kernel().log("%s is not an edible corpse." % self)
                return False
            self.kernel().log("%s is an edible corpse." % self)
            return True
        elif 2152 <= self.glyph <= 2155:  # glob (acidic)
            self.kernel().log("%s is glob (inedible)" % self)
            return False
        else:
            self.kernel().log("%s is food" % self)
            return True

    def is_tainted(self):
        """A corpse that died 30 or more turns ago is tainted (unsafe to eat)."""
        return bool(self.corpse) and self.kernel().hero.turns - self.turn_of_death >= 30
from nethack_raph.glossaries import MONSTERS_GLOSSARY, ITEMS_TO_THROW, LAUNCHERS, MISSILES
class Item(Findable):
CURSED = 0
UNCURSED = 1
BLESSED = 2
UNKNOWNBUC = 3
bad_effects = ['mimic', 'poisonous', 'hallucination', 'stun', 'die', 'acidic', 'lycanthropy', 'slime',
'petrify', 'aggravate']
ambivalent_effects = ['speed toggle'] # can be either good or bad, depending on the circumstances
good_effects = ['cure stoning', 'reduce confusion', 'reduce stunning',
'heal', 'cold resistance', 'disintegration resistance', 'fire resistance',
'poison resistance', 'shock resistance', 'sleep resistance', 'gain level',
'teleport control', 'gain telepathy', 'increase intelligence', 'polymorphing',
'increase strength', 'increase energy', 'teleportitis', 'invisibility'
]
item_glyp_ranges = { # TODO: refactor
'corpse': (1144, 1524),
'weapon': (1907, 1976),
'armor': (1977, 2055),
'ring': (2056, 2083),
'amulet': (2084, 2094),
'tool': (2095, 2144),
'food': (2145, 2177),
'potion': (2178, 2203),
'scroll': (2204, 2245),
'spell_book': (2246, 2288),
'wand': (2289, 2315),
'gold_piece': (2316, 2316),
'gem': (2317, 2352),
}
def __init__(self, name, char, glyph, kernel):
Findable.__init__(self)
self.name = name
self.qty = 1
self.enchants = 0
self.buc = Item.UNKNOWNBUC
self.char = char
self.glyph = glyph
self.kernel = kernel
self.corpse = False
self.turn_of_death = -1000
self.is_food = self.check_if_food()
self.item_type = None
for k, v in Item.item_glyp_ranges.items():
if v[0] <= self.glyph <= v[1]:
self.item_type = k
break
if glyph in ITEMS_TO_THROW:
self.item_type = 'projective'
if glyph in LAUNCHERS:
self.item_type = 'missile'
if glyph in MISSILES:
self.item_type = 'missile'
def __str__(self):
return "?:%s, ch:%s, g:%s" % tuple(map(str, (self.name, self.char, self.glyph)))
def identified(self, id):
self.name = id
def check_if_food(self):
if self.char != '%': return False
if 1144 <= self.glyph <= 1524: # corpse
self.corpse = MONSTERS_GLOSSARY[self.glyph - 1144]['name']
monster_corpse = MONSTERS_GLOSSARY[self.glyph - 1144]['corpse']
if monster_corpse['cannibal'] and self.kernel().hero.race in (None, monster_corpse['cannibal']):
# cannibalism. If we doesn't know the race, it is cannibalism for any monster that can be cannibalised
self.kernel().log("%s is not an edible corpse." % self)
return False
if any([key in monster_corpse for key in Item.bad_effects + Item.ambivalent_effects]):
self.kernel().log("%s is not an edible corpse." % self)
return False
else:
self.kernel().log("%s is an edible corpse." % self)
return True
elif 2152 <= self.glyph <= 2155: # glob (acidic)
self.kernel().log("%s is glob (inedible)" % self)
return False
else:
self.kernel().log("%s is food" % self)
return True
def is_tainted(self):
tainted = bool(self.corpse) and self.kernel().hero.turns - self.turn_of_death >= 30
return tainted | 0.369429 | 0.333408 |
import numpy as np
import cvxpy as cp
from copy import deepcopy
from mvmm.multi_view.block_diag.utils import \
get_guess, get_lin_coef, get_row_col_sum_mat
def get_cp_problem_un_lap(Gamma,
eig_var,
epsilon,
B,
alpha,
eta=None,
weights=None,
init_val=None,
obj_mult=1):
"""
Sets up the bd_weights_ update for the unnormalized Laplacian using cvxpy.
min_D - sum_{k1, k2} Gamma_{k1, k2} log(epsilon + D_{k1, k2}) +
alpha * <D, M(eig_var, weights) >
s.t. sum_{k1, k2} D_{k1, k1} = 1 - np.product(D.shape) * epsilon
Optional constraint: deg(A_bp(D)) >= eta
Parameters
----------
Gamma:
The coefficients of the log terms.
eig_var:
Current value of the eigenvector variable.
epsilon:
epsilon
B:
The number of eigenvalues to penalize.
alpha:
The spectral penalty weight.
eta: None, float
(Optional) An optional lower bound on the degrees.
weights: None, array-like, (B, )
Weights to put on the eigenvalues.
init_val:
Guess for the initial value. Note the ECOS solver does not currently
accept inital guesses.
obj_mult: float
Multiply the objective function by a constant. This does not change the problem, but can help some solvers find a solution.
"""
shape = Gamma.shape
var = cp.Variable(shape=np.product(shape), pos=True)
epsilon_tilde = 1 - epsilon * np.product(shape)
log_coef = deepcopy(Gamma).reshape(-1)
lin_coef = alpha * get_lin_coef(eig_var, shape,
weights=weights).reshape(-1)
# set initial value
if type(init_val) == str and init_val == 'guess':
guess = get_guess(log_coef, lin_coef, epsilon, epsilon_tilde)
var.value = guess.reshape(-1)
elif init_val is not None:
var.value = init_val.reshape(-1)
# setup cvxpy problem
objective = -log_coef.T @ cp.log(epsilon + var) + lin_coef.T @ var
if obj_mult is not None:
objective = obj_mult * objective
constraints = [cp.sum(var) == epsilon_tilde]
if eta is not None:
S = get_row_col_sum_mat(shape)
S_rhs = eta * np.ones(sum(shape))
constraints.append(S @ var >= S_rhs)
return var, objective, constraints | mvmm/multi_view/block_diag/sub_prob_cp_un_lap.py | import numpy as np
import cvxpy as cp
from copy import deepcopy
from mvmm.multi_view.block_diag.utils import \
get_guess, get_lin_coef, get_row_col_sum_mat
def get_cp_problem_un_lap(Gamma,
                          eig_var,
                          epsilon,
                          B,
                          alpha,
                          eta=None,
                          weights=None,
                          init_val=None,
                          obj_mult=1):
    """
    Sets up the bd_weights_ update for the unnormalized Laplacian using cvxpy.

    min_D - sum_{k1, k2} Gamma_{k1, k2} log(epsilon + D_{k1, k2}) +
            alpha * <D, M(eig_var, weights) >

    s.t. sum_{k1, k2} D_{k1, k1} = 1 - np.prod(D.shape) * epsilon

    Optional constraint: deg(A_bp(D)) >= eta

    Parameters
    ----------
    Gamma:
        The coefficients of the log terms.

    eig_var:
        Current value of the eigenvector variable.

    epsilon:
        epsilon

    B:
        The number of eigenvalues to penalize.

    alpha:
        The spectral penalty weight.

    eta: None, float
        (Optional) An optional lower bound on the degrees.

    weights: None, array-like, (B, )
        Weights to put on the eigenvalues.

    init_val:
        Guess for the initial value. Note the ECOS solver does not currently
        accept inital guesses.

    obj_mult: float
        Multiply the objective function by a constant. This does not change
        the problem, but can help some solvers find a solution.
    """
    shape = Gamma.shape
    # np.prod: np.product was deprecated and removed in NumPy 2.0.
    var = cp.Variable(shape=np.prod(shape), pos=True)
    epsilon_tilde = 1 - epsilon * np.prod(shape)

    log_coef = deepcopy(Gamma).reshape(-1)
    lin_coef = alpha * get_lin_coef(eig_var, shape,
                                    weights=weights).reshape(-1)

    # Set the initial value, if requested (solvers may ignore warm starts).
    if isinstance(init_val, str) and init_val == 'guess':
        guess = get_guess(log_coef, lin_coef, epsilon, epsilon_tilde)
        var.value = guess.reshape(-1)
    elif init_val is not None:
        var.value = init_val.reshape(-1)

    # Build the cvxpy objective and constraints.
    objective = -log_coef.T @ cp.log(epsilon + var) + lin_coef.T @ var
    if obj_mult is not None:
        objective = obj_mult * objective

    constraints = [cp.sum(var) == epsilon_tilde]
    if eta is not None:
        # Lower-bound the bipartite degrees: S maps D to its row/column sums.
        S = get_row_col_sum_mat(shape)
        S_rhs = eta * np.ones(sum(shape))
        constraints.append(S @ var >= S_rhs)

    return var, objective, constraints
import os
import argparse
import numpy as np
import math as ma
import music21 as m21
THREE_DOTTED_BREVE = 15
THREE_DOTTED_32ND = 0.21875
MIN_VELOCITY = 0
MAX_VELOCITY = 128
MIN_TEMPO = 24
MAX_TEMPO = 160
MAX_PITCH = 128
def load(datapath, sample_freq=4, piano_range=(33, 93), transpose_range=10, stretching_range=10):
    """Encode one midi file, or every midi file in a directory, as text.

    Returns (text, vocab): the space-joined encoding of all files and the
    set of distinct tokens appearing in it.
    """
    text = ""
    vocab = set()

    if os.path.isfile(datapath):
        # Path is an individual midi file
        file_extension = os.path.splitext(datapath)[1]
        if file_extension == ".midi" or file_extension == ".mid":
            text = parse_midi(datapath, sample_freq, piano_range, transpose_range, stretching_range)
            vocab = set(text.split(" "))
    else:
        # Read every file in the given directory
        for file in os.listdir(datapath):
            file_path = os.path.join(datapath, file)
            file_extension = os.path.splitext(file_path)[1]

            # Check if it is not a directory and if it has either .midi or .mid extentions
            if os.path.isfile(file_path) and (file_extension == ".midi" or file_extension == ".mid"):
                encoded_midi = parse_midi(file_path, sample_freq, piano_range, transpose_range, stretching_range)
                if len(encoded_midi) > 0:
                    words = set(encoded_midi.split(" "))
                    vocab = vocab | words
                    text += encoded_midi + " "

    # Remove last space
    # NOTE(review): this also runs in the single-file branch, where it chops
    # the final character of the encoding (typically the trailing "\n")
    # rather than a separator space — confirm this is intended.
    text = text[:-1]

    return text, vocab
def parse_midi(file_path, sample_freq, piano_range, transpose_range, stretching_range):
    """Encode a single midi file as text, caching the result in a .txt sidecar.

    If ``<name>.txt`` already exists next to the midi file it is read back
    instead of re-encoding.
    """
    midi_dir = os.path.dirname(file_path)
    midi_name = os.path.basename(file_path).split(".")[0]

    midi_txt_name = os.path.join(midi_dir, midi_name + ".txt")
    if os.path.isfile(midi_txt_name):
        # Cached: reuse the previously computed encoding.
        # FIX: file handles were previously opened without ever being
        # closed; ``with`` guarantees closure even on error.
        with open(midi_txt_name, "r") as midi_fp:
            encoded_midi = midi_fp.read()
    else:
        # Read the raw midi file via music21.
        midi = m21.midi.MidiFile()
        midi.open(file_path)
        midi.read()
        midi.close()

        # Translate midi to the text encoding and cache it if non-empty.
        encoded_midi = midi2encoding(midi, sample_freq, piano_range, transpose_range, stretching_range)
        if len(encoded_midi) > 0:
            with open(midi_txt_name, "w+") as midi_fp:
                midi_fp.write(encoded_midi)

    return encoded_midi
def midi2encoding(midi, sample_freq, piano_range, transpose_range, stretching_range):
    """Translate a music21 MidiFile into the text encoding.

    Returns "" when the midi cannot be parsed into a stream, so the return
    type is consistently ``str`` (callers only test ``len(...) > 0``).
    """
    try:
        midi_stream = m21.midi.translate.midiFileToStream(midi)
    except Exception:
        # FIX: was a bare ``except:`` (which also swallowed KeyboardInterrupt)
        # returning a list; music21 raises various errors on malformed files.
        return ""

    # Render the stream as piano-roll versions, then serialize them.
    piano_roll = midi2piano_roll(midi_stream, sample_freq, piano_range, transpose_range, stretching_range)
    encoded_midi = piano_roll2encoding(piano_roll)
    return " ".join(encoded_midi)
def piano_roll2encoding(piano_roll):
    """Serialize piano-roll versions into token strings.

    Tokens: ``t_<tempo>``, ``v_<velocity>``, ``d_<type>_<dots>``,
    ``n_<pitch>`` and ``w_<steps>`` (wait).  Tempo/velocity/duration tokens
    are only emitted when the value changes.  Duplicate versions (e.g. from
    overlapping transpositions) are dropped; returns the de-duplicated
    encodings in insertion order (dict keys view).
    """
    # Transform piano roll into a list of notes in string format
    final_encoding = {}
    perform_i = 0
    for version in piano_roll:
        # Track the last emitted values so repeated ones are not re-emitted.
        lastTempo = -1
        lastVelocity = -1
        lastDuration = -1.0

        version_encoding = []

        for i in range(len(version)):
            # Time events are stored at the last row
            tempo = version[i,-1][0]
            if tempo != 0 and tempo != lastTempo:
                version_encoding.append("t_" + str(int(tempo)))
                lastTempo = tempo

            # Process current time step of the piano_roll
            for j in range(len(version[i]) - 1):
                duration = version[i,j][0]
                velocity = int(version[i,j][1])

                if velocity != 0 and velocity != lastVelocity:
                    version_encoding.append("v_" + str(velocity))
                    lastVelocity = velocity

                if duration != 0 and duration != lastDuration:
                    duration_tuple = m21.duration.durationTupleFromQuarterLength(duration)
                    version_encoding.append("d_" + duration_tuple.type + "_" + str(duration_tuple.dots))
                    lastDuration = duration

                if duration != 0 and velocity != 0:
                    version_encoding.append("n_" + str(j))

            # End of time step: merge consecutive waits into one w_<n> token.
            if len(version_encoding) > 0 and version_encoding[-1][0] == "w":
                # Increase wait by one
                version_encoding[-1] = "w_" + str(int(version_encoding[-1].split("_")[1]) + 1)
            else:
                version_encoding.append("w_1")

        # End of piece
        version_encoding.append("\n")

        # Check if this version of the MIDI is already added
        version_encoding_str = " ".join(version_encoding)
        if version_encoding_str not in final_encoding:
            final_encoding[version_encoding_str] = perform_i
            perform_i += 1

    return final_encoding.keys()
def write(encoded_midi, path):
# Base class checks if output path exists
midi = encoding2midi(encoded_midi)
midi.open(path, "wb")
midi.write()
midi.close()
def encoding2midi(note_encoding, ts_duration=0.25):
notes = []
velocity = 100
duration = "16th"
dots = 0
ts = 0
for note in note_encoding.split(" "):
if len(note) == 0:
continue
elif note[0] == "w":
wait_count = int(note.split("_")[1])
ts += wait_count
elif note[0] == "n":
pitch = int(note.split("_")[1])
note = m21.note.Note(pitch)
note.duration = m21.duration.Duration(type=duration, dots=dots)
note.offset = ts * ts_duration
note.volume.velocity = velocity
notes.append(note)
elif note[0] == "d":
duration = note.split("_")[1]
dots = int(note.split("_")[2])
elif note[0] == "v":
velocity = int(note.split("_")[1])
elif note[0] == "t":
tempo = int(note.split("_")[1])
if tempo > 0:
mark = m21.tempo.MetronomeMark(number=tempo)
mark.offset = ts * ts_duration
notes.append(mark)
piano = m21.instrument.fromString("Piano")
notes.insert(0, piano)
piano_stream = m21.stream.Stream(notes)
main_stream = m21.stream.Stream([piano_stream])
return m21.midi.translate.streamToMidiFile(main_stream)
def midi_parse_notes(midi_stream, sample_freq):
note_filter = m21.stream.filters.ClassFilter('Note')
note_events = []
for note in midi_stream.recurse().addFilter(note_filter):
pitch = note.pitch.midi
duration = note.duration.quarterLength
velocity = note.volume.velocity
offset = ma.floor(note.offset * sample_freq)
note_events.append((pitch, duration, velocity, offset))
return note_events
def midi_parse_chords(midi_stream, sample_freq):
chord_filter = m21.stream.filters.ClassFilter('Chord')
note_events = []
for chord in midi_stream.recurse().addFilter(chord_filter):
pitches_in_chord = chord.pitches
for pitch in pitches_in_chord:
pitch = pitch.midi
duration = chord.duration.quarterLength
velocity = chord.volume.velocity
offset = ma.floor(chord.offset * sample_freq)
note_events.append((pitch, duration, velocity, offset))
return note_events
def midi_parse_metronome(midi_stream, sample_freq):
metronome_filter = m21.stream.filters.ClassFilter('MetronomeMark')
time_events = []
for metro in midi_stream.recurse().addFilter(metronome_filter):
time = int(metro.number)
offset = ma.floor(metro.offset * sample_freq)
time_events.append((time, offset))
return time_events
def midi2notes(midi_stream, sample_freq, transpose_range):
notes = []
notes += midi_parse_notes(midi_stream, sample_freq)
notes += midi_parse_chords(midi_stream, sample_freq)
# Transpose the notes to all the keys in transpose_range
return transpose_notes(notes, transpose_range)
def midi2piano_roll(midi_stream, sample_freq, piano_range, transpose_range, stretching_range):
# Calculate the amount of time steps in the piano roll
time_steps = ma.floor(midi_stream.duration.quarterLength * sample_freq) + 1
# Parse the midi file into a list of notes (pitch, duration, velocity, offset)
transpositions = midi2notes(midi_stream, sample_freq, transpose_range)
time_events = midi_parse_metronome(midi_stream, sample_freq)
time_streches = strech_time(time_events, stretching_range)
return notes2piano_roll(transpositions, time_streches, time_steps, piano_range)
def notes2piano_roll(transpositions, time_streches, time_steps, piano_range):
    """Build one piano-roll array per (transposition, time-stretch) pair.

    Each roll has shape (time_steps, MAX_PITCH + 1, 2); the last pitch row
    holds tempo events, and each cell stores (duration, velocity-bin).
    """
    performances = []

    min_pitch, max_pitch = piano_range
    for t_ix in range(len(transpositions)):
        for s_ix in range(len(time_streches)):
            # Create piano roll with calculated size.
            # Each entry has an extra dimension to store duration and velocity.
            piano_roll = np.zeros((time_steps, MAX_PITCH + 1, 2))

            for note in transpositions[t_ix]:
                pitch, duration, velocity, offset = note

                # Zero-length notes carry no information; skip them.
                if duration == 0.0:
                    continue

                # Force notes to be inside the specified piano_range
                pitch = clamp_pitch(pitch, max_pitch, min_pitch)

                piano_roll[offset, pitch][0] = clamp_duration(duration)
                piano_roll[offset, pitch][1] = discretize_value(velocity, bins=32, range=(MIN_VELOCITY, MAX_VELOCITY))

            for time_event in time_streches[s_ix]:
                time, offset = time_event
                # Tempo events live in the last pitch row of the roll.
                piano_roll[offset, -1][0] = discretize_value(time, bins=100, range=(MIN_TEMPO, MAX_TEMPO))

            performances.append(piano_roll)

    return performances
def transpose_notes(notes, transpose_range):
    """Return one copy of *notes* per key in the transposition window.

    Keys run from -floor(transpose_range/2) up to ceil(transpose_range/2) - 1
    semitones; each copy shifts every note's pitch by that key.
    """
    lo = -ma.floor(transpose_range / 2)
    hi = ma.ceil(transpose_range / 2)
    return [
        [(pitch + key, duration, velocity, offset)
         for pitch, duration, velocity, offset in notes]
        for key in range(lo, hi)
    ]
def strech_time(time_events, stretching_range):
    """Return one copy of *time_events* per tempo stretch in the window.

    Stretch steps run from -floor(stretching_range/2) up to
    ceil(stretching_range/2) - 1; each step shifts the tempo by
    0.05 * step * MAX_TEMPO.
    """
    slowest = -ma.floor(stretching_range / 2)
    fastest = ma.ceil(stretching_range / 2)
    return [
        [(time + 0.05 * step * MAX_TEMPO, offset) for time, offset in time_events]
        for step in range(slowest, fastest)
    ]
def discretize_value(val, bins, range):
    """Clamp *val* into *range* and snap it down to a multiple of max/bins.

    The third parameter intentionally shadows the builtin ``range`` to keep
    the existing keyword interface (callers pass ``range=(lo, hi)``).
    """
    lo, hi = range
    clamped = min(int(max(lo, val)), hi)
    step = hi / bins
    return ma.floor(clamped / step) * step
def clamp_pitch(pitch, max, min):
    """Fold *pitch* into [min, max) by whole-octave (12 semitone) steps.

    Parameter names shadow builtins but are kept for interface compatibility.
    """
    if pitch < min:
        pitch += 12 * ma.ceil((min - pitch) / 12)
    if pitch >= max:
        pitch -= 12 * (ma.floor((pitch - max) / 12) + 1)
    return pitch
def clamp_duration(duration, max=THREE_DOTTED_BREVE, min=THREE_DOTTED_32ND):
# Max duration is 3-dotted breve
if duration > max:
duration = max
# min duration is 3-dotted breve
if duration < min:
duration = min
duration_tuple = m21.duration.durationTupleFromQuarterLength(duration)
if duration_tuple.type == "inexpressible":
duration_clossest_type = m21.duration.quarterLengthToClosestType(duration)[0]
duration = m21.duration.typeToDuration[duration_clossest_type]
return duration
if __name__ == "__main__":
    # CLI entry point: encode the given midi data, print the encoding, and
    # write a decoded round-trip copy as encoded.mid.
    # FIX: the final line of this block was corrupted by dataset artifacts
    # (stray "| path | ..." text); restored to valid Python.
    parser = argparse.ArgumentParser(description='midi_encoder.py')
    parser.add_argument('--path', type=str, required=True, help="Path to midi data.")
    parser.add_argument('--transp', type=int, default=1, help="Transpose range.")
    parser.add_argument('--strech', type=int, default=1, help="Time stretching range.")
    opt = parser.parse_args()

    # Load data and encode it.
    text, vocab = load(opt.path, transpose_range=opt.transp, stretching_range=opt.strech)
    print(text)

    # Write all data back out as a midi file.
    write(text, "encoded.mid")
import argparse
import numpy as np
import math as ma
import music21 as m21
THREE_DOTTED_BREVE = 15
THREE_DOTTED_32ND = 0.21875
MIN_VELOCITY = 0
MAX_VELOCITY = 128
MIN_TEMPO = 24
MAX_TEMPO = 160
MAX_PITCH = 128
def load(datapath, sample_freq=4, piano_range=(33, 93), transpose_range=10, stretching_range=10):
text = ""
vocab = set()
if os.path.isfile(datapath):
# Path is an individual midi file
file_extension = os.path.splitext(datapath)[1]
if file_extension == ".midi" or file_extension == ".mid":
text = parse_midi(datapath, sample_freq, piano_range, transpose_range, stretching_range)
vocab = set(text.split(" "))
else:
# Read every file in the given directory
for file in os.listdir(datapath):
file_path = os.path.join(datapath, file)
file_extension = os.path.splitext(file_path)[1]
# Check if it is not a directory and if it has either .midi or .mid extentions
if os.path.isfile(file_path) and (file_extension == ".midi" or file_extension == ".mid"):
encoded_midi = parse_midi(file_path, sample_freq, piano_range, transpose_range, stretching_range)
if len(encoded_midi) > 0:
words = set(encoded_midi.split(" "))
vocab = vocab | words
text += encoded_midi + " "
# Remove last space
text = text[:-1]
return text, vocab
def parse_midi(file_path, sample_freq, piano_range, transpose_range, stretching_range):
    """Encode one MIDI file into its token-string form, using a .txt cache.

    If <name>.txt exists next to the MIDI file, its contents are returned
    verbatim; otherwise the MIDI is parsed, encoded, and the encoding cached.
    """
    # Split datapath into dir and filename
    midi_dir = os.path.dirname(file_path)
    midi_name = os.path.basename(file_path).split(".")[0]
    # If txt version of the midi already exists, load data from it
    midi_txt_name = os.path.join(midi_dir, midi_name + ".txt")
    if os.path.isfile(midi_txt_name):
        # Cached encoding; `with` fixes the file-handle leak of the original
        # (the read handle was never closed).
        with open(midi_txt_name, "r") as midi_fp:
            encoded_midi = midi_fp.read()
    else:
        # Create a music21 MidiFile and read the raw file bytes
        midi = m21.midi.MidiFile()
        midi.open(file_path)
        midi.read()
        midi.close()
        # Translate midi to the token-string encoding
        encoded_midi = midi2encoding(midi, sample_freq, piano_range, transpose_range, stretching_range)
        if len(encoded_midi) > 0:
            # Cache the encoding for subsequent runs.
            with open(midi_txt_name, "w+") as midi_fp:
                midi_fp.write(encoded_midi)
    return encoded_midi
def midi2encoding(midi, sample_freq, piano_range, transpose_range, stretching_range):
    """Translate a music21 MidiFile into the space-joined token encoding.

    Returns "" when music21 cannot translate the file. (The original returned
    an empty *list* on failure, which crashed callers that split() the result;
    "" keeps the same len()/truthiness behavior with a consistent type.)
    """
    try:
        midi_stream = m21.midi.translate.midiFileToStream(midi)
    except Exception:  # music21 raises a variety of errors on malformed files
        return ""
    # Get piano roll from midi stream
    piano_roll = midi2piano_roll(midi_stream, sample_freq, piano_range, transpose_range, stretching_range)
    # Get encoded midi from piano roll
    encoded_midi = piano_roll2encoding(piano_roll)
    return " ".join(encoded_midi)
def piano_roll2encoding(piano_roll):
    """Serialize each piano-roll version into the text token encoding.

    Emits t_ (tempo), v_ (velocity), d_ (duration), n_ (note) tokens, only
    when the value changed since it was last emitted, plus w_ (wait) tokens
    that are merged across consecutive empty steps. Identical performances
    are deduplicated; returns the dict keys view of unique encodings.
    """
    unique_performances = {}
    next_index = 0
    for roll in piano_roll:
        prev_tempo = -1
        prev_velocity = -1
        prev_duration = -1.0
        tokens = []
        for step in roll:
            # Tempo (time) events live in the last row of each time step.
            step_tempo = step[-1][0]
            if step_tempo != 0 and step_tempo != prev_tempo:
                tokens.append("t_" + str(int(step_tempo)))
                prev_tempo = step_tempo
            # Remaining rows are pitches: [duration, velocity] per pitch.
            for pitch_ix in range(len(step) - 1):
                dur = step[pitch_ix][0]
                vel = int(step[pitch_ix][1])
                if vel != 0 and vel != prev_velocity:
                    tokens.append("v_" + str(vel))
                    prev_velocity = vel
                if dur != 0 and dur != prev_duration:
                    d_tuple = m21.duration.durationTupleFromQuarterLength(dur)
                    tokens.append("d_" + d_tuple.type + "_" + str(d_tuple.dots))
                    prev_duration = dur
                if dur != 0 and vel != 0:
                    tokens.append("n_" + str(pitch_ix))
            # End of time step: extend a trailing wait or start a new one.
            if tokens and tokens[-1][0] == "w":
                tokens[-1] = "w_" + str(int(tokens[-1].split("_")[1]) + 1)
            else:
                tokens.append("w_1")
        # End of piece marker.
        tokens.append("\n")
        # Keep only the first occurrence of each distinct performance.
        key = " ".join(tokens)
        if key not in unique_performances:
            unique_performances[key] = next_index
            next_index += 1
    return unique_performances.keys()
def write(encoded_midi, path):
    """Render an encoded token string to a MIDI file at *path*."""
    # Decode tokens into a music21 MidiFile, then stream it to disk.
    midi_file = encoding2midi(encoded_midi)
    midi_file.open(path, "wb")
    midi_file.write()
    midi_file.close()
def encoding2midi(note_encoding, ts_duration=0.25):
    """Decode a token string back into a music21 MidiFile.

    Tokens: w_<n> advances time by n steps, n_<pitch> emits a note using the
    current duration/dots/velocity state, d_<type>_<dots> and v_<vel> update
    that state, t_<bpm> inserts a metronome mark. ts_duration is the quarter
    length of one time step.

    Fixes: the original reused the loop variable `note` both for the token
    and the created Note object, and re-split each token repeatedly.
    """
    notes = []
    velocity = 100
    duration = "16th"
    dots = 0
    ts = 0  # current time step
    for token in note_encoding.split(" "):
        if len(token) == 0:
            continue
        fields = token.split("_")
        kind = token[0]
        if kind == "w":
            ts += int(fields[1])
        elif kind == "n":
            note = m21.note.Note(int(fields[1]))
            note.duration = m21.duration.Duration(type=duration, dots=dots)
            note.offset = ts * ts_duration
            note.volume.velocity = velocity
            notes.append(note)
        elif kind == "d":
            duration = fields[1]
            dots = int(fields[2])
        elif kind == "v":
            velocity = int(fields[1])
        elif kind == "t":
            tempo = int(fields[1])
            if tempo > 0:
                mark = m21.tempo.MetronomeMark(number=tempo)
                mark.offset = ts * ts_duration
                notes.append(mark)
        # "\n" (end-of-piece) and unknown tokens are ignored, as before.
    piano = m21.instrument.fromString("Piano")
    notes.insert(0, piano)
    piano_stream = m21.stream.Stream(notes)
    main_stream = m21.stream.Stream([piano_stream])
    return m21.midi.translate.streamToMidiFile(main_stream)
def midi_parse_notes(midi_stream, sample_freq):
    """Collect a (pitch, duration, velocity, offset) tuple for every Note."""
    events = []
    # Recurse the whole stream so notes inside parts/measures are found too.
    for n in midi_stream.recurse().addFilter(m21.stream.filters.ClassFilter('Note')):
        events.append((n.pitch.midi,
                       n.duration.quarterLength,
                       n.volume.velocity,
                       ma.floor(n.offset * sample_freq)))
    return events
def midi_parse_chords(midi_stream, sample_freq):
    """Flatten every Chord into one (pitch, duration, velocity, offset) per pitch."""
    chord_filter = m21.stream.filters.ClassFilter('Chord')
    events = []
    for chord in midi_stream.recurse().addFilter(chord_filter):
        # Duration/velocity/offset are shared by all pitches of the chord.
        dur = chord.duration.quarterLength
        vel = chord.volume.velocity
        off = ma.floor(chord.offset * sample_freq)
        events.extend((p.midi, dur, vel, off) for p in chord.pitches)
    return events
def midi_parse_metronome(midi_stream, sample_freq):
    """Collect a (bpm, offset) pair for every MetronomeMark in the stream."""
    marks = midi_stream.recurse().addFilter(
        m21.stream.filters.ClassFilter('MetronomeMark'))
    return [(int(mark.number), ma.floor(mark.offset * sample_freq)) for mark in marks]
def midi2notes(midi_stream, sample_freq, transpose_range):
    """Gather note and chord events, then replicate them across transpositions."""
    note_events = midi_parse_notes(midi_stream, sample_freq) \
        + midi_parse_chords(midi_stream, sample_freq)
    # Transpose the events to every key in transpose_range.
    return transpose_notes(note_events, transpose_range)
def midi2piano_roll(midi_stream, sample_freq, piano_range, transpose_range, stretching_range):
    """Convert a music21 stream into piano-roll tensors, one per (transposition, stretch)."""
    # One extra step so notes landing exactly on the final boundary have a slot.
    n_steps = ma.floor(midi_stream.duration.quarterLength * sample_freq) + 1
    # (pitch, duration, velocity, offset) events, replicated per transposition.
    transpositions = midi2notes(midi_stream, sample_freq, transpose_range)
    # Tempo events, replicated per time-stretch factor.
    tempo_variants = strech_time(midi_parse_metronome(midi_stream, sample_freq), stretching_range)
    return notes2piano_roll(transpositions, tempo_variants, n_steps, piano_range)
def notes2piano_roll(transpositions, time_streches, time_steps, piano_range):
    """Build one piano-roll tensor per (transposition, time-stretch) pair.

    Each roll has shape (time_steps, MAX_PITCH + 1, 2): rows 0..MAX_PITCH-1
    hold [clamped duration, discretized velocity] per pitch, and the final
    row holds discretized tempo events.

    Improvement: iterates the sequences directly instead of the original
    index-based range(len(...)) double loop.
    """
    performances = []
    min_pitch, max_pitch = piano_range
    for notes in transpositions:
        for time_events in time_streches:
            # Extra innermost dimension stores [duration, velocity] per entry.
            piano_roll = np.zeros((time_steps, MAX_PITCH + 1, 2))
            for pitch, duration, velocity, offset in notes:
                # Zero-length notes carry no information in the roll.
                if duration == 0.0:
                    continue
                # Force notes to be inside the specified piano_range
                pitch = clamp_pitch(pitch, max_pitch, min_pitch)
                piano_roll[offset, pitch][0] = clamp_duration(duration)
                piano_roll[offset, pitch][1] = discretize_value(velocity, bins=32, range=(MIN_VELOCITY, MAX_VELOCITY))
            for time, offset in time_events:
                # Tempo events are stored in the last row.
                piano_roll[offset, -1][0] = discretize_value(time, bins=100, range=(MIN_TEMPO, MAX_TEMPO))
            performances.append(piano_roll)
    return performances
def transpose_notes(notes, transpose_range):
    """Return one copy of *notes* per semitone shift.

    Shifts cover range(-floor(transpose_range/2), ceil(transpose_range/2));
    only the pitch component of each (pitch, duration, velocity, offset)
    tuple changes. Rewritten as a comprehension (same order, same output).
    """
    first_key = -ma.floor(transpose_range / 2)
    last_key = ma.ceil(transpose_range / 2)
    return [
        [(pitch + key, duration, velocity, offset)
         for pitch, duration, velocity, offset in notes]
        for key in range(first_key, last_key)
    ]
def strech_time(time_events, stretching_range):
    """Return one copy of *time_events* per tempo-stretch step.

    Steps cover range(-floor(stretching_range/2), ceil(stretching_range/2));
    each step shifts every tempo by 5% of MAX_TEMPO, keeping offsets intact.
    """
    slower_time = -ma.floor(stretching_range / 2)
    faster_time = ma.ceil(stretching_range / 2)
    streches = []
    for t_strech in range(slower_time, faster_time):
        shifted = [(time + 0.05 * t_strech * MAX_TEMPO, offset)
                   for time, offset in time_events]
        streches.append(shifted)
    return streches
def discretize_value(val, bins, range):
    """Clamp *val* into *range* and snap it down to the nearest of *bins* bin edges.

    Note: the parameter name `range` shadows the builtin; it is kept because
    callers pass it by keyword (range=(lo, hi)).
    """
    min_val, max_val = range
    clamped = min(max(int(val), min_val), max_val)
    bin_size = max_val / bins
    return ma.floor(clamped / bin_size) * bin_size
def clamp_pitch(pitch, max, min):
    """Fold *pitch* by whole octaves until it lies in [min, max).

    Parameter names shadow the builtins but are kept for interface stability.
    """
    octave = 12
    while pitch < min:
        pitch += octave
    while pitch >= max:
        pitch -= octave
    return pitch
def clamp_duration(duration, max=THREE_DOTTED_BREVE, min=THREE_DOTTED_32ND):
    """Clamp a quarter-length duration into [min, max], snapping values that
    music21 cannot express to the closest expressible note type.
    """
    # Max duration is a 3-dotted breve
    if duration > max:
        duration = max
    # Min duration is a 3-dotted 32nd note (comment fixed: previously said breve)
    if duration < min:
        duration = min
    duration_tuple = m21.duration.durationTupleFromQuarterLength(duration)
    if duration_tuple.type == "inexpressible":
        # Fall back to the closest named note type's quarter length.
        duration_clossest_type = m21.duration.quarterLengthToClosestType(duration)[0]
        duration = m21.duration.typeToDuration[duration_clossest_type]
    return duration
if __name__ == "__main__":
    # Command-line entry point: encode MIDI data and round-trip it back to disk.
    arg_parser = argparse.ArgumentParser(description='midi_encoder.py')
    arg_parser.add_argument('--path', type=str, required=True, help="Path to midi data.")
    arg_parser.add_argument('--transp', type=int, default=1, help="Transpose range.")
    arg_parser.add_argument('--strech', type=int, default=1, help="Time stretching range.")
    opt = arg_parser.parse_args()
    # Encode the input data and show the resulting token stream.
    text, vocab = load(opt.path, transpose_range=opt.transp, stretching_range=opt.strech)
    print(text)
    # Decode the encoding back into a MIDI file as a sanity check.
    write(text, "encoded.mid")
import base64
import json
import googleapiclient.discovery
import string
import time
def process_log_entry(data, context):
    """Cloud Function entry point: inspect the firewall rule named in a Pub/Sub
    log event and disable it if it exposes SSH (or all protocols) to the internet.

    data: Pub/Sub message; data['data'] is a base64-encoded log entry.
    context: Cloud Functions event context (unused).
    """
    data_buffer = base64.b64decode(data['data'])
    log_entry = json.loads(data_buffer)
    firewall_name = log_entry['jsonPayload']['resource']['name']
    project_id = log_entry['resource']['labels']['project_id']
    service = create_service()
    print('Describing Firewall')
    disabled = check_for_disabled(project_id, service, firewall_name)
    source_ranges = get_source_ranges(project_id, service, firewall_name)
    allow_all = check_for_allowed_all(project_id, service, firewall_name)
    if allow_all:
        # Wait for the (possibly still-propagating) rule to settle before patching.
        time.sleep(20)
        disable_firewall(project_id, service, firewall_name)
        print("Firewall %s Disabled" % firewall_name)
    else:
        allowed_ports = get_allowed_ports_list(project_id, service, firewall_name)
        ssh_allowed = check_for_port_22(allowed_ports)
        print(ssh_allowed)
        print(source_ranges)
        open_to_world = '0.0.0.0/0' in source_ranges
        if ssh_allowed and open_to_world and not disabled:
            time.sleep(20)
            disable_firewall(project_id, service, firewall_name)
            print("Firewall %s Disabled" % firewall_name)
        elif ssh_allowed and open_to_world and disabled:
            # Bug fix: the original passed no argument to the %s format string,
            # printing a literal "%s" instead of the firewall name.
            print("Firewall %s allows SSH from the Internet but is disabled" % firewall_name)
        else:
            print('Firewall %s does not allow SSH inbound from the internet' % firewall_name)
def create_service():
    """Build and return a Compute Engine API client ('compute', version 'v1')."""
    # Construct the service object for interacting with the Cloud Compute API -
    # the 'compute' service, at version 'v1'.
    # Authentication is provided by application default credentials.
    # When running locally, these are available after running
    # `gcloud auth application-default login`. When running on Compute
    # Engine, these are available from the environment.
    return googleapiclient.discovery.build('compute', 'v1')
def get_source_ranges(project_id, client, firewall):
    """Fetch the firewall rule and return its sourceRanges (CIDR strings)."""
    response = client.firewalls().get(project=project_id, firewall=firewall).execute()
    source_ranges = response['sourceRanges']
    print(source_ranges)
    return source_ranges
def get_allowed_ports_list(project_id, client, firewall):
    """Return every port / port-range string allowed by the firewall rule.

    Robustness fix: allowed entries without a 'ports' key (e.g. protocol-wide
    allows such as icmp, where the API omits the field) no longer raise
    KeyError — they simply contribute no ports.
    """
    response = client.firewalls().get(project=project_id, firewall=firewall).execute()
    print(response)
    ports = []
    for rule in response['allowed']:
        ports.extend(rule.get('ports', []))
    print(ports)
    return ports
def check_for_allowed_all(project_id, client, firewall):
    """Return True if ANY allowed entry of the firewall rule uses protocol 'all'.

    Bug fix: the original returned inside the loop after inspecting only the
    FIRST entry, so an 'all' rule listed after another protocol was missed.
    """
    response = client.firewalls().get(project=project_id, firewall=firewall).execute()
    print(response)
    return any(rule['IPProtocol'] == 'all' for rule in response['allowed'])
def check_for_disabled(project_id, client, firewall):
    """Return True if the firewall rule is currently disabled.

    Idiom fix: collapses the redundant if/else into returning the comparison
    directly (identical behavior — `x == True` already yields a bool).
    """
    response = client.firewalls().get(project=project_id, firewall=firewall).execute()
    print(response)
    return response['disabled'] == True
def check_for_port_22(ports):
    """Return True if port 22 (SSH) is covered by any entry in *ports*.

    Entries are either single ports ('22') or ranges ('20-25').

    Bug fix: the original returned False as soon as the FIRST entry did not
    cover port 22, never examining the remaining entries; it also returned
    None (instead of False) for an empty list.
    """
    for item in ports:
        if '-' in item:
            start_num = item.split("-")[0]
            end_num = item.split("-")[1]
            if int(start_num) <= 22 <= int(end_num):
                return True
        elif item == '22':
            return True
    return False
def disable_firewall(project_id, client, firewall):
    """Patch the firewall rule so it is disabled.

    Bug fix: the Compute Engine API declares 'disabled' as a boolean; the
    original sent the string "true", which is not a valid boolean value for
    the firewalls.patch request body.
    """
    firewall_body = {
        "name": firewall,
        "disabled": True
    }
    request = client.firewalls().patch(project=project_id, firewall=firewall, body=firewall_body)
    response = request.execute()
import json
import googleapiclient.discovery
import string
import time
def process_log_entry(data, context):
data_buffer = base64.b64decode(data['data'])
log_entry = json.loads(data_buffer)
firewall_name = log_entry['jsonPayload']['resource']['name']
project_id = log_entry['resource']['labels']['project_id']
service = create_service()
print('Describing Firewall')
disabled = check_for_disabled(project_id, service, firewall_name)
source_ranges = get_source_ranges(project_id, service, firewall_name)
allow_all = check_for_allowed_all(project_id, service, firewall_name)
if allow_all == True:
time.sleep(20)
disable_firewall(project_id, service, firewall_name)
print("Firewall %s Disabled" % firewall_name)
else:
allowed_ports = get_allowed_ports_list(project_id, service, firewall_name)
ssh_allowed = check_for_port_22(allowed_ports)
print(ssh_allowed)
print(source_ranges)
if ssh_allowed == True and '0.0.0.0/0' in source_ranges and disabled == False:
time.sleep(20)
disable_firewall(project_id, service, firewall_name)
print("Firewall %s Disabled" % firewall_name)
elif ssh_allowed == True and '0.0.0.0/0' in source_ranges and disabled == True:
print("Firewall %s allows SSH from the Internet but is disabled")
else:
print('Firewall %s does not allow SSH inbound from the internet' % firewall_name)
def create_service():
# Construct the service object for interacting with the Cloud Compute API -
# the 'compute' service, at version 'v1'.
# Authentication is provided by application default credentials.
# When running locally, these are available after running
# `gcloud auth application-default login`. When running on Compute
# Engine, these are available from the environment.
return googleapiclient.discovery.build('compute', 'v1')
def get_source_ranges(project_id, client, firewall):
request = client.firewalls().get(project=project_id, firewall=firewall)
response = request.execute()
source_ranges = response['sourceRanges']
print(source_ranges)
return source_ranges
def get_allowed_ports_list(project_id, client, firewall):
request = client.firewalls().get(project=project_id, firewall=firewall)
response = request.execute()
print(response)
ports = []
for each in response['allowed']:
ports_list = each['ports']
for port in ports_list:
ports.append(port)
print(ports)
return ports
def check_for_allowed_all(project_id, client, firewall):
request = client.firewalls().get(project=project_id, firewall=firewall)
response = request.execute()
print(response)
for each in response['allowed']:
if each['IPProtocol'] == 'all':
return True
else:
return False
def check_for_disabled(project_id, client, firewall):
request = client.firewalls().get(project=project_id, firewall=firewall)
response = request.execute()
print(response)
if response['disabled'] == True:
return True
else:
return False
def check_for_port_22(ports):
for item in ports:
if '-' in item:
start_num = item.split("-")[0]
end_num = item.split("-")[1]
if int(start_num) <= 22 <= int(end_num):
return True
else:
return False
elif item == '22':
return True
else:
return False
def disable_firewall(project_id, client, firewall):
firewall_body = {
"name": firewall,
"disabled": "true"
}
request = client.firewalls().patch(project=project_id, firewall=firewall, body=firewall_body)
response = request.execute() | 0.339828 | 0.113064 |
from argparse import ArgumentParser
import os
import subprocess
import logging
import utility
import ome_schema
def extract_metadata(input_path, output_path):
    """
    Extract OME metadata from the input file and write it out as a nicely formatted xml using
    bftools. (http://www.openmicroscopy.org/site/support/bio-formats5.3/users/comlinetools/display.html)
    """
    # Locate the bftools binaries; falls back to the current working directory.
    bf_tools_dir = os.getenv('BFTOOLS_DIR', os.getcwd()) + "/"
    # NOTE(review): paths are interpolated into a shell=True command string, so
    # paths containing spaces or shell metacharacters will break the pipeline
    # (or execute as shell code) — consider quoting with pipes.quote/shlex.quote.
    # TODO confirm callers only pass trusted, space-free paths.
    command = bf_tools_dir +"showinf -omexml-only -nopix " + input_path + " | " + bf_tools_dir + "xmlindent > " + output_path
    p = subprocess.Popen(command, shell=True)
    # Block until the pipeline finishes so output_path is complete on return.
    p.wait()
def get_metadata_as_class(input_xml_path):
    """
    Return the OME metadata from the input XML file as a Python class. The class is automatically generated
    using pyxbgen (http://pyxb.sourceforge.net/pyxbgen_cli.html) and the current OME XML Schema
    (https://www.openmicroscopy.org/Schemas/OME/2016-06/ome.xsd).
    If you need to use a newer schema you need to regenerate the file ome_schema.py by doing:
    pip install pyxb
    pyxbgen -m ome_schema -u https://www.openmicroscopy.org/Schemas/OME/2016-06/ome.xsd
    where the web address points to the new schema. You can then access the elements of the OME XML as
    instance attributes etc.
    """
    # `with` fixes the file-handle leak of the original open(...).read().
    with open(input_xml_path) as xml_file:
        xml = xml_file.read()
    image_metadata = ome_schema.CreateFromDocument(xml)
    return image_metadata
def integer_color_to_rgb(color):
    """
    Convert integer color to (r,g,b)
    """
    # Unpack the packed 0xRRGGBB integer one byte at a time.
    red = (color >> 16) & 255
    green = (color >> 8) & 255
    blue = color & 255
    return (red, green, blue)
def print_metadata_overview(image_metadata):
    """
    Print a reader-friendly metadata summary
    """
    # NOTE(review): Python 2 print statements — this module predates Python 3.
    print "Number of Images: ", len(image_metadata.Image)
    print "Image '0' - Name: ", image_metadata.Image[0].Name
    print "Image '0' - Num Channels: ", image_metadata.Image[0].Pixels.SizeC
    print "Image '0' - Num Times: ", image_metadata.Image[0].Pixels.SizeT
    # Physical pixel sizes and their units as recorded in the OME metadata.
    pixel_size_x = image_metadata.Image[0].Pixels.PhysicalSizeX
    pixel_size_y = image_metadata.Image[0].Pixels.PhysicalSizeY
    pixel_size_z = image_metadata.Image[0].Pixels.PhysicalSizeZ
    pixel_unit_x = image_metadata.Image[0].Pixels.PhysicalSizeXUnit
    pixel_unit_y = image_metadata.Image[0].Pixels.PhysicalSizeYUnit
    pixel_unit_z = image_metadata.Image[0].Pixels.PhysicalSizeZUnit
    print "Image '0' - Pixel Physical Size X: ", pixel_size_x, pixel_unit_x
    print "Image '0' - Pixel Physical Size Y: ", pixel_size_y, pixel_unit_y
    print "Image '0' - Pixel Physical Size Z: ", pixel_size_z, pixel_unit_z
    print "Image '0' - Pixel Size X: ", image_metadata.Image[0].Pixels.SizeX
    print "Image '0' - Pixel Size Y:", image_metadata.Image[0].Pixels.SizeY
    print "Image '0' - Pixel Size Z:", image_metadata.Image[0].Pixels.SizeZ
    print "Image '0' - Pixel Dimension Order: ", image_metadata.Image[0].Pixels.DimensionOrder
    print "Image '0' - Pixel Bits: ", image_metadata.Image[0].Pixels.SignificantBits
    # One line per channel, with its color unpacked from the packed integer.
    for idx, eachChannel in enumerate(image_metadata.Image[0].Pixels.Channel):
        print "Image '0' - Channel " +str(idx) + " Color: ", integer_color_to_rgb(eachChannel.Color)
if __name__ == "__main__":
    # Do setup: logging configuration etc. provided by the project utility module.
    tool_name = "extract_metadata"
    utility.do_setup(tool_name)
    logger1 = logging.getLogger('format_conversion.'+tool_name)
    # Suppress XML Parse warnings emitted by pyxb while reading the schema.
    pyxb_logger = logging.getLogger('pyxb')
    pyxb_logger.setLevel(logging.CRITICAL)
    parser = ArgumentParser()
    parser.add_argument("-i", "--input_file", type=str, help='Input file in a ZEISS format.')
    parser.add_argument("-o", "--output_file", type=str, help='Output metadata file.')
    # NOTE(review): type=bool makes any non-empty string truthy; '--verbose False'
    # still enables the summary. Consider action='store_true' instead.
    parser.add_argument("--verbose", type=bool, help='Output a simple metadata summary.')
    args = parser.parse_args()
    logger1.info('Reading Metadata At: ' + args.input_file)
    extract_metadata(args.input_file, args.output_file)
    # Optionally parse the freshly written XML back in and print a summary.
    if(args.verbose):
        image_metadata = get_metadata_as_class(args.output_file)
        print_metadata_overview(image_metadata)
logger1.info('Completed Reading Metadata') | src/stack3d/formats/extract_zeiss_metadata.py | from argparse import ArgumentParser
import os
import subprocess
import logging
import utility
import ome_schema
def extract_metadata(input_path, output_path):
"""
Extract OME metadata from the input file and write it out as a nicely formatted xml using
bftools. (http://www.openmicroscopy.org/site/support/bio-formats5.3/users/comlinetools/display.html)
"""
bf_tools_dir = os.getenv('BFTOOLS_DIR', os.getcwd()) + "/"
command = bf_tools_dir +"showinf -omexml-only -nopix " + input_path + " | " + bf_tools_dir + "xmlindent > " + output_path
p = subprocess.Popen(command, shell=True)
p.wait()
def get_metadata_as_class(input_xml_path):
"""
Return the OME metadata from the input XML file as a Python class. The class is automatically generated
using pyxbgen (http://pyxb.sourceforge.net/pyxbgen_cli.html) and the current OME XML Schema
(https://www.openmicroscopy.org/Schemas/OME/2016-06/ome.xsd).
If you need to use a newer schema you need to regenerate the file ome_schema.py by doing:
pip install pyxb
pyxbgen -m ome_schema -u https://www.openmicroscopy.org/Schemas/OME/2016-06/ome.xsd
where the web address points to the new schema. You can then access the elements of the OME XML as
instance attributes etc.
"""
xml = open(input_xml_path).read()
image_metadata = ome_schema.CreateFromDocument(xml)
return image_metadata
def integer_color_to_rgb(color):
"""
Convert integer color to (r,g,b)
"""
return ((color >> 16) & 255, (color >> 8) & 255, color & 255)
def print_metadata_overview(image_metadata):
"""
Print a reader-friendly metadata summary
"""
print "Number of Images: ", len(image_metadata.Image)
print "Image '0' - Name: ", image_metadata.Image[0].Name
print "Image '0' - Num Channels: ", image_metadata.Image[0].Pixels.SizeC
print "Image '0' - Num Times: ", image_metadata.Image[0].Pixels.SizeT
pixel_size_x = image_metadata.Image[0].Pixels.PhysicalSizeX
pixel_size_y = image_metadata.Image[0].Pixels.PhysicalSizeY
pixel_size_z = image_metadata.Image[0].Pixels.PhysicalSizeZ
pixel_unit_x = image_metadata.Image[0].Pixels.PhysicalSizeXUnit
pixel_unit_y = image_metadata.Image[0].Pixels.PhysicalSizeYUnit
pixel_unit_z = image_metadata.Image[0].Pixels.PhysicalSizeZUnit
print "Image '0' - Pixel Physical Size X: ", pixel_size_x, pixel_unit_x
print "Image '0' - Pixel Physical Size Y: ", pixel_size_y, pixel_unit_y
print "Image '0' - Pixel Physical Size Z: ", pixel_size_z, pixel_unit_z
print "Image '0' - Pixel Size X: ", image_metadata.Image[0].Pixels.SizeX
print "Image '0' - Pixel Size Y:", image_metadata.Image[0].Pixels.SizeY
print "Image '0' - Pixel Size Z:", image_metadata.Image[0].Pixels.SizeZ
print "Image '0' - Pixel Dimension Order: ", image_metadata.Image[0].Pixels.DimensionOrder
print "Image '0' - Pixel Bits: ", image_metadata.Image[0].Pixels.SignificantBits
for idx, eachChannel in enumerate(image_metadata.Image[0].Pixels.Channel):
print "Image '0' - Channel " +str(idx) + " Color: ", integer_color_to_rgb(eachChannel.Color)
if __name__ == "__main__":
# Do setup
tool_name = "extract_metadata"
utility.do_setup(tool_name)
logger1 = logging.getLogger('format_conversion.'+tool_name)
# Suppress XML Parse warnings
pyxb_logger = logging.getLogger('pyxb')
pyxb_logger.setLevel(logging.CRITICAL)
parser = ArgumentParser()
parser.add_argument("-i", "--input_file", type=str, help='Input file in a ZEISS format.')
parser.add_argument("-o", "--output_file", type=str, help='Output metadata file.')
parser.add_argument("--verbose", type=bool, help='Output a simple metadata summary.')
args = parser.parse_args()
logger1.info('Reading Metadata At: ' + args.input_file)
extract_metadata(args.input_file, args.output_file)
if(args.verbose):
image_metadata = get_metadata_as_class(args.output_file)
print_metadata_overview(image_metadata)
logger1.info('Completed Reading Metadata') | 0.611266 | 0.195095 |
from __future__ import unicode_literals
from datetime import datetime
import unittest
import warnings
from mixpanel_jql import JQL, raw, Events, People
from mixpanel_jql.query import _f
from mixpanel_jql.exceptions import InvalidJavaScriptText, JQLSyntaxError
class TestJavaScriptArgs(unittest.TestCase):
    """Check that JQL.filter() only accepts JavaScript supplied as text."""
    def setUp(self):
        # A minimal query object; no API secret is needed since nothing runs.
        self.query = JQL(api_secret=None, events=Events())
    def _assert_invalid_arg(self, arg):
        """Assert that passing *arg* to filter() raises InvalidJavaScriptText."""
        with self.assertRaises(InvalidJavaScriptText):
            self.query.filter(arg)
    def test_valid_javascript_arg(self):
        """Strings are accepted; non-string values are rejected."""
        self.query.filter("e.x == 'y'")
        self._assert_invalid_arg(4)
        self._assert_invalid_arg(list)
        self._assert_invalid_arg(True)
    def test_auto_function(self):
        """raw() text passes through unchanged; plain text is wrapped in a function."""
        self.assertEqual(_f(raw("test")), "test")
        self.assertEqual(_f("test"), "function(e){return test}")
class TestSourceParameters(unittest.TestCase):
    """Validate syntax checking of Events/People/join parameter dictionaries."""
    def _try_invalid_events(self, params):
        """Assert Events(params) raises JQLSyntaxError and return the exception."""
        try:
            Events(params)
            self.fail("Expected Events syntax error with params: %s" % params)
        except JQLSyntaxError as e:
            return e
    def _try_invalid_people(self, params):
        """Assert People(params) raises JQLSyntaxError and return the exception."""
        try:
            People(params)
            self.fail("Expected People syntax error with params: %s" % params)
        except JQLSyntaxError as e:
            return e
    def _try_invalid_join(self, params):
        """Assert JQL(join_params=params) raises JQLSyntaxError and return it."""
        try:
            JQL(api_secret="asas", events=Events(), people=People(), join_params=params)
            self.fail("Expected Events syntax error with params: %s" % params)
        except JQLSyntaxError as e:
            return e
    def test_bad_event_key(self):
        """Unknown keys in event_params are rejected with a descriptive message."""
        e = self._try_invalid_events({'mew': 32})
        self.assertEqual('"mew" is not a valid key in event_params', str(e))
    def test_event_date_keys(self):
        """to_date/from_date accept str, datetime, and date; other types fail."""
        for k in ('to_date', 'from_date'):
            for v in ('2017-10-19', datetime(2017, 10, 19), datetime(2017, 10, 19).date()):
                q = Events({k: v})
                self.assertIn('2017-10-19', str(q))
        # Now a bad key.
        e = self._try_invalid_events({'to_date': 232})
        self.assertEqual(str(e), 'to_date must be datetime, datetime.date, or str')
    def test_event_event_selectors(self):
        """event_selectors must be an iterable of dicts with known, string-valued keys."""
        def good_params():
            # Fresh dict each call so each sub-case can mutate it independently.
            return {
                'event_selectors': [{
                    'event': 'my_event',
                    'selector': 'my selector',
                    'label': 'my label'
                }]
            }
        # Test valid
        Events(good_params())
        # Bad array
        bad_params = good_params()
        bad_params['event_selectors'] = 3
        e = self._try_invalid_events(bad_params)
        self.assertEqual(
            str(e), "event_params['event_selectors'] must be iterable")
        # Bad key types
        for key in ('event', 'selector', 'label'):
            bad_params = good_params()
            bad_params['event_selectors'][0][key] = 3
            e = self._try_invalid_events(bad_params)
            self.assertEqual(
                str(e), "event_params['event_selectors'][0].%s must be a string" % key)
        # Bad key
        bad_params = good_params()
        bad_params['event_selectors'][0]['mrao'] = 3
        e = self._try_invalid_events(bad_params)
        self.assertEqual(
            str(e), "'mrao' is not a valid key in event_params['event_selectors'][0]")
    def test_bad_people_key(self):
        """Unknown keys in people_params are rejected with a descriptive message."""
        e = self._try_invalid_people({'mew': 32})
        self.assertEqual('"mew" is not a valid key in people_params', str(e))
    def test_people_user_selectors(self):
        """user_selectors entries must use known keys with string values."""
        def good_params():
            return {
                'user_selectors': [{
                    'selector': 'my selector',
                }]
            }
        # Test valid
        People(good_params())
        # Bad key types
        bad_params = good_params()
        bad_params['user_selectors'][0]['selector'] = 3
        e = self._try_invalid_people(bad_params)
        self.assertEqual(
            str(e), "people_params['user_selectors'][0].selector must be a string")
        # Bad key
        bad_params = good_params()
        bad_params['user_selectors'][0]['mrao'] = 3
        e = self._try_invalid_people(bad_params)
        self.assertEqual(
            str(e), "'mrao' is not a valid key in people_params['user_selectors'][0]")
    def test_bad_join_key(self):
        """Unknown keys in join_params are rejected with a descriptive message."""
        e = self._try_invalid_join({'mew': 32})
        self.assertEqual('"mew" is not a valid key in join_params', str(e))
    def test_join_types(self):
        """Only full/left/right/inner are accepted as join types."""
        # Good types
        for t in ('full', 'left', 'right', 'inner'):
            JQL('some_key', events=Events(), people=People(), join_params={'type': t})
        # Bad type
        e = self._try_invalid_join({'type': 'mew'})
        self.assertEqual(
            '"mew" is not a valid join type (valid types: full, left, right, inner)',
            str(e))
    def test_join_selectors(self):
        """join_params['selectors'] must be an iterable of dicts with string values."""
        def good_params():
            return {
                'selectors': [{
                    'event': 'my_event',
                    'selector': 'my selector'
                }]
            }
        # Test valid
        JQL('some_api_key', events=Events(), people=People(), join_params=good_params())
        # Bad array
        bad_params = good_params()
        bad_params['selectors'] = 3
        e = self._try_invalid_join(bad_params)
        self.assertEqual(
            str(e), "join_params['selectors'] must be iterable")
        # Bad key types
        for key in ('event', 'selector'):
            bad_params = good_params()
            bad_params['selectors'][0][key] = 3
            e = self._try_invalid_join(bad_params)
            self.assertEqual(
                str(e), "join_params['selectors'][0].%s must be a string" % key)
        # Bad key
        bad_params = good_params()
        bad_params['selectors'][0]['mrao'] = 3
        e = self._try_invalid_join(bad_params)
        self.assertEqual(
            str(e), "'mrao' is not a valid key in join_params['selectors'][0]")
class TestDeprecatedSyntaxWarnings(unittest.TestCase):
    """Ensure deprecated constructor arguments and methods emit DeprecationWarning."""
    def test_query_plan(self):
        """query_plan() is deprecated and should warn, naming itself in the message."""
        with warnings.catch_warnings(record=True) as w:
            q = JQL('key', events=Events(), people=People())
            q.query_plan()
            self.assertIs(w[-1].category, DeprecationWarning)
            self.assertIn('query_plan', str(w[-1].message))
    def test_params(self):
        """The legacy 'params' constructor argument warns exactly once."""
        with warnings.catch_warnings(record=True) as w:
            JQL('key', params={})
            self.assertEqual(len(w), 1)
            self.assertIs(w[-1].category, DeprecationWarning)
            self.assertIn('params', str(w[-1].message))
    def test_events_boolean(self):
        """Passing events=True (instead of an Events object) warns exactly once."""
        with warnings.catch_warnings(record=True) as w:
            JQL('key', events=True)
            self.assertEqual(len(w), 1)
            self.assertIs(w[-1].category, DeprecationWarning)
            self.assertIn('events', str(w[-1].message))
    def test_people_boolean(self):
        """Passing people=True (instead of a People object) warns exactly once."""
        with warnings.catch_warnings(record=True) as w:
            JQL('key', people=True)
            self.assertEqual(len(w), 1)
            self.assertIs(w[-1].category, DeprecationWarning)
self.assertIn('people', str(w[-1].message)) | tests/test_syntax.py |
from __future__ import unicode_literals
from datetime import datetime
import unittest
import warnings
from mixpanel_jql import JQL, raw, Events, People
from mixpanel_jql.query import _f
from mixpanel_jql.exceptions import InvalidJavaScriptText, JQLSyntaxError
class TestJavaScriptArgs(unittest.TestCase):
def setUp(self):
self.query = JQL(api_secret=None, events=Events())
def _assert_invalid_arg(self, arg):
with self.assertRaises(InvalidJavaScriptText):
self.query.filter(arg)
def test_valid_javascript_arg(self):
self.query.filter("e.x == 'y'")
self._assert_invalid_arg(4)
self._assert_invalid_arg(list)
self._assert_invalid_arg(True)
def test_auto_function(self):
self.assertEqual(_f(raw("test")), "test")
self.assertEqual(_f("test"), "function(e){return test}")
class TestSourceParameters(unittest.TestCase):
def _try_invalid_events(self, params):
try:
Events(params)
self.fail("Expected Events syntax error with params: %s" % params)
except JQLSyntaxError as e:
return e
def _try_invalid_people(self, params):
try:
People(params)
self.fail("Expected People syntax error with params: %s" % params)
except JQLSyntaxError as e:
return e
def _try_invalid_join(self, params):
try:
JQL(api_secret="asas", events=Events(), people=People(), join_params=params)
self.fail("Expected Events syntax error with params: %s" % params)
except JQLSyntaxError as e:
return e
def test_bad_event_key(self):
e = self._try_invalid_events({'mew': 32})
self.assertEqual('"mew" is not a valid key in event_params', str(e))
def test_event_date_keys(self):
for k in ('to_date', 'from_date'):
for v in ('2017-10-19', datetime(2017, 10, 19), datetime(2017, 10, 19).date()):
q = Events({k: v})
self.assertIn('2017-10-19', str(q))
# Now a bad key.
e = self._try_invalid_events({'to_date': 232})
self.assertEqual(str(e), 'to_date must be datetime, datetime.date, or str')
def test_event_event_selectors(self):
def good_params():
return {
'event_selectors': [{
'event': 'my_event',
'selector': 'my selector',
'label': 'my label'
}]
}
# Test valid
Events(good_params())
# Bad array
bad_params = good_params()
bad_params['event_selectors'] = 3
e = self._try_invalid_events(bad_params)
self.assertEqual(
str(e), "event_params['event_selectors'] must be iterable")
# Bad key types
for key in ('event', 'selector', 'label'):
bad_params = good_params()
bad_params['event_selectors'][0][key] = 3
e = self._try_invalid_events(bad_params)
self.assertEqual(
str(e), "event_params['event_selectors'][0].%s must be a string" % key)
# Bad key
bad_params = good_params()
bad_params['event_selectors'][0]['mrao'] = 3
e = self._try_invalid_events(bad_params)
self.assertEqual(
str(e), "'mrao' is not a valid key in event_params['event_selectors'][0]")
def test_bad_people_key(self):
e = self._try_invalid_people({'mew': 32})
self.assertEqual('"mew" is not a valid key in people_params', str(e))
    def test_people_user_selectors(self):
        """Validate type and key checking of people_params['user_selectors']."""
        def good_params():
            # Fresh structure each call: the cases below mutate it.
            return {
                'user_selectors': [{
                    'selector': 'my selector',
                }]
            }
        # Test valid
        People(good_params())
        # Bad key types
        bad_params = good_params()
        bad_params['user_selectors'][0]['selector'] = 3
        e = self._try_invalid_people(bad_params)
        self.assertEqual(
            str(e), "people_params['user_selectors'][0].selector must be a string")
        # Bad key
        bad_params = good_params()
        bad_params['user_selectors'][0]['mrao'] = 3
        e = self._try_invalid_people(bad_params)
        self.assertEqual(
            str(e), "'mrao' is not a valid key in people_params['user_selectors'][0]")
    def test_bad_join_key(self):
        """An unknown key in join_params is rejected and named in the error."""
        e = self._try_invalid_join({'mew': 32})
        self.assertEqual('"mew" is not a valid key in join_params', str(e))
    def test_join_types(self):
        """Only full/left/right/inner are accepted as join types."""
        # Good types
        for t in ('full', 'left', 'right', 'inner'):
            JQL('some_key', events=Events(), people=People(), join_params={'type': t})
        # Bad type
        e = self._try_invalid_join({'type': 'mew'})
        self.assertEqual(
            '"mew" is not a valid join type (valid types: full, left, right, inner)',
            str(e))
    def test_join_selectors(self):
        """Validate type and key checking of join_params['selectors']."""
        def good_params():
            # Fresh structure each call: the cases below mutate it.
            return {
                'selectors': [{
                    'event': 'my_event',
                    'selector': 'my selector'
                }]
            }
        # Test valid
        JQL('some_api_key', events=Events(), people=People(), join_params=good_params())
        # Bad array
        bad_params = good_params()
        bad_params['selectors'] = 3
        e = self._try_invalid_join(bad_params)
        self.assertEqual(
            str(e), "join_params['selectors'] must be iterable")
        # Bad key types
        for key in ('event', 'selector'):
            bad_params = good_params()
            bad_params['selectors'][0][key] = 3
            e = self._try_invalid_join(bad_params)
            self.assertEqual(
                str(e), "join_params['selectors'][0].%s must be a string" % key)
        # Bad key
        bad_params = good_params()
        bad_params['selectors'][0]['mrao'] = 3
        e = self._try_invalid_join(bad_params)
        self.assertEqual(
            str(e), "'mrao' is not a valid key in join_params['selectors'][0]")
class TestDeprecatedSyntaxWarnings(unittest.TestCase):
def test_query_plan(self):
with warnings.catch_warnings(record=True) as w:
q = JQL('key', events=Events(), people=People())
q.query_plan()
self.assertIs(w[-1].category, DeprecationWarning)
self.assertIn('query_plan', str(w[-1].message))
    def test_params(self):
        """Passing the legacy ``params`` argument warns exactly once."""
        with warnings.catch_warnings(record=True) as w:
            JQL('key', params={})
            self.assertEqual(len(w), 1)
            self.assertIs(w[-1].category, DeprecationWarning)
            self.assertIn('params', str(w[-1].message))
    def test_events_boolean(self):
        """Passing the legacy boolean ``events`` argument warns exactly once."""
        with warnings.catch_warnings(record=True) as w:
            JQL('key', events=True)
            self.assertEqual(len(w), 1)
            self.assertIs(w[-1].category, DeprecationWarning)
            self.assertIn('events', str(w[-1].message))
def test_people_boolean(self):
with warnings.catch_warnings(record=True) as w:
JQL('key', people=True)
self.assertEqual(len(w), 1)
self.assertIs(w[-1].category, DeprecationWarning)
self.assertIn('people', str(w[-1].message)) | 0.657978 | 0.29294 |
"Types of matter, things made of matter etc"
from useful import weightedchoice
import json
class Matter:
    """Base class for everything on the map.

    On construction, loads a weighted-random description and resource
    table from ``matter.json``, keyed by the subclass's ``shortdesc``.
    """

    # What is returned when scanned.
    description = "It's very generic"
    shortdesc = "Matter"
    # Resources you get when you mine it, in tons.
    # Class-level default; __init__ replaces it with a per-instance
    # dict via setres(), so instances do not share mutable state.
    resources = {}

    def __init__(self):
        """Read and set properties for this matter type from matter.json."""
        # Use a context manager so the file handle is always closed
        # (the original leaked it via open(...).read()).
        with open("matter.json", "r") as matter_json:
            matterfile = matter_json.read()
        # Find the section for this object's type.
        dictfull = json.loads(matterfile)[self.shortdesc]
        # Description is drawn from a weighted list in the JSON file.
        descdict = dictfull["Description"]
        self.setdesc(weightedchoice(descdict))
        # Resource counts are stored as Python-literal strings.
        # SECURITY: eval() executes arbitrary code from matter.json; the
        # file is local game data, but ast.literal_eval would be safer.
        resdict = dictfull["Resources"]
        self.setres(eval(weightedchoice(resdict)))

    def getdesc(self):
        """Return the full description string."""
        return self.description

    def setdesc(self, newdesc):
        """Set the full description (str)."""
        self.description = newdesc

    def getshortdesc(self):
        """Return the nonspecific (type) description."""
        return self.shortdesc

    def setshortdesc(self, newdesc):
        """Set the nonspecific (type) description."""
        self.shortdesc = newdesc

    def getres(self):
        """Return the resources dict (resource name -> tons)."""
        return self.resources

    def setres(self, newres):
        """Set the resources dict."""
        self.resources = newres
# Top-level matter objects i.e. in space
class Gas(Matter):
    """Gas and dust cloud; has effects on ships, sometimes."""
    shortdesc = "Gas and dust"
class Planet(Matter):
    """Planets have atmospheres, subsectors etc. which plain Matter does not."""
    shortdesc = "Planet"
class PColony(Planet):
    """Planetary colony: has populations, shops etc."""
    shortdesc = "Planetary Colony"
class Asteroid(Matter):
    """Asteroids do not have vegetation and are smaller than planets."""
    shortdesc = "Asteroid"
class AColony(Asteroid):
    """Asteroid colony: people and shops, but also hull integrity and
    other ship-like things."""
    shortdesc = "Asteroid Colony"
# Second level objects i.e. on the ground
class Shop(Matter):
    """Buy stuff here.

    Final line restored: the source line was fused with extraction
    metadata.
    """
    shortdesc = "Shop"
from useful import weightedchoice
import json
class Matter:
"Everything on the map is matter"
# What is returned when scanned
description = "It's very generic"
shortdesc = "Matter"
# Resources you get when you mine it, in tons
resources = {}
def __init__(self):
"Reads and sets properties from JSON file"
# Reads file
matterfile = open("matter.json", "r").read()
# Turns into dict with JSON, finds appropriate part for the type
# of object this matter is
dictfull = json.loads(matterfile)[self.shortdesc]
# Sets description from weighted list in JSON file
descdict = dictfull["Description"]
self.setdesc(weightedchoice(descdict))
# Sets resource counts from JSON file
resdict = dictfull["Resources"]
self.setres(eval(weightedchoice(resdict)))
def getdesc(self):
"Returns string"
return self.description
def setdesc(self, newdesc):
"Takes string"
self.description = newdesc
def getshortdesc(self):
"Returns nonspecific description"
return self.shortdesc
def setshortdesc(self, newdesc):
"Sets nonspecifc description"
self.shortdesc = newdesc
def getres(self):
"Returns dict"
return self.resources
def setres(self, newres):
"Takes dict"
self.resources = newres
# Top-level matter objects i.e. in space
class Gas(Matter):
"Has effects on ships, sometimes"
shortdesc = "Gas and dust"
class Planet(Matter):
"Planets have atmospheres, subsectors etc which matter does not"
shortdesc = "Planet"
class PColony(Planet):
"Colonies have populations, shops etc"
shortdesc = "Planetary Colony"
class Asteroid(Matter):
"Asteroids do not have vegetation and are smaller"
shortdesc = "Asteroid"
class AColony(Asteroid):
"People, shops, but also hull integrity and other ship-like things"
shortdesc = "Asteroid Colony"
# Second level objects i.e. on the ground
class Shop(Matter):
"Buy stuff here"
shortdesc = "Shop" | 0.581541 | 0.225076 |
from flask_table import Table, Col
# Declare your table
class ItemTable(Table):
    """Declarative flask_table table: one row per Item record."""
    # Column headers shown to the user.
    name = Col('Infrastructure')  # fixed user-facing typo: was 'Infastructure'
    description = Col('Quantity')
    cost = Col('Info')
# Get some objects
class Item(object):
    """One table row: an infrastructure name, its quantity, and a unit/info tag."""

    def __init__(self, name, description, cost):
        self.name, self.description, self.cost = name, description, cost
def create_table(dict):
    """Render wind-farm figures as an HTML table and write it to disk.

    :param dict: figures keyed by 'millNum', 'area', 'nomPower',
        'nomPower5', 'powerAfterWind', 'projTime', 'buildCost',
        'maintCost'.  (Parameter name shadows the builtin ``dict``;
        kept unchanged for backward compatibility with callers.)

    Side effects: writes the bare table to table2.html, a complete
    styled page to table.html, and prints the table HTML.
    """
    build_cost = float(dict.get('buildCost'))
    maint_cost = float(dict.get('maintCost'))
    items = [
        Item('Windmills', dict.get('millNum'), 'units'),
        Item('Area', dict.get('area'), 'm^2'),
        Item('Nominal Power', dict.get('nomPower'), 'MW'),
        Item('Nominal Power After 5 yr', dict.get('nomPower5'), 'MW'),
        Item('*MaxPower Acheivable', dict.get('powerAfterWind'), 'MW'),
        # Fixed user-facing typo: was 'Preject Time (per tower)'.
        Item('Project Time (per tower)', dict.get('projTime'), 'years'),
        Item('Build Cost', dict.get('buildCost'), 'million CAD'),
        Item('Maintenance Cost', dict.get('maintCost'), 'million CAD/yr'),
        Item('Total Cost after 5 yr', str(build_cost + maint_cost * 5), 'million CAD'),
        Item('Total Cost after 10 yr', str(build_cost + maint_cost * 10), 'million CAD'),
    ]
    # Populate the table once and reuse the rendered HTML.
    table_html = ItemTable(items).__html__()
    # Bare table fragment (legacy secondary output).
    with open("table2.html", "w") as fragment_file:
        fragment_file.write(table_html)
    # Print the html
    print(table_html)
    finalTable = """
<!DOCTYPE html>
<html>
<head>
<style>
table {
    font-family: arial, sans-serif;
    border-collapse: collapse;
    width: 100%;
}

td, th {
    border: 1px solid #dddddd;
    text-align: left;
    padding: 8px;
}

tr:nth-child(even) {
    background-color: #dddddd;
}
</style>
</head>
<body>
"""
    finalTable += table_html
    finalTable += """
</body>
</html>
"""
    with open("table.html", "w") as page_file:
        page_file.write(finalTable)
# Declare your table
class ItemTable(Table):
name = Col('Infastructure')
description = Col('Quantity')
cost = Col('Info')
# Get some objects
class Item(object):
def __init__(self, name, description, cost):
self.name = name
self.description = description
self.cost = cost
def create_table(dict):
# dict = {'area': 1, 'millNum': 2, 'nomPower': 3, 'nomPower5': 4, 'maintCost': 5, 'buildCost': 6, 'projTime': 7}
items = [Item('Windmills', dict.get('millNum'), 'units'),
Item('Area', dict.get('area'), 'm^2'),
Item('Nominal Power', dict.get('nomPower'), 'MW'),
Item('Nominal Power After 5 yr', dict.get('nomPower5'), 'MW'),
Item('*MaxPower Acheivable', dict.get('powerAfterWind'), 'MW'),
Item('Preject Time (per tower)', dict.get('projTime'), 'years'),
Item('Build Cost', dict.get('buildCost'), 'million CAD'),
Item('Maintenance Cost', dict.get('maintCost'), 'million CAD/yr'),
Item('Total Cost after 5 yr', str(float(dict.get('buildCost')) + (float(dict.get('maintCost')) * 5)), 'million CAD'),
Item('Total Cost after 10 yr', str(float(dict.get('buildCost')) + (float(dict.get('maintCost')) * 10)), 'million CAD')
]
# Populate the table
table = ItemTable(items)
Html_file= open("table2.html","w")
Html_file.write(table.__html__())
Html_file.close()
# Print the html
print(table.__html__())
finalTable = """
<!DOCTYPE html>
<html>
<head>
<style>
table {
font-family: arial, sans-serif;
border-collapse: collapse;
width: 100%;
}
td, th {
border: 1px solid #dddddd;
text-align: left;
padding: 8px;
}
tr:nth-child(even) {
background-color: #dddddd;
}
</style>
</head>
<body>
"""
finalTable += table.__html__()
finalTable += """
</body>
</html>
"""
Html_file= open("table.html","w")
Html_file.write(finalTable)
Html_file.close() | 0.606265 | 0.136005 |
# Standard library:
from datetime import datetime
from datetime import timedelta
# Django:
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.shortcuts import render_to_response
from django.template import RequestContext
# RandoPony:
from .models import Populaire
from .models import Rider
from .models import RiderForm
from .tasks import update_google_spreadsheet
from .tasks import email_to_rider
from .tasks import email_to_organizer
from ..pasture.helpers import email2words
def populaires_list(request):
    """Display the populaire pre-registration welcome information and
    the list of events in the sidebar.

    Events more than 7 days in the past are excluded.
    """
    cutoff = datetime.today().date() - timedelta(days=7)
    events = Populaire.objects.exclude(date__lt=cutoff)
    context = RequestContext(request, {
        'events': events,
        'admin_email': email2words(settings.ADMINS[0][1]),
    })
    return render_to_response('populaires/populaires_list.html', context)
def populaire(request, short_name, date, rider_id=None):
    """Display the populaire information, pre-registered riders list,
    and sometimes the registration confirmation, or duplicate
    registration flash message.

    :arg short_name: event short name used in the URL
    :arg date: event date formatted like 01Jan2012
    :arg rider_id: pk of a just-registered rider (confirmation view)
    """
    pop = get_object_or_404(
        Populaire, short_name=short_name,
        date=datetime.strptime(date, '%d%b%Y').date())
    if pop.in_past:
        # Event is over: show the generic past-event page;
        # pop.in_past holds the results URL.
        template = 'pasture/past_event.html'
        context = RequestContext(request, {
            'event': pop,
            'results_url': pop.in_past,
        })
    else:
        rider_list = Rider.objects.filter(
            populaire__short_name=short_name, populaire__date=pop.date)
        try:
            rider = Rider.objects.get(pk=int(rider_id))
        except (Rider.DoesNotExist, TypeError):
            # TypeError covers rider_id=None (plain event page, no
            # confirmation banner).
            rider = None
        template = 'populaires/populaire.html'
        context = RequestContext(request, {
            'populaire': pop,
            'registration_closed': pop.registration_closed,
            'event_started': pop.started,
            'rider': rider,
            'duplicate_registration': request.path.endswith('duplicate/'),
            'rider_list': rider_list,
            # Pad the page with a stock photo while the list is short.
            'show_filler_photo': len(rider_list) < 15,
        })
    response = render_to_response(template, context)
    return response
def registration_form(request, short_name, date):
    """Display the populaire pre-registration form page.

    GET renders an empty form; POST validates the submission and, on
    success, hands off to _process_registration and redirects to the
    confirmation or duplicate-registration page.  404s when
    registration is closed.
    """
    pop = get_object_or_404(
        Populaire, short_name=short_name,
        date=datetime.strptime(date, '%d%b%Y').date())
    if pop.registration_closed:
        raise Http404
    # Distances are stored like "50 km, 100 km"; build (int value,
    # display label) choices for the form field.
    distance_choices = [(int(dist.strip('kms').strip()), dist.strip())
                        for dist in pop.distance.split(',')]
    if request.method == 'POST':
        rider = RiderForm(
            request.POST, distance_choices=distance_choices,
            instance=Rider(populaire=pop))
        try:
            new_rider = rider.save(commit=False)
        except ValueError:
            # Validation error, so re-render form with rider inputs
            # and error messages
            form = rider
        else:
            url = _process_registration(pop, new_rider, request)
            return redirect(url)
    else:
        # Unbound form to render entry form
        form = RiderForm(distance_choices=distance_choices)
    context = RequestContext(request, {
        'populaire': pop,
        'form': form,
        'captcha_question':
            'Are you a human? Are you a cyclist? Please prove it. '
            'A bicycle has ___ wheels. Fill in the blank:',
    })
    response = render_to_response('populaires/registration_form.html', context)
    return response
def _process_registration(populaire, rider, request):
    """Process rider pre-registration for populaire.

    Returns the URL to redirect to: the duplicate-registration page if
    an identical rider (same name and email) already exists, otherwise
    the confirmation page after saving the rider and queueing the
    spreadsheet update and notification emails.
    """
    try:
        # Check for duplicate registration
        check_rider = Rider.objects.get(
            first_name=rider.first_name, last_name=rider.last_name,
            email=rider.email, populaire=populaire)
        url = reverse(
            'populaires:prereg-duplicate',
            args=(populaire.short_name, populaire.date.strftime('%d%b%Y'),
                  check_rider.id))
    except Rider.DoesNotExist:
        # Save new rider pre-registration and send emails to
        # rider and brevet organizer (queued via .delay, executed
        # asynchronously by the task worker).
        rider.save()
        update_google_spreadsheet.delay(populaire.pk)
        email_to_rider.delay(populaire.pk, rider.pk, request.get_host())
        email_to_organizer.delay(populaire.pk, rider.pk, request.get_host())
        url = reverse(
            'populaires:prereg-confirm',
            args=(populaire.short_name, populaire.date.strftime('%d%b%Y'),
                  rider.id))
    return url
def rider_emails(request, short_name, date, uuid):
    """Display a comma separated list of email addresses for the
    riders that have pre-registered for a populaire.

    The URL that requests this view includes a namespace UUID for the
    populaire to provide a measure of protection from email address
    collecting 'bots.

    Requests for this view more than 7 days after the populaire will
    fail with a 404.
    """
    pop = get_object_or_404(
        Populaire, short_name=short_name,
        date=datetime.strptime(date, '%d%b%Y').date())
    if uuid != str(pop.uuid) or pop.in_past:
        raise Http404
    rider_list = Rider.objects.filter(
        populaire__short_name=short_name, populaire__date=pop.date)
    email_list = (', '.join(rider.email for rider in rider_list)
                  or 'No riders have registered yet!')
    # Final line restored (it was fused with extraction metadata).
    # NOTE(review): 'mimetype' matches the old Django in use here
    # (django.core.urlresolvers); newer Django renamed it 'content_type'.
    return HttpResponse(email_list, mimetype='text/plain')
from datetime import datetime
from datetime import timedelta
# Django:
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.shortcuts import render_to_response
from django.template import RequestContext
# RandoPony:
from .models import Populaire
from .models import Rider
from .models import RiderForm
from .tasks import update_google_spreadsheet
from .tasks import email_to_rider
from .tasks import email_to_organizer
from ..pasture.helpers import email2words
def populaires_list(request):
"""Display the populaire re-registration welcome information and
list of events in the sidebar.
"""
seven_days_ago = datetime.today().date() - timedelta(days=7)
pop_list = Populaire.objects.exclude(date__lt=(seven_days_ago))
context = RequestContext(request, {
'events': pop_list,
'admin_email': email2words(settings.ADMINS[0][1]),
})
response = render_to_response('populaires/populaires_list.html', context)
return response
def populaire(request, short_name, date, rider_id=None):
"""Display the populaire information, pre-registered riders list,
and sometime the registration confirmation, or duplicate
registration flash message.
"""
pop = get_object_or_404(
Populaire, short_name=short_name,
date=datetime.strptime(date, '%d%b%Y').date())
if pop.in_past:
template = 'pasture/past_event.html'
context = RequestContext(request, {
'event': pop,
'results_url': pop.in_past,
})
else:
rider_list = Rider.objects.filter(
populaire__short_name=short_name, populaire__date=pop.date)
try:
rider = Rider.objects.get(pk=int(rider_id))
except (Rider.DoesNotExist, TypeError):
rider = None
template = 'populaires/populaire.html'
context = RequestContext(request, {
'populaire': pop,
'registration_closed': pop.registration_closed,
'event_started': pop.started,
'rider': rider,
'duplicate_registration': request.path.endswith('duplicate/'),
'rider_list': rider_list,
'show_filler_photo': len(rider_list) < 15,
})
response = render_to_response(template, context)
return response
def registration_form(request, short_name, date):
"""Display populaire pre-registration form page.
"""
pop = get_object_or_404(
Populaire, short_name=short_name,
date=datetime.strptime(date, '%d%b%Y').date())
if pop.registration_closed:
raise Http404
distance_choices = [(int(dist.strip('kms').strip()), dist.strip())
for dist in pop.distance.split(',')]
if request.method == 'POST':
rider = RiderForm(
request.POST, distance_choices=distance_choices,
instance=Rider(populaire=pop))
try:
new_rider = rider.save(commit=False)
except ValueError:
# Validation error, so re-render form with rider inputs
# and error messages
form = rider
else:
url = _process_registration(pop, new_rider, request)
return redirect(url)
else:
# Unbound form to render entry form
form = RiderForm(distance_choices=distance_choices)
context = RequestContext(request, {
'populaire': pop,
'form': form,
'captcha_question':
'Are you a human? Are you a cyclist? Please prove it. '
'A bicycle has ___ wheels. Fill in the blank:',
})
response = render_to_response('populaires/registration_form.html', context)
return response
def _process_registration(populaire, rider, request):
"""Process rider pre-registration for populaire.
"""
try:
# Check for duplicate registration
check_rider = Rider.objects.get(
first_name=rider.first_name, last_name=rider.last_name,
email=rider.email, populaire=populaire)
url = reverse(
'populaires:prereg-duplicate',
args=(populaire.short_name, populaire.date.strftime('%d%b%Y'),
check_rider.id))
except Rider.DoesNotExist:
# Save new rider pre-registration and send emails to
# rider and brevet organizer
rider.save()
update_google_spreadsheet.delay(populaire.pk)
email_to_rider.delay(populaire.pk, rider.pk, request.get_host())
email_to_organizer.delay(populaire.pk, rider.pk, request.get_host())
url = reverse(
'populaires:prereg-confirm',
args=(populaire.short_name, populaire.date.strftime('%d%b%Y'),
rider.id))
return url
def rider_emails(request, short_name, date, uuid):
"""Display a comma separated list of email addresses for the
riders that have pre-registered for a populaire.
The URL that requests this view includes a namespace UUID for the
populaire to provide a measure of protection from email address
collecting 'bots.
Requests for this view more than 7 days after the populaire will
fail with a 404.
"""
pop = get_object_or_404(
Populaire, short_name=short_name,
date=datetime.strptime(date, '%d%b%Y').date())
if uuid != str(pop.uuid) or pop.in_past:
raise Http404
rider_list = Rider.objects.filter(
populaire__short_name=short_name, populaire__date=pop.date)
email_list = (', '.join(rider.email for rider in rider_list)
or 'No riders have registered yet!')
return HttpResponse(email_list, mimetype='text/plain') | 0.535827 | 0.085633 |
import numpy as np
import keras.applications
from keras.layers import Dropout, Dense, BatchNormalization, Flatten
from keras.models import Model
from keras.optimizers import Adam
from helper import constant
from matplotlib import pyplot as plt
class NetworkModel:
    def pretrained_model(self):
        """Return VGG16 pretrained on ImageNet, without its top classifier.

        Input size is taken from the ``constant`` module.

        :return: keras.applications.vgg16.VGG16
        """
        input_tensor = keras.Input(shape=(constant.IMAGE_HEIGHT, constant.IMAGE_WIDTH, constant.CHANNEL_NUMBER))
        return keras.applications.vgg16.VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)
    def create(self, input_model, x):
        """Freeze the base model and compile a new Model ending at ``x``.

        :param input_model: keras.applications.vgg16.VGG16 (base network)
        :param x: output tensor of the new classification head
        :return model: keras.models.Model
        """
        # Freeze all pretrained layers so only the new head trains.
        for layer in input_model.layers:
            layer.trainable = False
        model = Model(inputs=input_model.input, outputs=x)
        model.compile(
            loss=constant.MODEL_LOSS,
            # Learning rate decays over the full training run.
            optimizer=Adam(lr=constant.LEARNING_RATE, decay=constant.LEARNING_RATE / constant.EPOCHS),
            metrics=['accuracy']
        )
        return model
    def plot_example(self, image, label=None):
        """Show a single image, optionally titled with its label.

        :param image: image data (numpy array)
        :param label: image label (str); shown title-cased when given
        :return:
        """
        if label:
            plt.title(label.title())
        plt.imshow(image)
        plt.show()
    def plot_loss_and_accuracy(self, history, args):
        """Plot training/validation loss and accuracy curves and save them.

        :param history: Keras training History (reads the 'acc'/'val_acc'
            keys used by older Keras versions)
        :param args: dictionary of arguments (dict); 'plot' is the
            output image path
        :return:
        """
        plt.style.use("ggplot")
        plt.figure()
        plt.plot(np.arange(0, constant.EPOCHS), history.history["loss"], label="train_loss")
        plt.plot(np.arange(0, constant.EPOCHS), history.history["val_loss"], label="val_loss")
        plt.plot(np.arange(0, constant.EPOCHS), history.history["acc"], label="train_acc")
        plt.plot(np.arange(0, constant.EPOCHS), history.history["val_acc"], label="val_acc")
        plt.title("Loss and Accuracy")
        plt.xlabel("Epoch #")
        plt.ylabel("Loss/Accuracy")
        plt.legend(loc="upper left")
        # Save first, then show interactively.
        plt.savefig(args["plot"])
        plt.show()
    def evaluation_metrics(self, model, x_train, x_test, y_train, y_test):
        """
        Evaluate the model on the train/test sets and print accuracy.

        :param model: keras.models.Model
        :param x_train: training examples (numpy array)
        :param x_test: test examples (numpy array)
        :param y_train: training labels (numpy array)
        :param y_test: test labels (numpy array)
        """
        # evaluate() returns [loss, accuracy]; index 1 is accuracy.
        score_train = model.evaluate(x_train, y_train)
        score_test = model.evaluate(x_test, y_test)
        print('[INFO] Accuracy on the Train Images: ', score_train[1])
        print('[INFO] Accuracy on the Test Images: ', score_test[1])
def get_prediction_and_label(self, model, lb, example):
"""
:param lb: label binarizer class
:param model: keras.models.Model
:param example: example data (numpy array)
:return:
"""
print("[INFO] classifying example...")
predictions = model.predict(example)[0]
index = np.argmax(predictions)
prediction = predictions[index]
label = lb.classes_[index]
return prediction, label
    def add_new_last_layers(self, x):
        """Append the new classification head: flatten -> dense(32, relu)
        -> batch norm -> softmax over ``constant.CLASSES`` classes.

        :param x: output tensor of the frozen base model
        :return: output tensor of the new head
        """
        x = Flatten(name='flatten')(x)
        x = Dense(32, activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dense(constant.CLASSES, activation='softmax')(x)
return x | src/network_model.py | import numpy as np
import keras.applications
from keras.layers import Dropout, Dense, BatchNormalization, Flatten
from keras.models import Model
from keras.optimizers import Adam
from helper import constant
from matplotlib import pyplot as plt
class NetworkModel:
def pretrained_model(self):
"""
:return: keras.applications.vgg16.VGG16
"""
input_tensor = keras.Input(shape=(constant.IMAGE_HEIGHT, constant.IMAGE_WIDTH, constant.CHANNEL_NUMBER))
return keras.applications.vgg16.VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)
def create(self, input_model, x):
"""
:param input_model: keras.applications.vgg16.VGG16
:param x:
:return model: keras.models.Model
"""
for layer in input_model.layers:
layer.trainable = False
model = Model(inputs=input_model.input, outputs=x)
model.compile(
loss=constant.MODEL_LOSS,
optimizer=Adam(lr=constant.LEARNING_RATE, decay=constant.LEARNING_RATE / constant.EPOCHS),
metrics=['accuracy']
)
return model
def plot_example(self, image, label=None):
"""
:param image: image data (numpy array)
:param label: image label (str)
:return:
"""
if label:
plt.title(label.title())
plt.imshow(image)
plt.show()
def plot_loss_and_accuracy(self, history, args):
"""
:param history:
:param args: dictionary of arguments (dict)
:return:
"""
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, constant.EPOCHS), history.history["loss"], label="train_loss")
plt.plot(np.arange(0, constant.EPOCHS), history.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, constant.EPOCHS), history.history["acc"], label="train_acc")
plt.plot(np.arange(0, constant.EPOCHS), history.history["val_acc"], label="val_acc")
plt.title("Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="upper left")
plt.savefig(args["plot"])
plt.show()
def evaluation_metrics(self, model, x_train, x_test, y_train, y_test):
"""
Evaluate the model on the train/test sets
:param model: keras.models.Model
:param x_train: training examples (numpy array)
:param x_test: test examples (numpy array)
:param y_train: training labels (numpy array)
:param y_test: test labels (numpy array)
"""
score_train = model.evaluate(x_train, y_train)
score_test = model.evaluate(x_test, y_test)
print('[INFO] Accuracy on the Train Images: ', score_train[1])
print('[INFO] Accuracy on the Test Images: ', score_test[1])
def get_prediction_and_label(self, model, lb, example):
"""
:param lb: label binarizer class
:param model: keras.models.Model
:param example: example data (numpy array)
:return:
"""
print("[INFO] classifying example...")
predictions = model.predict(example)[0]
index = np.argmax(predictions)
prediction = predictions[index]
label = lb.classes_[index]
return prediction, label
def add_new_last_layers(self, x):
"""
:param x:
:return:
"""
x = Flatten(name='flatten')(x)
x = Dense(32, activation='relu')(x)
x = BatchNormalization()(x)
x = Dense(constant.CLASSES, activation='softmax')(x)
return x | 0.943348 | 0.530662 |
from igem_wikisync import wikisync as sync
from igem_wikisync.logger import logger
import os
import sys
from hashlib import md5
# WikiSync run configuration.
config = {
    'team': 'UTokyo',  # change the team name!
    'src_dir': 'public/',    # local source tree to upload
    'build_dir': 'build/',   # processed output written here
    'year': '2021',          # iGEM wiki year (part of upload URLs)
    'silence_warnings': False,
    'poster_mode': False
}
# Our version of html uploading function!
# This supports MediaWiki Templates.
def upload_html(html_files, browser, config, upload_map):
    """Process and upload HTML files, preserving MediaWiki templates.

    Replacement for wikisync's built-in HTML upload: brace characters
    are temporarily wrapped in marker comments so HTMLparser leaves
    them alone, then ``{{ ... }}`` template calls are re-exposed by
    closing/reopening the surrounding <html> tags.  Files whose md5
    matches the recorded upload_map entry are skipped.  Returns None;
    progress is reported via print/logger.
    """
    for path in html_files.keys():
        file_object = html_files[path]
        path_str = str(file_object.path)
        ext = file_object.extension
        # open file
        try:
            with open(file_object.src_path, 'r', encoding='utf-8') as file:
                contents = file.read()
        except Exception:
            message = f'Could not open/read {file_object.path}. Skipping.'
            print(message)
            # logger.error(message)
            continue  # FIXME Can this be improved?
        # Shield every brace from HTMLparser with marker comments.
        preprocess = '' + contents + ''
        preprocess = preprocess.replace('{', '<!-- replace point -->{<!-- /replace point -->')
        preprocess = preprocess.replace('}', '<!-- replace point -->}<!-- /replace point -->')
        # parse and modify contents
        processed = sync.HTMLparser(
            config, file_object.path, preprocess, upload_map)
        # Unshield the braces, then break out of <html> around template
        # calls so MediaWiki expands them.
        processed = processed.replace('<!-- replace point -->{<!-- /replace point -->', '{')
        processed = processed.replace('<!-- replace point -->}<!-- /replace point -->', '}')
        processed = processed.replace('{{', '</html>{{').replace('}}', '}}<html>')
        # calculate and store md5 hash of the modified contents
        build_hash = md5(processed.encode('utf-8')).hexdigest()
        if upload_map[ext][path_str]['md5'] == build_hash:
            message = f'Contents of {file_object.path} have been uploaded previously. Skipping.'
            print(message)
            logger.info(message)
        else:
            upload_map[ext][path_str]['md5'] = build_hash
            build_path = file_object.build_path
            try:
                # create directory if doesn't exist
                if not os.path.isdir(build_path.parent):
                    os.makedirs(build_path.parent)
                # and write the processed contents
                with open(build_path, 'w', encoding='utf-8') as file:
                    file.write(processed)
            except Exception:
                message = f"Couldn not write {str(file_object.build_path)}. Skipping."
                print(message)
                logger.error(message)
                continue
                # FIXME Can this be improved?
            # upload
            successful = sync.iGEM_upload_page(browser, processed, file_object.upload_URL)
            if not successful:
                message = f'Could not upload {str(file_object.path)}. Skipping.'
                print(message)
                logger.error(message)
                continue
                # FIXME Can this be improved?
            else:
                pass
                # counter[ext] += 1
build_dir = config['build_dir']

# * 2. Load or create upload_map
upload_map = sync.get_upload_map()

# * 3. Create build directory
if not os.path.isdir(build_dir):
    os.mkdir(build_dir)
    # ? error handling here?

# * 4. Get iGEM credentials from environment variables
# NOTE(review): the password lookup was redacted in the source
# ("os.<PASSWORD>.get('<PASSWORD>')"); restored to mirror the username
# lookup — confirm the environment variable name before relying on it.
credentials = {
    'username': os.environ.get('IGEM_USERNAME'),
    'password': os.environ.get('IGEM_PASSWORD')
}

# * 5. Load/create cookie file
browser, cookiejar = sync.get_browser_with_cookies()

# * 6. Login to iGEM
login = sync.iGEM_login(browser, credentials, config)
if not login:
    message = 'Failed to login.'
    # logger.critical(message)
    sys.exit(2)

# # * 7. Save cookies
# # TODO: check if this works, might not
# cookiejar.save()

# * 8. Cache files
files = sync.cache_files(upload_map, config)

# * 9. Upload all assets and create a map
uploaded_assets = sync.upload_and_write_assets(files['other'], browser, upload_map, config)

# Our original implementation.
# If files are in `/template` directory, then uploaded to Template:<Team Name>.
for path in files['html'].keys():
    html_file = files['html'][path]
    if html_file._upload_path.startswith('/template/'):
        upload_map['html'][str(path)]['link_URL'] = f'''https://{config['year']}.igem.org/Template:{config['team']}{html_file._upload_path}'''
        files['html'][path]._upload_URL = f'''https://{config['year']}.igem.org/wiki/index.php?title=Template:{config['team']}{html_file._upload_path}&action=edit'''

# * 10. write upload map just in case
# things go wrong while dealing with code
sync.write_upload_map(upload_map)

# * 11. Build files and upload changed files
# UTokyo modification: only dealing with css and js files
uploaded_code = sync.build_and_upload({
    'html': {},
    'css': files['css'],
    'js': files['js']
}, browser, config, upload_map)

# UTokyo modification: html files go through our template-aware uploader.
# NOTE(review): upload_html() returns None, so uploaded_code_html is
# always None and is not reflected in the summary below.
uploaded_code_html = upload_html(files['html'], browser, config, upload_map)

# * 12. Write final upload map
sync.write_upload_map(upload_map)
sync.print_summary(uploaded_assets, uploaded_code)
from igem_wikisync.logger import logger
import os
import sys
from hashlib import md5
config = {
'team': 'UTokyo', # change the team name!
'src_dir': 'public/',
'build_dir': 'build/',
'year': '2021',
'silence_warnings': False,
'poster_mode': False
}
# Our version of html uploading function!
# This supports MediaWiki Templates.
# Our version of the html uploading function!
# This supports MediaWiki Templates.
def upload_html(html_files, browser, config, upload_map):
    """Upload processed HTML files to the iGEM wiki, Template-aware.

    For each cached HTML file: read the source, run it through WikiSync's
    HTMLparser while protecting literal braces behind marker comments,
    wrap ``{{ ... }}`` template calls in ``</html>...<html>`` so MediaWiki
    expands them, write the result to the build directory, and upload it
    whenever its md5 changed since the previous run.

    :param html_files: mapping of path -> cached HTML file object
    :param browser: logged-in browser session returned by WikiSync
    :param config: WikiSync config dict (team, year, dirs, ...)
    :param upload_map: persistent map of uploaded files and their md5 hashes
    """
    for path, file_object in html_files.items():
        path_str = str(file_object.path)
        ext = file_object.extension
        # Read the source file; skip this file (not the whole batch) on failure.
        try:
            with open(file_object.src_path, 'r', encoding='utf-8') as file:
                contents = file.read()
        except (OSError, UnicodeDecodeError):
            message = f'Could not open/read {file_object.path}. Skipping.'
            print(message)
            logger.error(message)
            continue
        # Hide every brace from HTMLparser behind marker comments so the
        # parser cannot mangle MediaWiki template syntax.
        preprocess = contents
        preprocess = preprocess.replace('{', '<!-- replace point -->{<!-- /replace point -->')
        preprocess = preprocess.replace('}', '<!-- replace point -->}<!-- /replace point -->')
        # Parse and modify contents (rewrites internal links / asset URLs).
        processed = sync.HTMLparser(
            config, file_object.path, preprocess, upload_map)
        processed = processed.replace('<!-- replace point -->{<!-- /replace point -->', '{')
        processed = processed.replace('<!-- replace point -->}<!-- /replace point -->', '}')
        # Close/reopen the <html> tag around {{...}} so MediaWiki expands it.
        processed = processed.replace('{{', '</html>{{').replace('}}', '}}<html>')
        # Skip the upload when the processed content is unchanged.
        build_hash = md5(processed.encode('utf-8')).hexdigest()
        if upload_map[ext][path_str]['md5'] == build_hash:
            message = f'Contents of {file_object.path} have been uploaded previously. Skipping.'
            print(message)
            logger.info(message)
            continue
        upload_map[ext][path_str]['md5'] = build_hash
        build_path = file_object.build_path
        try:
            # exist_ok avoids the check-then-create race of the original.
            os.makedirs(build_path.parent, exist_ok=True)
            with open(build_path, 'w', encoding='utf-8') as file:
                file.write(processed)
        except (OSError, UnicodeEncodeError):
            # Typo fixed: message previously read "Couldn not write".
            message = f"Could not write {str(file_object.build_path)}. Skipping."
            print(message)
            logger.error(message)
            continue
        # Upload the page. NOTE(review): assumes the cached object exposes
        # `upload_URL` (the caller sets `_upload_URL`) -- TODO confirm.
        successful = sync.iGEM_upload_page(browser, processed, file_object.upload_URL)
        if not successful:
            message = f'Could not upload {str(file_object.path)}. Skipping.'
            print(message)
            logger.error(message)
build_dir = config['build_dir']

# * 2. Load or create upload_map
upload_map = sync.get_upload_map()

# * 3. Create build directory (no-op if it already exists)
os.makedirs(build_dir, exist_ok=True)

# * 4. Get iGEM credentials from environment variables
credentials = {
    'username': os.environ.get('IGEM_USERNAME'),
    # NOTE(review): this variable name was redacted in the source dump;
    # 'IGEM_PASSWORD' matches the username convention -- confirm.
    'password': os.environ.get('IGEM_PASSWORD')
}

# * 5. Load/create cookie file
browser, cookiejar = sync.get_browser_with_cookies()

# * 6. Login to iGEM
login = sync.iGEM_login(browser, credentials, config)
if not login:
    # The failure message was previously assigned but never shown.
    message = 'Failed to login.'
    print(message)
    logger.critical(message)
    sys.exit(2)

# # * 7. Save cookies
# # TODO: check if this works, might not
# cookiejar.save()

# * 8. Cache files
files = sync.cache_files(upload_map, config)

# * 9. Upload all assets and create a map
uploaded_assets = sync.upload_and_write_assets(files['other'], browser, upload_map, config)

# Our original implementation:
# files under `/template/` are uploaded to Template:<Team Name> pages.
for path, html_file in files['html'].items():
    if html_file._upload_path.startswith('/template/'):
        upload_map['html'][str(path)]['link_URL'] = (
            f"https://{config['year']}.igem.org/Template:"
            f"{config['team']}{html_file._upload_path}"
        )
        html_file._upload_URL = (
            f"https://{config['year']}.igem.org/wiki/index.php?title=Template:"
            f"{config['team']}{html_file._upload_path}&action=edit"
        )

# * 10. Write upload map just in case things go wrong while dealing with code
sync.write_upload_map(upload_map)

# * 11. Build files and upload changed files
# UTokyo modification: only dealing with css and js files here
uploaded_code = sync.build_and_upload({
    'html': {},
    'css': files['css'],
    'js': files['js']
}, browser, config, upload_map)

# UTokyo modification: html files go through our Template-aware uploader
uploaded_code_html = upload_html(files['html'], browser, config, upload_map)

# * 12. Write final upload map
sync.write_upload_map(upload_map)
sync.print_summary(uploaded_assets, uploaded_code)
from random import randint
from unicodedata import normalize
def remover_acentos(string):
    """Return the given string lower-cased and with diacritical marks removed."""
    # NFD decomposition splits each accented letter into base character +
    # combining mark; encoding to ASCII with 'ignore' then drops the marks.
    normalizado = normalize('NFD', string)
    return normalizado.encode('ascii', 'ignore').decode('utf8').lower()
def validar_entrada(string):
    """Return True if the given string is exactly one character long."""
    return len(string) == 1
def obter_palavra():
    """Return a random word from 'palavras_faceis.txt' (one word per line).

    Raises OSError if the word file cannot be opened.
    """
    # `with` guarantees the file is closed even if reading fails.
    with open('palavras_faceis.txt', 'r', encoding='UTF-8') as arquivo:
        lista_de_palavras = arquivo.read().split('\n')
    # BUG FIX: randint is inclusive at both ends, so the upper bound must be
    # len - 1; the original randint(0, len(...)) could raise IndexError.
    sorteio = randint(0, len(lista_de_palavras) - 1)
    return lista_de_palavras[sorteio]
def main():
    """Run the hangman game loop until the word is fully revealed."""
    palavra = obter_palavra()
    # Hoisted out of the loop: the secret word never changes.
    palavra_normalizada = remover_acentos(palavra)
    parcial = "_" * len(palavra)  # one underscore per letter of the word
    n_tentativas = 0
    erros = ""
    while True:
        entrada = input("-- Entre com uma letra --> ").lower()
        if validar_entrada(entrada):  # only single-character guesses count
            n_tentativas += 1
            # Debug print of the secret word removed -- it gave the answer away.
            print(f"A letra aparece {palavra_normalizada.count(entrada)} vezes")
            print(f"Tentativas = {n_tentativas}")
            # BUG FIX: compare against the accent-free word. The original used
            # palavra.lower(), so guessing 'e' for a word containing only 'é'
            # was wrongly recorded as an error.
            if remover_acentos(entrada) not in palavra_normalizada:
                erros += entrada + " "
            # Reveal every (possibly accented) letter matching the guess.
            parcial_lista = list(parcial)
            for contador, letra in enumerate(palavra):
                if remover_acentos(letra) == entrada:
                    parcial_lista[contador] = letra
            parcial = "".join(parcial_lista)
            print('\n')
            print(parcial)
            print(f"Erros: {erros.upper()}")
            if "_" not in parcial:
                print("Game Over\nParabéns!")
                break
if __name__ == "__main__":
main() | jogo_da_forca.py | from random import randint
from unicodedata import normalize
def remover_acentos(string):
    """Return *string* lower-cased and stripped of diacritical marks."""
    # Decompose accented characters (NFD), drop the non-ASCII combining
    # marks, then lower-case what remains.
    sem_marcas = normalize('NFD', string).encode('ascii', 'ignore')
    return sem_marcas.decode('utf8').lower()
def validar_entrada(string):
    """Return True when *string* is exactly one character long."""
    tem_um_caractere = len(string) == 1
    return tem_um_caractere
def obter_palavra():
    """Return a random word from 'palavras_faceis.txt' (one word per line).

    Raises OSError if the word file cannot be opened.
    """
    # `with` guarantees the file is closed even if reading fails.
    with open('palavras_faceis.txt', 'r', encoding='UTF-8') as arquivo:
        lista_de_palavras = arquivo.read().split('\n')
    # BUG FIX: randint is inclusive at both ends, so the upper bound must be
    # len - 1; the original randint(0, len(...)) could raise IndexError.
    sorteio = randint(0, len(lista_de_palavras) - 1)
    return lista_de_palavras[sorteio]
def main():
    """Run the hangman game loop until the word is fully revealed."""
    palavra = obter_palavra()
    # Hoisted out of the loop: the secret word never changes.
    palavra_normalizada = remover_acentos(palavra)
    parcial = "_" * len(palavra)  # one underscore per letter of the word
    n_tentativas = 0
    erros = ""
    while True:
        entrada = input("-- Entre com uma letra --> ").lower()
        if validar_entrada(entrada):  # only single-character guesses count
            n_tentativas += 1
            # Debug print of the secret word removed -- it gave the answer away.
            print(f"A letra aparece {palavra_normalizada.count(entrada)} vezes")
            print(f"Tentativas = {n_tentativas}")
            # BUG FIX: compare against the accent-free word. The original used
            # palavra.lower(), so guessing 'e' for a word containing only 'é'
            # was wrongly recorded as an error.
            if remover_acentos(entrada) not in palavra_normalizada:
                erros += entrada + " "
            # Reveal every (possibly accented) letter matching the guess.
            parcial_lista = list(parcial)
            for contador, letra in enumerate(palavra):
                if remover_acentos(letra) == entrada:
                    parcial_lista[contador] = letra
            parcial = "".join(parcial_lista)
            print('\n')
            print(parcial)
            print(f"Erros: {erros.upper()}")
            if "_" not in parcial:
                print("Game Over\nParabéns!")
                break
if __name__ == "__main__":
main() | 0.576184 | 0.406273 |
for i in range(5):
print(i)
''' Output:
0
1
2
3
4
'''
# RANGE and CONTINUE
# ==========================
for i in range(5):
if i == 3:
continue
print(i)
''' Output:
0
1
2
4
'''
# RANGE and BREAK
# ==========================
for i in range(5):
if i == 3:
break
print(i)
''' Output:
0
1
2
'''
# RANGE - BREAK and ELSE
# ==========================
for i in range(1, 8):
print(i)
if i % 7 == 0:
print("multiplo di 7 trovato")
break
else:
print("nessun multiplo di 7 nell'intervallo")
''' Output:
1
2
3
4
nessun multiplo di 7 nell'intervallo
'''
# RANGE - TRY and CONTINUE
# ==========================
for i in range(5):
print('-' * 20)
try:
10 / (i - 3)
except ZeroDivisionError:
print("diviso per 0")
continue
finally:
print("esegui sempre")
print(i)
''' Output:
--------------------
esegui sempre
0
--------------------
esegui sempre
1
--------------------
esegui sempre
2
--------------------
diviso per 0
esegui sempre
--------------------
esegui sempre
4
'''
# LIST
# ==========================
for i in [1, 2, 3, 4]:
    print(i)
''' Output:
1
2
3
4
'''
# TUPLA
# ==========================
for i in ('a', 'b', 'c', 4):
print(i)
''' Output:
a
b
c
4
'''
# LIST contain TUPLA
# ==========================
for i in [(1, 2), (3, 4), (5, 6)]:
print(i)
''' Output:
(1, 2)
(3, 4)
(5, 6)
'''
for i, j in [(1, 2), (3, 4), (5, 6)]:
print(i, j)
''' Output:
1 2
3 4
5 6
'''
# STRING
# ==========================
for i in 'hello':
print(i)
''' Output:
h
e
l
l
o
'''
# STRING
# ==========================
s = 'hello'
for i in s:
print(i)
''' Output:
h
e
l
l
o
'''
# STRING and INDEX
# ==========================
s = 'hello'
idx = 0
for c in s:
print(idx, c)
idx += 1
''' Output:
0 h
1 e
2 l
3 l
4 o
'''
# STRING and INDEX
# Metodo migliore
# ==========================
s = 'hello'
for i in range(len(s)):
print(i, s[i])
''' Output:
0 h
1 e
2 l
3 l
4 o
'''
# STRING and INDEX
# ==========================
s = 'hello'
for i, c in enumerate(s):
print(i, c)
''' Output:
0 h
1 e
2 l
3 l
4 o
''' | ForLoop.py | for i in range(5):
print(i)
''' Output:
0
1
2
3
4
'''
# RANGE and CONTINUE
# ==========================
for i in range(5):
if i == 3:
continue
print(i)
''' Output:
0
1
2
4
'''
# RANGE and BREAK
# ==========================
for i in range(5):
if i == 3:
break
print(i)
''' Output:
0
1
2
'''
# RANGE - BREAK and ELSE
# ==========================
for i in range(1, 8):
print(i)
if i % 7 == 0:
print("multiplo di 7 trovato")
break
else:
print("nessun multiplo di 7 nell'intervallo")
''' Output:
1
2
3
4
nessun multiplo di 7 nell'intervallo
'''
# RANGE - TRY and CONTINUE
# ==========================
for i in range(5):
print('-' * 20)
try:
10 / (i - 3)
except ZeroDivisionError:
print("diviso per 0")
continue
finally:
print("esegui sempre")
print(i)
''' Output:
--------------------
esegui sempre
0
--------------------
esegui sempre
1
--------------------
esegui sempre
2
--------------------
diviso per 0
esegui sempre
--------------------
esegui sempre
4
'''
# LIST
# ==========================
for i in [1, 2, 3, 4]:
    print(i)
''' Output:
1
2
3
4
'''
# TUPLA
# ==========================
for i in ('a', 'b', 'c', 4):
print(i)
''' Output:
a
b
c
4
'''
# LIST contain TUPLA
# ==========================
for i in [(1, 2), (3, 4), (5, 6)]:
print(i)
''' Output:
(1, 2)
(3, 4)
(5, 6)
'''
for i, j in [(1, 2), (3, 4), (5, 6)]:
print(i, j)
''' Output:
1 2
3 4
5 6
'''
# STRING
# ==========================
for i in 'hello':
print(i)
''' Output:
h
e
l
l
o
'''
# STRING
# ==========================
s = 'hello'
for i in s:
print(i)
''' Output:
h
e
l
l
o
'''
# STRING and INDEX
# ==========================
s = 'hello'
idx = 0
for c in s:
print(idx, c)
idx += 1
''' Output:
0 h
1 e
2 l
3 l
4 o
'''
# STRING and INDEX
# Metodo migliore
# ==========================
s = 'hello'
for i in range(len(s)):
print(i, s[i])
''' Output:
0 h
1 e
2 l
3 l
4 o
'''
# STRING and INDEX
# ==========================
s = 'hello'
for i, c in enumerate(s):
print(i, c)
''' Output:
0 h
1 e
2 l
3 l
4 o
''' | 0.039426 | 0.165593 |
from torch import Tensor
from torch.nn import CrossEntropyLoss
from torch.nn import BCEWithLogitsLoss
from leanai.training.losses.loss import Loss
from leanai.training.loss_registry import register_loss
@register_loss()
class SparseCrossEntropyLossFromLogits(Loss):
    def __init__(self, reduction: str = "mean"):
        """
        Compute a sparse cross entropy.
        This means that the preds are logits and the targets are not one hot encoded.
        :param reduction: Specifies the reduction to apply to the output: `'none'` | `'mean'` | `'sum'`. `'none'`: no reduction will be applied, `'mean'`: the sum of the output will be divided by the number of elements in the output, `'sum'`: the output will be summed. Default: 'mean'.
        """
        super().__init__()
        self.loss_fun = CrossEntropyLoss(reduction=reduction)

    def forward(self, y_pred, y_true):
        """
        Compute the sparse cross entropy assuming y_pred to be logits.
        :param y_pred: The predictions of the network. Either a NamedTuple pointing at ITensors or a Dict or Tuple of ITensors.
        :param y_true: The desired outputs of the network (labels). Either a NamedTuple pointing at ITensors or a Dict or Tuple of ITensors.
        """
        # Non-Tensor containers are unwrapped via their .class_id field.
        if not isinstance(y_true, Tensor):
            y_true = y_true.class_id
        if not isinstance(y_pred, Tensor):
            y_pred = y_pred.class_id
        # CrossEntropyLoss requires integer class indices.
        y_true = y_true.long()
        # Squeeze a trailing singleton class dimension: (N, 1, ...) -> (N, ...).
        if len(y_true.shape) == len(y_pred.shape) and y_true.shape[1] == 1:
            y_true = y_true[:, 0]
        return self.loss_fun(y_pred, y_true)
@register_loss()
class BinaryCrossEntropyLossFromLogits(Loss):
    def __init__(self, reduction: str = "mean"):
        """
        Compute a binary cross entropy.
        This means that the preds are logits and the targets are a binary (1 or 0) tensor of same shape as logits.
        :param reduction: Specifies the reduction to apply to the output: `'none'` | `'mean'` | `'sum'`. `'none'`: no reduction will be applied, `'mean'`: the sum of the output will be divided by the number of elements in the output, `'sum'`: the output will be summed. Default: 'mean'.
        """
        super().__init__()
        self.loss_fun = BCEWithLogitsLoss(reduction=reduction)

    def forward(self, y_pred, y_true):
        """
        Compute the binary cross entropy assuming y_pred to be logits.
        (Docstring fixed: it previously said "sparse cross entropy".)
        :param y_pred: The predictions of the network. Either a NamedTuple pointing at ITensors or a Dict or Tuple of ITensors.
        :param y_true: The desired outputs of the network (labels). Either a NamedTuple pointing at ITensors or a Dict or Tuple of ITensors.
        """
        # Non-Tensor containers are unwrapped via their .class_id field.
        if not isinstance(y_true, Tensor):
            y_true = y_true.class_id
        if not isinstance(y_pred, Tensor):
            y_pred = y_pred.class_id
        return self.loss_fun(y_pred, y_true)
@register_loss()
class SparseCategoricalAccuracy(Loss):
    def __init__(self, reduction: str = "mean", axis=-1):
        """
        Compute the sparse categorical accuracy.
        (Docstring fixed: it previously said "sparse mean squared error".)
        Sparse means that the targets are not one hot encoded.
        :param reduction: Specifies the reduction to apply to the output: `'none'` | `'mean'` | `'sum'`. `'none'`: no reduction will be applied, `'mean'`: the sum of the output will be divided by the number of elements in the output, `'sum'`: the output will be summed. Default: 'mean'.
        :param axis: The axis of y_pred that holds the class scores (default: last).
        """
        super().__init__()
        self.reduction = reduction
        self.axis = axis

    def forward(self, y_pred, y_true):
        """
        Compute the sparse categorical accuracy.
        :param y_pred: The predictions of the network. Either a NamedTuple pointing at ITensors or a Dict or Tuple of ITensors.
        :param y_true: The desired outputs of the network (labels). Either a NamedTuple pointing at ITensors or a Dict or Tuple of ITensors.
        """
        # Non-Tensor containers are unwrapped via their .class_id field.
        if not isinstance(y_true, Tensor):
            y_true = y_true.class_id
        if not isinstance(y_pred, Tensor):
            y_pred = y_pred.class_id
        pred_class = y_pred.argmax(dim=self.axis)
        true_class = y_true.long()
        correct_predictions = pred_class == true_class
        # NOTE(review): argmax already removed `self.axis` from pred_class, so
        # this mean reduces over a different axis of the comparison than the
        # class axis -- presumably intended as a per-sample mean; confirm.
        loss = correct_predictions.float().mean(dim=self.axis)
        if self.reduction == "none":
            return loss
        elif self.reduction == "sum":
            return loss.sum()
        elif self.reduction == "mean":
            return loss.mean()
        # NOTE(review): an unrecognised reduction silently returns None here.
from torch.nn import CrossEntropyLoss
from torch.nn import BCEWithLogitsLoss
from leanai.training.losses.loss import Loss
from leanai.training.loss_registry import register_loss
@register_loss()
class SparseCrossEntropyLossFromLogits(Loss):
    def __init__(self, reduction: str = "mean"):
        """
        Sparse cross entropy over logits.

        The predictions are raw logits and the targets are class indices
        (not one-hot encoded).

        :param reduction: Reduction applied to the output: `'none'` | `'mean'`
            | `'sum'`, forwarded to ``torch.nn.CrossEntropyLoss``.
            Default: 'mean'.
        """
        super().__init__()
        self.loss_fun = CrossEntropyLoss(reduction=reduction)

    def forward(self, y_pred, y_true):
        """
        Compute the sparse cross entropy assuming y_pred to be logits.

        :param y_pred: Network predictions; a Tensor or a container exposing
            the logits under ``.class_id``.
        :param y_true: Target class indices; a Tensor or a container exposing
            them under ``.class_id``.
        """
        # Unwrap container types down to plain tensors.
        target = y_true if isinstance(y_true, Tensor) else y_true.class_id
        logits = y_pred if isinstance(y_pred, Tensor) else y_pred.class_id
        target = target.long()
        # Squeeze a trailing singleton class dimension: (N, 1, ...) -> (N, ...).
        if target.dim() == logits.dim() and target.shape[1] == 1:
            target = target[:, 0]
        return self.loss_fun(logits, target)
@register_loss()
class BinaryCrossEntropyLossFromLogits(Loss):
    def __init__(self, reduction: str = "mean"):
        """
        Compute a binary cross entropy.
        This means that the preds are logits and the targets are a binary (1 or 0) tensor of same shape as logits.
        :param reduction: Specifies the reduction to apply to the output: `'none'` | `'mean'` | `'sum'`. `'none'`: no reduction will be applied, `'mean'`: the sum of the output will be divided by the number of elements in the output, `'sum'`: the output will be summed. Default: 'mean'.
        """
        super().__init__()
        self.loss_fun = BCEWithLogitsLoss(reduction=reduction)

    def forward(self, y_pred, y_true):
        """
        Compute the binary cross entropy assuming y_pred to be logits.
        (Docstring fixed: it previously said "sparse cross entropy".)
        :param y_pred: The predictions of the network. Either a NamedTuple pointing at ITensors or a Dict or Tuple of ITensors.
        :param y_true: The desired outputs of the network (labels). Either a NamedTuple pointing at ITensors or a Dict or Tuple of ITensors.
        """
        # Non-Tensor containers are unwrapped via their .class_id field.
        if not isinstance(y_true, Tensor):
            y_true = y_true.class_id
        if not isinstance(y_pred, Tensor):
            y_pred = y_pred.class_id
        return self.loss_fun(y_pred, y_true)
@register_loss()
class SparseCategoricalAccuracy(Loss):
    def __init__(self, reduction: str = "mean", axis=-1):
        """
        Compute the sparse categorical accuracy.
        (Docstring fixed: it previously said "sparse mean squared error".)
        Sparse means that the targets are not one hot encoded.
        :param reduction: Specifies the reduction to apply to the output: `'none'` | `'mean'` | `'sum'`. `'none'`: no reduction will be applied, `'mean'`: the sum of the output will be divided by the number of elements in the output, `'sum'`: the output will be summed. Default: 'mean'.
        :param axis: The axis of y_pred that holds the class scores (default: last).
        """
        super().__init__()
        self.reduction = reduction
        self.axis = axis

    def forward(self, y_pred, y_true):
        """
        Compute the sparse categorical accuracy.
        :param y_pred: The predictions of the network. Either a NamedTuple pointing at ITensors or a Dict or Tuple of ITensors.
        :param y_true: The desired outputs of the network (labels). Either a NamedTuple pointing at ITensors or a Dict or Tuple of ITensors.
        :raises ValueError: If the configured reduction is not one of 'none', 'sum', 'mean'.
        """
        # Non-Tensor containers are unwrapped via their .class_id field.
        if not isinstance(y_true, Tensor):
            y_true = y_true.class_id
        if not isinstance(y_pred, Tensor):
            y_pred = y_pred.class_id
        pred_class = y_pred.argmax(dim=self.axis)
        true_class = y_true.long()
        correct_predictions = pred_class == true_class
        # NOTE(review): argmax already removed `self.axis` from pred_class, so
        # this mean reduces over a different axis of the comparison than the
        # class axis -- presumably intended as a per-sample mean; confirm.
        loss = correct_predictions.float().mean(dim=self.axis)
        if self.reduction == "none":
            return loss
        elif self.reduction == "sum":
            return loss.sum()
        elif self.reduction == "mean":
            return loss.mean()
        # BUG FIX: an unrecognised reduction previously returned None silently.
        raise ValueError(f"Unknown reduction: {self.reduction!r}")
import torch
import torch.nn as nn
import torch.nn.functional as F
from .util import d
class GRU(nn.Module):
    """
    GRU-based recurrent network for classifying sequences.
    (Docstring fixed: it previously claimed this was a Transformer.)
    """
    def __init__(self, emb, depth, hidden_size, seq_length, num_tokens, num_classes,
                 ff_hidden_mult=2, dropout=0.0, directions=1):
        """
        :param emb: Embedding dimension
        :param depth: Number of stacked GRU layers
        :param hidden_size: Size of the GRU hidden state
        :param seq_length: Expected maximum sequence length (only used by the
            commented-out positional embedding below)
        :param num_tokens: Number of tokens (usually words) in the vocabulary
        :param num_classes: Number of classes.
        :param ff_hidden_mult: Width multiplier for the hidden layer of the
            output feed-forward head
        :param dropout: Dropout probability (applied to embeddings and between
            GRU layers)
        :param directions: 1 for a uni-directional GRU, 2 for bi-directional
        """
        super().__init__()
        self.num_tokens, self.hidden_size = num_tokens, hidden_size
        self.token_embedding = nn.Embedding(embedding_dim=emb, num_embeddings=num_tokens)
        # self.pos_embedding = nn.Embedding(embedding_dim=emb, num_embeddings=seq_length)
        self.gru = nn.GRU(emb, self.hidden_size, depth, batch_first=True,
                          dropout=dropout, bidirectional=(directions==2))
        # Two-layer MLP head mapping the final hidden state to class scores.
        self.ff = nn.Sequential(
            nn.Linear(directions*self.hidden_size, ff_hidden_mult * directions*self.hidden_size),
            nn.ReLU(),
            nn.Linear(ff_hidden_mult * directions*self.hidden_size, num_classes)
        )
        self.do = nn.Dropout(dropout)

    def forward(self, x):
        """
        :param x: Tuple of (batch-by-seq_length integer tensor of token
            indices, per-sequence length tensor).
        :return: Log-probability vector over the num_classes classes for each
            sequence in the batch.
            (Docstring fixed: it previously described per-token predictions.)
        """
        x, lens = x
        x = self.token_embedding(x)
        b, t, e = x.size()
        x = self.do(x)
        hs, hn = self.gru(x)
        # extract hidden at last non-pad token
        x = hs[torch.arange(b, device=d()), torch.clamp(lens-1, max=t-1), :]
        x = self.ff(x)
        return F.log_softmax(x, dim=1)
import torch.nn as nn
import torch.nn.functional as F
from .util import d
class GRU(nn.Module):
    """
    GRU-based recurrent network for classifying sequences.
    (Docstring fixed: it previously claimed this was a Transformer.)
    """
    def __init__(self, emb, depth, hidden_size, seq_length, num_tokens, num_classes,
                 ff_hidden_mult=2, dropout=0.0, directions=1):
        """
        :param emb: Embedding dimension
        :param depth: Number of stacked GRU layers
        :param hidden_size: Size of the GRU hidden state
        :param seq_length: Expected maximum sequence length (only used by the
            commented-out positional embedding below)
        :param num_tokens: Number of tokens (usually words) in the vocabulary
        :param num_classes: Number of classes.
        :param ff_hidden_mult: Width multiplier for the hidden layer of the
            output feed-forward head
        :param dropout: Dropout probability (applied to embeddings and between
            GRU layers)
        :param directions: 1 for a uni-directional GRU, 2 for bi-directional
        """
        super().__init__()
        self.num_tokens, self.hidden_size = num_tokens, hidden_size
        self.token_embedding = nn.Embedding(embedding_dim=emb, num_embeddings=num_tokens)
        # self.pos_embedding = nn.Embedding(embedding_dim=emb, num_embeddings=seq_length)
        self.gru = nn.GRU(emb, self.hidden_size, depth, batch_first=True,
                          dropout=dropout, bidirectional=(directions==2))
        # Two-layer MLP head mapping the final hidden state to class scores.
        self.ff = nn.Sequential(
            nn.Linear(directions*self.hidden_size, ff_hidden_mult * directions*self.hidden_size),
            nn.ReLU(),
            nn.Linear(ff_hidden_mult * directions*self.hidden_size, num_classes)
        )
        self.do = nn.Dropout(dropout)

    def forward(self, x):
        """
        :param x: Tuple of (batch-by-seq_length integer tensor of token
            indices, per-sequence length tensor).
        :return: Log-probability vector over the num_classes classes for each
            sequence in the batch.
            (Docstring fixed: it previously described per-token predictions.)
        """
        x, lens = x
        x = self.token_embedding(x)
        b, t, e = x.size()
        x = self.do(x)
        hs, hn = self.gru(x)
        # extract hidden at last non-pad token
        x = hs[torch.arange(b, device=d()), torch.clamp(lens-1, max=t-1), :]
        x = self.ff(x)
        return F.log_softmax(x, dim=1)
import discord
from discord.ext import commands
import database as db
import variables as var
from functions import get_prefix
def has_command_permission():
    """Build a commands.check that allows a command only for DB-granted roles.

    The check fails open: when the guild has no permissions document, no entry
    for this plugin/command, or an empty permitted-role list, everyone may run
    the command.
    """
    async def predicate(ctx: commands.Context):
        plugin_name = ctx.cog.__cog_name__
        cmd_name = ctx.command.name
        guild_doc = await db.PERMISSIONS.find_one({"_id": ctx.guild.id})
        try:
            permitted_roles = set(guild_doc[plugin_name][cmd_name])
        except (KeyError, TypeError):
            # KeyError: no entry for this plugin/command.
            # TypeError: no permissions document at all (guild_doc is None);
            # BUG FIX: this previously raised instead of falling open.
            return True
        if not permitted_roles:
            return True
        # BUG FIX: the original implicitly returned None when no role matched;
        # return an explicit bool instead.
        return any(role.id in permitted_roles for role in ctx.author.roles)
    return commands.check(predicate)
class Permissions(commands.Cog):
    """Cog for viewing and editing per-role command permissions of a guild.

    Permissions are stored in db.PERMISSIONS as one document per guild shaped
    as {plugin_name: {command_name: [role_id, ...], ...}, ...}.
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="allperms")
    async def all_perms(self, ctx):
        """Send an embed listing every plugin's command -> permitted roles."""
        embed = discord.Embed(
            title=f"Command role permissions", color=var.C_MAIN
        )
        # The {"_id": 0} projection drops the _id key so only plugin
        # sub-documents are iterated below.
        guild_doc = await db.PERMISSIONS.find_one(
            {"_id": ctx.guild.id}, {"_id": 0}
        )
        for i in guild_doc:
            perms = guild_doc[i]
            cmds = [x for x in perms]
            roles = [x for x in perms.values()]
            value = ""
            for c in cmds:
                role_ids = roles[cmds.index(c)]
                mentioned = [f"<@&{x}>" for x in role_ids]
                stringed = ", ".join(mentioned)
                value += f"{c}: {stringed}\n"
            if guild_doc[i] == {}:
                # NOTE(review): relies on discord.py's handling of a None
                # field value for plugins without permissions -- confirm.
                value = None
            embed.add_field(name=i, value=value, inline=False)
        await ctx.send(embed=embed)

    @commands.command(
        name="setperm",
        aliases=["setpermission", "addperm", "addpermission"]
    )
    @commands.has_permissions(administrator=True)
    async def set_perm(self, ctx, plugin=None):
        """Interactively grant a role permission to use a command of *plugin*."""
        # Plugins whose commands can be permission-managed.
        cogs = [
            'Leveling',
            'Moderation',
            'ReactionRoles',
            'Welcome',
            'Verification',
            'Chatbot',
            'AutoMod',
            "Karma",
            'Fun',
            'Giveaway'
        ]
        if plugin is not None and plugin.lower() in [i.lower() for i in cogs]:
            embed = discord.Embed(
                title=f"All commands for {plugin}",
                color=var.C_GREEN
            ).add_field(
                name="Note",
                value=(
                    "Make sure to not enter the command name with the prefix,"
                    " that would trigger the command. Just enter the command"
                    " name followed by a space and then role"
                    " (ID or Mention can be used)"
                )
            )
            # Map the (case-insensitive) user input onto the exact cog name.
            if plugin.lower() == "reactionroles":
                plugin_name = "ReactionRoles"
            elif plugin.lower() == "automod":
                plugin_name = "AutoMod"
            else:
                plugin_name = plugin.capitalize()
            desc = (
                "Type the name of the command (without prefix) and the role "
                "with a space to let members with that role be able to use the"
                " command\n Type `cancel` to stop the process\n\n"
            )
            for i in self.bot.cogs[plugin_name].walk_commands():
                desc += f"`{i}`\n"
            embed.description = desc
            await ctx.send(embed=embed)

            def message_check(message):
                # Only accept replies from the invoking user in this channel.
                return (
                    message.author == ctx.author
                    and message.channel.id == ctx.channel.id
                )
            # Prompt loop: repeats until a valid "<command> <role>" pair is
            # stored or the user cancels.
            while True:
                user_msg = await self.bot.wait_for(
                    "message", check=message_check
                )
                if user_msg.content in ["cancel", "`cancel`", "```cancel```"]:
                    await ctx.send(
                        f"Cancelled permissions change for {plugin} plugin")
                    break
                else:
                    guild_doc = await db.PERMISSIONS.find_one(
                        {"_id": ctx.guild.id}
                    )
                    data = user_msg.content.split(" ")
                    if len(data) != 2:
                        await ctx.send(
                            embed=discord.Embed(
                                title="Invalid format",
                                description=(
                                    "You don't need to start over again,"
                                    " just send the message in correct "
                                    "format as shown below"
                                ),
                                color=var.C_ORANGE
                            ).add_field(
                                name="Format",
                                value="`command_name role`"
                            ).set_footer(
                                text=(
                                    "Don't enter the command name with prefix, "
                                    "that would trigger the command, "
                                    "just write the command name"
                                )
                            )
                        )
                    elif data[0].lower() not in [
                        str(i).lower()
                        for i in self.bot.cogs[plugin_name].walk_commands()
                    ]:
                        # NOTE(review): the message text below contains a stray
                        # second backtick after the command name.
                        await ctx.send(
                            embed=discord.Embed(
                                title="Command not found",
                                description=(
                                    f"There is no command named "
                                    f"`{data[0].lower()}`` in"
                                    f" **{plugin_name}**. "
                                    f"Try again with correct command in"
                                    f" {plugin_name} plugin"
                                ),
                                color=var.C_ORANGE
                            )
                        )
                    elif (
                        data[1].strip("<>@&").isnumeric() == False or
                        ctx.guild.get_role(int(data[1].strip("<>@&"))) is None
                    ):
                        # Accepts either a raw role ID or a <@&id> mention.
                        await ctx.send(
                            embed=discord.Embed(
                                title="Role not found",
                                description=(
                                    f"There is no role with the ID `{data[1]}`."
                                    " Try again with correct role mention or ID"
                                ),
                                color=var.C_ORANGE
                            )
                        )
                    elif (
                        data[0].lower() in guild_doc[plugin_name].keys()
                        and (
                            int(data[1].strip("<>@&"))
                            in guild_doc[plugin_name][data[0].lower()]
                        )
                    ):
                        # Role already granted for this command.
                        mention = ctx.guild.get_role(
                            int(data[1].strip('<>@&'))
                        ).mention
                        await ctx.send(
                            embed=discord.Embed(
                                description=(
                                    f"{mention}"
                                    f" role already has permissions for"
                                    f" **{data[0].lower()}**"
                                ),
                                color=var.C_RED
                            )
                        )
                    else:
                        # Valid input: append the role id to the command's
                        # permitted-role list and persist the plugin map.
                        guild_doc = await db.PERMISSIONS.find_one(
                            {"_id": ctx.guild.id}
                        )
                        role = ctx.guild.get_role(int(data[1].strip("<>@&")))
                        plugin_dict = guild_doc[plugin_name]
                        new_dict = plugin_dict.copy()
                        try:
                            current_list = plugin_dict[data[0].lower()]
                        except KeyError:
                            # First permission for this command.
                            current_list = []
                        new_list = current_list.copy()
                        new_list.append(role.id)
                        new_dict.update({data[0].lower(): new_list})
                        new_data = {
                            "$set": {
                                plugin_name: new_dict
                            }
                        }
                        await db.PERMISSIONS.update_one(guild_doc, new_data)
                        await ctx.send(
                            embed=discord.Embed(
                                title="Successfully updated permissions",
                                description=(
                                    f"{var.E_ACCEPT} Users with {role.mention}"
                                    f" can now use the command {data[0].lower()}"
                                ),
                                color=var.C_GREEN
                            ).add_field(
                                name="To view all permissions",
                                value=f"```{await get_prefix(ctx)}allperms```"
                            )
                        )
                        break
        else:
            # No plugin given, or it is not one of the managed cogs.
            await ctx.send(
                embed=discord.Embed(
                    description="🚫 You need to define a valid plugin!",
                    color=var.C_RED
                ).add_field(
                    name="Format",
                    value=f"`{await get_prefix(ctx)}setperm <plugin>`"
                ).set_footer(
                    text=(
                        f"You can view all plugins"
                        f" by using the command {await get_prefix(ctx)}plugins"
                    )
                )
            )

    @commands.command(
        name="removeperm", aliases=["removepermission", "disablepermission"]
    )
    @commands.has_permissions(administrator=True)
    async def remove_perm(self, ctx, cmd=None, role: discord.Role = None):
        """Revoke *role*'s permission to use *cmd*."""
        if cmd and role is not None:
            guild_doc = await db.PERMISSIONS.find_one(
                {"_id": ctx.guild.id}, {"_id": 0}
            )
            # Flatten every plugin's command names to check cmd exists at all.
            all_perm_commands = [x for i in guild_doc.values() for x in i]
            if cmd not in all_perm_commands:
                await ctx.send(
                    embed=discord.Embed(
                        title="Invalid command",
                        description="This command has no permissions setup",
                        color=var.C_RED
                    )
                )
            else:
                # Locate which plugin the command belongs to.
                plugin_name = [
                    x for x in guild_doc if cmd in guild_doc[x].keys()
                ][0]
                plugin_dict = guild_doc[plugin_name]
                new_dict = plugin_dict.copy()
                role_list = plugin_dict[cmd]
                new_list = role_list.copy()
                try:
                    # list.remove raises ValueError when the role was never
                    # granted -- handled below.
                    new_list.remove(role.id)
                    new_dict.update({cmd: new_list})
                    new_data = {
                        "$set": {
                            plugin_name: new_dict
                        }
                    }
                    await db.PERMISSIONS.update_one(guild_doc, new_data)
                    await ctx.send(
                        embed=discord.Embed(
                            title="Successfully removed permission",
                            description=(
                                f"{var.E_ACCEPT} Members with {role.mention} "
                                f"role can't use **{cmd}** command anymore"
                            ),
                            color=var.C_GREEN
                        ).add_field(
                            name="To add new command permission",
                            value=(
                                f"```{await get_prefix(ctx)}addperm <plugin>```"
                            )
                        )
                    )
                except ValueError:
                    await ctx.send(
                        embed=discord.Embed(
                            title="Invalid combination",
                            description=(
                                f"The command {cmd} "
                                "has no permissions setup with role"
                                f" {ctx.guild.get_role(role.id).mention}"
                            ),
                            color=var.C_RED
                        )
                    )
        else:
            await ctx.send(
                embed=discord.Embed(
                    description=(
                        "🚫 You need to define the command name and the role"
                    ),
                    color=var.C_RED
                ).add_field(
                    name="Format",
                    value=f"`{await get_prefix(ctx)}removeperm <command> <role>`"
                ).set_footer(
                    text=(
                        f"You can view all plugins by using the permissions"
                        f" setup using {await get_prefix(ctx)}allperms"
                    )
                )
            )
def setup(bot):
    """discord.py extension entry point: register the Permissions cog."""
    bot.add_cog(Permissions(bot))
from discord.ext import commands
import database as db
import variables as var
from functions import get_prefix
def has_command_permission():
    """Build a commands.check that allows a command only for DB-granted roles.

    The check fails open: when the guild has no permissions document, no entry
    for this plugin/command, or an empty permitted-role list, everyone may run
    the command.
    """
    async def predicate(ctx: commands.Context):
        plugin_name = ctx.cog.__cog_name__
        cmd_name = ctx.command.name
        guild_doc = await db.PERMISSIONS.find_one({"_id": ctx.guild.id})
        try:
            permitted_roles = set(guild_doc[plugin_name][cmd_name])
        except (KeyError, TypeError):
            # KeyError: no entry for this plugin/command.
            # TypeError: no permissions document at all (guild_doc is None);
            # BUG FIX: this previously raised instead of falling open.
            return True
        if not permitted_roles:
            return True
        # BUG FIX: the original implicitly returned None when no role matched;
        # return an explicit bool instead.
        return any(role.id in permitted_roles for role in ctx.author.roles)
    return commands.check(predicate)
class Permissions(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="allperms")
async def all_perms(self, ctx):
embed = discord.Embed(
title=f"Command role permissions", color=var.C_MAIN
)
guild_doc = await db.PERMISSIONS.find_one(
{"_id": ctx.guild.id}, {"_id": 0}
)
for i in guild_doc:
perms = guild_doc[i]
cmds = [x for x in perms]
roles = [x for x in perms.values()]
value = ""
for c in cmds:
role_ids = roles[cmds.index(c)]
mentioned = [f"<@&{x}>" for x in role_ids]
stringed = ", ".join(mentioned)
value += f"{c}: {stringed}\n"
if guild_doc[i] == {}:
value = None
embed.add_field(name=i, value=value, inline=False)
await ctx.send(embed=embed)
@commands.command(
    name="setperm",
    aliases=["setpermission", "addperm", "addpermission"]
)
@commands.has_permissions(administrator=True)
async def set_perm(self, ctx, plugin=None):
    """Interactively grant a role permission to run a command.

    Flow: the admin names a plugin, the bot lists that plugin's commands,
    then repeatedly reads ``<command> <role>`` messages from the same admin
    in the same channel until a valid pair is stored or the admin types
    ``cancel``.

    Fixes vs. original: ``isnumeric() == False`` replaced with a proper
    guard, the stray double backtick in the "Command not found" message
    removed, and the redundant second DB fetch in the success path dropped.
    """
    cogs = [
        'Leveling',
        'Moderation',
        'ReactionRoles',
        'Welcome',
        'Verification',
        'Chatbot',
        'AutoMod',
        "Karma",
        'Fun',
        'Giveaway'
    ]
    # Guard clause: bail out early on a missing/unknown plugin name.
    if plugin is None or plugin.lower() not in [i.lower() for i in cogs]:
        await ctx.send(
            embed=discord.Embed(
                description="🚫 You need to define a valid plugin!",
                color=var.C_RED
            ).add_field(
                name="Format",
                value=f"`{await get_prefix(ctx)}setperm <plugin>`"
            ).set_footer(
                text=(
                    "You can view all plugins"
                    f" by using the command {await get_prefix(ctx)}plugins"
                )
            )
        )
        return

    embed = discord.Embed(
        title=f"All commands for {plugin}",
        color=var.C_GREEN
    ).add_field(
        name="Note",
        value=(
            "Make sure to not enter the command name with the prefix,"
            " that would trigger the command. Just enter the command"
            " name followed by a space and then role"
            " (ID or Mention can be used)"
        )
    )
    # Map the case-insensitive user input back to the cog's registered name.
    if plugin.lower() == "reactionroles":
        plugin_name = "ReactionRoles"
    elif plugin.lower() == "automod":
        plugin_name = "AutoMod"
    else:
        plugin_name = plugin.capitalize()
    desc = (
        "Type the name of the command (without prefix) and the role "
        "with a space to let members with that role be able to use the"
        " command\n Type `cancel` to stop the process\n\n"
    )
    for i in self.bot.cogs[plugin_name].walk_commands():
        desc += f"`{i}`\n"
    embed.description = desc
    await ctx.send(embed=embed)

    def message_check(message):
        # Only accept replies from the invoking admin in the same channel.
        return (
            message.author == ctx.author
            and message.channel.id == ctx.channel.id
        )

    while True:
        user_msg = await self.bot.wait_for(
            "message", check=message_check
        )
        if user_msg.content in ["cancel", "`cancel`", "```cancel```"]:
            await ctx.send(
                f"Cancelled permissions change for {plugin} plugin")
            return
        guild_doc = await db.PERMISSIONS.find_one(
            {"_id": ctx.guild.id}
        )
        data = user_msg.content.split(" ")
        if len(data) != 2:
            await ctx.send(
                embed=discord.Embed(
                    title="Invalid format",
                    description=(
                        "You don't need to start over again,"
                        " just send the message in correct "
                        "format as shown below"
                    ),
                    color=var.C_ORANGE
                ).add_field(
                    name="Format",
                    value="`command_name role`"
                ).set_footer(
                    text=(
                        "Don't enter the command name with prefix, "
                        "that would trigger the command, "
                        "just write the command name"
                    )
                )
            )
            continue
        cmd_name = data[0].lower()
        # Accept either a raw role ID or a role mention (<@&id>).
        role_token = data[1].strip("<>@&")
        if cmd_name not in [
            str(i).lower()
            for i in self.bot.cogs[plugin_name].walk_commands()
        ]:
            await ctx.send(
                embed=discord.Embed(
                    title="Command not found",
                    description=(
                        f"There is no command named "
                        f"`{cmd_name}` in"
                        f" **{plugin_name}**. "
                        f"Try again with correct command in"
                        f" {plugin_name} plugin"
                    ),
                    color=var.C_ORANGE
                )
            )
            continue
        role = (
            ctx.guild.get_role(int(role_token))
            if role_token.isnumeric() else None
        )
        if role is None:
            await ctx.send(
                embed=discord.Embed(
                    title="Role not found",
                    description=(
                        f"There is no role with the ID `{data[1]}`."
                        " Try again with correct role mention or ID"
                    ),
                    color=var.C_ORANGE
                )
            )
            continue
        if role.id in guild_doc[plugin_name].get(cmd_name, []):
            await ctx.send(
                embed=discord.Embed(
                    description=(
                        f"{role.mention}"
                        f" role already has permissions for"
                        f" **{cmd_name}**"
                    ),
                    color=var.C_RED
                )
            )
            continue
        # Append the role id under the command, preserving existing grants.
        new_dict = dict(guild_doc[plugin_name])
        new_dict[cmd_name] = list(new_dict.get(cmd_name, [])) + [role.id]
        await db.PERMISSIONS.update_one(
            guild_doc, {"$set": {plugin_name: new_dict}}
        )
        await ctx.send(
            embed=discord.Embed(
                title="Successfully updated permissions",
                description=(
                    f"{var.E_ACCEPT} Users with {role.mention}"
                    f" can now use the command {cmd_name}"
                ),
                color=var.C_GREEN
            ).add_field(
                name="To view all permissions",
                value=f"```{await get_prefix(ctx)}allperms```"
            )
        )
        return
@commands.command(
    name="removeperm", aliases=["removepermission", "disablepermission"]
)
@commands.has_permissions(administrator=True)
async def remove_perm(self, ctx, cmd=None, role: discord.Role = None):
    """Revoke a role's permission to use *cmd*.

    Fixes vs. original: uses ``role.mention`` directly instead of
    re-resolving via ``ctx.guild.get_role(role.id)`` (which can return
    ``None`` and raise AttributeError), and narrows the ``try`` block to
    the single line that can raise ``ValueError``.
    """
    # Guard clause: both arguments are required.
    if cmd is None or role is None:
        await ctx.send(
            embed=discord.Embed(
                description=(
                    "🚫 You need to define the command name and the role"
                ),
                color=var.C_RED
            ).add_field(
                name="Format",
                value=f"`{await get_prefix(ctx)}removeperm <command> <role>`"
            ).set_footer(
                text=(
                    "You can view all plugins by using the permissions"
                    f" setup using {await get_prefix(ctx)}allperms"
                )
            )
        )
        return

    guild_doc = await db.PERMISSIONS.find_one(
        {"_id": ctx.guild.id}, {"_id": 0}
    )
    # Flatten every command name that has a permissions entry.
    all_perm_commands = [x for i in guild_doc.values() for x in i]
    if cmd not in all_perm_commands:
        await ctx.send(
            embed=discord.Embed(
                title="Invalid command",
                description="This command has no permissions setup",
                color=var.C_RED
            )
        )
        return

    # Locate the plugin that holds this command's permission entry.
    plugin_name = [
        x for x in guild_doc if cmd in guild_doc[x].keys()
    ][0]
    new_dict = dict(guild_doc[plugin_name])
    new_list = list(new_dict[cmd])
    try:
        new_list.remove(role.id)
    except ValueError:
        # The role was never granted this command.
        await ctx.send(
            embed=discord.Embed(
                title="Invalid combination",
                description=(
                    f"The command {cmd} "
                    "has no permissions setup with role"
                    f" {role.mention}"
                ),
                color=var.C_RED
            )
        )
        return
    new_dict[cmd] = new_list
    # NOTE(review): guild_doc was fetched with _id projected out, so this
    # filter matches on field equality only — confirm intended.
    await db.PERMISSIONS.update_one(
        guild_doc, {"$set": {plugin_name: new_dict}}
    )
    await ctx.send(
        embed=discord.Embed(
            title="Successfully removed permission",
            description=(
                f"{var.E_ACCEPT} Members with {role.mention} "
                f"role can't use **{cmd}** command anymore"
            ),
            color=var.C_GREEN
        ).add_field(
            name="To add new command permission",
            value=(
                f"```{await get_prefix(ctx)}addperm <plugin>```"
            )
        )
    )
def setup(bot):
    """Extension entry point used by discord.py to register the cog."""
    permissions_cog = Permissions(bot)
    bot.add_cog(permissions_cog)
from sqlalchemy import Column, Integer, String, create_engine, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy import Column, Integer, String, create_engine, DateTime, ForeignKey
from sqlalchemy.orm import sessionmaker
from datetime import datetime
from persistence import db_path
# Module-wide SQLite engine bound to the application's database file.
engine = create_engine('sqlite:///' + db_path, echo=False)
# Declarative base class shared by every ORM model in this module.
Base = declarative_base()
class Attendee(Base):
    """ORM model for an attendee record (table ``attendee_db``).

    One-to-one with :class:`Demand` through the ``demand`` relationship.
    """
    __tablename__ = 'attendee_db'

    id = Column(Integer, primary_key=True)
    name = Column('name', String(255), nullable=False, unique=True)
    # Fix: back_populates must name the paired *attribute* on Demand
    # ('demand'); the original pointed at the table name 'attendee_db',
    # which breaks relationship configuration.
    demand = relationship("Demand", uselist=False, back_populates="demand")
    creation_date = Column('creation_date', DateTime, default=datetime.utcnow, nullable=False)
    email = Column('email', String(255))
    ssn = Column('ssn', String(255))

    # Fix: defining __eq__ alone sets __hash__ to None, making instances
    # unhashable and breaking SQLAlchemy's identity sets; keep default
    # identity-based hashing.
    __hash__ = object.__hash__

    def __eq__(self, other):
        """Field-wise equality on id, name, email, ssn and creation_date."""
        if other is None:
            return False
        return (
            (self.id, self.name, self.email, self.ssn, self.creation_date)
            == (other.id, other.name, other.email, other.ssn,
                other.creation_date)
        )
class Demand(Base):
    """ORM model for an attendee's demand (table ``demand_db``)."""
    __tablename__ = 'demand_db'

    id = Column(Integer, primary_key=True)
    # Fix: ForeignKey takes "<table name>.<column>"; the Attendee table is
    # named 'attendee_db', not 'attendee'.
    attendee_id = Column(Integer, ForeignKey('attendee_db.id'))
    # NOTE(review): the attribute is named 'demand' but points back at
    # Attendee; renaming would break callers, so the name is kept.
    demand = relationship("Attendee", back_populates="demand")
    name = Column('name', String(255), nullable=False, unique=True)
    creation_date = Column('creation_date', DateTime, default=datetime.utcnow, nullable=False)
def initiate_engine_session_base(engine_path, echo=True):
    """Build a fresh (engine, Session factory, declarative base) triple
    for the SQLite database stored at *engine_path*."""
    database_url = 'sqlite:///' + engine_path
    fresh_engine = create_engine(database_url, echo=echo)
    fresh_session_factory = sessionmaker(bind=fresh_engine)
    fresh_base = declarative_base()
    return fresh_engine, fresh_session_factory, fresh_base
def tear_down_test_db():
    """Drop and recreate all tables on the module-level engine (test reset)."""
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)
if __name__ == '__main__':
Base.metadata.create_all(engine) | persistence/models.py | from sqlalchemy import Column, Integer, String, create_engine, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy import Column, Integer, String, create_engine, DateTime, ForeignKey
from sqlalchemy.orm import sessionmaker
from datetime import datetime
from persistence import db_path
engine = create_engine('sqlite:///' + db_path, echo=False)
Base = declarative_base()
class Attendee(Base):
"""Each class represents a database table"""
__tablename__ = 'attendee_db'
id = Column(Integer, primary_key=True)
name = Column('name', String(255), nullable=False, unique=True)
demand = relationship("Demand", uselist=False, back_populates="attendee_db")
creation_date = Column('creation_date', DateTime, default=datetime.utcnow, nullable=False)
email = Column('email', String(255))
ssn = Column('ssn', String(255))
def __eq__(self, other):
if other is None and self is not None:
return False
elif other is not None and self is None:
return False
else:
if other.name != self.name:
return False
elif other.email != self.email:
return False
elif self.creation_date != other.creation_date:
return False
elif self.id != other.id:
return False
elif self.ssn != other.ssn:
return False
return True
class Demand(Base):
__tablename__ = 'demand_db'
id = Column(Integer, primary_key=True)
attendee_id = Column(Integer, ForeignKey('attendee.id'))
demand = relationship("Attendee", back_populates="demand")
name = Column('name', String(255), nullable=False, unique=True)
creation_date = Column('creation_date', DateTime, default=datetime.utcnow, nullable=False)
def initiate_engine_session_base(engine_path, echo=True):
_engine = create_engine('sqlite:///' + engine_path, echo=echo)
_Session = sessionmaker(bind=_engine)
_Base = declarative_base()
return _engine, _Session, _Base
def tear_down_test_db():
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
if __name__ == '__main__':
Base.metadata.create_all(engine) | 0.646237 | 0.094385 |
import tensorflow as _tf
from abc import ABC as _ABC, abstractmethod as _abstractmethod
from .input import UpdateInput
from .mlp import MLP
class UpdateLayer(_tf.keras.layers.Layer, _ABC):
    """Base Update Layer abstract class to be inherited by Update Layer implementations.

    Abstract class to define handling of Batched UpdateInput for update layer to perform Update.
    Child classes must implement the update method, which takes as argument a non-batched flattened
    UpdateInput.
    """

    def __init__(self, hidden_state_size=10, message_size=10, *args, **kwargs):
        """Update Layer abstract class constructor.

        Args:
            hidden_state_size: width of each node's hidden-state vector.
            message_size: width of each aggregated message vector.
        """
        super(UpdateLayer, self).__init__(*args, **kwargs)
        self.hidden_state_size = hidden_state_size
        self.message_size = message_size

    def call(self, inputs: UpdateInput, training=None):
        # Flatten batch-nodes: merge the leading (batch, nodes) axes so the
        # concrete update() sees flat [batch*nodes, feature] matrices.
        batch_size = _tf.shape(inputs.hidden)[0]
        num_nodes = _tf.shape(inputs.hidden)[1]
        flattened_size = batch_size * num_nodes
        messages = _tf.reshape(inputs.messages, [flattened_size, self.message_size])
        hidden = _tf.reshape(inputs.hidden, [flattened_size, self.hidden_state_size])
        hidden_initial = _tf.reshape(
            inputs.hidden_initial, [flattened_size, self.hidden_state_size])
        # Call update function implemented by the subclass.
        new_hidden = self.update(
            UpdateInput(
                hidden=hidden,
                hidden_initial=hidden_initial,
                messages=messages,
            ), training=training
        )
        # Restore batches: back to [batch, nodes, hidden_state_size].
        return _tf.reshape(new_hidden, [batch_size, num_nodes, self.hidden_state_size])

    @_abstractmethod
    def update(self, inputs: UpdateInput, training=None):
        """Update method to apply to a certain flattened UpdateInput."""
        pass
class FeedForwardUpdate(UpdateLayer):
    """Update function layer with a Feed Forward NN for GNN model.

    The new hidden state is MLP(concat(hidden, messages)), mapped back to
    ``hidden_state_size`` features.
    """

    def __init__(
        self, hidden_state_size=10, message_size=10, activation="relu", layer=None,
        num_layers=4, output_activation=None, units=50, *args, **kwargs
    ):
        """Constructor; MLP hyper-parameters are forwarded to :class:`MLP`."""
        super(FeedForwardUpdate, self).__init__(
            hidden_state_size=hidden_state_size, message_size=message_size, *args, **kwargs)
        self.mlp = MLP(
            activation=activation, layer=layer, name="update-ff-net", num_layers=num_layers,
            output_activation=output_activation, output_units=hidden_state_size, units=units,
            **kwargs)

    def build(self, input_shapes):
        # The MLP consumes the concatenated [hidden || messages] features.
        concat_dim = input_shapes.hidden[-1] + input_shapes.messages[-1]
        self.mlp.build(_tf.TensorShape([None, concat_dim]))
        # Fix: the original wrote super(UpdateLayer, self).build, skipping a
        # level of the MRO; resolving from this class is equivalent today
        # (UpdateLayer defines no build) and stays correct if one is added.
        super(FeedForwardUpdate, self).build([])

    def update(self, inputs: UpdateInput, training=None):
        """Concatenate hidden states with messages and run the MLP."""
        combined = _tf.concat([inputs.hidden, inputs.messages], axis=-1)
        return self.mlp(combined, training=training)
class GRUUpdate(UpdateLayer):
    """Update function layer with GRU for GNN model."""

    def __init__(self, hidden_state_size=10, message_size=10, *args, **kwargs):
        """Constructor; the GRU cell's state width equals hidden_state_size."""
        super(GRUUpdate, self).__init__(
            hidden_state_size=hidden_state_size, message_size=message_size, *args, **kwargs)
        self.gru = _tf.keras.layers.GRUCell(units=hidden_state_size, name="update-gru")

    def update(self, inputs: UpdateInput, training=None):
        # Messages act as the GRU input; the node's hidden state is the
        # recurrent state being updated.
        hidden = inputs.hidden
        messages = inputs.messages
        # NOTE(review): GRUCell.call conventionally takes `states` as a list
        # of tensors; passing a bare tensor is relied upon here — confirm
        # against the pinned TensorFlow version.
        new_hidden, _ = self.gru(messages, states=hidden, training=training)
        return new_hidden
from abc import ABC as _ABC, abstractmethod as _abstractmethod
from .input import UpdateInput
from .mlp import MLP
class UpdateLayer(_tf.keras.layers.Layer, _ABC):
"""Base Update Layer abstract class to be inherited by Update Layer implementations.
Abstract class to define handling of Batched UpdateInput for update layer to perform Update.
Child classes must implement the update method, which takes as argument a non-batched flattened
UpdateInput.
"""
def __init__(self, hidden_state_size=10, message_size=10, *args, **kwargs):
"""Update Layer abstract class constructor."""
super(UpdateLayer, self).__init__(*args, **kwargs)
self.hidden_state_size = hidden_state_size
self.message_size = message_size
def call(self, inputs: UpdateInput, training=None):
# Flatten batch-nodes
batch_size = _tf.shape(inputs.hidden)[0]
num_nodes = _tf.shape(inputs.hidden)[1]
flattened_size = batch_size * num_nodes
messages = _tf.reshape(inputs.messages, [flattened_size, self.message_size])
hidden = _tf.reshape(inputs.hidden, [flattened_size, self.hidden_state_size])
hidden_initial = _tf.reshape(
inputs.hidden_initial, [flattened_size, self.hidden_state_size])
# Call update function
new_hidden = self.update(
UpdateInput(
hidden=hidden,
hidden_initial=hidden_initial,
messages=messages,
), training=training
)
# Restore batches
return _tf.reshape(new_hidden, [batch_size, num_nodes, self.hidden_state_size])
@_abstractmethod
def update(self, inputs: UpdateInput, training=None):
"""Update method to apply to a certain flattened UpdateInput."""
pass
class FeedForwardUpdate(UpdateLayer):
"""Update function layer with a Feed Forward NN for GNN model."""
def __init__(
self, hidden_state_size=10, message_size=10, activation="relu", layer=None,
num_layers=4, output_activation=None, units=50, *args, **kwargs
):
super(FeedForwardUpdate, self).__init__(
hidden_state_size=hidden_state_size, message_size=message_size, *args, **kwargs)
self.mlp = MLP(
activation=activation, layer=layer, name="update-ff-net", num_layers=num_layers,
output_activation=output_activation, output_units=hidden_state_size, units=units,
**kwargs)
def build(self, input_shapes):
build_shapes = input_shapes.hidden[-1] + input_shapes.messages[-1]
self.mlp.build(_tf.TensorShape([None, build_shapes]))
super(UpdateLayer, self).build([])
def update(self, inputs: UpdateInput, training=None):
hidden = inputs.hidden
messages = inputs.messages
_input = _tf.concat([hidden, messages], axis=-1)
return self.mlp(_input, training=training)
class GRUUpdate(UpdateLayer):
"""Update function layer with GRU for GNN model."""
def __init__(self, hidden_state_size=10, message_size=10, *args, **kwargs):
super(GRUUpdate, self).__init__(
hidden_state_size=hidden_state_size, message_size=message_size, *args, **kwargs)
self.gru = _tf.keras.layers.GRUCell(units=hidden_state_size, name="update-gru")
def update(self, inputs: UpdateInput, training=None):
hidden = inputs.hidden
messages = inputs.messages
new_hidden, _ = self.gru(messages, states=hidden, training=training)
return new_hidden | 0.936786 | 0.371222 |
import numpy as np
import random
import torch
from torch.backends import cudnn
from absl import app, flags
from datasets import ML1M, ML100K, Flixster, Douban, YahooMusic
from model import GCCF
from hyperparameters import hparams
from utils import get_adj
# Make cuDNN deterministic (disable autotuned, non-deterministic kernels)
# so training runs are reproducible.
cudnn.deterministic = True
cudnn.benchmark = False
# Single fixed RNG seed applied to torch, numpy and the stdlib random module.
seed = 123
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# Use the first CUDA GPU when available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Command-line flags: which dataset to load and where it lives on disk.
FLAGS = flags.FLAGS
flags.DEFINE_string('data_name', '', 'dataset name')
flags.DEFINE_string('root_dir', '', 'dataset directory path')
def main(argv):
    """Train GCCF on the dataset selected by --data_name and report the
    lowest test loss observed across all epochs.

    Fixes vs. original: a bare ``raise Exception`` is replaced with a
    descriptive ``ValueError``; the magic ``999.`` sentinel becomes
    ``float('inf')`` (an initial loss above 999 was silently ignored);
    the running minimum is stored as a plain float rather than a tensor.
    """
    if FLAGS.data_name == 'ml-1m':
        dataset = ML1M(FLAGS.root_dir, device)
    elif FLAGS.data_name == 'ml-100k':
        dataset = ML100K(FLAGS.root_dir, device)
    elif FLAGS.data_name == 'flixster':
        dataset = Flixster(FLAGS.root_dir, device)
    elif FLAGS.data_name == 'douban':
        dataset = Douban(FLAGS.root_dir, device)
    elif FLAGS.data_name == 'yahoo_music':
        dataset = YahooMusic(FLAGS.root_dir, device)
    else:
        raise ValueError(f"unknown dataset name: {FLAGS.data_name!r}")
    data_hparams = hparams[FLAGS.data_name]
    train_user, train_movie, train_rating = dataset.get_train_data()
    test_user, test_movie, test_rating = dataset.get_test_data()
    num_users = dataset.get_num_users()
    num_movies = dataset.get_num_movies()
    # Bipartite adjacency in both directions (user->movie and movie->user).
    user_adj = get_adj(num_users, num_movies, train_user, train_movie, device)
    movie_adj = get_adj(num_movies, num_users, train_movie, train_user, device)
    epochs = 1000
    model = GCCF(num_users, num_movies, data_hparams).to(device)
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(
        model.parameters(), lr=data_hparams["lr"],
        weight_decay=data_hparams["weight_decay"])
    min_test_loss = float('inf')
    for epoch in range(epochs):
        model.train()
        optimizer.zero_grad()
        predict = model(user_adj, movie_adj, train_user, train_movie)
        loss = criterion(predict, train_rating)
        loss.backward()
        optimizer.step()
        with torch.no_grad():
            model.eval()
            test_predict = model(user_adj, movie_adj, test_user, test_movie)
            # Evaluate on de-normalized ratings so losses are comparable
            # across datasets.
            test_loss = criterion(
                dataset.inverse_transform(test_predict),
                dataset.inverse_transform(test_rating))
            min_test_loss = min(min_test_loss, test_loss.item())
        print('Epoch {:04d}, Train Loss: {:.6f}, Test Loss: {:.6f}'.format(
            epoch + 1, loss.item(), test_loss.item()))
    print('Min Test Loss: {:.6f}'.format(min_test_loss))


if __name__ == '__main__':
    app.run(main)
import random
import torch
from torch.backends import cudnn
from absl import app, flags
from datasets import ML1M, ML100K, Flixster, Douban, YahooMusic
from model import GCCF
from hyperparameters import hparams
from utils import get_adj
cudnn.deterministic = True
cudnn.benchmark = False
seed = 123
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
FLAGS = flags.FLAGS
flags.DEFINE_string('data_name', '', 'dataset name')
flags.DEFINE_string('root_dir', '', 'dataset directory path')
def main(argv):
if FLAGS.data_name == 'ml-1m':
dataset = ML1M(FLAGS.root_dir, device)
elif FLAGS.data_name == 'ml-100k':
dataset = ML100K(FLAGS.root_dir, device)
elif FLAGS.data_name == 'flixster':
dataset = Flixster(FLAGS.root_dir, device)
elif FLAGS.data_name == 'douban':
dataset = Douban(FLAGS.root_dir, device)
elif FLAGS.data_name == 'yahoo_music':
dataset = YahooMusic(FLAGS.root_dir, device)
else:
raise Exception
data_hparams = hparams[FLAGS.data_name]
train_user, train_movie, train_rating = dataset.get_train_data()
test_user, test_movie, test_rating = dataset.get_test_data()
num_users = dataset.get_num_users()
num_movies = dataset.get_num_movies()
user_adj = get_adj(num_users, num_movies, train_user, train_movie, device)
movie_adj = get_adj(num_movies, num_users, train_movie, train_user, device)
epochs = 1000
model = GCCF(num_users, num_movies, data_hparams).to(device)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=data_hparams["lr"], weight_decay=data_hparams["weight_decay"])
min_test_loss = 999.
for epoch in range(epochs):
model.train()
optimizer.zero_grad()
predict = model(user_adj, movie_adj, train_user, train_movie)
loss = criterion(predict, train_rating)
loss.backward()
optimizer.step()
with torch.no_grad():
model.eval()
test_predict = model(user_adj, movie_adj, test_user, test_movie)
test_loss = criterion(dataset.inverse_transform(test_predict), dataset.inverse_transform(test_rating))
if min_test_loss > test_loss:
min_test_loss = test_loss
print('Epoch {:04d}, Train Loss: {:.6f}, Test Loss: {:.6f}'.format(epoch+1, loss.item(), test_loss.item()))
print('Min Test Loss: {:.6f}'.format(min_test_loss))
if __name__ == '__main__':
app.run(main) | 0.727782 | 0.218148 |
# Pinned versions (release tag or git commit) and SHA-256 checksums for
# every external dependency fetched by this Bazel build.
BAZEL_SKYLIB_RELEASE = "1.0.3"
BAZEL_SKYLIB_SHA256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c"

OPENCENSUS_PROTO_RELEASE = "0.3.0"
OPENCENSUS_PROTO_SHA256 = "b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0"

PGV_GIT_SHA = "278964a8052f96a2f514add0298098f63fb7f47f"  # June 9, 2020
PGV_SHA256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8"

GOOGLEAPIS_GIT_SHA = "82944da21578a53b74e547774cf62ed31a05b841"  # Dec 2, 2019
GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405"

PROMETHEUS_GIT_SHA = "60555c9708c786597e6b07bf846d0dc5c2a46f54"  # Jun 23, 2020
PROMETHEUS_SHA = "6748b42f6879ad4d045c71019d2512c94be3dd86f60965e9e31e44a3f464323e"

UDPA_RELEASE = "0.0.1"
UDPA_SHA256 = "83a7dcc316d741031f34c0409021432b74a39c4811845a177133f02f948fe2d8"

ZIPKINAPI_RELEASE = "0.2.2"
ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b"

RULES_PROTO_GIT_SHA = "40298556293ae502c66579620a7ce867d5f57311"  # Aug 17, 2020
RULES_PROTO_SHA256 = "aa1ee19226f707d44bee44c720915199c20c84a23318bb0597ed4e5c873ccbd5"

# Map of repository name -> http_archive-style kwargs (sha256, strip_prefix,
# urls) consumed by the repository rules in the WORKSPACE machinery.
REPOSITORY_LOCATIONS = dict(
    bazel_skylib = dict(
        sha256 = BAZEL_SKYLIB_SHA256,
        urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/" + BAZEL_SKYLIB_RELEASE + "/bazel-skylib-" + BAZEL_SKYLIB_RELEASE + ".tar.gz"],
    ),
    com_envoyproxy_protoc_gen_validate = dict(
        sha256 = PGV_SHA256,
        strip_prefix = "protoc-gen-validate-" + PGV_GIT_SHA,
        urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/" + PGV_GIT_SHA + ".tar.gz"],
    ),
    com_google_googleapis = dict(
        # TODO(dio): Consider writing a Starlark macro for importing Google API proto.
        sha256 = GOOGLEAPIS_SHA,
        strip_prefix = "googleapis-" + GOOGLEAPIS_GIT_SHA,
        urls = ["https://github.com/googleapis/googleapis/archive/" + GOOGLEAPIS_GIT_SHA + ".tar.gz"],
    ),
    com_github_cncf_udpa = dict(
        sha256 = UDPA_SHA256,
        strip_prefix = "udpa-" + UDPA_RELEASE,
        urls = ["https://github.com/cncf/udpa/archive/v" + UDPA_RELEASE + ".tar.gz"],
    ),
    prometheus_metrics_model = dict(
        sha256 = PROMETHEUS_SHA,
        strip_prefix = "client_model-" + PROMETHEUS_GIT_SHA,
        urls = ["https://github.com/prometheus/client_model/archive/" + PROMETHEUS_GIT_SHA + ".tar.gz"],
    ),
    opencensus_proto = dict(
        sha256 = OPENCENSUS_PROTO_SHA256,
        strip_prefix = "opencensus-proto-" + OPENCENSUS_PROTO_RELEASE + "/src",
        urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v" + OPENCENSUS_PROTO_RELEASE + ".tar.gz"],
    ),
    rules_proto = dict(
        sha256 = RULES_PROTO_SHA256,
        strip_prefix = "rules_proto-" + RULES_PROTO_GIT_SHA + "",
        urls = ["https://github.com/bazelbuild/rules_proto/archive/" + RULES_PROTO_GIT_SHA + ".tar.gz"],
    ),
    com_github_openzipkin_zipkinapi = dict(
        sha256 = ZIPKINAPI_SHA256,
        strip_prefix = "zipkin-api-" + ZIPKINAPI_RELEASE,
        urls = ["https://github.com/openzipkin/zipkin-api/archive/" + ZIPKINAPI_RELEASE + ".tar.gz"],
    ),
)
BAZEL_SKYLIB_SHA256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c"
OPENCENSUS_PROTO_RELEASE = "0.3.0"
OPENCENSUS_PROTO_SHA256 = "b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0"
PGV_GIT_SHA = "278964a8052f96a2f514add0298098f63fb7f47f" # June 9, 2020
PGV_SHA256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8"
GOOGLEAPIS_GIT_SHA = "82944da21578a53b74e547774cf62ed31a05b841" # Dec 2, 2019
GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405"
PROMETHEUS_GIT_SHA = "60555c9708c786597e6b07bf846d0dc5c2a46f54" # Jun 23, 2020
PROMETHEUS_SHA = "6748b42f6879ad4d045c71019d2512c94be3dd86f60965e9e31e44a3f464323e"
UDPA_RELEASE = "0.0.1"
UDPA_SHA256 = "83a7dcc316d741031f34c0409021432b74a39c4811845a177133f02f948fe2d8"
ZIPKINAPI_RELEASE = "0.2.2"
ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b"
RULES_PROTO_GIT_SHA = "40298556293ae502c66579620a7ce867d5f57311" # Aug 17, 2020
RULES_PROTO_SHA256 = "aa1ee19226f707d44bee44c720915199c20c84a23318bb0597ed4e5c873ccbd5"
REPOSITORY_LOCATIONS = dict(
bazel_skylib = dict(
sha256 = BAZEL_SKYLIB_SHA256,
urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/" + BAZEL_SKYLIB_RELEASE + "/bazel-skylib-" + BAZEL_SKYLIB_RELEASE + ".tar.gz"],
),
com_envoyproxy_protoc_gen_validate = dict(
sha256 = PGV_SHA256,
strip_prefix = "protoc-gen-validate-" + PGV_GIT_SHA,
urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/" + PGV_GIT_SHA + ".tar.gz"],
),
com_google_googleapis = dict(
# TODO(dio): Consider writing a Starlark macro for importing Google API proto.
sha256 = GOOGLEAPIS_SHA,
strip_prefix = "googleapis-" + GOOGLEAPIS_GIT_SHA,
urls = ["https://github.com/googleapis/googleapis/archive/" + GOOGLEAPIS_GIT_SHA + ".tar.gz"],
),
com_github_cncf_udpa = dict(
sha256 = UDPA_SHA256,
strip_prefix = "udpa-" + UDPA_RELEASE,
urls = ["https://github.com/cncf/udpa/archive/v" + UDPA_RELEASE + ".tar.gz"],
),
prometheus_metrics_model = dict(
sha256 = PROMETHEUS_SHA,
strip_prefix = "client_model-" + PROMETHEUS_GIT_SHA,
urls = ["https://github.com/prometheus/client_model/archive/" + PROMETHEUS_GIT_SHA + ".tar.gz"],
),
opencensus_proto = dict(
sha256 = OPENCENSUS_PROTO_SHA256,
strip_prefix = "opencensus-proto-" + OPENCENSUS_PROTO_RELEASE + "/src",
urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v" + OPENCENSUS_PROTO_RELEASE + ".tar.gz"],
),
rules_proto = dict(
sha256 = RULES_PROTO_SHA256,
strip_prefix = "rules_proto-" + RULES_PROTO_GIT_SHA + "",
urls = ["https://github.com/bazelbuild/rules_proto/archive/" + RULES_PROTO_GIT_SHA + ".tar.gz"],
),
com_github_openzipkin_zipkinapi = dict(
sha256 = ZIPKINAPI_SHA256,
strip_prefix = "zipkin-api-" + ZIPKINAPI_RELEASE,
urls = ["https://github.com/openzipkin/zipkin-api/archive/" + ZIPKINAPI_RELEASE + ".tar.gz"],
),
) | 0.24899 | 0.163713 |
from datetime import datetime as dt
import logging
from secrets import token_urlsafe
from flask import (
request,
render_template,
flash,
redirect,
Blueprint,
url_for,
current_app
)
from flask_babel import lazy_gettext as _l
from flask_login import current_user, logout_user, login_user
from {{cookiecutter.app_name}}.extensions import db
from {{cookiecutter.app_name}}.forms import LoginForm, RegisterForm, ResetPasswordReq, ResetPassword
from {{cookiecutter.app_name}}.models import User
from {{cookiecutter.app_name}}.email import send_email
logger = logging.getLogger(__name__)
bp = Blueprint('auth', __name__, url_prefix="/auth", static_folder="../static")
def create_user(form):
    """Create a new User from a validated registration form.

    Returns True when the user was created, False when a user with the
    same username or email already exists. A flash message is emitted on
    both paths; the caller (the register view) ignores the return value.
    """
    username = form.username.data
    email = form.email.data
    # Fix: the original used the Python `or` operator on column
    # expressions, which evaluates truthiness and effectively queried on
    # username only; `|` builds the intended SQL OR.
    existing_user = User.query.filter(
        (User.username == username) | (User.email == email)
    ).first()
    if existing_user:
        flash(_l(f'{username} ({email}) already created!'), 'info')
        return False
    now = dt.now().replace(second=0, microsecond=0)
    new_user = User(
        username=username,
        email=email,
        created=now,
        token=token_urlsafe(),
        token_expiration=dt.now()
    )
    new_user.set_password(form.password.data)
    db.session.add(new_user)
    db.session.commit()
    # Flash success only after the commit actually succeeded.
    flash(_l(f'{username} you are registered now'), 'success')
    logger.info('Form action')
    return True
@bp.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user; authenticated visitors go straight to index."""
    from urllib.parse import urlparse

    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if user is None or not user.check_password(form.password.data):
            # Deliberately vague: don't reveal which credential was wrong.
            flash(_l('Invalid username or password'), 'info')
            return redirect(url_for('auth.login'))
        login_user(user)
        next_page = request.args.get('next')
        # Security fix: only honour relative `next` targets; an absolute
        # URL here is an open redirect an attacker can abuse in phishing.
        if not next_page or urlparse(next_page).netloc != '':
            next_page = url_for('main.index')
        return redirect(next_page)
    return render_template('auth/login.html', form=form)
@bp.route('/logout')
def logout():
    """End the current user's session and return to the login screen."""
    logout_user()
    flash("You are logged out.", "success")
    login_page = url_for('auth.login')
    return redirect(login_page)
@bp.route('/register', methods=['GET', 'POST'])
def register():
    """Show the registration form and create the account on submission."""
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    registration_form = RegisterForm()
    if not registration_form.validate_on_submit():
        # GET request or failed validation: (re)render the form.
        return render_template('auth/register.html', form=registration_form)
    create_user(registration_form)
    return redirect(url_for('auth.login'))
@bp.route("/reset_password", methods=['GET', 'POST'])
def reset_password():
    """Request a password reset: e-mail a tokenized reset link to the user."""
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    form = ResetPasswordReq()
    if form.validate_on_submit():
        email = form.email.data
        user = User.query.filter_by(email=email).first()
        if user:
            # Presumably (re)issues the user's reset token and refreshes
            # its expiration — TODO confirm against the User model.
            token = user.verify_expiration_token()
            db.session.commit()
            send_email(
                _l('Request change password'),
                sender=current_app.config['ADMINS'][0],
                recipients=[user.email],
                text_body=render_template(
                    'email/reset_password.txt',
                    token=token),
                html_body=render_template(
                    'email/reset_password.html',
                    token=token)
            )
            flash("Email sent, check your mail now!", "info")
            return redirect(url_for('auth.login'))
        # NOTE(review): telling the visitor an email is unregistered
        # enables account enumeration; consider a generic message.
        flash("This email not registered", "info")
    return render_template('auth/reset_password_req.html', form=form)
@bp.route("/reset_password_token/<token>", methods=['GET', 'POST'])
def reset_password_token(token):
    """Set a new password for the account matching the reset *token*."""
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    form = ResetPassword()
    if form.validate_on_submit():
        user = User.query.filter_by(token=token).first()
        # NOTE(review): token_expiration is never checked here and the
        # token is not invalidated after use — confirm whether expiry
        # enforcement belongs in this view or in the User model.
        if user:
            user.set_password(form.password.data)
            db.session.commit()
            flash("Password changed!", "success")
            return redirect(url_for('auth.login'))
        # NOTE(review): an unknown token silently re-renders the form
        # with no user feedback.
    return render_template('auth/reset_password.html', form=form)
import logging
from secrets import token_urlsafe
from flask import (
request,
render_template,
flash,
redirect,
Blueprint,
url_for,
current_app
)
from flask_babel import lazy_gettext as _l
from flask_login import current_user, logout_user, login_user
from {{cookiecutter.app_name}}.extensions import db
from {{cookiecutter.app_name}}.forms import LoginForm, RegisterForm, ResetPasswordReq, ResetPassword
from {{cookiecutter.app_name}}.models import User
from {{cookiecutter.app_name}}.email import send_email
logger = logging.getLogger(__name__)
bp = Blueprint('auth', __name__, url_prefix="/auth", static_folder="../static")
def create_user(form):
username = form.username.data
email = form.email.data
existing_user = User.query.filter(
User.username == username or User.email == email
).first()
if existing_user:
flash(_l(f'{username} ({email}) already created!'), 'info')
return redirect(url_for('auth.login'))
else:
now = dt.now().replace(second=0, microsecond=0)
new_user = User(
username=username,
email=email,
created=now,
token=token_urlsafe(),
token_expiration=dt.now()
)
new_user.set_password(form.password.data)
flash(_l(f'{username} you are registered now'), 'success')
db.session.add(new_user)
db.session.commit()
logger.info('Form action')
return True
@bp.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash(_l('Invalid username or password'), 'info')
return redirect(url_for('auth.login'))
login_user(user)
next_page = request.args.get('next')
return redirect(next_page or url_for('main.index'))
return render_template('auth/login.html', form=form)
@bp.route('/logout')
def logout():
    """Terminate the current session and return to the login page."""
    logout_user()
    flash("You are logged out.", "success")
    return redirect(url_for('auth.login'))
@bp.route('/register', methods=['GET', 'POST'])
def register():
    """Show the registration form and create the account on submit."""
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    form = RegisterForm()
    if not form.validate_on_submit():
        # Initial GET, or a POST that failed validation.
        return render_template('auth/register.html', form=form)
    create_user(form)
    return redirect(url_for('auth.login'))
@bp.route("/reset_password", methods=['GET', 'POST'])
def reset_password():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = ResetPasswordReq()
if form.validate_on_submit():
email = form.email.data
user = User.query.filter_by(email=email).first()
if user:
token = user.verify_expiration_token()
db.session.commit()
send_email(
_l('Request change password'),
sender=current_app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template(
'email/reset_password.txt',
token=token),
html_body=render_template(
'email/reset_password.html',
token=token)
)
flash("Email sent, check your mail now!", "info")
return redirect(url_for('auth.login'))
flash("This email not registered", "info")
return render_template('auth/reset_password_req.html', form=form)
@bp.route("/reset_password_token/<token>", methods=['GET', 'POST'])
def reset_password_token(token):
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = ResetPassword()
if form.validate_on_submit():
user = User.query.filter_by(token=token).first()
if user:
user.set_password(form.password.data)
db.session.commit()
flash("Password changed!", "success")
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form) | 0.321993 | 0.057838 |
__all__ = [
"End",
"Byte",
"Short",
"Int",
"Long",
"Float",
"Double",
"ByteArray",
"String",
"List",
"Compound",
"IntArray",
"LongArray",
"EndInstantiation",
"OutOfRange",
"IncompatibleItemType",
"CastError",
]
from struct import Struct, error as StructError
import numpy as np
from .literal.serializer import serialize_tag
# Struct formats used to pack and unpack numeric values
def get_format(fmt, string):
    """Build a mapping of byte-order name to format instance.

    Applies *fmt* (e.g. ``Struct`` or ``np.dtype``) to the big-endian
    and little-endian variants of *string* and keys them by order name.
    """
    orders = {"big": ">", "little": "<"}
    return {name: fmt(prefix + string) for name, prefix in orders.items()}
BYTE = get_format(Struct, "b")  # signed 8-bit
SHORT = get_format(Struct, "h")  # signed 16-bit
USHORT = get_format(Struct, "H")  # unsigned 16-bit (string length prefix)
INT = get_format(Struct, "i")  # signed 32-bit
LONG = get_format(Struct, "q")  # signed 64-bit
FLOAT = get_format(Struct, "f")  # single precision
DOUBLE = get_format(Struct, "d")  # double precision
# Custom errors
class EndInstantiation(TypeError):
    """Raised when trying to instantiate an `End` tag."""

    def __init__(self):
        message = "End tags can't be instantiated"
        super().__init__(message)
class OutOfRange(ValueError):
    """Raised when a numeric value is out of range."""

    def __init__(self, value):
        message = "{!r} is out of range".format(value)
        super().__init__(message)
class IncompatibleItemType(TypeError):
    """Raised when a list item is incompatible with the subtype of the list."""

    def __init__(self, item, subtype):
        message = "{!r} should be a {} tag".format(item, subtype.__name__)
        super().__init__(message)
        # Keep the offending item and the expected subtype for callers.
        self.item = item
        self.subtype = subtype
class CastError(ValueError):
    """Raised when an object couldn't be casted to the appropriate tag type."""

    def __init__(self, obj, tag_type):
        message = "Couldn't cast {!r} to {}".format(obj, tag_type.__name__)
        super().__init__(message)
        # Keep the failing object and the target tag type for callers.
        self.obj = obj
        self.tag_type = tag_type
# Read/write helpers for numeric and string values
def read_numeric(fmt, buff, byteorder="big"):
    """Read a numeric value from a file-like object."""
    try:
        chosen = fmt[byteorder]
        size = chosen.size
        return chosen.unpack(buff.read(size))[0]
    except StructError:
        # Truncated stream: fall back to zero instead of raising.
        return 0
    except KeyError as exc:
        raise ValueError("Invalid byte order") from exc
def write_numeric(fmt, value, buff, byteorder="big"):
    """Write a numeric value to a file-like object."""
    try:
        chosen = fmt[byteorder]
    except KeyError as exc:
        raise ValueError("Invalid byte order") from exc
    buff.write(chosen.pack(value))
def read_string(buff, byteorder="big"):
    """Read a length-prefixed utf-8 string from a file-like object."""
    size = read_numeric(USHORT, buff, byteorder)
    payload = buff.read(size)
    return payload.decode("utf-8")
def write_string(value, buff, byteorder="big"):
    """Write a length-prefixed utf-8 string to a file-like object."""
    encoded = value.encode("utf-8")
    write_numeric(USHORT, len(encoded), buff, byteorder)
    buff.write(encoded)
# Tag definitions
class Base:
    """Base class inherited by all nbt tags.

    Not meant to be instantiated directly. Subclasses that define a tag
    id are expected to override the `parse` classmethod and the `write`
    method.

    Class attributes:
        all_tags -- Dictionnary mapping tag ids to child classes
    """

    __slots__ = ()
    all_tags = {}
    tag_id = None
    serializer = None

    def __init_subclass__(cls):
        # Register the subclass under its tag id; first definition wins.
        registry = cls.all_tags
        if cls.tag_id is not None and cls.tag_id not in registry:
            registry[cls.tag_id] = cls

    @classmethod
    def get_tag(cls, tag_id):
        """Return the class corresponding to the given tag id."""
        return cls.all_tags[tag_id]

    @classmethod
    def parse(cls, buff, byteorder="big"):
        """Parse data from a file-like object and return a tag instance."""

    def write(self, buff, byteorder="big"):
        """Write the binary representation of the tag to a file-like object."""

    def match(self, other):
        """Check whether the tag recursively matches a specific subset of values."""
        if hasattr(other, "tag_id") and self.tag_id != other.tag_id:
            return False
        return self == other

    def snbt(self):
        """Return the snbt literal corresponding to the tag."""
        return serialize_tag(self)

    def __repr__(self):
        name = self.__class__.__name__
        if self.tag_id is None:
            return super().__repr__()
        return f"{name}({super().__repr__()})"
class End(Base):
    """Nbt tag used to mark the end of a compound tag."""

    __slots__ = ()
    tag_id = 0

    def __new__(cls, *args, **kwargs):
        # End tags only exist as a binary terminator, never as values.
        raise EndInstantiation()
class Numeric(Base):
    """Intermediate class that represents a numeric nbt tag.
    This class is not meant to be instantiated. It inherits from the
    `Base` class and defines an additional class attribute `fmt`.
    Derived classes must assign this attribute to the struct format
    corresponding to the tag type. They must also inherit from a builtin
    numeric type (`int` or `float`).
    The class overrides `parse` and `write` and uses the `fmt`
    attribute to pack and unpack the tag value.
    Class attributes:
        fmt -- The struct format used to pack and unpack the tag value
        suffix -- The literal suffix appended in snbt output (e.g. "b")
    """
    __slots__ = ()
    serializer = "numeric"
    fmt = None
    suffix = ""
    @classmethod
    def parse(cls, buff, byteorder="big"):
        # Unpack one value from the stream and wrap it in the tag type.
        return cls(read_numeric(cls.fmt, buff, byteorder))
    def write(self, buff, byteorder="big"):
        # The tag instance *is* the numeric value being packed.
        write_numeric(self.fmt, self, buff, byteorder)
class NumericInteger(Numeric, int):
    """Intermediate class that represents a numeric integer nbt tag.
    This class is not meant to be instantiated. It inherits from the
    `Base` class and `int` and defines additional class attribute.
    Derived class will inherit the `as_unsigned` property and
    `from_unsigned` class method.
    Class attributes:
        range -- The supported range of values
        mask -- The largest number that can be represented
        bits -- The bit length of the largest number that can be represented
    """
    __slots__ = ()
    range = None
    mask = None
    bits = None
    def __init_subclass__(cls):
        super().__init_subclass__()
        # Half the unsigned capacity of the struct format: 2**(bits - 1).
        limit = 2 ** (8 * cls.fmt["big"].size - 1)
        cls.range = range(-limit, limit)
        cls.mask = limit * 2 - 1
        cls.bits = cls.mask.bit_length()
    def __new__(cls, *args, **kwargs):
        self = super().__new__(cls, *args, **kwargs)
        # Enforce the signed range of the underlying binary format.
        if int(self) not in cls.range:
            raise OutOfRange(self)
        return self
    @property
    def as_unsigned(self):
        """Interpret the value of the tag as an unsigned integer."""
        return self & self.mask
    @classmethod
    def from_unsigned(cls, value):
        """Create a tag from an unsigned integer."""
        # `+` binds tighter than `&`, so this reads
        # value - ((value * 2) & (mask + 1)): it subtracts 2**bits exactly
        # when the sign bit of `value` is set (two's complement decode).
        return cls(value - (value * 2 & cls.mask + 1))
class Byte(NumericInteger):
    """Nbt tag representing a signed byte."""
    __slots__ = ()
    tag_id = 1
    fmt = BYTE
    suffix = "b"  # snbt literals look like 12b
class Short(NumericInteger):
    """Nbt tag representing a signed 16 bit integer."""
    __slots__ = ()
    tag_id = 2
    fmt = SHORT
    suffix = "s"  # snbt literals look like 12s
class Int(NumericInteger):
    """Nbt tag representing a signed 32 bit integer."""
    __slots__ = ()
    tag_id = 3
    # No suffix: inherits "" from Numeric, so snbt int literals are bare.
    fmt = INT
class Long(NumericInteger):
    """Nbt tag representing a signed 64 bit integer."""
    __slots__ = ()
    tag_id = 4
    fmt = LONG
    suffix = "L"  # snbt literals look like 12L
class Float(Numeric, float):
    """Nbt tag representing a single-precision floating point number."""
    __slots__ = ()
    tag_id = 5
    fmt = FLOAT
    suffix = "f"  # snbt literals look like 1.5f
class Double(Numeric, float):
    """Nbt tag representing a double-precision floating point number."""
    __slots__ = ()
    tag_id = 6
    fmt = DOUBLE
    suffix = "d"  # snbt literals look like 1.5d
class Array(Base, np.ndarray):
    """Intermediate class that represents an array nbt tag.
    This class is not meant to be instantiated. It inherits from the
    `Base` class and the numpy `ndarray` type.
    Class attributes:
        item_type -- The numpy array data type
        array_prefix -- The literal array prefix
        wrapper -- The tag used to wrap the integer
    """
    __slots__ = ()
    serializer = "array"
    item_type = None
    array_prefix = None
    wrapper = None
    def __new__(cls, value=None, *, length=0, byteorder="big"):
        item_type = cls.item_type[byteorder]
        if value is None:
            # No initial value: zero-filled array of the requested length.
            return np.zeros((length,), item_type).view(cls)
        return np.asarray(value, item_type).view(cls)
    @classmethod
    def parse(cls, buff, byteorder="big"):
        item_type = cls.item_type[byteorder]
        # The length prefix counts items, not bytes.
        data = buff.read(read_numeric(INT, buff, byteorder) * item_type.itemsize)
        return cls(np.frombuffer(data, item_type), byteorder=byteorder)
    def write(self, buff, byteorder="big"):
        write_numeric(INT, len(self), buff, byteorder)
        # Swap bytes when the in-memory dtype doesn't match the target order.
        array = self if self.item_type[byteorder] is self.dtype else self.byteswap()
        buff.write(array.tobytes())
    def __getitem__(self, index):
        if isinstance(index, slice):
            return super().__getitem__(index)
        # Scalar reads come back wrapped in the matching integer tag type.
        return int.__new__(self.wrapper, super().__getitem__(index))
    def __bool__(self):
        # NOTE(review): returns True for an empty array (all([]) is True);
        # presumably intentional -- confirm before relying on truthiness.
        return all(self)
    def __repr__(self):
        return f'{self.__class__.__name__}([{", ".join(map(str, self))}])'
class ByteArray(Array):
    """Nbt tag representing an array of signed bytes."""
    __slots__ = ()
    tag_id = 7
    item_type = get_format(np.dtype, "b")  # numpy int8
    array_prefix = "B"  # snbt literals look like [B; 1b, 2b]
    wrapper = Byte
class String(Base, str):
    """Nbt tag representing a string."""

    __slots__ = ()
    tag_id = 8
    serializer = "string"

    @classmethod
    def parse(cls, buff, byteorder="big"):
        """Read a length-prefixed utf-8 string and wrap it in the tag."""
        value = read_string(buff, byteorder)
        return cls(value)

    def write(self, buff, byteorder="big"):
        """Write the tag as a length-prefixed utf-8 string."""
        write_string(self, buff, byteorder)
class ListMeta(type):
    """Allows class indexing to create and return subclasses on the fly.
    This metaclass is used by the List tag class definition. It allows
    the class to create and return subclasses of itself when it is
    indexed with a tag type. If a subclass of the specified type has
    already been created, the existing subclass will be returned.
    """
    def __init__(cls, name, bases, dct):
        super().__init__(name, bases, dct)
        # Cache of specialized subclasses, keyed by subtype.
        cls.variants = {}
    def __getitem__(cls, item):
        # List[End] is just the generic, unspecialized List.
        if item is End:
            return List
        try:
            return List.variants[item]
        except KeyError:
            # First request for this subtype: build the subclass and
            # memoize it so List[X] is always the same class object.
            variant = type(
                f"{List.__name__}[{item.__name__}]",
                (List,),
                {"__slots__": (), "subtype": item},
            )
            List.variants[item] = variant
            return variant
class List(Base, list, metaclass=ListMeta):
    """Nbt tag representing a list of other nbt tags.
    The list can only hold a single type of tag. To enforce this
    constraint, the class must be subclassed and define an appropriate
    subtype. The `ListMeta` metaclass is used to seamlessly implement
    this operation. This means that accessing List[TagName] will return
    a subclass of List with the subtype TagName.
    On top of that, List inherits from Base and the python builtin list
    type. This means that all the usual list operations are supported
    on list tag instances. Mutating operations have been overwritten to
    include an isinstance() check. For instance, when calling the
    `append` method, the appended item will be wrapped by the defined
    subtype if isinstance(item, TagName) returns False.
    Class attributes:
        subtype -- The nbt tag that will be used to wrap list items
    """
    __slots__ = ()
    tag_id = 9
    serializer = "list"
    subtype = End
    def __new__(cls, iterable=()):
        if cls.subtype is End:
            # No explicit subtype: materialize the iterable so it can be
            # scanned here and re-used by __init__, then switch to the
            # specialized subclass inferred from the items.
            iterable = tuple(iterable)
            subtype = cls.infer_list_subtype(iterable)
            cls = cls[subtype]
        return super().__new__(cls, iterable)
    def __init__(self, iterable=()):
        # Every incoming item is wrapped by the subtype on the way in.
        super().__init__(map(self.cast_item, iterable))
    @staticmethod
    def infer_list_subtype(items):
        """Infer a list subtype from a collection of items."""
        subtype = End
        for item in items:
            item_type = type(item)
            if not issubclass(item_type, Base):
                continue
            if subtype is End:
                subtype = item_type
            # A non-list subtype is final; nested list subtypes may still
            # be refined by later items.
            if not issubclass(subtype, List):
                return subtype
            elif subtype is not item_type:
                # Two different list types: walk down both nesting levels
                # in lockstep to find a common generic List[...] wrapper.
                stype, itype = subtype, item_type
                generic = List
                while issubclass(stype, List) and issubclass(itype, List):
                    stype, itype = stype.subtype, itype.subtype
                    generic = List[generic]
                if stype is End:
                    subtype = item_type
                elif itype is not End:
                    return generic.subtype
        return subtype
    @classmethod
    def parse(cls, buff, byteorder="big"):
        # Payload starts with the subtype id followed by the item count.
        tag = cls.get_tag(read_numeric(BYTE, buff, byteorder))
        length = read_numeric(INT, buff, byteorder)
        return cls[tag](tag.parse(buff, byteorder) for _ in range(length))
    def write(self, buff, byteorder="big"):
        write_numeric(BYTE, self.subtype.tag_id, buff, byteorder)
        write_numeric(INT, len(self), buff, byteorder)
        for elem in self:
            elem.write(buff, byteorder)
    def match(self, other):
        # Subset semantics: every item of `other` must be matched by at
        # least one item of this list.
        if not isinstance(other, list):
            return False
        if not other:
            return not self
        return all(any(item.match(other_item) for item in self) for other_item in other)
    def get(self, index, default=None):
        return (self.get_all(index) or [default])[0]
    def get_all(self, index):
        # `index` may be a plain integer or an nbt path object.
        try:
            return (index.get(self) if hasattr(index, "is_nbt_path") else
                    [super().__getitem__(index)])
        except IndexError:
            return []
    def __getitem__(self, index):
        if hasattr(index, "is_nbt_path"):
            values = index.get(self)
            if not values:
                raise IndexError(index)
            return values[0]
        return super().__getitem__(index)
    def __setitem__(self, index, value):
        if hasattr(index, "is_nbt_path"):
            index.set(self, value)
        else:
            # Slice assignment casts each item; scalar casts one value.
            super().__setitem__(index, [self.cast_item(item) for item in value]
                                if isinstance(index, slice) else self.cast_item(value))
    def __delitem__(self, index):
        if hasattr(index, "is_nbt_path"):
            index.delete(self)
        else:
            super().__delitem__(index)
    def append(self, value):
        super().append(self.cast_item(value))
    def extend(self, iterable):
        super().extend(map(self.cast_item, iterable))
    def insert(self, index, value):
        super().insert(index, self.cast_item(value))
    @classmethod
    def cast_item(cls, item):
        """Cast list item to the appropriate tag type."""
        if not isinstance(item, cls.subtype):
            # An existing tag of an unrelated registered type can't be
            # silently converted; reject it instead of re-wrapping.
            incompatible = isinstance(item, Base) and not any(
                issubclass(cls.subtype, tag_type) and isinstance(item, tag_type)
                for tag_type in cls.all_tags.values()
            )
            if incompatible:
                raise IncompatibleItemType(item, cls.subtype)
            try:
                return cls.subtype(item)
            except EndInstantiation:
                raise ValueError(
                    "List tags without an explicit subtype must either be empty or "
                    "instantiated with elements from which a subtype can be inferred"
                ) from None
            except (IncompatibleItemType, CastError):
                raise
            except Exception as exc:
                raise CastError(item, cls.subtype) from exc
        return item
class Compound(Base, dict):
    """Nbt tag that represents a mapping of strings to other nbt tags.
    The Compound class inherits both from Base and the python builtin
    dict type. This means that all the operations that are usually
    available on python dictionaries are supported.
    Class attributes:
        end_tag -- Bytes used to mark the end of the compound
    """
    __slots__ = ()
    tag_id = 10
    serializer = "compound"
    end_tag = b"\x00"
    @classmethod
    def parse(cls, buff, byteorder="big"):
        self = cls()
        # Entries are (tag id, name, payload) triplets until id 0 (End).
        tag_id = read_numeric(BYTE, buff, byteorder)
        while tag_id != 0:
            name = read_string(buff, byteorder)
            self[name] = cls.get_tag(tag_id).parse(buff, byteorder)
            tag_id = read_numeric(BYTE, buff, byteorder)
        return self
    def write(self, buff, byteorder="big"):
        for name, tag in self.items():
            write_numeric(BYTE, tag.tag_id, buff, byteorder)
            write_string(name, buff, byteorder)
            tag.write(buff, byteorder)
        buff.write(self.end_tag)
    def match(self, other):
        # Subset semantics: all keys of `other` must exist here and match
        # recursively.
        return (
            isinstance(other, dict)
            and self.keys() >= other.keys()
            and all(self[key].match(value) for key, value in other.items())
        )
    def get(self, key, default=None):
        # `key` may be a plain string or an nbt path object.
        if hasattr(key, "is_nbt_path"):
            return (key.get(self) or [default])[0]
        return super().get(key, default)
    def get_all(self, key):
        try:
            return (key.get(self) if hasattr(key, "is_nbt_path") else
                    [super().__getitem__(key)])
        except KeyError:
            return []
    def __contains__(self, item):
        if hasattr(item, "is_nbt_path"):
            return bool(item.get(self))
        return super().__contains__(item)
    def __getitem__(self, key):
        if hasattr(key, "is_nbt_path"):
            values = key.get(self)
            if not values:
                raise KeyError(key)
            return values[0]
        return super().__getitem__(key)
    def __setitem__(self, key, value):
        if hasattr(key, "is_nbt_path"):
            key.set(self, value)
        else:
            super().__setitem__(key, value)
    def __delitem__(self, key):
        if hasattr(key, "is_nbt_path"):
            key.delete(self)
        else:
            super().__delitem__(key)
    def merge(self, other):
        """Recursively merge tags from another compound."""
        for key, value in other.items():
            # Nested compounds merge in place; anything else overwrites.
            if key in self and (
                isinstance(self[key], Compound) and isinstance(value, dict)
            ):
                self[key].merge(value)
            else:
                self[key] = value
    def with_defaults(self, other):
        """Return a new compound with recursively applied default values."""
        result = Compound(other)
        for key, value in self.items():
            # Own values win; nested compounds recurse into the defaults.
            if key in result and (
                isinstance(value, Compound) and isinstance(result[key], dict)
            ):
                value = value.with_defaults(result[key])
            result[key] = value
        return result
class IntArray(Array):
    """Nbt tag representing an array of signed integers."""
    __slots__ = ()
    tag_id = 11
    item_type = get_format(np.dtype, "i4")  # numpy int32
    array_prefix = "I"  # snbt literals look like [I; 1, 2]
    wrapper = Int
class LongArray(Array):
    """Nbt tag representing an array of signed longs."""
    __slots__ = ()
    tag_id = 12
    item_type = get_format(np.dtype, "i8")  # numpy int64
    array_prefix = "L"  # snbt literals look like [L; 1L, 2L]
wrapper = Long | nbtlib/tag.py | __all__ = [
"End",
"Byte",
"Short",
"Int",
"Long",
"Float",
"Double",
"ByteArray",
"String",
"List",
"Compound",
"IntArray",
"LongArray",
"EndInstantiation",
"OutOfRange",
"IncompatibleItemType",
"CastError",
]
from struct import Struct, error as StructError
import numpy as np
from .literal.serializer import serialize_tag
# Struct formats used to pack and unpack numeric values
def get_format(fmt, string):
"""Return a dictionary containing a format for each byte order."""
return {"big": fmt(">" + string), "little": fmt("<" + string)}
BYTE = get_format(Struct, "b")
SHORT = get_format(Struct, "h")
USHORT = get_format(Struct, "H")
INT = get_format(Struct, "i")
LONG = get_format(Struct, "q")
FLOAT = get_format(Struct, "f")
DOUBLE = get_format(Struct, "d")
# Custom errors
class EndInstantiation(TypeError):
"""Raised when trying to instantiate an `End` tag."""
def __init__(self):
super().__init__("End tags can't be instantiated")
class OutOfRange(ValueError):
"""Raised when a numeric value is out of range."""
def __init__(self, value):
super().__init__(f"{value!r} is out of range")
class IncompatibleItemType(TypeError):
"""Raised when a list item is incompatible with the subtype of the list."""
def __init__(self, item, subtype):
super().__init__(f"{item!r} should be a {subtype.__name__} tag")
self.item = item
self.subtype = subtype
class CastError(ValueError):
"""Raised when an object couldn't be casted to the appropriate tag type."""
def __init__(self, obj, tag_type):
super().__init__(f"Couldn't cast {obj!r} to {tag_type.__name__}")
self.obj = obj
self.tag_type = tag_type
# Read/write helpers for numeric and string values
def read_numeric(fmt, buff, byteorder="big"):
"""Read a numeric value from a file-like object."""
try:
fmt = fmt[byteorder]
return fmt.unpack(buff.read(fmt.size))[0]
except StructError:
return 0
except KeyError as exc:
raise ValueError("Invalid byte order") from exc
def write_numeric(fmt, value, buff, byteorder="big"):
"""Write a numeric value to a file-like object."""
try:
buff.write(fmt[byteorder].pack(value))
except KeyError as exc:
raise ValueError("Invalid byte order") from exc
def read_string(buff, byteorder="big"):
"""Read a string from a file-like object."""
length = read_numeric(USHORT, buff, byteorder)
return buff.read(length).decode("utf-8")
def write_string(value, buff, byteorder="big"):
"""Write a string to a file-like object."""
data = value.encode("utf-8")
write_numeric(USHORT, len(data), buff, byteorder)
buff.write(data)
# Tag definitions
class Base:
"""Base class inherited by all nbt tags.
This class is not meant to be instantiated. Derived classes that
define a tag id are required to override the `parse` classmethod and
the `write` method.
Class attributes:
all_tags -- Dictionnary mapping tag ids to child classes
"""
__slots__ = ()
all_tags = {}
tag_id = None
serializer = None
def __init_subclass__(cls):
# Add class to the `all_tags` dictionnary if it has a tag id
if cls.tag_id is not None and cls.tag_id not in cls.all_tags:
cls.all_tags[cls.tag_id] = cls
@classmethod
def get_tag(cls, tag_id):
"""Return the class corresponding to the given tag id."""
return cls.all_tags[tag_id]
@classmethod
def parse(cls, buff, byteorder="big"):
"""Parse data from a file-like object and return a tag instance."""
def write(self, buff, byteorder="big"):
"""Write the binary representation of the tag to a file-like object."""
def match(self, other):
"""Check whether the tag recursively matches a specific subset of values."""
if hasattr(other, "tag_id") and self.tag_id != other.tag_id:
return False
return self == other
def snbt(self):
return serialize_tag(self)
def __repr__(self):
if self.tag_id is not None:
return f"{self.__class__.__name__}({super().__repr__()})"
return super().__repr__()
class End(Base):
"""Nbt tag used to mark the end of a compound tag."""
__slots__ = ()
tag_id = 0
def __new__(cls, *args, **kwargs):
raise EndInstantiation()
class Numeric(Base):
"""Intermediate class that represents a numeric nbt tag.
This class is not meant to be instantiated. It inherits from the
`Base` class and defines an additional class attribute `fmt`.
Derived classes must assign this attribute to the struct format
corresponding to the tag type. They must also inherit from a builtin
numeric type (`int` or `float`).
The class overrides `parse` and `write` and uses the `fmt`
attribute to pack and unpack the tag value.
Class attributes:
fmt -- The struct format used to pack and unpack the tag value
"""
__slots__ = ()
serializer = "numeric"
fmt = None
suffix = ""
@classmethod
def parse(cls, buff, byteorder="big"):
return cls(read_numeric(cls.fmt, buff, byteorder))
def write(self, buff, byteorder="big"):
write_numeric(self.fmt, self, buff, byteorder)
class NumericInteger(Numeric, int):
"""Intermediate class that represents a numeric integer nbt tag.
This class is not meant to be instantiated. It inherits from the
`Base` class and `int` and defines additional class attribute.
Derived class will inherit the `as_unsigned` property and
`from_unsigned` class method.
Class attributes:
range -- The supported range of values
mask -- The largest number that can be represented
bits -- The bit length of the largest number that can be represented
"""
__slots__ = ()
range = None
mask = None
bits = None
def __init_subclass__(cls):
super().__init_subclass__()
limit = 2 ** (8 * cls.fmt["big"].size - 1)
cls.range = range(-limit, limit)
cls.mask = limit * 2 - 1
cls.bits = cls.mask.bit_length()
def __new__(cls, *args, **kwargs):
self = super().__new__(cls, *args, **kwargs)
if int(self) not in cls.range:
raise OutOfRange(self)
return self
@property
def as_unsigned(self):
"""Interpret the value of the tag as an unsigned integer."""
return self & self.mask
@classmethod
def from_unsigned(cls, value):
"""Create a tag from an unsigned integer."""
return cls(value - (value * 2 & cls.mask + 1))
class Byte(NumericInteger):
"""Nbt tag representing a signed byte."""
__slots__ = ()
tag_id = 1
fmt = BYTE
suffix = "b"
class Short(NumericInteger):
"""Nbt tag representing a signed 16 bit integer."""
__slots__ = ()
tag_id = 2
fmt = SHORT
suffix = "s"
class Int(NumericInteger):
"""Nbt tag representing a signed 32 bit integer."""
__slots__ = ()
tag_id = 3
fmt = INT
class Long(NumericInteger):
"""Nbt tag representing a signed 64 bit integer."""
__slots__ = ()
tag_id = 4
fmt = LONG
suffix = "L"
class Float(Numeric, float):
"""Nbt tag representing a single-precision floating point number."""
__slots__ = ()
tag_id = 5
fmt = FLOAT
suffix = "f"
class Double(Numeric, float):
"""Nbt tag representing a double-precision floating point number."""
__slots__ = ()
tag_id = 6
fmt = DOUBLE
suffix = "d"
class Array(Base, np.ndarray):
"""Intermediate class that represents an array nbt tag.
This class is not meant to be instantiated. It inherits from the
`Base` class and the numpy `ndarray` type.
Class attributes:
item_type -- The numpy array data type
array_prefix -- The literal array prefix
wrapper -- The tag used to wrap the integer
"""
__slots__ = ()
serializer = "array"
item_type = None
array_prefix = None
wrapper = None
def __new__(cls, value=None, *, length=0, byteorder="big"):
item_type = cls.item_type[byteorder]
if value is None:
return np.zeros((length,), item_type).view(cls)
return np.asarray(value, item_type).view(cls)
@classmethod
def parse(cls, buff, byteorder="big"):
item_type = cls.item_type[byteorder]
data = buff.read(read_numeric(INT, buff, byteorder) * item_type.itemsize)
return cls(np.frombuffer(data, item_type), byteorder=byteorder)
def write(self, buff, byteorder="big"):
write_numeric(INT, len(self), buff, byteorder)
array = self if self.item_type[byteorder] is self.dtype else self.byteswap()
buff.write(array.tobytes())
def __getitem__(self, index):
if isinstance(index, slice):
return super().__getitem__(index)
return int.__new__(self.wrapper, super().__getitem__(index))
def __bool__(self):
return all(self)
def __repr__(self):
return f'{self.__class__.__name__}([{", ".join(map(str, self))}])'
class ByteArray(Array):
"""Nbt tag representing an array of signed bytes."""
__slots__ = ()
tag_id = 7
item_type = get_format(np.dtype, "b")
array_prefix = "B"
wrapper = Byte
class String(Base, str):
"""Nbt tag representing a string."""
__slots__ = ()
tag_id = 8
serializer = "string"
@classmethod
def parse(cls, buff, byteorder="big"):
return cls(read_string(buff, byteorder))
def write(self, buff, byteorder="big"):
write_string(self, buff, byteorder)
class ListMeta(type):
"""Allows class indexing to create and return subclasses on the fly.
This metaclass is used by the List tag class definition. It allows
the class to create and return subclasses of itself when it is
indexed with a tag type. If a subclass of the specified type has
already been created, the existing subclass will be returned.
"""
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
cls.variants = {}
def __getitem__(cls, item):
if item is End:
return List
try:
return List.variants[item]
except KeyError:
variant = type(
f"{List.__name__}[{item.__name__}]",
(List,),
{"__slots__": (), "subtype": item},
)
List.variants[item] = variant
return variant
class List(Base, list, metaclass=ListMeta):
"""Nbt tag representing a list of other nbt tags.
The list can only hold a single type of tag. To enforce this
constraint, the class must be subclassed and define an appropriate
subtype. The `ListMeta` metaclass is used to seamlessly implement
this operation. This means that accessing List[TagName] will return
a subclass of List with the subtype TagName.
On top of that, List inherits from Base and the python builtin list
type. This means that all the usual list operations are supported
on list tag instances. Mutating operations have been overwritten to
include an isinstance() check. For instance, when calling the
`append` method, the appended item will be wrapped by the defined
subtype if isinstance(item, TagName) returns False.
Class attributes:
subtype -- The nbt tag that will be used to wrap list items
"""
__slots__ = ()
tag_id = 9
serializer = "list"
subtype = End
def __new__(cls, iterable=()):
if cls.subtype is End:
iterable = tuple(iterable)
subtype = cls.infer_list_subtype(iterable)
cls = cls[subtype]
return super().__new__(cls, iterable)
def __init__(self, iterable=()):
super().__init__(map(self.cast_item, iterable))
@staticmethod
def infer_list_subtype(items):
"""Infer a list subtype from a collection of items."""
subtype = End
for item in items:
item_type = type(item)
if not issubclass(item_type, Base):
continue
if subtype is End:
subtype = item_type
if not issubclass(subtype, List):
return subtype
elif subtype is not item_type:
stype, itype = subtype, item_type
generic = List
while issubclass(stype, List) and issubclass(itype, List):
stype, itype = stype.subtype, itype.subtype
generic = List[generic]
if stype is End:
subtype = item_type
elif itype is not End:
return generic.subtype
return subtype
@classmethod
def parse(cls, buff, byteorder="big"):
tag = cls.get_tag(read_numeric(BYTE, buff, byteorder))
length = read_numeric(INT, buff, byteorder)
return cls[tag](tag.parse(buff, byteorder) for _ in range(length))
def write(self, buff, byteorder="big"):
write_numeric(BYTE, self.subtype.tag_id, buff, byteorder)
write_numeric(INT, len(self), buff, byteorder)
for elem in self:
elem.write(buff, byteorder)
def match(self, other):
if not isinstance(other, list):
return False
if not other:
return not self
return all(any(item.match(other_item) for item in self) for other_item in other)
def get(self, index, default=None):
return (self.get_all(index) or [default])[0]
def get_all(self, index):
try:
return (index.get(self) if hasattr(index, "is_nbt_path") else
[super().__getitem__(index)])
except IndexError:
return []
def __getitem__(self, index):
if hasattr(index, "is_nbt_path"):
values = index.get(self)
if not values:
raise IndexError(index)
return values[0]
return super().__getitem__(index)
def __setitem__(self, index, value):
if hasattr(index, "is_nbt_path"):
index.set(self, value)
else:
super().__setitem__(index, [self.cast_item(item) for item in value]
if isinstance(index, slice) else self.cast_item(value))
def __delitem__(self, index):
if hasattr(index, "is_nbt_path"):
index.delete(self)
else:
super().__delitem__(index)
def append(self, value):
super().append(self.cast_item(value))
def extend(self, iterable):
super().extend(map(self.cast_item, iterable))
def insert(self, index, value):
super().insert(index, self.cast_item(value))
@classmethod
def cast_item(cls, item):
"""Cast list item to the appropriate tag type."""
if not isinstance(item, cls.subtype):
incompatible = isinstance(item, Base) and not any(
issubclass(cls.subtype, tag_type) and isinstance(item, tag_type)
for tag_type in cls.all_tags.values()
)
if incompatible:
raise IncompatibleItemType(item, cls.subtype)
try:
return cls.subtype(item)
except EndInstantiation:
raise ValueError(
"List tags without an explicit subtype must either be empty or "
"instantiated with elements from which a subtype can be inferred"
) from None
except (IncompatibleItemType, CastError):
raise
except Exception as exc:
raise CastError(item, cls.subtype) from exc
return item
class Compound(Base, dict):
"""Nbt tag that represents a mapping of strings to other nbt tags.
The Compound class inherits both from Base and the python builtin
dict type. This means that all the operations that are usually
available on python dictionaries are supported.
Class attributes:
end_tag -- Bytes used to mark the end of the compound
"""
__slots__ = ()
tag_id = 10
serializer = "compound"
end_tag = b"\x00"
@classmethod
def parse(cls, buff, byteorder="big"):
self = cls()
tag_id = read_numeric(BYTE, buff, byteorder)
while tag_id != 0:
name = read_string(buff, byteorder)
self[name] = cls.get_tag(tag_id).parse(buff, byteorder)
tag_id = read_numeric(BYTE, buff, byteorder)
return self
def write(self, buff, byteorder="big"):
for name, tag in self.items():
write_numeric(BYTE, tag.tag_id, buff, byteorder)
write_string(name, buff, byteorder)
tag.write(buff, byteorder)
buff.write(self.end_tag)
def match(self, other):
return (
isinstance(other, dict)
and self.keys() >= other.keys()
and all(self[key].match(value) for key, value in other.items())
)
def get(self, key, default=None):
if hasattr(key, "is_nbt_path"):
return (key.get(self) or [default])[0]
return super().get(key, default)
def get_all(self, key):
try:
return (key.get(self) if hasattr(key, "is_nbt_path") else
[super().__getitem__(key)])
except KeyError:
return []
def __contains__(self, item):
if hasattr(item, "is_nbt_path"):
return bool(item.get(self))
return super().__contains__(item)
def __getitem__(self, key):
if hasattr(key, "is_nbt_path"):
values = key.get(self)
if not values:
raise KeyError(key)
return values[0]
return super().__getitem__(key)
def __setitem__(self, key, value):
if hasattr(key, "is_nbt_path"):
key.set(self, value)
else:
super().__setitem__(key, value)
def __delitem__(self, key):
if hasattr(key, "is_nbt_path"):
key.delete(self)
else:
super().__delitem__(key)
def merge(self, other):
"""Recursively merge tags from another compound."""
for key, value in other.items():
if key in self and (
isinstance(self[key], Compound) and isinstance(value, dict)
):
self[key].merge(value)
else:
self[key] = value
def with_defaults(self, other):
"""Return a new compound with recursively applied default values."""
result = Compound(other)
for key, value in self.items():
if key in result and (
isinstance(value, Compound) and isinstance(result[key], dict)
):
value = value.with_defaults(result[key])
result[key] = value
return result
class IntArray(Array):
"""Nbt tag representing an array of signed integers."""
__slots__ = ()
tag_id = 11
item_type = get_format(np.dtype, "i4")
array_prefix = "I"
wrapper = Int
class LongArray(Array):
"""Nbt tag representing an array of signed longs."""
__slots__ = ()
tag_id = 12
item_type = get_format(np.dtype, "i8")
array_prefix = "L"
wrapper = Long | 0.893629 | 0.263682 |
from functools import wraps
import logging
import time
from stevedore import ExtensionManager
import numpy as np
logger = logging.getLogger(__name__)
SQRT3 = np.sqrt(3)
SQRT2 = np.sqrt(2)
SQRTPI = np.sqrt(np.pi)
IMAGE_MAX = 255.99999
class NoiseError(Exception):
def __init__(self, noise, thresh):
self.noise = noise
self.thresh = thresh
self.message = "Image too noisy ({} > {})".format(noise, thresh)
class NotSupportedError(Exception):
message = "Method not supported by class"
def logo(version):
logo_text = "\n"
logo_text += " ___ ___ " + '\n'
logo_text += " | \\ | . | " + '\n'
logo_text += " |__/ |__ |__ __ __ " + '\n'
logo_text += " | | | | | | | | |__| " + '\n'
logo_text += " | \\__| | | |__/ | |__ " + '\n'
logo_text += " __/ " + '\n'
logo_text += f"\n Fibrous Tissue Image Toolkit v{version}\n"
return logo_text
def numpy_remove(list1, list2):
"""
numpy_remove(list1, list2)
Deletes overlapping elements of list2 from list1
"""
return np.delete(list1, np.where(np.isin(list1, list2)))
def unit_vector(vector, axis=-1):
"""
unit_vector(vector, axis=-1)
Returns unit vector of vector
"""
vector = np.array(vector)
magnitude_2 = np.resize(
np.sum(vector**2, axis=axis), vector.shape)
u_vector = np.sqrt(vector**2 / magnitude_2) * np.sign(vector)
return u_vector
def label_set(labels, background=0):
"""Return a unique set of non-background values in labels"""
unique_labels = np.unique(labels)
# Remove any labels corresponding to the background
indices = np.where(unique_labels != background)
unique_labels = unique_labels[indices]
return unique_labels
def nanmean(array_like, weights=None):
if weights is None:
weights = np.ones(array_like.shape)
# Ensure None and NaN objects are filtered out. We need to use
# equality comparison for None at each array element here since
# numpy.where cannot handle identity checks
array_like = np.array(
np.where(
array_like == None, np.nan, array_like # noqa: 501
),
dtype=float
)
weights = np.array(
np.where(
weights == None, np.nan, weights # noqa: 501
),
dtype=float
)
indices = ~np.isnan(array_like) * ~np.isnan(weights)
try:
average = np.average(
array_like[indices], weights=weights[indices])
except ZeroDivisionError:
average = None
return average
def ring(image, index, sizes, value):
index = np.array(index)
sizes = np.array(sizes)
for size in sizes:
indices = np.concatenate((index - size, index + size))
if indices[0] >= 0:
start = max([indices[1], 0])
end = min([indices[3], image.shape[1]]) + 1
image[indices[0], start: end] = value
if indices[2] < image.shape[0]:
start = max([indices[1], 0])
end = min([indices[3], image.shape[1]]) + 1
image[indices[2], start: end] = value
if indices[1] >= 0:
start = max([indices[0], 0])
end = min([indices[2], image.shape[0]]) + 1
image[start: end, indices[1]] = value
if indices[3] < image.shape[1]:
start = max([indices[0], 0])
end = min([indices[2], image.shape[0]]) + 1
image[start: end, indices[3]] = value
return image
def clear_border(image, thickness=1):
for i in range(thickness):
image[:, 0 + i] = 0
image[0 + i, :] = 0
image[:, -(1 + i)] = 0
image[-(1 + i), :] = 0
return image
def flatten_list(list_of_lists):
"""Returned a flattened version of a list of lists"""
flat_list = [
val
for sublist in list_of_lists
for val in sublist
]
return flat_list
def matrix_split(matrix, nrows, ncols):
"""Split a matrix into sub-matrices"""
assert matrix.ndim == 2
rows = np.array_split(matrix, ncols, axis=0)
grid = []
for item in rows:
grid += np.array_split(item, nrows, axis=-1)
return grid
def load_plugins():
"""Load PyFibre plugins via Stevedore. """
mgr = ExtensionManager(
namespace='pyfibre.plugins',
invoke_on_load=True
)
plugins = [ext.obj for ext in mgr]
return plugins
def log_time(message):
"""Use as a decorator around a callable to automatically record
elapsed time to the log. Can be personalised with an extra string
message argument
Example
-------
>>> @log_time(name='TEST')
>>> def function(x, y):
>>> return x * y
>>> ...
>>>
>>> function(2, 3)
6
Will produce a log message:
>>>
INFO: TOTAL TEST TIME .. s
"""
def log_time_decorator(func):
"""Decorator around function to be called"""
@wraps(func)
def function_wrapper(*args, **kwargs):
"""Actual wrapper around callable, including log
instructions"""
start = time.time()
result = func(*args, **kwargs)
logger.info(
# f"TOTAL TIME = "
f"TOTAL {message.upper()} TIME = "
f"{round(time.time() - start, 3)} s")
return result
return function_wrapper
return log_time_decorator | pyfibre/utilities.py | from functools import wraps
import logging
import time
from stevedore import ExtensionManager
import numpy as np
logger = logging.getLogger(__name__)
SQRT3 = np.sqrt(3)
SQRT2 = np.sqrt(2)
SQRTPI = np.sqrt(np.pi)
IMAGE_MAX = 255.99999
class NoiseError(Exception):
def __init__(self, noise, thresh):
self.noise = noise
self.thresh = thresh
self.message = "Image too noisy ({} > {})".format(noise, thresh)
class NotSupportedError(Exception):
message = "Method not supported by class"
def logo(version):
logo_text = "\n"
logo_text += " ___ ___ " + '\n'
logo_text += " | \\ | . | " + '\n'
logo_text += " |__/ |__ |__ __ __ " + '\n'
logo_text += " | | | | | | | | |__| " + '\n'
logo_text += " | \\__| | | |__/ | |__ " + '\n'
logo_text += " __/ " + '\n'
logo_text += f"\n Fibrous Tissue Image Toolkit v{version}\n"
return logo_text
def numpy_remove(list1, list2):
"""
numpy_remove(list1, list2)
Deletes overlapping elements of list2 from list1
"""
return np.delete(list1, np.where(np.isin(list1, list2)))
def unit_vector(vector, axis=-1):
"""
unit_vector(vector, axis=-1)
Returns unit vector of vector
"""
vector = np.array(vector)
magnitude_2 = np.resize(
np.sum(vector**2, axis=axis), vector.shape)
u_vector = np.sqrt(vector**2 / magnitude_2) * np.sign(vector)
return u_vector
def label_set(labels, background=0):
"""Return a unique set of non-background values in labels"""
unique_labels = np.unique(labels)
# Remove any labels corresponding to the background
indices = np.where(unique_labels != background)
unique_labels = unique_labels[indices]
return unique_labels
def nanmean(array_like, weights=None):
if weights is None:
weights = np.ones(array_like.shape)
# Ensure None and NaN objects are filtered out. We need to use
# equality comparison for None at each array element here since
# numpy.where cannot handle identity checks
array_like = np.array(
np.where(
array_like == None, np.nan, array_like # noqa: 501
),
dtype=float
)
weights = np.array(
np.where(
weights == None, np.nan, weights # noqa: 501
),
dtype=float
)
indices = ~np.isnan(array_like) * ~np.isnan(weights)
try:
average = np.average(
array_like[indices], weights=weights[indices])
except ZeroDivisionError:
average = None
return average
def ring(image, index, sizes, value):
index = np.array(index)
sizes = np.array(sizes)
for size in sizes:
indices = np.concatenate((index - size, index + size))
if indices[0] >= 0:
start = max([indices[1], 0])
end = min([indices[3], image.shape[1]]) + 1
image[indices[0], start: end] = value
if indices[2] < image.shape[0]:
start = max([indices[1], 0])
end = min([indices[3], image.shape[1]]) + 1
image[indices[2], start: end] = value
if indices[1] >= 0:
start = max([indices[0], 0])
end = min([indices[2], image.shape[0]]) + 1
image[start: end, indices[1]] = value
if indices[3] < image.shape[1]:
start = max([indices[0], 0])
end = min([indices[2], image.shape[0]]) + 1
image[start: end, indices[3]] = value
return image
def clear_border(image, thickness=1):
for i in range(thickness):
image[:, 0 + i] = 0
image[0 + i, :] = 0
image[:, -(1 + i)] = 0
image[-(1 + i), :] = 0
return image
def flatten_list(list_of_lists):
"""Returned a flattened version of a list of lists"""
flat_list = [
val
for sublist in list_of_lists
for val in sublist
]
return flat_list
def matrix_split(matrix, nrows, ncols):
"""Split a matrix into sub-matrices"""
assert matrix.ndim == 2
rows = np.array_split(matrix, ncols, axis=0)
grid = []
for item in rows:
grid += np.array_split(item, nrows, axis=-1)
return grid
def load_plugins():
"""Load PyFibre plugins via Stevedore. """
mgr = ExtensionManager(
namespace='pyfibre.plugins',
invoke_on_load=True
)
plugins = [ext.obj for ext in mgr]
return plugins
def log_time(message):
"""Use as a decorator around a callable to automatically record
elapsed time to the log. Can be personalised with an extra string
message argument
Example
-------
>>> @log_time(name='TEST')
>>> def function(x, y):
>>> return x * y
>>> ...
>>>
>>> function(2, 3)
6
Will produce a log message:
>>>
INFO: TOTAL TEST TIME .. s
"""
def log_time_decorator(func):
"""Decorator around function to be called"""
@wraps(func)
def function_wrapper(*args, **kwargs):
"""Actual wrapper around callable, including log
instructions"""
start = time.time()
result = func(*args, **kwargs)
logger.info(
# f"TOTAL TIME = "
f"TOTAL {message.upper()} TIME = "
f"{round(time.time() - start, 3)} s")
return result
return function_wrapper
return log_time_decorator | 0.837421 | 0.490785 |
import os
import pytest
import pyvista as pv
from ansys.dpf import core
from ansys.dpf.post import examples
# enable off_screen plotting to avoid test interruption
pv.OFF_SCREEN = True
# currently running dpf on docker. Used for testing on CI
running_docker = os.environ.get("DPF_DOCKER", False)
def resolve_test_file(basename, additional_path=""):
"""Resolves a test file's full path based on the base name and the
environment.
Normally returns local path unless server is running on docker and
this repository has been mapped to the docker image at /dpf.
"""
if running_docker:
# assumes repository root is mounted at '/dpf'
test_files_path = "/dpf/tests/testfiles"
return os.path.join(test_files_path, additional_path, basename)
else:
# otherwise, assume file is local
test_path = os.path.dirname(os.path.abspath(__file__))
test_files_path = os.path.join(test_path, "testfiles")
filename = os.path.join(test_files_path, additional_path, basename)
if not os.path.isfile(filename):
raise FileNotFoundError(f"Unable to locate {basename} at {test_files_path}")
return filename
@pytest.fixture()
def allkindofcomplexity():
"""Resolve the path of the "allKindOfComplexity.rst" result file."""
return examples.download_all_kinds_of_complexity()
@pytest.fixture()
def modalallkindofcomplexity():
"""Resolve the path of the "allKindOfComplexity.rst" result file."""
return examples.download_all_kinds_of_complexity_modal()
@pytest.fixture()
def simple_bar():
"""Resolve the path of the "ASimpleBar.rst" result file."""
return examples.simple_bar
@pytest.fixture()
def static_rst():
"""Resolve the path of the "static.rst" result file."""
return examples.static_rst
@pytest.fixture()
def complex_model():
"""Resolve the path of the "msup/plate1.rst" result file."""
return examples.complex_rst
@pytest.fixture()
def model_ns():
"""Resolve the path of the "model_with_ns.rst" result file."""
return examples.multishells_rst
@pytest.fixture()
def plate_msup():
"""Resolve the path of the "msup/plate1.rst" result file.
Originally:
UnitTestDataFiles/DataProcessing/expansion/msup/Transient/plate1/file.rst
"""
return examples.msup_transient
@pytest.fixture()
def rth_transient():
"""Resolve the path of the "rth/rth_transient.rth" result file."""
return examples.transient_therm
@pytest.fixture()
def rth_steady_state():
"""Resolve the path of the "rth/rth_steady_state.rth" result file."""
return examples.steady_therm
@pytest.fixture()
def rth_electric():
"""Resolve the path of the "rth/rth_electric.rth" result file."""
return examples.electric_therm
@pytest.fixture(scope="session", autouse=True)
def cleanup(request):
"""Cleanup a testing directory once we are finished."""
def close_servers():
core.server.shutdown_all_session_servers()
request.addfinalizer(close_servers) | tests/conftest.py | import os
import pytest
import pyvista as pv
from ansys.dpf import core
from ansys.dpf.post import examples
# enable off_screen plotting to avoid test interruption
pv.OFF_SCREEN = True
# currently running dpf on docker. Used for testing on CI
running_docker = os.environ.get("DPF_DOCKER", False)
def resolve_test_file(basename, additional_path=""):
"""Resolves a test file's full path based on the base name and the
environment.
Normally returns local path unless server is running on docker and
this repository has been mapped to the docker image at /dpf.
"""
if running_docker:
# assumes repository root is mounted at '/dpf'
test_files_path = "/dpf/tests/testfiles"
return os.path.join(test_files_path, additional_path, basename)
else:
# otherwise, assume file is local
test_path = os.path.dirname(os.path.abspath(__file__))
test_files_path = os.path.join(test_path, "testfiles")
filename = os.path.join(test_files_path, additional_path, basename)
if not os.path.isfile(filename):
raise FileNotFoundError(f"Unable to locate {basename} at {test_files_path}")
return filename
@pytest.fixture()
def allkindofcomplexity():
"""Resolve the path of the "allKindOfComplexity.rst" result file."""
return examples.download_all_kinds_of_complexity()
@pytest.fixture()
def modalallkindofcomplexity():
"""Resolve the path of the "allKindOfComplexity.rst" result file."""
return examples.download_all_kinds_of_complexity_modal()
@pytest.fixture()
def simple_bar():
"""Resolve the path of the "ASimpleBar.rst" result file."""
return examples.simple_bar
@pytest.fixture()
def static_rst():
"""Resolve the path of the "static.rst" result file."""
return examples.static_rst
@pytest.fixture()
def complex_model():
"""Resolve the path of the "msup/plate1.rst" result file."""
return examples.complex_rst
@pytest.fixture()
def model_ns():
"""Resolve the path of the "model_with_ns.rst" result file."""
return examples.multishells_rst
@pytest.fixture()
def plate_msup():
"""Resolve the path of the "msup/plate1.rst" result file.
Originally:
UnitTestDataFiles/DataProcessing/expansion/msup/Transient/plate1/file.rst
"""
return examples.msup_transient
@pytest.fixture()
def rth_transient():
"""Resolve the path of the "rth/rth_transient.rth" result file."""
return examples.transient_therm
@pytest.fixture()
def rth_steady_state():
"""Resolve the path of the "rth/rth_steady_state.rth" result file."""
return examples.steady_therm
@pytest.fixture()
def rth_electric():
"""Resolve the path of the "rth/rth_electric.rth" result file."""
return examples.electric_therm
@pytest.fixture(scope="session", autouse=True)
def cleanup(request):
"""Cleanup a testing directory once we are finished."""
def close_servers():
core.server.shutdown_all_session_servers()
request.addfinalizer(close_servers) | 0.561335 | 0.351784 |
import json
from datetime import datetime
from unittest.mock import Mock, patch
import graphene
import pytest
from django.utils.dateparse import parse_datetime
from django.utils.text import slugify
from graphql_relay import to_global_id
from prices import Money
from remote_works.graphql.core.enums import ReportingPeriod
from remote_works.graphql.product.enums import StockAvailability
from remote_works.graphql.product.types import resolve_attribute_list
from remote_works.product.models import (
Attribute, AttributeValue, Category, Skill, SkillImage, SkillType,
SkillVariant)
from remote_works.product.tasks import update_variants_names
from tests.api.utils import get_graphql_content
from tests.utils import create_image, create_pdf_file_with_image_ext
from .utils import assert_no_permission, get_multipart_request_body
def test_resolve_attribute_list(color_attribute):
    """A valid attributes hstore resolves to attribute/value pairs;
    an hstore with unknown primary keys resolves to an empty list."""
    first_value = color_attribute.values.first()
    hstore = {str(color_attribute.pk): str(first_value.pk)}
    resolved = resolve_attribute_list(hstore, Attribute.objects.all())
    assert len(resolved) == 1
    assert resolved[0].attribute.name == color_attribute.name
    assert resolved[0].value.name == first_value.name
    # Primary keys guaranteed not to exist: one past the current maxima.
    missing_attr_pk = str(Attribute.objects.order_by('pk').last().pk + 1)
    missing_value_pk = str(AttributeValue.objects.order_by('pk').last().pk + 1)
    resolved = resolve_attribute_list(
        {missing_attr_pk: missing_value_pk}, Attribute.objects.all())
    assert resolved == []
def test_fetch_all_products(user_api_client, product):
    """The ``skills`` query reports the total number of stored skills."""
    query = """
    query {
        skills(first: 1) {
            totalCount
            edges {
                node {
                    id
                }
            }
        }
    }
    """
    response = user_api_client.post_graphql(query)
    content = get_graphql_content(response)
    num_skills = Skill.objects.count()
    # Bug fix: the assertions referenced an undefined ``num_products``
    # name (NameError at runtime); use the ``num_skills`` count above.
    assert content['data']['skills']['totalCount'] == num_skills
    assert len(content['data']['skills']['edges']) == num_skills
# Bug fix: the marker was misspelled ``djangodb``; pytest-django only
# recognizes ``django_db``, so the misspelled mark granted no database
# access (and is an error under --strict-markers).
@pytest.mark.django_db
def test_fetch_unavailable_products(user_api_client, product):
    """Unpublished skills must be invisible to regular users."""
    Skill.objects.update(is_published=False)
    query = """
    query {
        skills(first: 1) {
            totalCount
            edges {
                node {
                    id
                }
            }
        }
    }
    """
    response = user_api_client.post_graphql(query)
    content = get_graphql_content(response)
    assert content['data']['skills']['totalCount'] == 0
    assert not content['data']['skills']['edges']
def test_skill_query(staff_api_client, product, permission_manage_products):
    """Query a category's skills as staff and verify the exposed fields
    (price, URL, purchase cost and margin) against the ORM objects."""
    category = Category.objects.first()
    # NOTE(review): ``skill`` is assigned but never used below — the
    # assertions rely on the ``product`` fixture instead.
    skill = category.products.first()
    query = """
    query {
        category(id: "%(category_id)s") {
            skills(first: 20) {
                edges {
                    node {
                        id
                        name
                        url
                        thumbnailUrl
                        thumbnail{
                            url
                            alt
                        }
                        images {
                            url
                        }
                        variants {
                            name
                            stockQuantity
                        }
                        availability {
                            available,
                            priceRange {
                                start {
                                    gross {
                                        amount
                                        currency
                                        localized
                                    }
                                    net {
                                        amount
                                        currency
                                        localized
                                    }
                                    currency
                                }
                            }
                        }
                        purchaseCost {
                            start {
                                amount
                            }
                            stop {
                                amount
                            }
                        }
                        margin {
                            start
                            stop
                        }
                    }
                }
            }
        }
    }
    """ % {'category_id': graphene.Node.to_global_id('Category', category.id)}
    # purchaseCost/margin are restricted fields — grant the permission
    # directly on the staff user before querying.
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(query)
    content = get_graphql_content(response)
    assert content['data']['category'] is not None
    skill_edges_data = content['data']['category']['skills']['edges']
    assert len(skill_edges_data) == category.products.count()
    skill_data = skill_edges_data[0]['node']
    assert skill_data['name'] == product.name
    assert skill_data['url'] == product.get_absolute_url()
    gross = skill_data['availability']['priceRange']['start']['gross']
    assert float(gross['amount']) == float(product.price.amount)
    # Imported locally: compute the expected cost/margin with the same
    # helper the resolver uses, then compare field by field.
    from remote_works.product.utils.costs import get_skill_costs_data
    purchase_cost, margin = get_skill_costs_data(product)
    assert purchase_cost.start.amount == skill_data[
        'purchaseCost']['start']['amount']
    assert purchase_cost.stop.amount == skill_data[
        'purchaseCost']['stop']['amount']
    assert margin[0] == skill_data['margin']['start']
    assert margin[1] == skill_data['margin']['stop']
def test_skill_query_search(user_api_client, skill_type, category):
    """Fuzzy search ('blu p4int') should match only the blue skill."""
    blue_skill = Skill.objects.create(
        name='Blue Paint', price=Money('10.00', 'USD'),
        skill_type=skill_type, category=category)
    Skill.objects.create(
        name='Red Paint', price=Money('10.00', 'USD'),
        skill_type=skill_type, category=category)
    query = """
    query productSearch($query: String) {
        skills(query: $query, first: 10) {
            edges {
                node {
                    name
                }
            }
        }
    }
    """
    response = user_api_client.post_graphql(query, {'query': 'blu p4int'})
    content = get_graphql_content(response)
    skills = content['data']['skills']['edges']
    # Bug fix: the assertions referenced undefined ``products`` and
    # ``blue_product`` names (NameError); use ``skills``/``blue_skill``.
    assert len(skills) == 1
    assert skills[0]['node']['name'] == blue_skill.name
def test_query_skill_image_by_id(user_api_client, skill_with_image):
    """A single image can be fetched through ``imageById``."""
    query = """
    query productImageById($imageId: ID!, $productId: ID!) {
        skill(id: $productId) {
            imageById(id: $imageId) {
                id
                url
            }
        }
    }
    """
    first_image = skill_with_image.images.first()
    query_variables = {
        'productId': graphene.Node.to_global_id(
            'Skill', skill_with_image.pk),
        'imageId': graphene.Node.to_global_id('SkillImage', first_image.pk)}
    response = user_api_client.post_graphql(query, query_variables)
    # get_graphql_content raises if the response carries any errors.
    get_graphql_content(response)
def test_skill_with_collections(
        staff_api_client, product, collection, permission_manage_products):
    """A skill exposes exactly the collections it was added to."""
    query = """
    query getSkill($productID: ID!) {
        skill(id: $productID) {
            collections {
                name
            }
        }
    }
    """
    product.collections.add(collection)
    product.save()
    staff_api_client.user.user_permissions.add(permission_manage_products)
    node_id = graphene.Node.to_global_id('Skill', product.id)
    response = staff_api_client.post_graphql(query, {'productID': node_id})
    collections_data = get_graphql_content(
        response)['data']['skill']['collections']
    assert len(collections_data) == 1
    assert collections_data[0]['name'] == collection.name
def test_filter_skill_by_category(user_api_client, product):
    """Filtering by category returns that category's skill."""
    query = """
    query getSkills($categoryId: ID) {
        skills(categories: [$categoryId], first: 1) {
            edges {
                node {
                    name
                }
            }
        }
    }
    """
    category_id = graphene.Node.to_global_id('Category', product.category.id)
    response = user_api_client.post_graphql(query, {'categoryId': category_id})
    content = get_graphql_content(response)
    first_node = content['data']['skills']['edges'][0]['node']
    assert first_node['name'] == product.name
def test_fetch_skill_by_id(user_api_client, product):
    """A skill can be fetched via the generic ``node`` lookup."""
    query = """
    query ($productId: ID!) {
        node(id: $productId) {
            ... on Skill {
                name
            }
        }
    }
    """
    node_id = graphene.Node.to_global_id('Skill', product.id)
    response = user_api_client.post_graphql(query, {'productId': node_id})
    content = get_graphql_content(response)
    assert content['data']['node']['name'] == product.name
def _fetch_product(client, product, permissions=None):
    """Fetch a skill node by global ID and return its data dict.

    Returns ``None`` when the node is not visible to the client; GraphQL
    permission errors are tolerated (``check_no_permissions=False``).
    """
    query = """
    query ($productId: ID!) {
        node(id: $productId) {
            ... on Skill {
                name,
                isPublished
            }
        }
    }
    """
    node_id = graphene.Node.to_global_id('Skill', product.id)
    response = client.post_graphql(
        query, {'productId': node_id},
        permissions=permissions, check_no_permissions=False)
    return get_graphql_content(response)['data']['node']
def test_fetch_unpublished_skill_staff_user(
        staff_api_client, unavailable_product, permission_manage_products):
    """Staff with the manage-products permission see unpublished skills."""
    data = _fetch_product(
        staff_api_client, unavailable_product,
        permissions=[permission_manage_products])
    assert data['name'] == unavailable_product.name
    assert data['isPublished'] == unavailable_product.is_published
def test_fetch_unpublished_skill_customer(
        user_api_client, unavailable_product):
    """Regular customers must not see unpublished skills."""
    assert _fetch_product(user_api_client, unavailable_product) is None
def test_fetch_unpublished_skill_anonymous_user(
        api_client, unavailable_product):
    """Anonymous visitors must not see unpublished skills."""
    assert _fetch_product(api_client, unavailable_product) is None
def test_filter_products_by_attributes(user_api_client, product):
    """Skills can be filtered by an ``attribute:value`` slug pair."""
    attribute = product.skill_type.skill_attributes.first()
    value = attribute.values.first()
    query = """
    query {
        skills(attributes: ["%(filter_by)s"], first: 1) {
            edges {
                node {
                    name
                }
            }
        }
    }
    """ % {'filter_by': '%s:%s' % (attribute.slug, value.slug)}
    content = get_graphql_content(user_api_client.post_graphql(query))
    matched = content['data']['skills']['edges'][0]['node']
    assert matched['name'] == product.name
def test_filter_products_by_categories(
        user_api_client, categories_tree, product):
    """Filtering by a child category returns the skill moved into it."""
    child_category = categories_tree.children.first()
    product.category = child_category
    product.save()
    query = """
    query {
        skills(categories: ["%(category_id)s"], first: 1) {
            edges {
                node {
                    name
                }
            }
        }
    }
    """ % {'category_id': graphene.Node.to_global_id(
        'Category', child_category.id)}
    content = get_graphql_content(user_api_client.post_graphql(query))
    matched = content['data']['skills']['edges'][0]['node']
    assert matched['name'] == product.name
def test_filter_products_by_collections(
        user_api_client, collection, product):
    """Filtering by a collection returns the skill added to it."""
    collection.products.add(product)
    query = """
    query {
        skills(collections: ["%(collection_id)s"], first: 1) {
            edges {
                node {
                    name
                }
            }
        }
    }
    """ % {'collection_id': graphene.Node.to_global_id(
        'Collection', collection.id)}
    content = get_graphql_content(user_api_client.post_graphql(query))
    matched = content['data']['skills']['edges'][0]['node']
    assert matched['name'] == product.name
def test_sort_products(user_api_client, product):
    """Sort skills by PRICE and DATE in both directions and verify order."""
    # set price and update date of the first skill
    product.price = Money('10.00', 'USD')
    product.updated_at = datetime.utcnow()
    product.save()
    # Create the second skill with higher price and date.
    # Clearing the pk before save() makes Django INSERT a new row
    # instead of updating the existing one.
    product.pk = None
    product.price = Money('20.00', 'USD')
    product.updated_at = datetime.utcnow()
    product.save()
    # Template query: the sortBy argument is interpolated per direction.
    query = """
    query {
        skills(sortBy: %(sort_by_skill_order)s, first: 2) {
            edges {
                node {
                    price {
                        amount
                    }
                    updatedAt
                }
            }
        }
    }
    """
    # Ascending price.
    asc_price_query = query % {
        'sort_by_skill_order': '{field: PRICE, direction:ASC}'}
    response = user_api_client.post_graphql(asc_price_query)
    content = get_graphql_content(response)
    price_0 = content['data']['skills']['edges'][0]['node']['price']['amount']
    price_1 = content['data']['skills']['edges'][1]['node']['price']['amount']
    assert price_0 < price_1
    # Descending price.
    desc_price_query = query % {
        'sort_by_skill_order': '{field: PRICE, direction:DESC}'}
    response = user_api_client.post_graphql(desc_price_query)
    content = get_graphql_content(response)
    price_0 = content['data']['skills']['edges'][0]['node']['price']['amount']
    price_1 = content['data']['skills']['edges'][1]['node']['price']['amount']
    assert price_0 > price_1
    # Ascending date; updatedAt is an ISO string, so compare via
    # parse_datetime rather than lexicographically.
    asc_date_query = query % {
        'sort_by_skill_order': '{field: DATE, direction:ASC}'}
    response = user_api_client.post_graphql(asc_date_query)
    content = get_graphql_content(response)
    date_0 = content['data']['skills']['edges'][0]['node']['updatedAt']
    date_1 = content['data']['skills']['edges'][1]['node']['updatedAt']
    assert parse_datetime(date_0) < parse_datetime(date_1)
    # Descending date.
    desc_date_query = query % {
        'sort_by_skill_order': '{field: DATE, direction:DESC}'}
    response = user_api_client.post_graphql(desc_date_query)
    content = get_graphql_content(response)
    date_0 = content['data']['skills']['edges'][0]['node']['updatedAt']
    date_1 = content['data']['skills']['edges'][1]['node']['updatedAt']
    assert parse_datetime(date_0) > parse_datetime(date_1)
def test_create_product(
        staff_api_client, skill_type, category, size_attribute,
        permission_manage_products):
    """``productCreate`` persists every supplied field and resolves
    attribute values, slugifying raw values that do not exist yet."""
    query = """
        mutation createSkill(
            $productTypeId: ID!,
            $categoryId: ID!,
            $name: String!,
            $description: String!,
            $descriptionJson: JSONString!,
            $isPublished: Boolean!,
            $chargeTaxes: Boolean!,
            $taxRate: TaxRateType!,
            $price: Decimal!,
            $attributes: [AttributeValueInput!]) {
                productCreate(
                    input: {
                        category: $categoryId,
                        productType: $productTypeId,
                        name: $name,
                        description: $description,
                        descriptionJson: $descriptionJson,
                        isPublished: $isPublished,
                        chargeTaxes: $chargeTaxes,
                        taxRate: $taxRate,
                        price: $price,
                        attributes: $attributes
                    }) {
                        skill {
                            category {
                                name
                            }
                            description
                            descriptionJson
                            isPublished
                            chargeTaxes
                            taxRate
                            name
                            price {
                                amount
                            }
                            productType {
                                name
                            }
                            attributes {
                                attribute {
                                    slug
                                }
                                value {
                                    slug
                                }
                            }
                        }
                        errors {
                            message
                            field
                        }
                    }
            }
    """
    skill_type_id = graphene.Node.to_global_id(
        'SkillType', skill_type.pk)
    category_id = graphene.Node.to_global_id(
        'Category', category.pk)
    skill_description = 'test description'
    skill_description_json = json.dumps({'content': 'description'})
    skill_name = '<NAME>'
    skill_is_published = True
    skill_charge_taxes = True
    skill_tax_rate = 'STANDARD'
    skill_price = 22.33
    # Default attribute defined in skill_type fixture
    color_attr = skill_type.skill_attributes.get(name='Color')
    color_value_slug = color_attr.values.first().slug
    color_attr_slug = color_attr.slug
    # Add second attribute
    skill_type.skill_attributes.add(size_attribute)
    size_attr_slug = skill_type.skill_attributes.get(name='Size').slug
    # A value that does not exist yet — the mutation should create it
    # and expose it under its slugified form.
    non_existent_attr_value = 'The cake is a lie'
    # test creating root skill
    variables = {
        'productTypeId': skill_type_id,
        'categoryId': category_id,
        'name': skill_name,
        'description': skill_description,
        'descriptionJson': skill_description_json,
        'isPublished': skill_is_published,
        'chargeTaxes': skill_charge_taxes,
        'taxRate': skill_tax_rate,
        'price': skill_price,
        'attributes': [
            {'slug': color_attr_slug, 'value': color_value_slug},
            {'slug': size_attr_slug, 'value': non_existent_attr_value}]}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productCreate']
    assert data['errors'] == []
    assert data['skill']['name'] == skill_name
    assert data['skill']['description'] == skill_description
    assert data['skill']['descriptionJson'] == skill_description_json
    assert data['skill']['isPublished'] == skill_is_published
    assert data['skill']['chargeTaxes'] == skill_charge_taxes
    assert data['skill']['taxRate'] == skill_tax_rate
    assert data['skill']['productType']['name'] == skill_type.name
    assert data['skill']['category']['name'] == category.name
    # Attribute order in the response is not fixed; compare as a pair.
    values = (
        data['skill']['attributes'][0]['value']['slug'],
        data['skill']['attributes'][1]['value']['slug'])
    assert slugify(non_existent_attr_value) in values
    assert color_value_slug in values
QUERY_CREATE_SKILL_WITHOUT_VARIANTS = """
mutation createSkill(
$productTypeId: ID!,
$categoryId: ID!
$name: String!,
$description: String!,
$price: Decimal!,
$sku: String,
$quantity: Int,
$trackInventory: Boolean)
{
productCreate(
input: {
category: $categoryId,
productType: $productTypeId,
name: $name,
description: $description,
price: $price,
sku: $sku,
quantity: $quantity,
trackInventory: $trackInventory
})
{
skill {
id
name
variants{
id
sku
quantity
trackInventory
}
category {
name
}
productType {
name
}
}
errors {
message
field
}
}
}
"""
def test_create_skill_without_variants(
        staff_api_client, skill_type_without_variant, category,
        permission_manage_products):
    """Creating a skill of a variant-less type builds its default
    variant from the sku/quantity/trackInventory inputs."""
    skill_type = skill_type_without_variant
    skill_name = '<NAME>'
    skill_description = 'description'
    skill_price = 10
    sku = 'sku'
    quantity = 1
    track_inventory = True
    variables = {
        'productTypeId': graphene.Node.to_global_id(
            'SkillType', skill_type.pk),
        'categoryId': graphene.Node.to_global_id('Category', category.pk),
        'name': skill_name,
        'description': skill_description,
        'price': skill_price,
        'sku': sku,
        'quantity': quantity,
        'trackInventory': track_inventory}
    response = staff_api_client.post_graphql(
        QUERY_CREATE_SKILL_WITHOUT_VARIANTS, variables,
        permissions=[permission_manage_products])
    data = get_graphql_content(response)['data']['productCreate']
    assert data['errors'] == []
    created = data['skill']
    assert created['name'] == skill_name
    assert created['productType']['name'] == skill_type.name
    assert created['category']['name'] == category.name
    default_variant = created['variants'][0]
    assert default_variant['sku'] == sku
    assert default_variant['quantity'] == quantity
    assert default_variant['trackInventory'] == track_inventory
def test_create_skill_without_variants_sku_validation(
        staff_api_client, skill_type_without_variant, category,
        permission_manage_products):
    """A variant-less skill requires a SKU: passing null must produce a
    'cannot be blank' error on the sku field."""
    query = QUERY_CREATE_SKILL_WITHOUT_VARIANTS
    skill_type = skill_type_without_variant
    skill_type_id = graphene.Node.to_global_id(
        'SkillType', skill_type.pk)
    category_id = graphene.Node.to_global_id(
        'Category', category.pk)
    skill_name = '<NAME>'
    skill_description = 'description'
    skill_price = 10
    quantity = 1
    track_inventory = True
    variables = {
        'productTypeId': skill_type_id,
        'categoryId': category_id,
        'name': skill_name,
        'description': skill_description,
        'price': skill_price,
        'sku': None,
        'quantity': quantity,
        'trackInventory': track_inventory}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productCreate']
    assert data['errors'][0]['field'] == 'sku'
    assert data['errors'][0]['message'] == 'This field cannot be blank.'
def test_create_skill_without_variants_sku_duplication(
        staff_api_client, skill_type_without_variant, category,
        permission_manage_products, skill_with_default_variant):
    """Creating a variant-less skill with a SKU already taken by
    ``skill_with_default_variant`` must fail with a duplication error."""
    query = QUERY_CREATE_SKILL_WITHOUT_VARIANTS
    skill_type = skill_type_without_variant
    skill_type_id = graphene.Node.to_global_id(
        'SkillType', skill_type.pk)
    category_id = graphene.Node.to_global_id(
        'Category', category.pk)
    skill_name = '<NAME>'
    skill_description = 'description'
    skill_price = 10
    quantity = 1
    track_inventory = True
    # assumes the skill_with_default_variant fixture owns sku '1234'
    # — TODO confirm against the fixture definition
    sku = '1234'
    variables = {
        'productTypeId': skill_type_id,
        'categoryId': category_id,
        'name': skill_name,
        'description': skill_description,
        'price': skill_price,
        'sku': sku,
        'quantity': quantity,
        'trackInventory': track_inventory}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productCreate']
    assert data['errors'][0]['field'] == 'sku'
    assert data['errors'][0]['message'] == 'Skill with this SKU already exists.'
def test_update_product(
        staff_api_client, category, non_default_category, product,
        permission_manage_products):
    """productUpdate must apply name/description/publish/tax/price changes
    and move the skill into ``non_default_category``."""
    query = """
        mutation updateSkill(
            $productId: ID!,
            $categoryId: ID!,
            $name: String!,
            $description: String!,
            $isPublished: Boolean!,
            $chargeTaxes: Boolean!,
            $taxRate: TaxRateType!,
            $price: Decimal!,
            $attributes: [AttributeValueInput!]) {
                productUpdate(
                    id: $productId,
                    input: {
                        category: $categoryId,
                        name: $name,
                        description: $description,
                        isPublished: $isPublished,
                        chargeTaxes: $chargeTaxes,
                        taxRate: $taxRate,
                        price: $price,
                        attributes: $attributes
                    }) {
                        skill {
                            category {
                                name
                            }
                            description
                            isPublished
                            chargeTaxes
                            taxRate
                            name
                            price {
                                amount
                            }
                            productType {
                                name
                            }
                            attributes {
                                attribute {
                                    name
                                }
                                value {
                                    name
                                }
                            }
                        }
                        errors {
                            message
                            field
                        }
                    }
            }
    """
    skill_id = graphene.Node.to_global_id('Skill', product.pk)
    # Target category is the non-default one; the final assert checks the
    # skill actually left the original ``category``.
    category_id = graphene.Node.to_global_id(
        'Category', non_default_category.pk)
    skill_description = 'updated description'
    skill_name = 'updated name'
    skill_isPublished = True
    skill_chargeTaxes = True
    skill_taxRate = 'STANDARD'
    skill_price = "33.12"
    variables = {
        'productId': skill_id,
        'categoryId': category_id,
        'name': skill_name,
        'description': skill_description,
        'isPublished': skill_isPublished,
        'chargeTaxes': skill_chargeTaxes,
        'taxRate': skill_taxRate,
        'price': skill_price}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productUpdate']
    assert data['errors'] == []
    assert data['skill']['name'] == skill_name
    assert data['skill']['description'] == skill_description
    assert data['skill']['isPublished'] == skill_isPublished
    assert data['skill']['chargeTaxes'] == skill_chargeTaxes
    assert data['skill']['taxRate'] == skill_taxRate
    # Deliberately negated: category must no longer be the original one.
    assert not data['skill']['category']['name'] == category.name
def test_update_skill_without_variants(
        staff_api_client, skill_with_default_variant,
        permission_manage_products):
    """Updating a variant-less skill must propagate sku/quantity/
    trackInventory onto its implicit default variant.

    Bug fix: the original referenced an undefined ``product`` name — both
    when building the node id (the fixture was bound to ``skill``) and in
    the final assertions (which indexed ``product[...]`` instead of the
    variant payload extracted from the response).
    """
    query = """
    mutation updateSkill(
        $productId: ID!,
        $sku: String,
        $quantity: Int,
        $trackInventory: Boolean,
        $description: String)
    {
        productUpdate(
            id: $productId,
            input: {
                sku: $sku,
                quantity: $quantity,
                trackInventory: $trackInventory,
                description: $description
            })
        {
            skill {
                id
                variants{
                    id
                    sku
                    quantity
                    trackInventory
                }
            }
            errors {
                message
                field
            }
        }
    }
    """
    skill = skill_with_default_variant
    skill_id = graphene.Node.to_global_id('Skill', skill.pk)
    skill_sku = "test_sku"
    skill_quantity = 10
    skill_track_inventory = False
    skill_description = "test description"
    variables = {
        'productId': skill_id,
        'sku': skill_sku,
        'quantity': skill_quantity,
        'trackInventory': skill_track_inventory,
        'description': skill_description}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productUpdate']
    assert data['errors'] == []
    # The implicit default variant must echo the updated values.
    variant_data = data['skill']['variants'][0]
    assert variant_data['sku'] == skill_sku
    assert variant_data['quantity'] == skill_quantity
    assert variant_data['trackInventory'] == skill_track_inventory
def test_update_skill_without_variants_sku_duplication(
        staff_api_client, skill_with_default_variant,
        permission_manage_products, product):
    """Updating ``product`` to a SKU already in use must fail with a
    duplication error on the sku field."""
    query = """
        mutation updateSkill(
            $productId: ID!,
            $sku: String)
        {
            productUpdate(
                id: $productId,
                input: {
                    sku: $sku
                })
            {
                skill {
                    id
                }
                errors {
                    message
                    field
                }
            }
        }"""
    # NOTE(review): ``skill`` is assigned but never used — the test updates
    # the separate ``product`` fixture so its SKU collides with an existing
    # one. Presumably '123' belongs to skill_with_default_variant; verify
    # against the fixture definitions.
    skill = skill_with_default_variant
    skill_id = graphene.Node.to_global_id('Skill', product.pk)
    skill_sku = "123"
    variables = {
        'productId': skill_id,
        'sku': skill_sku}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productUpdate']
    assert data['errors']
    assert data['errors'][0]['field'] == 'sku'
    assert data['errors'][0]['message'] == 'Skill with this SKU already exists.'
def test_delete_product(staff_api_client, product, permission_manage_products):
    """productDelete must return the deleted skill's name/id and remove the
    row from the database."""
    query = """
        mutation DeleteSkill($id: ID!) {
            productDelete(id: $id) {
                skill {
                    name
                    id
                }
                errors {
                    field
                    message
                }
            }
        }
    """
    node_id = graphene.Node.to_global_id('Skill', product.id)
    variables = {'id': node_id}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productDelete']
    assert data['skill']['name'] == product.name
    # The DB row must be gone after the mutation.
    with pytest.raises(product._meta.model.DoesNotExist):
        product.refresh_from_db()
    assert node_id == data['skill']['id']
def test_skill_type(user_api_client, skill_type):
    """skillTypes connection must report every SkillType in the database."""
    query = """
    query {
        skillTypes(first: 20) {
            totalCount
            edges {
                node {
                    id
                    name
                    skills(first: 1) {
                        edges {
                            node {
                                id
                            }
                        }
                    }
                }
            }
        }
    }
    """
    response = user_api_client.post_graphql(query)
    content = get_graphql_content(response)
    no_skill_types = SkillType.objects.count()
    assert content['data']['skillTypes']['totalCount'] == no_skill_types
    assert len(content['data']['skillTypes']['edges']) == no_skill_types
def test_skill_type_query(
        user_api_client, staff_api_client, skill_type, product,
        permission_manage_products):
    """Anonymous users must not see unpublished skills of a type, while
    staff with manage-products permission see all of them.

    Bug fix: the staff-side assertion referenced an undefined
    ``no_products`` name — the count variable is ``no_skills``.
    """
    query = """
            query getSkillType($id: ID!) {
                productType(id: $id) {
                    name
                    skills(first: 20) {
                        totalCount
                        edges {
                            node {
                                name
                            }
                        }
                    }
                    taxRate
                }
            }
        """
    no_skills = Skill.objects.count()
    # Hide one skill so the anonymous count drops by exactly one.
    product.is_published = False
    product.save()
    variables = {
        'id': graphene.Node.to_global_id('SkillType', skill_type.id)}
    response = user_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content['data']
    assert data['productType']['skills']['totalCount'] == no_skills - 1
    # Staff with the permission see unpublished skills as well.
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content['data']
    assert data['productType']['skills']['totalCount'] == no_skills
    assert data['productType']['taxRate'] == skill_type.tax_rate.upper()
def test_skill_type_create_mutation(
        staff_api_client, skill_type, permission_manage_products):
    """productTypeCreate must persist a new SkillType with the given
    attributes and echo them (and their values) back."""
    query = """
    mutation createSkillType(
        $name: String!,
        $taxRate: TaxRateType!,
        $hasVariants: Boolean!,
        $isDeliveryRequired: Boolean!,
        $productAttributes: [ID],
        $variantAttributes: [ID]) {
        productTypeCreate(
            input: {
                name: $name,
                taxRate: $taxRate,
                hasVariants: $hasVariants,
                isDeliveryRequired: $isDeliveryRequired,
                productAttributes: $productAttributes,
                variantAttributes: $variantAttributes}) {
            productType {
            name
            taxRate
            isDeliveryRequired
            hasVariants
            variantAttributes {
                name
                values {
                    name
                }
            }
            productAttributes {
                name
                values {
                    name
                }
            }
            }
        }
    }
    """
    skill_type_name = 'test type'
    has_variants = True
    require_delivery = True
    # Reuse the attribute sets from the existing skill_type fixture.
    skill_attributes = skill_type.skill_attributes.all()
    skill_attributes_ids = [
        graphene.Node.to_global_id('Attribute', att.id) for att in
        skill_attributes]
    variant_attributes = skill_type.variant_attributes.all()
    variant_attributes_ids = [
        graphene.Node.to_global_id('Attribute', att.id) for att in
        variant_attributes]
    variables = {
        'name': skill_type_name, 'hasVariants': has_variants,
        'taxRate': 'STANDARD',
        'isDeliveryRequired': require_delivery,
        'productAttributes': skill_attributes_ids,
        'variantAttributes': variant_attributes_ids}
    initial_count = SkillType.objects.count()
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    # Exactly one new row must have been created.
    assert SkillType.objects.count() == initial_count + 1
    data = content['data']['productTypeCreate']['productType']
    assert data['name'] == skill_type_name
    assert data['hasVariants'] == has_variants
    assert data['isDeliveryRequired'] == require_delivery
    pa = skill_attributes[0]
    assert data['productAttributes'][0]['name'] == pa.name
    pa_values = data['productAttributes'][0]['values']
    assert sorted([value['name'] for value in pa_values]) == sorted(
        [value.name for value in pa.values.all()])
    va = variant_attributes[0]
    assert data['variantAttributes'][0]['name'] == va.name
    va_values = data['variantAttributes'][0]['values']
    assert sorted([value['name'] for value in va_values]) == sorted(
        [value.name for value in va.values.all()])
    # The enum input 'STANDARD' is stored lowercase on the model.
    new_instance = SkillType.objects.latest('pk')
    assert new_instance.tax_rate == 'standard'
def test_skill_type_update_mutation(
        staff_api_client, skill_type, permission_manage_products):
    """productTypeUpdate with an empty productAttributes list must clear
    the skill attributes while leaving variant attributes untouched."""
    query = """
    mutation updateSkillType(
        $id: ID!,
        $name: String!,
        $hasVariants: Boolean!,
        $isDeliveryRequired: Boolean!,
        $productAttributes: [ID],
        ) {
            productTypeUpdate(
            id: $id,
            input: {
                name: $name,
                hasVariants: $hasVariants,
                isDeliveryRequired: $isDeliveryRequired,
                productAttributes: $productAttributes
            }) {
                productType {
                    name
                    isDeliveryRequired
                    hasVariants
                    variantAttributes {
                        id
                    }
                    productAttributes {
                        id
                    }
                }
              }
            }
    """
    skill_type_name = 'test type updated'
    has_variants = True
    require_delivery = False
    skill_type_id = graphene.Node.to_global_id(
        'SkillType', skill_type.id)
    # Test scenario: remove all skill attributes using [] as input
    # but do not change variant attributes
    skill_attributes = []
    skill_attributes_ids = [
        graphene.Node.to_global_id('Attribute', att.id) for att in
        skill_attributes]
    variant_attributes = skill_type.variant_attributes.all()
    variables = {
        'id': skill_type_id, 'name': skill_type_name,
        'hasVariants': has_variants,
        'isDeliveryRequired': require_delivery,
        'productAttributes': skill_attributes_ids}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productTypeUpdate']['productType']
    assert data['name'] == skill_type_name
    assert data['hasVariants'] == has_variants
    assert data['isDeliveryRequired'] == require_delivery
    assert len(data['productAttributes']) == 0
    assert len(data['variantAttributes']) == (
        variant_attributes.count())
def test_skill_type_delete_mutation(
        staff_api_client, skill_type, permission_manage_products):
    """productTypeDelete must return the deleted type and remove its row."""
    query = """
        mutation deleteSkillType($id: ID!) {
            productTypeDelete(id: $id) {
                productType {
                    name
                }
            }
        }
    """
    variables = {
        'id': graphene.Node.to_global_id('SkillType', skill_type.id)}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productTypeDelete']
    assert data['productType']['name'] == skill_type.name
    # The DB row must be gone after the mutation.
    with pytest.raises(skill_type._meta.model.DoesNotExist):
        skill_type.refresh_from_db()
def test_skill_image_create_mutation(
        monkeypatch, staff_api_client, product, permission_manage_products):
    """productImageCreate must attach the uploaded image and schedule a
    thumbnail warm-up task for it (celery delay is mocked out)."""
    query = """
    mutation createSkillImage($image: Upload!, $skill: ID!) {
        productImageCreate(input: {image: $image, skill: $skill}) {
            image {
                id
            }
        }
    }
    """
    mock_create_thumbnails = Mock(return_value=None)
    monkeypatch.setattr(
        ('remote_works.graphql.skill.mutations.skills.'
         'create_skill_thumbnails.delay'),
        mock_create_thumbnails)
    image_file, image_name = create_image()
    variables = {
        'skill': graphene.Node.to_global_id('Skill', product.id),
        'image': image_name}
    body = get_multipart_request_body(query, variables, image_file, image_name)
    response = staff_api_client.post_multipart(
        body, permissions=[permission_manage_products])
    get_graphql_content(response)
    product.refresh_from_db()
    skill_image = product.images.last()
    assert skill_image.image.file
    # The image creation should have triggered a warm-up
    mock_create_thumbnails.assert_called_once_with(skill_image.pk)
def test_invalid_skill_image_create_mutation(
        staff_api_client, product, permission_manage_products):
    """Uploading a non-image file (PDF with an image extension) must be
    rejected with an 'Invalid file type' error and store nothing."""
    query = """
    mutation createSkillImage($image: Upload!, $skill: ID!) {
        productImageCreate(input: {image: $image, skill: $skill}) {
            image {
                id
                url
                sortTask
            }
            errors {
                field
                message
            }
        }
    }
    """
    image_file, image_name = create_pdf_file_with_image_ext()
    variables = {
        'skill': graphene.Node.to_global_id('Skill', product.id),
        'image': image_name}
    body = get_multipart_request_body(query, variables, image_file, image_name)
    response = staff_api_client.post_multipart(
        body, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    assert content['data']['productImageCreate']['errors'] == [{
        'field': 'image',
        'message': 'Invalid file type'}]
    product.refresh_from_db()
    # Nothing may have been persisted for the rejected upload.
    assert product.images.count() == 0
def test_skill_image_update_mutation(
        monkeypatch,
        staff_api_client, skill_with_image, permission_manage_products):
    """Updating only the alt text of an image must not re-trigger the
    thumbnail warm-up task."""
    query = """
    mutation updateSkillImage($imageId: ID!, $alt: String) {
        productImageUpdate(id: $imageId, input: {alt: $alt}) {
            image {
                alt
            }
        }
    }
    """
    mock_create_thumbnails = Mock(return_value=None)
    monkeypatch.setattr(
        ('remote_works.graphql.skill.mutations.skills.'
         'create_skill_thumbnails.delay'),
        mock_create_thumbnails)
    image_obj = skill_with_image.images.first()
    alt = 'damage alt'
    variables = {
        'alt': alt,
        'imageId': graphene.Node.to_global_id('SkillImage', image_obj.id)}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    assert content['data']['productImageUpdate']['image']['alt'] == alt
    # We did not update the image field,
    # the image should not have triggered a warm-up
    assert mock_create_thumbnails.call_count == 0
def test_skill_image_delete(
        staff_api_client, skill_with_image, permission_manage_products):
    """productImageDelete must return the deleted image's id/url and
    remove its row from the database.

    Bug fix: the original read ``product.images.first()`` but ``product``
    is not defined in this test — the fixture is bound to ``skill``.
    """
    skill = skill_with_image
    query = """
            mutation deleteSkillImage($id: ID!) {
                productImageDelete(id: $id) {
                    image {
                        id
                        url
                    }
                }
            }
        """
    image_obj = skill.images.first()
    node_id = graphene.Node.to_global_id('SkillImage', image_obj.id)
    variables = {'id': node_id}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productImageDelete']
    assert image_obj.image.url in data['image']['url']
    # The DB row must be gone after the mutation.
    with pytest.raises(image_obj._meta.model.DoesNotExist):
        image_obj.refresh_from_db()
    assert node_id == data['image']['id']
def test_retask_images(
        staff_api_client, skill_with_images, permission_manage_products):
    """productImageReorder must persist the new ordering of the skill's
    images.

    Bug fix: the original referenced an undefined ``product`` name in
    several places — the fixture is bound to ``skill``.
    """
    query = """
    mutation reorderImages($skill_id: ID!, $images_ids: [ID]!) {
        productImageReorder(productId: $skill_id, imagesIds: $images_ids) {
            skill {
                id
            }
        }
    }
    """
    skill = skill_with_images
    images = skill.images.all()
    image_0 = images[0]
    image_1 = images[1]
    image_0_id = graphene.Node.to_global_id('SkillImage', image_0.id)
    image_1_id = graphene.Node.to_global_id('SkillImage', image_1.id)
    skill_id = graphene.Node.to_global_id('Skill', skill.id)
    # Request the two images in swapped order.
    variables = {
        'skill_id': skill_id, 'images_ids': [image_1_id, image_0_id]}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    get_graphql_content(response)
    # Check if task has been changed
    skill.refresh_from_db()
    reordered_images = skill.images.all()
    reordered_image_0 = reordered_images[0]
    reordered_image_1 = reordered_images[1]
    assert image_0.id == reordered_image_1.id
    assert image_1.id == reordered_image_0.id
ASSIGN_VARIANT_QUERY = """
mutation assignVariantImageMutation($variantId: ID!, $imageId: ID!) {
variantImageAssign(variantId: $variantId, imageId: $imageId) {
errors {
field
message
}
productVariant {
id
}
}
}
"""
def test_assign_variant_image(
        staff_api_client, user_api_client, skill_with_image,
        permission_manage_products):
    """variantImageAssign must link an existing skill image to a variant."""
    query = ASSIGN_VARIANT_QUERY
    variant = skill_with_image.variants.first()
    image = skill_with_image.images.first()
    variables = {
        'variantId': to_global_id('SkillVariant', variant.pk),
        'imageId': to_global_id('SkillImage', image.pk)}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    get_graphql_content(response)
    variant.refresh_from_db()
    assert variant.images.first() == image
def test_assign_variant_image_from_different_product(
        staff_api_client, user_api_client, skill_with_image,
        permission_manage_products):
    """Assigning an image that belongs to a different skill must fail on
    the imageId field, and anonymous users must be denied entirely."""
    query = ASSIGN_VARIANT_QUERY
    variant = skill_with_image.variants.first()
    # Clearing pk and saving clones the fixture skill as a new row; the
    # new image therefore belongs to a different skill than the variant.
    skill_with_image.pk = None
    skill_with_image.save()
    image_2 = SkillImage.objects.create(product=skill_with_image)
    variables = {
        'variantId': to_global_id('SkillVariant', variant.pk),
        'imageId': to_global_id('SkillImage', image_2.pk)}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    assert content['data']['variantImageAssign']['errors'][0]['field'] == 'imageId'
    # check permissions
    response = user_api_client.post_graphql(query, variables)
    assert_no_permission(response)
UNASSIGN_VARIANT_IMAGE_QUERY = """
mutation unassignVariantImageMutation($variantId: ID!, $imageId: ID!) {
variantImageUnassign(variantId: $variantId, imageId: $imageId) {
errors {
field
message
}
productVariant {
id
}
}
}
"""
def test_unassign_variant_image(
        staff_api_client, skill_with_image, permission_manage_products):
    """variantImageUnassign must remove an existing variant-image link."""
    query = UNASSIGN_VARIANT_IMAGE_QUERY
    image = skill_with_image.images.first()
    variant = skill_with_image.variants.first()
    # Pre-link the image so there is something to unassign.
    variant.variant_images.create(image=image)
    variables = {
        'variantId': to_global_id('SkillVariant', variant.pk),
        'imageId': to_global_id('SkillImage', image.pk)}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    get_graphql_content(response)
    variant.refresh_from_db()
    assert variant.images.count() == 0
def test_unassign_not_assigned_variant_image(
        staff_api_client, skill_with_image, permission_manage_products):
    """Unassigning an image that was never linked to the variant must fail
    with an error on the imageId field."""
    query = UNASSIGN_VARIANT_IMAGE_QUERY
    variant = skill_with_image.variants.first()
    image_2 = SkillImage.objects.create(product=skill_with_image)
    variables = {
        'variantId': to_global_id('SkillVariant', variant.pk),
        'imageId': to_global_id('SkillImage', image_2.pk)}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    assert content['data']['variantImageUnassign']['errors'][0]['field'] == (
        'imageId')
@patch('remote_works.skill.tasks.update_variants_names.delay')
def test_skill_type_update_changes_variant_name(
        mock_update_variants_names, staff_api_client, skill_type,
        product, permission_manage_products):
    """Updating a type's variant attributes must enqueue the celery task
    that regenerates variant names (task delay is mocked)."""
    query = """
    mutation updateSkillType(
        $id: ID!,
        $hasVariants: Boolean!,
        $isDeliveryRequired: Boolean!,
        $variantAttributes: [ID],
        ) {
            productTypeUpdate(
            id: $id,
            input: {
                hasVariants: $hasVariants,
                isDeliveryRequired: $isDeliveryRequired,
                variantAttributes: $variantAttributes}) {
                productType {
                    id
                }
              }
            }
    """
    variant = product.variants.first()
    variant.name = '<NAME>'
    variant.save()
    has_variants = True
    require_delivery = False
    skill_type_id = graphene.Node.to_global_id(
        'SkillType', skill_type.id)
    variant_attributes = skill_type.variant_attributes.all()
    variant_attributes_ids = [
        graphene.Node.to_global_id('Attribute', att.id) for att in
        variant_attributes]
    variables = {
        'id': skill_type_id,
        'hasVariants': has_variants,
        'isDeliveryRequired': require_delivery,
        'variantAttributes': variant_attributes_ids}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    variant_attributes = set(variant_attributes)
    variant_attributes_ids = [attr.pk for attr in variant_attributes]
    # The task must be enqueued exactly once with the type pk and the
    # attribute pks that were sent.
    mock_update_variants_names.assert_called_once_with(
        skill_type.pk, variant_attributes_ids)
@patch('remote_works.skill.tasks._update_variants_names')
def test_skill_update_variants_names(mock__update_variants_names,
                                     skill_type):
    """update_variants_names must delegate to _update_variants_names.

    Bug fix: the original ended with a bare comparison expression
    (``mock...call_count == 1``) that evaluated and discarded its result,
    so the test asserted nothing. It is now a real assertion.
    """
    variant_attributes = [skill_type.variant_attributes.first()]
    variant_attr_ids = [attr.pk for attr in variant_attributes]
    update_variants_names(skill_type.pk, variant_attr_ids)
    assert mock__update_variants_names.call_count == 1
def test_skill_variants_by_ids(user_api_client, variant):
    """productVariants filtered by ids must return exactly the requested
    variant."""
    query = """
        query getSkill($ids: [ID!]) {
            productVariants(ids: $ids, first: 1) {
                edges {
                    node {
                        id
                    }
                }
            }
        }
    """
    variant_id = graphene.Node.to_global_id('SkillVariant', variant.id)
    variables = {'ids': [variant_id]}
    response = user_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content['data']['productVariants']
    assert data['edges'][0]['node']['id'] == variant_id
    assert len(data['edges']) == 1
def test_skill_variants_no_ids_list(user_api_client, variant):
    """productVariants without an ids filter must return every variant."""
    query = """
        query getSkillVariants {
            productVariants(first: 10) {
                edges {
                    node {
                        id
                    }
                }
            }
        }
    """
    response = user_api_client.post_graphql(query)
    content = get_graphql_content(response)
    data = content['data']['productVariants']
    assert len(data['edges']) == SkillVariant.objects.count()
@pytest.mark.parametrize('skill_price, variant_override, api_variant_price', [
    (100, None, 100),
    (100, 200, 200),
    (100, 0, 0)
])
def test_skill_variant_price(
        skill_price, variant_override, api_variant_price,
        user_api_client, variant):
    """The API variant price must be the skill price unless the variant
    has a price override (including an override of 0).

    Bug fix: the original referenced an undefined ``product`` name — the
    fixture's skill was bound to ``skill``.
    """
    # Set price override on variant that is different than skill price
    skill = variant.product
    skill.price = Money(amount=skill_price, currency='USD')
    skill.save()
    if variant_override is not None:
        skill.variants.update(
            price_override=Money(amount=variant_override, currency='USD'))
    else:
        skill.variants.update(price_override=None)
    # Drop other variants
    # skill.variants.exclude(id=variant.pk).delete()
    query = """
        query getSkillVariants($id: ID!) {
            skill(id: $id) {
                variants {
                    price {
                        amount
                    }
                }
            }
        }
        """
    skill_id = graphene.Node.to_global_id('Skill', variant.product.id)
    variables = {'id': skill_id}
    response = user_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content['data']['skill']
    variant_price = data['variants'][0]['price']
    assert variant_price['amount'] == api_variant_price
def test_stock_availability_filter(user_api_client, product):
    """The stockAvailability filter must track variant quantities: the
    fixture skill is in stock until its variants are zeroed out."""
    query = """
    query Skills($stockAvailability: StockAvailability) {
        skills(stockAvailability: $stockAvailability, first: 1) {
            totalCount
            edges {
                node {
                    id
                }
            }
        }
    }
    """
    # fetch skills in availability
    variables = {'stockAvailability': StockAvailability.IN_STOCK.name}
    response = user_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    assert content['data']['skills']['totalCount'] == 1
    # fetch out of availability
    variables = {'stockAvailability': StockAvailability.OUT_OF_STOCK.name}
    response = user_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    assert content['data']['skills']['totalCount'] == 0
    # Change skill availability availability and test again
    product.variants.update(quantity=0)
    # There should be no skills in availability
    variables = {'stockAvailability': StockAvailability.IN_STOCK.name}
    response = user_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    assert content['data']['skills']['totalCount'] == 0
def test_report_skill_sales(
        staff_api_client, task_with_lines, permission_manage_products,
        permission_manage_orders):
    """reportSkillSales must expose per-SKU quantity and gross revenue
    matching the task lines for the reporting period."""
    query = """
    query TopSkills($period: ReportingPeriod!) {
        reportSkillSales(period: $period, first: 20) {
            edges {
                node {
                    revenue(period: $period) {
                        gross {
                            amount
                        }
                    }
                    quantityTasked
                    sku
                }
            }
        }
    }
    """
    variables = {'period': ReportingPeriod.TODAY.name}
    permissions = [permission_manage_orders, permission_manage_products]
    response = staff_api_client.post_graphql(query, variables, permissions)
    content = get_graphql_content(response)
    edges = content['data']['reportSkillSales']['edges']
    # Each reported node must match the corresponding line by SKU.
    node_a = edges[0]['node']
    line_a = task_with_lines.lines.get(skill_sku=node_a['sku'])
    assert node_a['quantityTasked'] == line_a.quantity
    assert (
        node_a['revenue']['gross']['amount'] ==
        line_a.quantity * line_a.unit_price_gross.amount)
    node_b = edges[1]['node']
    line_b = task_with_lines.lines.get(skill_sku=node_b['sku'])
    assert node_b['quantityTasked'] == line_b.quantity
    assert (
        node_b['revenue']['gross']['amount'] ==
        line_b.quantity * line_b.unit_price_gross.amount)
def test_variant_revenue_permissions(
        staff_api_client, permission_manage_products,
        permission_manage_orders, product):
    """The variant revenue field must be readable by staff holding both
    manage-orders and manage-products permissions."""
    query = """
    query VariantRevenue($id: ID!) {
        productVariant(id: $id) {
            revenue(period: TODAY) {
                gross {
                    localized
                }
            }
        }
    }
    """
    variant = product.variants.first()
    variables = {
        'id': graphene.Node.to_global_id('SkillVariant', variant.pk)}
    permissions = [permission_manage_orders, permission_manage_products]
    response = staff_api_client.post_graphql(query, variables, permissions)
    content = get_graphql_content(response)
    assert content['data']['productVariant']['revenue']
def test_variant_quantity_permissions(
        staff_api_client, permission_manage_products, product):
    """The variant quantity field must be readable with only the
    manage-products permission."""
    query = """
    query Quantity($id: ID!) {
        productVariant(id: $id) {
            quantity
        }
    }
    """
    variant = product.variants.first()
    variables = {
        'id': graphene.Node.to_global_id('SkillVariant', variant.pk)}
    permissions = [permission_manage_products]
    response = staff_api_client.post_graphql(query, variables, permissions)
    content = get_graphql_content(response)
    assert 'quantity' in content['data']['productVariant']
def test_variant_quantity_ordered_permissions(
        staff_api_client, permission_manage_products,
        permission_manage_orders, product):
    """The quantityTasked field must be readable by staff holding both
    manage-orders and manage-products permissions."""
    query = """
    query QuantityTasked($id: ID!) {
        productVariant(id: $id) {
            quantityTasked
        }
    }
    """
    variant = product.variants.first()
    variables = {
        'id': graphene.Node.to_global_id('SkillVariant', variant.pk)}
    permissions = [permission_manage_orders, permission_manage_products]
    response = staff_api_client.post_graphql(query, variables, permissions)
    content = get_graphql_content(response)
    assert 'quantityTasked' in content['data']['productVariant']
def test_variant_quantity_allocated_permissions(
        staff_api_client, permission_manage_products,
        permission_manage_orders, product):
    """The quantityAllocated field must be readable by staff holding both
    manage-orders and manage-products permissions."""
    query = """
    query QuantityAllocated($id: ID!) {
        productVariant(id: $id) {
            quantityAllocated
        }
    }
    """
    variant = product.variants.first()
    variables = {
        'id': graphene.Node.to_global_id('SkillVariant', variant.pk)}
    permissions = [permission_manage_orders, permission_manage_products]
    response = staff_api_client.post_graphql(query, variables, permissions)
    content = get_graphql_content(response)
    assert 'quantityAllocated' in content['data']['productVariant']
def test_variant_margin_permissions(
        staff_api_client, permission_manage_products,
        permission_manage_orders, product):
    """The variant margin field must be readable by staff holding both
    manage-orders and manage-products permissions.

    Fix: the final assert line had non-Python dataset residue
    ("| tests/api/test_skill.py | import json") fused onto it, which made
    the module unparseable; the residue is removed.
    """
    query = """
    query Margin($id: ID!) {
        productVariant(id: $id) {
            margin
        }
    }
    """
    variant = product.variants.first()
    variables = {
        'id': graphene.Node.to_global_id('SkillVariant', variant.pk)}
    permissions = [permission_manage_orders, permission_manage_products]
    response = staff_api_client.post_graphql(query, variables, permissions)
    content = get_graphql_content(response)
    assert 'margin' in content['data']['productVariant']
from datetime import datetime
from unittest.mock import Mock, patch
import graphene
import pytest
from django.utils.dateparse import parse_datetime
from django.utils.text import slugify
from graphql_relay import to_global_id
from prices import Money
from remote_works.graphql.core.enums import ReportingPeriod
from remote_works.graphql.product.enums import StockAvailability
from remote_works.graphql.product.types import resolve_attribute_list
from remote_works.product.models import (
Attribute, AttributeValue, Category, Skill, SkillImage, SkillType,
SkillVariant)
from remote_works.product.tasks import update_variants_names
from tests.api.utils import get_graphql_content
from tests.utils import create_image, create_pdf_file_with_image_ext
from .utils import assert_no_permission, get_multipart_request_body
def test_resolve_attribute_list(color_attribute):
    """resolve_attribute_list must map an hstore of attribute-pk ->
    value-pk to resolved pairs, and ignore unknown pks."""
    value = color_attribute.values.first()
    attributes_hstore = {str(color_attribute.pk): str(value.pk)}
    res = resolve_attribute_list(attributes_hstore, Attribute.objects.all())
    assert len(res) == 1
    assert res[0].attribute.name == color_attribute.name
    assert res[0].value.name == value.name
    # test passing invalid hstore should resolve to empty list
    attr_pk = str(Attribute.objects.order_by('pk').last().pk + 1)
    val_pk = str(AttributeValue.objects.order_by('pk').last().pk + 1)
    attributes_hstore = {attr_pk: val_pk}
    res = resolve_attribute_list(attributes_hstore, Attribute.objects.all())
    assert res == []
def test_fetch_all_products(user_api_client, product):
    """The skills connection must report every Skill in the database.

    Bug fix: the assertions referenced an undefined ``num_products`` name —
    the count variable is ``num_skills``.
    """
    query = """
    query {
        skills(first: 1) {
            totalCount
            edges {
                node {
                    id
                }
            }
        }
    }
    """
    response = user_api_client.post_graphql(query)
    content = get_graphql_content(response)
    num_skills = Skill.objects.count()
    assert content['data']['skills']['totalCount'] == num_skills
    assert len(content['data']['skills']['edges']) == num_skills
# Bug fix: the marker was misspelled ``djangodb`` — pytest-django's marker
# is ``django_db``; the typo would not grant database access (and errors
# under --strict-markers).
@pytest.mark.django_db
def test_fetch_unavailable_products(user_api_client, product):
    """Unpublished skills must be invisible to anonymous API clients."""
    Skill.objects.update(is_published=False)
    query = """
    query {
        skills(first: 1) {
            totalCount
            edges {
                node {
                    id
                }
            }
        }
    }
    """
    response = user_api_client.post_graphql(query)
    content = get_graphql_content(response)
    assert content['data']['skills']['totalCount'] == 0
    assert not content['data']['skills']['edges']
def test_skill_query(staff_api_client, product, permission_manage_products):
    """Querying a category's skills as staff must expose availability,
    price range, purchase cost and margin data matching the model."""
    category = Category.objects.first()
    skill = category.products.first()
    query = """
    query {
        category(id: "%(category_id)s") {
            skills(first: 20) {
                edges {
                    node {
                        id
                        name
                        url
                        thumbnailUrl
                        thumbnail{
                            url
                            alt
                        }
                        images {
                            url
                        }
                        variants {
                            name
                            stockQuantity
                        }
                        availability {
                            available,
                            priceRange {
                                start {
                                    gross {
                                        amount
                                        currency
                                        localized
                                    }
                                    net {
                                        amount
                                        currency
                                        localized
                                    }
                                    currency
                                }
                            }
                        }
                        purchaseCost {
                            start {
                                amount
                            }
                            stop {
                                amount
                            }
                        }
                        margin {
                            start
                            stop
                        }
                    }
                }
            }
        }
    }
    """ % {'category_id': graphene.Node.to_global_id('Category', category.id)}
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(query)
    content = get_graphql_content(response)
    assert content['data']['category'] is not None
    skill_edges_data = content['data']['category']['skills']['edges']
    assert len(skill_edges_data) == category.products.count()
    skill_data = skill_edges_data[0]['node']
    assert skill_data['name'] == product.name
    assert skill_data['url'] == product.get_absolute_url()
    gross = skill_data['availability']['priceRange']['start']['gross']
    assert float(gross['amount']) == float(product.price.amount)
    # Cross-check the API cost/margin figures against the utility helper.
    from remote_works.product.utils.costs import get_skill_costs_data
    purchase_cost, margin = get_skill_costs_data(product)
    assert purchase_cost.start.amount == skill_data[
        'purchaseCost']['start']['amount']
    assert purchase_cost.stop.amount == skill_data[
        'purchaseCost']['stop']['amount']
    assert margin[0] == skill_data['margin']['start']
    assert margin[1] == skill_data['margin']['stop']
def test_skill_query_search(user_api_client, skill_type, category):
    """Fuzzy search ('blu p4int') matches only the intended skill."""
    blue_skill = Skill.objects.create(
        name='Blue Paint', price=Money('10.00', 'USD'),
        skill_type=skill_type, category=category)
    Skill.objects.create(
        name='Red Paint', price=Money('10.00', 'USD'),
        skill_type=skill_type, category=category)
    query = """
    query productSearch($query: String) {
        skills(query: $query, first: 10) {
            edges {
                node {
                    name
                }
            }
        }
    }
    """
    response = user_api_client.post_graphql(query, {'query': 'blu p4int'})
    content = get_graphql_content(response)
    skills = content['data']['skills']['edges']
    # The original asserted on undefined names (products / blue_product).
    assert len(skills) == 1
    assert skills[0]['node']['name'] == blue_skill.name
def test_query_skill_image_by_id(user_api_client, skill_with_image):
    """Fetching a skill image by its ID succeeds without GraphQL errors."""
    query = """
    query productImageById($imageId: ID!, $productId: ID!) {
        skill(id: $productId) {
            imageById(id: $imageId) {
                id
                url
            }
        }
    }
    """
    first_image = skill_with_image.images.first()
    variables = {
        'productId': graphene.Node.to_global_id('Skill', skill_with_image.pk),
        'imageId': graphene.Node.to_global_id('SkillImage', first_image.pk)}
    # get_graphql_content raises if the response carries errors.
    get_graphql_content(user_api_client.post_graphql(query, variables))
def test_skill_with_collections(
        staff_api_client, product, collection, permission_manage_products):
    """A skill exposes exactly the collections it belongs to."""
    query = """
        query getSkill($productID: ID!) {
            skill(id: $productID) {
                collections {
                    name
                }
            }
        }
    """
    product.collections.add(collection)
    product.save()
    variables = {
        'productID': graphene.Node.to_global_id('Skill', product.id)}
    staff_api_client.user.user_permissions.add(permission_manage_products)
    content = get_graphql_content(
        staff_api_client.post_graphql(query, variables))
    collections_data = content['data']['skill']['collections']
    assert len(collections_data) == 1
    assert collections_data[0]['name'] == collection.name
def test_filter_skill_by_category(user_api_client, product):
    """Filtering skills by category returns the skill in that category."""
    query = """
    query getSkills($categoryId: ID) {
        skills(categories: [$categoryId], first: 1) {
            edges {
                node {
                    name
                }
            }
        }
    }
    """
    category_id = graphene.Node.to_global_id('Category', product.category.id)
    content = get_graphql_content(
        user_api_client.post_graphql(query, {'categoryId': category_id}))
    first_node = content['data']['skills']['edges'][0]['node']
    assert first_node['name'] == product.name
def test_fetch_skill_by_id(user_api_client, product):
    """A skill is reachable through the Relay node interface."""
    query = """
    query ($productId: ID!) {
        node(id: $productId) {
            ... on Skill {
                name
            }
        }
    }
    """
    node_id = graphene.Node.to_global_id('Skill', product.id)
    content = get_graphql_content(
        user_api_client.post_graphql(query, {'productId': node_id}))
    assert content['data']['node']['name'] == product.name
def _fetch_product(client, product, permissions=None):
    """Fetch *product* as a Relay node using *client*.

    Returns the node payload, or None when the skill is not visible
    to the caller.
    """
    query = """
    query ($productId: ID!) {
        node(id: $productId) {
            ... on Skill {
                name,
                isPublished
            }
        }
    }
    """
    variables = {
        'productId': graphene.Node.to_global_id('Skill', product.id)}
    response = client.post_graphql(
        query, variables, permissions=permissions, check_no_permissions=False)
    return get_graphql_content(response)['data']['node']
def test_fetch_unpublished_skill_staff_user(
        staff_api_client, unavailable_product, permission_manage_products):
    """Staff with manage permission can still see an unpublished skill."""
    node = _fetch_product(
        staff_api_client, unavailable_product,
        permissions=[permission_manage_products])
    assert node['name'] == unavailable_product.name
    assert node['isPublished'] == unavailable_product.is_published
def test_fetch_unpublished_skill_customer(
        user_api_client, unavailable_product):
    """Customers must not be able to see unpublished skills."""
    assert _fetch_product(user_api_client, unavailable_product) is None
def test_fetch_unpublished_skill_anonymous_user(
        api_client, unavailable_product):
    """Anonymous users must not be able to see unpublished skills."""
    assert _fetch_product(api_client, unavailable_product) is None
def test_filter_products_by_attributes(user_api_client, product):
    """Filtering by an attribute "slug:value" pair finds the skill."""
    attribute = product.skill_type.skill_attributes.first()
    value = attribute.values.first()
    query = """
    query {
        skills(attributes: ["%(filter_by)s"], first: 1) {
            edges {
                node {
                    name
                }
            }
        }
    }
    """ % {'filter_by': '%s:%s' % (attribute.slug, value.slug)}
    content = get_graphql_content(user_api_client.post_graphql(query))
    node = content['data']['skills']['edges'][0]['node']
    assert node['name'] == product.name
def test_filter_products_by_categories(
        user_api_client, categories_tree, product):
    """Filtering by a child category returns the skills assigned to it."""
    child_category = categories_tree.children.first()
    product.category = child_category
    product.save()
    query = """
    query {
        skills(categories: ["%(category_id)s"], first: 1) {
            edges {
                node {
                    name
                }
            }
        }
    }
    """ % {'category_id': graphene.Node.to_global_id(
        'Category', child_category.id)}
    content = get_graphql_content(user_api_client.post_graphql(query))
    node = content['data']['skills']['edges'][0]['node']
    assert node['name'] == product.name
def test_filter_products_by_collections(
        user_api_client, collection, product):
    """Filtering by a collection returns the skills that belong to it."""
    collection.products.add(product)
    query = """
    query {
        skills(collections: ["%(collection_id)s"], first: 1) {
            edges {
                node {
                    name
                }
            }
        }
    }
    """ % {'collection_id': graphene.Node.to_global_id(
        'Collection', collection.id)}
    content = get_graphql_content(user_api_client.post_graphql(query))
    node = content['data']['skills']['edges'][0]['node']
    assert node['name'] == product.name
def test_sort_products(user_api_client, product):
    """Skills can be sorted by price and by update date, both directions."""
    # set price and update date of the first skill
    product.price = Money('10.00', 'USD')
    product.updated_at = datetime.utcnow()
    product.save()
    # Create the second skill with higher price and date
    # (setting pk to None makes save() insert a new row — Django idiom).
    product.pk = None
    product.price = Money('20.00', 'USD')
    product.updated_at = datetime.utcnow()
    product.save()
    query = """
    query {
        skills(sortBy: %(sort_by_skill_order)s, first: 2) {
            edges {
                node {
                    price {
                        amount
                    }
                    updatedAt
                }
            }
        }
    }
    """
    asc_price_query = query % {
        'sort_by_skill_order': '{field: PRICE, direction:ASC}'}
    response = user_api_client.post_graphql(asc_price_query)
    content = get_graphql_content(response)
    price_0 = content['data']['skills']['edges'][0]['node']['price']['amount']
    price_1 = content['data']['skills']['edges'][1]['node']['price']['amount']
    assert price_0 < price_1
    desc_price_query = query % {
        'sort_by_skill_order': '{field: PRICE, direction:DESC}'}
    response = user_api_client.post_graphql(desc_price_query)
    content = get_graphql_content(response)
    price_0 = content['data']['skills']['edges'][0]['node']['price']['amount']
    price_1 = content['data']['skills']['edges'][1]['node']['price']['amount']
    assert price_0 > price_1
    asc_date_query = query % {
        'sort_by_skill_order': '{field: DATE, direction:ASC}'}
    response = user_api_client.post_graphql(asc_date_query)
    content = get_graphql_content(response)
    # updatedAt arrives as an ISO-8601 string; parse before comparing.
    date_0 = content['data']['skills']['edges'][0]['node']['updatedAt']
    date_1 = content['data']['skills']['edges'][1]['node']['updatedAt']
    assert parse_datetime(date_0) < parse_datetime(date_1)
    desc_date_query = query % {
        'sort_by_skill_order': '{field: DATE, direction:DESC}'}
    response = user_api_client.post_graphql(desc_date_query)
    content = get_graphql_content(response)
    date_0 = content['data']['skills']['edges'][0]['node']['updatedAt']
    date_1 = content['data']['skills']['edges'][1]['node']['updatedAt']
    assert parse_datetime(date_0) > parse_datetime(date_1)
def test_create_product(
        staff_api_client, skill_type, category, size_attribute,
        permission_manage_products):
    """productCreate stores all scalar fields and resolves attribute values.

    A known attribute value is matched by slug; an unknown value for the
    second attribute is expected to be created (its slug appears in the
    returned attributes).
    """
    query = """
        mutation createSkill(
            $productTypeId: ID!,
            $categoryId: ID!,
            $name: String!,
            $description: String!,
            $descriptionJson: JSONString!,
            $isPublished: Boolean!,
            $chargeTaxes: Boolean!,
            $taxRate: TaxRateType!,
            $price: Decimal!,
            $attributes: [AttributeValueInput!]) {
                productCreate(
                    input: {
                        category: $categoryId,
                        productType: $productTypeId,
                        name: $name,
                        description: $description,
                        descriptionJson: $descriptionJson,
                        isPublished: $isPublished,
                        chargeTaxes: $chargeTaxes,
                        taxRate: $taxRate,
                        price: $price,
                        attributes: $attributes
                    }) {
                        skill {
                            category {
                                name
                            }
                            description
                            descriptionJson
                            isPublished
                            chargeTaxes
                            taxRate
                            name
                            price {
                                amount
                            }
                            productType {
                                name
                            }
                            attributes {
                                attribute {
                                    slug
                                }
                                value {
                                    slug
                                }
                            }
                        }
                        errors {
                            message
                            field
                        }
                    }
                }
    """
    skill_type_id = graphene.Node.to_global_id(
        'SkillType', skill_type.pk)
    category_id = graphene.Node.to_global_id(
        'Category', category.pk)
    skill_description = 'test description'
    skill_description_json = json.dumps({'content': 'description'})
    skill_name = '<NAME>'
    skill_is_published = True
    skill_charge_taxes = True
    skill_tax_rate = 'STANDARD'
    skill_price = 22.33
    # Default attribute defined in skill_type fixture
    color_attr = skill_type.skill_attributes.get(name='Color')
    color_value_slug = color_attr.values.first().slug
    color_attr_slug = color_attr.slug
    # Add second attribute
    skill_type.skill_attributes.add(size_attribute)
    size_attr_slug = skill_type.skill_attributes.get(name='Size').slug
    non_existent_attr_value = 'The cake is a lie'
    # test creating root skill
    variables = {
        'productTypeId': skill_type_id,
        'categoryId': category_id,
        'name': skill_name,
        'description': skill_description,
        'descriptionJson': skill_description_json,
        'isPublished': skill_is_published,
        'chargeTaxes': skill_charge_taxes,
        'taxRate': skill_tax_rate,
        'price': skill_price,
        'attributes': [
            {'slug': color_attr_slug, 'value': color_value_slug},
            {'slug': size_attr_slug, 'value': non_existent_attr_value}]}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productCreate']
    assert data['errors'] == []
    assert data['skill']['name'] == skill_name
    assert data['skill']['description'] == skill_description
    assert data['skill']['descriptionJson'] == skill_description_json
    assert data['skill']['isPublished'] == skill_is_published
    assert data['skill']['chargeTaxes'] == skill_charge_taxes
    assert data['skill']['taxRate'] == skill_tax_rate
    assert data['skill']['productType']['name'] == skill_type.name
    assert data['skill']['category']['name'] == category.name
    # Attribute order in the response is not assumed; check membership.
    values = (
        data['skill']['attributes'][0]['value']['slug'],
        data['skill']['attributes'][1]['value']['slug'])
    assert slugify(non_existent_attr_value) in values
    assert color_value_slug in values
QUERY_CREATE_SKILL_WITHOUT_VARIANTS = """
mutation createSkill(
$productTypeId: ID!,
$categoryId: ID!
$name: String!,
$description: String!,
$price: Decimal!,
$sku: String,
$quantity: Int,
$trackInventory: Boolean)
{
productCreate(
input: {
category: $categoryId,
productType: $productTypeId,
name: $name,
description: $description,
price: $price,
sku: $sku,
quantity: $quantity,
trackInventory: $trackInventory
})
{
skill {
id
name
variants{
id
sku
quantity
trackInventory
}
category {
name
}
productType {
name
}
}
errors {
message
field
}
}
}
"""
def test_create_skill_without_variants(
        staff_api_client, skill_type_without_variant, category,
        permission_manage_products):
    """Creating a skill on a variant-less type spawns a default variant."""
    sku = 'sku'
    quantity = 1
    track_inventory = True
    variables = {
        'productTypeId': graphene.Node.to_global_id(
            'SkillType', skill_type_without_variant.pk),
        'categoryId': graphene.Node.to_global_id('Category', category.pk),
        'name': '<NAME>',
        'description': 'description',
        'price': 10,
        'sku': sku,
        'quantity': quantity,
        'trackInventory': track_inventory}
    response = staff_api_client.post_graphql(
        QUERY_CREATE_SKILL_WITHOUT_VARIANTS, variables,
        permissions=[permission_manage_products])
    data = get_graphql_content(response)['data']['productCreate']
    assert data['errors'] == []
    assert data['skill']['name'] == '<NAME>'
    assert data['skill']['productType']['name'] == (
        skill_type_without_variant.name)
    assert data['skill']['category']['name'] == category.name
    # The implicitly created default variant carries the input values.
    default_variant = data['skill']['variants'][0]
    assert default_variant['sku'] == sku
    assert default_variant['quantity'] == quantity
    assert default_variant['trackInventory'] == track_inventory
def test_create_skill_without_variants_sku_validation(
        staff_api_client, skill_type_without_variant, category,
        permission_manage_products):
    """A missing SKU is rejected when the skill type has no variants."""
    variables = {
        'productTypeId': graphene.Node.to_global_id(
            'SkillType', skill_type_without_variant.pk),
        'categoryId': graphene.Node.to_global_id('Category', category.pk),
        'name': '<NAME>',
        'description': 'description',
        'price': 10,
        'sku': None,
        'quantity': 1,
        'trackInventory': True}
    response = staff_api_client.post_graphql(
        QUERY_CREATE_SKILL_WITHOUT_VARIANTS, variables,
        permissions=[permission_manage_products])
    data = get_graphql_content(response)['data']['productCreate']
    error = data['errors'][0]
    assert error['field'] == 'sku'
    assert error['message'] == 'This field cannot be blank.'
def test_create_skill_without_variants_sku_duplication(
        staff_api_client, skill_type_without_variant, category,
        permission_manage_products, skill_with_default_variant):
    """Creating a skill with an already-used SKU reports a validation error."""
    variables = {
        'productTypeId': graphene.Node.to_global_id(
            'SkillType', skill_type_without_variant.pk),
        'categoryId': graphene.Node.to_global_id('Category', category.pk),
        'name': '<NAME>',
        'description': 'description',
        'price': 10,
        'sku': '1234',
        'quantity': 1,
        'trackInventory': True}
    response = staff_api_client.post_graphql(
        QUERY_CREATE_SKILL_WITHOUT_VARIANTS, variables,
        permissions=[permission_manage_products])
    data = get_graphql_content(response)['data']['productCreate']
    error = data['errors'][0]
    assert error['field'] == 'sku'
    assert error['message'] == 'Skill with this SKU already exists.'
def test_update_product(
        staff_api_client, category, non_default_category, product,
        permission_manage_products):
    """productUpdate persists scalar fields and moves the skill between
    categories (from ``category`` to ``non_default_category``)."""
    query = """
        mutation updateSkill(
            $productId: ID!,
            $categoryId: ID!,
            $name: String!,
            $description: String!,
            $isPublished: Boolean!,
            $chargeTaxes: Boolean!,
            $taxRate: TaxRateType!,
            $price: Decimal!,
            $attributes: [AttributeValueInput!]) {
                productUpdate(
                    id: $productId,
                    input: {
                        category: $categoryId,
                        name: $name,
                        description: $description,
                        isPublished: $isPublished,
                        chargeTaxes: $chargeTaxes,
                        taxRate: $taxRate,
                        price: $price,
                        attributes: $attributes
                    }) {
                        skill {
                            category {
                                name
                            }
                            description
                            isPublished
                            chargeTaxes
                            taxRate
                            name
                            price {
                                amount
                            }
                            productType {
                                name
                            }
                            attributes {
                                attribute {
                                    name
                                }
                                value {
                                    name
                                }
                            }
                        }
                        errors {
                            message
                            field
                        }
                    }
            }
    """
    skill_id = graphene.Node.to_global_id('Skill', product.pk)
    category_id = graphene.Node.to_global_id(
        'Category', non_default_category.pk)
    skill_description = 'updated description'
    skill_name = 'updated name'
    skill_isPublished = True
    skill_chargeTaxes = True
    skill_taxRate = 'STANDARD'
    skill_price = "33.12"
    variables = {
        'productId': skill_id,
        'categoryId': category_id,
        'name': skill_name,
        'description': skill_description,
        'isPublished': skill_isPublished,
        'chargeTaxes': skill_chargeTaxes,
        'taxRate': skill_taxRate,
        'price': skill_price}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productUpdate']
    assert data['errors'] == []
    assert data['skill']['name'] == skill_name
    assert data['skill']['description'] == skill_description
    assert data['skill']['isPublished'] == skill_isPublished
    assert data['skill']['chargeTaxes'] == skill_chargeTaxes
    assert data['skill']['taxRate'] == skill_taxRate
    # The category changed, so it no longer matches the original one.
    assert not data['skill']['category']['name'] == category.name
def test_update_skill_without_variants(
        staff_api_client, skill_with_default_variant,
        permission_manage_products):
    """Updating a variant-less skill writes through to its default variant."""
    query = """
    mutation updateSkill(
        $productId: ID!,
        $sku: String,
        $quantity: Int,
        $trackInventory: Boolean,
        $description: String)
    {
        productUpdate(
            id: $productId,
            input: {
                sku: $sku,
                quantity: $quantity,
                trackInventory: $trackInventory,
                description: $description
            })
        {
            skill {
                id
                variants{
                    id
                    sku
                    quantity
                    trackInventory
                }
            }
            errors {
                message
                field
            }
        }
    }
    """
    skill = skill_with_default_variant
    # The original referenced an undefined ``product`` name here.
    skill_id = graphene.Node.to_global_id('Skill', skill.pk)
    skill_sku = "test_sku"
    skill_quantity = 10
    skill_track_inventory = False
    skill_description = "test description"
    variables = {
        'productId': skill_id,
        'sku': skill_sku,
        'quantity': skill_quantity,
        'trackInventory': skill_track_inventory,
        'description': skill_description}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productUpdate']
    assert data['errors'] == []
    # The default variant carries the updated sku/quantity/track values.
    variant = data['skill']['variants'][0]
    assert variant['sku'] == skill_sku
    assert variant['quantity'] == skill_quantity
    assert variant['trackInventory'] == skill_track_inventory
def test_update_skill_without_variants_sku_duplication(
        staff_api_client, skill_with_default_variant,
        permission_manage_products, product):
    """Updating a skill's SKU to one already in use fails validation."""
    query = """
    mutation updateSkill(
        $productId: ID!,
        $sku: String)
    {
        productUpdate(
            id: $productId,
            input: {
                sku: $sku
            })
        {
            skill {
                id
            }
            errors {
                message
                field
            }
        }
    }"""
    # NOTE(review): this local is unused — the mutation below targets the
    # ``product`` fixture, not ``skill_with_default_variant``; confirm intent.
    skill = skill_with_default_variant
    skill_id = graphene.Node.to_global_id('Skill', product.pk)
    # presumably "123" collides with an existing variant SKU from a fixture
    # — TODO confirm against conftest.
    skill_sku = "123"
    variables = {
        'productId': skill_id,
        'sku': skill_sku}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productUpdate']
    assert data['errors']
    assert data['errors'][0]['field'] == 'sku'
    assert data['errors'][0]['message'] == 'Skill with this SKU already exists.'
def test_delete_product(staff_api_client, product, permission_manage_products):
    """Deleting a skill removes the DB row and echoes the deleted node."""
    query = """
        mutation DeleteSkill($id: ID!) {
            productDelete(id: $id) {
                skill {
                    name
                    id
                }
                errors {
                    field
                    message
                }
            }
        }
    """
    node_id = graphene.Node.to_global_id('Skill', product.id)
    response = staff_api_client.post_graphql(
        query, {'id': node_id}, permissions=[permission_manage_products])
    data = get_graphql_content(response)['data']['productDelete']
    assert data['skill']['name'] == product.name
    assert data['skill']['id'] == node_id
    # The row must be gone from the database.
    with pytest.raises(product._meta.model.DoesNotExist):
        product.refresh_from_db()
def test_skill_type(user_api_client, skill_type):
    """All skill types are listed, and totalCount matches the DB count."""
    query = """
    query {
        skillTypes(first: 20) {
            totalCount
            edges {
                node {
                    id
                    name
                    skills(first: 1) {
                        edges {
                            node {
                                id
                            }
                        }
                    }
                }
            }
        }
    }
    """
    content = get_graphql_content(user_api_client.post_graphql(query))
    expected_count = SkillType.objects.count()
    assert content['data']['skillTypes']['totalCount'] == expected_count
    assert len(content['data']['skillTypes']['edges']) == expected_count
def test_skill_type_query(
        user_api_client, staff_api_client, skill_type, product,
        permission_manage_products):
    """Skill counts on a skill type respect publication visibility.

    Customers must not count the unpublished skill, while staff with
    manage permission see every skill plus the tax rate.
    """
    query = """
    query getSkillType($id: ID!) {
        productType(id: $id) {
            name
            skills(first: 20) {
                totalCount
                edges {
                    node {
                        name
                    }
                }
            }
            taxRate
        }
    }
    """
    no_skills = Skill.objects.count()
    product.is_published = False
    product.save()
    variables = {
        'id': graphene.Node.to_global_id('SkillType', skill_type.id)}
    response = user_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content['data']
    assert data['productType']['skills']['totalCount'] == no_skills - 1
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content['data']
    # Staff see the unpublished skill too (original used the undefined
    # name ``no_products`` here).
    assert data['productType']['skills']['totalCount'] == no_skills
    assert data['productType']['taxRate'] == skill_type.tax_rate.upper()
def test_skill_type_create_mutation(
        staff_api_client, skill_type, permission_manage_products):
    """productTypeCreate stores the new type with its attribute sets and
    lower-cases the tax rate on the model."""
    query = """
    mutation createSkillType(
        $name: String!,
        $taxRate: TaxRateType!,
        $hasVariants: Boolean!,
        $isDeliveryRequired: Boolean!,
        $productAttributes: [ID],
        $variantAttributes: [ID]) {
        productTypeCreate(
            input: {
                name: $name,
                taxRate: $taxRate,
                hasVariants: $hasVariants,
                isDeliveryRequired: $isDeliveryRequired,
                productAttributes: $productAttributes,
                variantAttributes: $variantAttributes}) {
            productType {
            name
            taxRate
            isDeliveryRequired
            hasVariants
            variantAttributes {
                name
                values {
                    name
                }
            }
            productAttributes {
                name
                values {
                    name
                }
            }
        }
    }
    }
    """
    skill_type_name = 'test type'
    has_variants = True
    require_delivery = True
    # Reuse the attribute sets of the existing fixture type.
    skill_attributes = skill_type.skill_attributes.all()
    skill_attributes_ids = [
        graphene.Node.to_global_id('Attribute', att.id) for att in
        skill_attributes]
    variant_attributes = skill_type.variant_attributes.all()
    variant_attributes_ids = [
        graphene.Node.to_global_id('Attribute', att.id) for att in
        variant_attributes]
    variables = {
        'name': skill_type_name, 'hasVariants': has_variants,
        'taxRate': 'STANDARD',
        'isDeliveryRequired': require_delivery,
        'productAttributes': skill_attributes_ids,
        'variantAttributes': variant_attributes_ids}
    initial_count = SkillType.objects.count()
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    assert SkillType.objects.count() == initial_count + 1
    data = content['data']['productTypeCreate']['productType']
    assert data['name'] == skill_type_name
    assert data['hasVariants'] == has_variants
    assert data['isDeliveryRequired'] == require_delivery
    pa = skill_attributes[0]
    assert data['productAttributes'][0]['name'] == pa.name
    pa_values = data['productAttributes'][0]['values']
    assert sorted([value['name'] for value in pa_values]) == sorted(
        [value.name for value in pa.values.all()])
    va = variant_attributes[0]
    assert data['variantAttributes'][0]['name'] == va.name
    va_values = data['variantAttributes'][0]['values']
    assert sorted([value['name'] for value in va_values]) == sorted(
        [value.name for value in va.values.all()])
    # Tax rate is stored lower-cased on the model.
    new_instance = SkillType.objects.latest('pk')
    assert new_instance.tax_rate == 'standard'
def test_skill_type_update_mutation(
        staff_api_client, skill_type, permission_manage_products):
    """productTypeUpdate clears skill attributes when given an empty list
    while leaving variant attributes untouched."""
    query = """
    mutation updateSkillType(
        $id: ID!,
        $name: String!,
        $hasVariants: Boolean!,
        $isDeliveryRequired: Boolean!,
        $productAttributes: [ID],
        ) {
            productTypeUpdate(
            id: $id,
            input: {
                name: $name,
                hasVariants: $hasVariants,
                isDeliveryRequired: $isDeliveryRequired,
                productAttributes: $productAttributes
            }) {
                productType {
                    name
                    isDeliveryRequired
                    hasVariants
                    variantAttributes {
                        id
                    }
                    productAttributes {
                        id
                    }
                }
            }
        }
    """
    skill_type_name = 'test type updated'
    has_variants = True
    require_delivery = False
    skill_type_id = graphene.Node.to_global_id(
        'SkillType', skill_type.id)
    # Test scenario: remove all skill attributes using [] as input
    # but do not change variant attributes
    skill_attributes = []
    skill_attributes_ids = [
        graphene.Node.to_global_id('Attribute', att.id) for att in
        skill_attributes]
    variant_attributes = skill_type.variant_attributes.all()
    variables = {
        'id': skill_type_id, 'name': skill_type_name,
        'hasVariants': has_variants,
        'isDeliveryRequired': require_delivery,
        'productAttributes': skill_attributes_ids}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productTypeUpdate']['productType']
    assert data['name'] == skill_type_name
    assert data['hasVariants'] == has_variants
    assert data['isDeliveryRequired'] == require_delivery
    assert len(data['productAttributes']) == 0
    assert len(data['variantAttributes']) == (
        variant_attributes.count())
def test_skill_type_delete_mutation(
        staff_api_client, skill_type, permission_manage_products):
    """Deleting a skill type removes it from the database."""
    query = """
    mutation deleteSkillType($id: ID!) {
        productTypeDelete(id: $id) {
            productType {
                name
            }
        }
    }
    """
    node_id = graphene.Node.to_global_id('SkillType', skill_type.id)
    response = staff_api_client.post_graphql(
        query, {'id': node_id}, permissions=[permission_manage_products])
    data = get_graphql_content(response)['data']['productTypeDelete']
    assert data['productType']['name'] == skill_type.name
    with pytest.raises(skill_type._meta.model.DoesNotExist):
        skill_type.refresh_from_db()
def test_skill_image_create_mutation(
        monkeypatch, staff_api_client, product, permission_manage_products):
    """Uploading a valid image stores it and warms thumbnail generation."""
    query = """
    mutation createSkillImage($image: Upload!, $skill: ID!) {
        productImageCreate(input: {image: $image, skill: $skill}) {
            image {
                id
            }
        }
    }
    """
    thumbnails_mock = Mock(return_value=None)
    monkeypatch.setattr(
        ('remote_works.graphql.skill.mutations.skills.'
         'create_skill_thumbnails.delay'),
        thumbnails_mock)
    upload_file, upload_name = create_image()
    variables = {
        'skill': graphene.Node.to_global_id('Skill', product.id),
        'image': upload_name}
    body = get_multipart_request_body(
        query, variables, upload_file, upload_name)
    response = staff_api_client.post_multipart(
        body, permissions=[permission_manage_products])
    get_graphql_content(response)
    product.refresh_from_db()
    skill_image = product.images.last()
    assert skill_image.image.file
    # The image creation should have triggered a thumbnail warm-up.
    thumbnails_mock.assert_called_once_with(skill_image.pk)
def test_invalid_skill_image_create_mutation(
        staff_api_client, product, permission_manage_products):
    """A non-image upload is rejected and no image row is created."""
    query = """
    mutation createSkillImage($image: Upload!, $skill: ID!) {
        productImageCreate(input: {image: $image, skill: $skill}) {
            image {
                id
                url
                sortTask
            }
            errors {
                field
                message
            }
        }
    }
    """
    # A PDF disguised with an image extension must still be refused.
    upload_file, upload_name = create_pdf_file_with_image_ext()
    variables = {
        'skill': graphene.Node.to_global_id('Skill', product.id),
        'image': upload_name}
    body = get_multipart_request_body(
        query, variables, upload_file, upload_name)
    response = staff_api_client.post_multipart(
        body, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    assert content['data']['productImageCreate']['errors'] == [{
        'field': 'image',
        'message': 'Invalid file type'}]
    product.refresh_from_db()
    assert product.images.count() == 0
def test_skill_image_update_mutation(
        monkeypatch,
        staff_api_client, skill_with_image, permission_manage_products):
    """Updating an image's alt text does not re-trigger thumbnailing."""
    query = """
    mutation updateSkillImage($imageId: ID!, $alt: String) {
        productImageUpdate(id: $imageId, input: {alt: $alt}) {
            image {
                alt
            }
        }
    }
    """
    thumbnails_mock = Mock(return_value=None)
    monkeypatch.setattr(
        ('remote_works.graphql.skill.mutations.skills.'
         'create_skill_thumbnails.delay'),
        thumbnails_mock)
    image_obj = skill_with_image.images.first()
    alt = 'damage alt'
    variables = {
        'alt': alt,
        'imageId': graphene.Node.to_global_id('SkillImage', image_obj.id)}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    assert content['data']['productImageUpdate']['image']['alt'] == alt
    # The image field itself was untouched, so no thumbnail warm-up runs.
    assert thumbnails_mock.call_count == 0
def test_skill_image_delete(
        staff_api_client, skill_with_image, permission_manage_products):
    """Deleting a skill image removes the row and echoes the deleted node."""
    skill = skill_with_image
    query = """
            mutation deleteSkillImage($id: ID!) {
                productImageDelete(id: $id) {
                    image {
                        id
                        url
                    }
                }
            }
        """
    # The original referenced an undefined ``product`` name here.
    image_obj = skill.images.first()
    node_id = graphene.Node.to_global_id('SkillImage', image_obj.id)
    variables = {'id': node_id}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    data = content['data']['productImageDelete']
    assert image_obj.image.url in data['image']['url']
    with pytest.raises(image_obj._meta.model.DoesNotExist):
        image_obj.refresh_from_db()
    assert node_id == data['image']['id']
def test_retask_images(
        staff_api_client, skill_with_images, permission_manage_products):
    """Reordering skill images persists the swapped image sequence."""
    query = """
    mutation reorderImages($skill_id: ID!, $images_ids: [ID]!) {
        productImageReorder(productId: $skill_id, imagesIds: $images_ids) {
            skill {
                id
            }
        }
    }
    """
    skill = skill_with_images
    # The original referenced an undefined ``product`` name below.
    images = skill.images.all()
    image_0 = images[0]
    image_1 = images[1]
    image_0_id = graphene.Node.to_global_id('SkillImage', image_0.id)
    image_1_id = graphene.Node.to_global_id('SkillImage', image_1.id)
    skill_id = graphene.Node.to_global_id('Skill', skill.id)
    variables = {
        'skill_id': skill_id, 'images_ids': [image_1_id, image_0_id]}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    get_graphql_content(response)
    # Check if the order has been changed
    skill.refresh_from_db()
    reordered_images = skill.images.all()
    reordered_image_0 = reordered_images[0]
    reordered_image_1 = reordered_images[1]
    assert image_0.id == reordered_image_1.id
    assert image_1.id == reordered_image_0.id
ASSIGN_VARIANT_QUERY = """
mutation assignVariantImageMutation($variantId: ID!, $imageId: ID!) {
variantImageAssign(variantId: $variantId, imageId: $imageId) {
errors {
field
message
}
productVariant {
id
}
}
}
"""
def test_assign_variant_image(
        staff_api_client, user_api_client, skill_with_image,
        permission_manage_products):
    """Assigning an image of the same skill to a variant succeeds."""
    variant = skill_with_image.variants.first()
    image = skill_with_image.images.first()
    variables = {
        'variantId': to_global_id('SkillVariant', variant.pk),
        'imageId': to_global_id('SkillImage', image.pk)}
    response = staff_api_client.post_graphql(
        ASSIGN_VARIANT_QUERY, variables,
        permissions=[permission_manage_products])
    get_graphql_content(response)
    variant.refresh_from_db()
    assert variant.images.first() == image
def test_assign_variant_image_from_different_product(
        staff_api_client, user_api_client, skill_with_image,
        permission_manage_products):
    """An image from another skill cannot be assigned to a variant."""
    variant = skill_with_image.variants.first()
    # Duplicate the skill so the new image hangs off a different row.
    skill_with_image.pk = None
    skill_with_image.save()
    foreign_image = SkillImage.objects.create(product=skill_with_image)
    variables = {
        'variantId': to_global_id('SkillVariant', variant.pk),
        'imageId': to_global_id('SkillImage', foreign_image.pk)}
    response = staff_api_client.post_graphql(
        ASSIGN_VARIANT_QUERY, variables,
        permissions=[permission_manage_products])
    content = get_graphql_content(response)
    errors = content['data']['variantImageAssign']['errors']
    assert errors[0]['field'] == 'imageId'
    # check permissions
    assert_no_permission(
        user_api_client.post_graphql(ASSIGN_VARIANT_QUERY, variables))
UNASSIGN_VARIANT_IMAGE_QUERY = """
mutation unassignVariantImageMutation($variantId: ID!, $imageId: ID!) {
variantImageUnassign(variantId: $variantId, imageId: $imageId) {
errors {
field
message
}
productVariant {
id
}
}
}
"""
def test_unassign_variant_image(
        staff_api_client, skill_with_image, permission_manage_products):
    """Unassigning an attached image leaves the variant with no images."""
    image = skill_with_image.images.first()
    variant = skill_with_image.variants.first()
    variant.variant_images.create(image=image)
    variables = {
        'variantId': to_global_id('SkillVariant', variant.pk),
        'imageId': to_global_id('SkillImage', image.pk)}
    response = staff_api_client.post_graphql(
        UNASSIGN_VARIANT_IMAGE_QUERY, variables,
        permissions=[permission_manage_products])
    get_graphql_content(response)
    variant.refresh_from_db()
    assert variant.images.count() == 0
def test_unassign_not_assigned_variant_image(
        staff_api_client, skill_with_image, permission_manage_products):
    """Unassigning an image that was never assigned reports an error."""
    variant = skill_with_image.variants.first()
    unrelated_image = SkillImage.objects.create(product=skill_with_image)
    variables = {
        'variantId': to_global_id('SkillVariant', variant.pk),
        'imageId': to_global_id('SkillImage', unrelated_image.pk)}
    response = staff_api_client.post_graphql(
        UNASSIGN_VARIANT_IMAGE_QUERY, variables,
        permissions=[permission_manage_products])
    content = get_graphql_content(response)
    errors = content['data']['variantImageUnassign']['errors']
    assert errors[0]['field'] == 'imageId'
@patch('remote_works.skill.tasks.update_variants_names.delay')
def test_skill_type_update_changes_variant_name(
        mock_update_variants_names, staff_api_client, skill_type,
        product, permission_manage_products):
    """Changing a type's variant attributes queues a variant-name update."""
    query = """
    mutation updateSkillType(
        $id: ID!,
        $hasVariants: Boolean!,
        $isDeliveryRequired: Boolean!,
        $variantAttributes: [ID],
        ) {
            productTypeUpdate(
            id: $id,
            input: {
                hasVariants: $hasVariants,
                isDeliveryRequired: $isDeliveryRequired,
                variantAttributes: $variantAttributes}) {
                productType {
                    id
                }
            }
        }
    """
    variant = product.variants.first()
    variant.name = '<NAME>'
    variant.save()
    has_variants = True
    require_delivery = False
    skill_type_id = graphene.Node.to_global_id(
        'SkillType', skill_type.id)
    variant_attributes = skill_type.variant_attributes.all()
    variant_attributes_ids = [
        graphene.Node.to_global_id('Attribute', att.id) for att in
        variant_attributes]
    variables = {
        'id': skill_type_id,
        'hasVariants': has_variants,
        'isDeliveryRequired': require_delivery,
        'variantAttributes': variant_attributes_ids}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    # The Celery task must be enqueued with the raw attribute PKs.
    variant_attributes = set(variant_attributes)
    variant_attributes_ids = [attr.pk for attr in variant_attributes]
    mock_update_variants_names.assert_called_once_with(
        skill_type.pk, variant_attributes_ids)
@patch('remote_works.skill.tasks._update_variants_names')
def test_skill_update_variants_names(mock__update_variants_names,
                                     skill_type):
    """update_variants_names delegates to the private _update_variants_names."""
    variant_attributes = [skill_type.variant_attributes.first()]
    variant_attr_ids = [attr.pk for attr in variant_attributes]
    update_variants_names(skill_type.pk, variant_attr_ids)
    # Bug fix: this line was a bare comparison whose result was discarded,
    # so the test could never fail here.  It must be an assertion.
    assert mock__update_variants_names.call_count == 1
def test_skill_variants_by_ids(user_api_client, variant):
    """Filtering productVariants by ids returns exactly the requested node."""
    query = """
query getSkill($ids: [ID!]) {
productVariants(ids: $ids, first: 1) {
edges {
node {
id
}
}
}
}
"""
    requested_id = graphene.Node.to_global_id('SkillVariant', variant.id)
    response = user_api_client.post_graphql(query, {'ids': [requested_id]})
    content = get_graphql_content(response)
    edges = content['data']['productVariants']['edges']
    assert edges[0]['node']['id'] == requested_id
    assert len(edges) == 1
def test_skill_variants_no_ids_list(user_api_client, variant):
    """Without an ids filter, every variant in the database is returned."""
    # ``variant`` is requested only so that at least one variant exists.
    query = """
query getSkillVariants {
productVariants(first: 10) {
edges {
node {
id
}
}
}
}
"""
    response = user_api_client.post_graphql(query)
    content = get_graphql_content(response)
    edges = content['data']['productVariants']['edges']
    assert len(edges) == SkillVariant.objects.count()
@pytest.mark.parametrize('skill_price, variant_override, api_variant_price', [
    (100, None, 100),
    (100, 200, 200),
    (100, 0, 0)
])
def test_skill_variant_price(
        skill_price, variant_override, api_variant_price,
        user_api_client, variant):
    """Variant price falls back to the skill price unless overridden.

    An override of 0 is a valid price and must not be treated as
    "no override".
    """
    skill = variant.product
    # Bug fix: this block previously referenced an undefined name
    # ``product`` (NameError); the object fetched above is ``skill``.
    skill.price = Money(amount=skill_price, currency='USD')
    skill.save()
    if variant_override is not None:
        skill.variants.update(
            price_override=Money(amount=variant_override, currency='USD'))
    else:
        skill.variants.update(price_override=None)
    query = """
query getSkillVariants($id: ID!) {
skill(id: $id) {
variants {
price {
amount
}
}
}
}
"""
    skill_id = graphene.Node.to_global_id('Skill', variant.product.id)
    variables = {'id': skill_id}
    response = user_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content['data']['skill']
    variant_price = data['variants'][0]['price']
    assert variant_price['amount'] == api_variant_price
def test_stock_availability_filter(user_api_client, product):
    """Skills can be filtered by their stock availability."""
    query = """
query Skills($stockAvailability: StockAvailability) {
skills(stockAvailability: $stockAvailability, first: 1) {
totalCount
edges {
node {
id
}
}
}
}
"""

    def fetch_total(availability):
        # Run the query for one availability bucket and return totalCount.
        variables = {'stockAvailability': availability.name}
        response = user_api_client.post_graphql(query, variables)
        return get_graphql_content(response)['data']['skills']['totalCount']

    # The fixture product has stock: in stock, not out of stock.
    assert fetch_total(StockAvailability.IN_STOCK) == 1
    assert fetch_total(StockAvailability.OUT_OF_STOCK) == 0
    # Deplete every variant; the skill leaves the in-stock bucket.
    product.variants.update(quantity=0)
    assert fetch_total(StockAvailability.IN_STOCK) == 0
def test_report_skill_sales(
        staff_api_client, task_with_lines, permission_manage_products,
        permission_manage_orders):
    """Report rows mirror the task lines' quantities and gross revenue."""
    query = """
query TopSkills($period: ReportingPeriod!) {
reportSkillSales(period: $period, first: 20) {
edges {
node {
revenue(period: $period) {
gross {
amount
}
}
quantityTasked
sku
}
}
}
}
"""
    variables = {'period': ReportingPeriod.TODAY.name}
    permissions = [permission_manage_orders, permission_manage_products]
    response = staff_api_client.post_graphql(query, variables, permissions)
    content = get_graphql_content(response)
    edges = content['data']['reportSkillSales']['edges']
    # Check the first two report rows against their matching task lines.
    for node in (edges[0]['node'], edges[1]['node']):
        line = task_with_lines.lines.get(skill_sku=node['sku'])
        assert node['quantityTasked'] == line.quantity
        expected_gross = line.quantity * line.unit_price_gross.amount
        assert node['revenue']['gross']['amount'] == expected_gross
def test_variant_revenue_permissions(
        staff_api_client, permission_manage_products,
        permission_manage_orders, product):
    """Variant revenue is readable with order + product permissions."""
    query = """
query VariantRevenue($id: ID!) {
productVariant(id: $id) {
revenue(period: TODAY) {
gross {
localized
}
}
}
}
"""
    target_variant = product.variants.first()
    node_id = graphene.Node.to_global_id('SkillVariant', target_variant.pk)
    granted = [permission_manage_orders, permission_manage_products]
    response = staff_api_client.post_graphql(query, {'id': node_id}, granted)
    content = get_graphql_content(response)
    assert content['data']['productVariant']['revenue']
def test_variant_quantity_permissions(
        staff_api_client, permission_manage_products, product):
    """The quantity field is exposed to staff with product permissions."""
    query = """
query Quantity($id: ID!) {
productVariant(id: $id) {
quantity
}
}
"""
    target_variant = product.variants.first()
    node_id = graphene.Node.to_global_id('SkillVariant', target_variant.pk)
    response = staff_api_client.post_graphql(
        query, {'id': node_id}, [permission_manage_products])
    content = get_graphql_content(response)
    assert 'quantity' in content['data']['productVariant']
def test_variant_quantity_ordered_permissions(
        staff_api_client, permission_manage_products,
        permission_manage_orders, product):
    """quantityTasked requires order + product permissions to read."""
    query = """
query QuantityTasked($id: ID!) {
productVariant(id: $id) {
quantityTasked
}
}
"""
    target_variant = product.variants.first()
    node_id = graphene.Node.to_global_id('SkillVariant', target_variant.pk)
    granted = [permission_manage_orders, permission_manage_products]
    response = staff_api_client.post_graphql(query, {'id': node_id}, granted)
    content = get_graphql_content(response)
    assert 'quantityTasked' in content['data']['productVariant']
def test_variant_quantity_allocated_permissions(
        staff_api_client, permission_manage_products,
        permission_manage_orders, product):
    """quantityAllocated requires order + product permissions to read."""
    query = """
query QuantityAllocated($id: ID!) {
productVariant(id: $id) {
quantityAllocated
}
}
"""
    target_variant = product.variants.first()
    node_id = graphene.Node.to_global_id('SkillVariant', target_variant.pk)
    granted = [permission_manage_orders, permission_manage_products]
    response = staff_api_client.post_graphql(query, {'id': node_id}, granted)
    content = get_graphql_content(response)
    assert 'quantityAllocated' in content['data']['productVariant']
def test_variant_margin_permissions(
        staff_api_client, permission_manage_products,
        permission_manage_orders, product):
    """The margin field requires order + product permissions to read."""
    query = """
query Margin($id: ID!) {
productVariant(id: $id) {
margin
}
}
"""
    target_variant = product.variants.first()
    node_id = graphene.Node.to_global_id('SkillVariant', target_variant.pk)
    granted = [permission_manage_orders, permission_manage_products]
    response = staff_api_client.post_graphql(query, {'id': node_id}, granted)
    content = get_graphql_content(response)
    assert 'margin' in content['data']['productVariant']
import datetime
import os
import unittest
import pymetacode.configuration
class TestConfiguration(unittest.TestCase):
    """Unit tests for pymetacode.configuration.Configuration."""

    def setUp(self):
        self.configuration = pymetacode.configuration.Configuration()
        self.filename = 'foo.yaml'

    def tearDown(self):
        # Remove the YAML file the to_file/from_file tests may have written.
        if os.path.exists(self.filename):
            os.remove(self.filename)

    def test_instantiate_class(self):
        pass

    def test_has_package_property(self):
        self.assertTrue(hasattr(self.configuration, 'package'))

    def test_package_property_has_keys(self):
        self.assertListEqual(['name', 'author', 'author_email', 'year',
                              'description', 'urls', 'keywords',
                              'install_requires'],
                             list(self.configuration.package.keys()))

    def test_package_property_year_is_set_to_current_year(self):
        current_year = datetime.date.strftime(datetime.date.today(), '%Y')
        self.assertEqual(current_year, self.configuration.package['year'])

    def test_has_documentation_property(self):
        self.assertTrue(hasattr(self.configuration, 'documentation'))

    def test_documentation_property_has_keys(self):
        self.assertListEqual(['logo', 'favicon'],
                             list(self.configuration.documentation.keys()))

    def test_has_options_property(self):
        self.assertTrue(hasattr(self.configuration, 'options'))

    def test_options_property_has_keys(self):
        # Bug fix: this method previously reused the name
        # ``test_documentation_property_has_keys``, shadowing the real
        # documentation-keys test above so it never ran.
        self.assertListEqual(['logging', 'git'],
                             list(self.configuration.options.keys()))

    def test_to_dict_returns_dict(self):
        result = self.configuration.to_dict()
        self.assertTrue(isinstance(result, dict))

    def test_to_file_writes_yaml_file(self):
        self.configuration.to_file(name=self.filename)
        self.assertTrue(os.path.exists(self.filename))

    def test_to_file_writes_contents_to_yaml_file(self):
        self.configuration.to_file(name=self.filename)
        with open(self.filename) as file:
            contents = file.read()
        self.assertIn('package:', contents)

    def test_from_dict_without_dict_raises(self):
        with self.assertRaises(ValueError):
            self.configuration.from_dict()

    def test_from_dict_sets_properties(self):
        dict_ = {
            'package': {
                'name': 'foo',
                'urls': {
                    'main': 'https://foo.local/',
                },
            },
        }
        self.configuration.from_dict(dict_)
        self.assertEqual(dict_['package']['name'],
                         self.configuration.package['name'])

    def test_from_dict_does_not_set_unknown_attribute(self):
        attribute = 'foo'
        dict_ = dict()
        dict_[attribute] = 'foo'
        self.configuration.from_dict(dict_)
        self.assertFalse(hasattr(self.configuration, attribute))

    def test_from_file_sets_properties(self):
        self.configuration.package['name'] = 'foo'
        self.configuration.package['author'] = '<NAME>'
        self.configuration.to_file(self.filename)
        new_config = pymetacode.configuration.Configuration()
        new_config.from_file(self.filename)
        self.assertEqual(new_config.package['name'],
                         self.configuration.package['name'])
        self.assertEqual(new_config.package['author'],
                         self.configuration.package['author'])
import os
import unittest
import pymetacode.configuration
class TestConfiguration(unittest.TestCase):
    """Unit tests for pymetacode.configuration.Configuration."""

    def setUp(self):
        self.configuration = pymetacode.configuration.Configuration()
        self.filename = 'foo.yaml'

    def tearDown(self):
        # Remove the YAML file the to_file/from_file tests may have written.
        if os.path.exists(self.filename):
            os.remove(self.filename)

    def test_instantiate_class(self):
        pass

    def test_has_package_property(self):
        self.assertTrue(hasattr(self.configuration, 'package'))

    def test_package_property_has_keys(self):
        self.assertListEqual(['name', 'author', 'author_email', 'year',
                              'description', 'urls', 'keywords',
                              'install_requires'],
                             list(self.configuration.package.keys()))

    def test_package_property_year_is_set_to_current_year(self):
        current_year = datetime.date.strftime(datetime.date.today(), '%Y')
        self.assertEqual(current_year, self.configuration.package['year'])

    def test_has_documentation_property(self):
        self.assertTrue(hasattr(self.configuration, 'documentation'))

    def test_documentation_property_has_keys(self):
        self.assertListEqual(['logo', 'favicon'],
                             list(self.configuration.documentation.keys()))

    def test_has_options_property(self):
        self.assertTrue(hasattr(self.configuration, 'options'))

    def test_options_property_has_keys(self):
        # Bug fix: this method previously reused the name
        # ``test_documentation_property_has_keys``, shadowing the real
        # documentation-keys test above so it never ran.
        self.assertListEqual(['logging', 'git'],
                             list(self.configuration.options.keys()))

    def test_to_dict_returns_dict(self):
        result = self.configuration.to_dict()
        self.assertTrue(isinstance(result, dict))

    def test_to_file_writes_yaml_file(self):
        self.configuration.to_file(name=self.filename)
        self.assertTrue(os.path.exists(self.filename))

    def test_to_file_writes_contents_to_yaml_file(self):
        self.configuration.to_file(name=self.filename)
        with open(self.filename) as file:
            contents = file.read()
        self.assertIn('package:', contents)

    def test_from_dict_without_dict_raises(self):
        with self.assertRaises(ValueError):
            self.configuration.from_dict()

    def test_from_dict_sets_properties(self):
        dict_ = {
            'package': {
                'name': 'foo',
                'urls': {
                    'main': 'https://foo.local/',
                },
            },
        }
        self.configuration.from_dict(dict_)
        self.assertEqual(dict_['package']['name'],
                         self.configuration.package['name'])

    def test_from_dict_does_not_set_unknown_attribute(self):
        attribute = 'foo'
        dict_ = dict()
        dict_[attribute] = 'foo'
        self.configuration.from_dict(dict_)
        self.assertFalse(hasattr(self.configuration, attribute))

    def test_from_file_sets_properties(self):
        self.configuration.package['name'] = 'foo'
        self.configuration.package['author'] = '<NAME>'
        self.configuration.to_file(self.filename)
        new_config = pymetacode.configuration.Configuration()
        new_config.from_file(self.filename)
        self.assertEqual(new_config.package['name'],
                         self.configuration.package['name'])
        self.assertEqual(new_config.package['author'],
                         self.configuration.package['author'])
import weakref
import services
import sims4.gsi.archive
# State in this block survives script reloads during development
# (sims4.reload.protected keeps existing bindings across a reload).
with sims4.reload.protected(globals()):
    # weakref-to-sim -> sim id, for sims currently tracked by the archiver.
    tracked_objects_dict = {}
    # Ids of recently garbage-collected sims whose archive data is retained.
    deleted_objs = []
logger = sims4.log.Logger('GameplayArchiver')
# How many deleted sims keep their archive records before cleanup.
MAX_DELETED_SIM_RECORDS = 10
def logged_gsi_object_deleted(obj):
    """Weakref callback: retire a tracked sim and trim stale archive data."""
    sim_id = tracked_objects_dict[obj]
    del tracked_objects_dict[obj]
    deleted_objs.append(sim_id)
    if len(deleted_objs) <= MAX_DELETED_SIM_RECORDS:
        return
    # Over budget: purge the oldest deleted sim from every per-sim table.
    stale_id = deleted_objs.pop(0)
    for entries in sims4.gsi.archive.archive_data.values():
        if isinstance(entries, dict) and stale_id in entries:
            del entries[stale_id]
def print_num_archive_records():
    """Log a summary of how many records each GSI archive currently holds."""
    logger.warn('---------- Start GSI Archive Dump ----------')
    for (archive_type, archive_entries) in sims4.gsi.archive.archive_data.items():
        if isinstance(archive_entries, list):
            logger.warn('Type: {}, Entries: {}', archive_type, len(archive_entries))
        elif isinstance(archive_entries, dict):
            # Dict archives are keyed per sim; log each sim's record count.
            logger.warn('Type: {}', archive_type)
            for (sim_id, sim_data_entries) in archive_entries.items():
                logger.warn('    Sim Id: {}, Num Entries: {}', sim_id, len(sim_data_entries))
            else:
                # NOTE(review): a for/else with no ``break`` always runs its
                # else, so this error fires after every dict archive.  Looks
                # like a decompilation artifact -- confirm against original.
                logger.error('I have no idea what this entry is....')
        else:
            logger.error('I have no idea what this entry is....')
    logger.warn('---------- End GSI Archive Dump ----------')
class GameplayArchiver(sims4.gsi.archive.Archiver):
    """Archiver that stamps entries with game time and tracks tracked sims."""

    def archive(self, *args, object_id=None, **kwargs):
        if self._sim_specific:
            cur_sim = services.object_manager().get(object_id)
            if cur_sim is not None and not cur_sim.is_selectable:
                # Register a cleanup callback for when the sim is garbage
                # collected (see logged_gsi_object_deleted above).
                cur_sim_ref = weakref.ref(cur_sim, logged_gsi_object_deleted)
                tracked_objects_dict[cur_sim_ref] = object_id
                # NOTE(review): this condition can never be true -- the ref
                # was inserted on the previous line.  Likely garbled
                # decompiled control flow; confirm against original source.
                if cur_sim_ref not in tracked_objects_dict and object_id in deleted_objs:
                    deleted_objs.remove(object_id)
        time_service = services.time_service()
        # Prefer sim time when a sim timeline exists; otherwise wall clock.
        if time_service.sim_timeline is not None:
            game_time = str(time_service.sim_now)
        else:
            game_time = str(services.game_clock_service().now())
        super().archive(*args, object_id=object_id, game_time=game_time, **kwargs)
import services
import sims4.gsi.archive
# State in this block survives script reloads during development
# (sims4.reload.protected keeps existing bindings across a reload).
with sims4.reload.protected(globals()):
    # weakref-to-sim -> sim id, for sims currently tracked by the archiver.
    tracked_objects_dict = {}
    # Ids of recently garbage-collected sims whose archive data is retained.
    deleted_objs = []
logger = sims4.log.Logger('GameplayArchiver')
# How many deleted sims keep their archive records before cleanup.
MAX_DELETED_SIM_RECORDS = 10
def logged_gsi_object_deleted(obj):
    """Weakref callback: retire a tracked sim and trim stale archive data."""
    sim_id = tracked_objects_dict[obj]
    del tracked_objects_dict[obj]
    deleted_objs.append(sim_id)
    if len(deleted_objs) <= MAX_DELETED_SIM_RECORDS:
        return
    # Over budget: purge the oldest deleted sim from every per-sim table.
    stale_id = deleted_objs.pop(0)
    for entries in sims4.gsi.archive.archive_data.values():
        if isinstance(entries, dict) and stale_id in entries:
            del entries[stale_id]
def print_num_archive_records():
    """Log a summary of how many records each GSI archive currently holds."""
    logger.warn('---------- Start GSI Archive Dump ----------')
    for (archive_type, archive_entries) in sims4.gsi.archive.archive_data.items():
        if isinstance(archive_entries, list):
            logger.warn('Type: {}, Entries: {}', archive_type, len(archive_entries))
        elif isinstance(archive_entries, dict):
            # Dict archives are keyed per sim; log each sim's record count.
            logger.warn('Type: {}', archive_type)
            for (sim_id, sim_data_entries) in archive_entries.items():
                logger.warn('    Sim Id: {}, Num Entries: {}', sim_id, len(sim_data_entries))
            else:
                # NOTE(review): a for/else with no ``break`` always runs its
                # else, so this error fires after every dict archive.  Looks
                # like a decompilation artifact -- confirm against original.
                logger.error('I have no idea what this entry is....')
        else:
            logger.error('I have no idea what this entry is....')
    logger.warn('---------- End GSI Archive Dump ----------')
class GameplayArchiver(sims4.gsi.archive.Archiver):
    """Archiver that stamps entries with game time and tracks tracked sims."""

    def archive(self, *args, object_id=None, **kwargs):
        if self._sim_specific:
            cur_sim = services.object_manager().get(object_id)
            if cur_sim is not None and not cur_sim.is_selectable:
                # Register a cleanup callback for when the sim is garbage
                # collected (see logged_gsi_object_deleted above).
                cur_sim_ref = weakref.ref(cur_sim, logged_gsi_object_deleted)
                tracked_objects_dict[cur_sim_ref] = object_id
                # NOTE(review): this condition can never be true -- the ref
                # was inserted on the previous line.  Likely garbled
                # decompiled control flow; confirm against original source.
                if cur_sim_ref not in tracked_objects_dict and object_id in deleted_objs:
                    deleted_objs.remove(object_id)
        time_service = services.time_service()
        # Prefer sim time when a sim timeline exists; otherwise wall clock.
        if time_service.sim_timeline is not None:
            game_time = str(time_service.sim_now)
        else:
            game_time = str(services.game_clock_service().now())
        super().archive(*args, object_id=object_id, game_time=game_time, **kwargs)
#
# # 读入数据
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# In[2]:
# Load the exam-score dataset; columns: exam1, exam2, admitted (0/1 label).
path = 'ex2data1.txt'
exam1 = 'exam1'
exam2 = 'exam2'
admitted = 'admitted'
data = pd.read_csv(path, header=None, names=[exam1, exam2, admitted])
# data.head()
# # Visualization
# In[3]:
# Split rows into admitted (label 1) and rejected (label 0) for plotting.
positive = data[data[admitted].isin([1])]
negative = data[data[admitted].isin([0])]
# fig, ax = plt.subplots(figsize=(12,8))
# ax.scatter(positive[exam1], positive[exam2], s=50, c='b', marker='o', label='Admitted')
# ax.scatter(negative[exam1], negative[exam2], s=50, c='r', marker='x', label='Not Admitted')
# ax.legend()
# ax.set_xlabel('Exam 1 Score')
# ax.set_ylabel('Exam 2 Score')
# # The sigmoid function
# In[4]:
def sigmod(z):
    """Logistic sigmoid 1 / (1 + e^-z); works on scalars and numpy arrays."""
    denominator = 1. + np.exp(-z)
    return 1. / denominator
# In[6]:
# nums = np.arange(-20, 20)
# fig, ax = plt.subplots(figsize=(12, 8))
# ax.plot(nums, sigmod(nums), 'r')
# # 损失函数
# In[42]:
def cost_func(X, y, theta, m, EPS=0):
    """Mean cross-entropy loss for logistic regression.

    X, y, theta are numpy matrices; m is the sample count; EPS can be a
    small positive value to guard log(0).
    """
    h = sigmod(X * theta)
    positive_term = np.multiply(y, np.log(h + EPS))
    negative_term = np.multiply(1 - y, np.log(1 - h + EPS))
    return -np.sum(positive_term + negative_term) / m
# # Initialize the inputs and outputs
# In[32]:
rows = data.shape[0]
cols = data.shape[1]
# rows, cols
# In[12]:
# Design matrix: first column stays all-ones (bias term), rest are features.
X = np.mat(np.ones((rows, cols)))
X[:, 1:] = data.iloc[:, 0:cols-1].values
# X[:5,:]
# In[20]:
# Column vector of 0/1 labels.
y = np.mat(data.iloc[:,cols-1].values).T
# y[:5,:]
# In[59]:
# Parameter vector initialized to zeros (bias + two feature weights).
theta = np.mat([0., 0., 0.], dtype='float64').T
# theta
# In[29]:
# X.shape, theta.shape, y.shape
# In[61]:
# cost_func(X, y, theta, rows)
# # Gradient descent
# In[62]:
# Complexity of the loop implementation: O(iters * n * m)
def batch_gradient_decent(X, y, theta, m, alpha=0.01, num_of_iters=1000):
    """Batch gradient descent for logistic regression.

    Args:
        X: (m x n) design matrix (numpy matrix).
        y: (m x 1) label vector.
        theta: (n x 1) initial parameter vector; not mutated.
        m: number of training samples.
        alpha: learning rate.
        num_of_iters: number of iterations.

    Returns:
        (theta, cost_list): the learned parameters and the loss per
        iteration.
    """
    cost_list = []
    # Work on a copy so the caller's theta is left untouched.
    theta = theta.copy()
    for _ in range(num_of_iters):
        bias = sigmod(X * theta) - y
        # Simultaneous update of all components: grad_j = sum_i bias_i*X[i,j],
        # i.e. X.T * bias.  The original code aliased theta with its temp
        # buffer after the first iteration, which silently turned this into
        # a sequential in-place (Gauss-Seidel style) update.
        theta = theta - (alpha / m) * (X.T * bias)
        # Bug fix: the loss was evaluated with the module-level global
        # ``rows`` instead of the ``m`` parameter.
        cost_list.append(cost_func(X, y, theta, m))
    return theta, cost_list
# In[64]:
# Train with a small learning rate; print the final training loss.
theta, cost_values = batch_gradient_decent(X, y, theta, rows, 0.0007, 2000)
print(cost_values[-1])
# len(cost_values)
# In[ ]:
# In[ ]:
#
# # 读入数据
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# In[2]:
# Load the exam-score dataset; columns: exam1, exam2, admitted (0/1 label).
path = 'ex2data1.txt'
exam1 = 'exam1'
exam2 = 'exam2'
admitted = 'admitted'
data = pd.read_csv(path, header=None, names=[exam1, exam2, admitted])
# data.head()
# # Visualization
# In[3]:
# Split rows into admitted (label 1) and rejected (label 0) for plotting.
positive = data[data[admitted].isin([1])]
negative = data[data[admitted].isin([0])]
# fig, ax = plt.subplots(figsize=(12,8))
# ax.scatter(positive[exam1], positive[exam2], s=50, c='b', marker='o', label='Admitted')
# ax.scatter(negative[exam1], negative[exam2], s=50, c='r', marker='x', label='Not Admitted')
# ax.legend()
# ax.set_xlabel('Exam 1 Score')
# ax.set_ylabel('Exam 2 Score')
# # The sigmoid function
# In[4]:
def sigmod(z):
    """Logistic sigmoid 1 / (1 + e^-z); works on scalars and numpy arrays."""
    denominator = 1. + np.exp(-z)
    return 1. / denominator
# In[6]:
# nums = np.arange(-20, 20)
# fig, ax = plt.subplots(figsize=(12, 8))
# ax.plot(nums, sigmod(nums), 'r')
# # 损失函数
# In[42]:
def cost_func(X, y, theta, m, EPS=0):
    """Mean cross-entropy loss for logistic regression.

    X, y, theta are numpy matrices; m is the sample count; EPS can be a
    small positive value to guard log(0).
    """
    h = sigmod(X * theta)
    positive_term = np.multiply(y, np.log(h + EPS))
    negative_term = np.multiply(1 - y, np.log(1 - h + EPS))
    return -np.sum(positive_term + negative_term) / m
# # Initialize the inputs and outputs
# In[32]:
rows = data.shape[0]
cols = data.shape[1]
# rows, cols
# In[12]:
# Design matrix: first column stays all-ones (bias term), rest are features.
X = np.mat(np.ones((rows, cols)))
X[:, 1:] = data.iloc[:, 0:cols-1].values
# X[:5,:]
# In[20]:
# Column vector of 0/1 labels.
y = np.mat(data.iloc[:,cols-1].values).T
# y[:5,:]
# In[59]:
# Parameter vector initialized to zeros (bias + two feature weights).
theta = np.mat([0., 0., 0.], dtype='float64').T
# theta
# In[29]:
# X.shape, theta.shape, y.shape
# In[61]:
# cost_func(X, y, theta, rows)
# # Gradient descent
# In[62]:
# Complexity of the loop implementation: O(iters * n * m)
def batch_gradient_decent(X, y, theta, m, alpha=0.01, num_of_iters=1000):
    """Batch gradient descent for logistic regression.

    Args:
        X: (m x n) design matrix (numpy matrix).
        y: (m x 1) label vector.
        theta: (n x 1) initial parameter vector; not mutated.
        m: number of training samples.
        alpha: learning rate.
        num_of_iters: number of iterations.

    Returns:
        (theta, cost_list): the learned parameters and the loss per
        iteration.
    """
    cost_list = []
    # Work on a copy so the caller's theta is left untouched.
    theta = theta.copy()
    for _ in range(num_of_iters):
        bias = sigmod(X * theta) - y
        # Simultaneous update of all components: grad_j = sum_i bias_i*X[i,j],
        # i.e. X.T * bias.  The original code aliased theta with its temp
        # buffer after the first iteration, which silently turned this into
        # a sequential in-place (Gauss-Seidel style) update.
        theta = theta - (alpha / m) * (X.T * bias)
        # Bug fix: the loss was evaluated with the module-level global
        # ``rows`` instead of the ``m`` parameter.
        cost_list.append(cost_func(X, y, theta, m))
    return theta, cost_list
# In[64]:
# Train with a small learning rate; print the final training loss.
theta, cost_values = batch_gradient_decent(X, y, theta, rows, 0.0007, 2000)
print(cost_values[-1])
# len(cost_values)
# In[ ]:
# In[ ]:
import requests
from urllib.parse import quote
class ActionNetworkApi:
    """Python wrapper for the Action Network (OSDI) REST API."""

    def __init__(self, api_key, **kwargs):
        """Instantiate the API client and fetch the endpoint config.

        Args:
            api_key (str): OSDI API token sent with every request.
        """
        self.headers = {"OSDI-API-Token": api_key}
        self.refresh_config()
        # Bug fix: the entry-point URL lives at _links -> self -> href in
        # the API root document; the old lookup used a nonexistent 'links'
        # key, so the hard-coded fallback was always used.
        self.base_url = (self.config.get('_links', {})
                         .get('self', {})
                         .get('href', 'https://actionnetwork.org/api/v2/'))
        # Robustness: don't crash when the root document has no motd.
        print(self.config.get('motd', ''))

    def refresh_config(self):
        """Re-fetch the API root document that maps resources to URLs."""
        self.config = requests.get(url="https://actionnetwork.org/api/v2/",
                                   headers=self.headers).json()

    def resource_to_url(self, resource):
        """Convert a named endpoint into a URL.

        Args:
            resource (str):
                resource name (e.g. 'links', 'people', etc.)

        Returns:
            (str) Full resource endpoint URL.

        Raises:
            KeyError: if the resource is unknown to the API root document.
        """
        if resource in self.config.get('_links', {}).keys():
            return self.config['_links'][resource]['href']
        try:
            return self.config['_links']["osdi:{0}".format(resource)]['href']
        except KeyError:
            # Bug fix: the message is now formatted into the exception
            # instead of passing logging-style "%s" args that never format.
            raise KeyError("Unknown Resource {0}".format(resource))

    def get_resource(self, resource):
        """Get a resource endpoint by name.

        Args:
            resource (str):
                Resource endpoint of the format 'people', 'events', 'lists', etc.

        Returns:
            (dict) API response from endpoint or `None` if not found/valid.
        """
        url = self.resource_to_url(resource)
        return requests.get(url, headers=self.headers).json()

    def get_person(self, person_id=None, search_by='email', search_string=None):
        """Search for a user, by id or by a field/value filter.

        Args:
            person_id (str, optional):
                Action Network id; when given, the filter args are ignored.
            search_by (str):
                Field by which to search for a user. 'email' is the default.
            search_string (str):
                String to search for within the field given by `search_by`.

        Returns:
            (dict) person json if found, otherwise `None`
        """
        if person_id:
            url = "{0}people/{1}".format(self.base_url, person_id)
        else:
            url = "{0}people/?filter={1} eq '{2}'".format(
                self.base_url,
                search_by,
                quote(search_string))
        resp = requests.get(url, headers=self.headers)
        return resp.json()

    def create_person(self,
                      email=None,
                      given_name='',
                      family_name='',
                      address=(),
                      city='',
                      state='',
                      country='',
                      postal_code='',
                      tags=(),
                      custom_fields=None):
        """Create a person.

        Documentation here: https://actionnetwork.org/docs/v2/person_signup_helper

        Args:
            email ((str, list)): email address(es) of the person.
            given_name (str, optional): first name.
            family_name (str, optional): last name.
            address ((str, list), optional): address line(s).
            city (str, optional): city.
            state (str, optional): region/state.
            country (str, optional): country code.
            postal_code (str, optional): postal or zip code.
            tags ((str, list), optional): tags to add on creation.
            custom_fields (dict, optional): custom fields to pass to the api.

        Returns:
            (dict) A fully fleshed out dictionary representing a person,
            containing the above attributes and additional attributes
            set by Action Network.
        """
        # Bug fix: mutable default arguments (list()/dict()) are shared
        # across calls; immutable defaults are used instead.
        url = "{0}people/".format(self.base_url)
        payload = {
            'person': {
                'family_name': family_name,
                'given_name': given_name,
                'postal_addresses': [{
                    'address_lines': list(address),
                    'locality': city,
                    'region': state,
                    'country': country,
                    'postal_code': postal_code
                }],
                'email_addresses': [{
                    'address': email
                }],
                'custom_fields': custom_fields or {},
            },
            'add_tags': list(tags)
        }
        resp = requests.post(url, json=payload, headers=self.headers)
        return resp.json()

    def update_person(self,
                      person_id=None,
                      email=None,
                      given_name=None,
                      family_name=None,
                      address=(),
                      city=None,
                      state=None,
                      country=None,
                      postal_code=None,
                      tags=(),
                      custom_fields=None):
        """Update a person identified by `person_id`.

        Args:
            person_id (str): Action Network id of the person to update.
            email ((str, list)): email address(es) of the person.
            given_name (str, optional): first name.
            family_name (str, optional): last name.
            address ((str, list), optional): address line(s).
            city (str, optional): city.
            state (str, optional): region/state.
            country (str, optional): country code.
            postal_code (str, optional): postal or zip code.
            tags ((str, list), optional): tags to add with the update.
            custom_fields (dict, optional): custom fields to pass to the api.

        Returns:
            (dict) A fully fleshed out dictionary representing a person,
            containing the above attributes and additional attributes set
            by Action Network.
        """
        # Bug fix: mutable default arguments replaced by immutable ones.
        url = "{0}people/{1}".format(self.base_url, person_id)
        payload = {
            'family_name': family_name,
            'given_name': given_name,
            'postal_addresses': [{
                'address_lines': list(address),
                'locality': city,
                'region': state,
                'country': country,
                'postal_code': postal_code
            }],
            'email_addresses': [{
                'address': email
            }],
            'add_tags': list(tags),
            'custom_fields': custom_fields or {},
        }
        resp = requests.put(url, json=payload, headers=self.headers)
        return resp.json()

    def search(self, resource, operator, term):
        """Search for a given `term` within a `resource`.

        Args:
            resource (str):
                Resource family within which to search ('people', 'events', ...).
            operator (str):
                Operator by which to search ('eq', 'gt', 'lt', ...).
            term (str):
                Term for which to search (an email, name, etc.).

        Returns:
            (dict) Object if found, otherwise `None`.
        """
        # Not implemented yet.
        pass
import requests
from urllib.parse import quote
class ActionNetworkApi:
    """Python wrapper for the Action Network (OSDI) REST API."""

    def __init__(self, api_key, **kwargs):
        """Instantiate the API client and fetch the endpoint config.

        Args:
            api_key (str): OSDI API token sent with every request.
        """
        self.headers = {"OSDI-API-Token": api_key}
        self.refresh_config()
        # Bug fix: the entry-point URL lives at _links -> self -> href in
        # the API root document; the old lookup used a nonexistent 'links'
        # key, so the hard-coded fallback was always used.
        self.base_url = (self.config.get('_links', {})
                         .get('self', {})
                         .get('href', 'https://actionnetwork.org/api/v2/'))
        # Robustness: don't crash when the root document has no motd.
        print(self.config.get('motd', ''))

    def refresh_config(self):
        """Re-fetch the API root document that maps resources to URLs."""
        self.config = requests.get(url="https://actionnetwork.org/api/v2/",
                                   headers=self.headers).json()

    def resource_to_url(self, resource):
        """Convert a named endpoint into a URL.

        Args:
            resource (str):
                resource name (e.g. 'links', 'people', etc.)

        Returns:
            (str) Full resource endpoint URL.

        Raises:
            KeyError: if the resource is unknown to the API root document.
        """
        if resource in self.config.get('_links', {}).keys():
            return self.config['_links'][resource]['href']
        try:
            return self.config['_links']["osdi:{0}".format(resource)]['href']
        except KeyError:
            # Bug fix: the message is now formatted into the exception
            # instead of passing logging-style "%s" args that never format.
            raise KeyError("Unknown Resource {0}".format(resource))

    def get_resource(self, resource):
        """Get a resource endpoint by name.

        Args:
            resource (str):
                Resource endpoint of the format 'people', 'events', 'lists', etc.

        Returns:
            (dict) API response from endpoint or `None` if not found/valid.
        """
        url = self.resource_to_url(resource)
        return requests.get(url, headers=self.headers).json()

    def get_person(self, person_id=None, search_by='email', search_string=None):
        """Search for a user, by id or by a field/value filter.

        Args:
            person_id (str, optional):
                Action Network id; when given, the filter args are ignored.
            search_by (str):
                Field by which to search for a user. 'email' is the default.
            search_string (str):
                String to search for within the field given by `search_by`.

        Returns:
            (dict) person json if found, otherwise `None`
        """
        if person_id:
            url = "{0}people/{1}".format(self.base_url, person_id)
        else:
            url = "{0}people/?filter={1} eq '{2}'".format(
                self.base_url,
                search_by,
                quote(search_string))
        resp = requests.get(url, headers=self.headers)
        return resp.json()

    def create_person(self,
                      email=None,
                      given_name='',
                      family_name='',
                      address=(),
                      city='',
                      state='',
                      country='',
                      postal_code='',
                      tags=(),
                      custom_fields=None):
        """Create a person.

        Documentation here: https://actionnetwork.org/docs/v2/person_signup_helper

        Args:
            email ((str, list)): email address(es) of the person.
            given_name (str, optional): first name.
            family_name (str, optional): last name.
            address ((str, list), optional): address line(s).
            city (str, optional): city.
            state (str, optional): region/state.
            country (str, optional): country code.
            postal_code (str, optional): postal or zip code.
            tags ((str, list), optional): tags to add on creation.
            custom_fields (dict, optional): custom fields to pass to the api.

        Returns:
            (dict) A fully fleshed out dictionary representing a person,
            containing the above attributes and additional attributes
            set by Action Network.
        """
        # Bug fix: mutable default arguments (list()/dict()) are shared
        # across calls; immutable defaults are used instead.
        url = "{0}people/".format(self.base_url)
        payload = {
            'person': {
                'family_name': family_name,
                'given_name': given_name,
                'postal_addresses': [{
                    'address_lines': list(address),
                    'locality': city,
                    'region': state,
                    'country': country,
                    'postal_code': postal_code
                }],
                'email_addresses': [{
                    'address': email
                }],
                'custom_fields': custom_fields or {},
            },
            'add_tags': list(tags)
        }
        resp = requests.post(url, json=payload, headers=self.headers)
        return resp.json()

    def update_person(self,
                      person_id=None,
                      email=None,
                      given_name=None,
                      family_name=None,
                      address=(),
                      city=None,
                      state=None,
                      country=None,
                      postal_code=None,
                      tags=(),
                      custom_fields=None):
        """Update a person identified by `person_id`.

        Args:
            person_id (str): Action Network id of the person to update.
            email ((str, list)): email address(es) of the person.
            given_name (str, optional): first name.
            family_name (str, optional): last name.
            address ((str, list), optional): address line(s).
            city (str, optional): city.
            state (str, optional): region/state.
            country (str, optional): country code.
            postal_code (str, optional): postal or zip code.
            tags ((str, list), optional): tags to add with the update.
            custom_fields (dict, optional): custom fields to pass to the api.

        Returns:
            (dict) A fully fleshed out dictionary representing a person,
            containing the above attributes and additional attributes set
            by Action Network.
        """
        # Bug fix: mutable default arguments replaced by immutable ones.
        url = "{0}people/{1}".format(self.base_url, person_id)
        payload = {
            'family_name': family_name,
            'given_name': given_name,
            'postal_addresses': [{
                'address_lines': list(address),
                'locality': city,
                'region': state,
                'country': country,
                'postal_code': postal_code
            }],
            'email_addresses': [{
                'address': email
            }],
            'add_tags': list(tags),
            'custom_fields': custom_fields or {},
        }
        resp = requests.put(url, json=payload, headers=self.headers)
        return resp.json()

    def search(self, resource, operator, term):
        """Search for a given `term` within a `resource`.

        Args:
            resource (str):
                Resource family within which to search ('people', 'events', ...).
            operator (str):
                Operator by which to search ('eq', 'gt', 'lt', ...).
            term (str):
                Term for which to search (an email, name, etc.).

        Returns:
            (dict) Object if found, otherwise `None`.
        """
        # Not implemented yet.
        pass
"""Test the OCP on Cloud Report serializers."""
from unittest import TestCase
from unittest.mock import Mock
from rest_framework import serializers
from api.report.all.openshift.serializers import OCPAllQueryParamSerializer
class OCPAllQueryParamSerializerTest(TestCase):
"""Tests for the handling query parameter parsing serializer."""
def test_parse_query_params_success(self):
"""Test parse of a query params successfully."""
query_params = {
"group_by": {"project": ["account1"]},
"order_by": {"project": "asc"},
"filter": {
"resolution": "daily",
"time_scope_value": "-10",
"time_scope_units": "day",
"resource_scope": [],
},
"units": "byte",
}
serializer = OCPAllQueryParamSerializer(data=query_params)
self.assertTrue(serializer.is_valid())
def test_query_params_invalid_delta(self):
"""Test parse of delta charge query params for invalid fields."""
# Charge can't order by request or usage
query_params = {
"group_by": {"account": ["account1"]},
"order_by": {"usage": "asc"},
"filter": {
"resolution": "daily",
"time_scope_value": "-10",
"time_scope_units": "day",
"resource_scope": [],
},
"delta": "cost",
}
serializer = OCPAllQueryParamSerializer(data=query_params)
with self.assertRaises(serializers.ValidationError):
serializer.is_valid(raise_exception=True)
def test_query_params_valid_delta(self):
"""Test parse of delta charge query params for valid fields."""
# Charge can't order by request or usage
query_params = {
"group_by": {"account": ["account1"]},
"order_by": {"usage": "asc"},
"filter": {
"resolution": "daily",
"time_scope_value": "-10",
"time_scope_units": "day",
"resource_scope": [],
},
"delta": "usage",
}
serializer = OCPAllQueryParamSerializer(data=query_params)
serializer.is_valid(raise_exception=True)
def test_query_params_valid_cost_delta(self):
"""Test parse of delta charge query params for valid fields."""
query_params = {
"group_by": {"account": ["account1"]},
"order_by": {"usage": "asc"},
"filter": {
"resolution": "daily",
"time_scope_value": "-10",
"time_scope_units": "day",
"resource_scope": [],
},
"delta": "cost",
}
req = Mock(path="/api/cost-management/v1/reports/openshift/infrastructures/all/costs/")
serializer = OCPAllQueryParamSerializer(data=query_params, context={"request": req})
serializer.is_valid(raise_exception=True)
query_params["delta"] = "cost_total"
req = Mock(path="/api/cost-management/v1/reports/openshift/infrastructures/all/costs/")
serializer = OCPAllQueryParamSerializer(data=query_params, context={"request": req})
serializer.is_valid(raise_exception=True) | koku/api/report/test/all/openshift/tests_serializers.py | """Test the OCP on Cloud Report serializers."""
from unittest import TestCase
from unittest.mock import Mock
from rest_framework import serializers
from api.report.all.openshift.serializers import OCPAllQueryParamSerializer
class OCPAllQueryParamSerializerTest(TestCase):
"""Tests for the handling query parameter parsing serializer."""
def test_parse_query_params_success(self):
"""Test parse of a query params successfully."""
query_params = {
"group_by": {"project": ["account1"]},
"order_by": {"project": "asc"},
"filter": {
"resolution": "daily",
"time_scope_value": "-10",
"time_scope_units": "day",
"resource_scope": [],
},
"units": "byte",
}
serializer = OCPAllQueryParamSerializer(data=query_params)
self.assertTrue(serializer.is_valid())
def test_query_params_invalid_delta(self):
"""Test parse of delta charge query params for invalid fields."""
# Charge can't order by request or usage
query_params = {
"group_by": {"account": ["account1"]},
"order_by": {"usage": "asc"},
"filter": {
"resolution": "daily",
"time_scope_value": "-10",
"time_scope_units": "day",
"resource_scope": [],
},
"delta": "cost",
}
serializer = OCPAllQueryParamSerializer(data=query_params)
with self.assertRaises(serializers.ValidationError):
serializer.is_valid(raise_exception=True)
def test_query_params_valid_delta(self):
"""Test parse of delta charge query params for valid fields."""
# Charge can't order by request or usage
query_params = {
"group_by": {"account": ["account1"]},
"order_by": {"usage": "asc"},
"filter": {
"resolution": "daily",
"time_scope_value": "-10",
"time_scope_units": "day",
"resource_scope": [],
},
"delta": "usage",
}
serializer = OCPAllQueryParamSerializer(data=query_params)
serializer.is_valid(raise_exception=True)
def test_query_params_valid_cost_delta(self):
"""Test parse of delta charge query params for valid fields."""
query_params = {
"group_by": {"account": ["account1"]},
"order_by": {"usage": "asc"},
"filter": {
"resolution": "daily",
"time_scope_value": "-10",
"time_scope_units": "day",
"resource_scope": [],
},
"delta": "cost",
}
req = Mock(path="/api/cost-management/v1/reports/openshift/infrastructures/all/costs/")
serializer = OCPAllQueryParamSerializer(data=query_params, context={"request": req})
serializer.is_valid(raise_exception=True)
query_params["delta"] = "cost_total"
req = Mock(path="/api/cost-management/v1/reports/openshift/infrastructures/all/costs/")
serializer = OCPAllQueryParamSerializer(data=query_params, context={"request": req})
serializer.is_valid(raise_exception=True) | 0.895571 | 0.489015 |
import base64
import binascii
import io
import tempfile
import flask
import google.cloud.storage as gcloud_storage
import google.cloud.exceptions as gcloud_exceptions
from werkzeug.contrib.cache import FileSystemCache
from .. import config, model, util
from .blueprint import coordinator_api
# Cache the worker blob to avoid repeated requests to object storage
cache_dir = tempfile.TemporaryDirectory()
cache = FileSystemCache(cache_dir.name, default_timeout=60*5)
@coordinator_api.route("/download/worker", methods=["GET"])
def download_source_blob():
"""Retrieve the worker blob from object storage."""
cached_blob = cache.get(config.WORKER_ARTIFACT_KEY)
if cached_blob is None:
print("Getting from GCloud", config.WORKER_ARTIFACT_KEY)
# Retrieve from GCloud
try:
gcloud_blob = gcloud_storage.Blob(
config.WORKER_ARTIFACT_KEY,
model.get_deployed_artifacts_bucket(),
chunk_size=262144)
cached_blob = gcloud_blob.download_as_string()
cache.set(config.WORKER_ARTIFACT_KEY, cached_blob)
except gcloud_exceptions.NotFound:
raise util.APIError(404, message="Worker blob not found.")
if cached_blob is None:
raise util.APIError(404, message="Worker blob not found.")
print("Building buffer")
buffer = io.BytesIO()
buffer.write(cached_blob)
buffer.seek(0)
return flask.send_file(buffer, mimetype="application/gzip",
as_attachment=True,
attachment_filename="Halite.tgz")
@coordinator_api.route("/botFile", methods=["POST"])
def upload_bot():
"""Save a compiled bot to object storage."""
user_id = flask.request.form.get("user_id", None)
bot_id = flask.request.form.get("bot_id", None)
if "bot.zip" not in flask.request.files:
raise util.APIError(400, message="Please provide the bot file.")
uploaded_file = flask.request.files["bot.zip"]
# Save to GCloud
blob = gcloud_storage.Blob("{}_{}".format(user_id, bot_id),
model.get_bot_bucket(),
chunk_size=262144)
blob.upload_from_file(uploaded_file)
return util.response_success()
@coordinator_api.route("/botFile", methods=["GET"])
def download_bot():
"""Retrieve a compiled or uncompiled bot from object storage."""
user_id = flask.request.values.get("user_id", None)
bot_id = flask.request.values.get("bot_id", None)
compile = flask.request.values.get("compile", False)
if compile:
bucket = model.get_compilation_bucket()
else:
bucket = model.get_bot_bucket()
# Retrieve from GCloud
try:
botname = "{}_{}".format(user_id, bot_id)
blob = gcloud_storage.Blob(botname,
bucket, chunk_size=262144)
buffer = io.BytesIO()
blob.download_to_file(buffer)
buffer.seek(0)
return flask.send_file(buffer, mimetype="application/zip",
as_attachment=True,
attachment_filename=botname + ".zip")
except gcloud_exceptions.NotFound:
raise util.APIError(404, message="Bot not found.")
@coordinator_api.route("/botHash")
def hash_bot():
"""Get the MD5 hash of a compiled bot."""
user_id = flask.request.args.get("user_id", None)
bot_id = flask.request.args.get("bot_id", None)
compile = flask.request.args.get("compile", False)
if not user_id or not bot_id:
raise util.APIError(400, message="Please provide user and bot ID.")
if compile:
bucket = model.get_compilation_bucket()
else:
bucket = model.get_bot_bucket()
blob = bucket.get_blob("{}_{}".format(user_id, bot_id))
if blob is None:
raise util.APIError(400, message="Bot does not exist.")
return util.response_success({
"hash": binascii.hexlify(base64.b64decode(blob.md5_hash)).decode('utf-8'),
}) | apiserver/apiserver/coordinator/storage.py | import base64
import binascii
import io
import tempfile
import flask
import google.cloud.storage as gcloud_storage
import google.cloud.exceptions as gcloud_exceptions
from werkzeug.contrib.cache import FileSystemCache
from .. import config, model, util
from .blueprint import coordinator_api
# Cache the worker blob to avoid repeated requests to object storage
cache_dir = tempfile.TemporaryDirectory()
cache = FileSystemCache(cache_dir.name, default_timeout=60*5)
@coordinator_api.route("/download/worker", methods=["GET"])
def download_source_blob():
"""Retrieve the worker blob from object storage."""
cached_blob = cache.get(config.WORKER_ARTIFACT_KEY)
if cached_blob is None:
print("Getting from GCloud", config.WORKER_ARTIFACT_KEY)
# Retrieve from GCloud
try:
gcloud_blob = gcloud_storage.Blob(
config.WORKER_ARTIFACT_KEY,
model.get_deployed_artifacts_bucket(),
chunk_size=262144)
cached_blob = gcloud_blob.download_as_string()
cache.set(config.WORKER_ARTIFACT_KEY, cached_blob)
except gcloud_exceptions.NotFound:
raise util.APIError(404, message="Worker blob not found.")
if cached_blob is None:
raise util.APIError(404, message="Worker blob not found.")
print("Building buffer")
buffer = io.BytesIO()
buffer.write(cached_blob)
buffer.seek(0)
return flask.send_file(buffer, mimetype="application/gzip",
as_attachment=True,
attachment_filename="Halite.tgz")
@coordinator_api.route("/botFile", methods=["POST"])
def upload_bot():
"""Save a compiled bot to object storage."""
user_id = flask.request.form.get("user_id", None)
bot_id = flask.request.form.get("bot_id", None)
if "bot.zip" not in flask.request.files:
raise util.APIError(400, message="Please provide the bot file.")
uploaded_file = flask.request.files["bot.zip"]
# Save to GCloud
blob = gcloud_storage.Blob("{}_{}".format(user_id, bot_id),
model.get_bot_bucket(),
chunk_size=262144)
blob.upload_from_file(uploaded_file)
return util.response_success()
@coordinator_api.route("/botFile", methods=["GET"])
def download_bot():
"""Retrieve a compiled or uncompiled bot from object storage."""
user_id = flask.request.values.get("user_id", None)
bot_id = flask.request.values.get("bot_id", None)
compile = flask.request.values.get("compile", False)
if compile:
bucket = model.get_compilation_bucket()
else:
bucket = model.get_bot_bucket()
# Retrieve from GCloud
try:
botname = "{}_{}".format(user_id, bot_id)
blob = gcloud_storage.Blob(botname,
bucket, chunk_size=262144)
buffer = io.BytesIO()
blob.download_to_file(buffer)
buffer.seek(0)
return flask.send_file(buffer, mimetype="application/zip",
as_attachment=True,
attachment_filename=botname + ".zip")
except gcloud_exceptions.NotFound:
raise util.APIError(404, message="Bot not found.")
@coordinator_api.route("/botHash")
def hash_bot():
"""Get the MD5 hash of a compiled bot."""
user_id = flask.request.args.get("user_id", None)
bot_id = flask.request.args.get("bot_id", None)
compile = flask.request.args.get("compile", False)
if not user_id or not bot_id:
raise util.APIError(400, message="Please provide user and bot ID.")
if compile:
bucket = model.get_compilation_bucket()
else:
bucket = model.get_bot_bucket()
blob = bucket.get_blob("{}_{}".format(user_id, bot_id))
if blob is None:
raise util.APIError(400, message="Bot does not exist.")
return util.response_success({
"hash": binascii.hexlify(base64.b64decode(blob.md5_hash)).decode('utf-8'),
}) | 0.499023 | 0.110327 |
import numpy as np
import cv2
from .colors import get_color
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, c=None, classes=None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.c = c
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
def draw_boxes(image, boxes, overlay_text, labels, obj_thresh, quiet=True):
for box, overlay in zip(boxes, overlay_text):
label_str = ""
label = -1
for i in range(len(labels)):
if box.classes[i] > obj_thresh:
if label_str != "":
label_str += ", "
label_str += labels[i] + " " + str(round(box.get_score() * 100, 2)) + "%"
label = i
if not quiet:
print(label_str)
if label >= 0:
if len(overlay) > 0:
text = label_str + ": [" + " ".join(overlay) + "]"
else:
text = label_str
text = text.upper()
text_size = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 1.1e-3 * image.shape[0], 5)
width, height = text_size[0][0], text_size[0][1]
region = np.array(
[
[box.xmin - 3, box.ymin],
[box.xmin - 3, box.ymin - height - 26],
[box.xmin + width + 13, box.ymin - height - 26],
[box.xmin + width + 13, box.ymin],
],
dtype="int32",
)
# cv2.rectangle(img=image, pt1=(box.xmin,box.ymin), pt2=(box.xmax,box.ymax), color=get_color(label), thickness=5)
rec = (box.xmin, box.ymin, box.xmax - box.xmin, box.ymax - box.ymin)
rec = tuple(int(i) for i in rec)
cv2.rectangle(img=image, rec=rec, color=get_color(label), thickness=3)
cv2.fillPoly(img=image, pts=[region], color=get_color(label))
cv2.putText(
img=image,
text=text,
org=(box.xmin + 13, box.ymin - 13),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1e-3 * image.shape[0],
color=(0, 0, 0),
thickness=1,
)
return image | utils/bbox.py |
import numpy as np
import cv2
from .colors import get_color
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, c=None, classes=None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.c = c
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
def draw_boxes(image, boxes, overlay_text, labels, obj_thresh, quiet=True):
for box, overlay in zip(boxes, overlay_text):
label_str = ""
label = -1
for i in range(len(labels)):
if box.classes[i] > obj_thresh:
if label_str != "":
label_str += ", "
label_str += labels[i] + " " + str(round(box.get_score() * 100, 2)) + "%"
label = i
if not quiet:
print(label_str)
if label >= 0:
if len(overlay) > 0:
text = label_str + ": [" + " ".join(overlay) + "]"
else:
text = label_str
text = text.upper()
text_size = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 1.1e-3 * image.shape[0], 5)
width, height = text_size[0][0], text_size[0][1]
region = np.array(
[
[box.xmin - 3, box.ymin],
[box.xmin - 3, box.ymin - height - 26],
[box.xmin + width + 13, box.ymin - height - 26],
[box.xmin + width + 13, box.ymin],
],
dtype="int32",
)
# cv2.rectangle(img=image, pt1=(box.xmin,box.ymin), pt2=(box.xmax,box.ymax), color=get_color(label), thickness=5)
rec = (box.xmin, box.ymin, box.xmax - box.xmin, box.ymax - box.ymin)
rec = tuple(int(i) for i in rec)
cv2.rectangle(img=image, rec=rec, color=get_color(label), thickness=3)
cv2.fillPoly(img=image, pts=[region], color=get_color(label))
cv2.putText(
img=image,
text=text,
org=(box.xmin + 13, box.ymin - 13),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1e-3 * image.shape[0],
color=(0, 0, 0),
thickness=1,
)
return image | 0.445288 | 0.27133 |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from ast import literal_eval
import compas
import compas_rhino
from compas.utilities import flatten
from compas.utilities import geometric_key
from compas_rhino.geometry import RhinoPoint
from compas_rhino.geometry import RhinoCurve
from compas_rhino.modifiers import VertexModifier
from compas_rhino.modifiers import EdgeModifier
from compas_rhino.modifiers import FaceModifier
from compas_rhino.selectors import VertexSelector
from compas_rhino.selectors import EdgeSelector
from compas_rhino.selectors import FaceSelector
try:
import Rhino
from Rhino.Geometry import Point3d
import rhinoscriptsyntax as rs
except ImportError:
compas.raise_if_ironpython()
__all__ = ['DiagramHelper']
def match_edges(diagram, keys):
temp = compas_rhino.get_objects(name="{}.edge.*".format(diagram.name))
names = compas_rhino.get_object_names(temp)
guids = []
for guid, name in zip(temp, names):
parts = name.split('.')[2].split('-')
u = literal_eval(parts[0])
v = literal_eval(parts[1])
if (u, v) in keys or (v, u) in keys:
guids.append(guid)
return guids
def match_vertices(diagram, keys):
temp = compas_rhino.get_objects(name="{}.vertex.*".format(diagram.name))
names = compas_rhino.get_object_names(temp)
guids = []
for guid, name in zip(temp, names):
parts = name.split('.')
key = literal_eval(parts[2])
if key in keys:
guids.append(guid)
return guids
class DiagramHelper(VertexSelector,
EdgeSelector,
FaceSelector,
VertexModifier,
EdgeModifier,
FaceModifier):
@staticmethod
def highlight_edges(diagram, keys):
guids = match_edges(diagram, keys)
rs.EnableRedraw(False)
rs.SelectObjects(guids)
rs.EnableRedraw(True)
@staticmethod
def unhighlight_edges(diagram, keys):
guids = match_edges(diagram, keys)
rs.EnableRedraw(False)
rs.UnselectObjects(guids)
rs.EnableRedraw(True)
@staticmethod
def highlight_vertices(diagram, keys):
guids = match_vertices(diagram, keys)
rs.EnableRedraw(False)
rs.SelectObjects(guids)
rs.EnableRedraw(True)
@staticmethod
def unhighlight_vertices(diagram, keys):
guids = match_vertices(diagram, keys)
rs.EnableRedraw(False)
rs.UnselectObjects(guids)
rs.EnableRedraw(True)
@staticmethod
def select_vertices_where(diagram, keys):
rs.UnselectAllObjects()
DiagramHelper.highlight_vertices(diagram, keys)
@staticmethod
def select_vertices_on_boundary(diagram):
rs.UnselectAllObjects()
key = DiagramHelper.select_vertex(diagram)
if key is None:
return
boundaries = diagram.vertices_on_boundaries()
for boundary in boundaries:
if key in boundary:
DiagramHelper.highlight_vertices(diagram, boundary)
return boundary
@staticmethod
def select_vertices_on_curve(diagram):
rs.UnselectAllObjects()
guid = compas_rhino.select_curve()
keys = DiagramHelper.identify_vertices_on_curve(diagram, guid)
DiagramHelper.highlight_vertices(diagram, keys)
return keys
@staticmethod
def select_vertices_on_curves(diagram):
rs.UnselectAllObjects()
guids = compas_rhino.select_curves()
keys = DiagramHelper.identify_vertices_on_curves(diagram, guids)
DiagramHelper.highlight_vertices(diagram, keys)
return keys
@staticmethod
def select_edges_on_curves(diagram):
rs.UnselectAllObjects()
guids = compas_rhino.select_curves()
keys = DiagramHelper.identify_edges_on_curves(diagram, guids)
DiagramHelper.highlight_edges(diagram, keys)
return keys
@staticmethod
def select_continuous_edges(diagram):
rs.UnselectAllObjects()
keys = DiagramHelper.select_edges(diagram)
if not keys:
return
keys = [diagram.get_continuous_edges(key) for key in keys]
keys = list(set(list(flatten(keys))))
DiagramHelper.highlight_edges(diagram, keys)
return keys
@staticmethod
def select_parallel_edges(diagram):
rs.UnselectAllObjects()
keys = DiagramHelper.select_edges(diagram)
if not keys:
return
keys = [diagram.get_parallel_edges(key) for key in keys]
keys = list(set(list(flatten(keys))))
DiagramHelper.highlight_edges(diagram, keys)
return keys
@staticmethod
def identify_vertices_on_points(diagram, guids):
gkey_key = diagram.gkey_key()
keys = []
for guid in guids:
point = RhinoPoint.from_guid(guid)
gkey = geometric_key(point.xyz)
if gkey in gkey_key:
key = gkey_key[gkey]
keys.append(key)
return keys
@staticmethod
def identify_vertices_on_curve(diagram, guid):
gkey_key = diagram.gkey_key()
keys = []
curve = RhinoCurve.from_guid(guid)
for key in diagram.vertices():
xyz = diagram.vertex_coordinates(key)
closest = curve.closest_point(xyz)
gkey = geometric_key(closest)
if gkey in gkey_key:
if key == gkey_key[gkey]:
keys.append(key)
return keys
@staticmethod
def identify_vertices_on_curves(diagram, guids):
gkey_key = diagram.gkey_key()
keys = []
for guid in guids:
curve = RhinoCurve.from_guid(guid)
for key in diagram.vertices():
xyz = diagram.vertex_coordinates(key)
closest = curve.closest_point(xyz)
gkey = geometric_key(closest)
if gkey in gkey_key:
if key == gkey_key[gkey]:
keys.append(key)
return keys
@staticmethod
def identify_edges_on_curves(diagram, guids):
edges = []
for guid in guids:
keys = DiagramHelper.identify_vertices_on_curve(diagram, guid)
if keys:
vertices = set(keys)
for u, v in diagram.edges():
if u in vertices and v in vertices:
edges.append((u, v))
return edges
@staticmethod
def move(diagram):
color = Rhino.ApplicationSettings.AppearanceSettings.FeedbackColor
origin = {key: diagram.vertex_coordinates(key) for key in diagram.vertices()}
vertex = {key: diagram.vertex_coordinates(key) for key in diagram.vertices()}
edges = list(diagram.edges())
start = compas_rhino.pick_point('Point to move from?')
if not start:
return False
def OnDynamicDraw(sender, e):
current = list(e.CurrentPoint)
vec = [current[i] - start[i] for i in range(3)]
for key in vertex:
vertex[key] = [origin[key][i] + vec[i] for i in range(3)]
for u, v in iter(edges):
sp = vertex[u]
ep = vertex[v]
sp = Point3d(*sp)
ep = Point3d(*ep)
e.Display.DrawDottedLine(sp, ep, color)
gp = Rhino.Input.Custom.GetPoint()
gp.SetCommandPrompt('Point to move to?')
gp.DynamicDraw += OnDynamicDraw
gp.Get()
if gp.CommandResult() == Rhino.Commands.Result.Success:
end = list(gp.Point())
vec = [end[i] - start[i] for i in range(3)]
for key, attr in diagram.vertices(True):
attr['x'] += vec[0]
attr['y'] += vec[1]
attr['z'] += vec[2]
return True
return False | src/compas_tna/rhino/diagramhelper.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from ast import literal_eval
import compas
import compas_rhino
from compas.utilities import flatten
from compas.utilities import geometric_key
from compas_rhino.geometry import RhinoPoint
from compas_rhino.geometry import RhinoCurve
from compas_rhino.modifiers import VertexModifier
from compas_rhino.modifiers import EdgeModifier
from compas_rhino.modifiers import FaceModifier
from compas_rhino.selectors import VertexSelector
from compas_rhino.selectors import EdgeSelector
from compas_rhino.selectors import FaceSelector
try:
import Rhino
from Rhino.Geometry import Point3d
import rhinoscriptsyntax as rs
except ImportError:
compas.raise_if_ironpython()
__all__ = ['DiagramHelper']
def match_edges(diagram, keys):
temp = compas_rhino.get_objects(name="{}.edge.*".format(diagram.name))
names = compas_rhino.get_object_names(temp)
guids = []
for guid, name in zip(temp, names):
parts = name.split('.')[2].split('-')
u = literal_eval(parts[0])
v = literal_eval(parts[1])
if (u, v) in keys or (v, u) in keys:
guids.append(guid)
return guids
def match_vertices(diagram, keys):
temp = compas_rhino.get_objects(name="{}.vertex.*".format(diagram.name))
names = compas_rhino.get_object_names(temp)
guids = []
for guid, name in zip(temp, names):
parts = name.split('.')
key = literal_eval(parts[2])
if key in keys:
guids.append(guid)
return guids
class DiagramHelper(VertexSelector,
EdgeSelector,
FaceSelector,
VertexModifier,
EdgeModifier,
FaceModifier):
@staticmethod
def highlight_edges(diagram, keys):
guids = match_edges(diagram, keys)
rs.EnableRedraw(False)
rs.SelectObjects(guids)
rs.EnableRedraw(True)
@staticmethod
def unhighlight_edges(diagram, keys):
guids = match_edges(diagram, keys)
rs.EnableRedraw(False)
rs.UnselectObjects(guids)
rs.EnableRedraw(True)
@staticmethod
def highlight_vertices(diagram, keys):
guids = match_vertices(diagram, keys)
rs.EnableRedraw(False)
rs.SelectObjects(guids)
rs.EnableRedraw(True)
@staticmethod
def unhighlight_vertices(diagram, keys):
guids = match_vertices(diagram, keys)
rs.EnableRedraw(False)
rs.UnselectObjects(guids)
rs.EnableRedraw(True)
@staticmethod
def select_vertices_where(diagram, keys):
rs.UnselectAllObjects()
DiagramHelper.highlight_vertices(diagram, keys)
@staticmethod
def select_vertices_on_boundary(diagram):
rs.UnselectAllObjects()
key = DiagramHelper.select_vertex(diagram)
if key is None:
return
boundaries = diagram.vertices_on_boundaries()
for boundary in boundaries:
if key in boundary:
DiagramHelper.highlight_vertices(diagram, boundary)
return boundary
@staticmethod
def select_vertices_on_curve(diagram):
rs.UnselectAllObjects()
guid = compas_rhino.select_curve()
keys = DiagramHelper.identify_vertices_on_curve(diagram, guid)
DiagramHelper.highlight_vertices(diagram, keys)
return keys
@staticmethod
def select_vertices_on_curves(diagram):
rs.UnselectAllObjects()
guids = compas_rhino.select_curves()
keys = DiagramHelper.identify_vertices_on_curves(diagram, guids)
DiagramHelper.highlight_vertices(diagram, keys)
return keys
@staticmethod
def select_edges_on_curves(diagram):
rs.UnselectAllObjects()
guids = compas_rhino.select_curves()
keys = DiagramHelper.identify_edges_on_curves(diagram, guids)
DiagramHelper.highlight_edges(diagram, keys)
return keys
@staticmethod
def select_continuous_edges(diagram):
rs.UnselectAllObjects()
keys = DiagramHelper.select_edges(diagram)
if not keys:
return
keys = [diagram.get_continuous_edges(key) for key in keys]
keys = list(set(list(flatten(keys))))
DiagramHelper.highlight_edges(diagram, keys)
return keys
@staticmethod
def select_parallel_edges(diagram):
rs.UnselectAllObjects()
keys = DiagramHelper.select_edges(diagram)
if not keys:
return
keys = [diagram.get_parallel_edges(key) for key in keys]
keys = list(set(list(flatten(keys))))
DiagramHelper.highlight_edges(diagram, keys)
return keys
@staticmethod
def identify_vertices_on_points(diagram, guids):
gkey_key = diagram.gkey_key()
keys = []
for guid in guids:
point = RhinoPoint.from_guid(guid)
gkey = geometric_key(point.xyz)
if gkey in gkey_key:
key = gkey_key[gkey]
keys.append(key)
return keys
@staticmethod
def identify_vertices_on_curve(diagram, guid):
gkey_key = diagram.gkey_key()
keys = []
curve = RhinoCurve.from_guid(guid)
for key in diagram.vertices():
xyz = diagram.vertex_coordinates(key)
closest = curve.closest_point(xyz)
gkey = geometric_key(closest)
if gkey in gkey_key:
if key == gkey_key[gkey]:
keys.append(key)
return keys
@staticmethod
def identify_vertices_on_curves(diagram, guids):
gkey_key = diagram.gkey_key()
keys = []
for guid in guids:
curve = RhinoCurve.from_guid(guid)
for key in diagram.vertices():
xyz = diagram.vertex_coordinates(key)
closest = curve.closest_point(xyz)
gkey = geometric_key(closest)
if gkey in gkey_key:
if key == gkey_key[gkey]:
keys.append(key)
return keys
@staticmethod
def identify_edges_on_curves(diagram, guids):
edges = []
for guid in guids:
keys = DiagramHelper.identify_vertices_on_curve(diagram, guid)
if keys:
vertices = set(keys)
for u, v in diagram.edges():
if u in vertices and v in vertices:
edges.append((u, v))
return edges
@staticmethod
def move(diagram):
color = Rhino.ApplicationSettings.AppearanceSettings.FeedbackColor
origin = {key: diagram.vertex_coordinates(key) for key in diagram.vertices()}
vertex = {key: diagram.vertex_coordinates(key) for key in diagram.vertices()}
edges = list(diagram.edges())
start = compas_rhino.pick_point('Point to move from?')
if not start:
return False
def OnDynamicDraw(sender, e):
current = list(e.CurrentPoint)
vec = [current[i] - start[i] for i in range(3)]
for key in vertex:
vertex[key] = [origin[key][i] + vec[i] for i in range(3)]
for u, v in iter(edges):
sp = vertex[u]
ep = vertex[v]
sp = Point3d(*sp)
ep = Point3d(*ep)
e.Display.DrawDottedLine(sp, ep, color)
gp = Rhino.Input.Custom.GetPoint()
gp.SetCommandPrompt('Point to move to?')
gp.DynamicDraw += OnDynamicDraw
gp.Get()
if gp.CommandResult() == Rhino.Commands.Result.Success:
end = list(gp.Point())
vec = [end[i] - start[i] for i in range(3)]
for key, attr in diagram.vertices(True):
attr['x'] += vec[0]
attr['y'] += vec[1]
attr['z'] += vec[2]
return True
return False | 0.571288 | 0.119511 |
import abc
import os
import re
import numpy as np
from erhsh.utils.print_util import TblPrinter
class CheckpointLoader(object):
def __init__(self, checkpoint_path):
self.checkpoint_path = checkpoint_path
@abc.abstractmethod
def _load_checkpoint(self):
pass
def __list(self, filter_key=None):
param_dict = self._load_checkpoint()
filter_dict = {}
for k, v in param_dict.items():
if filter_key \
and (filter_key not in k) \
and (not re.search(filter_key, k, re.M | re.I)):
continue
filter_dict[k] = v
return param_dict, filter_dict
def __get(self, key):
param_dict = self._load_checkpoint()
return param_dict.get(key)
def list(self, filter_key=None):
param_dict, filter_dict = self.__list(filter_key=filter_key)
ret = {}
tp = TblPrinter("Param Keys", "Value Shape")
for k, v in filter_dict.items():
v = str(v.shape)
ret[k] = v
tp.add_row(k, v)
tp.print()
print("Filter/Total: {}/{}".format(len(ret), len(param_dict)))
return ret
def get(self, key):
v = self.__get(key)
if v is None:
print("param key not found! key={}".format(key))
return
tp = TblPrinter("Param Keys", "Value Shape", "Value Type")
tp.add_row(key, str(v.shape), str(v.dtype))
vf = v.flatten()
length = len(vf)
if length <= 100:
tp.add_row(vf)
elif length <= 200:
tp.add_row(vf[:100])
tp.add_row(vf[100:])
else:
tp.add_row(vf[:100])
tp.add_row("...")
tp.add_row(vf[-100:])
tp.print()
print("Max:{:.7f}, Min:{:.7f}, Mean:{:.7f}".format(v.max(), v.min(), v.mean()))
def list_dump(self, filter_key=None, dump_to=None):
print("Begin dump {0} to {1}".format(self.checkpoint_path, dump_to))
print("Filter is: {}".format(filter_key))
_, filter_dict = self.__list(filter_key=filter_key)
for k, v in filter_dict.items():
dump_file = os.path.join(dump_to, k + '.npy')
np.save(dump_file, v)
print("Dump to: {}".format(dump_file))
def get_dump(self, key, dump_to=None):
print("Begin dump {0} to {1}".format(self.checkpoint_path, dump_to))
print("Key is: {}".format(key))
v = self.__get(key)
dump_file = os.path.join(dump_to, key + '.npy')
np.save(dump_file, v)
print("Dump to: {}".format(dump_file)) | erhsh/common/checkpoint.py | import abc
import os
import re
import numpy as np
from erhsh.utils.print_util import TblPrinter
class CheckpointLoader(object):
    """Inspect and dump parameters stored in a model checkpoint.

    Subclasses implement :meth:`_load_checkpoint` to return a dict mapping
    parameter keys to numpy arrays; this base class provides listing,
    filtering, lookup and ``.npy`` dumping on top of that mapping.
    """

    def __init__(self, checkpoint_path):
        # Path to the checkpoint; how it is read is up to the subclass.
        self.checkpoint_path = checkpoint_path

    @abc.abstractmethod
    def _load_checkpoint(self):
        """Return a dict of parameter key -> numpy array."""
        pass

    def __list(self, filter_key=None):
        """Return ``(all_params, filtered_params)``.

        A key passes the filter when ``filter_key`` is falsy, is a plain
        substring of the key, or matches it as a case-insensitive regex.
        """
        param_dict = self._load_checkpoint()
        filter_dict = {}
        for k, v in param_dict.items():
            if filter_key \
                    and (filter_key not in k) \
                    and (not re.search(filter_key, k, re.M | re.I)):
                continue
            filter_dict[k] = v
        return param_dict, filter_dict

    def __get(self, key):
        """Return the array stored under ``key``, or None when absent."""
        return self._load_checkpoint().get(key)

    def list(self, filter_key=None):
        """Print a table of matching keys and shapes; return {key: shape-str}."""
        param_dict, filter_dict = self.__list(filter_key=filter_key)
        ret = {}
        tp = TblPrinter("Param Keys", "Value Shape")
        for k, v in filter_dict.items():
            shape = str(v.shape)
            ret[k] = shape
            tp.add_row(k, shape)
        tp.print()
        print("Filter/Total: {}/{}".format(len(ret), len(param_dict)))
        return ret

    def get(self, key):
        """Pretty-print one parameter: shape, dtype, values and basic stats."""
        v = self.__get(key)
        if v is None:
            print("param key not found! key={}".format(key))
            return
        tp = TblPrinter("Param Keys", "Value Shape", "Value Type")
        tp.add_row(key, str(v.shape), str(v.dtype))
        vf = v.flatten()
        length = len(vf)
        # Show at most ~200 values; elide the middle of larger tensors.
        if length <= 100:
            tp.add_row(vf)
        elif length <= 200:
            tp.add_row(vf[:100])
            tp.add_row(vf[100:])
        else:
            tp.add_row(vf[:100])
            tp.add_row("...")
            tp.add_row(vf[-100:])
        tp.print()
        print("Max:{:.7f}, Min:{:.7f}, Mean:{:.7f}".format(v.max(), v.min(), v.mean()))

    def list_dump(self, filter_key=None, dump_to=None):
        """Dump every parameter matching ``filter_key`` into ``dump_to`` as .npy files."""
        print("Begin dump {0} to {1}".format(self.checkpoint_path, dump_to))
        print("Filter is: {}".format(filter_key))
        _, filter_dict = self.__list(filter_key=filter_key)
        for k, v in filter_dict.items():
            dump_file = os.path.join(dump_to, k + '.npy')
            np.save(dump_file, v)
            print("Dump to: {}".format(dump_file))

    def get_dump(self, key, dump_to=None):
        """Dump the single parameter ``key`` into ``dump_to`` as a .npy file."""
        print("Begin dump {0} to {1}".format(self.checkpoint_path, dump_to))
        print("Key is: {}".format(key))
        v = self.__get(key)
        if v is None:
            # Bug fix: a missing key used to be np.save()d as a None object
            # array; report it and skip the write instead.
            print("param key not found! key={}".format(key))
            return
        dump_file = os.path.join(dump_to, key + '.npy')
        np.save(dump_file, v)
        print("Dump to: {}".format(dump_file))
from collections import MutableMapping
import math
import random
class SkipList(MutableMapping):
    """Sorted map implemented as a skip list.

    Level 0 is a linked list of every node in key order; each higher level
    skips roughly half the nodes below it, giving expected O(log n)
    search, insert and delete. ``_head``/``_tail`` are -inf/+inf sentinels.
    """
    __slots__ = '_head', '_tail', '_n', '_height'
    #------------------------------- nested _Node class -------------------------------
    class _Node:
        __slots__ = '_key', '_value', '_next'
        """Lightweight composite to store key-value pairs as map items."""
        def __init__(self, k, v, height):
            self._key = k
            self._value = v
            # _next[i] is this node's successor at level i.
            self._next = [None] * (height)
        def __eq__(self, other):
            # Guard: comparing against None is always False.
            if other == None:
                return False
            return self._key == other._key # compare items based on their keys
        def __ne__(self, other):
            return not (self == other) # opposite of __eq__
        def __lt__(self, other):
            return self._key < other._key # compare items based on their keys
        def __repr__(self):
            # Displays the stored value (not the key).
            return str(self._value)
    def __init__(self):
        """Create an empty map."""
        # Sentinel keys -inf/+inf mean every real key sorts strictly
        # between head and tail, removing boundary checks in do_find.
        self._head = self._Node(-math.inf, 'head',
                                1)  # Head: the first node in a skip list
        # Tail: the last node in a skip list
        self._tail = self._Node(math.inf, 'tail', 1)
        # Initially, there's no item -> head is directly linked to the tail
        self._head._next[0] = self._tail
        self._n = 0  # Initially, there's no item, so _n = 0
        self._height = 1  # Initially, the height of a skip list is 1
    def __getitem__(self, k, update=None):
        """Return value associated with key k (raise KeyError if not found).

        NOTE(review): this returns the _Node itself, not ``node._value``;
        printing looks correct only because _Node.__repr__ shows the value.
        Callers using sl[k] as a plain value get a node -- confirm intent.
        """
        node, _ = self.do_find(k)
        if node is None:
            raise KeyError(f'{k} not found')
        return node
    def __setitem__(self, k, v):
        """Assign value v to key k, overwriting existing value if present."""
        node, update = self.do_find(k)
        if node:
            node._value = v
            return
        # New key: pick a random tower height, grow head/tail and the
        # update path up to that height, then splice the node in.
        new_height = self.get_random_height()
        height = self._height
        update.extend([self._head for _ in range(height, new_height)])
        self._head._next.extend(
            [self._tail for _ in range(height, new_height)])
        self._tail._next.extend([None for _ in range(height, new_height)])
        self._height = max(self._height, new_height)
        new_node = self._Node(k, v, new_height)
        # Standard splice: new node inherits each predecessor's successor,
        # then each predecessor points at the new node.
        new_node._next = [update[level]._next[level]
                          for level in range(new_height)]
        for level in range(new_height):
            update[level]._next[level] = new_node
        self._n += 1
    def __delitem__(self, k):
        """Remove item associated with key k (raise KeyError if not found)."""
        node, update = self.do_find(k)
        if not node:
            raise KeyError(f'{k} not found')
        for i in reversed(range(len(node._next))):
            # Unlink the node at this level.
            update[i]._next[i] = node._next[i]
            # Shrink the list when the topmost level just became empty
            # (head pointing straight at tail), but never below height 1.
            if len(self._head._next) > 1 and self._head._next[i] == self._tail:
                self._height -= 1
                head = self._head._next.pop()
                del head
        self._n -= 1
        del node
    def __len__(self):
        """Return number of items in the map."""
        return self._n
    def __iter__(self):
        """Generate iteration of the map's keys."""
        # iterate over the base height (=> height = 0)
        node = self._head._next[0]
        while not node is self._tail:
            yield node._key
            node = node._next[0]
    def get_random_height(self):
        """Coin-flip tower height: P(height = h) = 2**-h (unbounded)."""
        height = 1
        while random.choice([True, False]):
            height += 1
        return height
    def do_find(self, k: int):
        """Return ``(node_with_key_k_or_None, update)``.

        ``update[i]`` is the rightmost node at level i with key < k, i.e.
        the splice point used by insert/delete.
        NOTE(review): each level restarts the scan from ``current`` (always
        head), not from the node reached on the level above; results are
        correct but the descent is O(height * n) -- confirm intended.
        """
        height = self._height
        update = [self._head] * height
        current = self._head
        result = None
        for level in reversed(range(height)):
            node = current
            while node._next[level] != self._tail and node._next[level]._key < k:
                node = node._next[level]
            if node._next[level]._key == k:
                result = node._next[level]
            update[level] = node
        return result, update
    def print_tree(self):
        """Debug dump: one column per node with its key at each level,
        then one line per level showing the chain of keys."""
        print('^^^^^^^^^^^^^^^^^^^^^^^^^^')
        node = self._head
        while node != None:
            print('#', end='\t')
            for i in range(self._height):
                lnk = node._next[i] if i < len(node._next) else None
                if node is self._tail:
                    print_val = '+'
                elif lnk == None:
                    print_val = '.'
                elif node._key == -math.inf:
                    print_val = '-'
                elif node._key == math.inf:
                    print_val = '+'
                else:
                    print_val = node._key
                print(print_val, end='\t')
            print()
            node = node._next[0]
        for h in reversed(range(self._height)):
            print(f"At height #{h}, ", end='')
            node = self._head
            while node != None:
                print(node._key, end=' -> ')
                # print(f'h: {h}, node: {node._next}')
                node = node._next[h]
            print()
        print('vvvvvvvvvvvvvvvvvvvvvvvvvv')
'''
if __name__ == '__main__':
sl = SkipList()
for i in range(10):
sl[i] = chr(65 + i)
print(f'sl[{i}] = {sl[i]}')
sl.print_tree()
for i in range(0, 10, 3):
print(i)
del sl[i]
sl.print_tree()
for i in range(10):
try:
print(sl[i])
except KeyError as e:
print(e)
''' | Assignments/Assignment_Midterm/DS_Mid_201911189/skiplist.py | from collections import MutableMapping
import math
import random
class SkipList(MutableMapping):
__slots__ = '_head', '_tail', '_n', '_height'
#------------------------------- nested _Node class -------------------------------
class _Node:
__slots__ = '_key', '_value', '_next'
"""Lightweight composite to store key-value pairs as map items."""
def __init__(self, k, v, height):
self._key = k
self._value = v
self._next = [None] * (height)
def __eq__(self, other):
if other == None:
return False
return self._key == other._key # compare items based on their keys
def __ne__(self, other):
return not (self == other) # opposite of __eq__
def __lt__(self, other):
return self._key < other._key # compare items based on their keys
def __repr__(self):
return str(self._value)
def __init__(self):
"""Create an empty map."""
self._head = self._Node(-math.inf, 'head',
1) # Head: the first node in a skip list
# Tail: the last node in a skip list
self._tail = self._Node(math.inf, 'tail', 1)
# Initially, there's no item -> head is directly linked to the tail
self._head._next[0] = self._tail
self._n = 0 # Initially, there's no item, so _n = 0
self._height = 1 # Initially, the height of a skip list is 1
def __getitem__(self, k, update=None):
"""Return value associated with key k (raise KeyError if not found)."""
node, _ = self.do_find(k)
if node is None:
raise KeyError(f'{k} not found')
return node
def __setitem__(self, k, v):
"""Assign value v to key k, overwriting existing value if present."""
node, update = self.do_find(k)
if node:
node._value = v
return
new_height = self.get_random_height()
height = self._height
update.extend([self._head for _ in range(height, new_height)])
self._head._next.extend(
[self._tail for _ in range(height, new_height)])
self._tail._next.extend([None for _ in range(height, new_height)])
self._height = max(self._height, new_height)
new_node = self._Node(k, v, new_height)
new_node._next = [update[level]._next[level]
for level in range(new_height)]
for level in range(new_height):
update[level]._next[level] = new_node
self._n += 1
def __delitem__(self, k):
"""Remove item associated with key k (raise KeyError if not found)."""
node, update = self.do_find(k)
if not node:
raise KeyError(f'{k} not found')
for i in reversed(range(len(node._next))):
update[i]._next[i] = node._next[i]
if len(self._head._next) > 1 and self._head._next[i] == self._tail:
self._height -= 1
head = self._head._next.pop()
del head
self._n -= 1
del node
def __len__(self):
"""Return number of items in the map."""
return self._n
def __iter__(self):
"""Generate iteration of the map's keys."""
# iterate over the base height (=> height = 0)
node = self._head._next[0]
while not node is self._tail:
yield node._key
node = node._next[0]
def get_random_height(self):
height = 1
while random.choice([True, False]):
height += 1
return height
def do_find(self, k: int):
height = self._height
update = [self._head] * height
current = self._head
result = None
for level in reversed(range(height)):
node = current
while node._next[level] != self._tail and node._next[level]._key < k:
node = node._next[level]
if node._next[level]._key == k:
result = node._next[level]
update[level] = node
return result, update
def print_tree(self):
print('^^^^^^^^^^^^^^^^^^^^^^^^^^')
node = self._head
while node != None:
print('#', end='\t')
for i in range(self._height):
lnk = node._next[i] if i < len(node._next) else None
if node is self._tail:
print_val = '+'
elif lnk == None:
print_val = '.'
elif node._key == -math.inf:
print_val = '-'
elif node._key == math.inf:
print_val = '+'
else:
print_val = node._key
print(print_val, end='\t')
print()
node = node._next[0]
for h in reversed(range(self._height)):
print(f"At height #{h}, ", end='')
node = self._head
while node != None:
print(node._key, end=' -> ')
# print(f'h: {h}, node: {node._next}')
node = node._next[h]
print()
print('vvvvvvvvvvvvvvvvvvvvvvvvvv')
'''
if __name__ == '__main__':
sl = SkipList()
for i in range(10):
sl[i] = chr(65 + i)
print(f'sl[{i}] = {sl[i]}')
sl.print_tree()
for i in range(0, 10, 3):
print(i)
del sl[i]
sl.print_tree()
for i in range(10):
try:
print(sl[i])
except KeyError as e:
print(e)
''' | 0.7586 | 0.143608 |
import os
import sys
import shutil
import platform
import subprocess
import secrets
import click
import jinja2
from . import __version__
src = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'skeleton')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__))))
@click.command()
@click.argument('app_name', type=click.STRING)
@click.option(
    '-d', '--dir', default=None, type=click.Path(exists=True, writable=True),
    help='Where to create your app. Defaults to the current directory.')
@click.option('-e', '--env', is_flag=True, help='Create a virtual environment.')
@click.option('-g', '--git', is_flag=True, help='Initialize a git repository.')
@click.version_option(__version__, '-V', '--version')
@click.help_option('-h', '--help')
def create_flask_app(app_name, dir, env, git):
    """Create a flask app skeleton.

    Copies the bundled skeleton into <dir>/<app_name>, appends a fresh
    SECRET_KEY to its .env file, and optionally creates a virtualenv
    and/or initializes a git repository. Prompts (and aborts) before
    overwriting an existing destination.
    """
    dest = os.path.abspath(os.path.join(os.getcwd() if dir is None else dir, app_name))
    # Show a settings summary when the template ships with the package; a
    # missing template silently skips the summary and its confirmation.
    try:
        summary = jinja_env.get_template('summary.jinja')
    except jinja2.TemplateNotFound:
        pass
    else:
        click.echo(summary.render(dict(
            app_name=app_name,
            path=dest,
            version=platform.python_version(),
            env=env,
            git=git)))
        click.confirm('Continue with these settings?', abort=True)
    if os.path.exists(dest):
        click.confirm('The destination already exists. Overwrite?', abort=True)
        shutil.rmtree(dest)
    click.echo('Copying files...')
    shutil.copytree(src, dest)
    # Give each generated app its own random session-signing key.
    with open(os.path.join(dest, ".env"), "a") as f:
        f.writelines(["\n", "SECRET_KEY=%s" % secrets.token_hex(32)])
    if env is True:
        create_env(dest)
    if git is True:
        init_git_repo(dest)
    click.echo('Done! App created in: %s' % dest)
def create_env(dest, env_name='env'):
    """
    Create a virtual environment and install the skeleton's requirements.

    :param dest: The full path to the project root.
    :param env_name: Directory name of the virtualenv inside ``dest``.
    :return: True on success, False when a step failed and was skipped.
    """
    click.echo('Creating a virtual environment...')
    virtualenv = shutil.which('virtualenv')
    if virtualenv is None:
        click.echo('Failed to find virtualenv executable...Skipping!')
        return False
    env_path = os.path.join(dest, env_name)
    try:
        subprocess.run([virtualenv, '--python=%s' % sys.executable, env_path], check=True)
    except subprocess.SubprocessError:
        # Remove any half-created environment so a retry starts clean
        # (ignore_errors: virtualenv may have failed before creating it).
        shutil.rmtree(env_path, ignore_errors=True)
        click.echo('A problem occurred with virtualenv...Skipping!')
        return False
    # Keep the environment out of version control.
    with open(os.path.join(dest, '.gitignore'), 'a') as f:
        f.writelines(['\n', '%s/' % os.path.basename(env_path)])
    click.echo('Installing packages...')
    pip = os.path.join(env_path, 'bin/pip')
    requirements = os.path.join(dest, 'requirements.txt')
    try:
        subprocess.run([pip, 'install', '-r', requirements], check=True)
        # Bug fix: '>' is shell redirection; subprocess.run with an argv
        # list passed it to pip as a literal argument, so requirements.txt
        # was never updated. Capture stdout and write the file ourselves.
        frozen = subprocess.run([pip, 'freeze'], check=True,
                                stdout=subprocess.PIPE, universal_newlines=True)
    except subprocess.SubprocessError:
        click.echo('A problem occurred with pip...Skipping!')
        return False
    else:
        with open(requirements, 'w') as f:
            f.write(frozen.stdout)
        return True
def init_git_repo(dest):
    """
    Initialize a git repository with an initial commit and a devel branch.

    :param dest: The full path to the project root.
    :return: True on success, False when git is missing or a command failed.
    """
    click.echo('Initializing git repository...')
    git = shutil.which('git')
    if git is None:
        click.echo('Failed to find git executable...Skipping!')
        return False
    # NOTE: these variables point git at the new project tree, but they
    # also leak to every later subprocess spawned by this process.
    os.environ['GIT_WORK_TREE'] = dest
    os.environ['GIT_DIR'] = os.path.join(dest, '.git')
    try:
        subprocess.run([git, 'init'], check=True)
        click.echo('Committing changes...')
        subprocess.run([git, 'add', dest], check=True)
        subprocess.run([git, 'commit', '-m', '"Creates app skeleton."'], check=True)
        subprocess.run([git, 'checkout', '-b', 'devel'], check=True)
    except subprocess.SubprocessError:
        # Typo fix in the user-facing message ("whith" -> "with").
        click.echo('A problem occurred with git...Skipping!')
        return False
    else:
        return True
import sys
import shutil
import platform
import subprocess
import secrets
import click
import jinja2
from . import __version__
src = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'skeleton')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__))))
@click.command()
@click.argument('app_name', type=click.STRING)
@click.option(
'-d', '--dir', default=None, type=click.Path(exists=True, writable=True),
help='Where to create your app. Defaults to the current directory.')
@click.option('-e', '--env', is_flag=True, help='Create a virtual environment.')
@click.option('-g', '--git', is_flag=True, help='Initialize a git repository.')
@click.version_option(__version__, '-V', '--version')
@click.help_option('-h', '--help')
def create_flask_app(app_name, dir, env, git):
"""Create a flask app skeleton."""
dest = os.path.abspath(os.path.join(os.getcwd() if dir is None else dir, app_name))
try:
summary = jinja_env.get_template('summary.jinja')
except jinja2.TemplateNotFound:
pass
else:
click.echo(summary.render(dict(
app_name=app_name,
path=dest,
version=platform.python_version(),
env=env,
git=git)))
click.confirm('Continue with these settings?', abort=True)
if os.path.exists(dest):
click.confirm('The destination already exists. Overwrite?', abort=True)
shutil.rmtree(dest)
click.echo('Copying files...')
shutil.copytree(src, dest)
with open(os.path.join(dest, ".env"), "a") as f:
f.writelines(["\n", "SECRET_KEY=%s" % secrets.token_hex(32)])
if env is True:
create_env(dest)
if git is True:
init_git_repo(dest)
click.echo('Done! App created in: %s' % dest)
def create_env(dest, env_name='env'):
"""
Create a virtual environment.
:param dest: The full path to the project root.
"""
click.echo('Creating a virtual environment...')
virtualenv = shutil.which('virtualenv')
if virtualenv is None:
click.echo('Failed to find virtualenv executable...Skipping!')
return False
env_path = os.path.join(dest, env_name)
try:
subprocess.run([virtualenv, '--python=%s' % sys.executable, env_path], check=True)
except subprocess.SubprocessError:
shutil.rmtree(env_path)
click.echo('A problem occured whith virtualenv...Skipping!')
return False
with open(os.path.join(dest, '.gitignore'), 'a') as f:
f.writelines(['\n', '%s/' % os.path.basename(env_path)])
click.echo('Installing packages...')
pip = os.path.join(env_path, 'bin/pip')
requirements = os.path.join(dest, 'requirements.txt')
try:
subprocess.run([pip, 'install', '-r', requirements], check=True)
subprocess.run([pip, 'freeze', '>', requirements], check=True)
except subprocess.SubprocessError:
click.echo('A problem occurred with pip...Skipping!')
return False
else:
return True
def init_git_repo(dest):
"""
Initialize a git repository.
:param dest: The full path to the project root.
"""
click.echo('Initializing git repository...')
git = shutil.which('git')
if git is None:
click.echo('Failed to find git executable...Skipping!')
return False
os.environ['GIT_WORK_TREE'] = dest
os.environ['GIT_DIR'] = os.path.join(dest, '.git')
try:
subprocess.run([git, 'init'], check=True)
click.echo('Committing changes...')
subprocess.run([git, 'add', dest], check=True)
subprocess.run([git, 'commit', '-m', '"Creates app skeleton."'], check=True)
subprocess.run([git, 'checkout', '-b', 'devel'], check=True)
except subprocess.SubprocessError:
click.echo('A problem occurred whith git...Skipping!')
return False
else:
return True | 0.249082 | 0.047426 |
from helpers.api_request import request_url
from helpers.api_token import get_token
from config.api import settings
# Bearer tokens for an admin and an unprivileged user, fetched once at
# import time and shared by every test below.
token = get_token(settings.CREDENTIALS_ADM)
token_unprivileges = get_token(settings.CREDENTIALS_USER)
# Stack creation payloads and stack names for the two providers under test.
aws_data = settings.STACK_POST_AWS
aws_stack_name = settings.STACK_NAME_AWS
gcp_data = settings.STACK_POST_GCP
gcp_stack_name = settings.STACK_NAME_GCP
def test_create_stack_aws():
    """Admin can create the AWS stack; 409 (already exists) is tolerated."""
    auth = {"Authorization": f"Bearer {token}"}
    reply = request_url(verb='POST', uri='stacks/', headers=auth, json=aws_data)
    status = reply.get('status_code')
    if status != 409:
        assert status == 200
def test_create_stack_gcp():
    """Admin can create the GCP stack; 409 (already exists) is tolerated."""
    auth = {"Authorization": f"Bearer {token}"}
    reply = request_url(verb='POST', uri='stacks/', headers=auth, json=gcp_data)
    status = reply.get('status_code')
    if status != 409:
        assert status == 200
def test_try_create_stack_as_unprivilege_user():
    """Stack creation with a non-admin token must be rejected with 403."""
    auth = {"Authorization": f"Bearer {token_unprivileges}"}
    reply = request_url(verb='POST', uri='stacks/', headers=auth, json=aws_data)
    assert reply.get('status_code') == 403
def test_try_create_stack_as_not_authenticated_user():
    # NOTE(review): despite the name, this sends the *unprivileged* token,
    # making it an exact duplicate of the test above. A true
    # unauthenticated check would omit the Authorization header (and
    # likely expect 401) -- confirm intended API behavior before changing.
    response = request_url(
        verb='POST',
        uri='stacks/',
        headers={
            "Authorization": f"Bearer {token_unprivileges}"},
        json=aws_data)
    result = response.get('status_code')
    assert result == 403
def test_list_stack_by_name():
    """Admin can fetch a stack addressed by its name."""
    auth = {"Authorization": f"Bearer {token}"}
    reply = request_url(verb='GET', uri=f'stacks/{aws_stack_name}', headers=auth)
    assert reply.get('status_code') == 200
def test_try_list_stack_by_name_as_unprivilege_user():
    """Read access is allowed even for unprivileged users (200, not 403)."""
    auth = {"Authorization": f"Bearer {token_unprivileges}"}
    reply = request_url(verb='GET', uri=f'stacks/{aws_stack_name}', headers=auth)
    assert reply.get('status_code') == 200
def test_delete_stack_by_name():
    """Admin can delete a stack addressed by its name."""
    auth = {"Authorization": f"Bearer {token}"}
    reply = request_url(verb='DELETE', uri=f'stacks/{aws_stack_name}', headers=auth)
    assert reply.get('status_code') == 200
def test_create_stack_for_test_by_id():
    """Recreate the AWS stack so the by-id tests below have a subject."""
    auth = {"Authorization": f"Bearer {token}"}
    reply = request_url(verb='POST', uri='stacks/', headers=auth, json=aws_data)
    status = reply.get('status_code')
    if status != 409:
        assert status == 200
def test_list_stack_by_id():
    """Resolve the stack's id via its name, then fetch it by id."""
    auth = {"Authorization": f"Bearer {token}"}
    by_name = request_url(verb='GET', uri=f'stacks/{aws_stack_name}', headers=auth)
    stack_id = by_name.get("json").get("id")
    by_id = request_url(verb='GET', uri=f'stacks/{stack_id}', headers=auth)
    assert by_id.get('status_code') == 200
def test_delete_stack_by_id():
    """Resolve the stack's id via its name, then delete it by id."""
    auth = {"Authorization": f"Bearer {token}"}
    by_name = request_url(verb='GET', uri=f'stacks/{aws_stack_name}', headers=auth)
    stack_id = by_name.get("json").get("id")
    deletion = request_url(verb='DELETE', uri=f'stacks/{stack_id}', headers=auth)
    assert deletion.get('status_code') == 200
def test_create_stack_aws_for_poc():
    """Leave an AWS stack in place for later suites; 409 is tolerated."""
    auth = {"Authorization": f"Bearer {token}"}
    reply = request_url(verb='POST', uri='stacks/', headers=auth, json=aws_data)
    status = reply.get('status_code')
    if status != 409:
        assert status == 200
from helpers.api_token import get_token
from config.api import settings
token = get_token(settings.CREDENTIALS_ADM)
token_unprivileges = get_token(settings.CREDENTIALS_USER)
aws_data = settings.STACK_POST_AWS
aws_stack_name = settings.STACK_NAME_AWS
gcp_data = settings.STACK_POST_GCP
gcp_stack_name = settings.STACK_NAME_GCP
def test_create_stack_aws():
response = request_url(verb='POST', uri='stacks/', headers={
"Authorization": f"Bearer {token}"}, json=aws_data)
result = response.get('status_code')
if result != 409:
assert result == 200
def test_create_stack_gcp():
response = request_url(verb='POST', uri='stacks/', headers={
"Authorization": f"Bearer {token}"}, json=gcp_data)
result = response.get('status_code')
if result != 409:
assert result == 200
def test_try_create_stack_as_unprivilege_user():
response = request_url(
verb='POST',
uri='stacks/',
headers={
"Authorization": f"Bearer {token_unprivileges}"},
json=aws_data)
result = response.get('status_code')
assert result == 403
def test_try_create_stack_as_not_authenticated_user():
response = request_url(
verb='POST',
uri='stacks/',
headers={
"Authorization": f"Bearer {token_unprivileges}"},
json=aws_data)
result = response.get('status_code')
assert result == 403
def test_list_stack_by_name():
response = request_url(verb='GET', uri=f'stacks/{aws_stack_name}', headers={
"Authorization": f"Bearer {token}"})
result = response.get('status_code')
assert result == 200
def test_try_list_stack_by_name_as_unprivilege_user():
response = request_url(verb='GET', uri=f'stacks/{aws_stack_name}', headers={
"Authorization": f"Bearer {token_unprivileges}"})
result = response.get('status_code')
assert result == 200
def test_delete_stack_by_name():
response = request_url(verb='DELETE', uri=f'stacks/{aws_stack_name}', headers={
"Authorization": f"Bearer {token}"})
assert response.get('status_code') == 200
def test_create_stack_for_test_by_id():
response = request_url(verb='POST', uri='stacks/', headers={
"Authorization": f"Bearer {token}"}, json=aws_data)
result = response.get('status_code')
if result != 409:
assert result == 200
def test_list_stack_by_id():
response = request_url(verb='GET', uri=f'stacks/{aws_stack_name}', headers={
"Authorization": f"Bearer {token}"})
stack_id = response.get("json").get("id")
response = request_url(verb='GET', uri=f'stacks/{stack_id}', headers={
"Authorization": f"Bearer {token}"})
result = response.get('status_code')
assert result == 200
def test_delete_stack_by_id():
response = request_url(verb='GET', uri=f'stacks/{aws_stack_name}', headers={
"Authorization": f"Bearer {token}"})
stack_id = response.get("json").get("id")
response = request_url(verb='DELETE', uri=f'stacks/{stack_id}', headers={
"Authorization": f"Bearer {token}"})
assert response.get('status_code') == 200
def test_create_stack_aws_for_poc():
response = request_url(verb='POST', uri='stacks/', headers={
"Authorization": f"Bearer {token}"}, json=aws_data)
result = response.get('status_code')
if result != 409:
assert result == 200 | 0.461259 | 0.257616 |
from threading import Thread
from time import sleep
import WindowManager as WinMan
import MapManager as MapMan
import globals as global_vars
import curses
class InputListener(Thread):
    """Background thread translating curses keypresses into the control
    flags shared through the ``globals`` module (quit/pause/speed)."""
    def __init__(self):
        Thread.__init__(self)
    def run(self):
        """
        periodically querys the keyboard keypress and acts accordingly in its
        own thread. responds to the following keypresses: F1, F2, F3, F4 and mouse clicks
        """
        # Key codes 265-268 correspond to curses.KEY_F(1)..KEY_F(4).
        while not global_vars.quit:
            sleep(0.01)  # avoid busy-waiting on the keyboard
            key = WinMan.key_pressed()
            if key == 268: #F4 / Quit
                global_vars.quit = True
                continue
            if global_vars.hidden_run:
                # During a hidden run only Quit is honored.
                continue
            if key == 265: #F1 / Pause
                global_vars.pause = True
                # Swap the menu labels to their paused variants.
                WinMan.replace_option("Pause", "Resume")
                WinMan.replace_option("Faster", "Single Step")
                WinMan.replace_option("Slower", "")
                WinMan.update()
                # Remember the current speed so Resume can restore it.
                global_vars.swap_step_duration = global_vars.step_duration
                global_vars.step_duration = 0.1
                # Paused loop: only Resume, Single Step, Quit and mouse
                # tile inspection are processed until it exits.
                while True:
                    key = WinMan.key_pressed()
                    if key == 265: #F1 / Resume (changed at this point)
                        global_vars.pause = False
                        global_vars.step_duration = global_vars.swap_step_duration
                        # Restore the normal menu labels.
                        WinMan.replace_option("Resume", "Pause")
                        WinMan.replace_option("Single Step", "Faster")
                        WinMan.replace_option("", "Slower")
                        break
                    elif key == 266:
                        # F2 while paused: advance the simulation one step.
                        global_vars.single_step = True
                    elif key == 268: #F4 / Quit
                        global_vars.quit = True
                        break
                    elif key == curses.KEY_MOUSE:
                        # Click on a tile: watch its entity (map rows start
                        # 2 lines below the window top, hence y - 2).
                        id, x, y, z, bstate = curses.getmouse()
                        MapMan.set_watched_entity(y - 2, x)
                        WinMan.update(tile_info=MapMan.watch_info())
                    sleep(0.01)
            elif key == 266: #F2 / Faster
                # Speed up by 0.1s per step, clamped at a 0.1s minimum.
                global_vars.step_duration = round(
                    global_vars.step_duration - 0.1, 1
                )
                if global_vars.step_duration <= 0:
                    global_vars.step_duration = 0.1
                WinMan.update()
            elif key == 267: #F3 / Slower
                # Slow down by 0.1s per step, clamped at a 2s maximum.
                global_vars.step_duration = round(
                    global_vars.step_duration + 0.1, 1
                )
                if global_vars.step_duration > 2:
                    global_vars.step_duration = 2
                WinMan.update()
            elif key == curses.KEY_MOUSE:
                id, x, y, z, bstate = curses.getmouse()
                MapMan.set_watched_entity(y - 2, x)
from time import sleep
import WindowManager as WinMan
import MapManager as MapMan
import globals as global_vars
import curses
class InputListener(Thread):
def __init__(self):
Thread.__init__(self)
def run(self):
"""
periodically querys the keyboard keypress and acts accordingly in its
own thread. responds to the following keypresses: F1, F2, F3, F4 and mouse clicks
"""
while not global_vars.quit:
sleep(0.01)
key = WinMan.key_pressed()
if key == 268: #F4 / Quit
global_vars.quit = True
continue
if global_vars.hidden_run:
continue
if key == 265: #F1 / Pause
global_vars.pause = True
WinMan.replace_option("Pause", "Resume")
WinMan.replace_option("Faster", "Single Step")
WinMan.replace_option("Slower", "")
WinMan.update()
global_vars.swap_step_duration = global_vars.step_duration
global_vars.step_duration = 0.1
while True:
key = WinMan.key_pressed()
if key == 265: #F1 / Resume (changed at this point)
global_vars.pause = False
global_vars.step_duration = global_vars.swap_step_duration
WinMan.replace_option("Resume", "Pause")
WinMan.replace_option("Single Step", "Faster")
WinMan.replace_option("", "Slower")
break
elif key == 266:
global_vars.single_step = True
elif key == 268: #F4 / Quit
global_vars.quit = True
break
elif key == curses.KEY_MOUSE:
id, x, y, z, bstate = curses.getmouse()
MapMan.set_watched_entity(y - 2, x)
WinMan.update(tile_info=MapMan.watch_info())
sleep(0.01)
elif key == 266: #F2 / Faster
global_vars.step_duration = round(
global_vars.step_duration - 0.1, 1
)
if global_vars.step_duration <= 0:
global_vars.step_duration = 0.1
WinMan.update()
elif key == 267: #F3 / Slower
global_vars.step_duration = round(
global_vars.step_duration + 0.1, 1
)
if global_vars.step_duration > 2:
global_vars.step_duration = 2
WinMan.update()
elif key == curses.KEY_MOUSE:
id, x, y, z, bstate = curses.getmouse()
MapMan.set_watched_entity(y - 2, x) | 0.236604 | 0.104021 |
import gzip
import logging
from typing import Dict
import bioregistry
import click
import pandas as pd
from tqdm import tqdm
from .models import Alt, Reference, Resource, Synonym, Xref, create_all, drop_all, engine, session
from ...cli_utils import verbose_option
from ...resource_utils import ensure_alts, ensure_inspector_javert, ensure_ooh_na_na, ensure_synonyms
__all__ = [
'load',
]
logger = logging.getLogger(__name__)
@click.command()
@verbose_option
@click.option('--load-resources', is_flag=True)
@click.option('--load-names', is_flag=True)
@click.option('--load-alts', is_flag=True)
@click.option('--load-xrefs', is_flag=True)
@click.option('--load-synonyms', is_flag=True)
@click.option('-a', '--load-all', is_flag=True)
@click.option('--reset', is_flag=True)
def load(
    load_all: bool,
    load_resources: bool = False,
    load_names: bool = False,
    load_alts: bool = False,
    load_xrefs: bool = True,
    load_synonyms: bool = False,
    reset: bool = False,
) -> None:
    """Load the database.

    Optionally drops/recreates the schema, loads resources through the
    ORM, alt identifiers via pandas ``to_sql``, and the large
    name/synonym/xref tables through a low-level COPY for bulk speed.
    """
    if reset:
        drop_all()
        create_all()
    if load_resources or load_all:
        prefix_to_resource: Dict[str, Resource] = {}
        # Skip prefixes already in the table so the load is re-runnable.
        prefixes = {resource.prefix for resource in Resource.query.all()}
        for prefix, entry in tqdm(bioregistry.read_bioregistry().items(), desc='loading resources'):
            if bioregistry.is_deprecated(prefix):
                continue
            if prefix in prefixes:
                continue
            prefix_to_resource[prefix] = resource_model = Resource(
                prefix=prefix,
                name=entry['name'],
                pattern=bioregistry.get_pattern(prefix),
            )
            session.add(resource_model)
        session.commit()
    # Pre-fetch the bulk TSVs regardless of which loads were requested.
    ooh_na_na_path = ensure_ooh_na_na()
    synonyms_path = ensure_synonyms()
    xrefs_path = ensure_inspector_javert()
    if load_alts or load_all:
        alts_path = ensure_alts()
        alts_df = pd.read_csv(alts_path, sep='\t', dtype=str)  # prefix, alt, identifier
        logger.info('inserting %d alt identifiers', len(alts_df.index))
        alts_df.to_sql(name=Alt.__tablename__, con=engine, if_exists='append', index=False)
        logger.info('committing alt identifier')
        session.commit()
        logger.info('done committing alt identifiers')
    # (label, gzipped TSV path, ORM table, explicit column list or None
    #  for all columns, per-table flag)
    for label, path, table, columns, checker in [
        ('names', ooh_na_na_path, Reference, None, load_names),
        ('synonyms', synonyms_path, Synonym, ['prefix', 'identifier', 'name'], load_synonyms),
        ('xrefs', xrefs_path, Xref, ['prefix', 'identifier', 'xref_prefix', 'xref_identifier', 'source'], load_xrefs),
    ]:
        if not checker and not load_all:
            continue
        logger.info('beginning insertion of %s', label)
        conn = engine.raw_connection()
        logger.info('inserting with low-level copy of %s from: %s', label, path)
        if columns:
            columns = ', '.join(columns)
            logger.info('corresponding to columns: %s', columns)
            columns = f' ({columns})'
        else:
            columns = ''
        # NOTE(review): cursor.copy_expert looks psycopg2-specific, i.e.
        # this path presumably assumes a PostgreSQL backend -- confirm.
        with conn.cursor() as cursor, gzip.open(path) as file:
            # next(file)  # skip the header
            # (HEADER in the COPY statement consumes the first line instead.)
            sql = f'''COPY {table.__tablename__}{columns} FROM STDIN WITH CSV HEADER DELIMITER E'\\t' QUOTE E'\\b';'''
            logger.info('running SQL: %s', sql)
            cursor.copy_expert(sql=sql, file=file)
            logger.info('committing %s', label)
            conn.commit()
            logger.info('done committing %s', label)
    logger.info(f'number resources loaded: {Resource.query.count():,}')
    logger.info(f'number references loaded: {Reference.query.count():,}')
    logger.info(f'number alts loaded: {Alt.query.count():,}')
    logger.info(f'number synonyms loaded: {Synonym.query.count():,}')
    logger.info(f'number xrefs loaded: {Xref.query.count():,}')
if __name__ == '__main__':
load() | src/pyobo/database/sql/legacy_loader.py | import gzip
import logging
from typing import Dict
import bioregistry
import click
import pandas as pd
from tqdm import tqdm
from .models import Alt, Reference, Resource, Synonym, Xref, create_all, drop_all, engine, session
from ...cli_utils import verbose_option
from ...resource_utils import ensure_alts, ensure_inspector_javert, ensure_ooh_na_na, ensure_synonyms
__all__ = [
'load',
]
logger = logging.getLogger(__name__)
@click.command()
@verbose_option
@click.option('--load-resources', is_flag=True)
@click.option('--load-names', is_flag=True)
@click.option('--load-alts', is_flag=True)
@click.option('--load-xrefs', is_flag=True)
@click.option('--load-synonyms', is_flag=True)
@click.option('-a', '--load-all', is_flag=True)
@click.option('--reset', is_flag=True)
def load(
load_all: bool,
load_resources: bool = False,
load_names: bool = False,
load_alts: bool = False,
load_xrefs: bool = True,
load_synonyms: bool = False,
reset: bool = False,
) -> None:
"""Load the database."""
if reset:
drop_all()
create_all()
if load_resources or load_all:
prefix_to_resource: Dict[str, Resource] = {}
prefixes = {resource.prefix for resource in Resource.query.all()}
for prefix, entry in tqdm(bioregistry.read_bioregistry().items(), desc='loading resources'):
if bioregistry.is_deprecated(prefix):
continue
if prefix in prefixes:
continue
prefix_to_resource[prefix] = resource_model = Resource(
prefix=prefix,
name=entry['name'],
pattern=bioregistry.get_pattern(prefix),
)
session.add(resource_model)
session.commit()
ooh_na_na_path = ensure_ooh_na_na()
synonyms_path = ensure_synonyms()
xrefs_path = ensure_inspector_javert()
if load_alts or load_all:
alts_path = ensure_alts()
alts_df = pd.read_csv(alts_path, sep='\t', dtype=str) # prefix, alt, identifier
logger.info('inserting %d alt identifiers', len(alts_df.index))
alts_df.to_sql(name=Alt.__tablename__, con=engine, if_exists='append', index=False)
logger.info('committing alt identifier')
session.commit()
logger.info('done committing alt identifiers')
for label, path, table, columns, checker in [
('names', ooh_na_na_path, Reference, None, load_names),
('synonyms', synonyms_path, Synonym, ['prefix', 'identifier', 'name'], load_synonyms),
('xrefs', xrefs_path, Xref, ['prefix', 'identifier', 'xref_prefix', 'xref_identifier', 'source'], load_xrefs),
]:
if not checker and not load_all:
continue
logger.info('beginning insertion of %s', label)
conn = engine.raw_connection()
logger.info('inserting with low-level copy of %s from: %s', label, path)
if columns:
columns = ', '.join(columns)
logger.info('corresponding to columns: %s', columns)
columns = f' ({columns})'
else:
columns = ''
with conn.cursor() as cursor, gzip.open(path) as file:
# next(file) # skip the header
sql = f'''COPY {table.__tablename__}{columns} FROM STDIN WITH CSV HEADER DELIMITER E'\\t' QUOTE E'\\b';'''
logger.info('running SQL: %s', sql)
cursor.copy_expert(sql=sql, file=file)
logger.info('committing %s', label)
conn.commit()
logger.info('done committing %s', label)
logger.info(f'number resources loaded: {Resource.query.count():,}')
logger.info(f'number references loaded: {Reference.query.count():,}')
logger.info(f'number alts loaded: {Alt.query.count():,}')
logger.info(f'number synonyms loaded: {Synonym.query.count():,}')
logger.info(f'number xrefs loaded: {Xref.query.count():,}')
if __name__ == '__main__':
load() | 0.576542 | 0.077797 |
import numpy as np
import sys, os, re, music21
from optparse import OptionParser
from multiprocessing import Process
from collections import deque
from sqlalchemy import desc, asc
from db import Song, Track, Note, get_sessions
from ngram_helper import key_transpose_pitch
from exceptions import InvalidKeySignature
NUM_NOTES = 128
class RomanTrainer(object):
    """
    A RomanTrainer accumulates an ngram (triple) count matrix
    1. for a given process / database, and
    2. for a given roman numeral.
    """

    def __init__(self, p_id, rt_id, counts, options):
        """
        Initialize the RomanTrainer

        Args:
            p_id: process id
            rt_id: roman numeral id (1-7)
            counts: counts matrix, shape (NUM_NOTES, NUM_NOTES, NUM_NOTES)
            options: options passed into script (must provide .key, .outdir)
        """
        self.p_id = p_id
        self.rt_id = rt_id
        self.counts = counts
        # sliding window over the most recent notes seen
        self.triple = deque()
        self.options = options
        # assume the user has specified a major key
        self.dest_key = (music21.key.Key(options.key).sharps, 0)

    def transposed_triple(self):
        """
        Transpose the current triple into the destination key.

        Returns:
            int[]: the transposed pitches

        Raises:
            InvalidKeySignature: propagated from key_transpose_pitch
        """
        res = []
        for note in list(self.triple):
            src_key = (note.track.key_sig_top, note.track.key_sig_bottom)
            res.append(key_transpose_pitch(note.pitch, src_key, self.dest_key))
        return res

    def train(self, note):
        """
        Train this RomanTrainer on a given note

        Args:
            note: the note to train on
        """
        self.triple.append(note)
        if len(self.triple) > 3:
            # remove the old note
            old_note = self.triple.popleft()
            try:
                # increment the matrix, where appropriate
                np.add.at(self.counts, tuple(self.transposed_triple()), 1)
            # FIX: `except E, e:` is Python-2-only syntax; the `as`-less
            # form below is valid on Python 2.6+ and 3 (the bound name
            # was unused anyway).
            except InvalidKeySignature:
                # remove the bad note, append the old note.
                self.triple.pop()
                self.triple.appendleft(old_note)

    def write(self):
        """
        Write the numpy counts matrix out to file.
        """
        path = os.path.join(self.options.outdir, str(self.p_id), str(self.rt_id) + ".npy")
        # FIX: np.save writes binary data, so the file must be opened in
        # binary mode ('wb'); text mode corrupts output on Windows and
        # fails outright on Python 3.
        with open(path, 'wb') as outfile:
            np.save(outfile, self.counts)
class TrackTrainer(Process):
"""
Separate process to train ngram models, all music sourcing from one database
"""
def __init__(self,p_id,session,options):
"""
Initialize the TrackTrainer process
Args:
p_id: process id
session: the database session to load from
options (dict): options passed to script
"""
Process.__init__(self)
self.session = session
self.options = options
self.rts = []
matrix_size = (NUM_NOTES, NUM_NOTES, NUM_NOTES)
# construct the roman trainers
for i in xrange(7):
rt = RomanTrainer(p_id,i + 1,np.zeros(matrix_size, dtype=np.int16),options)
self.rts.append(rt)
def run(self):
"""
Start the process, training on each track separately
"""
# iterate through all the tracks
for trk in self.session.query(Track).all():
self.train(trk)
# write all the rts
for rt in self.rts:
rt.write()
def train(self,trk):
"""
Train the ngram model on a specific track
Args:
trk: the track on which to train
"""
print os.path.basename(trk.song.title), ":", trk.instr_name
# skip percurssion tracks
regexp = re.compile(r'drum|cymbal', re.IGNORECASE)
if trk.channel == 9 or regexp.search(trk.instr_name) is not None:
# print 'skipped percussion track'
return
# skip bass tracks
regexp = re.compile(r'bass', re.IGNORECASE)
if (trk.channel >= 32 and trk.channel <= 39) or regexp.search(trk.instr_name) is not None:
# print 'skipped bass track'
return
# and through all the notes in a track
for note in trk.notes:
if note.pitch < 0 or note.pitch >= NUM_NOTES:
pass
# train using the appropriate rt
if note.roman:
self.rts[note.roman-1].train(note)
def main():
parser = OptionParser()
parser.add_option("-o", "--outdir", dest="outdir")
parser.add_option("-t", "--poolsize", dest="pool_size", default=8, type="int")
parser.add_option("-k", "--key", dest="key", default="C")
parser.add_option("-u", "--username", dest="db_username", default="postgres")
parser.add_option("-p", "--password", dest="db_password", default="<PASSWORD>")
(options, args) = parser.parse_args()
# make the process output directory if not there already
for p_id in xrange(options.pool_size):
print options.outdir
pt = os.path.join(options.outdir,str(p_id) + "/")
print pt
if not os.path.exists(pt):
os.mkdir(pt)
sessions = get_sessions(options.pool_size,options.db_username,options.db_password)
processes = []
# construct and start the threads
for i in xrange(options.pool_size):
p = TrackTrainer(str(i),sessions[i],options)
processes.append(p)
p.start()
# wait for processes to complete
for p in processes:
p.join()
# construct cumulative counts matrices
matrix_size = (NUM_NOTES, NUM_NOTES, NUM_NOTES)
cumulative_counts = []
for i in xrange(7):
cumulative_counts.append(np.zeros(matrix_size, dtype=np.int16))
for p_id in xrange(options.pool_size):
for rt_id in xrange(7):
with open(os.path.join(options.outdir + "/",str(p_id) + "/",str(rt_id + 1) + ".npy")) as f:
counts = np.load(f)
cumulative_counts[rt_id] = np.add(cumulative_counts[rt_id],counts)
for i in xrange(7):
with open(os.path.join(options.outdir + "/",str(i+1) + ".npy"), "w") as f:
np.save(f,cumulative_counts[i])
if __name__ == '__main__':
main() | src/artist_generator/ngram/train.py | import numpy as np
import sys, os, re, music21
from optparse import OptionParser
from multiprocessing import Process
from collections import deque
from sqlalchemy import desc, asc
from db import Song, Track, Note, get_sessions
from ngram_helper import key_transpose_pitch
from exceptions import InvalidKeySignature
NUM_NOTES = 128
class RomanTrainer(object):
"""
A RomanTrainer is the model trainer
1. for a given process / database, and
2. for a given roman numeral.
"""
def __init__(self,p_id,rt_id,counts,options):
"""
Initialize the RomanTrainer
Args:
p_id: process id
rt_id: roman numeral id
counts: counts matrix
options: options passed into script
"""
self.p_id = p_id
self.rt_id = rt_id
self.counts = counts
self.triple = deque()
self.options = options
# assume the user has specified a major key
self.dest_key = (music21.key.Key(options.key).sharps,0)
def transposed_triple(self):
"""
Transpose a triple into the appropriate key
Returns:
int[]: the transposed triple
"""
res = []
notes = list(self.triple)
for note in notes:
src_key = (note.track.key_sig_top,note.track.key_sig_bottom)
res.append(key_transpose_pitch(note.pitch,src_key,self.dest_key))
return res
def train(self,note):
"""
Train this RomanTrained on a given note
Args:
note: the note to train on
"""
self.triple.append(note)
if len(self.triple) > 3:
# remove the old note
old_note = self.triple.popleft()
try:
# increment the matrix, where appropriate
np.add.at(self.counts, tuple(self.transposed_triple()), 1)
except InvalidKeySignature, e:
# remove the bad note, append the old note.
self.triple.pop()
self.triple.appendleft(old_note)
def write(self):
"""
Write the numpy counts matrix out to file.
"""
with open(os.path.join(self.options.outdir,str(self.p_id),str(self.rt_id) + ".npy"), 'w') as outfile:
np.save(outfile, self.counts)
class TrackTrainer(Process):
"""
Separate process to train ngram models, all music sourcing from one database
"""
def __init__(self,p_id,session,options):
"""
Initialize the TrackTrainer process
Args:
p_id: process id
session: the database session to load from
options (dict): options passed to script
"""
Process.__init__(self)
self.session = session
self.options = options
self.rts = []
matrix_size = (NUM_NOTES, NUM_NOTES, NUM_NOTES)
# construct the roman trainers
for i in xrange(7):
rt = RomanTrainer(p_id,i + 1,np.zeros(matrix_size, dtype=np.int16),options)
self.rts.append(rt)
def run(self):
"""
Start the process, training on each track separately
"""
# iterate through all the tracks
for trk in self.session.query(Track).all():
self.train(trk)
# write all the rts
for rt in self.rts:
rt.write()
def train(self,trk):
"""
Train the ngram model on a specific track
Args:
trk: the track on which to train
"""
print os.path.basename(trk.song.title), ":", trk.instr_name
# skip percurssion tracks
regexp = re.compile(r'drum|cymbal', re.IGNORECASE)
if trk.channel == 9 or regexp.search(trk.instr_name) is not None:
# print 'skipped percussion track'
return
# skip bass tracks
regexp = re.compile(r'bass', re.IGNORECASE)
if (trk.channel >= 32 and trk.channel <= 39) or regexp.search(trk.instr_name) is not None:
# print 'skipped bass track'
return
# and through all the notes in a track
for note in trk.notes:
if note.pitch < 0 or note.pitch >= NUM_NOTES:
pass
# train using the appropriate rt
if note.roman:
self.rts[note.roman-1].train(note)
def main():
parser = OptionParser()
parser.add_option("-o", "--outdir", dest="outdir")
parser.add_option("-t", "--poolsize", dest="pool_size", default=8, type="int")
parser.add_option("-k", "--key", dest="key", default="C")
parser.add_option("-u", "--username", dest="db_username", default="postgres")
parser.add_option("-p", "--password", dest="db_password", default="<PASSWORD>")
(options, args) = parser.parse_args()
# make the process output directory if not there already
for p_id in xrange(options.pool_size):
print options.outdir
pt = os.path.join(options.outdir,str(p_id) + "/")
print pt
if not os.path.exists(pt):
os.mkdir(pt)
sessions = get_sessions(options.pool_size,options.db_username,options.db_password)
processes = []
# construct and start the threads
for i in xrange(options.pool_size):
p = TrackTrainer(str(i),sessions[i],options)
processes.append(p)
p.start()
# wait for processes to complete
for p in processes:
p.join()
# construct cumulative counts matrices
matrix_size = (NUM_NOTES, NUM_NOTES, NUM_NOTES)
cumulative_counts = []
for i in xrange(7):
cumulative_counts.append(np.zeros(matrix_size, dtype=np.int16))
for p_id in xrange(options.pool_size):
for rt_id in xrange(7):
with open(os.path.join(options.outdir + "/",str(p_id) + "/",str(rt_id + 1) + ".npy")) as f:
counts = np.load(f)
cumulative_counts[rt_id] = np.add(cumulative_counts[rt_id],counts)
for i in xrange(7):
with open(os.path.join(options.outdir + "/",str(i+1) + ".npy"), "w") as f:
np.save(f,cumulative_counts[i])
if __name__ == '__main__':
main() | 0.486575 | 0.244262 |
from math import ceil
from copy import copy
import torch
import numpy as np
import skimage
from skimage import io
from skimage import color
from sklearn.decomposition import PCA
from sklearn.preprocessing import minmax_scale
class RunningAverage:
    """Incrementally maintained arithmetic mean of the samples seen so far."""

    def __init__(self):
        # number of samples folded in so far
        self.iter = 0
        # current mean of those samples
        self.avg = 0.0

    def update(self, x):
        """Fold one more sample into the running mean."""
        total = self.avg * self.iter + x
        self.iter += 1
        self.avg = total / self.iter

    def __str__(self):
        # shows '-' until the first sample arrives
        return '-' if self.iter == 0 else f'{self.avg:.4f}'
def pca(ebd, n_points=256, n_components=3):
    """Project an (H, W, D) embedding map down to n_components channels.

    A PCA basis is fitted on n_points randomly sampled pixels and then
    applied to every pixel; each output component is min-max scaled.

    Args:
        ebd: (H, W, D) embedding array.
        n_points: number of random pixels to fit the PCA on.
        n_components: output channel count.

    Returns:
        (H, W, n_components) array, min-max scaled per component.
    """
    H, W, D = ebd.shape
    # draw random pixel coordinates (uniform over the image)
    coords = np.random.rand(n_points, 2) * np.float32([H, W])
    coords = np.floor(coords).astype(np.int32)
    sample = ebd[coords[:, 0], coords[:, 1], :]
    # fit on the sample, then transform the full (flattened) map
    projector = PCA(n_components=n_components).fit(sample)
    flat = projector.transform(ebd.reshape(H * W, D))
    return minmax_scale(flat).reshape(H, W, -1)
def make_grid(arrs, per_row=-1, padding=2, pad_value=0):
    """Tile a list of equally-sized images into one padded grid image.

    Args:
        arrs: non-empty list of images with identical height/width;
            grayscale (H, W) images are expanded to RGB, uint8 images
            are converted to float in [0, 1].
        per_row: images per grid row; -1 puts everything on one row.
        padding: pixel gap between neighbouring tiles.
        pad_value: fill value for the gaps (and any trailing empty cells).

    Returns:
        (gridH, gridW, 3) float64 array.
    """
    assert len(arrs) > 0
    for arr in arrs:
        assert arr.shape[:2] == arrs[0].shape[:2]
    tiles = copy(arrs)
    count = len(tiles)
    # expand grayscale tiles to RGB first ...
    for idx in range(count):
        if tiles[idx].ndim == 2:
            tiles[idx] = color.gray2rgb(tiles[idx])
    # ... then normalize uint8 tiles to float in [0, 1]
    for idx in range(count):
        if tiles[idx].dtype == np.dtype(np.uint8):
            tiles[idx] = skimage.img_as_float(tiles[idx])
    imgH, imgW, _ = tiles[0].shape
    cols = count if per_row == -1 else per_row
    rows = ceil(count / cols)
    width = cols * imgW + (cols - 1) * padding
    height = rows * imgH + (rows - 1) * padding
    grid = np.full((height, width, 3), pad_value, dtype=np.float64)
    # paint each tile into its cell, row-major
    for idx, tile in enumerate(tiles):
        c0 = (idx % cols) * (imgW + padding)
        r0 = (idx // cols) * (imgH + padding)
        grid[r0:r0 + imgH, c0:c0 + imgW] = tile
    return grid
def save_grid(arrs, filename, *args, **kwargs):
    """Compose arrs into one grid image (see make_grid) and save it.

    The float grid is scaled to uint8 before writing.
    NOTE(review): the `quality` kwarg is JPEG-specific -- confirm the
    installed skimage accepts it for the formats actually used here.
    """
    img = make_grid(arrs, *args, **kwargs)
    img = (img * 255).clip(0, 255).astype(np.uint8)
    io.imsave(filename, img, quality=100)
def np2torch(data):
    """Convert an HWC / NHWC numpy image array to a CHW / NCHW torch tensor.

    Args:
        data: numpy array, either (N, H, W, C) or (H, W, C).

    Returns:
        torch.Tensor sharing memory with `data`, channel axis moved
        in front of the spatial axes.

    Raises:
        ValueError: if data is not 3- or 4-dimensional.
    """
    if data.ndim == 4:
        return torch.from_numpy(data.transpose([0, 3, 1, 2]))
    if data.ndim == 3:
        return torch.from_numpy(data.transpose([2, 0, 1]))
    # FIX: raise instead of `assert False` -- asserts are stripped under
    # `python -O`, which made this silently return None on bad input.
    raise ValueError('Input should have 3 or 4 dimensions')
def torch2np(data):
    """Convert a CHW / NCHW torch tensor to an HWC / NHWC numpy array.

    Args:
        data: torch.Tensor, either (N, C, H, W) or (C, H, W).

    Returns:
        numpy array with the channel axis moved after the spatial axes.

    Raises:
        ValueError: if data is not 3- or 4-dimensional.
    """
    data = data.numpy()
    if data.ndim == 4:
        return data.transpose([0, 2, 3, 1])
    if data.ndim == 3:
        return data.transpose([1, 2, 0])
    # FIX: raise instead of `assert False` -- asserts are stripped under
    # `python -O`, which made this silently return None on bad input.
    raise ValueError('Input should have 3 or 4 dimensions')
from copy import copy
import torch
import numpy as np
import skimage
from skimage import io
from skimage import color
from sklearn.decomposition import PCA
from sklearn.preprocessing import minmax_scale
class RunningAverage:
def __init__(self):
self.iter = 0
self.avg = 0.0
def update(self, x):
self.avg = (self.avg * self.iter + x) / (self.iter + 1)
self.iter += 1
def __str__(self):
if self.iter == 0:
return '-'
return f'{self.avg:.4f}'
def pca(ebd, n_points=256, n_components=3):
H, W, D = ebd.shape
randp = np.random.rand(n_points, 2) * np.float32([H, W])
randp = np.floor(randp).astype(np.int32)
randp = ebd[randp[:, 0], randp[:, 1], :]
pca = PCA(n_components=n_components).fit(randp)
ebd = pca.transform(ebd.reshape(H * W, D))
ebd = minmax_scale(ebd).reshape(H, W, -1)
return ebd
def make_grid(arrs, per_row=-1, padding=2, pad_value=0):
assert len(arrs) > 0
for arr in arrs:
assert arr.shape[:2] == arrs[0].shape[:2]
arrs = copy(arrs)
n_arr = len(arrs)
for i in range(n_arr):
if arrs[i].ndim == 2:
arrs[i] = color.gray2rgb(arrs[i])
for i in range(n_arr):
if arrs[i].dtype == np.dtype(np.uint8):
arrs[i] = skimage.img_as_float(arrs[i])
imgH, imgW, _ = arrs[0].shape
per_row = n_arr if per_row == -1 else per_row
per_col = ceil(n_arr / per_row)
gridW = per_row * imgW + (per_row - 1) * padding
gridH = per_col * imgH + (per_col - 1) * padding
grid = np.full((gridH, gridW, 3), pad_value, dtype=np.float64)
for i in range(n_arr):
c = (i % per_row) * (imgW + padding)
r = (i // per_row) * (imgH + padding)
grid[r:r+imgH, c:c+imgW] = arrs[i]
return grid
def save_grid(arrs, filename, *args, **kwargs):
grid = make_grid(arrs, *args, **kwargs)
grid = (grid * 255).clip(0, 255).astype(np.uint8)
io.imsave(filename, grid, quality=100)
def np2torch(data):
if data.ndim == 4:
return torch.from_numpy(data.transpose([0, 3, 1, 2]))
if data.ndim == 3:
return torch.from_numpy(data.transpose([2, 0, 1]))
assert False, 'Input should has 3 or 4 dimensions'
def torch2np(data):
data = data.numpy()
if data.ndim == 4:
return data.transpose([0, 2, 3, 1])
if data.ndim == 3:
return data.transpose([1, 2, 0])
assert False, 'Input should has 3 or 4 dimensions' | 0.820541 | 0.543711 |
model = {
u'yn ': 0,
u'dd ': 1,
u' yn': 2,
u' y ': 3,
u'ydd': 4,
u'eth': 5,
u'th ': 6,
u' i ': 7,
u'aet': 8,
u'd y': 9,
u'ch ': 10,
u'od ': 11,
u'ol ': 12,
u'edd': 13,
u' ga': 14,
u' gw': 15,
u"'r ": 16,
u'au ': 17,
u'ddi': 18,
u'ad ': 19,
u' cy': 20,
u' gy': 21,
u' ei': 22,
u' o ': 23,
u'iad': 24,
u'yr ': 25,
u'an ': 26,
u'bod': 27,
u'wed': 28,
u' bo': 29,
u' dd': 30,
u'el ': 31,
u'n y': 32,
u' am': 33,
u'di ': 34,
u'edi': 35,
u'on ': 36,
u' we': 37,
u' ym': 38,
u' ar': 39,
u' rh': 40,
u'odd': 41,
u' ca': 42,
u' ma': 43,
u'ael': 44,
u'oed': 45,
u'dae': 46,
u'n a': 47,
u'dda': 48,
u'er ': 49,
u'h y': 50,
u'all': 51,
u'ei ': 52,
u' ll': 53,
u'am ': 54,
u'eu ': 55,
u'fod': 56,
u'fyd': 57,
u'l y': 58,
u'n g': 59,
u'wyn': 60,
u'd a': 61,
u'i g': 62,
u'mae': 63,
u'neu': 64,
u'os ': 65,
u' ne': 66,
u'd i': 67,
u'dod': 68,
u'dol': 69,
u'n c': 70,
u'r h': 71,
u'wyd': 72,
u'wyr': 73,
u'ai ': 74,
u'ar ': 75,
u'in ': 76,
u'rth': 77,
u' fy': 78,
u' he': 79,
u' me': 80,
u' yr': 81,
u"'n ": 82,
u'dia': 83,
u'est': 84,
u'h c': 85,
u'hai': 86,
u'i d': 87,
u'id ': 88,
u'r y': 89,
u'y b': 90,
u' dy': 91,
u' ha': 92,
u'ada': 93,
u'i b': 94,
u'n i': 95,
u'ote': 96,
u'rot': 97,
u'tes': 98,
u'y g': 99,
u'yd ': 100,
u' ad': 101,
u' mr': 102,
u' un': 103,
u'cyn': 104,
u'dau': 105,
u'ddy': 106,
u'edo': 107,
u'i c': 108,
u'i w': 109,
u'ith': 110,
u'lae': 111,
u'lla': 112,
u'nd ': 113,
u'oda': 114,
u'ryd': 115,
u'tho': 116,
u' a ': 117,
u' dr': 118,
u'aid': 119,
u'ain': 120,
u'ddo': 121,
u'dyd': 122,
u'fyn': 123,
u'gyn': 124,
u'hol': 125,
u'io ': 126,
u'o a': 127,
u'wch': 128,
u'wyb': 129,
u'ybo': 130,
u'ych': 131,
u' br': 132,
u' by': 133,
u' di': 134,
u' fe': 135,
u' na': 136,
u" o'": 137,
u' pe': 138,
u'art': 139,
u'byd': 140,
u'dro': 141,
u'gal': 142,
u'l e': 143,
u'lai': 144,
u'mr ': 145,
u'n n': 146,
u'r a': 147,
u'rhy': 148,
u'wn ': 149,
u'ynn': 150,
u' on': 151,
u' r ': 152,
u'cae': 153,
u'd g': 154,
u'd o': 155,
u'd w': 156,
u'gan': 157,
u'gwy': 158,
u'n d': 159,
u'n f': 160,
u'n o': 161,
u'ned': 162,
u'ni ': 163,
u"o'r": 164,
u'r d': 165,
u'ud ': 166,
u'wei': 167,
u'wrt': 168,
u' an': 169,
u' cw': 170,
u' da': 171,
u' ni': 172,
u' pa': 173,
u' pr': 174,
u' wy': 175,
u'd e': 176,
u'dai': 177,
u'dim': 178,
u'eud': 179,
u'gwa': 180,
u'idd': 181,
u'im ': 182,
u'iri': 183,
u'lwy': 184,
u'n b': 185,
u'nol': 186,
u'r o': 187,
u'rwy': 188,
u' ch': 189,
u' er': 190,
u' fo': 191,
u' ge': 192,
u' hy': 193,
u" i'": 194,
u' ro': 195,
u' sa': 196,
u' tr': 197,
u'bob': 198,
u'cwy': 199,
u'cyf': 200,
u'dio': 201,
u'dyn': 202,
u'eit': 203,
u'hel': 204,
u'hyn': 205,
u'ich': 206,
u'll ': 207,
u'mdd': 208,
u'n r': 209,
u'ond': 210,
u'pro': 211,
u'r c': 212,
u'r g': 213,
u'red': 214,
u'rha': 215,
u'u a': 216,
u'u c': 217,
u'u y': 218,
u'y c': 219,
u'ymd': 220,
u'ymr': 221,
u'yw ': 222,
u' ac': 223,
u' be': 224,
u' bl': 225,
u' co': 226,
u' os': 227,
u'adw': 228,
u'ae ': 229,
u'af ': 230,
u'd p': 231,
u'efn': 232,
u'eic': 233,
u'en ': 234,
u'eol': 235,
u'es ': 236,
u'fer': 237,
u'gel': 238,
u'h g': 239,
u'hod': 240,
u'ied': 241,
u'ir ': 242,
u'laf': 243,
u'n h': 244,
u'na ': 245,
u'nyd': 246,
u'odo': 247,
u'ofy': 248,
u'rdd': 249,
u'rie': 250,
u'ros': 251,
u'stw': 252,
u'twy': 253,
u'yda': 254,
u'yng': 255,
u' at': 256,
u' de': 257,
u' go': 258,
u' id': 259,
u' oe': 260,
u' â ': 261,
u"'ch": 262,
u'ac ': 263,
u'ach': 264,
u"ae'": 265,
u'al ': 266,
u'bl ': 267,
u'd c': 268,
u'd l': 269,
u'dan': 270,
u'dde': 271,
u'ddw': 272,
u'dir': 273,
u'dla': 274,
u'ed ': 275,
u'ela': 276,
u'ell': 277,
u'ene': 278,
u'ewn': 279,
u'gyd': 280,
u'hau': 281,
u'hyw': 282,
u'i a': 283,
u'i f': 284,
u'iol': 285,
u'ion': 286,
u'l a': 287,
u'l i': 288,
u'lia': 289,
u'med': 290,
u'mon': 291,
u'n s': 292,
u'no ': 293,
u'obl': 294,
u'ola': 295,
u'ref': 296,
u'rn ': 297,
u'thi': 298,
u'un ': 299,
} | env/lib/python2.7/site-packages/guess_language/data/models/cy.py | model = {
u'yn ': 0,
u'dd ': 1,
u' yn': 2,
u' y ': 3,
u'ydd': 4,
u'eth': 5,
u'th ': 6,
u' i ': 7,
u'aet': 8,
u'd y': 9,
u'ch ': 10,
u'od ': 11,
u'ol ': 12,
u'edd': 13,
u' ga': 14,
u' gw': 15,
u"'r ": 16,
u'au ': 17,
u'ddi': 18,
u'ad ': 19,
u' cy': 20,
u' gy': 21,
u' ei': 22,
u' o ': 23,
u'iad': 24,
u'yr ': 25,
u'an ': 26,
u'bod': 27,
u'wed': 28,
u' bo': 29,
u' dd': 30,
u'el ': 31,
u'n y': 32,
u' am': 33,
u'di ': 34,
u'edi': 35,
u'on ': 36,
u' we': 37,
u' ym': 38,
u' ar': 39,
u' rh': 40,
u'odd': 41,
u' ca': 42,
u' ma': 43,
u'ael': 44,
u'oed': 45,
u'dae': 46,
u'n a': 47,
u'dda': 48,
u'er ': 49,
u'h y': 50,
u'all': 51,
u'ei ': 52,
u' ll': 53,
u'am ': 54,
u'eu ': 55,
u'fod': 56,
u'fyd': 57,
u'l y': 58,
u'n g': 59,
u'wyn': 60,
u'd a': 61,
u'i g': 62,
u'mae': 63,
u'neu': 64,
u'os ': 65,
u' ne': 66,
u'd i': 67,
u'dod': 68,
u'dol': 69,
u'n c': 70,
u'r h': 71,
u'wyd': 72,
u'wyr': 73,
u'ai ': 74,
u'ar ': 75,
u'in ': 76,
u'rth': 77,
u' fy': 78,
u' he': 79,
u' me': 80,
u' yr': 81,
u"'n ": 82,
u'dia': 83,
u'est': 84,
u'h c': 85,
u'hai': 86,
u'i d': 87,
u'id ': 88,
u'r y': 89,
u'y b': 90,
u' dy': 91,
u' ha': 92,
u'ada': 93,
u'i b': 94,
u'n i': 95,
u'ote': 96,
u'rot': 97,
u'tes': 98,
u'y g': 99,
u'yd ': 100,
u' ad': 101,
u' mr': 102,
u' un': 103,
u'cyn': 104,
u'dau': 105,
u'ddy': 106,
u'edo': 107,
u'i c': 108,
u'i w': 109,
u'ith': 110,
u'lae': 111,
u'lla': 112,
u'nd ': 113,
u'oda': 114,
u'ryd': 115,
u'tho': 116,
u' a ': 117,
u' dr': 118,
u'aid': 119,
u'ain': 120,
u'ddo': 121,
u'dyd': 122,
u'fyn': 123,
u'gyn': 124,
u'hol': 125,
u'io ': 126,
u'o a': 127,
u'wch': 128,
u'wyb': 129,
u'ybo': 130,
u'ych': 131,
u' br': 132,
u' by': 133,
u' di': 134,
u' fe': 135,
u' na': 136,
u" o'": 137,
u' pe': 138,
u'art': 139,
u'byd': 140,
u'dro': 141,
u'gal': 142,
u'l e': 143,
u'lai': 144,
u'mr ': 145,
u'n n': 146,
u'r a': 147,
u'rhy': 148,
u'wn ': 149,
u'ynn': 150,
u' on': 151,
u' r ': 152,
u'cae': 153,
u'd g': 154,
u'd o': 155,
u'd w': 156,
u'gan': 157,
u'gwy': 158,
u'n d': 159,
u'n f': 160,
u'n o': 161,
u'ned': 162,
u'ni ': 163,
u"o'r": 164,
u'r d': 165,
u'ud ': 166,
u'wei': 167,
u'wrt': 168,
u' an': 169,
u' cw': 170,
u' da': 171,
u' ni': 172,
u' pa': 173,
u' pr': 174,
u' wy': 175,
u'd e': 176,
u'dai': 177,
u'dim': 178,
u'eud': 179,
u'gwa': 180,
u'idd': 181,
u'im ': 182,
u'iri': 183,
u'lwy': 184,
u'n b': 185,
u'nol': 186,
u'r o': 187,
u'rwy': 188,
u' ch': 189,
u' er': 190,
u' fo': 191,
u' ge': 192,
u' hy': 193,
u" i'": 194,
u' ro': 195,
u' sa': 196,
u' tr': 197,
u'bob': 198,
u'cwy': 199,
u'cyf': 200,
u'dio': 201,
u'dyn': 202,
u'eit': 203,
u'hel': 204,
u'hyn': 205,
u'ich': 206,
u'll ': 207,
u'mdd': 208,
u'n r': 209,
u'ond': 210,
u'pro': 211,
u'r c': 212,
u'r g': 213,
u'red': 214,
u'rha': 215,
u'u a': 216,
u'u c': 217,
u'u y': 218,
u'y c': 219,
u'ymd': 220,
u'ymr': 221,
u'yw ': 222,
u' ac': 223,
u' be': 224,
u' bl': 225,
u' co': 226,
u' os': 227,
u'adw': 228,
u'ae ': 229,
u'af ': 230,
u'd p': 231,
u'efn': 232,
u'eic': 233,
u'en ': 234,
u'eol': 235,
u'es ': 236,
u'fer': 237,
u'gel': 238,
u'h g': 239,
u'hod': 240,
u'ied': 241,
u'ir ': 242,
u'laf': 243,
u'n h': 244,
u'na ': 245,
u'nyd': 246,
u'odo': 247,
u'ofy': 248,
u'rdd': 249,
u'rie': 250,
u'ros': 251,
u'stw': 252,
u'twy': 253,
u'yda': 254,
u'yng': 255,
u' at': 256,
u' de': 257,
u' go': 258,
u' id': 259,
u' oe': 260,
u' â ': 261,
u"'ch": 262,
u'ac ': 263,
u'ach': 264,
u"ae'": 265,
u'al ': 266,
u'bl ': 267,
u'd c': 268,
u'd l': 269,
u'dan': 270,
u'dde': 271,
u'ddw': 272,
u'dir': 273,
u'dla': 274,
u'ed ': 275,
u'ela': 276,
u'ell': 277,
u'ene': 278,
u'ewn': 279,
u'gyd': 280,
u'hau': 281,
u'hyw': 282,
u'i a': 283,
u'i f': 284,
u'iol': 285,
u'ion': 286,
u'l a': 287,
u'l i': 288,
u'lia': 289,
u'med': 290,
u'mon': 291,
u'n s': 292,
u'no ': 293,
u'obl': 294,
u'ola': 295,
u'ref': 296,
u'rn ': 297,
u'thi': 298,
u'un ': 299,
} | 0.36625 | 0.067117 |