code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
import pygame
# NOTE(review): named WHITE, but (48, 48, 48) is actually a dark gray.
WHITE = (48, 48, 48)
# Window dimensions in pixels (portrait orientation).
displaywidth = 470
displayheight = 840
# Display surface and frame clock; both are assigned in base().
displayobj = None
clock = None
# Two copies of the background image are used for a vertical scroll effect.
imgbackA = pygame.image.load('image/back.png')
imgbackB = imgbackA.copy()
def iotsetcaption(caption):
    """Set the text shown in the window's title bar."""
    pygame.display.set_caption(caption)
def iotbackdraw(image, x, y):
    """Blit *image* onto the module-level display surface at (x, y)."""
    # Reading the module-level surface needs no `global` declaration.
    displayobj.blit(image, (x, y))
def iotgo():
    """Main game loop: scroll two background copies downward at 60 FPS.

    Exits when the window close button is used or SPACE is pressed.
    Calls pygame.quit() exactly once, after the loop ends.
    """
    global displayobj
    global clock
    backAposy = 0
    backBposy = -displayheight
    running = True
    while running:
        # BUG FIX: the original called pygame.quit() on the QUIT event but
        # kept looping, so the next pygame call would raise an error, and it
        # also called pygame.quit() twice on the SPACE path. Use a flag and
        # quit once after the loop.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        key = pygame.key.get_pressed()
        if key[pygame.K_SPACE]:
            running = False
        if not running:
            break
        # Move both background copies down; when copy A scrolls off the
        # bottom, reset both so the scroll appears seamless.
        backAposy = backAposy + 2
        backBposy = backBposy + 2
        if displayheight <= backAposy:
            backAposy = 0
            backBposy = -displayheight
        displayobj.fill(WHITE)
        iotbackdraw(imgbackA, 0, backAposy)
        iotbackdraw(imgbackB, 0, backBposy)
        pygame.display.update()
        clock.tick(60)
    pygame.quit()
def base():
    """Initialize pygame, open the game window, and enter the main loop."""
    global displayobj
    global clock
    pygame.init()
    iotsetcaption("IoT Game")
    # Create the display surface and frame clock shared by the other helpers.
    displayobj = pygame.display.set_mode((displaywidth, displayheight))
    clock = pygame.time.Clock()
    iotgo()
base()  # runs immediately on import/execution; no __main__ guard in this script
|
[
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.init",
"pygame.display.update",
"pygame.image.load",
"pygame.display.set_caption",
"pygame.time.Clock",
"pygame.key.get_pressed"
] |
[((142, 177), 'pygame.image.load', 'pygame.image.load', (['"""image/back.png"""'], {}), "('image/back.png')\n", (159, 177), False, 'import pygame\n'), ((243, 278), 'pygame.display.set_caption', 'pygame.display.set_caption', (['caption'], {}), '(caption)\n', (269, 278), False, 'import pygame\n'), ((1070, 1083), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1081, 1083), False, 'import pygame\n'), ((1141, 1154), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1152, 1154), False, 'import pygame\n'), ((1202, 1256), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(displaywidth, displayheight)'], {}), '((displaywidth, displayheight))\n', (1225, 1256), False, 'import pygame\n'), ((1269, 1288), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (1286, 1288), False, 'import pygame\n'), ((508, 526), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (524, 526), False, 'import pygame\n'), ((614, 638), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (636, 638), False, 'import pygame\n'), ((1019, 1042), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1040, 1042), False, 'import pygame\n'), ((683, 696), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (694, 696), False, 'import pygame\n'), ((586, 599), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (597, 599), False, 'import pygame\n')]
|
import inspect
from django.http.response import JsonResponse
from django.shortcuts import render, redirect
from django.contrib.auth import login, logout
from django.http import HttpResponseBadRequest
from Login.models import M_User, T_Attr
from utils.make_display_data import make_user_config_data
from utils.need_login import need_login
import secrets
# Create your views here.
class SecrectCode:
    """Holds the one-time secret code issued during a password reset.

    The class name's spelling is kept as-is because the module-level
    instance below depends on it.
    """

    def __init__(self):
        # Empty until a token is issued; overwritten with a hex token later.
        self.secret_code = ""
secretCode = SecrectCode()  # module-level shared instance holding the latest reset code
def index(request):
    """Login view: authenticate on POST, otherwise render the login form.

    NOTE(review): credentials are matched as a plaintext password equality
    against M_User -- this module stores passwords unhashed; consider
    Django's hashed auth (authenticate()) if the model can change.
    """
    if request.method != "POST":
        return render(request, "index.html")
    user_id = request.POST["id"]
    password = request.POST["password"]
    matched = M_User.objects.filter(username=user_id, password=password)
    if matched.exists():
        login(request, matched.get())
        return redirect('/book/', permanent=True)
    return render(request, "index.html", {"msg": "IDかPASSWORDが間違っています"})
def logout_user(request):
    """Log the current user out and return to the login page.

    Renders index.html with a success or failure message.
    """
    try:
        logout(request)
        msg = {"msg": "ログアウトしました。"}
        return render(request, "index.html", msg)
    except Exception:
        # BUG FIX: the original used a bare `except:`, which also swallows
        # SystemExit/KeyboardInterrupt; catch Exception instead.
        msg = {"msg": "ログアウトに失敗しました"}
        return render(request, "index.html", msg)
def change_password(request):
    """
    Change a user's password after verifying the one-time secret code.

    The submitted code must be non-empty and match the code issued by
    check_and_publish_code(); otherwise a failure message is shown.
    """
    msg = {"msg": ""}
    if request.method != "POST":
        return render(request, "change_password.html")
    supplied = request.POST["secret_code"]
    if supplied != "" and supplied == secretCode.secret_code:
        username = request.POST["username"]
        try:
            u = M_User.objects.get(username=username)
        except M_User.DoesNotExist:
            # BUG FIX: the original let DoesNotExist propagate as a 500 when
            # the user vanished between code issue and form submission.
            msg["msg"] = "パスワード変更に失敗しました。"
            return render(request, "change_password.html", msg)
        # NOTE(review): password stored as plaintext, consistent with the
        # rest of this module; hashed auth would be safer.
        u.password = request.POST["password"]
        u.save()
        msg["msg"] = "パスワードを変更しました。"
        return render(request, "index.html", msg)
    msg["msg"] = "パスワード変更に失敗しました。"
    return render(request, "change_password.html", msg)
def check_and_publish_code(request):
    """
    Expected to be called via Ajax.
    Password-reset flow: enter id -> if the id exists, show the password
    form -> change the password.  Issues and returns a fresh secret code
    when the id exists; otherwise responds with 400.
    """
    if request.method != "POST":
        return redirect('/book/', permanent=True)
    id = request.POST["id"]
    if not M_User.objects.filter(username=id).exists():
        return HttpResponseBadRequest(request)
    secretCode.secret_code = secrets.token_hex(16)
    return JsonResponse({"secret": secretCode.secret_code})
def signup(request):
    """
    Render the signup page; on POST, register a new user.

    Shows an error when the id is already taken, otherwise saves the
    user and returns to the login page with a confirmation message.
    """
    if request.method == "POST":
        id = request.POST["id"]
        if M_User.objects.filter(username=id).exists():
            # BUG FIX: the original constructed the M_User instance before
            # this check, doing pointless work on the duplicate-id path.
            return render(request, "signup.html", {"msg": "既に存在しているidです。"})
        # NOTE(review): password is saved as plaintext, consistent with
        # index(); consider Django's hashed user model.
        user = M_User(username=id,
                      password=request.POST["password"],
                      email=request.POST["email"],
                      name=request.POST["name"])
        user.save()
        msg = {"msg": "ユーザー登録しました。\nログインしてください。"}
        return render(request, "index.html", msg)
    return render(request, "signup.html")
@need_login(redirect_field_name='index.html', err_msg="サインアップ、ログインが必要です")
def user_config(request):
    """
    Render the user's configuration page (config.html).
    """
    context = make_user_config_data(username=request.user)
    return render(request, "config.html", context)
# This part is planned to be handled via ajax
def register_attr(request):
    """
    Used by the ajax handler: register a favorite and return its name so
    the page can be redrawn.
    Returns:
        [str]: name of the registered favorite
    """
    if request.method != "POST":
        return HttpResponseBadRequest(request)
    code = request.POST.get("code", None)
    if code is None:
        return HttpResponseBadRequest(request)
    string = request.POST.get("string", "")
    # Only register non-empty names that are not already stored
    # (short-circuit keeps the DB lookup off the empty-string path).
    if string != "" and not T_Attr.objects.filter(id=request.user, code=code,
                                                  string=string).exists():
        attr = T_Attr(id=request.user, code=code, string=string)
        try:
            attr.save()
            return JsonResponse({"string": string})
        except Exception:
            return HttpResponseBadRequest(request)
    return HttpResponseBadRequest(request)
def delete_attr(request):
    """
    Delete a T_Attr row, but only when the request is a POST; always
    re-render config.html with freshly built display data.
    """
    username = request.user
    if request.method != "POST":
        return render(request, "config.html", make_user_config_data(username))
    code = int(request.POST.get("code", -1))
    string = request.POST.get("string", "")
    try:
        T_Attr.objects.get(id=username, code=code, string=string).delete()
        data = make_user_config_data(username)
    except Exception as e:
        # Debug-print the caller's location along with the error.
        caller = inspect.currentframe().f_back
        print(f"{caller.f_code.co_filename},{caller.f_lineno},{e}")
        data = make_user_config_data(username)
        data["msg"] = "削除に失敗"
    return render(request, "config.html", data)
|
[
"utils.make_display_data.make_user_config_data",
"Login.models.M_User",
"Login.models.T_Attr.objects.filter",
"Login.models.T_Attr.objects.get",
"Login.models.M_User.objects.filter",
"django.shortcuts.redirect",
"django.http.response.JsonResponse",
"django.http.HttpResponseBadRequest",
"Login.models.M_User.objects.get",
"secrets.token_hex",
"django.contrib.auth.logout",
"utils.need_login.need_login",
"Login.models.T_Attr",
"inspect.currentframe",
"django.shortcuts.render",
"django.contrib.auth.login"
] |
[((3320, 3392), 'utils.need_login.need_login', 'need_login', ([], {'redirect_field_name': '"""index.html"""', 'err_msg': '"""サインアップ、ログインが必要です"""'}), "(redirect_field_name='index.html', err_msg='サインアップ、ログインが必要です')\n", (3330, 3392), False, 'from utils.need_login import need_login\n'), ((3285, 3315), 'django.shortcuts.render', 'render', (['request', '"""signup.html"""'], {}), "(request, 'signup.html')\n", (3291, 3315), False, 'from django.shortcuts import render, redirect\n'), ((3473, 3517), 'utils.make_display_data.make_user_config_data', 'make_user_config_data', ([], {'username': 'request.user'}), '(username=request.user)\n', (3494, 3517), False, 'from utils.make_display_data import make_user_config_data\n'), ((3530, 3566), 'django.shortcuts.render', 'render', (['request', '"""config.html"""', 'data'], {}), "(request, 'config.html', data)\n", (3536, 3566), False, 'from django.shortcuts import render, redirect\n'), ((4411, 4442), 'django.http.HttpResponseBadRequest', 'HttpResponseBadRequest', (['request'], {}), '(request)\n', (4433, 4442), False, 'from django.http import HttpResponseBadRequest\n'), ((990, 1019), 'django.shortcuts.render', 'render', (['request', '"""index.html"""'], {}), "(request, 'index.html')\n", (996, 1019), False, 'from django.shortcuts import render, redirect\n'), ((1120, 1135), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (1126, 1135), False, 'from django.contrib.auth import login, logout\n'), ((1203, 1237), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', 'msg'], {}), "(request, 'index.html', msg)\n", (1209, 1237), False, 'from django.shortcuts import render, redirect\n'), ((2031, 2070), 'django.shortcuts.render', 'render', (['request', '"""change_password.html"""'], {}), "(request, 'change_password.html')\n", (2037, 2070), False, 'from django.shortcuts import render, redirect\n'), ((2560, 2594), 'django.shortcuts.redirect', 'redirect', (['"""/book/"""'], {'permanent': '(True)'}), "('/book/', 
permanent=True)\n", (2568, 2594), False, 'from django.shortcuts import render, redirect\n'), ((2856, 2918), 'Login.models.M_User', 'M_User', ([], {'username': 'id', 'password': 'password', 'email': 'email', 'name': 'name'}), '(username=id, password=password, email=email, name=name)\n', (2862, 2918), False, 'from Login.models import M_User, T_Attr\n'), ((5176, 5212), 'django.shortcuts.render', 'render', (['request', '"""config.html"""', 'data'], {}), "(request, 'config.html', data)\n", (5182, 5212), False, 'from django.shortcuts import render, redirect\n'), ((5240, 5271), 'utils.make_display_data.make_user_config_data', 'make_user_config_data', (['username'], {}), '(username)\n', (5261, 5271), False, 'from utils.make_display_data import make_user_config_data\n'), ((5288, 5324), 'django.shortcuts.render', 'render', (['request', '"""config.html"""', 'data'], {}), "(request, 'config.html', data)\n", (5294, 5324), False, 'from django.shortcuts import render, redirect\n'), ((726, 776), 'Login.models.M_User.objects.get', 'M_User.objects.get', ([], {'username': 'id', 'password': 'password'}), '(username=id, password=password)\n', (744, 776), False, 'from Login.models import M_User, T_Attr\n'), ((790, 810), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (795, 810), False, 'from django.contrib.auth import login, logout\n'), ((831, 865), 'django.shortcuts.redirect', 'redirect', (['"""/book/"""'], {'permanent': '(True)'}), "('/book/', permanent=True)\n", (839, 865), False, 'from django.shortcuts import render, redirect\n'), ((901, 962), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', "{'msg': 'IDかPASSWORDが間違っています'}"], {}), "(request, 'index.html', {'msg': 'IDかPASSWORDが間違っています'})\n", (907, 962), False, 'from django.shortcuts import render, redirect\n'), ((1306, 1340), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', 'msg'], {}), "(request, 'index.html', msg)\n", (1312, 1340), False, 'from 
django.shortcuts import render, redirect\n'), ((1672, 1709), 'Login.models.M_User.objects.get', 'M_User.objects.get', ([], {'username': 'username'}), '(username=username)\n', (1690, 1709), False, 'from Login.models import M_User, T_Attr\n'), ((1846, 1880), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', 'msg'], {}), "(request, 'index.html', msg)\n", (1852, 1880), False, 'from django.shortcuts import render, redirect\n'), ((1959, 2003), 'django.shortcuts.render', 'render', (['request', '"""change_password.html"""', 'msg'], {}), "(request, 'change_password.html', msg)\n", (1965, 2003), False, 'from django.shortcuts import render, redirect\n'), ((2375, 2396), 'secrets.token_hex', 'secrets.token_hex', (['(16)'], {}), '(16)\n', (2392, 2396), False, 'import secrets\n'), ((2417, 2465), 'django.http.response.JsonResponse', 'JsonResponse', (["{'secret': secretCode.secret_code}"], {}), "({'secret': secretCode.secret_code})\n", (2429, 2465), False, 'from django.http.response import JsonResponse\n'), ((2501, 2532), 'django.http.HttpResponseBadRequest', 'HttpResponseBadRequest', (['request'], {}), '(request)\n', (2523, 2532), False, 'from django.http import HttpResponseBadRequest\n'), ((3067, 3102), 'django.shortcuts.render', 'render', (['request', '"""signup.html"""', 'res'], {}), "(request, 'signup.html', res)\n", (3073, 3102), False, 'from django.shortcuts import render, redirect\n'), ((3236, 3270), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', 'msg'], {}), "(request, 'index.html', msg)\n", (3242, 3270), False, 'from django.shortcuts import render, redirect\n'), ((3856, 3887), 'django.http.HttpResponseBadRequest', 'HttpResponseBadRequest', (['request'], {}), '(request)\n', (3878, 3887), False, 'from django.http import HttpResponseBadRequest\n'), ((4728, 4785), 'Login.models.T_Attr.objects.get', 'T_Attr.objects.get', ([], {'id': 'username', 'code': 'code', 'string': 'string'}), '(id=username, code=code, string=string)\n', (4746, 
4785), False, 'from Login.models import M_User, T_Attr\n'), ((4833, 4864), 'utils.make_display_data.make_user_config_data', 'make_user_config_data', (['username'], {}), '(username)\n', (4854, 4864), False, 'from utils.make_display_data import make_user_config_data\n'), ((642, 695), 'Login.models.M_User.objects.filter', 'M_User.objects.filter', ([], {'username': 'id', 'password': 'password'}), '(username=id, password=password)\n', (663, 695), False, 'from Login.models import M_User, T_Attr\n'), ((2292, 2326), 'Login.models.M_User.objects.filter', 'M_User.objects.filter', ([], {'username': 'id'}), '(username=id)\n', (2313, 2326), False, 'from Login.models import M_User, T_Attr\n'), ((2939, 2973), 'Login.models.M_User.objects.filter', 'M_User.objects.filter', ([], {'username': 'id'}), '(username=id)\n', (2960, 2973), False, 'from Login.models import M_User, T_Attr\n'), ((4093, 4142), 'Login.models.T_Attr', 'T_Attr', ([], {'id': 'request.user', 'code': 'code', 'string': 'string'}), '(id=request.user, code=code, string=string)\n', (4099, 4142), False, 'from Login.models import M_User, T_Attr\n'), ((5036, 5067), 'utils.make_display_data.make_user_config_data', 'make_user_config_data', (['username'], {}), '(username)\n', (5057, 5067), False, 'from utils.make_display_data import make_user_config_data\n'), ((5123, 5159), 'django.shortcuts.render', 'render', (['request', '"""config.html"""', 'data'], {}), "(request, 'config.html', data)\n", (5129, 5159), False, 'from django.shortcuts import render, redirect\n'), ((4272, 4290), 'django.http.response.JsonResponse', 'JsonResponse', (['data'], {}), '(data)\n', (4284, 4290), False, 'from django.http.response import JsonResponse\n'), ((4359, 4390), 'django.http.HttpResponseBadRequest', 'HttpResponseBadRequest', (['request'], {}), '(request)\n', (4381, 4390), False, 'from django.http import HttpResponseBadRequest\n'), ((3994, 4058), 'Login.models.T_Attr.objects.filter', 'T_Attr.objects.filter', ([], {'id': 'request.user', 'code': 
'code', 'string': 'string'}), '(id=request.user, code=code, string=string)\n', (4015, 4058), False, 'from Login.models import M_User, T_Attr\n'), ((4970, 4992), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (4990, 4992), False, 'import inspect\n'), ((4919, 4941), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (4939, 4941), False, 'import inspect\n')]
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt

# Energy error (mHa) for each (target electron, shift decay) combination.
target = ["True", "False"]
el_decay = ["True", "False"]
error = np.array([[4.478, 3.483],
                  [3.647, 2.502]])

fig, ax = plt.subplots()
im = ax.imshow(error)

# One tick per category, labeled from the lists above.
ax.set_xticks(np.arange(len(el_decay)))
ax.set_yticks(np.arange(len(target)))
ax.set_xticklabels(el_decay)
ax.set_yticklabels(target)
ax.set_ylabel("Target electron")
ax.set_xlabel("Electron-Electron shift decay")

# Rotate the x tick labels so they remain readable.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
         rotation_mode="anchor")

# Annotate each cell with its numeric value.
for row in range(len(target)):
    for col in range(len(el_decay)):
        ax.text(col, row, error[row, col],
                ha="center", va="center", color="w")

ax.set_title("Shift: Energy error (mHa) for Nitrogen")
fig.tight_layout()
plt.show()
|
[
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((135, 177), 'numpy.array', 'np.array', (['[[4.478, 3.483], [3.647, 2.502]]'], {}), '([[4.478, 3.483], [3.647, 2.502]])\n', (143, 177), True, 'import numpy as np\n'), ((210, 224), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (222, 224), True, 'import matplotlib.pyplot as plt\n'), ((986, 996), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (994, 996), True, 'import matplotlib.pyplot as plt\n')]
|
# Import from system libraries
from flask_mongoengine import MongoEngine
# MongoEngine load to db variable; module-level object shared by the app
db = MongoEngine()
# Function to initialize db to app
def initialize_db(app):
    """Attach the shared MongoEngine database object to the given app."""
    db.init_app(app)
|
[
"flask_mongoengine.MongoEngine"
] |
[((113, 126), 'flask_mongoengine.MongoEngine', 'MongoEngine', ([], {}), '()\n', (124, 126), False, 'from flask_mongoengine import MongoEngine\n')]
|
"""Provide variant calling with VarScan from TGI at Wash U.
http://varscan.sourceforge.net/
"""
import os
import sys
from bcbio import broad, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.variation import samtools, vcfutils
from bcbio.variation.vcfutils import (combine_variant_files, write_empty_vcf,
get_paired_bams, bgzip_and_index)
import pysam
def run_varscan(align_bams, items, ref_file, assoc_files,
                region=None, out_file=None):
    """Entry point: run paired (somatic) or single-sample VarScan calling.

    Paired calling requires both tumor and normal BAMs; otherwise fall
    back to the plain mpileup2cns workflow.
    """
    paired = get_paired_bams(align_bams, items)
    if paired and paired.normal_bam and paired.tumor_bam:
        caller = _varscan_paired
    else:
        vcfutils.check_paired_problems(items)
        caller = _varscan_work
    return samtools.shared_variantcall(caller, "varscan", align_bams, ref_file,
                                       items, assoc_files, region, out_file)
def _get_jvm_opts(config, tmp_dir):
    """Retrieve common options for running VarScan.

    Handles jvm_opts, setting user and country to English to avoid issues
    with different locales producing non-compliant VCF.
    """
    resources = config_utils.get_resources("varscan", config)
    opts = config_utils.adjust_opts(
        resources.get("jvm_opts", ["-Xmx750m", "-Xmx2g"]),
        {"algorithm": {"memory_adjust": {"magnitude": 1.1,
                                         "direction": "decrease"}}})
    opts = opts + ["-Duser.language=en", "-Duser.country=US"]
    opts = opts + broad.get_default_jvm_opts(tmp_dir)
    return " ".join(opts)
def _varscan_options_from_config(config):
    """Retrieve additional options for VarScan from the configuration.

    Starts from fixed coverage/p-value/strand defaults and appends any
    user-configured extras.
    """
    options = ["--min-coverage 5", "--p-value 0.98", "--strand-filter 1"]
    extra = config_utils.get_resources("varscan", config).get("options")
    if extra:
        options.extend(str(x) for x in extra)
    return options
def spv_freq_filter(line, tumor_index):
    """Filter VarScan calls based on the SPV value and frequency.

    Flags calls with SPV < 0.05 and a tumor FREQ > 0.35 as SpvFreq.
    False positives dominate these higher frequency, low SPV calls. They
    appear to be primarily non-somatic/germline variants not removed by
    other filters.
    """
    if line.startswith("#CHROM"):
        # Inject the FILTER header definition just before the column header.
        header = ('##FILTER=<ID=SpvFreq,Description="High frequency (tumor FREQ > 0.35) '
                  'and low p-value for somatic (SPV < 0.05)">')
        return header + "\n" + line
    if line.startswith("#"):
        return line
    cols = line.split("\t")
    # Pair the FORMAT keys with the tumor sample's values.
    sample_ft = dict(zip(cols[8].split(":"), cols[9 + tumor_index].split(":")))
    freq = utils.safe_to_float(sample_ft.get("FREQ"))
    spv_fields = [x for x in cols[7].split(";") if x.startswith("SPV=")]
    spv = utils.safe_to_float(spv_fields[0].split("=")[-1] if spv_fields else None)
    if spv is not None and freq is not None and spv < 0.05 and freq > 0.35:
        # Mark the FILTER column, keeping any existing non-PASS filters.
        if cols[6] in set([".", "PASS"]):
            cols[6] = "SpvFreq"
        else:
            cols[6] += ";SpvFreq"
        line = "\t".join(cols)
    return line
def _varscan_paired(align_bams, ref_file, items, target_regions, out_file):
    """Run a paired VarScan analysis, also known as "somatic".

    Streams normal and tumor mpileups into ``varscan somatic`` via process
    substitution, then fixes and combines the SNP/indel outputs into a
    bgzipped VCF at ``out_file``.

    NOTE(review): unlike a typical caller this function has no explicit
    return; the final ``out_file`` assignment is discarded -- confirm
    callers only rely on the file existing on disk.
    """
    max_read_depth = "1000"
    config = items[0]["config"]
    paired = get_paired_bams(align_bams, items)
    if not paired.normal_bam:
        affected_batch = items[0]["metadata"]["batch"]
        message = ("Batch {} requires both tumor and normal BAM files for"
                   " VarScan cancer calling").format(affected_batch)
        raise ValueError(message)
    if not utils.file_exists(out_file):
        assert out_file.endswith(".vcf.gz"), "Expect bgzipped output to VarScan"
        # Build the two mpileup command lines; want_bcf=False keeps text
        # pileup output that VarScan can consume from a pipe.
        normal_mpileup_cl = samtools.prep_mpileup([paired.normal_bam], ref_file,
                                                  config, max_read_depth,
                                                  target_regions=target_regions,
                                                  want_bcf=False)
        tumor_mpileup_cl = samtools.prep_mpileup([paired.tumor_bam], ref_file,
                                                 config, max_read_depth,
                                                 target_regions=target_regions,
                                                 want_bcf=False)
        base, ext = utils.splitext_plus(out_file)
        indel_file = base + "-indel.vcf"
        snp_file = base + "-snp.vcf"
        with file_transaction(config, indel_file, snp_file) as (tx_indel, tx_snp):
            with tx_tmpdir(items[0]) as tmp_dir:
                jvm_opts = _get_jvm_opts(config, tmp_dir)
                opts = " ".join(_varscan_options_from_config(config))
                # Strip zero-coverage pileup lines; ifne (moreutils) skips
                # grep entirely when the stream is empty so an empty input
                # cannot break the pipeline.
                remove_zerocoverage = r"{ ifne grep -v -P '\t0\t\t$' || true; }"
                export = utils.local_path_export()
                varscan_cmd = ("{export} varscan {jvm_opts} somatic "
                               "<({normal_mpileup_cl} | {remove_zerocoverage}) "
                               "<({tumor_mpileup_cl} | {remove_zerocoverage}) "
                               "--output-snp {tx_snp} --output-indel {tx_indel} "
                               "--output-vcf {opts} ")
                # add minimum AF (percentage in config, fraction for VarScan)
                min_af = float(utils.get_in(paired.tumor_config, ("algorithm",
                                                                   "min_allele_fraction"), 10)) / 100.0
                varscan_cmd += "--min-var-freq {min_af} "
                do.run(varscan_cmd.format(**locals()), "Varscan", None, None)
        # Post-process each raw output: rename samples, normalize ambiguous
        # REF/ALT, add contigs, reject non-somatic calls, apply SPV/FREQ
        # filtering, then bgzip.
        to_combine = []
        for fname in [snp_file, indel_file]:
            if utils.file_exists(fname):
                fix_file = "%s-fix.vcf.gz" % (utils.splitext_plus(fname)[0])
                with file_transaction(config, fix_file) as tx_fix_file:
                    fix_ambig_ref = vcfutils.fix_ambiguous_cl()
                    fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
                    py_cl = os.path.join(os.path.dirname(sys.executable), "py")
                    normal_name = paired.normal_name
                    tumor_name = paired.tumor_name
                    cmd = ("cat {fname} | "
                           "{py_cl} -x 'bcbio.variation.varscan.fix_varscan_output(x,"
                           """ "{normal_name}", "{tumor_name}")' | """
                           "{fix_ambig_ref} | {fix_ambig_alt} | ifne vcfuniqalleles | "
                           """{py_cl} -x 'bcbio.variation.vcfutils.add_contig_to_header(x, "{ref_file}")' | """
                           """bcftools filter -m + -s REJECT -e "SS != '.' && SS != '2'" 2> /dev/null | """
                           "{py_cl} -x 'bcbio.variation.varscan.spv_freq_filter(x, 1)' | "
                           "bgzip -c > {tx_fix_file}")
                    do.run(cmd.format(**locals()), "Varscan paired fix")
                to_combine.append(fix_file)
        if not to_combine:
            out_file = write_empty_vcf(out_file, config)
        else:
            out_file = combine_variant_files(to_combine,
                                             out_file, ref_file, config,
                                             region=target_regions)
        # Regions with no variants can yield a zero-byte file; replace it
        # with a correctly formatted empty VCF and ensure it is indexed.
        if os.path.getsize(out_file) == 0:
            write_empty_vcf(out_file)
        if out_file.endswith(".gz"):
            out_file = bgzip_and_index(out_file, config)
def fix_varscan_output(line, normal_name="", tumor_name=""):
    """Fix a varscan VCF line.

    Fixes the ALT column and also fixes floating point values
    output as strings to by Floats: FREQ, SSC.
    Returns the fixed line, or None for "Unknown" somatic-status (SS=5)
    records, which are broken in current VarScan versions.
    This function was contributed by <NAME> <<EMAIL>>,
    with minor modifications by <NAME> <<EMAIL>>.
    """
    line = line.strip()
    # Header metadata: correct the declared type of SSC/FREQ fields.
    tofix = ("##INFO=<ID=SSC", "##FORMAT=<ID=FREQ")
    if(line.startswith("##")):
        if line.startswith(tofix):
            line = line.replace('Number=1,Type=String',
                                'Number=1,Type=Float')
        return line
    line = line.split("\t")
    # Column header: rename VarScan's generic NORMAL/TUMOR sample columns
    # to the real sample names when both are provided.
    if line[0].startswith("#CHROM"):
        if tumor_name and normal_name:
            mapping = {"NORMAL": normal_name, "TUMOR": tumor_name}
            base_header = line[:9]
            old_samples = line[9:]
            if len(old_samples) == 0:
                return "\t".join(line)
            samples = [mapping[sample_name] for sample_name in old_samples]
            assert len(old_samples) == len(samples)
            return "\t".join(base_header + samples)
        else:
            return "\t".join(line)
    try:
        REF, ALT = line[3:5]
    except ValueError:
        # Record with too few columns -- pass it through unchanged.
        return "\t".join(line)
    def _normalize_freq(line, sample_i):
        """Ensure FREQ genotype value is float as defined in header.
        """
        ft_parts = line[8].split(":")
        dat = line[sample_i].split(":")
        # Non-conforming no-call sample, don't try to fix FREQ
        if len(dat) != len(ft_parts):
            return line
        freq_i = ft_parts.index("FREQ")
        try:
            # Convert VarScan's "NN%" string into a 0..1 float.
            dat[freq_i] = str(float(dat[freq_i].rstrip("%")) / 100)
        except ValueError:  # illegal binary characters -- set frequency to zero
            dat[freq_i] = "0.0"
        line[sample_i] = ":".join(dat)
        return line
    # Normalize FREQ for the first two sample columns (normal/tumor).
    if len(line) > 9:
        line = _normalize_freq(line, 9)
    if len(line) > 10:
        line = _normalize_freq(line, 10)
    # HACK: The position of the SS= changes, so we just search for it
    ss_vals = [item for item in line[7].split(";") if item.startswith("SS=")]
    if len(ss_vals) > 0:
        somatic_status = int(ss_vals[0].split("=")[1])  # Get the number
    else:
        somatic_status = None
    if somatic_status == 5:
        # "Unknown" states are broken in current versions of VarScan
        # so we just bail out here for now
        return
    # fix FREQ for any additional samples -- multi-sample VarScan calling
    if len(line) > 11:
        for i in range(11, len(line)):
            line = _normalize_freq(line, i)
    #FIXME: VarScan also produces invalid REF records (e.g. CAA/A)
    # This is not handled yet.
    # Rewrite VarScan's +/- indel notation into standard REF/ALT alleles.
    if "+" in ALT or "-" in ALT:
        if "/" not in ALT:
            if ALT[0] == "+":
                R = REF
                A = REF + ALT[1:]
            elif ALT[0] == "-":
                R = REF + ALT[1:]
                A = REF
        else:
            # Mixed insertion/deletion alleles: anchor REF on the longest
            # deletion, then rebuild each alternate allele from it.
            Ins = [p[1:] for p in ALT.split("/") if p[0] == "+"]
            Del = [p[1:] for p in ALT.split("/") if p[0] == "-"]
            if len(Del):
                REF += sorted(Del, key=lambda x: len(x))[-1]
            A = ",".join([REF[::-1].replace(p[::-1], "", 1)[::-1]
                          for p in Del] + [REF + p for p in Ins])
            R = REF
        REF = R
        ALT = A
    else:
        ALT = ALT.replace('/', ',')
    line[3] = REF
    line[4] = ALT
    return "\t".join(line)
def _create_sample_list(in_bams, vcf_file):
    """Pull sample names from input BAMs and create input sample list.

    Writes one SM name per line, taken from each BAM's read groups, to a
    "<vcf base>-sample_list.txt" file and returns its path.
    """
    out_file = "%s-sample_list.txt" % os.path.splitext(vcf_file)[0]
    with open(out_file, "w") as out_handle:
        for bam_file in in_bams:
            with pysam.Samfile(bam_file, "rb") as work_bam:
                for read_group in work_bam.header.get("RG", []):
                    out_handle.write("%s\n" % read_group["SM"])
    return out_file
def _varscan_work(align_bams, ref_file, items, target_regions, out_file):
    """Perform SNP and indel genotyping with VarScan.

    Runs mpileup2cns over all input BAMs and writes the post-processed
    VCF to ``out_file`` (bgzip-indexed when the caller requested .gz).
    NOTE(review): no explicit return -- callers appear to rely on the
    output file on disk; confirm.
    """
    config = items[0]["config"]
    orig_out_file = out_file
    # Work on the plain .vcf; it is bgzipped at the end if requested.
    out_file = orig_out_file.replace(".vcf.gz", ".vcf")
    max_read_depth = "1000"
    sample_list = _create_sample_list(align_bams, out_file)
    mpileup = samtools.prep_mpileup(align_bams, ref_file, config, max_read_depth,
                                    target_regions=target_regions, want_bcf=False)
    # VarScan fails to generate a header on files that start with
    # zerocoverage calls; strip these with grep, we're not going to
    # call on them
    remove_zerocoverage = r"{ ifne grep -v -P '\t0\t\t$' || true; }"
    # we use ifne from moreutils to ensure we process only on files with input, skipping otherwise
    # http://manpages.ubuntu.com/manpages/natty/man1/ifne.1.html
    with tx_tmpdir(items[0]) as tmp_dir:
        jvm_opts = _get_jvm_opts(config, tmp_dir)
        opts = " ".join(_varscan_options_from_config(config))
        # min_allele_fraction is configured as a percentage; VarScan wants a fraction.
        min_af = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
        fix_ambig_ref = vcfutils.fix_ambiguous_cl()
        fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
        py_cl = os.path.join(os.path.dirname(sys.executable), "py")
        export = utils.local_path_export()
        cmd = ("{export} {mpileup} | {remove_zerocoverage} | "
               "ifne varscan {jvm_opts} mpileup2cns {opts} "
               "--vcf-sample-list {sample_list} --min-var-freq {min_af} --output-vcf --variants | "
               """{py_cl} -x 'bcbio.variation.vcfutils.add_contig_to_header(x, "{ref_file}")' | """
               "{py_cl} -x 'bcbio.variation.varscan.fix_varscan_output(x)' | "
               "{fix_ambig_ref} | {fix_ambig_alt} | ifne vcfuniqalleles > {out_file}")
        do.run(cmd.format(**locals()), "Varscan", None,
               [do.file_exists(out_file)])
        os.remove(sample_list)
    # VarScan can create completely empty files in regions without
    # variants, so we create a correctly formatted empty file
    if os.path.getsize(out_file) == 0:
        write_empty_vcf(out_file)
    if orig_out_file.endswith(".gz"):
        vcfutils.bgzip_and_index(out_file, config)
|
[
"os.remove",
"bcbio.variation.samtools.prep_mpileup",
"bcbio.variation.vcfutils.fix_ambiguous_cl",
"bcbio.pipeline.config_utils.adjust_opts",
"bcbio.utils.local_path_export",
"bcbio.distributed.transaction.tx_tmpdir",
"bcbio.variation.vcfutils.combine_variant_files",
"bcbio.distributed.transaction.file_transaction",
"bcbio.utils.file_exists",
"os.path.dirname",
"bcbio.variation.vcfutils.write_empty_vcf",
"bcbio.provenance.do.file_exists",
"os.path.getsize",
"bcbio.variation.vcfutils.check_paired_problems",
"bcbio.broad.get_default_jvm_opts",
"bcbio.variation.vcfutils.bgzip_and_index",
"bcbio.variation.vcfutils.get_paired_bams",
"bcbio.pipeline.config_utils.get_resources",
"pysam.Samfile",
"bcbio.utils.get_in",
"os.path.splitext",
"bcbio.utils.splitext_plus",
"bcbio.variation.samtools.shared_variantcall"
] |
[((622, 656), 'bcbio.variation.vcfutils.get_paired_bams', 'get_paired_bams', (['align_bams', 'items'], {}), '(align_bams, items)\n', (637, 656), False, 'from bcbio.variation.vcfutils import combine_variant_files, write_empty_vcf, get_paired_bams, bgzip_and_index\n'), ((1546, 1591), 'bcbio.pipeline.config_utils.get_resources', 'config_utils.get_resources', (['"""varscan"""', 'config'], {}), "('varscan', config)\n", (1572, 1591), False, 'from bcbio.pipeline import config_utils\n'), ((1672, 1790), 'bcbio.pipeline.config_utils.adjust_opts', 'config_utils.adjust_opts', (['jvm_opts', "{'algorithm': {'memory_adjust': {'magnitude': 1.1, 'direction': 'decrease'}}}"], {}), "(jvm_opts, {'algorithm': {'memory_adjust': {\n 'magnitude': 1.1, 'direction': 'decrease'}}})\n", (1696, 1790), False, 'from bcbio.pipeline import config_utils\n'), ((1957, 1992), 'bcbio.broad.get_default_jvm_opts', 'broad.get_default_jvm_opts', (['tmp_dir'], {}), '(tmp_dir)\n', (1983, 1992), False, 'from bcbio import broad, utils\n'), ((2233, 2278), 'bcbio.pipeline.config_utils.get_resources', 'config_utils.get_resources', (['"""varscan"""', 'config'], {}), "('varscan', config)\n", (2259, 2278), False, 'from bcbio.pipeline import config_utils\n'), ((3938, 3972), 'bcbio.variation.vcfutils.get_paired_bams', 'get_paired_bams', (['align_bams', 'items'], {}), '(align_bams, items)\n', (3953, 3972), False, 'from bcbio.variation.vcfutils import combine_variant_files, write_empty_vcf, get_paired_bams, bgzip_and_index\n'), ((12420, 12538), 'bcbio.variation.samtools.prep_mpileup', 'samtools.prep_mpileup', (['align_bams', 'ref_file', 'config', 'max_read_depth'], {'target_regions': 'target_regions', 'want_bcf': '(False)'}), '(align_bams, ref_file, config, max_read_depth,\n target_regions=target_regions, want_bcf=False)\n', (12441, 12538), False, 'from bcbio.variation import samtools, vcfutils\n'), ((14015, 14037), 'os.remove', 'os.remove', (['sample_list'], {}), '(sample_list)\n', (14024, 14037), False, 'import 
os\n'), ((735, 854), 'bcbio.variation.samtools.shared_variantcall', 'samtools.shared_variantcall', (['_varscan_paired', '"""varscan"""', 'align_bams', 'ref_file', 'items', 'assoc_files', 'region', 'out_file'], {}), "(_varscan_paired, 'varscan', align_bams,\n ref_file, items, assoc_files, region, out_file)\n", (762, 854), False, 'from bcbio.variation import samtools, vcfutils\n'), ((965, 1002), 'bcbio.variation.vcfutils.check_paired_problems', 'vcfutils.check_paired_problems', (['items'], {}), '(items)\n', (995, 1002), False, 'from bcbio.variation import samtools, vcfutils\n'), ((1023, 1140), 'bcbio.variation.samtools.shared_variantcall', 'samtools.shared_variantcall', (['_varscan_work', '"""varscan"""', 'align_bams', 'ref_file', 'items', 'assoc_files', 'region', 'out_file'], {}), "(_varscan_work, 'varscan', align_bams, ref_file,\n items, assoc_files, region, out_file)\n", (1050, 1140), False, 'from bcbio.variation import samtools, vcfutils\n'), ((4248, 4275), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (4265, 4275), False, 'from bcbio import broad, utils\n'), ((4386, 4513), 'bcbio.variation.samtools.prep_mpileup', 'samtools.prep_mpileup', (['[paired.normal_bam]', 'ref_file', 'config', 'max_read_depth'], {'target_regions': 'target_regions', 'want_bcf': '(False)'}), '([paired.normal_bam], ref_file, config, max_read_depth,\n target_regions=target_regions, want_bcf=False)\n', (4407, 4513), False, 'from bcbio.variation import samtools, vcfutils\n'), ((4687, 4813), 'bcbio.variation.samtools.prep_mpileup', 'samtools.prep_mpileup', (['[paired.tumor_bam]', 'ref_file', 'config', 'max_read_depth'], {'target_regions': 'target_regions', 'want_bcf': '(False)'}), '([paired.tumor_bam], ref_file, config, max_read_depth,\n target_regions=target_regions, want_bcf=False)\n', (4708, 4813), False, 'from bcbio.variation import samtools, vcfutils\n'), ((4977, 5006), 'bcbio.utils.splitext_plus', 'utils.splitext_plus', (['out_file'], {}), 
'(out_file)\n', (4996, 5006), False, 'from bcbio import broad, utils\n'), ((12966, 12985), 'bcbio.distributed.transaction.tx_tmpdir', 'tx_tmpdir', (['items[0]'], {}), '(items[0])\n', (12975, 12985), False, 'from bcbio.distributed.transaction import file_transaction, tx_tmpdir\n'), ((13229, 13256), 'bcbio.variation.vcfutils.fix_ambiguous_cl', 'vcfutils.fix_ambiguous_cl', ([], {}), '()\n', (13254, 13256), False, 'from bcbio.variation import samtools, vcfutils\n'), ((13281, 13309), 'bcbio.variation.vcfutils.fix_ambiguous_cl', 'vcfutils.fix_ambiguous_cl', (['(5)'], {}), '(5)\n', (13306, 13309), False, 'from bcbio.variation import samtools, vcfutils\n'), ((13395, 13420), 'bcbio.utils.local_path_export', 'utils.local_path_export', ([], {}), '()\n', (13418, 13420), False, 'from bcbio import broad, utils\n'), ((14174, 14199), 'os.path.getsize', 'os.path.getsize', (['out_file'], {}), '(out_file)\n', (14189, 14199), False, 'import os\n'), ((14214, 14239), 'bcbio.variation.vcfutils.write_empty_vcf', 'write_empty_vcf', (['out_file'], {}), '(out_file)\n', (14229, 14239), False, 'from bcbio.variation.vcfutils import combine_variant_files, write_empty_vcf, get_paired_bams, bgzip_and_index\n'), ((14287, 14329), 'bcbio.variation.vcfutils.bgzip_and_index', 'vcfutils.bgzip_and_index', (['out_file', 'config'], {}), '(out_file, config)\n', (14311, 14329), False, 'from bcbio.variation import samtools, vcfutils\n'), ((5098, 5144), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['config', 'indel_file', 'snp_file'], {}), '(config, indel_file, snp_file)\n', (5114, 5144), False, 'from bcbio.distributed.transaction import file_transaction, tx_tmpdir\n'), ((6281, 6305), 'bcbio.utils.file_exists', 'utils.file_exists', (['fname'], {}), '(fname)\n', (6298, 6305), False, 'from bcbio import broad, utils\n'), ((7594, 7627), 'bcbio.variation.vcfutils.write_empty_vcf', 'write_empty_vcf', (['out_file', 'config'], {}), '(out_file, config)\n', (7609, 7627), False, 'from 
bcbio.variation.vcfutils import combine_variant_files, write_empty_vcf, get_paired_bams, bgzip_and_index\n'), ((7665, 7754), 'bcbio.variation.vcfutils.combine_variant_files', 'combine_variant_files', (['to_combine', 'out_file', 'ref_file', 'config'], {'region': 'target_regions'}), '(to_combine, out_file, ref_file, config, region=\n target_regions)\n', (7686, 7754), False, 'from bcbio.variation.vcfutils import combine_variant_files, write_empty_vcf, get_paired_bams, bgzip_and_index\n'), ((7851, 7876), 'os.path.getsize', 'os.path.getsize', (['out_file'], {}), '(out_file)\n', (7866, 7876), False, 'import os\n'), ((7895, 7920), 'bcbio.variation.vcfutils.write_empty_vcf', 'write_empty_vcf', (['out_file'], {}), '(out_file)\n', (7910, 7920), False, 'from bcbio.variation.vcfutils import combine_variant_files, write_empty_vcf, get_paired_bams, bgzip_and_index\n'), ((7981, 8014), 'bcbio.variation.vcfutils.bgzip_and_index', 'bgzip_and_index', (['out_file', 'config'], {}), '(out_file, config)\n', (7996, 8014), False, 'from bcbio.variation.vcfutils import combine_variant_files, write_empty_vcf, get_paired_bams, bgzip_and_index\n'), ((11765, 11791), 'os.path.splitext', 'os.path.splitext', (['vcf_file'], {}), '(vcf_file)\n', (11781, 11791), False, 'import os\n'), ((13339, 13370), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (13354, 13370), False, 'import os\n'), ((5185, 5204), 'bcbio.distributed.transaction.tx_tmpdir', 'tx_tmpdir', (['items[0]'], {}), '(items[0])\n', (5194, 5204), False, 'from bcbio.distributed.transaction import file_transaction, tx_tmpdir\n'), ((5451, 5476), 'bcbio.utils.local_path_export', 'utils.local_path_export', ([], {}), '()\n', (5474, 5476), False, 'from bcbio import broad, utils\n'), ((11887, 11914), 'pysam.Samfile', 'pysam.Samfile', (['in_bam', '"""rb"""'], {}), "(in_bam, 'rb')\n", (11900, 11914), False, 'import pysam\n'), ((13133, 13195), 'bcbio.utils.get_in', 'utils.get_in', (['config', "('algorithm', 
'min_allele_fraction')", '(10)'], {}), "(config, ('algorithm', 'min_allele_fraction'), 10)\n", (13145, 13195), False, 'from bcbio import broad, utils\n'), ((13984, 14008), 'bcbio.provenance.do.file_exists', 'do.file_exists', (['out_file'], {}), '(out_file)\n', (13998, 14008), False, 'from bcbio.provenance import do\n'), ((6405, 6439), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['config', 'fix_file'], {}), '(config, fix_file)\n', (6421, 6439), False, 'from bcbio.distributed.transaction import file_transaction, tx_tmpdir\n'), ((6492, 6519), 'bcbio.variation.vcfutils.fix_ambiguous_cl', 'vcfutils.fix_ambiguous_cl', ([], {}), '()\n', (6517, 6519), False, 'from bcbio.variation import samtools, vcfutils\n'), ((6556, 6584), 'bcbio.variation.vcfutils.fix_ambiguous_cl', 'vcfutils.fix_ambiguous_cl', (['(5)'], {}), '(5)\n', (6581, 6584), False, 'from bcbio.variation import samtools, vcfutils\n'), ((5909, 5984), 'bcbio.utils.get_in', 'utils.get_in', (['paired.tumor_config', "('algorithm', 'min_allele_fraction')", '(10)'], {}), "(paired.tumor_config, ('algorithm', 'min_allele_fraction'), 10)\n", (5921, 5984), False, 'from bcbio import broad, utils\n'), ((6353, 6379), 'bcbio.utils.splitext_plus', 'utils.splitext_plus', (['fname'], {}), '(fname)\n', (6372, 6379), False, 'from bcbio import broad, utils\n'), ((6626, 6657), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (6641, 6657), False, 'import os\n')]
|
from django.db import models
class Deletable(models.Model):
deleted = models.BooleanField(default=False)
def delete(self, *args, **kwargs):
self.deleted = True
return self.save()
class Meta:
abstract = True
|
[
"django.db.models.BooleanField"
] |
[((76, 110), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (95, 110), False, 'from django.db import models\n')]
|
bl_info = {
"name": "RTE Debug",
"author": "<NAME>",
"blender": (2, 75, 0),
"location": "Info header, render engine menu",
"description": "Debug implementation of the Realtime Engine Framework",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"support": 'TESTING',
"category": "Render"}
if "bpy" in locals():
import imp
imp.reload(addon)
else:
import bpy
from .addon import DebugEngine
def register():
panels = [getattr(bpy.types, t) for t in dir(bpy.types) if 'PT' in t]
for panel in panels:
if hasattr(panel, 'COMPAT_ENGINES') and 'BLENDER_GAME' in panel.COMPAT_ENGINES:
panel.COMPAT_ENGINES.add('RTE_DEBUG')
bpy.utils.register_module(__name__)
def unregister():
panels = [getattr(bpy.types, t) for t in dir(bpy.types) if 'PT' in t]
for panel in panels:
if hasattr(panel, 'COMPAT_ENGINES') and 'RTE_FRAMEWORK' in panel.COMPAT_ENGINES:
panel.COMPAT_ENGINES.remove('RTE_DEBUG')
bpy.utils.unregister_module(__name__)
|
[
"imp.reload",
"bpy.utils.register_module",
"bpy.utils.unregister_module"
] |
[((371, 388), 'imp.reload', 'imp.reload', (['addon'], {}), '(addon)\n', (381, 388), False, 'import imp\n'), ((704, 739), 'bpy.utils.register_module', 'bpy.utils.register_module', (['__name__'], {}), '(__name__)\n', (729, 739), False, 'import bpy\n'), ((1005, 1042), 'bpy.utils.unregister_module', 'bpy.utils.unregister_module', (['__name__'], {}), '(__name__)\n', (1032, 1042), False, 'import bpy\n')]
|
import math
def segment_builder(arg, tail):
req_list = arg[0]
divisions = arg[1]
division_dec = divisions / 100
# number of segments to be created in the element passed
partition_count = int(math.ceil(100 / divisions))
if len(arg) == 3:
segments = arg[2]
else:
g = int(100 / divisions + 1)
segments = tuple([n for n in range(1, g)])
def builder(segments):
"""
Calculate the remainder of numbers that won't be evenly split
into groups, and calculates at which point the groups need to start
catering for the extra numbers.
Extra numbers are divided up into the last groups of the list given.
For example: if we have a list of [1, 2, 3, 4, 5] and we ask to split
the group in thirds, the ouput would be [1][2, 3][4, 5]
"""
remainder = int(len(req_list) % partition_count)
"""
Removes the remainder from the length of the passed in list, this way
we can continue with an even division. math.floor is used for precaution only.
"""
partition_size = math.floor(division_dec * (len(req_list) - remainder))
segment_storage = []
if remainder > 0:
"""
Calculates the number of extras from the length of the list passed.
Group extras: tells us when to start adding the extras to generated segments
"""
group_extras = partition_count - remainder
pos = 0 # keep track of position
if tail == False:
cycle = reversed(range(0, partition_count))
else:
cycle = range(0, partition_count)
for group in cycle:
if group < group_extras:
segment_storage.append([pos, pos + partition_size])
pos += partition_size
else:
segment_storage.append([pos, pos + partition_size + 1])
pos += partition_size + 1
else:
if tail == False:
cycle = reversed(range(0, len(req_list), partition_size))
else:
cycle = range(0, len(req_list), partition_size)
for ind in cycle:
segment_storage.append([ind, ind+partition_size])
return req_list[segment_storage[segments-1][0]:segment_storage[segments-1][1]]
if isinstance(segments, int) and segments * division_dec <= 1:
return builder(segments)
elif isinstance(segments, tuple):
multi_segment = []
for segment in segments:
multi_segment.append(builder(segment))
return multi_segment
|
[
"math.ceil"
] |
[((212, 238), 'math.ceil', 'math.ceil', (['(100 / divisions)'], {}), '(100 / divisions)\n', (221, 238), False, 'import math\n')]
|
import io
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from fastapi.staticfiles import StaticFiles
app = FastAPI()
# create a 'static files' directory
# create a '/static' prefix for all files
# serve files from the 'media/' directory under the '/static/' route
# /Big_Buck_Bunny_1080p.avi becomes '/static/Big_Buck_Bunny_1080p.avi'
# name='static' is used internally by FastAPI
app.mount("/static", StaticFiles(directory="media"), name="static")
@app.get("/")
async def main():
# open the movie file to stream it
movie = open("media/Big_Buck_Bunny_1080p.avi", "rb")
# return a stream response with the movie and a MIME type of 'video/avi'
return StreamingResponse(movie, media_type="video/avi")
|
[
"fastapi.responses.StreamingResponse",
"fastapi.staticfiles.StaticFiles",
"fastapi.FastAPI"
] |
[((138, 147), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (145, 147), False, 'from fastapi import FastAPI\n'), ((436, 466), 'fastapi.staticfiles.StaticFiles', 'StaticFiles', ([], {'directory': '"""media"""'}), "(directory='media')\n", (447, 466), False, 'from fastapi.staticfiles import StaticFiles\n'), ((701, 749), 'fastapi.responses.StreamingResponse', 'StreamingResponse', (['movie'], {'media_type': '"""video/avi"""'}), "(movie, media_type='video/avi')\n", (718, 749), False, 'from fastapi.responses import StreamingResponse\n')]
|
import numpy as np
import copy
class Particle:
def __init__(self, lb, ub):
"""Initialize the particle.
Attributes
----------
lb : float
lower bounds for initial values
ub : float
upper bounds for initial values
"""
self.lb = lb
self.ub = ub
self.position = np.random.uniform(lb, ub, size=lb.shape[0])
self.velocity = np.random.uniform(lb, ub, size=lb.shape[0])
self.fitness = None
self.pbest_position = self.position
self.pbest_fitness = float('inf')
def move(self):
self.position += self.velocity
class Swarm:
def __init__(self, function_list, n_particles, n_iterations,
lb, ub, w=0.7, c1=2.0, c2=2.0):
"""Initialize the swarm.
Attributes
---------
function_list : list
list of functions to optimize
n_particles : int
number of particles in swarm
n_iterations : int
number of optimization iterations
lb : float
lower bounds for initial values
ub : float
upper bounds for initial values
w : float
inertia weight
c1 : float
cognitive weight
c2 : float
social weight
"""
self.function_list = function_list
self.n_obj = len(function_list)
self.n_particles = n_particles
self.n_iterations = n_iterations
assert len(lb) == len(ub)
self.lb = np.array(lb)
self.ub = np.array(ub)
self.w = w
self.c1 = c1
self.c2 = c2
self.gbest_position = np.random.uniform(lb, ub, size=self.lb.shape[0])
self.gbest_fitness = float('inf')
self.population = []
self.iteration = 0
def reset_environment(self):
self.population = []
self.iteration = 0
def termination_check(self):
if self.iteration > self.n_iterations:
return False
else:
return True
def initialise_swarm(self):
for _ in range(self.n_particles):
self.population.append(Particle(self.lb, self.ub))
def eval_fitness(self, particle):
"""Evaluate particle fitness based on all functions in function_list"""
_fitness = 0
for func in self.function_list:
_fitness += func(particle.position)
particle.fitness = _fitness
def swarm_eval_fitness(self):
for particle in self.population:
self.eval_fitness(particle)
def update_velocity(self, particle):
inertia = self.w * particle.velocity
cognitive = (self.c1 * np.random.uniform()
* (particle.pbest_position - particle.position))
social = (self.c2 * np.random.uniform()
* (self.gbest_position - particle.position))
particle.velocity = inertia + cognitive + social
def swarm_update_velocity(self):
for particle in self.population:
self.update_velocity(particle)
def update_pbest(self, particle):
if particle.fitness < particle.pbest_fitness:
particle.pbest_fitness = particle.fitness
particle.pbest_position = particle.position
def update_gbest(self, particle):
if particle.fitness < self.gbest_fitness:
self.gbest_fitness = copy.deepcopy(particle.fitness)
self.gbest_position = copy.deepcopy(particle.position)
def swarm_update_best(self):
for particle in self.population:
self.update_pbest(particle)
self.update_gbest(particle)
def swarm_move(self):
for particle in self.population:
particle.move()
def optimise(self):
self.reset_environment()
self.initialise_swarm()
while self.termination_check():
self.swarm_eval_fitness()
self.swarm_update_best()
self.swarm_update_velocity()
self.swarm_move()
self.iteration += 1
if __name__ == '__main__':
print('MOPSO: Aggregating Approach')
def function_one(position):
return np.square(position[0])
def function_two(position):
return np.square(position[0] - 2)
function_list = [function_one, function_two]
n_particles = 30
n_iterations = 100
lb = [-100]
ub = [100]
swarm = Swarm(function_list=function_list,
n_particles=n_particles,
n_iterations=n_iterations,
lb=lb,
ub=ub)
swarm.optimise()
print('gbest_position: ', swarm.gbest_position)
print('gbest_fitness: ', swarm.gbest_fitness)
|
[
"numpy.random.uniform",
"copy.deepcopy",
"numpy.square",
"numpy.array"
] |
[((362, 405), 'numpy.random.uniform', 'np.random.uniform', (['lb', 'ub'], {'size': 'lb.shape[0]'}), '(lb, ub, size=lb.shape[0])\n', (379, 405), True, 'import numpy as np\n'), ((430, 473), 'numpy.random.uniform', 'np.random.uniform', (['lb', 'ub'], {'size': 'lb.shape[0]'}), '(lb, ub, size=lb.shape[0])\n', (447, 473), True, 'import numpy as np\n'), ((1556, 1568), 'numpy.array', 'np.array', (['lb'], {}), '(lb)\n', (1564, 1568), True, 'import numpy as np\n'), ((1587, 1599), 'numpy.array', 'np.array', (['ub'], {}), '(ub)\n', (1595, 1599), True, 'import numpy as np\n'), ((1693, 1741), 'numpy.random.uniform', 'np.random.uniform', (['lb', 'ub'], {'size': 'self.lb.shape[0]'}), '(lb, ub, size=self.lb.shape[0])\n', (1710, 1741), True, 'import numpy as np\n'), ((4201, 4223), 'numpy.square', 'np.square', (['position[0]'], {}), '(position[0])\n', (4210, 4223), True, 'import numpy as np\n'), ((4272, 4298), 'numpy.square', 'np.square', (['(position[0] - 2)'], {}), '(position[0] - 2)\n', (4281, 4298), True, 'import numpy as np\n'), ((3421, 3452), 'copy.deepcopy', 'copy.deepcopy', (['particle.fitness'], {}), '(particle.fitness)\n', (3434, 3452), False, 'import copy\n'), ((3487, 3519), 'copy.deepcopy', 'copy.deepcopy', (['particle.position'], {}), '(particle.position)\n', (3500, 3519), False, 'import copy\n'), ((2715, 2734), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2732, 2734), True, 'import numpy as np\n'), ((2833, 2852), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2850, 2852), True, 'import numpy as np\n')]
|
import pygame
from pygame.locals import *
from constants import *
from copy import deepcopy
import numpy as np
from heuristic import *
class Player(object):
def __init__(self, color, player_num):
self.color = color
self.direction = UP
self.player_num = player_num
self.move_counter = 0 # Keeps track of movement to regulate growth rate
loc = P1_LOC if player_num == 1 else P2_LOC
self.segments = [Rect(loc[0], loc[1], CELL_WIDTH, CELL_WIDTH)]
def direction_valid(self,direction):
if (direction == UP and self.direction == DOWN):
return False
if (direction == LEFT and self.direction == RIGHT):
return False
if (direction == DOWN and self.direction == UP):
return False
if (direction == RIGHT and self.direction == LEFT):
return False
return True
def set_direction(self, direction):
if self.direction_valid(direction):
self.direction = direction
def set_color(self, color):
self.color = color
def clone(self, player=None, direction=None):
if player == None:
player = self
cloned_player = deepcopy(player)
if direction != None:
cloned_player.direction = direction
cloned_player.move()
return cloned_player
def get_state(self, other_player):
state = np.zeros((GAME_HEIGHT/CELL_WIDTH, GAME_WIDTH/CELL_WIDTH))
for rect in self.segments:
loc = rect.topleft
x,y = loc[0]/CELL_WIDTH, loc[1]/CELL_WIDTH
state[y,x] = FRIENDLY
for rect in other_player.segments:
loc = rect.topleft
x,y = loc[0]/CELL_WIDTH, loc[1]/CELL_WIDTH
state[y,x] = OPPONENT
return state
def has_collided(self, other_player, head = None):
segments_to_check = self.segments[:]
if head == None:
head = self.segments[0]
segments_to_check.pop(0)
head_loc = head.topleft
return (not (0 <= head_loc[0] <= GAME_WIDTH - CELL_WIDTH) or
not (0 <= head_loc[1] <= GAME_HEIGHT - CELL_WIDTH) or
head.collidelist(segments_to_check) != -1 or
head.collidelist(other_player.segments) != -1)
def draw(self, display_surface):
for segment in self.segments:
pygame.draw.rect(display_surface, self.color, segment)
def move(self):
head_loc = self.segments[0].topleft
delta = DIRECTION_DELTAS[self.direction]
new_x = head_loc[0] + delta['x'] * CELL_WIDTH
new_y = head_loc[1] + delta['y'] * CELL_WIDTH
head = Rect(new_x, new_y, CELL_WIDTH, CELL_WIDTH)
self.segments.insert(0, head)
self.move_counter = (self.move_counter + 1) % PLAYER_GROWTH_RATE
if self.move_counter == 0:
self.segments.pop() # Remove last segment of tail
""" Chooses the next move to make in the game.
Subclasses of Player (aka custom bots) should override this method.
other_player is a dict object with the following key/values:
direction: The other player's current direction (i.e. UP)
segments: Copy of list of segments of the other player
"""
def choose_move(self, other_player):
self.move()
|
[
"pygame.draw.rect",
"copy.deepcopy",
"numpy.zeros"
] |
[((1204, 1220), 'copy.deepcopy', 'deepcopy', (['player'], {}), '(player)\n', (1212, 1220), False, 'from copy import deepcopy\n'), ((1417, 1478), 'numpy.zeros', 'np.zeros', (['(GAME_HEIGHT / CELL_WIDTH, GAME_WIDTH / CELL_WIDTH)'], {}), '((GAME_HEIGHT / CELL_WIDTH, GAME_WIDTH / CELL_WIDTH))\n', (1425, 1478), True, 'import numpy as np\n'), ((2397, 2451), 'pygame.draw.rect', 'pygame.draw.rect', (['display_surface', 'self.color', 'segment'], {}), '(display_surface, self.color, segment)\n', (2413, 2451), False, 'import pygame\n')]
|
import paho.mqtt.client as pmqtt
import paho.mqtt.subscribe as smqtt
import json
import time
import logging
class mqtt:
def __init__(self, broker: str, username: str, password: str, port=1883):
self.client = ""
self.broker = broker
self.port = port
self.username = username
self.password = password
self.connect()
def connect(self):
self.client = pmqtt.Client()
self.client.connect(host=self.broker, port=self.port)
def publish(self, topic:str, message:str):
if topic == "" or message == "":
raise Exception("Topic and Message required")
published = self.client.publish(topic, message)
while not published.is_published():
time.sleep(0.5)
print("published:", published.rc)
def subscribe(self, topics:[], cb:callable):
if topics == []:
raise Exception("Need a topic to listen to")
logging.debug("Starting the MQTT subscriber")
smqtt.callback(cb, topics, hostname=self.broker)
#subscribed.callback(cb,)
#self.client.subscribe(topic)
#self.client.on
|
[
"paho.mqtt.client.Client",
"time.sleep",
"logging.debug",
"paho.mqtt.subscribe.callback"
] |
[((413, 427), 'paho.mqtt.client.Client', 'pmqtt.Client', ([], {}), '()\n', (425, 427), True, 'import paho.mqtt.client as pmqtt\n'), ((959, 1004), 'logging.debug', 'logging.debug', (['"""Starting the MQTT subscriber"""'], {}), "('Starting the MQTT subscriber')\n", (972, 1004), False, 'import logging\n'), ((1013, 1061), 'paho.mqtt.subscribe.callback', 'smqtt.callback', (['cb', 'topics'], {'hostname': 'self.broker'}), '(cb, topics, hostname=self.broker)\n', (1027, 1061), True, 'import paho.mqtt.subscribe as smqtt\n'), ((753, 768), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (763, 768), False, 'import time\n')]
|
import re
from typing import Iterator, Tuple
import requests
from bs4 import BeautifulSoup
from ...db.models import BeerDB
from ...db.tables import Shop as DBShop
from . import NoBeersError, NotABeerError, Shop, ShopBeer
DIGITS = set("0123456789")
def keep_until_japanese(text: str) -> str:
chars = []
for c in text:
if ord(c) < 0x3000: # first japanese characters
chars.append(c)
else:
break
return "".join(chars)
class IchiGoIchiAle(Shop):
short_name = "ichigo"
display_name = "<NAME>"
def _iter_pages(self) -> Iterator[BeautifulSoup]:
i = 1
while True:
url = f"https://151l.shop/?mode=grp&gid=1978037&sort=n&page={i}"
page = requests.get(url).text
yield BeautifulSoup(page, "html.parser")
i += 1
def _iter_page_beers(self, page_soup: BeautifulSoup) -> Iterator[Tuple[BeautifulSoup, str]]:
empty = True
for item in page_soup("li", class_="productlist_list"):
if item.find("span", class_="item_soldout") is not None:
continue
url = "https://151l.shop/" + item.find("a")["href"]
page = requests.get(url).text
yield BeautifulSoup(page, "html.parser"), url
empty = False
if empty:
raise NoBeersError
def _parse_beer_page(self, page_soup, url) -> ShopBeer:
title = page_soup.find("h2", class_="product_name").get_text().strip()
name_match = re.search(r"[((]([^))]*)[))]$", title)
if name_match is None:
raise NotABeerError
raw_name = name_match.group(1).strip()
price_text = page_soup.find("span", class_="product_price").get_text().strip()
price_match = re.search(r"税込([0-9,]+)円", price_text)
if price_match is None:
raise NotABeerError
price = int(price_match.group(1).replace(",", ""))
desc = page_soup.find("div", class_="product_explain").get_text()
ml_match = re.search(r"容量:(\d+)ml", desc.lower())
if ml_match is None:
raise NotABeerError
ml = int(ml_match.group(1))
image_url = page_soup.find("img", class_="product_img_main_img")["src"]
try:
return ShopBeer(
raw_name=raw_name,
url=url,
milliliters=ml,
price=price,
quantity=1,
image_url=image_url,
)
except UnboundLocalError:
raise NotABeerError
def iter_beers(self) -> Iterator[ShopBeer]:
for listing_page in self._iter_pages():
try:
for beer_page, url in self._iter_page_beers(listing_page):
try:
yield self._parse_beer_page(beer_page, url)
except NotABeerError:
continue
except Exception as e:
print(f"Unexpected exception while parsing page, skipping.\n{e}")
except NoBeersError:
break
def get_db_entry(self, db: BeerDB) -> DBShop:
return db.insert_shop(
name=self.display_name,
url="https://151l.shop/",
image_url="https://img21.shop-pro.jp/PA01423/875/PA01423875.png?cmsp_timestamp=20201017123822",
shipping_fee=950,
)
|
[
"bs4.BeautifulSoup",
"re.search",
"requests.get"
] |
[((1514, 1551), 're.search', 're.search', (['"""[((]([^))]*)[))]$"""', 'title'], {}), "('[((]([^))]*)[))]$', title)\n", (1523, 1551), False, 'import re\n'), ((1772, 1809), 're.search', 're.search', (['"""税込([0-9,]+)円"""', 'price_text'], {}), "('税込([0-9,]+)円', price_text)\n", (1781, 1809), False, 'import re\n'), ((742, 759), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (754, 759), False, 'import requests\n'), ((783, 817), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page', '"""html.parser"""'], {}), "(page, 'html.parser')\n", (796, 817), False, 'from bs4 import BeautifulSoup\n'), ((1197, 1214), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1209, 1214), False, 'import requests\n'), ((1238, 1272), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page', '"""html.parser"""'], {}), "(page, 'html.parser')\n", (1251, 1272), False, 'from bs4 import BeautifulSoup\n')]
|
#= -------------------------------------------------------------------------
# @file hello_world.py
#
# @date 02/14/16 10:41:21
# @author <NAME>
# @email <EMAIL>
#
# @brief
#
# @detail
#
# Licence:
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
#---------------------------------------------------------------------------=#
import kivy
kivy.require('1.9.0')
from kivy.app import App
from kivy.uix.button import Label
class HelloApp(App):
def build(self):
return Label(text='Hello World')
if __name__ == "__main__":
HelloApp().run()
|
[
"kivy.require",
"kivy.uix.button.Label"
] |
[((854, 875), 'kivy.require', 'kivy.require', (['"""1.9.0"""'], {}), "('1.9.0')\n", (866, 875), False, 'import kivy\n'), ((995, 1020), 'kivy.uix.button.Label', 'Label', ([], {'text': '"""Hello World"""'}), "(text='Hello World')\n", (1000, 1020), False, 'from kivy.uix.button import Label\n')]
|
import folium as folium
from flask import Flask, render_template
import rethinkdb as rtdb
import os
from dotenv import load_dotenv
import requests
import json
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello World!'
def get_route_polyline(route):
# "http://routesapi.chartr.in/transit/dimts/get_transit_route_details?route=534UP"
url = f"http://routesapi.chartr.in/transit/dimts/get_transit_route_details?route={route}UP"
response = requests.get(url)
response_dict = json.loads(response.text)
polyline = list()
stop_names = list()
if response_dict['msg'] == 'Found':
stop_list = response_dict['transit_route'][0]['stops']
for stop in stop_list:
polyline.append((float(stop['lat']), float(stop['lon'])))
stop_names.append(stop['name'])
return True, polyline, stop_names
else:
return False, polyline, stop_names
def plot_map(bus_number, coords, ac, route=None):
m = folium.Map(location=[28.630691, 77.217648], zoom_start=11)
if ac == "ac":
folium.Marker(coords, popup=bus_number, icon=folium.Icon(color='red')).add_to(m)
else:
folium.Marker(coords, popup=bus_number, icon=folium.Icon(color='red')).add_to(m)
if route is not None:
got_polyline, route_polyline, stop_names = get_route_polyline(route)
if got_polyline:
folium.PolyLine(route_polyline, color='black', weight=1.5, opacity=1).add_to(m)
for idx, stop_coord in enumerate(route_polyline):
stop_name = stop_names[idx]
folium.CircleMarker(location=stop_coord, radius=4, popup=stop_name,
fill_color='blue', color='red', fill_opacity=1).add_to(m)
return m._repr_html_()
# view/DL1PC0588/534DOWN
@app.route("/view/<bus_number>/<route>", methods=["GET"])
def view(bus_number, route):
# rethinkdb connection
env_path = '.env'
load_dotenv(env_path)
rDB_name = os.getenv("rDB_name")
realtime_table = os.getenv("realtime_table")
host = os.getenv("host")
port = os.getenv("port")
r = rtdb.RethinkDB()
rconn = r.connect(host=host, port=port)
bus_data = json.loads(r.db(rDB_name).table(realtime_table).get(bus_number).to_json().run(rconn))
if bus_data is not None:
ac = bus_data['ac']
coordinates = bus_data['lat'], bus_data['lng']
folium_map = plot_map(bus_number, coordinates, ac, route=route)
return render_template('views/view.html', map=folium_map, bus=bus_number)
else:
return render_template('views/not_found.html')
if __name__ == '__main__':
app.run()
|
[
"json.loads",
"flask.Flask",
"rethinkdb.RethinkDB",
"dotenv.load_dotenv",
"folium.Map",
"requests.get",
"flask.render_template",
"folium.PolyLine",
"folium.CircleMarker",
"os.getenv",
"folium.Icon"
] |
[((166, 181), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (171, 181), False, 'from flask import Flask, render_template\n'), ((476, 493), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (488, 493), False, 'import requests\n'), ((514, 539), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (524, 539), False, 'import json\n'), ((989, 1047), 'folium.Map', 'folium.Map', ([], {'location': '[28.630691, 77.217648]', 'zoom_start': '(11)'}), '(location=[28.630691, 77.217648], zoom_start=11)\n', (999, 1047), True, 'import folium as folium\n'), ((1953, 1974), 'dotenv.load_dotenv', 'load_dotenv', (['env_path'], {}), '(env_path)\n', (1964, 1974), False, 'from dotenv import load_dotenv\n'), ((1990, 2011), 'os.getenv', 'os.getenv', (['"""rDB_name"""'], {}), "('rDB_name')\n", (1999, 2011), False, 'import os\n'), ((2033, 2060), 'os.getenv', 'os.getenv', (['"""realtime_table"""'], {}), "('realtime_table')\n", (2042, 2060), False, 'import os\n'), ((2072, 2089), 'os.getenv', 'os.getenv', (['"""host"""'], {}), "('host')\n", (2081, 2089), False, 'import os\n'), ((2101, 2118), 'os.getenv', 'os.getenv', (['"""port"""'], {}), "('port')\n", (2110, 2118), False, 'import os\n'), ((2128, 2144), 'rethinkdb.RethinkDB', 'rtdb.RethinkDB', ([], {}), '()\n', (2142, 2144), True, 'import rethinkdb as rtdb\n'), ((2490, 2556), 'flask.render_template', 'render_template', (['"""views/view.html"""'], {'map': 'folium_map', 'bus': 'bus_number'}), "('views/view.html', map=folium_map, bus=bus_number)\n", (2505, 2556), False, 'from flask import Flask, render_template\n'), ((2582, 2621), 'flask.render_template', 'render_template', (['"""views/not_found.html"""'], {}), "('views/not_found.html')\n", (2597, 2621), False, 'from flask import Flask, render_template\n'), ((1395, 1464), 'folium.PolyLine', 'folium.PolyLine', (['route_polyline'], {'color': '"""black"""', 'weight': '(1.5)', 'opacity': '(1)'}), "(route_polyline, color='black', weight=1.5, opacity=1)\n", 
(1410, 1464), True, 'import folium as folium\n'), ((1120, 1144), 'folium.Icon', 'folium.Icon', ([], {'color': '"""red"""'}), "(color='red')\n", (1131, 1144), True, 'import folium as folium\n'), ((1219, 1243), 'folium.Icon', 'folium.Icon', ([], {'color': '"""red"""'}), "(color='red')\n", (1230, 1243), True, 'import folium as folium\n'), ((1597, 1716), 'folium.CircleMarker', 'folium.CircleMarker', ([], {'location': 'stop_coord', 'radius': '(4)', 'popup': 'stop_name', 'fill_color': '"""blue"""', 'color': '"""red"""', 'fill_opacity': '(1)'}), "(location=stop_coord, radius=4, popup=stop_name,\n fill_color='blue', color='red', fill_opacity=1)\n", (1616, 1716), True, 'import folium as folium\n')]
|
from baseconv import BaseConverter, BASE16_ALPHABET
from uuid import UUID, uuid4
BASE = 16
HEX_DOUBLE_WORD_LENGTH = 8
HEX_DOUBLE_WORD_UPPER_BYTE = slice(-HEX_DOUBLE_WORD_LENGTH, -(HEX_DOUBLE_WORD_LENGTH - 2))
MAX_DOUBLE_WORD = (1 << 31)
OLD_BIT_FLAG = 0x80
NEW_BIT_FLAG_MASK = OLD_BIT_FLAG - 1
BASE16 = BaseConverter(BASE16_ALPHABET.lower())
class UUIDGenerator():
uuid = None
def __init__(self):
while True:
uuid = uuid4()
replacer = BASE16.encode(int(BASE16.decode(str(uuid)[HEX_DOUBLE_WORD_UPPER_BYTE])) & NEW_BIT_FLAG_MASK)
if int(replacer, BASE) >= 16:
break
self.uuid = UUID(str(uuid)[:-(HEX_DOUBLE_WORD_LENGTH)] + replacer + str(uuid)[-(HEX_DOUBLE_WORD_LENGTH-2):])
@staticmethod
def new_version(uuid):
return not bool(int(BASE16.decode(str(uuid)[HEX_DOUBLE_WORD_UPPER_BYTE])) & OLD_BIT_FLAG)
@staticmethod
def int_to_uuid(int_id):
int_id = int(int_id)
myid = str(uuid4())
replacer1 = BASE16.encode(MAX_DOUBLE_WORD - int_id)
replacer2 = BASE16.encode(int(BASE16.decode(myid[HEX_DOUBLE_WORD_UPPER_BYTE])) | OLD_BIT_FLAG)
return UUID(replacer1 + myid[HEX_DOUBLE_WORD_LENGTH:-(HEX_DOUBLE_WORD_LENGTH)] +
replacer2 + myid[-(HEX_DOUBLE_WORD_LENGTH-2):])
@staticmethod
def uuid_to_int(uuid):
inverse_id = int(BASE16.decode(uuid[:HEX_DOUBLE_WORD_LENGTH]))
return (MAX_DOUBLE_WORD - inverse_id)
@staticmethod
def str_to_uuid(str_uuid):
try:
return UUID(hex=str_uuid)
except:
raise ValueError("UUID (%s provided is NOT a proper uuid" % str_uuid)
@staticmethod
def format_uuid_hex(uuid_str):
if len(uuid_str) != 32:
raise ValueError("UUID (%s provided is NOT a proper uuid" % uuid_str)
return uuid_str[:7]+'-'+uuid_str[7:11]+'-'+uuid_str[11:15]+'-'+uuid_str[15:19]+'-'+uuid_str[19:]
|
[
"uuid.uuid4",
"baseconv.BASE16_ALPHABET.lower",
"uuid.UUID"
] |
[((318, 341), 'baseconv.BASE16_ALPHABET.lower', 'BASE16_ALPHABET.lower', ([], {}), '()\n', (339, 341), False, 'from baseconv import BaseConverter, BASE16_ALPHABET\n'), ((1180, 1305), 'uuid.UUID', 'UUID', (['(replacer1 + myid[HEX_DOUBLE_WORD_LENGTH:-HEX_DOUBLE_WORD_LENGTH] +\n replacer2 + myid[-(HEX_DOUBLE_WORD_LENGTH - 2):])'], {}), '(replacer1 + myid[HEX_DOUBLE_WORD_LENGTH:-HEX_DOUBLE_WORD_LENGTH] +\n replacer2 + myid[-(HEX_DOUBLE_WORD_LENGTH - 2):])\n', (1184, 1305), False, 'from uuid import UUID, uuid4\n'), ((447, 454), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (452, 454), False, 'from uuid import UUID, uuid4\n'), ((992, 999), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (997, 999), False, 'from uuid import UUID, uuid4\n'), ((1567, 1585), 'uuid.UUID', 'UUID', ([], {'hex': 'str_uuid'}), '(hex=str_uuid)\n', (1571, 1585), False, 'from uuid import UUID, uuid4\n')]
|
import board
import neopixel
import time
from time import sleep
pixel_pin = board.D18
num_pixels = 8
ORDER = neopixel.RGB
ColorDict = { "black":0x000000, "white":0x101010, "red":0x100000, "blue":0x000010, "green":0x001000, "yellow":0x101000, "orange":0x100600, "pink":0x100508, "teal":0x100508, "teal":0x000808, "purple":0x080008}
#Example of colorString : "Fear:blue,Surprise:yellow"
def SetColors(colorString):
EmoDict = {}
colorsEmo = colorString.split(",")
for k in range(len(colorsEmo)):
oneColorEmo = colorsEmo[k].split(":")
#print(oneColorEmo, ColorDict(oneColorEmo[1]))
EmoDict[oneColorEmo[0]] = ColorDict[oneColorEmo[1]]
return EmoDict
def LightAll(color):
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=1, auto_write=False, pixel_order = ORDER)
pixels.fill(ColorDict[color])
def LightLast(emotion, EmoDict):
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=1, auto_write=False, pixel_order = ORDER)
if (len(pixels)>1):
for k in range(1, len(pixels)):
pixels[k] = pixels[k-1]
pixels[0] = EmoDict[emotion]
print(pixels)
pixels.show()
def play():
EmoDict = SetColors("neutral:white,surprise:yellow,happiness:orange,fear:blue,disgust:green,sad:purple")
LightAll("black")
while(True):
LightLast("happiness",EmoDict)
time.sleep(0.8)
LightLast("surprise",EmoDict)
time.sleep(0.8)
LightLast("fear",EmoDict)
time.sleep(0.8)
LightLast("neutral",EmoDict)
time.sleep(0.8)
LightLast("sad",EmoDict)
time.sleep(0.8)
return 0
play()
|
[
"neopixel.NeoPixel",
"time.sleep"
] |
[((722, 817), 'neopixel.NeoPixel', 'neopixel.NeoPixel', (['pixel_pin', 'num_pixels'], {'brightness': '(1)', 'auto_write': '(False)', 'pixel_order': 'ORDER'}), '(pixel_pin, num_pixels, brightness=1, auto_write=False,\n pixel_order=ORDER)\n', (739, 817), False, 'import neopixel\n'), ((897, 992), 'neopixel.NeoPixel', 'neopixel.NeoPixel', (['pixel_pin', 'num_pixels'], {'brightness': '(1)', 'auto_write': '(False)', 'pixel_order': 'ORDER'}), '(pixel_pin, num_pixels, brightness=1, auto_write=False,\n pixel_order=ORDER)\n', (914, 992), False, 'import neopixel\n'), ((1368, 1383), 'time.sleep', 'time.sleep', (['(0.8)'], {}), '(0.8)\n', (1378, 1383), False, 'import time\n'), ((1430, 1445), 'time.sleep', 'time.sleep', (['(0.8)'], {}), '(0.8)\n', (1440, 1445), False, 'import time\n'), ((1488, 1503), 'time.sleep', 'time.sleep', (['(0.8)'], {}), '(0.8)\n', (1498, 1503), False, 'import time\n'), ((1549, 1564), 'time.sleep', 'time.sleep', (['(0.8)'], {}), '(0.8)\n', (1559, 1564), False, 'import time\n'), ((1606, 1621), 'time.sleep', 'time.sleep', (['(0.8)'], {}), '(0.8)\n', (1616, 1621), False, 'import time\n')]
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.apphosting.tools.devappserver2.devappserver2."""
import argparse
import os
import platform
import unittest
import google
import mock
from google.appengine.tools.devappserver2 import devappserver2
class WinError(Exception):
pass
class FakeApplicationConfiguration(object):
def __init__(self, modules):
self.modules = modules
class FakeModuleConfiguration(object):
def __init__(self, module_name):
self.module_name = module_name
class CreateModuleToSettingTest(unittest.TestCase):
def setUp(self):
self.application_configuration = FakeApplicationConfiguration([
FakeModuleConfiguration('m1'), FakeModuleConfiguration('m2'),
FakeModuleConfiguration('m3')])
def test_none(self):
self.assertEquals(
{},
devappserver2.DevelopmentServer._create_module_to_setting(
None, self.application_configuration, '--option'))
def test_dict(self):
self.assertEquals(
{'m1': 3, 'm3': 1},
devappserver2.DevelopmentServer._create_module_to_setting(
{'m1': 3, 'm3': 1}, self.application_configuration, '--option'))
def test_single_value(self):
self.assertEquals(
{'m1': True, 'm2': True, 'm3': True},
devappserver2.DevelopmentServer._create_module_to_setting(
True, self.application_configuration, '--option'))
def test_dict_with_unknown_modules(self):
self.assertEquals(
{'m1': 3.5},
devappserver2.DevelopmentServer._create_module_to_setting(
{'m1': 3.5, 'm4': 2.7}, self.application_configuration, '--option'))
class DatastoreEmulatorSupportcheckTest(unittest.TestCase):
@mock.patch.object(os.path, 'exists', return_value=False)
@mock.patch.object(devappserver2.DevelopmentServer,
'_correct_datastore_emulator_cmd', return_value=None)
def test_fail_missing_emulator(self, mock_correction, unused_mock):
options = argparse.Namespace()
# Following flags simulate the scenario of invoking dev_appserver.py from
# google-cloud-sdk/platform/google_appengine
options.support_datastore_emulator = True
options.datastore_emulator_cmd = None
with self.assertRaises(devappserver2.MissingDatastoreEmulatorError) as ctx:
dev_server = devappserver2.DevelopmentServer()
dev_server._options = options
dev_server._check_datastore_emulator_support()
mock_correction.assert_called_once_with()
self.assertIn('Cannot find Cloud Datastore Emulator', ctx.exception.message)
class PlatformSupportCheckTest(unittest.TestCase):
def test_succeed_non_python3_windows(self):
with mock.patch.object(platform, 'system', return_value='Windows'):
devappserver2.DevelopmentServer._check_platform_support({'python2'})
platform.system.assert_not_called()
def test_succeed_python3_non_windows(self):
with mock.patch.object(platform, 'system', return_value='Linux'):
devappserver2.DevelopmentServer._check_platform_support({'python3'})
platform.system.assert_called_once_with()
def test_fail_python3_windows(self):
with mock.patch.object(platform, 'system', return_value='Windows'):
with self.assertRaises(OSError):
devappserver2.DevelopmentServer._check_platform_support(
{'python3', 'python2'})
platform.system.assert_called_once_with()
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"mock.patch.object",
"argparse.Namespace",
"google.appengine.tools.devappserver2.devappserver2.DevelopmentServer._create_module_to_setting",
"platform.system.assert_called_once_with",
"platform.system.assert_not_called",
"google.appengine.tools.devappserver2.devappserver2.DevelopmentServer._check_platform_support",
"google.appengine.tools.devappserver2.devappserver2.DevelopmentServer"
] |
[((2268, 2324), 'mock.patch.object', 'mock.patch.object', (['os.path', '"""exists"""'], {'return_value': '(False)'}), "(os.path, 'exists', return_value=False)\n", (2285, 2324), False, 'import mock\n'), ((2328, 2436), 'mock.patch.object', 'mock.patch.object', (['devappserver2.DevelopmentServer', '"""_correct_datastore_emulator_cmd"""'], {'return_value': 'None'}), "(devappserver2.DevelopmentServer,\n '_correct_datastore_emulator_cmd', return_value=None)\n", (2345, 2436), False, 'import mock\n'), ((3985, 4000), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3998, 4000), False, 'import unittest\n'), ((2538, 2558), 'argparse.Namespace', 'argparse.Namespace', ([], {}), '()\n', (2556, 2558), False, 'import argparse\n'), ((1393, 1505), 'google.appengine.tools.devappserver2.devappserver2.DevelopmentServer._create_module_to_setting', 'devappserver2.DevelopmentServer._create_module_to_setting', (['None', 'self.application_configuration', '"""--option"""'], {}), "(None, self.\n application_configuration, '--option')\n", (1450, 1505), False, 'from google.appengine.tools.devappserver2 import devappserver2\n'), ((1598, 1724), 'google.appengine.tools.devappserver2.devappserver2.DevelopmentServer._create_module_to_setting', 'devappserver2.DevelopmentServer._create_module_to_setting', (["{'m1': 3, 'm3': 1}", 'self.application_configuration', '"""--option"""'], {}), "({'m1': 3, 'm3': 1\n }, self.application_configuration, '--option')\n", (1655, 1724), False, 'from google.appengine.tools.devappserver2 import devappserver2\n'), ((1843, 1955), 'google.appengine.tools.devappserver2.devappserver2.DevelopmentServer._create_module_to_setting', 'devappserver2.DevelopmentServer._create_module_to_setting', (['(True)', 'self.application_configuration', '"""--option"""'], {}), "(True, self.\n application_configuration, '--option')\n", (1900, 1955), False, 'from google.appengine.tools.devappserver2 import devappserver2\n'), ((2062, 2191), 
'google.appengine.tools.devappserver2.devappserver2.DevelopmentServer._create_module_to_setting', 'devappserver2.DevelopmentServer._create_module_to_setting', (["{'m1': 3.5, 'm4': 2.7}", 'self.application_configuration', '"""--option"""'], {}), "({'m1': 3.5, 'm4':\n 2.7}, self.application_configuration, '--option')\n", (2119, 2191), False, 'from google.appengine.tools.devappserver2 import devappserver2\n'), ((2873, 2906), 'google.appengine.tools.devappserver2.devappserver2.DevelopmentServer', 'devappserver2.DevelopmentServer', ([], {}), '()\n', (2904, 2906), False, 'from google.appengine.tools.devappserver2 import devappserver2\n'), ((3234, 3295), 'mock.patch.object', 'mock.patch.object', (['platform', '"""system"""'], {'return_value': '"""Windows"""'}), "(platform, 'system', return_value='Windows')\n", (3251, 3295), False, 'import mock\n'), ((3303, 3371), 'google.appengine.tools.devappserver2.devappserver2.DevelopmentServer._check_platform_support', 'devappserver2.DevelopmentServer._check_platform_support', (["{'python2'}"], {}), "({'python2'})\n", (3358, 3371), False, 'from google.appengine.tools.devappserver2 import devappserver2\n'), ((3378, 3413), 'platform.system.assert_not_called', 'platform.system.assert_not_called', ([], {}), '()\n', (3411, 3413), False, 'import platform\n'), ((3470, 3529), 'mock.patch.object', 'mock.patch.object', (['platform', '"""system"""'], {'return_value': '"""Linux"""'}), "(platform, 'system', return_value='Linux')\n", (3487, 3529), False, 'import mock\n'), ((3537, 3605), 'google.appengine.tools.devappserver2.devappserver2.DevelopmentServer._check_platform_support', 'devappserver2.DevelopmentServer._check_platform_support', (["{'python3'}"], {}), "({'python3'})\n", (3592, 3605), False, 'from google.appengine.tools.devappserver2 import devappserver2\n'), ((3612, 3653), 'platform.system.assert_called_once_with', 'platform.system.assert_called_once_with', ([], {}), '()\n', (3651, 3653), False, 'import platform\n'), ((3703, 3764), 
'mock.patch.object', 'mock.patch.object', (['platform', '"""system"""'], {'return_value': '"""Windows"""'}), "(platform, 'system', return_value='Windows')\n", (3720, 3764), False, 'import mock\n'), ((3912, 3953), 'platform.system.assert_called_once_with', 'platform.system.assert_called_once_with', ([], {}), '()\n', (3951, 3953), False, 'import platform\n'), ((3813, 3892), 'google.appengine.tools.devappserver2.devappserver2.DevelopmentServer._check_platform_support', 'devappserver2.DevelopmentServer._check_platform_support', (["{'python3', 'python2'}"], {}), "({'python3', 'python2'})\n", (3868, 3892), False, 'from google.appengine.tools.devappserver2 import devappserver2\n')]
|
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from . import views
app_name = 'users'
urlpatterns = [
# ex: /users/signup
url(r'^signup/', views.SignupView.as_view(), name='signup'),
# ex: /users/login
url(r'^login/', auth_views.login, name='login'),
# ex: /users/logout
url(r'^logout/', auth_views.logout, name='logout'),
# ex: /users/profile
url(r'^profile/', views.ProfileView.as_view(), name='profile'),
]
|
[
"django.conf.urls.url"
] |
[((257, 303), 'django.conf.urls.url', 'url', (['"""^login/"""', 'auth_views.login'], {'name': '"""login"""'}), "('^login/', auth_views.login, name='login')\n", (260, 303), False, 'from django.conf.urls import url\n'), ((334, 383), 'django.conf.urls.url', 'url', (['"""^logout/"""', 'auth_views.logout'], {'name': '"""logout"""'}), "('^logout/', auth_views.logout, name='logout')\n", (337, 383), False, 'from django.conf.urls import url\n')]
|
import numpy as np
def to_array(image):
array = np.array(image, dtype=np.float32)[..., :3]
array = array / 255.
return array
def l2_normalize(x, axis=0):
norm = np.linalg.norm(x, axis=axis, keepdims=True)
return x / norm
def distance(a, b):
# Euclidean distance
# return np.linalg.norm(a - b)
# Cosine distance, ||a|| and ||b|| is one because embeddings are normalized.
# No need to compute np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
return np.dot(a, b)
|
[
"numpy.array",
"numpy.dot",
"numpy.linalg.norm"
] |
[((180, 223), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (194, 223), True, 'import numpy as np\n'), ((498, 510), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (504, 510), True, 'import numpy as np\n'), ((53, 86), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.float32'}), '(image, dtype=np.float32)\n', (61, 86), True, 'import numpy as np\n')]
|
# coding: utf-8
#------------------------------
# [从]服务器上报
#------------------------------
import sys
import os
import json
import time
import threading
import subprocess
import shutil
sys.path.append("/usr/local/lib/python2.7/site-packages")
import psutil
root_dir = os.getcwd()
sys.path.append(root_dir + "/class/core")
reload(sys)
sys.setdefaultencoding('utf-8')
import db
import common
#------------Private Methods--------------
def updateStatus(sid, status):
common.M('video_tmp').where(
"id=?", (sid,)).setField('status', status)
def isMasterNode():
run_model = common.getSysKV('run_model')
run_is_master = common.getSysKV('run_is_master')
if (run_model == '1') or (run_is_master == '1'):
return True
return False
#------------Private Methods--------------
def reportData(data):
_list = common.M('node').field('id,port,name,ip').where(
'ismaster=?', (1,)).select()
if len(_list) > 0:
_url = "http://" + str(_list[0]['ip']) + \
":" + str(_list[0]['port'])
api_url = _url + "/async_master_api/reportData"
ret = common.httpPost(api_url, {
"mark": common.getSysKV('run_mark'),
"data": data,
'name': _list[0]['name']
})
rr = json.loads(ret)
return rr
def pingServer():
_list = common.M('node').field('id,port,name,ip').select()
for x in xrange(0, len(_list)):
_url = "http://" + str(_list[x]['ip']) + \
":" + str(_list[x]['port'])
api_url = _url + "/async_master_api/ping"
try:
ret = common.httpPost(api_url, {
"mark": common.getSysKV('run_mark'),
'name': _list[x]['name']
})
rr = json.loads(ret)
if rr['code'] == 0:
common.M('node').where(
'name=?', (_list[x]['name'],)).setField('status', 1)
except Exception as e:
common.M('node').where(
'name=?', (_list[x]['name'],)).setField('status', 0)
return True
def serverReport():
time_sleep = 3
while True:
if isMasterNode():
time.sleep(time_sleep)
continue
c = os.getloadavg()
data = {}
data['one'] = float(c[0])
data['five'] = float(c[1])
data['fifteen'] = float(c[2])
data['max'] = psutil.cpu_count() * 2
data['limit'] = data['max']
data['safe'] = data['max'] * 0.75
data['report_time'] = common.getDate()
r = reportData(data)
if r['code'] != 0:
print('同步失败![%s]', common.getDate())
time.sleep(time_sleep)
def serverPing():
while True:
pingServer()
time.sleep(3)
def startTask():
import time
try:
while True:
time.sleep(2)
except:
time.sleep(60)
startTask()
if __name__ == "__main__":
t = threading.Thread(target=serverReport)
t.setDaemon(True)
t.start()
t = threading.Thread(target=serverPing)
t.setDaemon(True)
t.start()
startTask()
|
[
"sys.path.append",
"threading.Thread",
"json.loads",
"os.getcwd",
"os.getloadavg",
"common.getSysKV",
"time.sleep",
"common.getDate",
"common.M",
"sys.setdefaultencoding",
"psutil.cpu_count"
] |
[((187, 244), 'sys.path.append', 'sys.path.append', (['"""/usr/local/lib/python2.7/site-packages"""'], {}), "('/usr/local/lib/python2.7/site-packages')\n", (202, 244), False, 'import sys\n'), ((271, 282), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (280, 282), False, 'import os\n'), ((283, 324), 'sys.path.append', 'sys.path.append', (["(root_dir + '/class/core')"], {}), "(root_dir + '/class/core')\n", (298, 324), False, 'import sys\n'), ((338, 369), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (360, 369), False, 'import sys\n'), ((593, 621), 'common.getSysKV', 'common.getSysKV', (['"""run_model"""'], {}), "('run_model')\n", (608, 621), False, 'import common\n'), ((642, 674), 'common.getSysKV', 'common.getSysKV', (['"""run_is_master"""'], {}), "('run_is_master')\n", (657, 674), False, 'import common\n'), ((2934, 2971), 'threading.Thread', 'threading.Thread', ([], {'target': 'serverReport'}), '(target=serverReport)\n', (2950, 2971), False, 'import threading\n'), ((3017, 3052), 'threading.Thread', 'threading.Thread', ([], {'target': 'serverPing'}), '(target=serverPing)\n', (3033, 3052), False, 'import threading\n'), ((1281, 1296), 'json.loads', 'json.loads', (['ret'], {}), '(ret)\n', (1291, 1296), False, 'import json\n'), ((2228, 2243), 'os.getloadavg', 'os.getloadavg', ([], {}), '()\n', (2241, 2243), False, 'import os\n'), ((2522, 2538), 'common.getDate', 'common.getDate', ([], {}), '()\n', (2536, 2538), False, 'import common\n'), ((2654, 2676), 'time.sleep', 'time.sleep', (['time_sleep'], {}), '(time_sleep)\n', (2664, 2676), False, 'import time\n'), ((2742, 2755), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2752, 2755), False, 'import time\n'), ((1761, 1776), 'json.loads', 'json.loads', (['ret'], {}), '(ret)\n', (1771, 1776), False, 'import json\n'), ((2171, 2193), 'time.sleep', 'time.sleep', (['time_sleep'], {}), '(time_sleep)\n', (2181, 2193), False, 'import time\n'), ((2391, 2409), 'psutil.cpu_count', 
'psutil.cpu_count', ([], {}), '()\n', (2407, 2409), False, 'import psutil\n'), ((2832, 2845), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2842, 2845), False, 'import time\n'), ((2866, 2880), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (2876, 2880), False, 'import time\n'), ((1164, 1191), 'common.getSysKV', 'common.getSysKV', (['"""run_mark"""'], {}), "('run_mark')\n", (1179, 1191), False, 'import common\n'), ((2635, 2651), 'common.getDate', 'common.getDate', ([], {}), '()\n', (2649, 2651), False, 'import common\n'), ((475, 496), 'common.M', 'common.M', (['"""video_tmp"""'], {}), "('video_tmp')\n", (483, 496), False, 'import common\n'), ((1347, 1363), 'common.M', 'common.M', (['"""node"""'], {}), "('node')\n", (1355, 1363), False, 'import common\n'), ((1659, 1686), 'common.getSysKV', 'common.getSysKV', (['"""run_mark"""'], {}), "('run_mark')\n", (1674, 1686), False, 'import common\n'), ((845, 861), 'common.M', 'common.M', (['"""node"""'], {}), "('node')\n", (853, 861), False, 'import common\n'), ((1825, 1841), 'common.M', 'common.M', (['"""node"""'], {}), "('node')\n", (1833, 1841), False, 'import common\n'), ((1965, 1981), 'common.M', 'common.M', (['"""node"""'], {}), "('node')\n", (1973, 1981), False, 'import common\n')]
|
from spydashserver.plugins import PluginConfig
plugin_config = PluginConfig("notes", "notes.Notes", models='notes.models')
|
[
"spydashserver.plugins.PluginConfig"
] |
[((64, 123), 'spydashserver.plugins.PluginConfig', 'PluginConfig', (['"""notes"""', '"""notes.Notes"""'], {'models': '"""notes.models"""'}), "('notes', 'notes.Notes', models='notes.models')\n", (76, 123), False, 'from spydashserver.plugins import PluginConfig\n')]
|
''' control systems - ode simulation
@link https://www.youtube.com/watch?v=yp5x8RMNi7o
'''
import numpy as np
from scipy.integrate import odeint
from matplotlib import pyplot as plt
def sys_ode(x, t):
# set system constants
c = 4 # damping constant
k = 2 # spring stiffness constant
m = 20 # point-mass
F = 5 # input force into the system
# compute state first derivative
dx1 = x[1]
dx2 = (F - c*x[1] - k*x[0])/m
return [dx1, dx2]
def sim():
# set constants
t_0 = 0
t_f = 60
period = 0.1
# set state initial condition
x_init = [0, 0]
# set a discrete time stamp
t = np.arange(t_0, t_f, period)
x = odeint(sys_ode, x_init, t)
x1 = x[:,0]
x2 = x[:,1]
plt.plot(t,x1)
plt.plot(t,x2)
plt.title('Mass-Spring-Damper System')
plt.xlabel('t')
plt.ylabel('x(t)')
plt.legend(['x1', 'x2'])
plt.grid()
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.integrate.odeint",
"matplotlib.pyplot.legend",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] |
[((611, 638), 'numpy.arange', 'np.arange', (['t_0', 't_f', 'period'], {}), '(t_0, t_f, period)\n', (620, 638), True, 'import numpy as np\n'), ((645, 671), 'scipy.integrate.odeint', 'odeint', (['sys_ode', 'x_init', 't'], {}), '(sys_ode, x_init, t)\n', (651, 671), False, 'from scipy.integrate import odeint\n'), ((704, 719), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'x1'], {}), '(t, x1)\n', (712, 719), True, 'from matplotlib import pyplot as plt\n'), ((721, 736), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'x2'], {}), '(t, x2)\n', (729, 736), True, 'from matplotlib import pyplot as plt\n'), ((738, 776), 'matplotlib.pyplot.title', 'plt.title', (['"""Mass-Spring-Damper System"""'], {}), "('Mass-Spring-Damper System')\n", (747, 776), True, 'from matplotlib import pyplot as plt\n'), ((779, 794), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (789, 794), True, 'from matplotlib import pyplot as plt\n'), ((797, 815), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x(t)"""'], {}), "('x(t)')\n", (807, 815), True, 'from matplotlib import pyplot as plt\n'), ((818, 842), 'matplotlib.pyplot.legend', 'plt.legend', (["['x1', 'x2']"], {}), "(['x1', 'x2'])\n", (828, 842), True, 'from matplotlib import pyplot as plt\n'), ((845, 855), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (853, 855), True, 'from matplotlib import pyplot as plt\n'), ((858, 868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (866, 868), True, 'from matplotlib import pyplot as plt\n')]
|
import random
from fineract.objects.hook import Hook
number = random.randint(0, 10000)
def test_create_hook(fineract):
events = [
{
'actionName': 'DISBURSE',
'entityName': 'LOAN'
},
{
'actionName': 'REPAYMENT',
'entityName': 'LOAN'
}
]
hook = Hook.create_web_hook(fineract.request_handler, 'Test ' + str(number),
'https://localhost:8443', events)
assert isinstance(hook, Hook)
def test_get_all_hooks(fineract):
hooks = [hook for hook in fineract.get_hooks()]
assert len(hooks) >= 1
def test_get_single_hook(fineract):
hooks = [hook for hook in fineract.get_hooks()]
assert fineract.get_hooks(hooks[0].id)
def test_hook_templates(fineract):
assert Hook.template(fineract.request_handler)
def test_hook_exists(fineract):
assert Hook.exists(fineract.request_handler, 'Test ' + str(number))
def test_get_hook_by_name(fineract):
assert Hook.get_by_name(fineract.request_handler, 'Test ' + str(number))
def test_get_hook_by_id(fineract):
hook = Hook.get_by_name(fineract.request_handler, 'Test ' + str(number))
assert Hook.get(fineract.request_handler, hook.id)
def test_hook_update(fineract):
events = [
{
'actionName': 'DISBURSE',
'entityName': 'LOAN'
}
]
hook = Hook.get_by_name(fineract.request_handler, 'Test ' + str(number))
hook = hook.update('https://localhost:8443', events)
assert len(hook.events) == 1
|
[
"fineract.objects.hook.Hook.get",
"fineract.objects.hook.Hook.template",
"random.randint"
] |
[((64, 88), 'random.randint', 'random.randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (78, 88), False, 'import random\n'), ((805, 844), 'fineract.objects.hook.Hook.template', 'Hook.template', (['fineract.request_handler'], {}), '(fineract.request_handler)\n', (818, 844), False, 'from fineract.objects.hook import Hook\n'), ((1192, 1235), 'fineract.objects.hook.Hook.get', 'Hook.get', (['fineract.request_handler', 'hook.id'], {}), '(fineract.request_handler, hook.id)\n', (1200, 1235), False, 'from fineract.objects.hook import Hook\n')]
|
from django.db import models
from django.contrib.postgres.fields import JSONField, ArrayField
from users.models import User
from django.contrib.auth.models import Group
from django.conf import settings
from asset.models import Host, HostGroup
class Line(models.Model):
name = models.CharField(max_length=255, unique=True, verbose_name=u"产品线")
date_created = models.DateTimeField(verbose_name=u'创建时间', auto_now_add=True)
date_updated = models.DateTimeField(verbose_name=u'更新时间', auto_now=True)
def __str__(self):
return self.name
class Project(models.Model):
name = models.CharField(max_length=255, null=True)
user = models.ManyToManyField(User)
user_group = models.ForeignKey(Group, null=True, on_delete=models.SET_NULL)
host_group = models.ForeignKey(HostGroup, null=True, on_delete=models.SET_NULL)
sls = models.FilePathField(path=settings.SALT_STATE_DIRECTORY,
allow_files=False, allow_folders=True, recursive=True)
description = models.TextField(null=True)
tags = ArrayField(models.CharField(max_length=255), default=list)
status = models.IntegerField(null=True)
line = models.ForeignKey(Line, null=True, related_name=u"business", verbose_name=u"产品线",
on_delete=models.SET_NULL)
date_created = models.DateTimeField(verbose_name=u'创建时间', auto_now_add=True)
date_updated = models.DateTimeField(verbose_name=u'更新时间', auto_now=True)
def __str__(self):
return self.name
|
[
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.DateTimeField",
"django.db.models.FilePathField"
] |
[((282, 348), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'unique': '(True)', 'verbose_name': 'u"""产品线"""'}), "(max_length=255, unique=True, verbose_name=u'产品线')\n", (298, 348), False, 'from django.db import models\n'), ((368, 429), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'verbose_name': 'u"""创建时间"""', 'auto_now_add': '(True)'}), "(verbose_name=u'创建时间', auto_now_add=True)\n", (388, 429), False, 'from django.db import models\n'), ((449, 506), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'verbose_name': 'u"""更新时间"""', 'auto_now': '(True)'}), "(verbose_name=u'更新时间', auto_now=True)\n", (469, 506), False, 'from django.db import models\n'), ((598, 641), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)'}), '(max_length=255, null=True)\n', (614, 641), False, 'from django.db import models\n'), ((653, 681), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['User'], {}), '(User)\n', (675, 681), False, 'from django.db import models\n'), ((699, 761), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Group'], {'null': '(True)', 'on_delete': 'models.SET_NULL'}), '(Group, null=True, on_delete=models.SET_NULL)\n', (716, 761), False, 'from django.db import models\n'), ((779, 845), 'django.db.models.ForeignKey', 'models.ForeignKey', (['HostGroup'], {'null': '(True)', 'on_delete': 'models.SET_NULL'}), '(HostGroup, null=True, on_delete=models.SET_NULL)\n', (796, 845), False, 'from django.db import models\n'), ((856, 971), 'django.db.models.FilePathField', 'models.FilePathField', ([], {'path': 'settings.SALT_STATE_DIRECTORY', 'allow_files': '(False)', 'allow_folders': '(True)', 'recursive': '(True)'}), '(path=settings.SALT_STATE_DIRECTORY, allow_files=False,\n allow_folders=True, recursive=True)\n', (876, 971), False, 'from django.db import models\n'), ((1017, 1044), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), 
'(null=True)\n', (1033, 1044), False, 'from django.db import models\n'), ((1128, 1158), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (1147, 1158), False, 'from django.db import models\n'), ((1170, 1283), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Line'], {'null': '(True)', 'related_name': 'u"""business"""', 'verbose_name': 'u"""产品线"""', 'on_delete': 'models.SET_NULL'}), "(Line, null=True, related_name=u'business', verbose_name=\n u'产品线', on_delete=models.SET_NULL)\n", (1187, 1283), False, 'from django.db import models\n'), ((1327, 1388), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'verbose_name': 'u"""创建时间"""', 'auto_now_add': '(True)'}), "(verbose_name=u'创建时间', auto_now_add=True)\n", (1347, 1388), False, 'from django.db import models\n'), ((1408, 1465), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'verbose_name': 'u"""更新时间"""', 'auto_now': '(True)'}), "(verbose_name=u'更新时间', auto_now=True)\n", (1428, 1465), False, 'from django.db import models\n'), ((1067, 1099), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1083, 1099), False, 'from django.db import models\n')]
|
import numpy as np
a_matris = [[2,0,0],
[0,2,0],
[0,0,2]]
x_matris = []
b_matris = [2, 4, 9]
u_a_matris = np.triu(a_matris)
x3 = float(b_matris[2])/u_a_matris[2][2]
x2 = float(b_matris[1] - x3*u_a_matris[1][2])/u_a_matris[1][1]
x1 = float(b_matris[0] - x2*u_a_matris[0][1] - x3*u_a_matris[0][2])/u_a_matris[0][0]
print(x1, x2, x3)
|
[
"numpy.triu"
] |
[((132, 149), 'numpy.triu', 'np.triu', (['a_matris'], {}), '(a_matris)\n', (139, 149), True, 'import numpy as np\n')]
|
import os
import logging
import galsim
import galsim.config
import piff
import numpy as np
import ngmix
if ngmix.__version__[0:2] == "v1":
NGMIX_V2 = False
from ngmix.fitting import LMSimple
from ngmix.admom import Admom
else:
NGMIX_V2 = True
from ngmix.fitting import Fitter
from ngmix.admom import AdmomFitter
from scipy.interpolate import CloughTocher2DInterpolator
logger = logging.getLogger(__name__)
# pixel scale used for fitting the Piff models
PIFF_SCALE = 0.25
class DES_Piff(object):
    """A wrapper for Piff to use with Galsim.

    This wrapper uses ngmix to fit smooth models to the Piff PSF images. The
    parameters of these models are then interpolated across the SE image
    and used to generate a smooth approximation to the PSF.

    Parameters
    ----------
    file_name : str
        The file with the Piff psf solution.
    smooth : bool, optional
        If True, then smooth the Piff PSFs. Default of False.
    """
    _req_params = {'file_name': str}
    _opt_params = {}
    _single_params = []
    _takes_rng = False

    def __init__(self, file_name, smooth=False):
        self.file_name = file_name
        # Read the Piff file. This may fail if the Piff
        # file is missing. We catch this and continue
        # since if we're substituting in some different
        # PSF model for rejectlisted piff files, we'll
        # never actually use self._piff
        try:
            self._piff = piff.read(
                os.path.expanduser(os.path.expandvars(file_name)))
        except IOError:
            print("failed to load Piff file, hopefully it's rejectlisted...")
            self._piff = None
        self._did_fit = False
        self.smooth = smooth

    def _fit_smooth_model(self):
        # Fit a turbulent profile to the Piff PSF on a coarse grid of
        # image positions and build 2D interpolants for (g1, g2, T).
        dxy = 256
        ny = 4096 // dxy + 1
        nx = 2048 // dxy + 1

        xloc = np.empty((ny, nx), dtype=np.float64)
        yloc = np.empty((ny, nx), dtype=np.float64)
        pars = np.empty((ny, nx, 3), dtype=np.float64)
        for yi, yl in enumerate(np.linspace(1, 4096, ny)):
            for xi, xl in enumerate(np.linspace(1, 2048, nx)):
                rng = np.random.RandomState(seed=yi + nx * xi)
                xloc[yi, xi] = xl
                yloc[yi, xi] = yl

                pos = galsim.PositionD(x=xl, y=yl)
                gs_img = self._draw(pos).drawImage(
                    nx=19, ny=19, scale=PIFF_SCALE, method='sb')
                img = gs_img.array
                # estimate the noise from the edge rows of the stamp
                nse = np.std(
                    np.concatenate([img[0, :], img[-1, :]]))
                obs = ngmix.Observation(
                    image=img,
                    weight=np.ones_like(img)/nse**2,
                    jacobian=ngmix.jacobian.DiagonalJacobian(
                        x=9, y=9, scale=PIFF_SCALE))

                _g1 = np.nan
                _g2 = np.nan
                _T = np.nan

                # there are some nutty PSFs
                if gs_img.calculateFWHM() > 0.5:
                    for _ in range(5):
                        try:
                            if NGMIX_V2:
                                am = AdmomFitter(rng=rng)
                                res = am.go(obs, 0.3)
                                if res['flags'] != 0:
                                    continue

                                lm = Fitter(model='turb')
                                lm_res = lm.go(obs, res['pars'])
                                if lm_res['flags'] == 0:
                                    _g1 = lm_res['pars'][2]
                                    _g2 = lm_res['pars'][3]
                                    _T = lm_res['pars'][4]
                                    break
                            else:
                                am = Admom(obs, rng=rng)
                                am.go(0.3)
                                res = am.get_result()
                                if res['flags'] != 0:
                                    continue

                                lm = LMSimple(obs, 'turb')
                                lm.go(res['pars'])
                                lm_res = lm.get_result()
                                if lm_res['flags'] == 0:
                                    _g1 = lm_res['pars'][2]
                                    _g2 = lm_res['pars'][3]
                                    _T = lm_res['pars'][4]
                                    break
                        except ngmix.gexceptions.GMixRangeError:
                            pass

                    try:
                        irr, irc, icc = ngmix.moments.g2mom(_g1, _g2, _T)
                        # this is a fudge factor that gets the overall PSF FWHM
                        # correct
                        # the naive correction for the pixel size is
                        # a bit too small
                        pixel_var = PIFF_SCALE * PIFF_SCALE / 12 * 1.73
                        irr -= pixel_var
                        icc -= pixel_var
                        _g1, _g2, _T = ngmix.moments.mom2g(irr, irc, icc)
                    except Exception:
                        _g1 = np.nan
                        _g2 = np.nan
                        _T = np.nan

                pars[yi, xi, 0] = _g1
                pars[yi, xi, 1] = _g2
                pars[yi, xi, 2] = _T

        xloc = xloc.ravel()
        yloc = yloc.ravel()
        pos = np.stack([xloc, yloc], axis=1)
        assert pos.shape == (xloc.shape[0], 2)

        # make interps
        g1 = pars[:, :, 0].ravel()
        msk = np.isfinite(g1)
        # BUGFIX: require at least 10 *successful* fits. `msk` is a boolean
        # mask, so the old `len(msk)` was the (constant) grid size and the
        # check could never trigger; count the True entries instead.
        if np.count_nonzero(msk) < 10:
            raise ValueError('DES Piff fitting failed too much!')
        if np.any(~msk):
            g1[~msk] = np.mean(g1[msk])
        self._g1int = CloughTocher2DInterpolator(
            pos, g1, fill_value=np.mean(g1[msk]))

        g2 = pars[:, :, 1].ravel()
        msk = np.isfinite(g2)
        if np.count_nonzero(msk) < 10:
            raise ValueError('DES Piff fitting failed too much!')
        if np.any(~msk):
            g2[~msk] = np.mean(g2[msk])
        self._g2int = CloughTocher2DInterpolator(
            pos, g2, fill_value=np.mean(g2[msk]))

        T = pars[:, :, 2].ravel()
        msk = np.isfinite(T)
        if np.count_nonzero(msk) < 10:
            raise ValueError('DES Piff fitting failed too much!')
        if np.any(~msk):
            T[~msk] = np.mean(T[msk])
        self._Tint = CloughTocher2DInterpolator(
            pos, T, fill_value=np.mean(T[msk]))

        self._did_fit = True

    def _draw(self, image_pos, wcs=None, n_pix=None,
              x_interpolant='lanczos15', gsparams=None):
        """Get an image of the PSF at the given location.

        Parameters
        ----------
        image_pos : galsim.Position
            The image position for the PSF.
        wcs : galsim.BaseWCS or subclass, optional
            The WCS to use to draw the PSF.
        n_pix : int, optional
            The image size to use when drawing without smoothing. Defaults to
            53 pixels if not given
        x_interpolant : str, optional
            The interpolant to use.
        gsparams : galsim.GSParams, optional
            Optional galsim configuration data to pass along.

        Returns
        -------
        psf : galsim.InterpolatedImage
            The PSF at the image position.
        """
        if wcs is not None:
            if n_pix is not None:
                n_pix = n_pix
            else:
                n_pix = 53
            pixel_wcs = wcs.local(image_pos)
        else:
            n_pix = 19
            pixel_wcs = galsim.PixelScale(PIFF_SCALE)

        # nice and big image size here cause this has been a problem
        image = galsim.ImageD(ncol=n_pix, nrow=n_pix, wcs=pixel_wcs)
        psf = self.getPiff().draw(
            image_pos.x,
            image_pos.y,
            image=image,
            center=True,
        )
        psf = galsim.InterpolatedImage(
            galsim.ImageD(psf.array),  # make sure galsim is not keeping state
            wcs=pixel_wcs,
            gsparams=gsparams,
            x_interpolant=x_interpolant
        ).withFlux(
            1.0
        )
        return psf

    def getPiff(self):
        """Return the underlying piff.PSF object (may be None if unreadable)."""
        return self._piff

    def getPSF(
        self, image_pos, wcs=None,
        smooth=False, n_pix=None, **kwargs
    ):
        """Get an image of the PSF at the given location.

        Parameters
        ----------
        image_pos : galsim.Position
            The image position for the PSF.
        wcs : galsim.BaseWCS or subclass, optional
            The WCS to use to draw the PSF. Currently used only when smoothing
            is turned off.
        smooth : bool, optional
            If True, then smooth the Piff PSFs. Default of False.
        n_pix : int, optional
            The image size to use when drawing without smoothing.
        **kargs : extra keyword arguments
            These are all ignored.

        Returns
        -------
        psf : galsim.GSObject
            The PSF at the image position.
        """
        if smooth or self.smooth:
            if not self._did_fit:
                self._fit_smooth_model()

            # clamp to the CCD bounds used when building the interpolants
            arr = np.array([
                np.clip(image_pos.x, 1, 2048),
                np.clip(image_pos.y, 1, 4096)])

            _g1 = self._g1int(arr)[0]
            _g2 = self._g2int(arr)[0]
            _T = self._Tint(arr)[0]
            if np.any(np.isnan(np.array([_g1, _g2, _T]))):
                logger.debug("Piff smooth fit params are NaN: %s %s %s %s", image_pos, _g1, _g2, _T)
                raise RuntimeError("NaN smooth Piff params at %s!" % image_pos)
            pars = np.array([0, 0, _g1, _g2, _T, 1])
            obj = ngmix.gmix.make_gmix_model(pars, 'turb').make_galsim_object()
            return obj.withFlux(1)
        else:
            return self._draw(image_pos, wcs=wcs, n_pix=n_pix)
class PiffLoader(galsim.config.InputLoader):
    """galsim.config input loader that builds DES_Piff objects."""

    def getKwargs(self, config, base, logger):
        """Parse the input config and return (kwargs, safe) for DES_Piff."""
        required = {'file_name': str}
        kwargs, safe = galsim.config.GetAllParams(
            config, base, req=required, opt={})
        return kwargs, safe
# Register the loader so config files can declare an input of type 'des_piff'.
galsim.config.RegisterInputType('des_piff', PiffLoader(DES_Piff))
# and a builder
def BuildDES_Piff(config, base, ignore, gsparams, logger):
    """galsim.config object builder for DES_Piff PSFs.

    Fetches the DES_Piff input object, resolves the image position and
    WCS from the config, and returns the PSF profile at that position
    together with a reuse flag (always False, since the profile depends
    on the position).
    """
    piff_obj = galsim.config.GetInputObj('des_piff', config, base, 'DES_Piff')

    opt = {'flux': float,
           'num': int,
           'image_pos': galsim.PositionD,
           'x_interpolant': str,
           'smooth': bool}
    params, safe = galsim.config.GetAllParams(
        config, base, opt=opt, ignore=ignore)

    # The image position may come from the local config or the base config.
    if 'image_pos' in params:
        image_pos = params['image_pos']
    elif 'image_pos' in base:
        image_pos = base['image_pos']
    else:
        raise galsim.GalSimConfigError(
            "DES_Piff requested, but no image_pos defined in base.")

    if 'wcs' not in base:
        raise galsim.GalSimConfigError(
            "DES_Piff requested, but no wcs defined in base.")
    wcs = base['wcs']

    gs_params = galsim.GSParams(**gsparams) if gsparams else None

    psf = piff_obj.getPSF(
        image_pos,
        wcs,
        smooth=params.get('smooth', False),
        gsparams=gs_params)

    if 'flux' in params:
        psf = psf.withFlux(params['flux'])

    # we make sure to declare the returned object as not safe for reuse
    return psf, False
def BuildDES_Piff_with_substitute(config, base, ignore, gsparams, logger):
    """Builder that normally delegates to BuildDES_Piff.

    When the config sets ``use_substitute = True`` (used for rejectlisted
    Piff files), some other PSF described by ``substitute_psf`` is built
    instead.
    """
    use_substitute = False
    if "use_substitute" in config:
        use_substitute = galsim.config.ParseValue(
            config, "use_substitute", base, bool)[0]

    if use_substitute:
        return galsim.config.BuildGSObject(
            config, "substitute_psf", base=base,
            gsparams=gsparams, logger=logger)

    ignore += ["use_substitute", "substitute_psf"]
    return BuildDES_Piff(config, base, ignore, gsparams, logger)
# Register the builder so config files can request an object of type 'DES_Piff'.
galsim.config.RegisterObjectType(
    'DES_Piff', BuildDES_Piff_with_substitute, input_type='des_piff')
|
[
"galsim.config.BuildGSObject",
"numpy.empty",
"numpy.clip",
"numpy.mean",
"ngmix.gmix.make_gmix_model",
"galsim.PixelScale",
"galsim.config.GetAllParams",
"galsim.config.GetInputObj",
"galsim.PositionD",
"galsim.ImageD",
"galsim.config.RegisterObjectType",
"numpy.isfinite",
"numpy.random.RandomState",
"ngmix.jacobian.DiagonalJacobian",
"numpy.linspace",
"numpy.stack",
"ngmix.fitting.LMSimple",
"numpy.ones_like",
"galsim.config.ParseValue",
"galsim.GSParams",
"os.path.expandvars",
"ngmix.moments.mom2g",
"galsim.GalSimConfigError",
"numpy.concatenate",
"ngmix.fitting.Fitter",
"ngmix.moments.g2mom",
"ngmix.admom.Admom",
"numpy.any",
"numpy.array",
"ngmix.admom.AdmomFitter",
"logging.getLogger"
] |
[((407, 434), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (424, 434), False, 'import logging\n'), ((12281, 12383), 'galsim.config.RegisterObjectType', 'galsim.config.RegisterObjectType', (['"""DES_Piff"""', 'BuildDES_Piff_with_substitute'], {'input_type': '"""des_piff"""'}), "('DES_Piff', BuildDES_Piff_with_substitute,\n input_type='des_piff')\n", (12313, 12383), False, 'import galsim\n'), ((10347, 10410), 'galsim.config.GetInputObj', 'galsim.config.GetInputObj', (['"""des_piff"""', 'config', 'base', '"""DES_Piff"""'], {}), "('des_piff', config, base, 'DES_Piff')\n", (10372, 10410), False, 'import galsim\n'), ((10582, 10646), 'galsim.config.GetAllParams', 'galsim.config.GetAllParams', (['config', 'base'], {'opt': 'opt', 'ignore': 'ignore'}), '(config, base, opt=opt, ignore=ignore)\n', (10608, 10646), False, 'import galsim\n'), ((1862, 1898), 'numpy.empty', 'np.empty', (['(ny, nx)'], {'dtype': 'np.float64'}), '((ny, nx), dtype=np.float64)\n', (1870, 1898), True, 'import numpy as np\n'), ((1914, 1950), 'numpy.empty', 'np.empty', (['(ny, nx)'], {'dtype': 'np.float64'}), '((ny, nx), dtype=np.float64)\n', (1922, 1950), True, 'import numpy as np\n'), ((1966, 2005), 'numpy.empty', 'np.empty', (['(ny, nx, 3)'], {'dtype': 'np.float64'}), '((ny, nx, 3), dtype=np.float64)\n', (1974, 2005), True, 'import numpy as np\n'), ((5414, 5444), 'numpy.stack', 'np.stack', (['[xloc, yloc]'], {'axis': '(1)'}), '([xloc, yloc], axis=1)\n', (5422, 5444), True, 'import numpy as np\n'), ((5565, 5580), 'numpy.isfinite', 'np.isfinite', (['g1'], {}), '(g1)\n', (5576, 5580), True, 'import numpy as np\n'), ((5684, 5696), 'numpy.any', 'np.any', (['(~msk)'], {}), '(~msk)\n', (5690, 5696), True, 'import numpy as np\n'), ((5888, 5903), 'numpy.isfinite', 'np.isfinite', (['g2'], {}), '(g2)\n', (5899, 5903), True, 'import numpy as np\n'), ((6007, 6019), 'numpy.any', 'np.any', (['(~msk)'], {}), '(~msk)\n', (6013, 6019), True, 'import numpy as np\n'), ((6210, 6224), 
'numpy.isfinite', 'np.isfinite', (['T'], {}), '(T)\n', (6221, 6224), True, 'import numpy as np\n'), ((6328, 6340), 'numpy.any', 'np.any', (['(~msk)'], {}), '(~msk)\n', (6334, 6340), True, 'import numpy as np\n'), ((7700, 7752), 'galsim.ImageD', 'galsim.ImageD', ([], {'ncol': 'n_pix', 'nrow': 'n_pix', 'wcs': 'pixel_wcs'}), '(ncol=n_pix, nrow=n_pix, wcs=pixel_wcs)\n', (7713, 7752), False, 'import galsim\n'), ((10057, 10115), 'galsim.config.GetAllParams', 'galsim.config.GetAllParams', (['config', 'base'], {'req': 'req', 'opt': 'opt'}), '(config, base, req=req, opt=opt)\n', (10083, 10115), False, 'import galsim\n'), ((10955, 11030), 'galsim.GalSimConfigError', 'galsim.GalSimConfigError', (['"""DES_Piff requested, but no wcs defined in base."""'], {}), "('DES_Piff requested, but no wcs defined in base.')\n", (10979, 11030), False, 'import galsim\n'), ((11103, 11130), 'galsim.GSParams', 'galsim.GSParams', ([], {}), '(**gsparams)\n', (11118, 11130), False, 'import galsim\n'), ((12020, 12123), 'galsim.config.BuildGSObject', 'galsim.config.BuildGSObject', (['config', '"""substitute_psf"""'], {'base': 'base', 'gsparams': 'gsparams', 'logger': 'logger'}), "(config, 'substitute_psf', base=base, gsparams=\n gsparams, logger=logger)\n", (12047, 12123), False, 'import galsim\n'), ((2038, 2062), 'numpy.linspace', 'np.linspace', (['(1)', '(4096)', 'ny'], {}), '(1, 4096, ny)\n', (2049, 2062), True, 'import numpy as np\n'), ((5721, 5737), 'numpy.mean', 'np.mean', (['g1[msk]'], {}), '(g1[msk])\n', (5728, 5737), True, 'import numpy as np\n'), ((6044, 6060), 'numpy.mean', 'np.mean', (['g2[msk]'], {}), '(g2[msk])\n', (6051, 6060), True, 'import numpy as np\n'), ((6364, 6379), 'numpy.mean', 'np.mean', (['T[msk]'], {}), '(T[msk])\n', (6371, 6379), True, 'import numpy as np\n'), ((7584, 7613), 'galsim.PixelScale', 'galsim.PixelScale', (['PIFF_SCALE'], {}), '(PIFF_SCALE)\n', (7601, 7613), False, 'import galsim\n'), ((9664, 9697), 'numpy.array', 'np.array', (['[0, 0, _g1, _g2, _T, 1]'], {}), 
'([0, 0, _g1, _g2, _T, 1])\n', (9672, 9697), True, 'import numpy as np\n'), ((10819, 10905), 'galsim.GalSimConfigError', 'galsim.GalSimConfigError', (['"""DES_Piff requested, but no image_pos defined in base."""'], {}), "(\n 'DES_Piff requested, but no image_pos defined in base.')\n", (10843, 10905), False, 'import galsim\n'), ((11823, 11885), 'galsim.config.ParseValue', 'galsim.config.ParseValue', (['config', '"""use_substitute"""', 'base', 'bool'], {}), "(config, 'use_substitute', base, bool)\n", (11847, 11885), False, 'import galsim\n'), ((2101, 2125), 'numpy.linspace', 'np.linspace', (['(1)', '(2048)', 'nx'], {}), '(1, 2048, nx)\n', (2112, 2125), True, 'import numpy as np\n'), ((2150, 2190), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(yi + nx * xi)'}), '(seed=yi + nx * xi)\n', (2171, 2190), True, 'import numpy as np\n'), ((2282, 2310), 'galsim.PositionD', 'galsim.PositionD', ([], {'x': 'xl', 'y': 'yl'}), '(x=xl, y=yl)\n', (2298, 2310), False, 'import galsim\n'), ((5820, 5836), 'numpy.mean', 'np.mean', (['g1[msk]'], {}), '(g1[msk])\n', (5827, 5836), True, 'import numpy as np\n'), ((6143, 6159), 'numpy.mean', 'np.mean', (['g2[msk]'], {}), '(g2[msk])\n', (6150, 6159), True, 'import numpy as np\n'), ((6460, 6475), 'numpy.mean', 'np.mean', (['T[msk]'], {}), '(T[msk])\n', (6467, 6475), True, 'import numpy as np\n'), ((1513, 1542), 'os.path.expandvars', 'os.path.expandvars', (['file_name'], {}), '(file_name)\n', (1531, 1542), False, 'import os\n'), ((2513, 2552), 'numpy.concatenate', 'np.concatenate', (['[img[0, :], img[-1, :]]'], {}), '([img[0, :], img[-1, :]])\n', (2527, 2552), True, 'import numpy as np\n'), ((7952, 7976), 'galsim.ImageD', 'galsim.ImageD', (['psf.array'], {}), '(psf.array)\n', (7965, 7976), False, 'import galsim\n'), ((9213, 9242), 'numpy.clip', 'np.clip', (['image_pos.x', '(1)', '(2048)'], {}), '(image_pos.x, 1, 2048)\n', (9220, 9242), True, 'import numpy as np\n'), ((9260, 9289), 'numpy.clip', 'np.clip', (['image_pos.y', 
'(1)', '(4096)'], {}), '(image_pos.y, 1, 4096)\n', (9267, 9289), True, 'import numpy as np\n'), ((9436, 9460), 'numpy.array', 'np.array', (['[_g1, _g2, _T]'], {}), '([_g1, _g2, _T])\n', (9444, 9460), True, 'import numpy as np\n'), ((9716, 9756), 'ngmix.gmix.make_gmix_model', 'ngmix.gmix.make_gmix_model', (['pars', '"""turb"""'], {}), "(pars, 'turb')\n", (9742, 9756), False, 'import ngmix\n'), ((2708, 2767), 'ngmix.jacobian.DiagonalJacobian', 'ngmix.jacobian.DiagonalJacobian', ([], {'x': '(9)', 'y': '(9)', 'scale': 'PIFF_SCALE'}), '(x=9, y=9, scale=PIFF_SCALE)\n', (2739, 2767), False, 'import ngmix\n'), ((4594, 4627), 'ngmix.moments.g2mom', 'ngmix.moments.g2mom', (['_g1', '_g2', '_T'], {}), '(_g1, _g2, _T)\n', (4613, 4627), False, 'import ngmix\n'), ((5046, 5080), 'ngmix.moments.mom2g', 'ngmix.moments.mom2g', (['irr', 'irc', 'icc'], {}), '(irr, irc, icc)\n', (5065, 5080), False, 'import ngmix\n'), ((2653, 2670), 'numpy.ones_like', 'np.ones_like', (['img'], {}), '(img)\n', (2665, 2670), True, 'import numpy as np\n'), ((3121, 3141), 'ngmix.admom.AdmomFitter', 'AdmomFitter', ([], {'rng': 'rng'}), '(rng=rng)\n', (3132, 3141), False, 'from ngmix.admom import AdmomFitter\n'), ((3333, 3353), 'ngmix.fitting.Fitter', 'Fitter', ([], {'model': '"""turb"""'}), "(model='turb')\n", (3339, 3353), False, 'from ngmix.fitting import Fitter\n'), ((3768, 3787), 'ngmix.admom.Admom', 'Admom', (['obs'], {'rng': 'rng'}), '(obs, rng=rng)\n', (3773, 3787), False, 'from ngmix.admom import Admom\n'), ((4022, 4043), 'ngmix.fitting.LMSimple', 'LMSimple', (['obs', '"""turb"""'], {}), "(obs, 'turb')\n", (4030, 4043), False, 'from ngmix.fitting import LMSimple\n')]
|
from django.contrib import admin
from .models import *
# Expose the blog models in the Django admin, in declaration order.
for _model in (BlogList, Blog, Comment):
    admin.site.register(_model)
|
[
"django.contrib.admin.site.register"
] |
[((56, 85), 'django.contrib.admin.site.register', 'admin.site.register', (['BlogList'], {}), '(BlogList)\n', (75, 85), False, 'from django.contrib import admin\n'), ((86, 111), 'django.contrib.admin.site.register', 'admin.site.register', (['Blog'], {}), '(Blog)\n', (105, 111), False, 'from django.contrib import admin\n'), ((112, 140), 'django.contrib.admin.site.register', 'admin.site.register', (['Comment'], {}), '(Comment)\n', (131, 140), False, 'from django.contrib import admin\n')]
|
import json
from io import BytesIO
def test_ping(app):
    """The /ping endpoint answers 200 with a success payload."""
    client = app.test_client()
    resp = client.get('/ping')
    payload = json.loads(resp.data.decode())
    assert resp.status_code == 200
    assert 'records' in payload['message']
    assert 'success' in payload['status']
def test_add_user(app):
    """Ensure a new user can be added to the database."""
    with app.test_client() as client:
        form_data = {
            'name': 'test',
            'foo': 'bar',
            # multipart file upload: (stream, filename)
            'image_1': (BytesIO(b'my file contents'), "image1.jpg"),
        }
        resp = client.post(
            '/upload', content_type='multipart/form-data', data=form_data)
        assert resp.status_code == 204
|
[
"io.BytesIO"
] |
[((493, 521), 'io.BytesIO', 'BytesIO', (["b'my file contents'"], {}), "(b'my file contents')\n", (500, 521), False, 'from io import BytesIO\n')]
|
"""Functions for sending DNS queries and checking recieved answers checking"""
# pylint: disable=C0301
# flake8: noqa
from ipaddress import IPv4Address, IPv6Address
import random
from typing import Iterable, Optional, Set, Union
import dns.message
import dns.flags
import pydnstest.matchpart
import pydnstest.mock_client
def unset_flag(message: dns.message.Message, flag: int) -> dns.message.Message:
    """Clear *flag* from the message's flag bits (in place) and return it."""
    message.flags = message.flags & ~flag
    return message
def send_and_check(question: Union[dns.message.Message, bytes],  # pylint: disable=R0913
                   expected: dns.message.Message,
                   server: Union[IPv4Address, IPv6Address],
                   match_fields: Set[str],
                   port: int = 53,
                   tcp: bool = False,
                   timeout: int = pydnstest.mock_client.SOCKET_OPERATION_TIMEOUT,
                   unset_flags: Iterable[int] = tuple()) -> bool:
    """Check whether the DNS answer received for *question* from *server*
    matches *expected* in the specified fields.

    See pydnstest.matchpart for more information on match fields.
    Flags listed in *unset_flags* are cleared from the answer before matching.
    Returns True on success; raises an exception on failure.
    """
    print("Sending query:\n%s\n" % str(question))
    answer = get_answer(question, server, port, tcp, timeout=timeout)
    # normalize flags the caller wants ignored before comparing
    for flag in unset_flags:
        answer = unset_flag(answer, flag)
    print("Got answer:\n%s\n" % answer)
    print("Matching:\n%s\n%s\n" % (match_fields, expected))
    # match_part raises on the first mismatching field
    for field in match_fields:
        pydnstest.matchpart.match_part(expected, answer, field)
    return True
def get_answer(question: Union[dns.message.Message, bytes],
               server: Union[IPv4Address, IPv6Address],
               port: int = 53,
               tcp: bool = False,
               timeout: int = pydnstest.mock_client.SOCKET_OPERATION_TIMEOUT) -> dns.message.Message:
    """Send *question* to *server* and return the answer as a DNS message."""
    sock = pydnstest.mock_client.setup_socket(str(server), port, tcp=tcp)
    # the socket is a context manager, so it is closed on exit
    with sock:
        pydnstest.mock_client.send_query(sock, question)
        return pydnstest.mock_client.get_dns_message(sock, timeout=timeout)
def string_answer(question: Union[dns.message.Message, bytes],
                  server: Union[IPv4Address, IPv6Address],
                  port: int = 53,
                  tcp: bool = False) -> str:
    """Return a server's answer rendered as text. Good for generating tests."""
    answer = get_answer(question, server, port, tcp)
    return answer.to_text()
def randomize_case(label: bytes) -> bytes:
    """Return *label* with the case of each byte flipped at random.

    Each byte is swapcased with probability 1/2; non-letter bytes are
    unaffected by swapcase and pass through unchanged.
    """
    pieces = [
        bytes([byte]).swapcase() if random.randint(0, 1) else bytes([byte])
        for byte in label
    ]
    return b''.join(pieces)
def make_random_case_query(name: str, *args, **kwargs) -> dns.message.Message:
    """Proxy for dns.message.make_query with rANdoM-cASe.

    BUGFIX: the previous implementation rebound the loop variable
    (``label = randomize_case(label)``) without storing the result, so the
    query name was returned unchanged. The randomized labels are now
    written back into the question name.
    """
    query = dns.message.make_query(name, *args, **kwargs)
    qname = query.question[0].name
    # dns.name.Name is immutable, so build a fresh name from the
    # randomized labels and attach it to the question RRset
    query.question[0].name = dns.name.Name(
        randomize_case(label) for label in qname.labels)
    return query
|
[
"random.randint"
] |
[((2650, 2670), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (2664, 2670), False, 'import random\n')]
|
from django.conf.urls import url
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
# URL routes for this app: the index page and the login view.
urlpatterns = [
    path('', views.index, name='index'),
    path('login/', views.login, name='login'),
]
|
[
"django.urls.path"
] |
[((181, 216), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (185, 216), False, 'from django.urls import path\n'), ((222, 263), 'django.urls.path', 'path', (['"""login/"""', 'views.login'], {'name': '"""login"""'}), "('login/', views.login, name='login')\n", (226, 263), False, 'from django.urls import path\n')]
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import logging
logger = logging.getLogger(__name__)
import uuid
from bokeh.exceptions import UnauthorizedException
from flask import (
request, session, flash, redirect, url_for, render_template, jsonify
)
from .app import bokeh_app
from .models import user, docs, convenience
class AbstractAuthentication(object):
    """Interface for bokeh-server authentication backends.

    Subclasses decide how the current user is derived from the flask
    request, how login/logout work, and who may read or write documents.
    """

    def current_user_name(self):
        """obtain current user name from the current request

        current request is obtained from flask request thread local
        object
        """
        raise NotImplementedError

    def login(self, username):
        """login the user, sets whatever request information is necessary
        (usually, session['username'] = username)
        """
        raise NotImplementedError

    def logout(self):
        """logs out the user, sets whatever request information is necessary

        usually, session.pop('username')
        """
        raise NotImplementedError

    def current_user(self):
        """returns bokeh User object from self.current_user_name"""
        username = self.current_user_name()
        if username is None:
            return None
        # load the stored user record for the resolved username
        bokehuser = user.User.load(bokeh_app.servermodel_storage, username)
        return bokehuser

    def login_get(self):
        """custom login view"""
        raise NotImplementedError

    def login_post(self):
        """custom login submission. Request form will have
        username, password, and possibly an api field.
        api indicates that we are
        submitting via python, and we should try to return error
        codes rather than flash messages
        """
        raise NotImplementedError

    def login_from_apikey(self):
        """login URL using apikey. This is usually generated
        by the python client
        """
        raise NotImplementedError

    def register_get(self):
        """custom register view"""
        raise NotImplementedError

    def register_post(self):
        """custom register submission

        request form will have username, password, password_confirm,
        and possibly an api field. api indicates that we are
        submitting via python, and we should try to return error
        codes rather than flash messages
        """
        raise NotImplementedError

    def can_write_doc(self, docid):
        """whether or not a user can write to a doc"""
        raise NotImplementedError

    def can_read_doc(self, docid):
        """whether or not a user can read a doc"""
        raise NotImplementedError
class SingleUserAuthentication(AbstractAuthentication):
    """Authentication backend for single-user servers.

    Everyone is "defaultuser" and every document is readable and
    writable; no login is required.
    """

    def can_write_doc(self, doc_or_docid, temporary_docid=None, userobj=None):
        """Single-user mode: every document is writable."""
        return True

    def can_read_doc(self, doc_or_docid, temporary_docid=None, userobj=None):
        """Single-user mode: every document is readable."""
        return True

    def current_user_name(self):
        """The fixed username used for all requests."""
        return "defaultuser"

    def current_user(self):
        """Return the bokeh User for "defaultuser", creating it on first use."""
        username = self.current_user_name()
        existing = user.User.load(bokeh_app.servermodel_storage, username)
        if existing is not None:
            return existing
        return user.new_user(bokeh_app.servermodel_storage, "defaultuser",
                             str(uuid.uuid4()), apikey='nokey', docs=[])
class MultiUserAuthentication(AbstractAuthentication):
    """Authentication backend backed by per-user accounts.

    Users authenticate either through the login/register forms (flask
    session) or via api keys supplied by the python client.
    """

    def can_write_doc(self, doc_or_docid, temporary_docid=None, userobj=None):
        """Check write access to a doc for the given (or current) user."""
        if not isinstance(doc_or_docid, docs.Doc):
            doc = docs.Doc.load(bokeh_app.servermodel_storage, doc_or_docid)
        else:
            doc = doc_or_docid
        if userobj is None:
            userobj = self.current_user()
        return convenience.can_write_from_request(doc, request, userobj,
                                                  temporary_docid=temporary_docid)

    def can_read_doc(self, doc_or_docid, temporary_docid=None, userobj=None):
        """Check read access to a doc for the given (or current) user."""
        if not isinstance(doc_or_docid, docs.Doc):
            doc = docs.Doc.load(bokeh_app.servermodel_storage, doc_or_docid)
        else:
            doc = doc_or_docid
        if userobj is None:
            userobj = self.current_user()
        return convenience.can_read_from_request(doc, request, userobj)

    def login(self, username):
        """Log *username* in by storing it on the flask session."""
        session['username'] = username

    def print_connection_info(self, bokehuser):
        """Log the output_server() call users need to connect from python."""
        logger.info("connect using the following")
        command = "output_server(docname, username='%s', userapikey='%s')"
        command = command % (bokehuser.username, bokehuser.apikey)
        logger.info(command)

    def current_user_name(self):
        # users can be authenticated by logging in (setting the session)
        # or by setting fields in the http header (api keys, etc..)
        username = session.get('username', None)
        if username:
            return username
        else:
            # check for auth via apis and headers
            bokehuser = user.apiuser_from_request(bokeh_app, request)
            if bokehuser:
                return bokehuser.username
            return None

    def register_get(self):
        """Render the registration form."""
        return render_template("register.html", title="Register")

    def login_get(self):
        """Render the login form."""
        return render_template("login.html", title="Login")

    def register_post_api(self):
        """Registration submitted by the python client; returns JSON."""
        username = request.values['username']
        password = request.values['password']
        try:
            bokehuser = user.new_user(
                bokeh_app.servermodel_storage, username, password
            )
            self.login(username)
            self.print_connection_info(bokehuser)
        except UnauthorizedException:
            # api clients get an error payload, not a flash message
            return jsonify(status=False,
                           error="user already exists")
        return jsonify(status=True,
                       userapikey=bokehuser.apikey
                       )

    def register_post(self):
        """Registration submitted via the browser form (or api passthrough)."""
        if request.values.get('api', None):
            return self.register_post_api()
        username = request.values['username']
        password = request.values['password']
        password_confirm = request.values['password_confirm']
        if password != password_confirm:
            flash("password and confirmation do not match")
            return redirect(url_for('.register_get'))
        try:
            bokehuser = user.new_user(
                bokeh_app.servermodel_storage, username, password
            )
            self.login(username)
            self.print_connection_info(bokehuser)
        except UnauthorizedException:
            flash("user already exists")
            return redirect(url_for('.register_get'))
        return redirect(url_for(".index"))

    def login_post_api(self):
        """Login submitted by the python client; returns JSON."""
        username = request.values['username']
        password = request.values['password']
        try:
            bokehuser = user.auth_user(bokeh_app.servermodel_storage,
                                      username,
                                      password)
            self.login(username)
            self.print_connection_info(bokehuser)
        except UnauthorizedException:
            return jsonify(status=False,
                           error="incorrect login ")
        return jsonify(status=True,
                       userapikey=bokehuser.apikey
                       )

    def login_post(self):
        """Login submitted via the browser form (or api passthrough)."""
        if request.values.get('api', None):
            return self.login_post_api()
        username = request.values['username']
        password = request.values['password']
        try:
            bokehuser = user.auth_user(bokeh_app.servermodel_storage,
                                      username,
                                      password=password)
            self.login(username)
            self.print_connection_info(bokehuser)
        except UnauthorizedException:
            flash("incorrect login exists")
            return redirect(url_for('.login_get'))
        return redirect(url_for(".index"))

    def login_from_apikey(self):
        """Login URL using an apikey, usually generated by the python client."""
        username = request.values.get('username')
        apikey = request.values.get('userapikey')
        try:
            bokehuser = user.auth_user(bokeh_app.servermodel_storage,
                                      username,
                                      apikey=apikey)
            self.login(username)
            self.print_connection_info(bokehuser)
        except UnauthorizedException:
            flash("incorrect login")
            return redirect(url_for('.login_get'))
        return redirect(url_for(".index"))

    def logout(self):
        """Clear the session username and return to the index page."""
        session.pop('username', None)
        return redirect(url_for(".index"))
|
[
"flask.flash",
"flask.session.pop",
"uuid.uuid4",
"flask.session.get",
"flask.request.values.get",
"flask.jsonify",
"flask.url_for",
"flask.render_template",
"logging.getLogger"
] |
[((438, 465), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (455, 465), False, 'import logging\n'), ((5278, 5307), 'flask.session.get', 'session.get', (['"""username"""', 'None'], {}), "('username', None)\n", (5289, 5307), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((5623, 5673), 'flask.render_template', 'render_template', (['"""register.html"""'], {'title': '"""Register"""'}), "('register.html', title='Register')\n", (5638, 5673), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((5715, 5759), 'flask.render_template', 'render_template', (['"""login.html"""'], {'title': '"""Login"""'}), "('login.html', title='Login')\n", (5730, 5759), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((6255, 6304), 'flask.jsonify', 'jsonify', ([], {'status': '(True)', 'userapikey': 'bokehuser.apikey'}), '(status=True, userapikey=bokehuser.apikey)\n', (6262, 6304), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((6393, 6424), 'flask.request.values.get', 'request.values.get', (['"""api"""', 'None'], {}), "('api', None)\n", (6411, 6424), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((7708, 7757), 'flask.jsonify', 'jsonify', ([], {'status': '(True)', 'userapikey': 'bokehuser.apikey'}), '(status=True, userapikey=bokehuser.apikey)\n', (7715, 7757), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((7843, 7874), 'flask.request.values.get', 'request.values.get', (['"""api"""', 'None'], {}), "('api', None)\n", (7861, 7874), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((8511, 8541), 'flask.request.values.get', 'request.values.get', (['"""username"""'], {}), "('username')\n", (8529, 8541), 
False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((8559, 8591), 'flask.request.values.get', 'request.values.get', (['"""userapikey"""'], {}), "('userapikey')\n", (8577, 8591), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((9062, 9091), 'flask.session.pop', 'session.pop', (['"""username"""', 'None'], {}), "('username', None)\n", (9073, 9091), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((6677, 6724), 'flask.flash', 'flash', (['"""password and confirmation do not match"""'], {}), "('password and confirmation do not match')\n", (6682, 6724), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((7155, 7172), 'flask.url_for', 'url_for', (['""".index"""'], {}), "('.index')\n", (7162, 7172), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((8439, 8456), 'flask.url_for', 'url_for', (['""".index"""'], {}), "('.index')\n", (8446, 8456), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((9012, 9029), 'flask.url_for', 'url_for', (['""".index"""'], {}), "('.index')\n", (9019, 9029), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((9116, 9133), 'flask.url_for', 'url_for', (['""".index"""'], {}), "('.index')\n", (9123, 9133), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((3747, 3759), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3757, 3759), False, 'import uuid\n'), ((6162, 6212), 'flask.jsonify', 'jsonify', ([], {'status': '(False)', 'error': '"""user already exists"""'}), "(status=False, error='user already exists')\n", (6169, 6212), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((6753, 6777), 'flask.url_for', 
'url_for', (['""".register_get"""'], {}), "('.register_get')\n", (6760, 6777), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((7048, 7076), 'flask.flash', 'flash', (['"""user already exists"""'], {}), "('user already exists')\n", (7053, 7076), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((7618, 7665), 'flask.jsonify', 'jsonify', ([], {'status': '(False)', 'error': '"""incorrect login """'}), "(status=False, error='incorrect login ')\n", (7625, 7665), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((8332, 8363), 'flask.flash', 'flash', (['"""incorrect login exists"""'], {}), "('incorrect login exists')\n", (8337, 8363), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((8912, 8936), 'flask.flash', 'flash', (['"""incorrect login"""'], {}), "('incorrect login')\n", (8917, 8936), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((7105, 7129), 'flask.url_for', 'url_for', (['""".register_get"""'], {}), "('.register_get')\n", (7112, 7129), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((8392, 8413), 'flask.url_for', 'url_for', (['""".login_get"""'], {}), "('.login_get')\n", (8399, 8413), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n'), ((8965, 8986), 'flask.url_for', 'url_for', (['""".login_get"""'], {}), "('.login_get')\n", (8972, 8986), False, 'from flask import request, session, flash, redirect, url_for, render_template, jsonify\n')]
|
from wtforms import StringField, validators
from kaira.app import App
from kaira.response import response
from kaira.wtf import KairaForm
app = App()
class SigninForm(KairaForm):
username = StringField('Username', [validators.Length(min=4, max=25)])
password = StringField('Password', [validators.Length(min=6, max=35)])
@app.route("/", methods=['GET', 'POST'])
def form_boostrap(request):
form = SigninForm(request)
if form.validate_on_submit():
return response.redirect('/done')
return response.template('boostrap.html', form=form)
@app.route("/done")
def done(request):
return response.text('Done!')
if __name__ == '__main__':
app.run(debug=True, host="0.0.0.0", port=8000)
|
[
"wtforms.validators.Length",
"kaira.app.App",
"kaira.response.response.text",
"kaira.response.response.template",
"kaira.response.response.redirect"
] |
[((148, 153), 'kaira.app.App', 'App', ([], {}), '()\n', (151, 153), False, 'from kaira.app import App\n'), ((526, 571), 'kaira.response.response.template', 'response.template', (['"""boostrap.html"""'], {'form': 'form'}), "('boostrap.html', form=form)\n", (543, 571), False, 'from kaira.response import response\n'), ((624, 646), 'kaira.response.response.text', 'response.text', (['"""Done!"""'], {}), "('Done!')\n", (637, 646), False, 'from kaira.response import response\n'), ((487, 513), 'kaira.response.response.redirect', 'response.redirect', (['"""/done"""'], {}), "('/done')\n", (504, 513), False, 'from kaira.response import response\n'), ((225, 257), 'wtforms.validators.Length', 'validators.Length', ([], {'min': '(4)', 'max': '(25)'}), '(min=4, max=25)\n', (242, 257), False, 'from wtforms import StringField, validators\n'), ((300, 332), 'wtforms.validators.Length', 'validators.Length', ([], {'min': '(6)', 'max': '(35)'}), '(min=6, max=35)\n', (317, 332), False, 'from wtforms import StringField, validators\n')]
|
# 调整图像,使其累积直方图与另一幅图像相匹配,各个通道独立匹配。
import matplotlib.pyplot as plt
from skimage import data, img_as_float, io
from skimage import exposure
from skimage.exposure import match_histograms
reference = io.imread('/home/qiao/PythonProjects/Scikit-image_On_CT/Test_Img/9.jpg')
image = io.imread('/home/qiao/PythonProjects/Scikit-image_On_CT/Test_Img/10.jpg')
matched = match_histograms(image, reference, multichannel=True)
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3),
sharex=True, sharey=True)
for aa in (ax1, ax2, ax3):
aa.set_axis_off()
ax1.imshow(image)
ax1.set_title('Source')
ax2.imshow(reference)
ax2.set_title('Reference')
ax3.imshow(matched)
ax3.set_title('Matched')
plt.tight_layout()
plt.show()
|
[
"skimage.exposure.match_histograms",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout",
"skimage.io.imread"
] |
[((198, 270), 'skimage.io.imread', 'io.imread', (['"""/home/qiao/PythonProjects/Scikit-image_On_CT/Test_Img/9.jpg"""'], {}), "('/home/qiao/PythonProjects/Scikit-image_On_CT/Test_Img/9.jpg')\n", (207, 270), False, 'from skimage import data, img_as_float, io\n'), ((280, 353), 'skimage.io.imread', 'io.imread', (['"""/home/qiao/PythonProjects/Scikit-image_On_CT/Test_Img/10.jpg"""'], {}), "('/home/qiao/PythonProjects/Scikit-image_On_CT/Test_Img/10.jpg')\n", (289, 353), False, 'from skimage import data, img_as_float, io\n'), ((366, 419), 'skimage.exposure.match_histograms', 'match_histograms', (['image', 'reference'], {'multichannel': '(True)'}), '(image, reference, multichannel=True)\n', (382, 419), False, 'from skimage.exposure import match_histograms\n'), ((444, 516), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'figsize': '(8, 3)', 'sharex': '(True)', 'sharey': '(True)'}), '(nrows=1, ncols=3, figsize=(8, 3), sharex=True, sharey=True)\n', (456, 516), True, 'import matplotlib.pyplot as plt\n'), ((740, 758), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (756, 758), True, 'import matplotlib.pyplot as plt\n'), ((759, 769), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (767, 769), True, 'import matplotlib.pyplot as plt\n')]
|
import logging
import os
from datetime import datetime
import tempfile
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from apimetrics_agent import VERSION
from .controller import handle_api_request
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class APImetricsThread(object):
def __init__(self, config):
self.config = config
def handle_definition(self, definition, complete_cb):
logger.debug("handle_definition")
# exception_str = None
result = None
if not definition:
logger.error("definition not set")
complete_cb()
return
test_key_str = self.validate_data(definition)
if not test_key_str:
logger.error("Invalid request data: %s", definition)
complete_cb()
return
cert_file_name = None
key_file_name = None
if definition["request"].get("ssl_cert"):
with tempfile.NamedTemporaryFile(mode="w", delete=False) as cert:
cert.write(definition["request"]["ssl_cert"])
definition["_cert_file"] = cert.name
cert_file_name = cert.name
logger.debug("Using cert file %s", cert_file_name)
del definition["request"]["ssl_cert"]
if definition["request"].get("ssl_key"):
with tempfile.NamedTemporaryFile(mode="w", delete=False) as cert:
cert.write(definition["request"]["ssl_key"])
definition["_key_file"] = cert.name
key_file_name = cert.name
del definition["request"]["ssl_key"]
elif self.config.ssl_cert_file:
definition["_cert_file"] = self.config.ssl_cert_file
definition["_key_file"] = self.config.ssl_key_file
try:
complete_cb()
result = handle_api_request(definition)
except Exception as ex: # pylint: disable=W0703
logger.error("Exception in handle_api_request %s", ex)
result = {
"test_key_str": test_key_str,
"result_key_str": definition["result_key_str"],
"start_time": datetime.utcnow().isoformat(),
"request": definition["request"],
"response": None,
"exception": "Problem with test agent: {}".format(repr(ex)),
}
if definition.get("expected_trigger_time"):
result["expected_trigger_time"] = definition["expected_trigger_time"]
if definition.get("trigger_time"):
result["trigger_time"] = definition["trigger_time"]
if cert_file_name:
try:
os.remove(cert_file_name)
except FileNotFoundError:
pass
if key_file_name:
try:
os.remove(key_file_name)
except FileNotFoundError:
pass
res = self.send_result_to_gae(result, test_key_str)
logger.info("Got response %d %s", res.status_code, res.reason)
# logger.debug(res.data) #read(decode_content=True))
def validate_data(self, output):
logger.debug("validate_data")
if (
"access_token" in output
and self.config.access_token == output["access_token"]
):
return output["test_key_str"]
return None
def send_result_to_gae(self, result, test_key_str):
logger.debug("send_result_to_gae")
url = "{}/remote-api/1/test/{}/".format(self.config.host_url, test_key_str)
result["version"] = VERSION
session = requests.Session()
retries = Retry(
total=5,
backoff_factor=1,
method_whitelist=["POST"],
status_forcelist=[500, 501, 502, 503, 504],
)
session.mount(self.config.host_url, HTTPAdapter(max_retries=retries))
logger.info("Calling %s %s proxy: %s", "POST", url, self.config.proxies)
return session.post(url, json=result, proxies=self.config.proxies, verify=False)
def handle_request(config, definition, complete_cb=None):
logger.debug("handle_request")
thread = APImetricsThread(config)
thread.handle_definition(definition, complete_cb)
|
[
"tempfile.NamedTemporaryFile",
"os.remove",
"requests.adapters.HTTPAdapter",
"requests.Session",
"datetime.datetime.utcnow",
"urllib3.util.retry.Retry",
"logging.getLogger"
] |
[((256, 283), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (273, 283), False, 'import logging\n'), ((3681, 3699), 'requests.Session', 'requests.Session', ([], {}), '()\n', (3697, 3699), False, 'import requests\n'), ((3718, 3825), 'urllib3.util.retry.Retry', 'Retry', ([], {'total': '(5)', 'backoff_factor': '(1)', 'method_whitelist': "['POST']", 'status_forcelist': '[500, 501, 502, 503, 504]'}), "(total=5, backoff_factor=1, method_whitelist=['POST'],\n status_forcelist=[500, 501, 502, 503, 504])\n", (3723, 3825), False, 'from urllib3.util.retry import Retry\n'), ((3925, 3957), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'retries'}), '(max_retries=retries)\n', (3936, 3957), False, 'from requests.adapters import HTTPAdapter\n'), ((1006, 1057), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (1033, 1057), False, 'import tempfile\n'), ((2755, 2780), 'os.remove', 'os.remove', (['cert_file_name'], {}), '(cert_file_name)\n', (2764, 2780), False, 'import os\n'), ((2899, 2923), 'os.remove', 'os.remove', (['key_file_name'], {}), '(key_file_name)\n', (2908, 2923), False, 'import os\n'), ((1417, 1468), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (1444, 1468), False, 'import tempfile\n'), ((2246, 2263), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2261, 2263), False, 'from datetime import datetime\n')]
|
from django.contrib import admin
from .models import (Assignment, CourseDocuments, CourseVideo, Forum,
ForumReply, Quiz, QuizQuestion, QuizResult, StudentAnswer,
StudentAssignment)
admin.site.register(CourseDocuments)
admin.site.register(CourseVideo)
admin.site.register(Quiz)
admin.site.register(QuizQuestion)
admin.site.register(StudentAnswer)
admin.site.register(Assignment)
admin.site.register(StudentAssignment)
admin.site.register(QuizResult)
admin.site.register(Forum)
admin.site.register(ForumReply)
|
[
"django.contrib.admin.site.register"
] |
[((225, 261), 'django.contrib.admin.site.register', 'admin.site.register', (['CourseDocuments'], {}), '(CourseDocuments)\n', (244, 261), False, 'from django.contrib import admin\n'), ((262, 294), 'django.contrib.admin.site.register', 'admin.site.register', (['CourseVideo'], {}), '(CourseVideo)\n', (281, 294), False, 'from django.contrib import admin\n'), ((295, 320), 'django.contrib.admin.site.register', 'admin.site.register', (['Quiz'], {}), '(Quiz)\n', (314, 320), False, 'from django.contrib import admin\n'), ((321, 354), 'django.contrib.admin.site.register', 'admin.site.register', (['QuizQuestion'], {}), '(QuizQuestion)\n', (340, 354), False, 'from django.contrib import admin\n'), ((355, 389), 'django.contrib.admin.site.register', 'admin.site.register', (['StudentAnswer'], {}), '(StudentAnswer)\n', (374, 389), False, 'from django.contrib import admin\n'), ((390, 421), 'django.contrib.admin.site.register', 'admin.site.register', (['Assignment'], {}), '(Assignment)\n', (409, 421), False, 'from django.contrib import admin\n'), ((422, 460), 'django.contrib.admin.site.register', 'admin.site.register', (['StudentAssignment'], {}), '(StudentAssignment)\n', (441, 460), False, 'from django.contrib import admin\n'), ((461, 492), 'django.contrib.admin.site.register', 'admin.site.register', (['QuizResult'], {}), '(QuizResult)\n', (480, 492), False, 'from django.contrib import admin\n'), ((493, 519), 'django.contrib.admin.site.register', 'admin.site.register', (['Forum'], {}), '(Forum)\n', (512, 519), False, 'from django.contrib import admin\n'), ((520, 551), 'django.contrib.admin.site.register', 'admin.site.register', (['ForumReply'], {}), '(ForumReply)\n', (539, 551), False, 'from django.contrib import admin\n')]
|
from tempfile import mkdtemp
import hashlib
from shutil import rmtree, copy
import os
import os.path
import subprocess
import struct
import sys
import unittest
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.utils import Prehashed
from cryptography.hazmat.primitives.hashes import Hash, SHA1
import cryptography.hazmat.primitives.serialization as crypto_serialization
import cryptography.hazmat.primitives.hashes as crypto_hashes
import cryptography.hazmat.primitives.asymmetric.ec as crypto_ec
from cryptography.x509 import load_der_x509_certificate
import xattr
import rpm_head_signing
class TestRpmHeadSigning(unittest.TestCase):
pkg_numbers = ['1', '2']
@classmethod
def setUpClass(cls):
cls.asset_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'test_assets',
)
def setUp(self):
self.tmpdir = mkdtemp(prefix='test-rpm_head_signing-', dir=os.path.abspath('.'))
def tearDown(self):
rmtree(self.tmpdir)
self.tmpdir = None
def compare_files(self, asset_name, tmp_name):
with open(os.path.join(self.asset_dir, asset_name), 'rb') as asset_file:
with open(os.path.join(self.tmpdir, tmp_name), 'rb') as tmp_file:
self.assertEqual(
asset_file.read(),
tmp_file.read(),
"Asset file %s is different from tmp file %s" % (asset_name, tmp_name),
)
def test_extract(self):
rpm_head_signing.extract_header(
os.path.join(self.asset_dir, 'testpkg-1.noarch.rpm'),
os.path.join(self.tmpdir, 'testpkg-1.noarch.rpm.hdr.tmp'),
os.path.join(self.tmpdir, 'digests.out.tmp'),
)
rpm_head_signing.extract_header(
os.path.join(self.asset_dir, 'testpkg-2.noarch.rpm'),
os.path.join(self.tmpdir, 'testpkg-2.noarch.rpm.hdr.tmp'),
os.path.join(self.tmpdir, 'digests.out.tmp'),
)
self.compare_files("testpkg-1.noarch.rpm.hdr", "testpkg-1.noarch.rpm.hdr.tmp")
self.compare_files("testpkg-2.noarch.rpm.hdr", "testpkg-2.noarch.rpm.hdr.tmp")
self.compare_files("digests.out", "digests.out.tmp")
def test_insert_no_ima(self):
copy(
os.path.join(self.asset_dir, 'gpgkey.asc'),
os.path.join(self.tmpdir, 'gpgkey.key'),
)
for pkg in self.pkg_numbers:
copy(
os.path.join(self.asset_dir, "testpkg-%s.noarch.rpm" % pkg),
os.path.join(self.tmpdir, "testpkg-%s.noarch.rpm" % pkg),
)
res = subprocess.check_output(
[
'rpm',
'--define', '%%_keyringpath %s' % self.tmpdir,
'--define', '%%_keyringpath %s' % self.tmpdir,
'-Kv',
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
],
)
self.assertTrue(b'SHA1 digest: OK' in res)
self.assertFalse(b'Header V3 RSA' in res)
rpm_head_signing.insert_signature(
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
os.path.join(self.asset_dir, 'testpkg-%s.noarch.rpm.hdr.sig' % pkg)
)
res = subprocess.check_output(
[
'rpm',
'--define', '%%_keyringpath %s' % self.tmpdir,
'--define', '%%_keyringpath %s' % self.tmpdir,
'-Kvvvvvvvv',
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
],
)
self.assertTrue(b'SHA1 digest: OK' in res)
self.assertTrue(b'Header V3 RSA' in res)
self.assertTrue(b'15f712be: ok' in res.lower())
def test_insert_ima(self):
self._ima_insertion_test(None)
def test_insert_ima_valgrind(self):
valgrind_logfile = os.environ.get(
'VALGRIND_LOG_FILE',
'%s/valgrind.log' % self.tmpdir,
)
self._ima_insertion_test(
[
'valgrind',
'--tool=memcheck',
'--track-fds=yes',
'--leak-check=full',
'--track-origins=yes',
'--log-file=%s' % valgrind_logfile,
'--',
sys.executable,
'test_insert.py',
]
)
with open(valgrind_logfile, 'r') as logfile:
log = logfile.read()
if os.environ.get('PRINT_VALGRIND_LOG'):
print('---- START OF VALGRIND LOG ----')
print(log)
print('---- END OF VALGRIND LOG ----')
if 'insertlib.c' in log:
raise Exception("insertlib.c found in the Valgrind log")
def _ima_insertion_test(self, insert_command):
copy(
os.path.join(self.asset_dir, 'gpgkey.asc'),
os.path.join(self.tmpdir, 'gpgkey.key'),
)
for pkg in self.pkg_numbers:
copy(
os.path.join(self.asset_dir, "testpkg-%s.noarch.rpm" % pkg),
os.path.join(self.tmpdir, "testpkg-%s.noarch.rpm" % pkg),
)
res = subprocess.check_output(
[
'rpm',
'--define', '%%_keyringpath %s' % self.tmpdir,
'--define', '%%_keyringpath %s' % self.tmpdir,
'-Kv',
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
],
)
self.assertTrue(b'SHA1 digest: OK' in res)
self.assertFalse(b'Header V3 RSA' in res)
if insert_command is None:
rpm_head_signing.insert_signature(
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
os.path.join(self.asset_dir, 'testpkg-%s.noarch.rpm.hdr.sig' % pkg),
ima_presigned_path=os.path.join(self.asset_dir, 'digests.out.signed'),
)
else:
subprocess.check_call(
insert_command + [
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
os.path.join(self.asset_dir, 'testpkg-%s.noarch.rpm.hdr.sig' % pkg),
os.path.join(self.asset_dir, 'digests.out.signed'),
]
)
res = subprocess.check_output(
[
'rpm',
'--define', '%%_keyringpath %s' % self.tmpdir,
'--define', '%%_keyringpath %s' % self.tmpdir,
'-Kvvvv',
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
],
)
self.assertTrue(b'SHA1 digest: OK' in res)
self.assertTrue(b'Header V3 RSA' in res)
self.assertTrue(b'15f712be: ok' in res.lower())
extracted_dir = os.path.join(self.tmpdir, 'testpkg-%s.noarch.extracted' % pkg)
os.mkdir(extracted_dir)
rpm_head_signing.extract_rpm_with_filesigs(
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
extracted_dir,
)
with open(os.path.join(self.asset_dir, 'imacert.der'), 'rb') as f:
cert = load_der_x509_certificate(f.read(), backend=default_backend())
pubkey = cert.public_key()
evmctl_help = subprocess.check_output(['evmctl', '--help'])
for (where, dnames, fnames) in os.walk(extracted_dir):
for fname in fnames:
# Always run the manual evmctl check.
alternative_evmctl_check(
os.path.join(where, fname),
pubkey,
)
if b'--xattr-user' in evmctl_help:
subprocess.check_call(
[
'evmctl',
'-v',
'--key', os.path.join(self.asset_dir, 'imacert.der'),
'ima_verify',
'--xattr-user',
os.path.join(where, fname),
],
)
else:
if not os.environ.get('ONLY_ALTERNATIVE_EVMCTL_CHECK'):
raise Exception("Can't test evmctl")
def alternative_evmctl_check(file_path, pubkey):
# In RHEL7, evmctl is too old, so we won't be able to run the
# evmctl check
ima_sig = bytearray(xattr.getxattr(file_path, 'user.ima'))
if ima_sig[0] != 3:
raise Exception("IMA signature has wrong prefix (%s)" % ima_sig[0])
if ima_sig[1] != 2:
raise Exception("IMA signature has wrong version (%s)" % ima_sig[1])
algo_id = ima_sig[2]
if algo_id == 7: # SHA224
hasher = hashlib.sha224()
crypto_algo = crypto_hashes.SHA224()
elif algo_id == 4: # SHA256
hasher = hashlib.sha256()
crypto_algo = crypto_hashes.SHA256()
elif algo_id == 5: # SHA384
hasher = hashlib.sha384()
crypto_algo = crypto_hashes.SHA384()
elif algo_id == 6: # SHA512
hasher = hashlib.sha512()
crypto_algo = crypto_hashes.SHA512()
else:
raise Exception("IMA signature has invalid algo: %d" % algo_id)
crypto_algo = Prehashed(crypto_algo)
if sys.version_info.major == 3:
# X962 is only supported on Cryptography 2.5+
# We are a bit lazy and just check for py3 instead of checking this more carefully
# Check the Key ID
key_id = ima_sig[3:7]
keybytes = pubkey.public_bytes(
crypto_serialization.Encoding.X962,
crypto_serialization.PublicFormat.UncompressedPoint,
)
keybytes_digester = Hash(SHA1())
keybytes_digester.update(keybytes)
keybytes_digest = keybytes_digester.finalize()
correct_keyid = keybytes_digest[-4:]
if correct_keyid != key_id:
raise Exception("IMA signature has invalid key ID: %s != %s" % (correct_keyid, key_id))
# Check the signature itself
(sig_size,) = struct.unpack('>H', ima_sig[7:9])
sig = ima_sig[9:]
if len(sig) != sig_size:
raise Exception("IMA signature size invalid: %d != %d" % (len(sig), sig_size))
with open(file_path, 'rb') as f:
hasher.update(f.read())
file_digest = hasher.digest()
pubkey.verify(
bytes(sig),
bytes(file_digest),
crypto_ec.ECDSA(crypto_algo),
)
if __name__ == '__main__':
unittest.main()
|
[
"os.mkdir",
"os.walk",
"shutil.rmtree",
"hashlib.sha512",
"os.path.join",
"unittest.main",
"cryptography.hazmat.primitives.asymmetric.ec.ECDSA",
"os.path.abspath",
"cryptography.hazmat.primitives.hashes.SHA256",
"cryptography.hazmat.primitives.hashes.SHA384",
"cryptography.hazmat.primitives.asymmetric.utils.Prehashed",
"hashlib.sha384",
"hashlib.sha256",
"cryptography.hazmat.primitives.hashes.SHA1",
"hashlib.sha224",
"xattr.getxattr",
"cryptography.hazmat.primitives.hashes.SHA512",
"subprocess.check_output",
"struct.unpack",
"os.path.realpath",
"cryptography.hazmat.backends.default_backend",
"os.environ.get",
"cryptography.hazmat.primitives.hashes.SHA224"
] |
[((9595, 9617), 'cryptography.hazmat.primitives.asymmetric.utils.Prehashed', 'Prehashed', (['crypto_algo'], {}), '(crypto_algo)\n', (9604, 9617), False, 'from cryptography.hazmat.primitives.asymmetric.utils import Prehashed\n'), ((10391, 10424), 'struct.unpack', 'struct.unpack', (['""">H"""', 'ima_sig[7:9]'], {}), "('>H', ima_sig[7:9])\n", (10404, 10424), False, 'import struct\n'), ((10815, 10830), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10828, 10830), False, 'import unittest\n'), ((1041, 1060), 'shutil.rmtree', 'rmtree', (['self.tmpdir'], {}), '(self.tmpdir)\n', (1047, 1060), False, 'from shutil import rmtree, copy\n'), ((4026, 4094), 'os.environ.get', 'os.environ.get', (['"""VALGRIND_LOG_FILE"""', "('%s/valgrind.log' % self.tmpdir)"], {}), "('VALGRIND_LOG_FILE', '%s/valgrind.log' % self.tmpdir)\n", (4040, 4094), False, 'import os\n'), ((4613, 4649), 'os.environ.get', 'os.environ.get', (['"""PRINT_VALGRIND_LOG"""'], {}), "('PRINT_VALGRIND_LOG')\n", (4627, 4649), False, 'import os\n'), ((8784, 8821), 'xattr.getxattr', 'xattr.getxattr', (['file_path', '"""user.ima"""'], {}), "(file_path, 'user.ima')\n", (8798, 8821), False, 'import xattr\n'), ((9097, 9113), 'hashlib.sha224', 'hashlib.sha224', ([], {}), '()\n', (9111, 9113), False, 'import hashlib\n'), ((9136, 9158), 'cryptography.hazmat.primitives.hashes.SHA224', 'crypto_hashes.SHA224', ([], {}), '()\n', (9156, 9158), True, 'import cryptography.hazmat.primitives.hashes as crypto_hashes\n'), ((10746, 10774), 'cryptography.hazmat.primitives.asymmetric.ec.ECDSA', 'crypto_ec.ECDSA', (['crypto_algo'], {}), '(crypto_algo)\n', (10761, 10774), True, 'import cryptography.hazmat.primitives.asymmetric.ec as crypto_ec\n'), ((1601, 1653), 'os.path.join', 'os.path.join', (['self.asset_dir', '"""testpkg-1.noarch.rpm"""'], {}), "(self.asset_dir, 'testpkg-1.noarch.rpm')\n", (1613, 1653), False, 'import os\n'), ((1667, 1724), 'os.path.join', 'os.path.join', (['self.tmpdir', '"""testpkg-1.noarch.rpm.hdr.tmp"""'], {}), 
"(self.tmpdir, 'testpkg-1.noarch.rpm.hdr.tmp')\n", (1679, 1724), False, 'import os\n'), ((1738, 1782), 'os.path.join', 'os.path.join', (['self.tmpdir', '"""digests.out.tmp"""'], {}), "(self.tmpdir, 'digests.out.tmp')\n", (1750, 1782), False, 'import os\n'), ((1847, 1899), 'os.path.join', 'os.path.join', (['self.asset_dir', '"""testpkg-2.noarch.rpm"""'], {}), "(self.asset_dir, 'testpkg-2.noarch.rpm')\n", (1859, 1899), False, 'import os\n'), ((1913, 1970), 'os.path.join', 'os.path.join', (['self.tmpdir', '"""testpkg-2.noarch.rpm.hdr.tmp"""'], {}), "(self.tmpdir, 'testpkg-2.noarch.rpm.hdr.tmp')\n", (1925, 1970), False, 'import os\n'), ((1984, 2028), 'os.path.join', 'os.path.join', (['self.tmpdir', '"""digests.out.tmp"""'], {}), "(self.tmpdir, 'digests.out.tmp')\n", (1996, 2028), False, 'import os\n'), ((2337, 2379), 'os.path.join', 'os.path.join', (['self.asset_dir', '"""gpgkey.asc"""'], {}), "(self.asset_dir, 'gpgkey.asc')\n", (2349, 2379), False, 'import os\n'), ((2393, 2432), 'os.path.join', 'os.path.join', (['self.tmpdir', '"""gpgkey.key"""'], {}), "(self.tmpdir, 'gpgkey.key')\n", (2405, 2432), False, 'import os\n'), ((4958, 5000), 'os.path.join', 'os.path.join', (['self.asset_dir', '"""gpgkey.asc"""'], {}), "(self.asset_dir, 'gpgkey.asc')\n", (4970, 5000), False, 'import os\n'), ((5014, 5053), 'os.path.join', 'os.path.join', (['self.tmpdir', '"""gpgkey.key"""'], {}), "(self.tmpdir, 'gpgkey.key')\n", (5026, 5053), False, 'import os\n'), ((7069, 7131), 'os.path.join', 'os.path.join', (['self.tmpdir', "('testpkg-%s.noarch.extracted' % pkg)"], {}), "(self.tmpdir, 'testpkg-%s.noarch.extracted' % pkg)\n", (7081, 7131), False, 'import os\n'), ((7145, 7168), 'os.mkdir', 'os.mkdir', (['extracted_dir'], {}), '(extracted_dir)\n', (7153, 7168), False, 'import os\n'), ((7581, 7626), 'subprocess.check_output', 'subprocess.check_output', (["['evmctl', '--help']"], {}), "(['evmctl', '--help'])\n", (7604, 7626), False, 'import subprocess\n'), ((7671, 7693), 'os.walk', 'os.walk', 
(['extracted_dir'], {}), '(extracted_dir)\n', (7678, 7693), False, 'import os\n'), ((9209, 9225), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (9223, 9225), False, 'import hashlib\n'), ((9248, 9270), 'cryptography.hazmat.primitives.hashes.SHA256', 'crypto_hashes.SHA256', ([], {}), '()\n', (9268, 9270), True, 'import cryptography.hazmat.primitives.hashes as crypto_hashes\n'), ((10053, 10059), 'cryptography.hazmat.primitives.hashes.SHA1', 'SHA1', ([], {}), '()\n', (10057, 10059), False, 'from cryptography.hazmat.primitives.hashes import Hash, SHA1\n'), ((831, 857), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (847, 857), False, 'import os\n'), ((986, 1006), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (1001, 1006), False, 'import os\n'), ((1158, 1198), 'os.path.join', 'os.path.join', (['self.asset_dir', 'asset_name'], {}), '(self.asset_dir, asset_name)\n', (1170, 1198), False, 'import os\n'), ((2515, 2574), 'os.path.join', 'os.path.join', (['self.asset_dir', "('testpkg-%s.noarch.rpm' % pkg)"], {}), "(self.asset_dir, 'testpkg-%s.noarch.rpm' % pkg)\n", (2527, 2574), False, 'import os\n'), ((2592, 2648), 'os.path.join', 'os.path.join', (['self.tmpdir', "('testpkg-%s.noarch.rpm' % pkg)"], {}), "(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg)\n", (2604, 2648), False, 'import os\n'), ((3196, 3252), 'os.path.join', 'os.path.join', (['self.tmpdir', "('testpkg-%s.noarch.rpm' % pkg)"], {}), "(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg)\n", (3208, 3252), False, 'import os\n'), ((3270, 3337), 'os.path.join', 'os.path.join', (['self.asset_dir', "('testpkg-%s.noarch.rpm.hdr.sig' % pkg)"], {}), "(self.asset_dir, 'testpkg-%s.noarch.rpm.hdr.sig' % pkg)\n", (3282, 3337), False, 'import os\n'), ((5136, 5195), 'os.path.join', 'os.path.join', (['self.asset_dir', "('testpkg-%s.noarch.rpm' % pkg)"], {}), "(self.asset_dir, 'testpkg-%s.noarch.rpm' % pkg)\n", (5148, 5195), False, 'import os\n'), ((5213, 5269), 'os.path.join', 
'os.path.join', (['self.tmpdir', "('testpkg-%s.noarch.rpm' % pkg)"], {}), "(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg)\n", (5225, 5269), False, 'import os\n'), ((7242, 7298), 'os.path.join', 'os.path.join', (['self.tmpdir', "('testpkg-%s.noarch.rpm' % pkg)"], {}), "(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg)\n", (7254, 7298), False, 'import os\n'), ((9321, 9337), 'hashlib.sha384', 'hashlib.sha384', ([], {}), '()\n', (9335, 9337), False, 'import hashlib\n'), ((9360, 9382), 'cryptography.hazmat.primitives.hashes.SHA384', 'crypto_hashes.SHA384', ([], {}), '()\n', (9380, 9382), True, 'import cryptography.hazmat.primitives.hashes as crypto_hashes\n'), ((1243, 1278), 'os.path.join', 'os.path.join', (['self.tmpdir', 'tmp_name'], {}), '(self.tmpdir, tmp_name)\n', (1255, 1278), False, 'import os\n'), ((2933, 2989), 'os.path.join', 'os.path.join', (['self.tmpdir', "('testpkg-%s.noarch.rpm' % pkg)"], {}), "(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg)\n", (2945, 2989), False, 'import os\n'), ((3628, 3684), 'os.path.join', 'os.path.join', (['self.tmpdir', "('testpkg-%s.noarch.rpm' % pkg)"], {}), "(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg)\n", (3640, 3684), False, 'import os\n'), ((5554, 5610), 'os.path.join', 'os.path.join', (['self.tmpdir', "('testpkg-%s.noarch.rpm' % pkg)"], {}), "(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg)\n", (5566, 5610), False, 'import os\n'), ((5865, 5921), 'os.path.join', 'os.path.join', (['self.tmpdir', "('testpkg-%s.noarch.rpm' % pkg)"], {}), "(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg)\n", (5877, 5921), False, 'import os\n'), ((5943, 6010), 'os.path.join', 'os.path.join', (['self.asset_dir', "('testpkg-%s.noarch.rpm.hdr.sig' % pkg)"], {}), "(self.asset_dir, 'testpkg-%s.noarch.rpm.hdr.sig' % pkg)\n", (5955, 6010), False, 'import os\n'), ((6781, 6837), 'os.path.join', 'os.path.join', (['self.tmpdir', "('testpkg-%s.noarch.rpm' % pkg)"], {}), "(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg)\n", (6793, 6837), False, 'import os\n'), ((7368, 7411), 
'os.path.join', 'os.path.join', (['self.asset_dir', '"""imacert.der"""'], {}), "(self.asset_dir, 'imacert.der')\n", (7380, 7411), False, 'import os\n'), ((9433, 9449), 'hashlib.sha512', 'hashlib.sha512', ([], {}), '()\n', (9447, 9449), False, 'import hashlib\n'), ((9472, 9494), 'cryptography.hazmat.primitives.hashes.SHA512', 'crypto_hashes.SHA512', ([], {}), '()\n', (9492, 9494), True, 'import cryptography.hazmat.primitives.hashes as crypto_hashes\n'), ((6051, 6101), 'os.path.join', 'os.path.join', (['self.asset_dir', '"""digests.out.signed"""'], {}), "(self.asset_dir, 'digests.out.signed')\n", (6063, 6101), False, 'import os\n'), ((7492, 7509), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (7507, 7509), False, 'from cryptography.hazmat.backends import default_backend\n'), ((7860, 7886), 'os.path.join', 'os.path.join', (['where', 'fname'], {}), '(where, fname)\n', (7872, 7886), False, 'import os\n'), ((6241, 6297), 'os.path.join', 'os.path.join', (['self.tmpdir', "('testpkg-%s.noarch.rpm' % pkg)"], {}), "(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg)\n", (6253, 6297), False, 'import os\n'), ((6323, 6390), 'os.path.join', 'os.path.join', (['self.asset_dir', "('testpkg-%s.noarch.rpm.hdr.sig' % pkg)"], {}), "(self.asset_dir, 'testpkg-%s.noarch.rpm.hdr.sig' % pkg)\n", (6335, 6390), False, 'import os\n'), ((6416, 6466), 'os.path.join', 'os.path.join', (['self.asset_dir', '"""digests.out.signed"""'], {}), "(self.asset_dir, 'digests.out.signed')\n", (6428, 6466), False, 'import os\n'), ((8509, 8556), 'os.environ.get', 'os.environ.get', (['"""ONLY_ALTERNATIVE_EVMCTL_CHECK"""'], {}), "('ONLY_ALTERNATIVE_EVMCTL_CHECK')\n", (8523, 8556), False, 'import os\n'), ((8196, 8239), 'os.path.join', 'os.path.join', (['self.asset_dir', '"""imacert.der"""'], {}), "(self.asset_dir, 'imacert.der')\n", (8208, 8239), False, 'import os\n'), ((8367, 8393), 'os.path.join', 'os.path.join', (['where', 'fname'], {}), '(where, fname)\n', (8379, 8393), False, 
'import os\n')]
|
"""
Usage:
arduCryoFridgeCLI.py [--port=<USBportname>] configure [--ontime=<ontime>] [--offtime=<offtime>]
arduCryoFridgeCLI.py [--port=<USBportname>] switch [--on | --off] [--now | --delay=<delay>]
arduCryoFridgeCLI.py [--port=<USBportname>] (-s | --status)
arduCryoFridgeCLI.py [--port=<USBportname>] -q
arduCryoFridgeCLI.py -h | --help
Options:
--port=<USBportname> Specify USB port: done before running other commands.
--ontime=<ontime> Duration of ontime minutes.
--offtime=<offtime> Duration of offtime minutes.
--delay=<delay> Start on/off cycle in delay [default: 0] minutes.
-s --status Read out and report PT410 status.
-q Query program version + version run on the arduino.
-h --help Show this screen.
"""
from docopt import docopt
import serial
import serial.tools.list_ports
baud = 9600
programVersion = 1.0
# will try to autodetect port first, if no port detected, will prompt user to input a port
# doesn't work with third-party Arduino knockoffs (in which case, user specifies port)
def autodetect():
    """Scan the serial ports, print them, and open the first likely Arduino.

    A port whose description is exactly "USB2.0-Serial" is treated as the
    Arduino.  Returns an open ``serial.Serial`` on success; on failure prints
    a hint about the --port option and returns None.

    Note: third-party Arduino knock-offs may not report this description,
    in which case the user must specify the port manually.
    """
    available = serial.tools.list_ports.comports()
    print("Available ports: ")
    for name, description, hwid in sorted(available):
        print("{}: {} [{}]".format(name, description, hwid))
        if description != "USB2.0-Serial":
            continue
        try:
            connection = serial.Serial(name, baud)
        except Exception as err:
            # Keep scanning the remaining ports after a failed open.
            print("\nCouldn't open port: " + str(err))
        else:
            print("Connected to: " + name + '\n')
            return connection
    # Reached only when no candidate port could be opened.
    print("No likely serial port found. Use command '--port=<USBportname>' to manually specify a port.")
if __name__ == "__main__":
    # Command dispatch: docopt has already validated the CLI grammar
    # (the module docstring) before we get here.
    args = docopt(__doc__) # docopt saves arguments and options as key:value pairs in a dictionary
    print(args)
    # NOTE(review): if autodetect() finds nothing it returns None and every
    # ser.readline()/ser.write() below would raise AttributeError — confirm
    # whether that is the intended failure mode.
    if args['--port'] == None:
        ser = autodetect()
    else:
        ser = serial.Serial(args['--port'], baud)
    # Each branch first consumes one line from the Arduino (its ready banner)
    # and then sends a one-letter opcode, optionally followed by a number.
    if args['configure'] == True:
        # NOTE(review): because of the elif, only one of --ontime/--offtime is
        # sent per invocation even if both were given — confirm intended.
        if args['--ontime'] != None:
            ontime = args['--ontime']
            print("Ontime = " + ontime)
            ser.readline() # waits until arduino prints "UNO is ready!"
            ser.write(('A'+ str(ontime)).encode())  # opcode 'A' = set on-time
        elif args['--offtime'] != None:
            offtime = args['--offtime']
            print("Offtime = " + offtime)
            ser.readline()
            ser.write(('B'+ str(offtime)).encode())  # opcode 'B' = set off-time
    elif args['switch'] == True:
        if args['--on'] == True:
            if args['--now'] == True:
                print("switch compressor on NOW")
                ser.readline()
                ser.write('G'.encode())  # opcode 'G' = switch on immediately
            else:
                delay = args['--delay']
                print("delay turning on by " + str(delay) + " minutes")
                ser.readline()
                # NOTE(review): both delayed-on and delayed-off send opcode 'Z';
                # presumably the Arduino toggles state after the delay — confirm.
                ser.write(('Z'+str(delay)).encode())
        elif args['--off'] == True:
            if args['--now'] == True:
                print("switch compressor off NOW")
                ser.readline()
                ser.write('X'.encode())  # opcode 'X' = switch off immediately
            else:
                delay = args['--delay']
                print("delay turning off by " + str(delay) + " minutes")
                ser.readline()
                ser.write(('Z'+str(delay)).encode())
        # Echo the Arduino's response to the switch command.
        print(ser.readline())
    elif args['--status'] != False:
        # Opcode 'S' makes the Arduino report four status lines in a fixed order.
        print("PT410 status: ")
        ser.readline()
        ser.write('S'.encode())
        LEDStatus = ser.readline()
        print(LEDStatus)
        button1Status = ser.readline()
        print(button1Status)
        button2Status = ser.readline()
        print(button2Status)
        button3Status = ser.readline()
        print(button3Status)
    elif args['-q'] != False:
        # Opcode 'Q' asks the Arduino for its firmware version.
        print("Python program version: " + str(programVersion))
        ser.readline()
        ser.write('Q'.encode())
        arduinoProgramVersion = ser.readline()
        print(str(arduinoProgramVersion))
    else:
        print('nothing left to do')
|
[
"serial.Serial",
"serial.tools.list_ports.comports",
"docopt.docopt"
] |
[((1115, 1149), 'serial.tools.list_ports.comports', 'serial.tools.list_ports.comports', ([], {}), '()\n', (1147, 1149), False, 'import serial\n'), ((1806, 1821), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (1812, 1821), False, 'from docopt import docopt\n'), ((1994, 2029), 'serial.Serial', 'serial.Serial', (["args['--port']", 'baud'], {}), "(args['--port'], baud)\n", (2007, 2029), False, 'import serial\n'), ((1375, 1400), 'serial.Serial', 'serial.Serial', (['port', 'baud'], {}), '(port, baud)\n', (1388, 1400), False, 'import serial\n')]
|
import aoc_common as ac
import numpy as np
from aocd.models import Puzzle
# Advent of Code 2019 day 11: emergency hull painting robot.
puzzle = Puzzle(year=2019, day=11)
ram = [int(x) for x in puzzle.input_data.split(",")]
pointer = 0
relative_base = 0
painting = {(0, 0): 0}  # panel coordinate -> painted color (0 = black, 1 = white)
coord = (0, 0)
color = 0  # Part One
color = 1  # Part Two (overrides Part One's starting color)
direction = "N"
# locals() is handed to the intcode computer; presumably it reads `color`
# through that mapping, so reassigning `color` below feeds new camera input.
our_computer = ac.full_intcode_computer(ram, pointer, relative_base, locals())
while True:
    try:
        new_color = next(our_computer)
        d_color = next(our_computer)
        painting[coord] = new_color
        coord, direction = ac.robot_turner(coord, direction, d_color)
        # Unvisited panels start out black (0).
        color = painting.get(coord, 0)
    except StopIteration:
        # The intcode program halted: painting is finished.  (Previously a
        # bare `except:` swallowed *any* error here, hiding real bugs; only
        # generator exhaustion is the expected exit condition.)
        break
# print(len(painting.keys()))
x = []
y = []
z = []
for k, v in painting.items():
    x.append(int(k[0]))
    y.append(int(k[1]))
    z.append(int(v))
# Shift all coordinates to be non-negative before rasterising.
# NOTE(review): abs(min(...)) assumes the minimum coordinate is <= 0 — confirm.
min_x = abs(min(x))
min_y = abs(min(y))
x = [i + min_x for i in x]
y = [j + min_y for j in y]
message = np.zeros([6, 43])  # canvas sized for the known 6x43 letter output
message[y, x] = z
# message = np.where(message == 0, " ","■")
ac.screen(painting)
# print(np.array2string(np.flipud(message), max_line_width=np.inf))
|
[
"aoc_common.robot_turner",
"numpy.zeros",
"aoc_common.screen",
"aocd.models.Puzzle"
] |
[((84, 109), 'aocd.models.Puzzle', 'Puzzle', ([], {'year': '(2019)', 'day': '(11)'}), '(year=2019, day=11)\n', (90, 109), False, 'from aocd.models import Puzzle\n'), ((960, 977), 'numpy.zeros', 'np.zeros', (['[6, 43]'], {}), '([6, 43])\n', (968, 977), True, 'import numpy as np\n'), ((1042, 1061), 'aoc_common.screen', 'ac.screen', (['painting'], {}), '(painting)\n', (1051, 1061), True, 'import aoc_common as ac\n'), ((531, 573), 'aoc_common.robot_turner', 'ac.robot_turner', (['coord', 'direction', 'd_color'], {}), '(coord, direction, d_color)\n', (546, 573), True, 'import aoc_common as ac\n')]
|
"""
This python function is triggered when a new audio file is dropped into the S3 bucket that has
been configured for audio ingestion. It will ensure that no Transcribe job already exists for this
filename, and will then trigger the main Step Functions workflow to process this file.
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import json
import urllib.parse
import boto3
import pcaconfiguration as cf
def lambda_handler(event, context):
    """Validate a newly-uploaded audio file and start the analytics workflow.

    Triggered by an S3 ObjectCreated notification.  Verifies the object is
    readable, ensures no Transcribe job for the same filename is already
    running, locates the configured Step Function and starts an execution
    with the bucket/key/language as its JSON input.

    :param event: S3 notification event (first Record is used)
    :param context: Lambda context object (unused)
    :return: dict with statusCode 200 and a confirmation message body
    :raises Exception: if the object cannot be read, a Transcribe job for the
        file is in progress, or the Step Function cannot be found
    """
    # Load our configuration
    cf.loadConfiguration()

    # Get the object from the event and validate it exists
    s3 = boto3.client("s3")
    bucket = event['Records'][0]['s3']['bucket']['name']
    # Object keys arrive URL-encoded in S3 notifications
    key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')
    try:
        s3.get_object(Bucket=bucket, Key=key)
    except Exception as e:
        print(e)
        raise Exception(
            'Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(
                key, bucket))

    # Check a Transcribe job isn't in progress for this file-name
    jobName = cf.generateJobName(key)
    try:
        transcribe = boto3.client('transcribe')
        currentJobStatus = transcribe.get_transcription_job(TranscriptionJobName=jobName)["TranscriptionJob"]["TranscriptionJobStatus"]
    except Exception:
        # Job didn't already exist - no problem here
        currentJobStatus = ""

    # If there's a job already running then the input file may have been copied - quit
    if (currentJobStatus == "IN_PROGRESS") or (currentJobStatus == "QUEUED"):
        raise Exception(
            'A Transcription job named \'{}\' is already in progress - cannot continue.'.format(jobName))

    # Now find our Step Function (match on the ARN suffix)
    ourStepFunction = cf.appConfig[cf.COMP_SFN_NAME]
    sfnClient = boto3.client('stepfunctions')
    sfnMachinesResult = sfnClient.list_state_machines(maxResults = 1000)
    sfnArnList = list(filter(lambda x: x["stateMachineArn"].endswith(ourStepFunction), sfnMachinesResult["stateMachines"]))
    if sfnArnList == []:
        # Doesn't exist
        raise Exception(
            'Cannot find configured Step Function \'{}\' in the AWS account in this region - cannot begin workflow.'.format(ourStepFunction))
    sfnArn = sfnArnList[0]['stateMachineArn']

    # Decide what language this should be transcribed in - leave it blank to trigger auto-detection
    if cf.isAutoLanguageDetectionSet():
        transcribeLanguage = ""
    else:
        transcribeLanguage = cf.appConfig[cf.CONF_TRANSCRIBE_LANG][0]

    # Trigger a new Step Function execution.  Build the input payload with
    # json.dumps rather than hand-concatenated strings so that any special
    # characters in the key (quotes, backslashes) are escaped correctly.
    parameters = json.dumps({
        "bucket": bucket,
        "key": key,
        "langCode": transcribeLanguage
    }, indent=2)
    sfnClient.start_execution(stateMachineArn = sfnArn, input = parameters)

    # Everything was successful
    return {
        'statusCode': 200,
        'body': json.dumps('Post-call analytics workflow for file ' + key + ' successfully started.')
    }
}
# Main entrypoint
if __name__ == "__main__":
    # Local test harness: a canned S3 ObjectCreated notification with the
    # same structure Lambda delivers, pointing at a sample audio file.
    event = {
        "Records": [
            {
                "s3": {
                    "s3SchemaVersion": "1.0",
                    "configurationId": "eca58aa9-dd2b-4405-94d5-d5fba7fd0a16",
                    "bucket": {
                        "name": "ajk-call-analytics-demo",
                        "ownerIdentity": {
                            "principalId": "A39I0T5T4Z0PZJ"
                        },
                        "arn": "arn:aws:s3:::ajk-call-analytics-demo"
                    },
                    "object": {
                        "key": "audio/example-call.wav",
                        "size": 963023,
                        "eTag": "8588ee73ae57d72c072f4bc401627724",
                        "sequencer": "005E99B1F567D61004"
                    }
                }
            }
        ]
    }
    # The handler never reads the context argument, so a placeholder suffices.
    lambda_handler(event, "")
|
[
"pcaconfiguration.isAutoLanguageDetectionSet",
"boto3.client",
"json.dumps",
"pcaconfiguration.generateJobName",
"pcaconfiguration.loadConfiguration"
] |
[((540, 562), 'pcaconfiguration.loadConfiguration', 'cf.loadConfiguration', ([], {}), '()\n', (560, 562), True, 'import pcaconfiguration as cf\n'), ((632, 650), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (644, 650), False, 'import boto3\n'), ((1191, 1214), 'pcaconfiguration.generateJobName', 'cf.generateJobName', (['key'], {}), '(key)\n', (1209, 1214), True, 'import pcaconfiguration as cf\n'), ((2045, 2074), 'boto3.client', 'boto3.client', (['"""stepfunctions"""'], {}), "('stepfunctions')\n", (2057, 2074), False, 'import boto3\n'), ((2653, 2684), 'pcaconfiguration.isAutoLanguageDetectionSet', 'cf.isAutoLanguageDetectionSet', ([], {}), '()\n', (2682, 2684), True, 'import pcaconfiguration as cf\n'), ((1323, 1349), 'boto3.client', 'boto3.client', (['"""transcribe"""'], {}), "('transcribe')\n", (1335, 1349), False, 'import boto3\n'), ((3213, 3302), 'json.dumps', 'json.dumps', (["('Post-call analytics workflow for file ' + key + ' successfully started.')"], {}), "('Post-call analytics workflow for file ' + key +\n ' successfully started.')\n", (3223, 3302), False, 'import json\n')]
|
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import plot_voltage
import pdn_params as pdn
from cython.sim_pdn import sim_throttling_wrapper
# SPEC CPU2006 benchmark names for the power/voltage experiments
# ("470.lbm" is deliberately commented out).
# NOTE(review): this list is not referenced elsewhere in this module —
# presumably consumed by external drivers; confirm before removing.
TEST_LIST_spec=[
    "429.mcf",
    "433.milc",
    "435.gromacs",
    "436.cactusADM",
    "437.leslie3d",
    "444.namd",
    "445.gobmk",
    "453.povray",
    "454.calculix",
    "456.hmmer",
    "458.sjeng",
    "459.GemsFDTD",
    "462.libquantum",
    "464.h264ref",
    # "470.lbm",
    "471.omnetpp",
    "473.astar",
    "481.wrf", \
    "482.sphinx3", \
]
def sim_throttling(power, pwr_throttle):
    """Run the PDN throttling simulation and print its power statistics.

    power: per-cycle power trace at the full clock; pwr_throttle: trace under
    the throttled clock.  All electrical/timing constants come from the pdn
    configuration module.  Returns (voltage, power, ve_cycles) as produced by
    the cython simulation wrapper.
    """
    print("Sim throttling...")
    voltage, ve_cycles, power = sim_throttling_wrapper(
        power, pwr_throttle,
        pdn.THRES, pdn.L, pdn.C, pdn.R, pdn.VDC,
        pdn.CLK, pdn.CLK_THROTTLE, pdn.LEADTIME, pdn.THROTTLE_DUR)
    plot_voltage.print_power(voltage, power, ve_cycles)
    return voltage, power, ve_cycles
def run(print_stats=False):
    """Run the throttling-mitigation experiment and plot the voltage traces.

    Loads two fixed gem5 runs of 482.sphinx3 at 4 GHz and 2 GHz to derive
    static/dynamic power scaling, computes voltage traces with and without
    throttling, simulates the mitigation, and writes a comparison plot.

    :param print_stats: accepted for interface compatibility; currently unused.
    """
    VDC = pdn.VDC
    THRES = pdn.THRES
    L = pdn.L
    C = pdn.C
    R = pdn.R
    CLK = pdn.CLK
    THR_CLK = pdn.CLK_THROTTLE
    # (Removed an unused `HOME = os.environ['HOME']` read, which could raise
    # KeyError in environments without HOME and whose value was never used.)
    # get power scaling constants from the same benchmark run at two clocks
    dirs = ["/home/jimmy/output_10_9/gem5_out/482.sphinx3_5_1000000_DESKTOP_HarvardPowerPredictor_4000000000",
            "/home/jimmy/output_10_9/gem5_out/482.sphinx3_5_1000000_DESKTOP_HarvardPowerPredictor_2000000000"]
    power = [plot_voltage.get_data(d, 'power.bin', np.single) for d in dirs]
    (static_scale, dyn_scale) = plot_voltage.get_pwr_scaling(power[0], power[1], 4E9, 2E9)

    d = "/home/jimmy/output_10_14/gem5_out/482.sphinx3_20_1000000_DESKTOP_HarvardPowerPredictor_fastforwardtest"
    orig_data = plot_voltage.get_voltage(d, np.single, THRES, L, C, R, VDC, CLK, 0, static_scale, dyn_scale)
    np.set_printoptions(threshold=np.inf)
    thr_data = plot_voltage.get_voltage(d, np.single, THRES, L, C, R, VDC, CLK, THR_CLK-CLK, static_scale, dyn_scale)
    # sim_throttling also prints the mitigation power statistics as a side effect.
    mit_data = sim_throttling(orig_data[1], thr_data[1])
    # transform 2ghz to 4ghz
    volt_test = np.copy(thr_data[0][0:100000])
    volt_test = volt_test - 0.005
    plot_voltage.plot([orig_data[0], thr_data[0], volt_test],
                      orig_data[2],
                      '10_14_mit_test',
                      labels=["fullclk", "throttle", "test"])
# Script entry point.  The True argument maps to print_stats, which run()
# does not currently read.
if __name__ == "__main__":
    run(True)
|
[
"plot_voltage.print_power",
"numpy.set_printoptions",
"numpy.copy",
"plot_voltage.get_voltage",
"plot_voltage.get_data",
"plot_voltage.plot",
"matplotlib.use",
"plot_voltage.get_pwr_scaling",
"cython.sim_pdn.sim_throttling_wrapper"
] |
[((28, 49), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (42, 49), False, 'import matplotlib\n'), ((899, 1010), 'cython.sim_pdn.sim_throttling_wrapper', 'sim_throttling_wrapper', (['power', 'pwr_throttle', 'THRES', 'L', 'C', 'R', 'VDC', 'CLK', 'CLK_THROTTLE', 'LEADTIME', 'THROTTLE_DUR'], {}), '(power, pwr_throttle, THRES, L, C, R, VDC, CLK,\n CLK_THROTTLE, LEADTIME, THROTTLE_DUR)\n', (921, 1010), False, 'from cython.sim_pdn import sim_throttling_wrapper\n'), ((1011, 1062), 'plot_voltage.print_power', 'plot_voltage.print_power', (['voltage', 'power', 've_cycles'], {}), '(voltage, power, ve_cycles)\n', (1035, 1062), False, 'import plot_voltage\n'), ((1648, 1724), 'plot_voltage.get_pwr_scaling', 'plot_voltage.get_pwr_scaling', (['power[0]', 'power[1]', '(4000000000.0)', '(2000000000.0)'], {}), '(power[0], power[1], 4000000000.0, 2000000000.0)\n', (1676, 1724), False, 'import plot_voltage\n'), ((1839, 1935), 'plot_voltage.get_voltage', 'plot_voltage.get_voltage', (['d', 'np.single', 'THRES', 'L', 'C', 'R', 'VDC', 'CLK', '(0)', 'static_scale', 'dyn_scale'], {}), '(d, np.single, THRES, L, C, R, VDC, CLK, 0,\n static_scale, dyn_scale)\n', (1863, 1935), False, 'import plot_voltage\n'), ((1937, 1974), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (1956, 1974), True, 'import numpy as np\n'), ((1995, 2103), 'plot_voltage.get_voltage', 'plot_voltage.get_voltage', (['d', 'np.single', 'THRES', 'L', 'C', 'R', 'VDC', 'CLK', '(THR_CLK - CLK)', 'static_scale', 'dyn_scale'], {}), '(d, np.single, THRES, L, C, R, VDC, CLK, THR_CLK -\n CLK, static_scale, dyn_scale)\n', (2019, 2103), False, 'import plot_voltage\n'), ((2314, 2344), 'numpy.copy', 'np.copy', (['thr_data[0][0:100000]'], {}), '(thr_data[0][0:100000])\n', (2321, 2344), True, 'import numpy as np\n'), ((2386, 2519), 'plot_voltage.plot', 'plot_voltage.plot', (['[orig_data[0], thr_data[0], volt_test]', 'orig_data[2]', '"""10_14_mit_test"""'], 
{'labels': "['fullclk', 'throttle', 'test']"}), "([orig_data[0], thr_data[0], volt_test], orig_data[2],\n '10_14_mit_test', labels=['fullclk', 'throttle', 'test'])\n", (2403, 2519), False, 'import plot_voltage\n'), ((1552, 1600), 'plot_voltage.get_data', 'plot_voltage.get_data', (['d', '"""power.bin"""', 'np.single'], {}), "(d, 'power.bin', np.single)\n", (1573, 1600), False, 'import plot_voltage\n')]
|
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect
from era.utils.functools import unidec, omit
@unidec
def role_required(method, req, *args, **kw):
    """Allow the wrapped view only for users whose role is listed in kw['allow'].

    The 'allow' keyword is consumed here and not forwarded to the view.
    Raises PermissionDenied for any other role.
    """
    allowed_roles = kw.get('allow', [])
    if req.user.role not in allowed_roles:
        raise PermissionDenied()
    return method(req, *args, **omit(kw, 'allow'))
@unidec
def anonymous_required(method, req, *args, **kw):
    """Run the wrapped view only for anonymous users.

    Authenticated users are redirected: with logout=True they are logged out
    first and sent back to the same URL, otherwise they go to '/'.  The
    'logout' keyword is consumed here and not forwarded to the view.
    """
    if not req.user.is_authenticated():
        return method(req, *args, **omit(kw, 'logout'))
    if kw.get('logout', False):
        auth.logout(req)
        return redirect(req.get_full_path())
    return redirect('/')
|
[
"django.shortcuts.redirect",
"django.contrib.auth.logout",
"era.utils.functools.omit",
"django.core.exceptions.PermissionDenied"
] |
[((390, 408), 'django.core.exceptions.PermissionDenied', 'PermissionDenied', ([], {}), '()\n', (406, 408), False, 'from django.core.exceptions import PermissionDenied\n'), ((553, 569), 'django.contrib.auth.logout', 'auth.logout', (['req'], {}), '(req)\n', (564, 569), False, 'from django.contrib import auth\n'), ((652, 665), 'django.shortcuts.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (660, 665), False, 'from django.shortcuts import redirect\n'), ((698, 716), 'era.utils.functools.omit', 'omit', (['kw', '"""logout"""'], {}), "(kw, 'logout')\n", (702, 716), False, 'from era.utils.functools import unidec, omit\n'), ((361, 378), 'era.utils.functools.omit', 'omit', (['kw', '"""allow"""'], {}), "(kw, 'allow')\n", (365, 378), False, 'from era.utils.functools import unidec, omit\n')]
|
""" Test Beacon command """
import json
from f5sdk.cs import ManagementClient
from f5sdk.cs.beacon.insights import InsightsClient
from f5sdk.cs.beacon.declare import DeclareClient
from f5sdk.cs.beacon.token import TokenClient
from f5cli.config import AuthConfigurationClient
from f5cli.commands.cmd_cs import cli
from ...global_test_imports import pytest, CliRunner
# Test Constants
# Credentials returned by the mocked AuthConfigurationClient.read_auth
# (the password value is an anonymised placeholder, not a real secret).
MOCK_CONFIG_CLIENT_READ_AUTH_RETURN_VALUE = {
    'user': 'test_user',
    'password': '<PASSWORD>'
}
class TestCommandBeacon(object):
    """ Test Class: command beacon

    Each test mocks one client call, invokes the CLI, and checks the CLI
    printed the mocked payload.  The expected-output formatting is shared
    via _expected_output instead of repeating it in every test.
    """

    @staticmethod
    def _expected_output(response):
        """Return the CLI output expected for `response`: the payload
        pretty-printed as sorted JSON plus the trailing newline the CLI adds."""
        return json.dumps(response, indent=4, sort_keys=True) + '\n'

    @classmethod
    def setup_class(cls):
        """ Setup func """
        cls.runner = CliRunner()

    @classmethod
    def teardown_class(cls):
        """ Teardown func """

    @staticmethod
    @pytest.fixture
    def config_client_read_auth_fixture(mocker):
        """ PyTest fixture mocking AuthConfigurationClient's read_auth method """
        mock_config_client_read_auth = mocker.patch.object(
            AuthConfigurationClient, "read_auth")
        mock_config_client_read_auth.return_value = MOCK_CONFIG_CLIENT_READ_AUTH_RETURN_VALUE
        return mock_config_client_read_auth

    @staticmethod
    @pytest.fixture
    def mgmt_client_fixture(mocker):
        """ PyTest fixture returning mocked Cloud Services Management Client """
        mock_management_client = mocker.patch.object(ManagementClient, '__init__')
        mock_management_client.return_value = None
        return mock_management_client

    @pytest.mark.usefixtures("config_client_read_auth_fixture")
    @pytest.mark.usefixtures("mgmt_client_fixture")
    def test_cmd_beacon_insights_list(self, mocker):
        """ List all configured beacon insights
        Given
        - The Insights Client returns a successful response
        When
        - User executes a 'list'
        Then
        - The 'list' command returns a successful response
        """
        mock_response = {
            'foo': 'bar'
        }
        mocker.patch.object(
            InsightsClient, "list", return_value=mock_response)
        result = self.runner.invoke(cli, ['beacon', 'insights', 'list'])
        assert result.output == self._expected_output(mock_response)

    @pytest.mark.usefixtures("config_client_read_auth_fixture")
    @pytest.mark.usefixtures("mgmt_client_fixture")
    def test_cmd_beacon_insights_create(self, mocker):
        """ Creating a beacon insight
        Given
        - The Insights Client returns a successful response
        When
        - User executes a 'create' with a declaration
        Then
        - The 'create' command returns a successful response
        and creates an insight
        """
        mock_response = {
            'title': 'foo',
            'description': 'blah'
        }
        mocker.patch.object(
            InsightsClient, "create", return_value=mock_response)
        result = self.runner.invoke(cli, ['beacon', 'insights', 'create',
                                          '--declaration', './test/fake_declaration.json'])
        assert result.output == self._expected_output(mock_response)

    @pytest.mark.usefixtures("config_client_read_auth_fixture")
    @pytest.mark.usefixtures("mgmt_client_fixture")
    def test_cmd_beacon_insights_update(self, mocker):
        """ Updating a beacon insight
        Given
        - The Insights Client returns a successful response
        When
        - User executes a 'update' with a declaration with the same name
        Then
        - The 'update' command returns a successful response
        and updates the specified insight
        """
        mock_response = {
            'title': 'foo',
            'description': 'blah2'
        }
        # NOTE(review): the CLI implements 'update' via the client's create
        # call (declarative upsert) — confirm this is intended.
        mocker.patch.object(
            InsightsClient, "create", return_value=mock_response)
        result = self.runner.invoke(cli, ['beacon', 'insights', 'update',
                                          '--declaration', './test/fake_declaration.json'])
        assert result.output == self._expected_output(mock_response)

    @pytest.mark.usefixtures("config_client_read_auth_fixture")
    @pytest.mark.usefixtures("mgmt_client_fixture")
    def test_cmd_beacon_insights_delete(self, mocker):
        """ Deleting a beacon insight
        Given
        - The Insights Client returns a successful response
        When
        - User executes a 'delete' with the name of the insight to be deleted
        Then
        - The 'delete' command returns a successful response
        and delete the specified insight
        """
        mocker.patch.object(
            InsightsClient, "delete", return_value={})
        result = self.runner.invoke(cli, [
            'beacon', 'insights', 'delete', '--name', 'foo', '--auto-approve'])
        assert result.output == self._expected_output(
            {'message': 'Insight deleted successfully'})

    @pytest.mark.usefixtures("config_client_read_auth_fixture")
    @pytest.mark.usefixtures("mgmt_client_fixture")
    def test_cmd_beacon_insights_show(self, mocker):
        """ Show a beacon insight
        Given
        - The Insights Client returns a successful response
        When
        - User executes a 'show' with a name of the insight
        Then
        - The 'show' command returns requested insight
        """
        mock_response = {
            'title': 'foo',
            'description': 'blah'
        }
        mocker.patch.object(
            InsightsClient, "show", return_value=mock_response)
        result = self.runner.invoke(cli, ['beacon', 'insights', 'show', '--name', 'foo'])
        assert result.output == self._expected_output(mock_response)

    @pytest.mark.usefixtures("config_client_read_auth_fixture")
    @pytest.mark.usefixtures("mgmt_client_fixture")
    def test_cmd_beacon_declare_show(self, mocker):
        """ Show a beacon declaration
        Given
        - The Declare Client returns a mocked response
        When
        - User executes a 'show'
        Then
        - The 'show' command returns the mocked response
        """
        mock_response = {'foo': 'bar'}
        mocker.patch.object(
            DeclareClient, "create", return_value=mock_response
        )
        result = self.runner.invoke(cli, ['beacon', 'declare', 'show'])
        assert result.output == self._expected_output(mock_response)

    @pytest.mark.usefixtures("config_client_read_auth_fixture")
    @pytest.mark.usefixtures("mgmt_client_fixture")
    def test_cmd_beacon_declare_create(self, mocker):
        """ Create/update a beacon declaration
        Given
        - The Declare Client returns a mocked response
        When
        - User executes a 'create'
        Then
        - The 'create' command returns the mocked response
        """
        mock_response = {'foo': 'bar'}
        mocker.patch.object(
            DeclareClient, "create", return_value=mock_response
        )
        result = self.runner.invoke(
            cli, ['beacon', 'declare', 'create', '--declaration', './foo.json']
        )
        assert result.output == self._expected_output(mock_response)

    @pytest.mark.usefixtures("config_client_read_auth_fixture")
    @pytest.mark.usefixtures("mgmt_client_fixture")
    def test_cmd_beacon_token_create(self, mocker):
        """ Creating a beacon token
        Given
        - The Token Client returns a successful response
        When
        - User executes a 'create' with a declaration
        Then
        - The 'create' command returns a successful response
        and creates an token
        """
        mock_response = {
            'title': 'foo',
            'description': 'blah'
        }
        mocker.patch.object(
            TokenClient, "create", return_value=mock_response)
        result = self.runner.invoke(cli, ['beacon', 'token', 'create',
                                          '--declaration', './test/fake_declaration.json'])
        assert result.output == self._expected_output(mock_response)

    @pytest.mark.usefixtures("config_client_read_auth_fixture")
    @pytest.mark.usefixtures("mgmt_client_fixture")
    def test_cmd_beacon_token_delete(self, mocker):
        """ Deleting a beacon token
        Given
        - The Token Client returns a successful response
        When
        - User executes a 'delete' with the name of the token to be deleted
        Then
        - The 'delete' command returns a successful response
        and delete the specified token
        """
        mocker.patch.object(
            TokenClient, "delete", return_value={})
        result = self.runner.invoke(cli, [
            'beacon', 'token', 'delete', '--name', 'foo', '--auto-approve'])
        assert result.output == self._expected_output(
            {'message': 'Token deleted successfully'})

    @pytest.mark.usefixtures("config_client_read_auth_fixture")
    @pytest.mark.usefixtures("mgmt_client_fixture")
    def test_cmd_beacon_token_show(self, mocker):
        """ Show a beacon token
        Given
        - The Token Client returns a successful response
        When
        - User executes a 'show' with a name of the token
        Then
        - The 'show' command returns requested token
        """
        mock_response = {
            'title': 'foo',
            'description': 'blah'
        }
        mocker.patch.object(
            TokenClient, "show", return_value=mock_response)
        result = self.runner.invoke(cli, ['beacon', 'token', 'show', '--name', 'foo'])
        assert result.output == self._expected_output(mock_response)

    @pytest.mark.usefixtures("config_client_read_auth_fixture")
    @pytest.mark.usefixtures("mgmt_client_fixture")
    def test_cmd_beacon_token_list(self, mocker):
        """ List all configured beacon token
        Given
        - The Token Client returns a successful response
        When
        - User executes a 'list'
        Then
        - The 'list' command returns a successful response
        """
        mock_response = {
            'foo': 'bar'
        }
        mocker.patch.object(
            TokenClient, "list", return_value=mock_response)
        result = self.runner.invoke(cli, ['beacon', 'token', 'list'])
        assert result.output == self._expected_output(mock_response)
|
[
"json.dumps"
] |
[((2178, 2229), 'json.dumps', 'json.dumps', (['mock_response'], {'indent': '(4)', 'sort_keys': '(True)'}), '(mock_response, indent=4, sort_keys=True)\n', (2188, 2229), False, 'import json\n'), ((3103, 3154), 'json.dumps', 'json.dumps', (['mock_response'], {'indent': '(4)', 'sort_keys': '(True)'}), '(mock_response, indent=4, sort_keys=True)\n', (3113, 3154), False, 'import json\n'), ((4058, 4109), 'json.dumps', 'json.dumps', (['mock_response'], {'indent': '(4)', 'sort_keys': '(True)'}), '(mock_response, indent=4, sort_keys=True)\n', (4068, 4109), False, 'import json\n'), ((4865, 4951), 'json.dumps', 'json.dumps', (["{'message': 'Insight deleted successfully'}"], {'indent': '(4)', 'sort_keys': '(True)'}), "({'message': 'Insight deleted successfully'}, indent=4, sort_keys\n =True)\n", (4875, 4951), False, 'import json\n'), ((5728, 5779), 'json.dumps', 'json.dumps', (['mock_response'], {'indent': '(4)', 'sort_keys': '(True)'}), '(mock_response, indent=4, sort_keys=True)\n', (5738, 5779), False, 'import json\n'), ((6442, 6493), 'json.dumps', 'json.dumps', (['mock_response'], {'indent': '(4)', 'sort_keys': '(True)'}), '(mock_response, indent=4, sort_keys=True)\n', (6452, 6493), False, 'import json\n'), ((7226, 7277), 'json.dumps', 'json.dumps', (['mock_response'], {'indent': '(4)', 'sort_keys': '(True)'}), '(mock_response, indent=4, sort_keys=True)\n', (7236, 7277), False, 'import json\n'), ((8135, 8186), 'json.dumps', 'json.dumps', (['mock_response'], {'indent': '(4)', 'sort_keys': '(True)'}), '(mock_response, indent=4, sort_keys=True)\n', (8145, 8186), False, 'import json\n'), ((8924, 9003), 'json.dumps', 'json.dumps', (["{'message': 'Token deleted successfully'}"], {'indent': '(4)', 'sort_keys': '(True)'}), "({'message': 'Token deleted successfully'}, indent=4, sort_keys=True)\n", (8934, 9003), False, 'import json\n'), ((9767, 9818), 'json.dumps', 'json.dumps', (['mock_response'], {'indent': '(4)', 'sort_keys': '(True)'}), '(mock_response, indent=4, 
sort_keys=True)\n', (9777, 9818), False, 'import json\n'), ((10497, 10548), 'json.dumps', 'json.dumps', (['mock_response'], {'indent': '(4)', 'sort_keys': '(True)'}), '(mock_response, indent=4, sort_keys=True)\n', (10507, 10548), False, 'import json\n')]
|
import os
import os.path
import sys
import logging
logger = logging.getLogger(__name__)
import numpy as np
import inspect
import datetime
import hashlib
import functools
import h5py
import filelock
import multiprocessing
import itertools
import random
from tqdm.auto import tqdm
#
# utilities for my hdf5 datasets
#
def _normalize_attribute_value_string(v):
# NOTE: Only ASCII strings allowed in string values.
return v.encode('ascii')
class _Hdf5GroupProxyObject:
    """Read-only dict-like proxy over an h5py group.

    Lookups see both the group's children (sub-groups become nested proxies,
    datasets are read into memory) and its HDF5 attributes (byte-string
    attributes are decoded back to str via the module-level _unpack_attr_val).
    """
    def __init__(self, grp):
        # grp: the underlying h5py.Group being proxied
        self.grp = grp
    def get(self, key, default, *, _default_action=None):
        """Look up `key` among children first, then attributes.

        Returns `default` (or the result of `_default_action()` if given)
        when the key is absent.  Raises ValueError for h5py objects that are
        neither groups nor datasets.
        """
        if key in self.grp:
            obj = self.grp[key]
            if isinstance(obj, h5py.Group):
                # wrap sub-groups so nested access keeps the same interface
                return _Hdf5GroupProxyObject(self.grp[key])
            if isinstance(obj, h5py.Dataset):
                # [()] reads the full dataset contents into memory
                return obj[()]
            raise ValueError("Can't interface object value {!r}".format(obj))
        if key in self.grp.attrs:
            return self._unpack_attr_val(self.grp.attrs[key])
        if _default_action:
            return _default_action()
        return default
    def keys(self):
        """Iterate over all keys: children first, then attribute names."""
        return itertools.chain(self.grp.keys(), self.grp.attrs.keys())
    def keys_children(self):
        """Keys of child groups/datasets only."""
        return self.grp.keys()
    def keys_attrs(self):
        """Keys of HDF5 attributes only."""
        return self.grp.attrs.keys()
    def all_attrs(self):
        """Return all attributes as a dict with values unpacked to Python types."""
        return dict([(k, self._unpack_attr_val(v)) for (k,v) in self.grp.attrs.items()])
    def __getitem__(self, key):
        # Like get(), but raises KeyError for missing keys.
        def keyerror():
            raise KeyError("No key {} in hdf5 group {!r} or its attributes"
                           .format(key, self.grp))
        return self.get(key, None, _default_action=keyerror)
    def _unpack_attr_val(self, att_val):
        return _unpack_attr_val(att_val) # call global method
    def value_equals(self, key, test_value):
        """Compare the stored value for `key` against `test_value`.

        None matches only None; ndarrays are compared element-wise; all other
        values are compared after normalizing both sides to their HDF5
        attribute encoding (floats compared via their string form).
        """
        val = self.get(key, None)
        if val is None:
            return (test_value is None)
        if isinstance(val, np.ndarray) or isinstance(test_value, np.ndarray):
            return np.all(val == test_value)
        if _normalize_attribute_value_global(val, keep_float=False) \
           != _normalize_attribute_value_global(test_value, keep_float=False):
            return False
        return True
    def __repr__(self):
        return '_Hdf5GroupProxyObject('+repr(self.grp)+')'
    def __str__(self):
        # Attributes are shown with their values; children only by type name.
        ds = {k: str(v) for k, v in self.all_attrs().items() }
        for k in self.keys_children():
            v = self.grp[k]
            ds[k] = '<{}>'.format(type(v).__name__)
        return ('HDF5 group {' +
                ', '.join('{}: {}'.format(k,vstr) for k,vstr in ds.items()) + '}')
    def hdf5_group(self):
        """
        Return the group object in the HDF5 data structure, giving you direct access
        to the :py:mod:`h5py` API in case you need it.
        """
        return self.grp
    def hdf5_key(self):
        """
        Return the key in the HDF5 data structure where this group is located.
        """
        return self.grp.name
def _unpack_attr_val(att_val):
if isinstance(att_val, bytes):
return att_val.decode('ascii')
#if isinstance(att_val, np.ndarray) and att_val.size == 1:
# # if it's a scalar, return the bare scalar and not an ndarray
# return att_val[()]
return att_val
def _normalize_attribute_value_global(
        value, *,
        normalize_string=_normalize_attribute_value_string,
        keep_float=True
):
    """Normalize a Python value for storage as an HDF5 attribute.

    :param value: value to encode (None, str/bytes, int, float, numpy
        integer/floating scalars, date/time/datetime, timedelta)
    :param normalize_string: callable used to encode string-valued results.
        Bug fix: this parameter was previously accepted but ignored — the
        body always called the module-level default; it is now honoured.
    :param keep_float: if True, floats are stored as-is; if False they are
        formatted to a '{:0.8g}' string and passed through normalize_string.
    :raises ValueError: for any unsupported type
    """
    t = type(value)
    if value is None:
        # None is stored as the empty string
        return ""
    if isinstance(value, str):
        return normalize_string(value)
    if isinstance(value, bytes):
        # bytes and str are treated the same, as ASCII strings. For storage
        # of raw binary data you'll want to store a dataset of some kind
        # e.g. with numpy.
        return value
    if isinstance(value, int) or np.issubdtype(t, np.integer):
        return int(value)
    if isinstance(value, float) or np.issubdtype(t, np.floating):
        if keep_float:
            return value
        else:
            return normalize_string( '{:0.8g}'.format(value) )
    if isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
        # dates/times are stored in ISO 8601 text form
        return normalize_string(value.isoformat())
    if isinstance(value, (datetime.timedelta,)):
        return normalize_string("total_seconds={:.06g}"
                                .format(value.total_seconds()))
    raise ValueError("Cannot encode {!r} for HDF5 attribute storage, unknown type"
                     .format(value))
class Hdf5StoreResultsAccessor:
"""
TODO: Doc.....
Note: must be used in a context manager!
"""
def __init__(self, filename, *, realm='results'):
super().__init__()
self.filename = filename
self.realm = realm
self._lock_file_name = os.path.join(
os.path.dirname(filename),
'.' + os.path.basename(filename) + '.py_lock'
)
self._filelock = None
self._store = None
self.store_value_filters = []
    def __enter__(self):
        """Acquire the inter-process file lock, then open the HDF5 file.

        The lock is taken *before* opening so that only one process touches
        the file at a time; mode 'a' creates the file if it doesn't exist.
        """
        self._filelock = filelock.FileLock(self._lock_file_name)
        self._filelock.acquire()
        try:
            self._store = h5py.File(self.filename, 'a')
        except Exception:
            # Don't keep other processes blocked if opening the file failed.
            self._filelock.release()
            raise
        return self
def __exit__(self, type, value, traceback):
try:
if self._store is not None:
self._store.close()
self._store = None
finally:
if self._filelock is not None:
self._filelock.release()
self._filelock = None
    def iterate_results(self, *, predicate=None, **kwargs):
        """Yield a proxy object for every stored result matching the filters.

        Keyword arguments are matched against each result's attributes via
        value_equals().  If `predicate` is given, its parameter names are
        introspected and the corresponding (unpacked) attribute values are
        passed to it; only results for which it returns truthy are yielded.
        """
        if self.realm not in self._store:
            # no results registered yet, nothing to yield
            return
        grp_results = self._store[self.realm]
        predicate_attrs = None
        if predicate is not None:
            # The predicate's signature tells us which attributes it wants.
            sig = inspect.signature(predicate)
            predicate_attrs = list( sig.parameters.keys() )
        def want_this(grpiface):
            # kwargs filters are applied first; the predicate is consulted last.
            for k,v in kwargs.items():
                if not grpiface.value_equals(k, v):
                    return False
            if predicate is not None:
                return predicate(**{k: _unpack_attr_val(grpiface.get(k, None)) for k in predicate_attrs})
            return True
        for key in grp_results.keys():
            grp = grp_results[key]
            grpiface = _Hdf5GroupProxyObject(grp)
            if want_this(grpiface):
                yield grpiface
def attribute_values(self, attribute_name, *, include_none=False):
if self.realm not in self._store:
return set()
grp_results = self._store[self.realm]
return set(
_unpack_attr_val(attval)
for attval in (
grp.attrs.get(attribute_name, None)
for grp in (grp_results[key] for key in grp_results.keys())
)
if include_none or attval is not None
)
# vals = set()
# for key in grp_results.keys():
# grp = grp_results[key]
# this_val = _unpack_attr_val(grp.attrs[attribute_name])
# if this_val not in vals:
# vals.append(this_val)
# return vals
def has_result(self, attributes):
key = self._store_key(attributes)
if key in self._store:
return True
return False
def get_result(self, attributes):
key = self._store_key(attributes)
if key in self._store:
grp = self._store[key]
return _Hdf5GroupProxyObject(grp)
return None
def store_result(self, attributes, value, *, forbid_overwrite=False, info=None):
key = self._store_key(attributes)
if key in self._store:
if forbid_overwrite:
raise ValueError("key {!r} already exists in {}, not overwriting"
.format(key, self.realm))
logger.debug("Overwriting key %r in %s", key, self.realm)
del self._store[key]
grp = self._store.create_group(key)
for k, v in attributes.items():
grp.attrs[k] = self._normalize_attribute_value(v)
for filt in self.store_value_filters:
value = filt(value)
has_error = self._store_result_dict_value(grp, value)
# only raise errors *after* having written everything to disk, in case
# that computation was very time-costly to obtain and our poor user
# would otherwise lose all their hard-obtained results
if has_error is not None:
raise has_error
if info:
for k, v in info.items():
grp.attrs[k] = self._normalize_attribute_value(v)
def _store_result_dict_value(self, grp, value):
has_error = None
for k, v in value.items():
if k.startswith('_'):
continue
try:
for filt in self.store_value_filters:
v = filt(v)
if v is None:
continue
if isinstance(v, dict):
newgrp = grp.create_group(k)
has_error = self._store_result_dict_value(newgrp, v)
elif isinstance(v, (np.ndarray, int, float)) \
or np.issubdtype(np.dtype(type(v)), np.integer) \
or np.issubdtype(np.dtype(type(v)), np.floating):
# Pass on any numpy array as is to h5py. Also store floats
# and ints directly
dset = grp.create_dataset(k, data=v)
elif isinstance(v, str):
# difficult to support strings in HDF5 -- see
# https://docs.h5py.org/en/stable/strings.html
#
# we use " np.void(utf8 bytes) " stored in an attribute as
# it looks like it's the safest. NOTE: You need to access
# the string via result['string_field'].tobytes().decode('utf-8')
grp.attrs[k] = np.void(v.encode('utf-8'))
logger.warning("Storing string as UTF-8 opaque bytes for field ‘%s’. Use "
"“result['%s'].tobytes().decode('utf-8')” when reading "
"out the string.", k, k)
elif isinstance(v, bytes):
# store raw bytes
grp.attrs[k] = np.void(v)
logger.warning("Storing bytes as opaque type for field ‘%s’. Use "
"“result['%s'].tobytes()” when reading "
"out the bytes again.", k, k)
elif isinstance(v, (datetime.date, datetime.time, datetime.datetime)):
grp.attrs[k] = v.isoformat().encode('ascii')
elif isinstance(v, (datetime.timedelta,)):
grp.attrs[k] = ("timedelta(seconds={:.06g})"
.format(v.total_seconds())).encode('ascii')
else:
has_error = ValueError("Can't save object {!r}, unknown type".format(v))
# continue saving other stuff
except Exception as e:
has_error = e
return has_error
def delete_result(self, attributes, *, dry_run=False):
key = self._store_key(attributes)
if key not in self._store:
raise ValueError("No such key for attributes {!r}".format(attributes))
if dry_run:
logger.info("Delete results %r, key=%r (dry run)", attributes, key)
else:
del self._store[key]
logger.info("Deleted results %r, key=%r", attributes, key)
def delete_results(self, *, dry_run=False, **kwargs):
keys_to_delete = []
for it in self.iterate_results(**kwargs):
keys_to_delete.append(it.hdf5_key())
for key in keys_to_delete:
if dry_run:
logger.info("Delete results %r (dry run)", key)
def _do_get_result(key):
# use "self" outside inner class
return _Hdf5GroupProxyObject(self._store[key])
class get_all_attrs_str:
def __str__(self):
return repr(_do_get_result(key).all_attrs())
logger.debug("with properties: %r -> %s", key, get_all_attrs_str())
else:
del self._store[key]
logger.info("Deleted results %r", key)
def update_keys(self, attribute_names, *, add_default_keys=None, dry_run=False):
"""
Checks that all result storage keys are up-to-date. If you introduce a new
kwarg attribute in the storage, we can set that attribute to all
existing results with the given value in `add_default_keys`.
- `attribute_names` is a list or tuple of attribute names to consider when
composing the storage key.
- `add_default_keys` is a dictionary of new attribute names and values
to set to records that don't have that attribute set
"""
rename_keys = [] # [ (oldkey,newkey), ... ]
set_attributes = {} # { newkey: {attribute1: value1 ...}, ... }
if add_default_keys is None:
add_default_keys = {}
grp_results = self._store[self.realm]
for key in grp_results.keys():
grp = grp_results[key]
these_attributes = {}
this_set_attributes = {}
for k in attribute_names:
att_value = None
if k in grp.attrs:
att_value = grp.attrs[k]
else:
if k in add_default_keys:
att_value = add_default_keys[k]
this_set_attributes[k] = att_value
else:
att_value = None
these_attributes[k] = att_value
# also take note of any default attributes to set that are not part
# of the results-identifying attributes
for k, v in ((akey, aval,)
for akey, aval in add_default_keys.items()
if akey not in attribute_names):
if k not in grp.attrs:
this_set_attributes[k] = v
newkey = self._store_key(these_attributes, hash_only=True)
if newkey != key:
logger.debug("Will rename {} -> {}".format(key, newkey))
rename_keys.append( (key, newkey) )
if this_set_attributes:
logger.debug("Will set attributes on newkey {}: {!r}"
.format(newkey, this_set_attributes))
set_attributes[newkey] = this_set_attributes
if not rename_keys and not set_attributes:
logger.debug("All keys and attributes are up-to-date.")
return
logger.debug("Finished inspecting keys, proceeding to updates ... ")
for oldkey, newkey in rename_keys:
if dry_run:
logger.info("\tgrp_results.move({!r}, {!r})".format(oldkey, newkey))
else:
grp_results.move(oldkey, newkey)
for newkey, attrib in set_attributes.items():
grp = grp_results[newkey] if not dry_run else None
for ak, av in attrib.items():
if dry_run:
logger.info("\tresults({!r}).attrs[{!r}] = {!r}".format(newkey, ak, av))
else:
grp.attrs[ak] = self._normalize_attribute_value(av)
logger.debug("Keys and attributes renamed successfully.")
def _normalize_attribute_value(self, value, **kwargs):
return _normalize_attribute_value_global(value, **kwargs)
def _store_key(self, attributes, *, hash_only=False):
m = hashlib.sha1()
stuff = "\n".join(
"{key}={value}\n".format(
key=k,
value=repr(self._normalize_attribute_value(attributes[k], keep_float=False))
)
for k in sorted(attributes.keys())
)
m.update( stuff.encode('ascii') )
the_hash = m.hexdigest()
if hash_only:
return the_hash
return '{}/{}'.format(self.realm, the_hash)
class NoResultException(Exception):
    """Raised by a compute function to signal that no result could be produced."""
class MultipleResults:
    """
    Container collecting several computation results, each stored as a
    ``(attrs, info, result)`` triple.  When a nested ``MultipleResults`` is
    appended, its per-result attrs/info are merged with the "global" ones
    given to :py:meth:`append_result`, the per-result entries taking
    precedence.
    """
    def __init__(self, results=None):
        # self.results holds (attrs, info, result) triples
        self.results = [] if results is None else results
    def append_result(self, attrs, info, result):
        # Plain result: record it directly with the given attrs/info.
        if not isinstance(result, MultipleResults):
            self.results.append((attrs, info, result))
            return
        # Nested MultipleResults: merge each sub-result's attrs/info with
        # ours; the sub-result entries win on key conflicts.
        for res_attrs, res_info_v, res in result.results:
            try:
                merged_attrs = dict(attrs)
                merged_attrs.update(**res_attrs)
                merged_info = dict(info)
                if res_info_v:
                    merged_info.update(**res_info_v)
                self.results.append((merged_attrs, merged_info, res))
            except Exception as e:
                # a bad sub-result shouldn't prevent saving the others
                logger.warning(
                    f"Couldn't save result {attrs}, {res_attrs}; "
                    f"[info {info}, {res_info_v}] [result {res}]: {e}"
                )
class _ShowValueShort:
def __init__(self, value, process_value=None):
self.value = value
self.process_value = process_value
def _processed_value(self):
if self.process_value is not None:
return self.process_value(self.value)
else:
return self.value
def __str__(self):
return _showvalue(self._processed_value())
def __repr__(self):
return repr(self._processed_value())
def _showvalue(value, short=False):
if isinstance(value, dict) and not short:
return '{' + ",".join(
"{}={}".format(k, _showvalue(v, short=True))
for k,v in value.items()
) + '}'
if short and isinstance(value, (np.ndarray,)):
# print short version of ndarray
with np.printoptions(precision=4,threshold=8,linewidth=9999,):
return str(value)
if isinstance(value, (float,)) or np.issubdtype(type(value), np.floating):
return "%.4g"%(value)
if value is None or isinstance(value, (int, bool, str, bytes)):
return str(value)
return '<{}>'.format(value.__class__.__name__)
def _call_with_accepted_kwargs(fun, kwargs):
sig = inspect.signature(fun)
fun_args = set( sig.parameters.keys() )
return fun(**{k: v
for k, v in kwargs.items()
if k in fun_args})
class FnComputer:
    """
    Base class for callable "computer" objects handed to ComputeAndStore.
    Subclasses must override __call__() to perform the actual computation,
    and may override the class attributes below to configure how
    ComputeAndStore drives them.
    """
    # optional configuration hooks read by ComputeAndStore:
    decode_inputargs = None            # callable decoding raw input args
    fixed_attributes = None            # dict of attributes common to all results
    multiple_attribute_values = None   # extra attribute combos computed per call
    info = None                        # extra non-identifying info to store
    force_recompute = False            # recompute even if result is stored
    skip_store = False                 # compute but don't store the result
    def __call__(self):
        raise RuntimeError("You need to reimplement the __call__() function")
class ComputeAndStore:
    """
    Wraps a function `fn` that computes something potentially expensive with the
    necessary code to perform the computation only if it doesn't already exist
    in the data storage described by `store_filename` and `realm` and designed
    to be managed by a :py:class:`HDF5StoreResultsAccessor`.
    To determine whether the computation must be run, and to store the result
    after the computation if it was carried out, the attributes that
    characterize the associated result in the
    :py:class:`HDF5StoreResultsAccessor` are determined as follows (for use with
    :py:meth:`HDF5StoreResultsAccessor.has_result()` and
    :py:meth:`HDF5StoreResultsAccessor.store_result()`).  The function's named
    arguments are considered as attributes, and they are merged with the given
    attribute dictionary `fixed_attributes`.
    The return value of the function (usually a dictionary) is then stored using
    a :py:class:`HDF5StoreAccessor` instance in the given filename and realm,
    with the associated attributes.  The function may also return an instance of
    :py:class:`MultipleResults`—see more on this topic below.
    The `info` argument can be a dictionary of values to store alongside with
    the result, but that do not contribute to the identification of the result
    instance (see :py:meth:`HDF5StoreAccessor.store_result()`'s `info=` keyword
    argument).
    It is possible to "decode" some arguments of `fn()` if you would like the
    attribute value in the store file to have a different format or
    representation as the value actually passed on to `fn()`.  Use the
    `decode_inputargs()` for this purpose.  It is given the tuple of input
    arguments as-is (without any 'multiple-attributes' arguments—see below), and
    is supposed to return the arguments to send to `fn()` instead (either as a
    tuple or as a kwargs dictionary).  If a tuple is returned, it must preserve
    the order and number of the arguments.
    The results storage file `store_filename` is accessed with a
    :py:class:`HDF5StoreResultsAccessor` instance.  The instance is only created
    momentarily to check whether the results exist in the storage, and again if
    necessary to store the result into the cache.  In this way multiple
    instances of this function can run in different processes without locking
    out the results storage file.
    Messages are logged to the given `logger` instance (see python's
    :py:mod:`logging` mechanism), or to a default logger.
    **Computing functions with multiple attribute values at in one function
    call:**
    Sometimes we want to compute multiple result objects in one go, especially
    if they share some common intermediate steps.  In such cases, the function
    should return a :py:class:`MultipleResults` instance that collects the
    different result objects along with their different attributes values.  The
    attributes specified in each object in `MultipleResults` are merged with the
    function's arguments and with the `fixed_attributes`.
    When the function returns multiple result objects, then `ComputeAndStore`
    needs additional information in order to determine if a computation needs to
    run, and if so, which of those multiple results need to be computed.  Use
    the `multiple_attribute_values` field to this effect.  This field should be
    a list of dictionaries, or a dictionary containing a list in one of its
    values, that specify any additional attribute(s) and the values associated
    with the results that the function is expected to return.  These values are
    used to check the existence of the result objects in the store.
    If the function accepts a keyword argument associated with a "multiple
    result attributes", then a list of all the values that we need to compute
    (i.e., that are not in the store) is provided to the function via that
    keyword argument.  If multiple such arguments are accepted, then all these
    keyword arguments `kw1`, `kw2`, ... are given a list of the same length,
    such that `{kw1=kw1[j], kw2=kw2[j], ...}` for `j=0,1,...` describe the
    result objects that need to be computed.
    """
    def __init__(self, fn, store_filename, *,
                 realm=None,
                 fixed_attributes=None,
                 info=None,
                 decode_inputargs=None,
                 multiple_attribute_values=None,
                 force_recompute=None,
                 skip_store=None,
                 logger=None):
        self.fn = fn
        # FnComputer instances expose their parameters via __call__
        if isinstance(fn, FnComputer):
            self.fn_name = fn.__class__.__name__
            fn_sig = inspect.signature(fn.__call__)
        else:
            self.fn_name = fn.__name__
            fn_sig = inspect.signature(fn)
        self.fn_arg_names = list( fn_sig.parameters.keys() )
        self.store_filename = store_filename
        self.realm = realm
        # merge fixed attributes: FnComputer-level first, ctor arg overrides
        self.fixed_attributes = {}
        if getattr(fn, 'fixed_attributes', None) is not None:
            self.fixed_attributes.update(fn.fixed_attributes)
        if fixed_attributes is not None:
            self.fixed_attributes.update(fixed_attributes)
        # same merge strategy for the non-identifying info dict
        self.info = {}
        if getattr(fn, 'info', None) is not None:
            self.info.update(fn.info)
        if info is not None:
            self.info.update(info)
        # decode_inputargs may come from the FnComputer OR the ctor, not both
        self.decode_inputargs = None
        if getattr(fn, 'decode_inputargs', None) is not None:
            self.decode_inputargs = fn.decode_inputargs
        if decode_inputargs is not None:
            if self.decode_inputargs is not None:
                raise ValueError("decode_inputargs=... specified both in FnComputer class "
                                 "and as argument to ComputeAndStore()")
            self.decode_inputargs = decode_inputargs
        # multiple_attribute_values likewise: FnComputer OR ctor, not both
        self.multiple_attribute_values = None
        if getattr(fn, 'multiple_attribute_values', None) is not None:
            self.multiple_attribute_values = fn.multiple_attribute_values
        if multiple_attribute_values is not None:
            if self.multiple_attribute_values is not None:
                raise ValueError("multiple_attribute_values=... specified both in FnComputer "
                                 "class and as argument to ComputeAndStore()")
            self.multiple_attribute_values = multiple_attribute_values
        if self.multiple_attribute_values is None:
            self.multiple_attribute_values = []
        # go through multiple_attribute_values, and replace dictionary-of-list
        # by list-of-dictionaries, i.e. {'a': [1, 2]} -> [{'a': 1}, {'a': 2}]
        self.multiple_attribute_values = \
            flatten_attribute_value_lists(self.multiple_attribute_values)
        self.multiple_attribute_all_keys = \
            list(set( itertools.chain.from_iterable(
                d.keys() for d in self.multiple_attribute_values
            ) ))
        #print(f"{self.multiple_attribute_values=}")
        # function parameters that identify a result, i.e. that are not
        # "multiple-attribute" parameters
        self.fn_attribute_names = [k for k in self.fn_arg_names
                                   if k not in self.multiple_attribute_all_keys ]
        # force_recompute: True if enabled on either the fn or the ctor
        self.force_recompute = False
        if hasattr(fn, 'force_recompute'):
            self.force_recompute = fn.force_recompute
        if force_recompute is not None:
            self.force_recompute = self.force_recompute or force_recompute
        # NOTE(review): skip_store is combined with `and`, so with the
        # default self.skip_store == False the ctor argument skip_store=True
        # can never enable skipping unless fn.skip_store is also True —
        # confirm this asymmetry with force_recompute is intended.
        self.skip_store = False
        if hasattr(fn, 'skip_store'):
            self.skip_store = fn.skip_store
        if skip_store is not None:
            self.skip_store = self.skip_store and skip_store
        if logger is None:
            self.logger = logging.getLogger(__name__ + '.ComputeAndStore')
        else:
            self.logger = logger
    def _prepare_inputargs_as_kwargs(self, inputargs):
        # Decode the raw input args (if a decoder is configured) and match
        # them up with the function's identifying parameter names.
        decoded_inputargs = inputargs
        if self.decode_inputargs is not None:
            decoded_inputargs = self.decode_inputargs(inputargs)
        if isinstance(decoded_inputargs, dict):
            # decoder already returned keyword arguments
            kwargs = decoded_inputargs
        else:
            if len(decoded_inputargs) != len(self.fn_attribute_names):
                raise ValueError("Can't match (decoded) input arguments %r to "
                                 "function parameters %r"
                                 % (decoded_inputargs, self.fn_attribute_names))
            kwargs = dict(zip(self.fn_attribute_names, decoded_inputargs))
        return kwargs
    def __call__(self, inputargs):
        # single-input convenience wrapper around call_with_inputs()
        return self.call_with_inputs( [inputargs] )
    def call_with_inputs(self, list_of_inputargs):
        """
        Run the wrapped function for each input in `list_of_inputargs` whose
        result is not already in the store (unless force_recompute), then
        store all newly computed results (unless skip_store).
        """
        logger = self.logger
        import phfnbutils # TimeThis
        if self.skip_store:
            # offer friendly warning to make sure the user didn't forget to
            # unset skip_store before a very long computation
            logger.warning("`skip_store` is set to True, results will not be stored at the end!")
        # we might have to decode the inputargs, in case they have attribute
        # values encoded in some way (e.g. dependent attributes zipped together)
        kwargs = None
        # NOTE(review): the `kwargs = None` above is a dead assignment; the
        # comprehensions below rebind `kwargs` independently.
        list_of_kwargs = [ self._prepare_inputargs_as_kwargs(inputargs)
                           for inputargs in list_of_inputargs ]
        list_of_kwargs_and_attributes = [
            (kwargs, dict(self.fixed_attributes, **kwargs))
            for kwargs in list_of_kwargs
        ]
        #logger.debug("requested %s(%r)", self.fn_name,
        #             _ShowValueShort(list_of_kwargs_and_attributes, lambda x: [y[1] for y in x]))
        with self._get_store() as store:
            # def is_need_to_recompute(attributes):
            #     if self.force_recompute:
            #         return True
            #     return not store.has_result(attributes)
            #
            # def which_attributes_need_recompute
            list_of_kwargs_and_attributes_and_multiattribs = []
            for kwargs, attributes in list_of_kwargs_and_attributes:
                multiple_attribute_values = self.multiple_attribute_values
                if not multiple_attribute_values:
                    multiple_attribute_values = [ {} ]
                # here we use multiple_attribute_values also for functions that
                # don't explicitly have any multiple_attribute_values.  In
                # thoses cases an empty list means that there is nothing to
                # compute, and a list containing only an empty dictionary means
                # that we should compute that function.
                if not self.force_recompute:
                    # keep only the attribute combos not already in the store
                    multiple_attribute_values = [
                        m
                        for m in multiple_attribute_values
                        if not store.has_result(dict(attributes, **m))
                    ]
                if not multiple_attribute_values:
                    # nothing to compute even for non-multiple-attributed
                    # functions, see comment above
                    logger.debug("Results for %s [%s] already present, not repeating computation",
                                 _ShowValueShort(attributes),
                                 _ShowValueShort(self.multiple_attribute_values))
                    continue
                # transpose: one list of values per multiple-attribute key
                multiattribkwargs = {
                    k: [m.get(k, None) for m in multiple_attribute_values]
                    for k in self.multiple_attribute_all_keys
                }
                list_of_kwargs_and_attributes_and_multiattribs.append(
                    (kwargs, attributes, multiattribkwargs)
                )
            # if not self.multiple_attribute_values:
            #     if is_need_to_recompute(attributes):
            # def have_all_necessary_results_in_store():
            #     if not self.multiple_attribute_values:
            #         return store.has_result(attributes)
            #     return
            # if not self.force_recompute and have_all_necessary_results_in_store():
            #     logger.debug("Results for %s already present, not repeating computation",
            #                  _ShowValueShort(attributes))
            # else:
            #     new_list_of_kwargs_and_attributes.append( (kwargs,attributes,) )
        if not list_of_kwargs_and_attributes_and_multiattribs:
            logger.debug("There's nothing to compute.")
            return
        # run the computations with the store *closed*, so other processes
        # can access the results file while we compute
        all_results = MultipleResults()
        for kwargs, attributes, multiattribkwargs \
            in list_of_kwargs_and_attributes_and_multiattribs:
            logger.info("computing for attributes = %s [with multi-attributes = %s]",
                        _ShowValueShort(attributes), _ShowValueShort(multiattribkwargs))
            # only pass multiple-attribute lists the function actually accepts
            run_kwargs = dict(kwargs, **{k: v for (k,v) in multiattribkwargs.items()
                                         if k in self.fn_arg_names})
            tr = {}
            result = None
            try:
                with phfnbutils.TimeThis(tr, silent=True):
                    # call the function that actually computes the result
                    result = self.fn(**run_kwargs)
            except NoResultException as e:
                logger.warning(
                    "No result (NoResultException): %s [for %s after %s seconds]",
                    e, _ShowValueShort(attributes), tr['timethisresult'].dt,
                )
                # NOTE(review): returning here aborts the remaining inputs in
                # the batch and discards results computed so far — confirm
                # this is intended rather than `continue`.
                return False
            except Exception as e:
                logger.error("Exception while computing result!", exc_info=True)
                return False
            dt = tr['timethisresult'].dt
            if result is None:
                logger.warning("No result (returned None) for %s, after %s seconds",
                               _ShowValueShort(attributes), dt)
                return False
            logger.debug("result: %s", _ShowValueShort(result))
            logger.info("Got result for %s [runtime: %s seconds]",
                        _ShowValueShort(attributes), dt)
            # assemble the non-identifying info; callable info values are
            # invoked with the attributes they accept
            the_info = {}
            for info_k, info_v in self.info.items():
                if callable(info_v):
                    info_v = _call_with_accepted_kwargs(info_v, attributes)
                the_info[info_k] = info_v
            the_info.update(timethisresult=dt)
            all_results.append_result(attributes, the_info, result)
        # store results
        if not self.skip_store:
            with self._get_store() as store:
                for attributes, the_info, result in all_results.results:
                    store.store_result(attributes, result, info=the_info)
        # signal to caller that we've computed (a) new result(s) -- but this
        # return value is probably ignored anyways
        return True
    def _get_store(self):
        # open a fresh accessor; kept short-lived so other processes aren't
        # locked out of the results file
        store_kwargs = {}
        if self.realm is not None:
            store_kwargs.update(realm=self.realm)
        return Hdf5StoreResultsAccessor(self.store_filename, **store_kwargs)
def flatten_attribute_value_lists(alist):
    """
    Expand list-valued entries of attribute dictionaries into one dictionary
    per value, for every key in all listed dictionaries:
    ``{'a': [1, 2]}`` -> ``[{'a': 1}, {'a': 2}]``.
    :param alist: a dict, or a list of dicts, of attribute values
    :return: a list of dicts with no list values left
    :rtype: list
    :raise TypeError: if an element of `alist` is not a dict
    """
    if isinstance(alist, dict):
        alist = [alist]
    # Repeatedly expand the first list-valued key of each dict until no list
    # values remain (fixed-point iteration, preserving encounter order).
    need_another_loop = True
    while need_another_loop:
        need_another_loop = False
        newalist = []
        for a in alist:
            # explicit check instead of `assert`: asserts are stripped
            # under `python -O` and must not be used for input validation
            if not isinstance(a, dict):
                raise TypeError(
                    "flatten_attribute_value_lists: expected dict, got {!r}".format(a))
            # find the first key whose value is a list, if any
            k, v = next(((k, v) for (k, v) in a.items() if isinstance(v, list)),
                        (None, None))
            if k is None:
                newalist.append(a)  # fully flat, keep as-is
                continue
            need_another_loop = True
            # expand the list value into one dictionary per list item
            for vitem in v:
                d = dict(a)
                d[k] = vitem
                newalist.append(d)
        alist = newalist
    return newalist
|
[
"h5py.File",
"numpy.void",
"hashlib.sha1",
"os.path.basename",
"filelock.FileLock",
"os.path.dirname",
"numpy.all",
"logging.getLogger",
"inspect.signature",
"phfnbutils.TimeThis",
"numpy.printoptions",
"numpy.issubdtype"
] |
[((61, 88), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (78, 88), False, 'import logging\n'), ((19202, 19224), 'inspect.signature', 'inspect.signature', (['fun'], {}), '(fun)\n', (19219, 19224), False, 'import inspect\n'), ((3862, 3890), 'numpy.issubdtype', 'np.issubdtype', (['t', 'np.integer'], {}), '(t, np.integer)\n', (3875, 3890), True, 'import numpy as np\n'), ((3953, 3982), 'numpy.issubdtype', 'np.issubdtype', (['t', 'np.floating'], {}), '(t, np.floating)\n', (3966, 3982), True, 'import numpy as np\n'), ((5160, 5199), 'filelock.FileLock', 'filelock.FileLock', (['self._lock_file_name'], {}), '(self._lock_file_name)\n', (5177, 5199), False, 'import filelock\n'), ((16130, 16144), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (16142, 16144), False, 'import hashlib\n'), ((2018, 2043), 'numpy.all', 'np.all', (['(val == test_value)'], {}), '(val == test_value)\n', (2024, 2043), True, 'import numpy as np\n'), ((4912, 4937), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (4927, 4937), False, 'import os\n'), ((5273, 5302), 'h5py.File', 'h5py.File', (['self.filename', '"""a"""'], {}), "(self.filename, 'a')\n", (5282, 5302), False, 'import h5py\n'), ((6029, 6057), 'inspect.signature', 'inspect.signature', (['predicate'], {}), '(predicate)\n', (6046, 6057), False, 'import inspect\n'), ((18802, 18859), 'numpy.printoptions', 'np.printoptions', ([], {'precision': '(4)', 'threshold': '(8)', 'linewidth': '(9999)'}), '(precision=4, threshold=8, linewidth=9999)\n', (18817, 18859), True, 'import numpy as np\n'), ((24346, 24376), 'inspect.signature', 'inspect.signature', (['fn.__call__'], {}), '(fn.__call__)\n', (24363, 24376), False, 'import inspect\n'), ((24451, 24472), 'inspect.signature', 'inspect.signature', (['fn'], {}), '(fn)\n', (24468, 24472), False, 'import inspect\n'), ((27331, 27379), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.ComputeAndStore')"], {}), "(__name__ + '.ComputeAndStore')\n", 
(27348, 27379), False, 'import logging\n'), ((4957, 4983), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (4973, 4983), False, 'import os\n'), ((32714, 32750), 'phfnbutils.TimeThis', 'phfnbutils.TimeThis', (['tr'], {'silent': '(True)'}), '(tr, silent=True)\n', (32733, 32750), False, 'import phfnbutils\n'), ((10631, 10641), 'numpy.void', 'np.void', (['v'], {}), '(v)\n', (10638, 10641), True, 'import numpy as np\n')]
|
from tracker import (
GitFile,
TranslationGitFile,
GitPatch,
TranslationTrack,
ToCreateTranslationTrack,
ToInitTranslationTrack,
ToUpdateTranslationTrack,
UpToDateTranslationTrack,
OrphanTranslationTrack,
Status
)
from pathlib import Path
import os.path
from github_utils import (
file_url,
raw_file_url,
compare_url
)
class GitFileModel:
    """
    A model describing a git file.
    """
    def __init__(self, git_file):
        """
        Builds the model for templating after the given git file.
        :param tracker.GitFile git_file: git file to build the model from
        :raise ValueError: when git_file is not an instance of GitFile
        """
        if not isinstance(git_file, GitFile):
            raise ValueError("git_file is not an instance of GitFile")
        path = git_file.path
        self.path = path.as_posix()
        self.filename = path.name
        self.directory = path.parent.as_posix()
        self.no_trace = git_file.no_trace
        # an untraced file carries no commit information
        self.commit = None if git_file.no_trace else git_file.commit.hexsha
        self.new_file = git_file.new_file
        self.copied_file = git_file.copied_file
        self.renamed_file = git_file.renamed_file
        self.rename_from = git_file.rename_from.as_posix() if git_file.rename_from else None
        self.rename_to = git_file.rename_to.as_posix() if git_file.rename_to else None
        self.deleted_file = git_file.deleted_file
        # translation files additionally expose their language
        if isinstance(git_file, TranslationGitFile):
            self.lang_tag = git_file.lang_tag
            self.language = git_file.language
class GitPatchModel:
    """
    A model describing a git patch.
    """
    def __init__(self, git_patch):
        """
        Builds the model for templating after the given git patch.
        :param tracker.GitPatch git_patch: git patch to build the model from
        :raise ValueError: when git_patch is not an instance of GitPatch
        """
        if not isinstance(git_patch, GitPatch):
            raise ValueError("git_patch is not an instance of GitPatch")
        self.diff = git_patch.diff
        self.additions = git_patch.additions
        self.deletions = git_patch.deletions
        self.changes = git_patch.changes
class TranslationTrackModel:
    """
    A model describing a translation track as an interface for templates to use.
    """
    def __init__(self, track):
        """
        Builds the model for templating after the given track.
        :param tracker.TranslationTrack: the track
        :raise ValueError: when track is not an instance of TranslationTrack
        """
        if not isinstance(track, TranslationTrack):
            raise ValueError("track is not an instance of TranslationTrack")
        self.translation = GitFileModel(track.translation)
        self.original = GitFileModel(track.original)
        self.status = track.status
        # expose the extra fields specific to each track flavour
        if isinstance(track, (ToCreateTranslationTrack, ToInitTranslationTrack)):
            self.missing_lines = track.missing_lines
        elif isinstance(track, ToUpdateTranslationTrack):
            self.base_original = GitFileModel(track.base_original)
            self.patch = GitPatchModel(track.patch)
            self.to_rename = track.to_rename
        elif isinstance(track, UpToDateTranslationTrack):
            pass  # no extra fields for up-to-date tracks
        elif isinstance(track, OrphanTranslationTrack):
            self.deleted = track.deleted
            self.surplus_lines = track.surplus_lines
class Template:
    """
    Represents a template. Like "{t.translation.language} translation needs to be done here: {translation_url}" for a Github instruction, ``t`` being a TranslationTrackModel instance.
    :var str template: the template itself
    :var bool empty: whether the template is an empty string or not, generally meaning to an updater it should not process it
    """
    def __init__(self, template=""):
        """
        Template is a str with unformatted tags of a ``t`` object representing a ``TranslationTrackModel`` instance, and more args depending on the context (e.g URLs for Github).
        Creating an empty template would generally mean to an updater that it should not process it.
        :param str template: unformatted template, with ``format``-type tags using ``t``, instance of ``TranslationTrackModel``
        :raise TypeError: when template is not a str
        """
        if not isinstance(template, str):
            raise TypeError("template is not str")
        self.template = template
        self.empty = not template
    def special_args(self, track, **kwargs):
        """
        Defines special arguments for the template from a track, when necessary.
        Override this method to provide special args when required in a certain context.
        :param tracker.TranslationTrack track: the track, base of template
        :param kwargs: other provided values for subclasses of this template when necessary
        :return: kwargs for template formatting
        :rtype: dict
        """
        # base templates need no extra arguments
        return {}
    def format(self, t, **kwargs):
        """
        Format the template using the translation track given, resources for the template to be built.
        :param tracker.TranslationTrack t: a translation track or a subclass
        :param **kwargs: other parameters to pass when formatting specific template, defined in special_args of subclass
        :return: the formatted message
        :rtype: str
        :raise ValueError: when t is not a TranslationTrack instance
        """
        if not isinstance(t, TranslationTrack):
            raise ValueError("t is not a TranslationTrack instance")
        model = TranslationTrackModel(t)
        extra = self.special_args(t, **kwargs)
        return self.template.format(t=model, **extra)
class StubTemplate(Template):
    """
    Represents a template for content of stub files.
    """
    def special_args(self, track):
        """
        Sets special argument ``translation_to_original_path``, relative path to original file from translation parent directory.
        :param tracker.TranslationTrack track: the track, base of template
        :return: kwargs for template formatting
        :rtype: dict
        """
        # relative path from the translation's directory to the original file
        rel = os.path.relpath(track.original.path, track.translation.path.parent)
        return {"translation_to_original_path": Path(rel).as_posix()}
class GithubTemplate(Template):
    """
    Represents a template for Github Issues and Projects.
    """
    def special_args(self, track, repo):
        """
        Sets special arguments:
        - ``original_url``, Github URL to original file (using commit rev). Only with To Create, To Initialize, To Update and Up-To-Date tracks.
        - ``raw_original_url``, Github URL to raw original file (using commit rev). Only with To Create, To Initialize, To Update and Up-To-Date tracks.
        - ``translation_url``, Github URL to translation file (using branch rev). Only with To Initialize, To Update, Up-To-Date and Orphan tracks.
        - ``raw_translation_url``, Github URL to raw translation file (using commit rev). Only with To Initialize, To Update, Up-To-Date and Orphan tracks.
        - ``base_original_url``, Github URL to base original file (using commit rev). Only with To Update tracks.
        - ``raw_base_original_url``, Github URL to raw base original file (using commit rev). Only with To Update tracks.
        - ``compare_url``, Github URL to Github comparison (using base_original and original commit rev). Only with To Update tracks.
        :param tracker.TranslationTrack track: the track, base of template
        :param github.Repository.Repository repo: the github repo for URL building purpose
        :return: kwargs for template formatting
        :rtype: dict
        """
        args = {}
        if isinstance(track, (ToCreateTranslationTrack, ToInitTranslationTrack, ToUpdateTranslationTrack, UpToDateTranslationTrack)):
            args["original_url"] = file_url(repo.full_name, track.original.commit.hexsha, track.original.path.as_posix())
            args["raw_original_url"] = raw_file_url(repo.full_name, track.original.commit.hexsha, track.original.path.as_posix())
        if isinstance(track, (ToInitTranslationTrack, ToUpdateTranslationTrack, UpToDateTranslationTrack, OrphanTranslationTrack)):
            args["translation_url"] = file_url(repo.full_name, track.branch, track.translation.path.as_posix())
            args["raw_translation_url"] = raw_file_url(repo.full_name, track.translation.commit.hexsha, track.translation.path.as_posix())
        if isinstance(track, ToUpdateTranslationTrack):
            # BUGFIX: these two assignments previously ended with a stray
            # trailing comma, which turned the URL strings into 1-tuples and
            # broke template formatting; the commas have been removed.
            args["base_original_url"] = file_url(repo.full_name, track.base_original.commit.hexsha, track.base_original.path.as_posix())
            args["raw_base_original_url"] = raw_file_url(repo.full_name, track.base_original.commit.hexsha, track.base_original.path.as_posix())
            args["compare_url"] = compare_url(repo.full_name, track.base_original.commit.hexsha, track.original.commit.hexsha)
        return args
    def format(self, t, repo):
        """
        Format the template for the given track, using `repo` to build the Github URLs.
        :param tracker.TranslationTrack t: the translation track
        :param github.Repository.Repository repo: repository used for URL building
        :return: the formatted message
        :rtype: str
        """
        return super().format(t, repo=repo)
class GithubTemplater:
"""
Github Templates handler:
- maps ``GithubTemplate`` instances to ``tracker.Status``
- format corresponding template from ``tracker.TranslationTrack``, according to their status attribute
"""
def __init__(self):
self.map = {}
def __setitem__(self, status, template):
"""
Maps a template to a status. Templates can't be empty.
:param tracker.Status status: the status to map the template to
:param GithubTemplate template: the template to map to the status
:raise TypeError: when status is not an instance of tracker.Status
:raise TypeError: when template is not an instance of model.GithubTemplate
:raise AttributeError: when template is empty
"""
if not isinstance(template, GithubTemplate):
raise TypeError("template is not an instance of GithubTemplate")
if not isinstance(status, Status):
raise TypeError("status is not an instance of Status")
if template.empty:
raise AttributeError("template can't be empty")
self.map[status] = template
def __contains__(self, status):
"""
Tells whether a template is mapped to the given status.
:param tracker.Status status: the status
:return: True if status is key of a template, False otherwise
:rtype: bool
"""
return status in self.map
def __getitem__(self, status):
"""
Gets the template mapped to the given status.
:param tracker.Status status: the status
:return: the corresponding template, or None if status is not key of a template
:rtype: GithubTemplate
"""
if status in self:
return self.map[status]
else:
return None
def format(self, track, repo):
"""
Gets the formatted template using the given translation track and corresponding to its status attribute.
:param tracker.TranslationTrack: the track used as input to format the template
:param github.Repository.Repository repo: the repo input for the GitHub template
:return: the formatted template, or None if status is not mapped to a template
:rtype: str
:raise TypeError: when track is not an instance of tracker.TranslationTrack
"""
if not isinstance(track, TranslationTrack):
raise TypeError("track is not an instance of TranslationTrack")
if track.status in self:
return self.map[track.status].format(track, repo)
else:
return None
|
[
"github_utils.compare_url"
] |
[((9415, 9512), 'github_utils.compare_url', 'compare_url', (['repo.full_name', 'track.base_original.commit.hexsha', 'track.original.commit.hexsha'], {}), '(repo.full_name, track.base_original.commit.hexsha, track.\n original.commit.hexsha)\n', (9426, 9512), False, 'from github_utils import file_url, raw_file_url, compare_url\n')]
|
from .problem import Problem
from .trig_defs import RightAngleTrigFunction
from enum import Enum
from typing import List
import random
class TransformationType(Enum):
VerticalTranslation = 1
HorizontalTranslation = 2
VerticalStretchCompression = 3
HorizontalStretchCompression = 4
class GraphTransformProblem(Problem):
def __init__(self):
Problem.__init__(self)
def __repr__(self):
return str.format("Graph Transform Problem ({}): {}", self.level, self.prompt)
def _get_trig_func_text(trig_func: RightAngleTrigFunction) -> str:
if trig_func == RightAngleTrigFunction.Sin:
return "sin"
else:
return "cos"
class GraphTransformData:
def __init__(self):
self.trig_func: RightAngleTrigFunction = RightAngleTrigFunction.Sin
self.hints: List[str] = []
self.answer: str = ""
self.vert_translation_mod: str = ""
self.horiz_translation_mod: str = ""
self.vert_stretch_mod: str = ""
self.horiz_stretch_mod: str = ""
def _append_to_answer(self, val: str):
if len(self.answer) > 0:
self.answer += ";"
self.answer += val
def add_horiz_translation(self):
val = random.randint(0, 3)
horiz_translation = "π"
if val > 1:
horiz_translation = "{}{}".format(val, horiz_translation)
if random.randint(0, 1) is 1:
self.horiz_translation_mod = " + {}".format(horiz_translation)
hint_text = horiz_translation
else:
self.horiz_translation_mod = " - {}".format(horiz_translation)
hint_text = "-{}".format(horiz_translation)
self.hints.append("Observe horizontal translation: {}".format(hint_text))
self._append_to_answer("ht")
def add_vertical_translation(self):
val = random.randint(1, 3)
if random.randint(0, 1) is 1:
self.vert_translation_mod = " + {}".format(val)
hint_text = val
else:
self.vert_translation_mod = " - {}".format(val)
hint_text = "-{}".format(val)
self.hints.append("Observe vertical translation: {}".format(hint_text))
self._append_to_answer("vt")
def add_vertical_stretch(self):
val = random.randint(2, 5)
if random.randint(0, 1) is 1:
val *= -1
self.vert_stretch_mod = str(val)
self.hints.append("Observe vertical stretch: {}".format(self.vert_stretch_mod))
self._append_to_answer("vs")
def add_horiz_stretch(self):
val = random.randint(2, 5)
if random.randint(0, 1) is 1:
val *= -1
self.horiz_stretch_mod = str(val)
self.hints.append("Observe horizontal stretch: {}".format(self.horiz_stretch_mod))
self._append_to_answer("hs")
def get_prompt(self):
x = "x"
if self.horiz_stretch_mod != "":
x = "{}/{}".format(x, self.horiz_stretch_mod)
if self.horiz_translation_mod != "":
x = "{}{}".format(x, self.horiz_translation_mod)
x = "{}({})".format(self.trig_func.name, x)
if self.vert_stretch_mod != "":
x = "{}{}".format(self.vert_stretch_mod, x)
if self.vert_translation_mod != "":
x = "{}{}".format(x, self.vert_translation_mod)
return x
def generate_graph_transform_problem(level: int = 1):
graph_data = GraphTransformData()
graph_data.trig_func = RightAngleTrigFunction(random.randint(1, 2))
# pick some random transforms to add to the graph
transforms = list(range(4))
random.shuffle(transforms)
for i in range(level):
xform = transforms.pop()
if xform == 0 and graph_data.horiz_translation_mod is "":
graph_data.add_horiz_stretch()
elif xform == 1 and graph_data.horiz_stretch_mod is "":
graph_data.add_horiz_translation()
elif xform == 2:
graph_data.add_horiz_stretch()
else:
graph_data.add_vertical_stretch()
problem = GraphTransformProblem()
problem.prompt = graph_data.get_prompt()
problem.steps = graph_data.hints
problem.level = level
problem.answer = graph_data.answer
return problem
|
[
"random.shuffle",
"random.randint"
] |
[((3614, 3640), 'random.shuffle', 'random.shuffle', (['transforms'], {}), '(transforms)\n', (3628, 3640), False, 'import random\n'), ((1228, 1248), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (1242, 1248), False, 'import random\n'), ((1851, 1871), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (1865, 1871), False, 'import random\n'), ((2284, 2304), 'random.randint', 'random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (2298, 2304), False, 'import random\n'), ((2581, 2601), 'random.randint', 'random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (2595, 2601), False, 'import random\n'), ((3501, 3521), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (3515, 3521), False, 'import random\n'), ((1387, 1407), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1401, 1407), False, 'import random\n'), ((1884, 1904), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1898, 1904), False, 'import random\n'), ((2317, 2337), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (2331, 2337), False, 'import random\n'), ((2614, 2634), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (2628, 2634), False, 'import random\n')]
|
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import unittest
import h1
from h1.api.website_project_instance_api import WebsiteProjectInstanceApi # noqa: E501
class TestWebsiteProjectInstanceApi(unittest.TestCase):
"""WebsiteProjectInstanceApi unit test stubs"""
def setUp(self):
self.api = WebsiteProjectInstanceApi() # noqa: E501
def tearDown(self):
pass
def test_website_project_instance_connect_get(self):
"""Test case for website_project_instance_connect_get
Get website/instance.connect # noqa: E501
"""
pass
def test_website_project_instance_connect_list(self):
"""Test case for website_project_instance_connect_list
List website/instance.connect # noqa: E501
"""
pass
def test_website_project_instance_create(self):
"""Test case for website_project_instance_create
Create website/instance # noqa: E501
"""
pass
def test_website_project_instance_credential_create(self):
"""Test case for website_project_instance_credential_create
Create website/instance.credential # noqa: E501
"""
pass
def test_website_project_instance_credential_delete(self):
"""Test case for website_project_instance_credential_delete
Delete website/instance.credential # noqa: E501
"""
pass
def test_website_project_instance_credential_get(self):
"""Test case for website_project_instance_credential_get
Get website/instance.credential # noqa: E501
"""
pass
def test_website_project_instance_credential_list(self):
"""Test case for website_project_instance_credential_list
List website/instance.credential # noqa: E501
"""
pass
def test_website_project_instance_credential_patch(self):
"""Test case for website_project_instance_credential_patch
Update website/instance.credential # noqa: E501
"""
pass
def test_website_project_instance_delete(self):
"""Test case for website_project_instance_delete
Delete website/instance # noqa: E501
"""
pass
def test_website_project_instance_domain_create(self):
"""Test case for website_project_instance_domain_create
Create website/instance.domain # noqa: E501
"""
pass
def test_website_project_instance_domain_delete(self):
"""Test case for website_project_instance_domain_delete
Delete website/instance.domain # noqa: E501
"""
pass
def test_website_project_instance_domain_get(self):
"""Test case for website_project_instance_domain_get
Get website/instance.domain # noqa: E501
"""
pass
def test_website_project_instance_domain_list(self):
"""Test case for website_project_instance_domain_list
List website/instance.domain # noqa: E501
"""
pass
def test_website_project_instance_env_create(self):
"""Test case for website_project_instance_env_create
Create website/instance.env # noqa: E501
"""
pass
def test_website_project_instance_env_delete(self):
"""Test case for website_project_instance_env_delete
Delete website/instance.env # noqa: E501
"""
pass
def test_website_project_instance_env_get(self):
"""Test case for website_project_instance_env_get
Get website/instance.env # noqa: E501
"""
pass
def test_website_project_instance_env_list(self):
"""Test case for website_project_instance_env_list
List website/instance.env # noqa: E501
"""
pass
def test_website_project_instance_event_get(self):
"""Test case for website_project_instance_event_get
Get website/instance.event # noqa: E501
"""
pass
def test_website_project_instance_event_list(self):
"""Test case for website_project_instance_event_list
List website/instance.event # noqa: E501
"""
pass
def test_website_project_instance_get(self):
"""Test case for website_project_instance_get
Get website/instance # noqa: E501
"""
pass
def test_website_project_instance_link_create(self):
"""Test case for website_project_instance_link_create
Create website/instance.link # noqa: E501
"""
pass
def test_website_project_instance_link_delete(self):
"""Test case for website_project_instance_link_delete
Delete website/instance.link # noqa: E501
"""
pass
def test_website_project_instance_link_get(self):
"""Test case for website_project_instance_link_get
Get website/instance.link # noqa: E501
"""
pass
def test_website_project_instance_link_list(self):
"""Test case for website_project_instance_link_list
List website/instance.link # noqa: E501
"""
pass
def test_website_project_instance_list(self):
"""Test case for website_project_instance_list
List website/instance # noqa: E501
"""
pass
def test_website_project_instance_log_get(self):
"""Test case for website_project_instance_log_get
Get website/instance.log # noqa: E501
"""
pass
def test_website_project_instance_log_list(self):
"""Test case for website_project_instance_log_list
List website/instance.log # noqa: E501
"""
pass
def test_website_project_instance_log_read(self):
"""Test case for website_project_instance_log_read
Read website/instance.log # noqa: E501
"""
pass
def test_website_project_instance_metric_get(self):
"""Test case for website_project_instance_metric_get
Get website/instance.metric # noqa: E501
"""
pass
def test_website_project_instance_metric_list(self):
"""Test case for website_project_instance_metric_list
List website/instance.metric # noqa: E501
"""
pass
def test_website_project_instance_metric_point_list(self):
"""Test case for website_project_instance_metric_point_list
List website/instance.point # noqa: E501
"""
pass
def test_website_project_instance_restart(self):
"""Test case for website_project_instance_restart
Restart website/instance # noqa: E501
"""
pass
def test_website_project_instance_service_get(self):
"""Test case for website_project_instance_service_get
Get website/instance.service # noqa: E501
"""
pass
def test_website_project_instance_service_list(self):
"""Test case for website_project_instance_service_list
List website/instance.service # noqa: E501
"""
pass
def test_website_project_instance_sideapp_get(self):
"""Test case for website_project_instance_sideapp_get
Get website/instance.sideapp # noqa: E501
"""
pass
def test_website_project_instance_sideapp_list(self):
"""Test case for website_project_instance_sideapp_list
List website/instance.sideapp # noqa: E501
"""
pass
def test_website_project_instance_sideapp_open(self):
"""Test case for website_project_instance_sideapp_open
Open website/instance.sideapp # noqa: E501
"""
pass
def test_website_project_instance_snapshot_create(self):
"""Test case for website_project_instance_snapshot_create
Create website/instance.snapshot # noqa: E501
"""
pass
def test_website_project_instance_snapshot_delete(self):
"""Test case for website_project_instance_snapshot_delete
Delete website/instance.snapshot # noqa: E501
"""
pass
def test_website_project_instance_snapshot_download(self):
"""Test case for website_project_instance_snapshot_download
Download website/instance.snapshot # noqa: E501
"""
pass
def test_website_project_instance_snapshot_get(self):
"""Test case for website_project_instance_snapshot_get
Get website/instance.snapshot # noqa: E501
"""
pass
def test_website_project_instance_snapshot_list(self):
"""Test case for website_project_instance_snapshot_list
List website/instance.snapshot # noqa: E501
"""
pass
def test_website_project_instance_start(self):
"""Test case for website_project_instance_start
Start website/instance # noqa: E501
"""
pass
def test_website_project_instance_stop(self):
"""Test case for website_project_instance_stop
Stop website/instance # noqa: E501
"""
pass
def test_website_project_instance_tag_create(self):
"""Test case for website_project_instance_tag_create
Create website/instance.tag # noqa: E501
"""
pass
def test_website_project_instance_tag_delete(self):
"""Test case for website_project_instance_tag_delete
Delete website/instance.tag # noqa: E501
"""
pass
def test_website_project_instance_tag_get(self):
"""Test case for website_project_instance_tag_get
Get website/instance.tag # noqa: E501
"""
pass
def test_website_project_instance_tag_list(self):
"""Test case for website_project_instance_tag_list
List website/instance.tag # noqa: E501
"""
pass
def test_website_project_instance_tag_put(self):
"""Test case for website_project_instance_tag_put
Replace website/instance.tag # noqa: E501
"""
pass
def test_website_project_instance_transfer(self):
"""Test case for website_project_instance_transfer
Transfer website/instance # noqa: E501
"""
pass
def test_website_project_instance_update(self):
"""Test case for website_project_instance_update
Update website/instance # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"h1.api.website_project_instance_api.WebsiteProjectInstanceApi"
] |
[((10465, 10480), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10478, 10480), False, 'import unittest\n'), ((418, 445), 'h1.api.website_project_instance_api.WebsiteProjectInstanceApi', 'WebsiteProjectInstanceApi', ([], {}), '()\n', (443, 445), False, 'from h1.api.website_project_instance_api import WebsiteProjectInstanceApi\n')]
|
import os
import subprocess
folder = r"./examples"
example = [
"eit_dynamic_bp.py",
"eit_dynamic_greit.py",
"eit_dynamic_jac.py",
"eit_dynamic_jac3d.py",
"eit_dynamic_stack.py",
"eit_dynamic_svd.py",
"eit_sensitivity2d.py",
"eit_static_GN_3D.py",
"eit_static_jac.py",
"fem_forward2d.py",
"fem_forward3d.py",
"mesh_distmesh2d.py",
"mesh_distmesh3d.py",
"mesh_intro2d.py",
"mesh_multi_shell.py",
"paper_eit2016b.py",
"softx/figure01.py",
"softx/figure02.py",
"softx/figure02b.py",
"softx/figure03.py",
]
list_ex = ""
index = {}
for i, file in enumerate(example):
list_ex = f"{list_ex}Example #{i}: {file}\r\n"
index[f"{i}"] = i
def run():
ans = input(f"List of all examples:\r\n{list_ex} Run all examples? (y)/n or #: ")
all = ans in ["Y", "y"]
if not all and ans in list(index.keys()):
_run_ex(example[index[ans]])
return
for ex in example:
next = True
if not all:
ans = input(f"Run example '{ex}'? (y)/n:")
next = ans not in ["N", "n"]
if not next:
continue
_run_ex(ex)
def _run_ex(ex_):
path = os.path.join(folder, ex_)
cmd = f"python {path}"
print(f"runs >> {cmd}")
subprocess.call(cmd, shell=True)
if __name__ == "__main__":
""""""
run()
|
[
"subprocess.call",
"os.path.join"
] |
[((1194, 1219), 'os.path.join', 'os.path.join', (['folder', 'ex_'], {}), '(folder, ex_)\n', (1206, 1219), False, 'import os\n'), ((1279, 1311), 'subprocess.call', 'subprocess.call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (1294, 1311), False, 'import subprocess\n')]
|
from unittest import TestCase
from regal import BaseInfo
from regal.grouping import GroupAlgorithm
from regal.check_interface import AlgorithmABC
# Run Method: python -m unittest -v tests.py
class TestBaseInfoInitial(TestCase):
def test_empty_info(self):
ab = BaseInfo('', '', '')
with self.assertRaises(AttributeError):
ab.grouping()
def test_empty_info_version_host_isdict(self):
ab = BaseInfo({}, '', '')
self.assertIsNotNone(ab.grouping())
def test_info_errortype(self):
ab = BaseInfo({}, '1', 'sds')
self.assertIsNotNone(ab.grouping())
class TestGroupingResult(TestCase):
ver = {
'ver1': '1.1.1.1,2.2.2.2,3.3.3.3,4.4.4.4,5.1.1.1,6.2.2.2,7.3.3.3,8.4.4.4'}
combine_num = 4
def test_combine_num(self):
ab = BaseInfo(
self.ver,
self.combine_num
)
instance_combine_num = ab.grouping().result[0][1]
self.assertEqual(len(instance_combine_num[1:-1][0]), self.combine_num)
def test_schedule_num(self):
schedule_num = 2
ab = BaseInfo(self.ver, self.combine_num, schedule_num)
instance_combine_num = ab.grouping().result[0][1]
self.assertEqual(len(instance_combine_num[0][0].split(',')), schedule_num)
class TestInstance(TestCase):
def test_algorithm_instance(self):
self.assertIsInstance(GroupAlgorithm(), AlgorithmABC)
|
[
"regal.BaseInfo",
"regal.grouping.GroupAlgorithm"
] |
[((275, 295), 'regal.BaseInfo', 'BaseInfo', (['""""""', '""""""', '""""""'], {}), "('', '', '')\n", (283, 295), False, 'from regal import BaseInfo\n'), ((435, 455), 'regal.BaseInfo', 'BaseInfo', (['{}', '""""""', '""""""'], {}), "({}, '', '')\n", (443, 455), False, 'from regal import BaseInfo\n'), ((549, 573), 'regal.BaseInfo', 'BaseInfo', (['{}', '"""1"""', '"""sds"""'], {}), "({}, '1', 'sds')\n", (557, 573), False, 'from regal import BaseInfo\n'), ((817, 853), 'regal.BaseInfo', 'BaseInfo', (['self.ver', 'self.combine_num'], {}), '(self.ver, self.combine_num)\n', (825, 853), False, 'from regal import BaseInfo\n'), ((1097, 1147), 'regal.BaseInfo', 'BaseInfo', (['self.ver', 'self.combine_num', 'schedule_num'], {}), '(self.ver, self.combine_num, schedule_num)\n', (1105, 1147), False, 'from regal import BaseInfo\n'), ((1390, 1406), 'regal.grouping.GroupAlgorithm', 'GroupAlgorithm', ([], {}), '()\n', (1404, 1406), False, 'from regal.grouping import GroupAlgorithm\n')]
|
import sys
from storage.models import Database
if len(sys.argv) != 2:
print("Usage: python insert.py <file> # file should contain one A Number per line.")
sys.exit(1)
alien_numbers = [line.strip().replace('-', '') for line in open(sys.argv[1])]
db = Database()
db.create_table() # checks if already exists
db.upload_new_requests(alien_numbers)
|
[
"storage.models.Database",
"sys.exit"
] |
[((261, 271), 'storage.models.Database', 'Database', ([], {}), '()\n', (269, 271), False, 'from storage.models import Database\n'), ((165, 176), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (173, 176), False, 'import sys\n')]
|
import re
from datetime import date, datetime, time
import dateparser
import pytz
import requests
from bs4 import BeautifulSoup
import sessionize
def get(url):
res = requests.get(url)
return BeautifulSoup(res.text, 'html.parser')
def parse_page(root):
for evt_elm in root.select('.CalMEvent a'):
col_index = len(evt_elm.find_parent('td').find_previous_siblings('td'))
date_row = evt_elm.find_parent('tr').find_previous_sibling(lambda elm: elm.name == 'tr' and elm.select('.CalMDate'))
day = date_row.find_all('td')[col_index].text
yield {
'short_name' : evt_elm.text,
'url': evt_elm['href'],
'name': evt_elm['title'],
'day': day
}
def find_pages():
start = date.today()
for i in range(12):
new_month = start.month + i
new_year = start.year
if new_month > 12:
new_month -= 12
new_year += 1
yield f'https://lwn.net/Calendar/Monthly/cfp/{new_year}-{new_month:02d}/', date(new_year, new_month, 1)
def parse_pages():
for url, base_date in find_pages():
for evt in parse_page(get(url)):
evt['date'] = base_date.replace(day=int(evt['day']))
yield evt
def format_page(raw_evt):
md = re.search(r'^([^(]+) \(([^)]+)\)$', raw_evt['name'])
name, location = md.group(1, 2)
return {
'Conference Name': name,
'Conference URL': raw_evt['url'],
'Location': location,
'CFP URL': raw_evt['url'],
'CFP End Date': datetime.combine(raw_evt['date'], time()),
}
def scrape():
for raw_evt in parse_pages():
evt = format_page(raw_evt)
if evt is None:
continue
if 'papercall.io' in evt['CFP URL']:
continue
if 'events.linuxfoundation.org' in evt['CFP URL']:
continue
if 'sessionize.com' in evt['CFP URL']:
s = sessionize.parse_event(evt['CFP URL'])
if s:
evt.update(s)
yield evt
if __name__ == '__main__':
for e in scrape():
print(e)
|
[
"sessionize.parse_event",
"datetime.date",
"datetime.date.today",
"requests.get",
"bs4.BeautifulSoup",
"datetime.time",
"re.search"
] |
[((173, 190), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (185, 190), False, 'import requests\n'), ((202, 240), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.text', '"""html.parser"""'], {}), "(res.text, 'html.parser')\n", (215, 240), False, 'from bs4 import BeautifulSoup\n'), ((768, 780), 'datetime.date.today', 'date.today', ([], {}), '()\n', (778, 780), False, 'from datetime import date, datetime, time\n'), ((1290, 1343), 're.search', 're.search', (['"""^([^(]+) \\\\(([^)]+)\\\\)$"""', "raw_evt['name']"], {}), "('^([^(]+) \\\\(([^)]+)\\\\)$', raw_evt['name'])\n", (1299, 1343), False, 'import re\n'), ((1590, 1596), 'datetime.time', 'time', ([], {}), '()\n', (1594, 1596), False, 'from datetime import date, datetime, time\n'), ((1943, 1981), 'sessionize.parse_event', 'sessionize.parse_event', (["evt['CFP URL']"], {}), "(evt['CFP URL'])\n", (1965, 1981), False, 'import sessionize\n'), ((1035, 1063), 'datetime.date', 'date', (['new_year', 'new_month', '(1)'], {}), '(new_year, new_month, 1)\n', (1039, 1063), False, 'from datetime import date, datetime, time\n')]
|
#-----------------------------------------------------------------------------
# Title : PyRogue AMC Carrier Cryo Demo Board Application
#-----------------------------------------------------------------------------
# File : AppCore.py
# Created : 2017-04-03
#-----------------------------------------------------------------------------
# Description:
# PyRogue AMC Carrier Cryo Demo Board Application
#
# Network Interfaces:
# UDP_SRV_XVC_IDX_C => 2542, -- Xilinx XVC
# UDP_SRV_SRPV0_IDX_C => 8192, -- Legacy SRPv0 register access (still used for remote FPGA reprogramming)
# UDP_SRV_RSSI0_IDX_C => 8193, -- Legacy Non-interleaved RSSI for Register access and ASYNC messages
# UDP_SRV_RSSI1_IDX_C => 8194, -- Legacy Non-interleaved RSSI for bulk data transfer
# UDP_SRV_BP_MGS_IDX_C => 8195, -- Backplane Messaging
# UDP_SRV_TIMING_IDX_C => 8197, -- Timing ASYNC Messaging
# UDP_SRV_RSSI_ILEAVE_IDX_C => 8198); -- Interleaved RSSI
#-----------------------------------------------------------------------------
# This file is part of the rogue software platform. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue software platform, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
# import pyrogue.interfaces.simulation
# import pyrogue.protocols
# import pyrogue.utilities.fileio
import AmcCarrierCore as amccCore
from AmcCarrierCore.AppTop._AppTop import AppTop
class TopLevel(pr.Device):
def __init__( self,
name = 'FpgaTopLevel',
description = 'Container for FPGA Top-Level',
# JESD Parameters
numRxLanes = [0,0],
numTxLanes = [0,0],
enJesdDrp = False,
# Signal Generator Parameters
numSigGen = [0,0],
sizeSigGen = [0,0],
modeSigGen = [False,False],
# General Parameters
enablePwrI2C = False,
enableBsa = False,
enableMps = False,
numWaveformBuffers = 4,
expand = True,
enableTpgMini = True,
**kwargs):
super().__init__(name=name, description=description, expand=expand, **kwargs)
self._numRxLanes = numRxLanes
self._numTxLanes = numTxLanes
self._numWaveformBuffers = numWaveformBuffers
# Add devices
self.add(amccCore.AmcCarrierCore(
offset = 0x00000000,
enablePwrI2C = enablePwrI2C,
enableBsa = enableBsa,
enableMps = enableMps,
numWaveformBuffers= numWaveformBuffers,
enableTpgMini = enableTpgMini,
))
self.add(AppTop(
offset = 0x80000000,
numRxLanes = numRxLanes,
numTxLanes = numTxLanes,
enJesdDrp = enJesdDrp,
numSigGen = numSigGen,
sizeSigGen = sizeSigGen,
modeSigGen = modeSigGen,
numWaveformBuffers = numWaveformBuffers,
expand = True
))
# Define SW trigger command
@self.command(description="Software Trigger for DAQ MUX",)
def SwDaqMuxTrig():
for i in range(2):
self.AppTop.DaqMuxV2[i].TriggerDaq.call()
def writeBlocks(self, **kwargs):
super().writeBlocks(**kwargs)
# Retire any in-flight transactions before starting
self._root.checkBlocks(recurse=True)
# Calculate the BsaWaveformEngine buffer sizes
size = [[0]*self._numWaveformBuffers,[0]*self._numWaveformBuffers]
for i in range(2):
if ((self._numRxLanes[i] > 0) or (self._numTxLanes[i] > 0)):
for j in range(self._numWaveformBuffers):
waveBuff = self.AmcCarrierCore.AmcCarrierBsa.BsaWaveformEngine[i].WaveformEngineBuffers
if ( (waveBuff.Enabled[j].get() > 0) and (waveBuff.EndAddr[j].get() > waveBuff.StartAddr[j].get()) ):
size[i][j] = waveBuff.EndAddr[j].get() - waveBuff.StartAddr[j].get()
# Calculate the
minSize = [size[0][0],size[1][0]]
for i in range(2):
if ((self._numRxLanes[i] > 0) or (self._numTxLanes[i] > 0)):
for j in range(self._numWaveformBuffers):
if ( size[i][j]<minSize[i] ):
minSize[i] = size[i][j]
# Set the DAQ MUX buffer sizes to match the BsaWaveformEngine buffer sizes
for i in range(2):
if ((self._numRxLanes[i] > 0) or (self._numTxLanes[i] > 0)):
# Convert from bytes to words
minSize[i] = minSize[i] >> 2
# Set the DAQ MUX buffer sizes
self.AppTop.DaqMuxV2[i].DataBufferSize.set(minSize[i])
self.checkBlocks(recurse=True)
|
[
"AmcCarrierCore.AppTop._AppTop.AppTop",
"AmcCarrierCore.AmcCarrierCore"
] |
[((2803, 2982), 'AmcCarrierCore.AmcCarrierCore', 'amccCore.AmcCarrierCore', ([], {'offset': '(0)', 'enablePwrI2C': 'enablePwrI2C', 'enableBsa': 'enableBsa', 'enableMps': 'enableMps', 'numWaveformBuffers': 'numWaveformBuffers', 'enableTpgMini': 'enableTpgMini'}), '(offset=0, enablePwrI2C=enablePwrI2C, enableBsa=\n enableBsa, enableMps=enableMps, numWaveformBuffers=numWaveformBuffers,\n enableTpgMini=enableTpgMini)\n', (2826, 2982), True, 'import AmcCarrierCore as amccCore\n'), ((3131, 3350), 'AmcCarrierCore.AppTop._AppTop.AppTop', 'AppTop', ([], {'offset': '(2147483648)', 'numRxLanes': 'numRxLanes', 'numTxLanes': 'numTxLanes', 'enJesdDrp': 'enJesdDrp', 'numSigGen': 'numSigGen', 'sizeSigGen': 'sizeSigGen', 'modeSigGen': 'modeSigGen', 'numWaveformBuffers': 'numWaveformBuffers', 'expand': '(True)'}), '(offset=2147483648, numRxLanes=numRxLanes, numTxLanes=numTxLanes,\n enJesdDrp=enJesdDrp, numSigGen=numSigGen, sizeSigGen=sizeSigGen,\n modeSigGen=modeSigGen, numWaveformBuffers=numWaveformBuffers, expand=True)\n', (3137, 3350), False, 'from AmcCarrierCore.AppTop._AppTop import AppTop\n')]
|
'''
import numpy as np
import pandas as pd
import nltk
nltk.download('punkt') # one time execution
import re
we_df = pd.read_hdf('mini.h5', start = 0, stop = 100) # (362891, 300)
pi(we_df.shape)
words = we_df.index
pi(words)
pi(words[50000])
pi(we_df.iloc[50000])
mes = 'This is some demo text, which has some spe$hial charecters! And numbers 10, also mixed with text, like - numb3r and number34. Just for testing. #peace_out!'
def get_text_vector(text):
re.findall(r'[a-zA-Z]+', )
'''
# python textrank.py
# textrank (using conceptnet word ventors/embeddings and cosinesimilarity)
import numpy as np
import pandas as pd
'''
import time
from sklearn.metrics import confusion_matrix
import json
import re
'''
cnnb_df = pd.read_hdf('mini.h5')
# cnnb_df = cnnb_df/59 # not req. (takes ~1.3sec)
def pi(a, b = None):
if b:
print('\n', b, a, '\n', type(a))
else:
print('\n', a, '\n', type(a))
'''
mes = 'This is some demo text, which has some spe$hial characters! And numbers 10, also mixed with text, like - numb3r and number34. Just for testing. #peace_out!'
#words = ['This', 'is', 'some', 'demo', 'text', 'which', 'has', 'some', 'spe', 'hial', 'characters', 'And', 'numbers', '10', 'also', 'mixed', 'with', 'text', 'like', 'numb', 'r', 'and', 'number', 'Just', 'for', 'testing', 'peace_out']
mes2 = 'demo text, which only has plain characters and no numbers, also not mixed with text, like - numb3r and number34. Just for testing.'
#vec = text_to_vec(list(map(lambda x: x.lower(), words)))
words = re.findall(r'[a-zA-Z]+', mes.lower())
words2 = re.findall(r'[a-zA-Z]+', mes2.lower())
#pi(words)
vec = text_to_vec(words)
vec2 = text_to_vec(words2)
sim = get_cosine_similarity(vec, vec2)
pi(sim)
pi(keyerror_list)
'''
# Read data
df = pd.read_csv('demo_articles.csv')
df.head()
df['article_text'][0]
# Form sentences
from nltk.tokenize import sent_tokenize
sentences = []
for s in df['article_text']:
sentences.append(sent_tokenize(s))
sentences = [y for x in sentences for y in x] # flatten list / 2d to 1d / combine
# Text preprocessing
# remove punctuations, numbers and special characters
clean_sentences = pd.Series(sentences).str.replace("[^a-zA-Z]", " ")
# make alphabets lowercase
clean_sentences = [s.lower() for s in clean_sentences]
import nltk
#nltk.download('stopwords')
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
print(stop_words)
print(len(stop_words))
# function to remove stopwords
def remove_stopwords(sen):
sen_new = " ".join([i for i in sen if i not in stop_words])
return sen_new
# remove stopwords from the sentences
clean_sentences = [remove_stopwords(r.split()) for r in clean_sentences]
# Vector Representation of Sentences
# Form vector from text
keyerror_list = []
def word_to_vec(word):
vec = pd.Series(np.zeros(shape=(300)))
try:
wuri = '/c/en/' + word
vec = cnnb_df.loc[wuri]
except KeyError:
keyerror_list.append(wuri)
return vec
sentence_vectors = []
for i in clean_sentences:
if len(i) != 0:
#v = sum([word_embeddings.get(w, np.zeros((100,))) for w in i.split()]) / (len(i.split())+0.001)
v = sum([word_to_vec(word) for word in i.split()]) / (len(i.split())+0.001)
else:
v = pd.Series(np.zeros(shape=(300)))
sentence_vectors.append(v)
# Similarity Matrix Preparation
# similarity matrix
sim_mat = np.zeros([len(sentences), len(sentences)])
'''
from sklearn.metrics.pairwise import cosine_similarity
'''
# Vector comparision
def get_cosine_similarity(vec1, vec2):
# =a.b/|a||b| =dot_prod/vec_mag
try:
return sum(vec1 * vec2) / ( pow(sum(vec1*vec1), 0.5) * pow(sum(vec2*vec2), 0.5) )
except ZeroDivisionError:
return 0
for i in range(len(sentences)):
for j in range(len(sentences)):
if i != j:
#sim_mat[i][j] = cosine_similarity(sentence_vectors[i].reshape(1,300), sentence_vectors[j].reshape(1,300))[0,0]
sim_mat[i][j] = get_cosine_similarity(sentence_vectors[i], sentence_vectors[j])
'''
sim_mat[i][j] = get_cosine_similarity(sentence_vectors[i], sentence_vectors[j])
__main__:3: RuntimeWarning: invalid value encountered in double_scalars
'''
# Applying PageRank Algorithm
import networkx as nx
nx_graph = nx.from_numpy_array(sim_mat)
scores = nx.pagerank(nx_graph, max_iter=100) # default max_iter is 100
# Summary Extraction
ranked_sentences = sorted(((scores[i],s) for i,s in enumerate(sentences)), reverse=True)
# Extract top 10 sentences as the summary
for i in range(10):
print(ranked_sentences[i][1])
|
[
"pandas.read_hdf",
"networkx.pagerank",
"pandas.read_csv",
"numpy.zeros",
"networkx.from_numpy_array",
"nltk.tokenize.sent_tokenize",
"pandas.Series",
"nltk.corpus.stopwords.words"
] |
[((772, 794), 'pandas.read_hdf', 'pd.read_hdf', (['"""mini.h5"""'], {}), "('mini.h5')\n", (783, 794), True, 'import pandas as pd\n'), ((1847, 1879), 'pandas.read_csv', 'pd.read_csv', (['"""demo_articles.csv"""'], {}), "('demo_articles.csv')\n", (1858, 1879), True, 'import pandas as pd\n'), ((2488, 2514), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2503, 2514), False, 'from nltk.corpus import stopwords\n'), ((4468, 4496), 'networkx.from_numpy_array', 'nx.from_numpy_array', (['sim_mat'], {}), '(sim_mat)\n', (4487, 4496), True, 'import networkx as nx\n'), ((4507, 4542), 'networkx.pagerank', 'nx.pagerank', (['nx_graph'], {'max_iter': '(100)'}), '(nx_graph, max_iter=100)\n', (4518, 4542), True, 'import networkx as nx\n'), ((2049, 2065), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['s'], {}), '(s)\n', (2062, 2065), False, 'from nltk.tokenize import sent_tokenize\n'), ((2956, 2975), 'numpy.zeros', 'np.zeros', ([], {'shape': '(300)'}), '(shape=300)\n', (2964, 2975), True, 'import numpy as np\n'), ((2254, 2274), 'pandas.Series', 'pd.Series', (['sentences'], {}), '(sentences)\n', (2263, 2274), True, 'import pandas as pd\n'), ((3426, 3445), 'numpy.zeros', 'np.zeros', ([], {'shape': '(300)'}), '(shape=300)\n', (3434, 3445), True, 'import numpy as np\n')]
|
"""
Usage:
paaws instance detail [ --instance-id=<instance_id> ] [ --name=<app_name> --process=<process> --platform=<platform> --env=<env> ] --region=<region>
paaws instance list [ --instance-ids=<instance_ids> ] [ --name=<app_name> ] [ --process=<process> ] [ --platform=<platform> ] [ --env=<env> ] --region=<region>
paaws instance launch --name=<app_name> --process=<process> --platform=<platform> --env=<env> --instance-class=<instance_class> --region=<region> [ --zone=<zone> ] [ --public ]
paaws instance destroy --name=<app_name> [ --process=<process> --platform=<platform> --env=<env> ] --region=<region>
The most commonly used paaws instance commands are:
launch
destroy
list
detail
"""
from __future__ import print_function
from paaws.config import Config
from paaws.ec2 import Instance, get_instances_data
from paaws.helpers.parsers import to_table
def instance(args):
if args['launch']:
instance = Instance(
name=args['--name'],
process=args['--process'],
platform=Config.get_default_config(space='paaws', key='platform') if args['--platform'] is None else args['--platform'],
env=Config.get_default_config(space='paaws', key='env') if args['--env'] is None else args['--env'],
region=Config.get_default_config(space='paaws', key='region') if args['--region'] is None else args['--region'],
instance_class=args['--instance-class'],
public=args['--public'],
zone=args['--zone']
)
instance = instance.launch()
instance_id = instance.id
instance_data = get_instances_data(
region=Config.get_default_config(space='paaws', key='region') if args['--region'] is None else args['--region'],
instance_ids=[ instance_id ],
list_instances=False,
name=args['--name'],
process=args['--process'],
platform=args['--platform'],
env=args['--env']
)
print(to_table(instance_data))
elif args['destroy']:
instance = Instance(
region=Config.get_default_config(space='paaws', key='region') if args['--region'] is None else args['--region'],
name=args['--name'],
process=args['--process'],
platform=args['--platform'],
env=args['--env'],
)
print(instance.destroy())
elif args['detail']:
instance_data = get_instances_data(
region=Config.get_default_config(space='paaws', key='region') if args['--region'] is None else args['--region'],
instance_ids=[ args['--instance-id'] ] if args['--instance-id'] is not None else [],
list_instances=False,
name=args['--name'],
process=args['--process'],
platform=args['--platform'],
env=args['--env']
)
print(to_table(instance_data))
elif args['list']:
instance_data = get_instances_data(
region=Config.get_default_config(space='paaws', key='region') if args['--region'] is None else args['--region'],
instance_ids=args['--instance-ids'].split(" ") if args['--instance-ids'] is not None else [],
list_instances=True,
name=args['--name'],
process=args['--process'],
platform=args['--platform'],
env=args['--env']
)
print(to_table(instance_data))
else:
pass
|
[
"paaws.helpers.parsers.to_table",
"paaws.config.Config.get_default_config"
] |
[((2028, 2051), 'paaws.helpers.parsers.to_table', 'to_table', (['instance_data'], {}), '(instance_data)\n', (2036, 2051), False, 'from paaws.helpers.parsers import to_table\n'), ((1061, 1117), 'paaws.config.Config.get_default_config', 'Config.get_default_config', ([], {'space': '"""paaws"""', 'key': '"""platform"""'}), "(space='paaws', key='platform')\n", (1086, 1117), False, 'from paaws.config import Config\n'), ((1189, 1240), 'paaws.config.Config.get_default_config', 'Config.get_default_config', ([], {'space': '"""paaws"""', 'key': '"""env"""'}), "(space='paaws', key='env')\n", (1214, 1240), False, 'from paaws.config import Config\n'), ((1305, 1359), 'paaws.config.Config.get_default_config', 'Config.get_default_config', ([], {'space': '"""paaws"""', 'key': '"""region"""'}), "(space='paaws', key='region')\n", (1330, 1359), False, 'from paaws.config import Config\n'), ((1679, 1733), 'paaws.config.Config.get_default_config', 'Config.get_default_config', ([], {'space': '"""paaws"""', 'key': '"""region"""'}), "(space='paaws', key='region')\n", (1704, 1733), False, 'from paaws.config import Config\n'), ((2915, 2938), 'paaws.helpers.parsers.to_table', 'to_table', (['instance_data'], {}), '(instance_data)\n', (2923, 2938), False, 'from paaws.helpers.parsers import to_table\n'), ((2128, 2182), 'paaws.config.Config.get_default_config', 'Config.get_default_config', ([], {'space': '"""paaws"""', 'key': '"""region"""'}), "(space='paaws', key='region')\n", (2153, 2182), False, 'from paaws.config import Config\n'), ((3439, 3462), 'paaws.helpers.parsers.to_table', 'to_table', (['instance_data'], {}), '(instance_data)\n', (3447, 3462), False, 'from paaws.helpers.parsers import to_table\n'), ((2511, 2565), 'paaws.config.Config.get_default_config', 'Config.get_default_config', ([], {'space': '"""paaws"""', 'key': '"""region"""'}), "(space='paaws', key='region')\n", (2536, 2565), False, 'from paaws.config import Config\n'), ((3027, 3081), 'paaws.config.Config.get_default_config', 
'Config.get_default_config', ([], {'space': '"""paaws"""', 'key': '"""region"""'}), "(space='paaws', key='region')\n", (3052, 3081), False, 'from paaws.config import Config\n')]
|
import time
import sys
from textEditor import TextEditor
from core import curses
# import completion
raise Exception
class TextSmartEditor(TextEditor):
'''
option-o to write out
option-q to quit
'''
def __init__(self):
super(TextSmartEditor, self).__init__()
self.marginRight = self.width // 2
def updateDim(self):
super(TextSmartEditor, self).updateDim()
self.width -= self.marginRight
def checkErrors(self):
code = '\n'.join(self.lines)
try:
exec(code)
except Exception as e:
return e # type(e).__name__, e.__traceback__.tb_lineno
return
def updateScreen(self, endLine=True):
super(TextSmartEditor, self).updateScreen(endLine=endLine)
start = self.width + self.getMargin()
for y in range(self.height):
# space available = (self.marginRight - 2)
# msg = str(completion.understandLine(self.lines[y + self.scrollY]))
msg = str(self.checkErrors())
msg = msg[:self.marginRight - 2]
text = '| ' + msg
self.window.addstr(y, start, text, curses.color_pair(0))
if not endLine:
return
#self.print(, self.height - 1, resetX=True, fullLine=True)
msg1 = '<< option-q to quit >>'
msg2 = '<< option-o to save >>'
buf = '-' * ((self.width + self.marginLeft + self.marginRight) - len(msg1) - len(msg2) - 1)
if buf == '':
raise Exception('Make your window bigger (wider)')
text = msg1 + buf + msg2
self.window.addstr(self.height, 0, text, curses.color_pair(0)) # 16
if __name__ == '__main__':
if len(sys.argv) != 2:
print('usage: TextEditor file')
sys.exit(1)
# if not os.path.isfile(sys.argv[1]):
# print('error: file not found')
# sys.exit(1)
with TextSmartEditor() as m:
m.load(sys.argv[1])
m.run()
|
[
"sys.exit",
"core.curses.color_pair"
] |
[((1795, 1806), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1803, 1806), False, 'import sys\n'), ((1662, 1682), 'core.curses.color_pair', 'curses.color_pair', (['(0)'], {}), '(0)\n', (1679, 1682), False, 'from core import curses\n'), ((1176, 1196), 'core.curses.color_pair', 'curses.color_pair', (['(0)'], {}), '(0)\n', (1193, 1196), False, 'from core import curses\n')]
|
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
[概要]
メールドライバーのライブラリ
"""
import traceback
from web_app.models.models import MailTemplate
from web_app.models.mail_models import MailDriver
from web_app.models.mail_models import MailActionHistory
from libs.backyardlibs.action_driver.mail.mail_driver import mailManager
from libs.commonlibs.oase_logger import OaseLogger
from libs.commonlibs.common import DriverCommon
logger = OaseLogger.get_instance()
def check_dt_action_params(params, act_info, conditions, *args, **kwargs):
"""
アクションパラメータのチェックをする。
エラーメッセージのリストを返す。
"""
message_list = []
to_list = []
to_info = kwargs['to_info'] if 'to_info' in kwargs else {}
pre_flg = kwargs['pre_flg'] if 'pre_flg' in kwargs else False
for param in params:
param = param.strip()
# パラメーター情報取得
check_info = mailManager.analysis_parameters(params)
# MAIL_NAME チェック
message_list = check_dt_action_params_mail_name(check_info, act_info,message_list)
# MAIL_TEMPLATE チェック
result = check_dt_action_params_mail_template(check_info, pre_flg, act_info, to_list, to_info, message_list)
message_list = result[0]
to_list = result[1]
to_info = result[2]
# メール送信先設定の有無チェック(MAIL_TO MAIL_CC MAIL_BCC)
message_list = check_dt_action_params_mail_to_list(check_info, conditions, to_list, message_list)
return message_list
def check_dt_action_params_mail_name(check_info, act_info, message_list):
"""
MAIL_NAME のチェックをする。
エラーメッセージのリストを返す。
"""
mail_name = check_info['MAIL_NAME']
if mail_name is None:
logger.logic_log('LOSM00008', check_info)
message_list.append({'id': 'MOSJA03113', 'param': 'MAIL_NAME'})
else:
# MAIL_NAME の値が登録済みのドライバー名であるかチェック
if 'drv_name' not in act_info:
act_info['drv_name'] = {}
if mail_name not in act_info['drv_name']:
rcnt = MailDriver.objects.filter(mail_disp_name=mail_name).count()
act_info['drv_name'][mail_name] = True if rcnt > 0 else False
if not act_info['drv_name'][mail_name]:
logger.logic_log('LOSM00009', check_info)
message_list.append({'id': 'MOSJA03117', 'param': None})
return message_list
def check_dt_action_params_mail_template(check_info, pre_flg, act_info, to_list, to_info, message_list):
"""
MAIL_TEMPLATE のチェックをする。
エラーメッセージのリスト、宛先のリスト、テンプレート名と宛先の情報を返す。
"""
template = check_info['MAIL_TEMPLATE']
if template is None:
logger.logic_log('LOSM00010', check_info)
message_list.append({'id': 'MOSJA03113', 'param': 'MAIL_TEMPLATE'})
elif template == '':
if not pre_flg:
logger.logic_log('LOSM00011', check_info)
message_list.append({'id': 'MOSJA03118', 'param': None})
else:
# MAIL_TEMPLATE の値が登録済みのメールテンプレート名であるかチェック
result = is_dt_action_params_mail_template(act_info, template, to_list, to_info)
if result:
to_list.extend(to_info[template])
else:
logger.logic_log('LOSM00011', check_info)
message_list.append({'id': 'MOSJA03118', 'param': None})
return message_list, to_list, to_info
def check_dt_action_params_mail_to_list(check_info, conditions, to_list, message_list):
"""
メール送信先設定の有無チェックをする。
エラーメッセージのリストを返す。
"""
# MAIL_TO チェック
mail_to = check_info['MAIL_TO']
if mail_to is None:
logger.logic_log('LOSM00012', check_info)
message_list.append({'id': 'MOSJA03114', 'param': 'MAIL_TO'})
elif not DriverCommon.has_right_reserved_value(conditions, mail_to):
logger.logic_log('LOSM00023', mail_to)
message_list.append({'id': 'MOSJA03137', 'param': 'MAIL_TO'})
elif mail_to != '':
to_list.append(mail_to)
# MAIL_CC チェック
mail_cc = check_info['MAIL_CC']
if mail_cc is None:
logger.logic_log('LOSM00013', check_info)
message_list.append({'id': 'MOSJA03114', 'param': 'MAIL_CC'})
elif not DriverCommon.has_right_reserved_value(conditions, mail_cc):
logger.logic_log('LOSM00023', mail_cc)
message_list.append({'id': 'MOSJA03137', 'param': 'MAIL_CC'})
# MAIL_BCC チェック
mail_bcc = check_info['MAIL_BCC']
if mail_bcc is None:
logger.logic_log('LOSM00014', check_info)
message_list.append({'id': 'MOSJA03114', 'param': 'MAIL_BCC'})
elif not DriverCommon.has_right_reserved_value(conditions, mail_bcc):
logger.logic_log('LOSM00023', mail_bcc)
message_list.append({'id': 'MOSJA03137', 'param': 'MAIL_BCC'})
# メール送信先設定の有無チェック
if len(to_list) <= 0:
logger.logic_log('LOSM00015', check_info)
message_list.append({'id': 'MOSJA03119', 'param': None})
return message_list
def is_dt_action_params_mail_template(act_info, template, to_list, to_info):
"""
登録済みのメールテンプレート名であるかチェックをする。
メールテンプレート名が登録済みであればTrueを返す。
"""
if 'tmp_name' not in act_info:
act_info['tmp_name'] = {}
if template not in act_info['tmp_name']:
rset = list(MailTemplate.objects.filter(
mail_template_name=template,
).values_list(
'destination', flat=True)
)
for r in rset:
if r:
to_list.append(r)
else:
if template not in to_info:
to_info[template] = to_list
act_info['tmp_name'][template] = True if len(rset) > 0 else False
return act_info['tmp_name'][template]
def get_history_data(action_his_id):
"""
[概要]
action_his_idのメールアクション履歴を取得する
[引数]
action_his_id: int
[戻り値]
result: dict アクション情報に表示したい情報
"""
result = {}
try:
history = MailActionHistory.objects.get(action_his_id=action_his_id)
result['MOSJA13029'] = history.mail_template_name
result['MOSJA13030'] = history.mail_address
except MailActionHistory.DoesNotExist:
logger.system_log('LOSE00000', action_his_id, traceback.format_exc())
finally:
return result
|
[
"web_app.models.mail_models.MailDriver.objects.filter",
"libs.commonlibs.oase_logger.OaseLogger.get_instance",
"libs.backyardlibs.action_driver.mail.mail_driver.mailManager.analysis_parameters",
"libs.commonlibs.common.DriverCommon.has_right_reserved_value",
"traceback.format_exc",
"web_app.models.mail_models.MailActionHistory.objects.get",
"web_app.models.models.MailTemplate.objects.filter"
] |
[((968, 993), 'libs.commonlibs.oase_logger.OaseLogger.get_instance', 'OaseLogger.get_instance', ([], {}), '()\n', (991, 993), False, 'from libs.commonlibs.oase_logger import OaseLogger\n'), ((1391, 1430), 'libs.backyardlibs.action_driver.mail.mail_driver.mailManager.analysis_parameters', 'mailManager.analysis_parameters', (['params'], {}), '(params)\n', (1422, 1430), False, 'from libs.backyardlibs.action_driver.mail.mail_driver import mailManager\n'), ((6282, 6340), 'web_app.models.mail_models.MailActionHistory.objects.get', 'MailActionHistory.objects.get', ([], {'action_his_id': 'action_his_id'}), '(action_his_id=action_his_id)\n', (6311, 6340), False, 'from web_app.models.mail_models import MailActionHistory\n'), ((4123, 4181), 'libs.commonlibs.common.DriverCommon.has_right_reserved_value', 'DriverCommon.has_right_reserved_value', (['conditions', 'mail_to'], {}), '(conditions, mail_to)\n', (4160, 4181), False, 'from libs.commonlibs.common import DriverCommon\n'), ((4570, 4628), 'libs.commonlibs.common.DriverCommon.has_right_reserved_value', 'DriverCommon.has_right_reserved_value', (['conditions', 'mail_cc'], {}), '(conditions, mail_cc)\n', (4607, 4628), False, 'from libs.commonlibs.common import DriverCommon\n'), ((4966, 5025), 'libs.commonlibs.common.DriverCommon.has_right_reserved_value', 'DriverCommon.has_right_reserved_value', (['conditions', 'mail_bcc'], {}), '(conditions, mail_bcc)\n', (5003, 5025), False, 'from libs.commonlibs.common import DriverCommon\n'), ((6549, 6571), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6569, 6571), False, 'import traceback\n'), ((2467, 2518), 'web_app.models.mail_models.MailDriver.objects.filter', 'MailDriver.objects.filter', ([], {'mail_disp_name': 'mail_name'}), '(mail_disp_name=mail_name)\n', (2492, 2518), False, 'from web_app.models.mail_models import MailDriver\n'), ((5629, 5685), 'web_app.models.models.MailTemplate.objects.filter', 'MailTemplate.objects.filter', ([], {'mail_template_name': 
'template'}), '(mail_template_name=template)\n', (5656, 5685), False, 'from web_app.models.models import MailTemplate\n')]
|
import numpy as np
import torch
import torch.nn.init as init
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from .layer_norm import LayerNorm
def maybe_mask(attn, attn_mask):
if attn_mask is not None:
assert attn_mask.size() == attn.size(), \
'Attention mask shape {} mismatch ' \
'with Attention logit tensor shape ' \
'{}.'.format(attn_mask.size(), attn.size())
attn.data.masked_fill_(attn_mask, -float('inf'))
class DotProductAttention(nn.Module):
def __init__(self, num_units, num_mem_units, num_heads):
super(DotProductAttention, self).__init__()
self.linear_ins = [
nn.Linear(num_units, num_mem_units, bias=False) for _ in range(num_heads)]
self.linear_outs = [nn.Linear(
num_mem_units + 2 * num_units, num_units, bias=False) for _ in range(num_heads)]
for i, x in enumerate(self.linear_ins + self.linear_outs):
setattr(self, 'param_%s' % i, x)
self.num_heads = num_heads
def forward(self, query, context, attn_mask=None):
"""Apply attention.
query: batch x dim
context: batch x length x dim
"""
input_ = query
for i in range(self.num_heads):
query_proj = self.linear_ins[i](
input_).unsqueeze(2) # batch x dim x 1
attn = torch.bmm(context, query_proj).squeeze(2) # batch x length
maybe_mask(attn, attn_mask)
attn = F.softmax(attn, dim=1)
wc = torch.bmm(attn.unsqueeze(1), context).squeeze(1) # batch x dim
wc = torch.cat([wc, input_, query], 1) # batch x 2dim
wc = self.linear_outs[i](wc)
wc = torch.tanh(wc)
input_ = wc
return wc, attn
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, dim, attn_dropout=0.1):
super(ScaledDotProductAttention, self).__init__()
self.temper = np.power(dim, 0.5)
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=-1)
def forward(self, q, k, v, attn_mask=None):
attn = torch.bmm(q, k.transpose(1, 2)) / self.temper
maybe_mask(attn, attn_mask)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
class RepeatLinear(nn.Module):
def __init__(self, repeat, feature_dim, dim):
super(RepeatLinear, self).__init__()
self.repeat = repeat
self.layer = nn.Parameter(torch.FloatTensor(repeat, feature_dim, dim))
self.output_dim = dim
init.xavier_normal(self.layer)
def forward(self, x):
_, dim1, dim2 = x.size()
if self.repeat > 1:
out = x.repeat(self.repeat, 1, 1).view(self.repeat, -1, dim2)
else:
out = x.view(1, -1, dim2)
return torch.bmm(out, self.layer).view(-1, dim1, self.output_dim)
class MultiHeadAttention(nn.Module):
def __init__(
self, num_heads, num_units, query_dim, key_dim, value_dim,
dropout_p=0.1):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.num_units = num_units
assert query_dim == key_dim
self.query_dim = query_dim
self.key_dim = key_dim
self.value_dim = value_dim
self.query_layer = RepeatLinear(num_heads, num_units, query_dim)
self.key_layer = RepeatLinear(num_heads, num_units, key_dim)
self.value_layer = RepeatLinear(num_heads, num_units, value_dim)
self.attention = ScaledDotProductAttention(num_units)
self.proj = nn.Linear(num_heads * value_dim, num_units)
self.dropout = nn.Dropout(dropout_p)
self.layer_norm = LayerNorm(num_units)
def forward(self, query, keys, values, attn_mask=None):
# query shape: batch x num queries x num units
# keys shape: batch x num kv x num units
# values shape: batch x num kv x num units
# batch * heads x num queries x query_dim
Q = self.query_layer(query)
# batch * heads x num kv x key_dim (= query_dim)
K = self.key_layer(keys)
# batch * heads x num kv x value_dim
V = self.value_layer(values)
# outputs: batch * heads x num queries x value_dim
# attns: batch * heads x num queries x num kv
outputs, attns = self.attention(
Q, K, V, attn_mask=attn_mask.repeat(self.num_heads, 1, 1) if attn_mask is not None else None)
# TODO: transpose or unfold?
bsz = query.size(0)
# batch x num queries x num_heads * value_dim
outputs = torch.cat(torch.split(outputs, bsz, dim=0), dim=-1)
# batch x num queries x num_units
outputs = self.proj(outputs)
outputs = self.dropout(outputs)
return self.layer_norm(outputs + query), attns
class SimpleMultiHeadAttention(MultiHeadAttention):
def __init__(self, num_heads, num_units, dropout_p=0.1):
assert num_units % num_heads == 0
dim = num_units / num_heads
super(SimpleMultiHeadAttention, self).__init__(
num_heads, num_units, dim, dim, dim, dropout_p)
def forward(self, query, values, attn_mask=None):
if query.dim() == 2:
query = query.unsqueeze(1)
outputs, attns = super(SimpleMultiHeadAttention, self).forward(
query, values, values, attn_mask)
if query.dim() == 2:
outputs = outputs.squeeze(1)
return outputs, attns
class SimpleSDPAttention(ScaledDotProductAttention):
def __init__(self, query_dim, values_dim, dropout_p=0.0):
super(SimpleSDPAttention, self).__init__(values_dim, dropout_p)
self.query_proj = nn.Linear(query_dim, values_dim)
def forward(self, query, values, attn_mask=None):
# query shape: batch x query dim
# values shape: batch x num values x values dim
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1)
output, attn = super(SimpleSDPAttention, self).forward(
self.query_proj(query).unsqueeze(1), values, values, attn_mask)
output = output.squeeze(1)
return output, attn
|
[
"torch.nn.Dropout",
"torch.nn.init.xavier_normal",
"torch.bmm",
"numpy.power",
"torch.split",
"torch.FloatTensor",
"torch.cat",
"torch.nn.functional.softmax",
"torch.nn.Softmax",
"torch.nn.Linear",
"torch.tanh"
] |
[((2039, 2057), 'numpy.power', 'np.power', (['dim', '(0.5)'], {}), '(dim, 0.5)\n', (2047, 2057), True, 'import numpy as np\n'), ((2081, 2105), 'torch.nn.Dropout', 'nn.Dropout', (['attn_dropout'], {}), '(attn_dropout)\n', (2091, 2105), True, 'import torch.nn as nn\n'), ((2129, 2147), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (2139, 2147), True, 'import torch.nn as nn\n'), ((2381, 2399), 'torch.bmm', 'torch.bmm', (['attn', 'v'], {}), '(attn, v)\n', (2390, 2399), False, 'import torch\n'), ((2704, 2734), 'torch.nn.init.xavier_normal', 'init.xavier_normal', (['self.layer'], {}), '(self.layer)\n', (2722, 2734), True, 'import torch.nn.init as init\n'), ((3737, 3780), 'torch.nn.Linear', 'nn.Linear', (['(num_heads * value_dim)', 'num_units'], {}), '(num_heads * value_dim, num_units)\n', (3746, 3780), True, 'import torch.nn as nn\n'), ((3804, 3825), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_p'], {}), '(dropout_p)\n', (3814, 3825), True, 'import torch.nn as nn\n'), ((5842, 5874), 'torch.nn.Linear', 'nn.Linear', (['query_dim', 'values_dim'], {}), '(query_dim, values_dim)\n', (5851, 5874), True, 'import torch.nn as nn\n'), ((711, 758), 'torch.nn.Linear', 'nn.Linear', (['num_units', 'num_mem_units'], {'bias': '(False)'}), '(num_units, num_mem_units, bias=False)\n', (720, 758), True, 'import torch.nn as nn\n'), ((814, 877), 'torch.nn.Linear', 'nn.Linear', (['(num_mem_units + 2 * num_units)', 'num_units'], {'bias': '(False)'}), '(num_mem_units + 2 * num_units, num_units, bias=False)\n', (823, 877), True, 'import torch.nn as nn\n'), ((1533, 1555), 'torch.nn.functional.softmax', 'F.softmax', (['attn'], {'dim': '(1)'}), '(attn, dim=1)\n', (1542, 1555), True, 'import torch.nn.functional as F\n'), ((1654, 1687), 'torch.cat', 'torch.cat', (['[wc, input_, query]', '(1)'], {}), '([wc, input_, query], 1)\n', (1663, 1687), False, 'import torch\n'), ((1762, 1776), 'torch.tanh', 'torch.tanh', (['wc'], {}), '(wc)\n', (1772, 1776), False, 'import torch\n'), 
((2621, 2664), 'torch.FloatTensor', 'torch.FloatTensor', (['repeat', 'feature_dim', 'dim'], {}), '(repeat, feature_dim, dim)\n', (2638, 2664), False, 'import torch\n'), ((4757, 4789), 'torch.split', 'torch.split', (['outputs', 'bsz'], {'dim': '(0)'}), '(outputs, bsz, dim=0)\n', (4768, 4789), False, 'import torch\n'), ((2964, 2990), 'torch.bmm', 'torch.bmm', (['out', 'self.layer'], {}), '(out, self.layer)\n', (2973, 2990), False, 'import torch\n'), ((1412, 1442), 'torch.bmm', 'torch.bmm', (['context', 'query_proj'], {}), '(context, query_proj)\n', (1421, 1442), False, 'import torch\n')]
|
"""Unit testing of the Edit Post view"""
from django.test import TestCase, tag
from django.urls import reverse
from BookClub.models import User, ForumPost, Club
from BookClub.tests.helpers import reverse_with_next
@tag('views', 'forum', 'edit_post')
class EditPostViewTestCase(TestCase):
"""Tests of the Edit Posts view."""
fixtures = [
'BookClub/tests/fixtures/default_users.json',
'BookClub/tests/fixtures/default_clubs.json',
'BookClub/tests/fixtures/default_memberships.json',
'BookClub/tests/fixtures/default_forum.json',
'BookClub/tests/fixtures/default_posts.json',
]
def setUp(self):
self.user = User.objects.get(username="johndoe")
self.non_user = User.objects.get(pk=7)
self.club = Club.objects.get(pk=1)
self.my_post = ForumPost.objects.get(pk=1)
self.other_post = ForumPost.objects.get(pk=2)
self.club_post = ForumPost.objects.get(pk=4)
self.my_url = reverse('edit_forum_post', kwargs={'post_id': self.my_post.id})
self.other_url = reverse('edit_forum_post', kwargs={'post_id': self.other_post.id})
self.club_url = reverse('edit_forum_post', kwargs={'club_url_name': self.club.club_url_name, 'post_id': self.club_post.id})
self.edit = {
"content": "HELLO, HOW DO YOU DO!",
}
def test_edit_post_url(self):
self.assertEqual(self.my_url, '/forum/'+str(self.my_post.pk)+'/edit/')
def test_edit_other_post_url(self):
self.assertEqual(self.other_url, '/forum/'+str(self.other_post.pk)+'/edit/')
def test_edit_club_post_url(self):
self.assertEqual(self.club_url, '/club/'+str(self.club.club_url_name)+'/forum/'+str(self.club_post.id)+'/edit/')
def test_redirect_when_not_logged_in(self):
redirect_url = reverse_with_next('login', self.my_url)
response = self.client.post(self.my_url, self.edit, follow=True)
self.assertRedirects(response, redirect_url,
status_code=302, target_status_code=200, fetch_redirect_response=True
)
self.assertTemplateUsed(response, 'authentication/login.html')
def test_redirect_club_when_not_logged_in(self):
redirect_url = reverse_with_next('login', self.club_url)
response = self.client.post(self.club_url, self.edit, follow=True)
self.assertRedirects(response, redirect_url,
status_code=302, target_status_code=200, fetch_redirect_response=True
)
self.assertTemplateUsed(response, 'authentication/login.html')
def test_redirect_when_not_creator(self):
self.client.login(username=self.user.username, password="<PASSWORD>")
redirect_url = reverse('global_forum')
response = self.client.post(self.other_url, self.edit, follow=True)
self.assertRedirects(response, redirect_url,
status_code=302, target_status_code=200, fetch_redirect_response=True
)
def test_redirect_non_existing_id(self):
self.client.login(username=self.user.username, password="<PASSWORD>")
url = reverse('edit_forum_post', kwargs={'post_id': 555})
redirect_url = reverse('global_forum')
response = self.client.post(url, self.edit, follow=True)
self.assertRedirects(response, redirect_url,
status_code=302, target_status_code=200, fetch_redirect_response=True
)
def test_redirect_club_non_existing_id(self):
self.client.login(username=self.user.username, password="<PASSWORD>")
url = reverse('edit_forum_post', kwargs={'club_url_name': self.club.club_url_name, 'post_id': 555})
redirect_url = reverse('club_forum', kwargs={'club_url_name': self.club.club_url_name})
response = self.client.post(url, self.edit, follow=True)
self.assertRedirects(response, redirect_url,
status_code=302, target_status_code=200, fetch_redirect_response=True
)
def test_edit_post_when_not_logged_in(self):
redirect_url = reverse_with_next('login', self.my_url)
response = self.client.post(self.my_url, self.edit, follow=True)
post = ForumPost.objects.get(pk=1)
self.assertEqual(post.content, "Lorem Ipsum is simply dummy text of the printing and typesetting industry. "
"Lorem Ipsum has been the industrial standard dummy text ever since the 1500s, "
"when an unknown printer took a galley of type and scrambled it to make a type "
"specimen book.")
def test_edit_post_when_not_creator(self):
self.client.login(username=self.user.username, password="<PASSWORD>")
response = self.client.post(self.other_url, self.edit, follow=True)
post = ForumPost.objects.get(pk=2)
self.assertEqual(post.content, "Contrary to popular belief, Lorem Ipsum is not simply random text. It has "
"roots in a piece of classical Latin literature from 45 BC, making it over "
"2000 years old.")
def test_edit_club_post_when_non_member(self):
self.client.login(username=self.non_user.username, password="<PASSWORD>")
response = self.client.post(self.club_url, self.edit, follow=True)
post = ForumPost.objects.get(pk=4)
self.assertEqual(post.content, "... qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit...")
def test_edit_post_when_creator(self):
self.client.login(username=self.user.username, password="<PASSWORD>")
response = self.client.post(self.my_url, self.edit, follow=True)
post = ForumPost.objects.get(pk=1)
self.assertEqual(post.content, "HELLO, HOW DO YOU DO!")
def test_post_details_show(self):
self.client.login(username=self.user.username, password="<PASSWORD>")
response = self.client.get(self.my_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'forum/edit_forum_post.html')
self.assertContains(response, "Lorem Ipsum")
self.assertContains(response, "Lorem Ipsum is simply dummy text of the printing and typesetting industry. "
"Lorem Ipsum has been the industrial standard dummy text ever since the "
"1500s, when an unknown printer took a galley of type and scrambled it to make "
"a type specimen book.")
self.assertContains(response, "Posted by: johndoe")
def test_club_post_details_show(self):
self.client.login(username=self.user.username, password="<PASSWORD>")
response = self.client.get(self.club_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'forum/edit_forum_post.html')
self.assertContains(response, "Latin Quota")
self.assertContains(response, "... qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit...")
self.assertContains(response, "Posted by: johndoe")
|
[
"BookClub.models.User.objects.get",
"BookClub.models.ForumPost.objects.get",
"BookClub.models.Club.objects.get",
"BookClub.tests.helpers.reverse_with_next",
"django.urls.reverse",
"django.test.tag"
] |
[((218, 252), 'django.test.tag', 'tag', (['"""views"""', '"""forum"""', '"""edit_post"""'], {}), "('views', 'forum', 'edit_post')\n", (221, 252), False, 'from django.test import TestCase, tag\n'), ((673, 709), 'BookClub.models.User.objects.get', 'User.objects.get', ([], {'username': '"""johndoe"""'}), "(username='johndoe')\n", (689, 709), False, 'from BookClub.models import User, ForumPost, Club\n'), ((734, 756), 'BookClub.models.User.objects.get', 'User.objects.get', ([], {'pk': '(7)'}), '(pk=7)\n', (750, 756), False, 'from BookClub.models import User, ForumPost, Club\n'), ((777, 799), 'BookClub.models.Club.objects.get', 'Club.objects.get', ([], {'pk': '(1)'}), '(pk=1)\n', (793, 799), False, 'from BookClub.models import User, ForumPost, Club\n'), ((823, 850), 'BookClub.models.ForumPost.objects.get', 'ForumPost.objects.get', ([], {'pk': '(1)'}), '(pk=1)\n', (844, 850), False, 'from BookClub.models import User, ForumPost, Club\n'), ((877, 904), 'BookClub.models.ForumPost.objects.get', 'ForumPost.objects.get', ([], {'pk': '(2)'}), '(pk=2)\n', (898, 904), False, 'from BookClub.models import User, ForumPost, Club\n'), ((930, 957), 'BookClub.models.ForumPost.objects.get', 'ForumPost.objects.get', ([], {'pk': '(4)'}), '(pk=4)\n', (951, 957), False, 'from BookClub.models import User, ForumPost, Club\n'), ((980, 1043), 'django.urls.reverse', 'reverse', (['"""edit_forum_post"""'], {'kwargs': "{'post_id': self.my_post.id}"}), "('edit_forum_post', kwargs={'post_id': self.my_post.id})\n", (987, 1043), False, 'from django.urls import reverse\n'), ((1069, 1135), 'django.urls.reverse', 'reverse', (['"""edit_forum_post"""'], {'kwargs': "{'post_id': self.other_post.id}"}), "('edit_forum_post', kwargs={'post_id': self.other_post.id})\n", (1076, 1135), False, 'from django.urls import reverse\n'), ((1160, 1271), 'django.urls.reverse', 'reverse', (['"""edit_forum_post"""'], {'kwargs': "{'club_url_name': self.club.club_url_name, 'post_id': self.club_post.id}"}), "('edit_forum_post', 
kwargs={'club_url_name': self.club.club_url_name,\n 'post_id': self.club_post.id})\n", (1167, 1271), False, 'from django.urls import reverse\n'), ((1821, 1860), 'BookClub.tests.helpers.reverse_with_next', 'reverse_with_next', (['"""login"""', 'self.my_url'], {}), "('login', self.my_url)\n", (1838, 1860), False, 'from BookClub.tests.helpers import reverse_with_next\n'), ((2265, 2306), 'BookClub.tests.helpers.reverse_with_next', 'reverse_with_next', (['"""login"""', 'self.club_url'], {}), "('login', self.club_url)\n", (2282, 2306), False, 'from BookClub.tests.helpers import reverse_with_next\n'), ((2784, 2807), 'django.urls.reverse', 'reverse', (['"""global_forum"""'], {}), "('global_forum')\n", (2791, 2807), False, 'from django.urls import reverse\n'), ((3205, 3256), 'django.urls.reverse', 'reverse', (['"""edit_forum_post"""'], {'kwargs': "{'post_id': 555}"}), "('edit_forum_post', kwargs={'post_id': 555})\n", (3212, 3256), False, 'from django.urls import reverse\n'), ((3280, 3303), 'django.urls.reverse', 'reverse', (['"""global_forum"""'], {}), "('global_forum')\n", (3287, 3303), False, 'from django.urls import reverse\n'), ((3695, 3792), 'django.urls.reverse', 'reverse', (['"""edit_forum_post"""'], {'kwargs': "{'club_url_name': self.club.club_url_name, 'post_id': 555}"}), "('edit_forum_post', kwargs={'club_url_name': self.club.club_url_name,\n 'post_id': 555})\n", (3702, 3792), False, 'from django.urls import reverse\n'), ((3812, 3884), 'django.urls.reverse', 'reverse', (['"""club_forum"""'], {'kwargs': "{'club_url_name': self.club.club_url_name}"}), "('club_forum', kwargs={'club_url_name': self.club.club_url_name})\n", (3819, 3884), False, 'from django.urls import reverse\n'), ((4206, 4245), 'BookClub.tests.helpers.reverse_with_next', 'reverse_with_next', (['"""login"""', 'self.my_url'], {}), "('login', self.my_url)\n", (4223, 4245), False, 'from BookClub.tests.helpers import reverse_with_next\n'), ((4334, 4361), 'BookClub.models.ForumPost.objects.get', 
'ForumPost.objects.get', ([], {'pk': '(1)'}), '(pk=1)\n', (4355, 4361), False, 'from BookClub.models import User, ForumPost, Club\n'), ((4993, 5020), 'BookClub.models.ForumPost.objects.get', 'ForumPost.objects.get', ([], {'pk': '(2)'}), '(pk=2)\n', (5014, 5020), False, 'from BookClub.models import User, ForumPost, Club\n'), ((5535, 5562), 'BookClub.models.ForumPost.objects.get', 'ForumPost.objects.get', ([], {'pk': '(4)'}), '(pk=4)\n', (5556, 5562), False, 'from BookClub.models import User, ForumPost, Club\n'), ((5889, 5916), 'BookClub.models.ForumPost.objects.get', 'ForumPost.objects.get', ([], {'pk': '(1)'}), '(pk=1)\n', (5910, 5916), False, 'from BookClub.models import User, ForumPost, Club\n')]
|
#! /usr/bin/env python
# Copyright (c) 2008, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the copyright holders nor the names of
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The ATmega128 simulations return an error code string in "external
# memory" at address 0x2000 upon failure. If runtest.sh is run with
# option -s, it will abort the simulation, and leave the file
# core_avr_dump.core where this script can read the error code string
# from. (The simulations on smaller AVRs don't generate this string
# in order to not bloat their code beyond the available ROM size by
# including sprintf().)
# If an argument is given to the script, it is used as the name of the
# simulavr core dump file to read. Otherwise, the simulavr default
# name "core_avr_dump.core" is used.
# $Id: readcore.py 1647 2008-03-19 22:45:15Z joerg_wunsch $
# Enum implementation, from Python recipe:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/413486
# Author: <NAME>
def Enum(*names):
    """Create a simple enumerated type with ordered, comparable values.

    Based on the ASPN Python Cookbook enum recipe.  The original recipe only
    defined the Python 2 hooks ``__cmp__`` and ``__nonzero__`` (and called the
    removed builtin ``cmp``), so ordered comparisons such as
    ``pstate > pstateClass.Done`` fail on Python 3.  This version adds the
    rich comparison methods and ``__bool__`` while keeping the legacy hooks
    so it works on both interpreters.
    """
    ##assert names, "Empty enums are not supported" # <- Don't like empty enums? Uncomment!
    class EnumClass(object):
        __slots__ = names
        def __iter__(self): return iter(constants)
        def __len__(self): return len(constants)
        def __getitem__(self, i): return constants[i]
        def __repr__(self): return 'Enum' + str(names)
        def __str__(self): return 'enum ' + str(constants)
    class EnumValue(object):
        __slots__ = ('__value')
        def __init__(self, value): self.__value = value
        Value = property(lambda self: self.__value)
        EnumType = property(lambda self: EnumType)
        def __hash__(self): return hash(self.__value)
        def _check(self, other):
            # C fans might want to remove the following assertion
            # to make all enums comparable by ordinal value {;))
            assert self.EnumType is other.EnumType, "Only values from the same enum are comparable"
        def __cmp__(self, other):  # Python 2 ordering hook (ignored by Python 3)
            self._check(other)
            return (self.__value > other.__value) - (self.__value < other.__value)
        # Python 3 rich comparisons.
        def __eq__(self, other): self._check(other); return self.__value == other.__value
        def __ne__(self, other): self._check(other); return self.__value != other.__value
        def __lt__(self, other): self._check(other); return self.__value < other.__value
        def __le__(self, other): self._check(other); return self.__value <= other.__value
        def __gt__(self, other): self._check(other); return self.__value > other.__value
        def __ge__(self, other): self._check(other); return self.__value >= other.__value
        def __invert__(self): return constants[maximum - self.__value]
        def __nonzero__(self): return bool(self.__value)  # Python 2 truth hook
        __bool__ = __nonzero__                            # Python 3 truth hook
        def __repr__(self): return str(names[self.__value])
    maximum = len(names) - 1
    constants = [None] * len(names)
    for i, each in enumerate(names):
        val = EnumValue(i)
        setattr(EnumClass, each, val)
        constants[i] = val
    constants = tuple(constants)
    EnumType = EnumClass()
    return EnumType
# end Enum recipe
import re, sys
# Start of CPU register dump
regmagic = re.compile('^General Purpose Register Dump')
# Location of exit code is r24/r25
r24magic = re.compile('r24=(..) +r25=(..)')
# Start of external SRAM dump
srammagic = re.compile('^External SRAM Memory Dump:')
# Start of error code string at address 0x2000
startaddr = re.compile('^2000 :')
# Pattern to detect repeated lines
repline = re.compile('-- last line repeats --')
# Turn one line from the memory dump into an ASCII string.
# Stops processing upon encountering a NUL character.
# Returns a tuple consisting of the string and a condition
# code that is 1 when processing has been terminated by
# detecting NUL, 0 when reaching end of line without seeing
# NUL.
def asciiize(s):
    """Convert one hex-dump line into printable text.

    The first two whitespace-separated tokens (address and ':') are skipped;
    every remaining token is parsed as a hex byte.  Newline and printable
    ASCII bytes are kept, any other byte becomes '?'.  Returns a tuple
    (text, done) where done is 1 if a NUL terminator was seen, 0 otherwise.
    """
    chars = []
    for token in s.split()[2:]:
        byte = int(token, 16)
        if byte == 0:
            # NUL terminates the message string.
            return (''.join(chars), 1)
        if byte == 10 or 32 <= byte < 127:
            chars.append(chr(byte))
        else:
            # Non-printable character, not supposed to happen
            chars.append('?')
    return (''.join(chars), 0)
# Calculate exitcode from r24/r25 hex values
def exitcode(r24, r25):
    """Combine the hex dumps of r25 (high byte) and r24 (low byte) into the
    simulator's 16-bit exit code."""
    return (int(r25, 16) << 8) + int(r24, 16)
# Start of main
try:
    # Optional argv[1]: name of the simulavr core dump file to parse.
    corename = sys.argv[1]
except IndexError:
    corename = 'core_avr_dump.core'
core = open(corename)
# Our result string
s = ''
# Exit code
ec = -1
# Parser state.  The scanner walks the dump top-down: find the CPU register
# section, read the exit code from r24/r25, find the external SRAM section,
# then collect the NUL-terminated message string starting at address 0x2000.
pstateClass = Enum('Done', 'StartAddr', 'SRAMfound', 'GotExitCode',
                   'FoundCPUregs', 'Starting')
pstate = pstateClass.Starting
oline = ''
# NOTE(review): this ordered comparison requires the enum values to support
# ordering on the running interpreter (Python 3 ignores __cmp__).
while pstate > pstateClass.Done:
    l = core.readline()
    if l == '':
        # EOF encountered
        break
    if pstate == pstateClass.Starting:
        if regmagic.match(l):
            pstate = pstateClass.FoundCPUregs
        continue
    elif pstate == pstateClass.FoundCPUregs:
        # Look for the r24/r25 register pair holding the 16-bit exit code.
        matchobj = r24magic.match(l)
        if matchobj != None:
            ec = exitcode(matchobj.group(1), matchobj.group(2))
            pstate = pstateClass.GotExitCode
        continue
    elif pstate == pstateClass.GotExitCode:
        if srammagic.match(l):
            pstate = pstateClass.SRAMfound
        continue
    elif pstate == pstateClass.SRAMfound or pstate == pstateClass.StartAddr:
        # simulavr collapses identical dump lines; substitute the previous
        # line when the "last line repeats" marker is seen.
        if repline.match(l):
            l = oline
        if pstate == pstateClass.SRAMfound:
            # Skip ahead until the dump line for address 0x2000.
            if startaddr.match(l):
                pstate = pstateClass.StartAddr
            else:
                continue
        (part, condcode) = asciiize(l)
        s += part
        if condcode == 1:
            # NUL terminator seen: the message string is complete.
            pstate = pstateClass.Done
    oline = l
core.close()
print("Exit code: %d" % ec)
if s != '':
    print("Message string:")
    print(s)
else:
    print("No message string found.")
|
[
"re.compile"
] |
[((3912, 3956), 're.compile', 're.compile', (['"""^General Purpose Register Dump"""'], {}), "('^General Purpose Register Dump')\n", (3922, 3956), False, 'import re, sys\n'), ((4004, 4036), 're.compile', 're.compile', (['"""r24=(..) +r25=(..)"""'], {}), "('r24=(..) +r25=(..)')\n", (4014, 4036), False, 'import re, sys\n'), ((4080, 4121), 're.compile', 're.compile', (['"""^External SRAM Memory Dump:"""'], {}), "('^External SRAM Memory Dump:')\n", (4090, 4121), False, 'import re, sys\n'), ((4182, 4203), 're.compile', 're.compile', (['"""^2000 :"""'], {}), "('^2000 :')\n", (4192, 4203), False, 'import re, sys\n'), ((4250, 4287), 're.compile', 're.compile', (['"""-- last line repeats --"""'], {}), "('-- last line repeats --')\n", (4260, 4287), False, 'import re, sys\n')]
|
# 根据图片和音乐合成带节奏的相册视频
from typing import Tuple, Union, Any
import moviepy.editor
from moviepy.video.fx.speedx import speedx
import wave
import numpy as np
import re
from progressbar import *
from common import python_box
from common import gui
import psutil
import time
import math
import moviepy.audio.fx.all
class FfmpegPlugin:
    """Thin wrapper around the ``ffmpeg`` command-line tool.

    All work is delegated to ``os.system`` shell calls; the instance only
    holds the executable name and reports its own lifetime on destruction.
    """

    def __init__(self):
        self.t = time.time()  # creation timestamp, used to report elapsed time
        self.ffmpeg = "ffmpeg"  # executable name/path used in every command

    def __del__(self):
        print("use time:", time.time() - self.t)

    def video2audio(self, directory):
        """Extract a .wav audio track from every .mp4 file in *directory*."""
        f_lst = python_box.dir_list(directory, "mp4$")
        for file in f_lst:
            # Anchor the substitution at the end of the name; the previous
            # unanchored "mp4" also rewrote path components such as
            # ".../mp4_clips/".
            wav = re.sub("mp4$", "", file) + "wav"
            print(file, wav)
            cmd = "%s -y -i '%s' '%s'" % (self.ffmpeg, file, wav)
            print(cmd)
            os.system(cmd)

    def audio_split(self, directory):
        """Split every .mp3 in *directory* into consecutive 7-second PCM chunks.

        Stops when ffmpeg produces an empty chunk (past the end of the audio).
        """
        f_lst = python_box.dir_list(directory, "mp3$")
        for file in f_lst:
            seconds = 0
            while 1:
                m, s = divmod(seconds, 60)
                h, m = divmod(m, 60)
                start = ("%01d:%02d:%02d" % (h, m, s))
                end = "0:0:07"
                seconds += 7
                print(file)
                mp4 = file
                # Escape the dot and anchor at the end so only the real
                # ".mp3" extension is stripped; the previous ".mp3" pattern
                # treated the dot as a wildcard.
                mp4_split = re.sub(r"\.mp3$", "", file) + "_%d.pcm" % seconds
                cmd = "{ffmpeg} -y -ss {start} -t {end} -i {mp4} -acodec pcm_s16le -f s16le -ac 1 -ar 16000 {mp4_split}".format(
                    ffmpeg=self.ffmpeg, start=start, end=end, mp4_split=mp4_split, mp4=mp4)
                print(cmd)
                os.system(cmd)
                size = os.path.getsize(mp4_split)
                if size == 0:
                    # Empty chunk: we ran past the end of the audio.
                    break

    def video_split(self, file):
        """Cut a fixed window (0:0:9 for 0:4:49) out of *file* without re-encoding."""
        mp4 = file
        # Escaped and anchored for the same reason as in audio_split.
        mp4_split = re.sub(r"\.mp4$", "", file) + "_split.mp4"
        start = "0:0:9"
        end = "0:4:49"
        print(file)
        cmd = '''{ffmpeg} -y -ss {start} -t {end} -i "{mp4}" -vcodec copy -acodec copy "{mp4_split}"'''.format(
            ffmpeg=self.ffmpeg, start=start, end=end, mp4_split=mp4_split, mp4=mp4)
        print(cmd)
        os.system(cmd)

    def video_concat(self, dir):
        """Losslessly concatenate all .mp4 files in *dir* into output.mp4."""
        os.chdir(dir)
        f_lst = []
        for file in python_box.dir_list(dir, "mp4"):
            file = "file '{}'".format(file)
            f_lst.append(file)
        videoInfo = dir + "/videoInfo.txt"
        python_box.write_file(f_lst, videoInfo)
        cmd = '''{} -f concat -i {} -c copy {}output.mp4'''.format(self.ffmpeg, videoInfo, dir + "/")
        print(cmd)
        os.chdir(dir)
        os.system(cmd)
        os.remove(videoInfo)  # clean up the temporary concat list
def imageSequence(directory, target):
    """Render the images in *directory* into a 10 fps video at *target*.

    Only works when all images share the same dimensions.
    """
    sequence = moviepy.editor.ImageSequenceClip(directory, fps=10)
    sequence.write_videofile(target)
def movie_concat(directory):  # NOTE: concatenation stutters/repeats at the joins
    """Concatenate every .mp4 in *directory* into concatVideo.mp4."""
    out_path = directory + "/concatVideo.mp4"
    clips = [moviepy.editor.VideoFileClip(name)
             for name in python_box.dir_list(directory, "mp4")]
    merged = moviepy.editor.concatenate_videoclips(clips)
    merged.write_videofile(out_path)
def clip_speed_change(clip, speed, ta, tb):
    """Speed up / slow down the [ta, tb] window of *clip*.

    The end of the window is rescaled (ta + (tb - ta) * speed) so the change
    consumes the same amount of source material regardless of the factor.

    :param clip: moviepy clip to modify
    :param speed: speed factor (> 1 faster, < 1 slower)
    :param ta: window start time, seconds
    :param tb: window end time, seconds
    :return: the (possibly) modified clip
    """
    scaled_end = ta + (tb - ta) * speed
    if scaled_end <= clip.duration:
        try:
            # NOTE: if subfx raises here, closing all python processes and
            # upgrading the library has been observed to resolve it.
            clip = clip.subfx(lambda c: speedx(c, speed), ta, scaled_end)
        except Exception as err:
            print(err)
    return clip
def num_speed(numpy_arr, n):
    """Rescale an array of speed factors by sensitivity *n*.

    Values above 1 are pushed further away from 1 as n grows
    (1 + (v - 1) * n); values at or below 1 are damped linearly when
    n <= 1 (1 - (1 - v) * n) and divided by n when n > 1.

    :param numpy_arr: array-like of speed factors
    :param n: sensitivity multiplier
    :return: new numpy array of adjusted speeds
    """
    arr = np.asarray(numpy_arr, dtype=float)
    # Vectorized form of the original per-element loop, which was O(n^2)
    # because it grew the result with np.append on every iteration.
    if n <= 1:
        slow = 1 - (1 - arr) * n
    else:
        slow = arr / n
    return np.where(arr > 1, 1 + (arr - 1) * n, slow)
def get_current_index(np_array: np.ndarray, value):
    """Return the index of the last element of the ascending array that is
    <= *value*; fall back to the last index when no element qualifies.

    :param np_array: ascending numpy array (e.g. of timestamps)
    :param value: threshold value
    :return: integer index into np_array
    """
    candidates = np.flatnonzero(np_array <= value)
    if candidates.size > 0:
        return candidates[-1]
    return len(np_array) - 1
def compute_time_line(np_time: np.ndarray, np_speed: np.ndarray, clips: list, audio_duration) -> list:
    """
    Iteratively search for per-clip durations so that their sum approaches
    audio_duration, with each clip's length inversely proportional to the
    audio "speed" at the moment the clip would start.
    :param np_time: timestamps of the speed samples, ascending
    :param np_speed: normalized speed factor per sample
    :param clips: the clips (only their count is used)
    :param audio_duration: target total duration in seconds
    :return: list of per-clip durations in seconds
    """
    # Start from an even split and nudge a scale factor up/down until the
    # total lands within 1 second of the audio length.
    default_var = audio_duration / len(clips)
    change_var = 0.01
    durations = []
    while True:
        durations.clear()
        for _ in clips:
            # Audio speed at the point where this clip would start playing.
            like_index = get_current_index(np_time, sum(durations))
            clip_duration = 1.0 / np_speed[like_index]
            clip_duration = clip_duration * default_var
            durations.append(clip_duration)
        total = sum(durations)
        if total > audio_duration:
            default_var *= 1 - change_var
        if total <= audio_duration:
            default_var *= 1 + change_var
        got = math.fabs(total - audio_duration) < 1
        if got:
            break
        else:
            # Shrink the step so the search converges instead of oscillating.
            change_var *= 0.8
    if len(sys.argv) >= 3 and sys.argv[2] == "plot":
        from common import tools
        data = []
        for i in durations:
            data.append(1 / i)
        tools.plot_list(data)
    return durations
class MovieLib(FfmpegPlugin):
    """Build a slideshow video from still images whose pacing follows the
    loudness ("beat") of a background-music track."""

    def __init__(self):
        super().__init__()
        self.image_list = []  # collected image paths
        self.audio_lst = []  # collected background-music file paths
        self.imageVideo = None  # intermediate video assembled from the images
        self.audio_file = None  # temporary wav with the concatenated audio
        self.speed_video_file = None  # final output video path
        self.temp_videos = []  # partial videos flushed when memory runs low
        # sensitivity of the speed changes
        self.sens = 0.6
        self.change_speed_time = 0.8
        self.audio_leader = True

    def set_out(self, directory):
        """Derive output/intermediate file paths from *directory*'s parent."""
        dir_ = os.path.split(directory)[0]
        self.imageVideo = os.path.join(dir_, "pic2video.mp4")
        self.audio_file = os.path.join(dir_, "pic2video.wav")
        self.speed_video_file = os.path.join(dir_, f"{os.path.basename(dir_)}.mp4")

    def add_bgm(self, audio_dir):
        """Queue one background-music file."""
        self.audio_lst.append(audio_dir)

    def add_pic(self, pic_dir):
        """Queue every jpg under *pic_dir* (recursive); set output paths once."""
        self.image_list.extend(sorted(python_box.dir_list(pic_dir, "jpg", walk=True)))
        if not self.speed_video_file:
            self.set_out(pic_dir)

    def audio2data(self, audio):
        """Read a wav file; return (sample timestamps, |amplitude| of channel 0).

        Side effect: stores the file's frame rate on self.framerate.
        NOTE(review): the reshape to (-1, 2) assumes stereo input -- confirm
        the upstream wav is always 2-channel.
        """
        f = wave.open(audio, 'rb')
        params = f.getparams()
        nchannels, sampwidth, self.framerate, nframes = params[:4]
        strData = f.readframes(nframes)
        f.close()
        # NOTE(review): np.fromstring is deprecated; np.frombuffer is the
        # modern equivalent.
        waveData = np.fromstring(strData, dtype=np.short)
        waveData.shape = -1, 2
        waveData = waveData.T
        waveData = waveData[0]  # keep channel 0 only
        audioTime = np.arange(0, nframes) * (1.0 / self.framerate)
        if len(sys.argv) >= 3 and sys.argv[2] == "plot":
            from common import tools
            tools.plot_list(waveData, audioTime)
        np.abs(waveData, out=waveData)  # rectify: loudness, not signed amplitude
        return audioTime, waveData

    def frame2speed(self, audioTime: np.ndarray, wave_data: np.ndarray, f_duration=None) -> Tuple[
        np.ndarray, np.ndarray]:
        """
        Derive a per-window "speed" factor from the audio loudness.
        :param f_duration: window length in samples (default: half a second)
        :param audioTime: sample timestamps from audio2data
        :param wave_data: rectified amplitudes from audio2data
        :return: (window start times, normalized speed factors)
        """
        np_time = np.array([])
        np_speed = np.array([])
        # collect key frames: mean loudness per window
        f = 0
        if f_duration is None:
            f_duration = int(self.framerate * 0.5)
        while f <= len(audioTime) - 1:
            t = audioTime[f]
            speed = np.mean(wave_data[f:f + f_duration])
            f += f_duration
            np_time = np.append(np_time, t)
            np_speed = np.append(np_speed, speed)
        # adjust the speed sensitivity: normalize to mean 1, clamp to
        # [0.2, 8], spread around 1 by self.sens, then renormalize
        np_speed = np_speed / np.mean(np_speed)
        np_speed = np.where(np_speed >= 8, 8, np_speed)
        np_speed = np.where(np_speed <= 0.2, 0.2, np_speed)
        np_speed = np.where(np_speed >= 1, np_speed * self.sens, np_speed)
        np_speed = np.where(np_speed < 1, np_speed / self.sens, np_speed)
        np_speed = np_speed / np.mean(np_speed)
        return np_time, np_speed

    def video_speed_with_audio(self):
        # Match the video speed to the audio rhythm; intended for videos made
        # of repeated stills or with uniform pacing.
        sys.setrecursionlimit(10000000)  # subfx chains recurse deeply
        video = moviepy.editor.VideoFileClip(self.imageVideo)
        video.audio.write_audiofile(self.audio_file)
        audioTime, wave_data = self.audio2data(self.audio_file)
        np_time, np_speed = self.frame2speed(audioTime, wave_data,
                                             f_duration=int(self.framerate * self.change_speed_time))
        # process the video
        bar_setting = ['change speed: ', Percentage(), Bar("#"), Timer(), ' ', ETA()]
        speed_clip = moviepy.editor.VideoFileClip(self.imageVideo)  # initial clip
        audio_clip = speed_clip.audio
        bar = ProgressBar(widgets=bar_setting, maxval=len(np_speed)).start()
        bar_update_tie = 1
        for i in range(len(np_speed)):
            bar.update(bar_update_tie)
            bar_update_tie += 1
            speed = np_speed[i]
            t = np_time[i]
            speed_clip = clip_speed_change(speed_clip, speed, t, t + self.change_speed_time)  # piecewise speed change
            np_time = np.append(np_time, t)
        speed_clip.audio = audio_clip
        print(self.speed_video_file)
        video_without_audio = python_box.FileSys().get_outfile(self.speed_video_file, "no_audio")
        speed_clip.write_videofile(video_without_audio, audio=False)
        speed_clip = moviepy.editor.VideoFileClip(video_without_audio)  # reload: works around failing audio write
        duration = speed_clip.duration
        audio = moviepy.editor.AudioFileClip(self.audio_file)
        audio.set_duration(duration)  # NOTE(review): return value ignored; set_duration is not in-place
        speed_clip.audio = audio
        speed_clip.write_videofile(self.speed_video_file)
        # release references
        del audio
        del speed_clip
        try:
            os.remove(video_without_audio)
            os.remove(self.audio_file)
            os.remove(self.imageVideo)
        except Exception as e:
            print(e)
        bar.finish()

    def crop_clip(self, clip: moviepy.editor.ImageClip, width=1080 * 4 / 3, height=1080):
        """Scale *clip* to cover width x height, then center-crop to that size."""
        w, h = clip.size  # clip width/height
        w_h = w / h
        if w_h <= width / height:  # relatively too narrow: fit by width
            clip = clip.resize(width=width)
            w, h = clip.size
            clip = clip.crop(x_center=w / 2, y_center=h / 2, width=width, height=height)
        if w_h > width / height:  # relatively too wide: fit by height
            clip = clip.resize(height=height)
            w, h = clip.size
            clip = clip.crop(x_center=w / 2, y_center=h / 2, width=width, height=height)
        return clip

    def image2speed_video(self, width=1080 * 4 / 3, height=1080):
        """
        Build the beat-paced video directly from the still images,
        skipping the separate image-to-video step.
        :param width: output frame width
        :param height: output frame height
        :return: None (writes self.speed_video_file)
        """
        # build the audio track
        if len(self.audio_lst) == 0:
            raise Exception("not exists any music")
        audio_clips = []
        for m in self.audio_lst:
            clip = moviepy.editor.AudioFileClip(m)
            audio_clips.append(clip)
        audio_clip = moviepy.editor.concatenate_audioclips(audio_clips)
        audio_clip.write_audiofile(self.audio_file)
        audioTime, wave_data = self.audio2data(self.audio_file)
        np_time, np_speed = self.frame2speed(audioTime, wave_data)
        time_line = compute_time_line(np_time, np_speed, self.image_list, audio_clip.duration)
        self.image_list.sort()
        image_clips = []
        for i in range(len(self.image_list)):
            image_clip = moviepy.editor.ImageClip(self.image_list[i])
            image_clip.start = sum(time_line[0:i])
            image_clip.duration = time_line[i]
            image_clip.fps = 1
            image_clip = self.crop_clip(image_clip, width, height)
            image_clips.append(image_clip)
        video_clip = moviepy.editor.concatenate_videoclips(image_clips)
        video_clip.audio = audio_clip
        video_clip.write_videofile(self.speed_video_file, fps=5)
        os.remove(self.audio_file)

    def image2clip(self, width=1080 * 4 / 3, height=1080, duration=0.25):
        """Assemble all queued images into self.imageVideo at a fixed
        per-image duration, looping/trimming the background music to fit."""
        fps = 1.0 / duration
        width_height = width / height  # NOTE(review): unused
        if len(self.audio_lst) == 0:
            raise Exception("exists any music")  # NOTE(review): message likely meant "not exists any music"
        audioClips = []
        for m in self.audio_lst:
            audioClip = moviepy.editor.AudioFileClip(m)
            audioClips.append(audioClip)
        audioClip = moviepy.editor.concatenate_audioclips(audioClips)
        self.image_list.sort()
        bar_setting = ['image2clip: ', Percentage(), Bar('#'), ' ', ETA()]
        bar = ProgressBar(widgets=bar_setting, maxval=len(self.image_list)).start()
        videoStartTime = 0
        videoClips = []
        fail_pic = []
        bar_i = 0
        for imageFileName in self.image_list:
            bar_i += 1
            try:
                imageClip = moviepy.editor.ImageClip(imageFileName)
                videoClip = imageClip.set_duration(duration)
                videoClip = videoClip.set_start(videoStartTime)
                videoClip = self.crop_clip(videoClip, width, height)
                videoStartTime += duration
                if 'video_clip' not in locals().keys():
                    video_clip = videoClip
                else:
                    video_clip = moviepy.editor.concatenate_videoclips([video_clip, videoClip])
                # flush to a temp file when resident memory exceeds ~800 MB
                if psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 > 800:
                    i = 1
                    temp_video = python_box.FileSys().get_outfile(self.imageVideo, str(i))
                    while 1:
                        if os.path.exists(temp_video):
                            i += 1
                            temp_video = python_box.FileSys().get_outfile(self.imageVideo, str(i))
                        else:
                            self.temp_videos.append(temp_video)
                            break
                    video_clip.write_videofile(temp_video, fps=fps)
                    del video_clip
            except Exception as e:
                fail_pic.append(imageFileName)
                print(e)
            bar.update(bar_i)
        if len(self.temp_videos) > 0:
            # re-read the flushed parts and stitch them together
            videos = []
            for temp_video in self.temp_videos:
                video_clip = moviepy.editor.VideoFileClip(temp_video)
                videos.append(video_clip)
            video_clip = moviepy.editor.concatenate_videoclips(videos)
        bar.finish()
        # match the audio track length to the video
        video_duration = video_clip.duration
        audio_duration = audioClip.duration
        if self.audio_leader:
            # audio dictates length: re-time the video to the audio duration
            video_clip = video_clip.subfx(lambda c: speedx(c, video_duration / audio_duration))
        else:
            # video dictates length: loop the audio, then trim it
            while audioClip.duration < video_duration:
                audioClip = moviepy.editor.concatenate_audioclips([audioClip, audioClip])
            audioClip = audioClip.set_duration(video_duration)
        video_clip.audio = audioClip
        video_clip.write_videofile(self.imageVideo, fps=fps)
        del video_clip
        for temp in self.temp_videos:
            try:
                os.remove(temp)
            except Exception as e:
                print(e)
        return self.imageVideo

    def run(self):
        """
        Compose the queued images into one clip whose pacing follows the
        rhythm detected in the queued background music.
        :return:
        """
        self.image2speed_video()
if __name__ == "__main__":
"""
pic to video clip
"""
movie = MovieLib()
for i in range(6):
directory = gui.select_dir("多个图片目录,取消代表则选择完成")
if directory:
movie.add_pic(directory)
else:
break
for i in range(6):
file = gui.select_file("多个音乐文件,取消代表则选择完成")
if file:
movie.add_bgm(file)
else:
break
movie.run()
|
[
"wave.open",
"common.tools.plot_list",
"numpy.abs",
"math.fabs",
"common.python_box.write_file",
"moviepy.video.fx.speedx.speedx",
"common.python_box.FileSys",
"common.python_box.dir_list",
"time.time",
"numpy.append",
"common.gui.select_file",
"numpy.where",
"numpy.array",
"common.gui.select_dir",
"numpy.arange",
"numpy.mean",
"re.sub",
"numpy.fromstring"
] |
[((2845, 2882), 'common.python_box.dir_list', 'python_box.dir_list', (['directory', '"""mp4"""'], {}), "(directory, 'mp4')\n", (2864, 2882), False, 'from common import python_box\n'), ((3636, 3648), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3644, 3648), True, 'import numpy as np\n'), ((4129, 4156), 'numpy.where', 'np.where', (['(np_array <= value)'], {}), '(np_array <= value)\n', (4137, 4156), True, 'import numpy as np\n'), ((372, 383), 'time.time', 'time.time', ([], {}), '()\n', (381, 383), False, 'import time\n'), ((543, 581), 'common.python_box.dir_list', 'python_box.dir_list', (['directory', '"""mp4$"""'], {}), "(directory, 'mp4$')\n", (562, 581), False, 'from common import python_box\n'), ((859, 897), 'common.python_box.dir_list', 'python_box.dir_list', (['directory', '"""mp3$"""'], {}), "(directory, 'mp3$')\n", (878, 897), False, 'from common import python_box\n'), ((2193, 2224), 'common.python_box.dir_list', 'python_box.dir_list', (['dir', '"""mp4"""'], {}), "(dir, 'mp4')\n", (2212, 2224), False, 'from common import python_box\n'), ((2352, 2391), 'common.python_box.write_file', 'python_box.write_file', (['f_lst', 'videoInfo'], {}), '(f_lst, videoInfo)\n', (2373, 2391), False, 'from common import python_box\n'), ((3914, 3949), 'numpy.append', 'np.append', (['new_numpy_arr', 'new_speed'], {}), '(new_numpy_arr, new_speed)\n', (3923, 3949), True, 'import numpy as np\n'), ((5410, 5431), 'common.tools.plot_list', 'tools.plot_list', (['data'], {}), '(data)\n', (5425, 5431), False, 'from common import tools\n'), ((6434, 6456), 'wave.open', 'wave.open', (['audio', '"""rb"""'], {}), "(audio, 'rb')\n", (6443, 6456), False, 'import wave\n'), ((6632, 6670), 'numpy.fromstring', 'np.fromstring', (['strData'], {'dtype': 'np.short'}), '(strData, dtype=np.short)\n', (6645, 6670), True, 'import numpy as np\n'), ((6981, 7011), 'numpy.abs', 'np.abs', (['waveData'], {'out': 'waveData'}), '(waveData, out=waveData)\n', (6987, 7011), True, 'import numpy as np\n'), ((7344, 
7356), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7352, 7356), True, 'import numpy as np\n'), ((7376, 7388), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7384, 7388), True, 'import numpy as np\n'), ((7833, 7869), 'numpy.where', 'np.where', (['(np_speed >= 8)', '(8)', 'np_speed'], {}), '(np_speed >= 8, 8, np_speed)\n', (7841, 7869), True, 'import numpy as np\n'), ((7889, 7929), 'numpy.where', 'np.where', (['(np_speed <= 0.2)', '(0.2)', 'np_speed'], {}), '(np_speed <= 0.2, 0.2, np_speed)\n', (7897, 7929), True, 'import numpy as np\n'), ((7949, 8004), 'numpy.where', 'np.where', (['(np_speed >= 1)', '(np_speed * self.sens)', 'np_speed'], {}), '(np_speed >= 1, np_speed * self.sens, np_speed)\n', (7957, 8004), True, 'import numpy as np\n'), ((8024, 8078), 'numpy.where', 'np.where', (['(np_speed < 1)', '(np_speed / self.sens)', 'np_speed'], {}), '(np_speed < 1, np_speed / self.sens, np_speed)\n', (8032, 8078), True, 'import numpy as np\n'), ((15636, 15670), 'common.gui.select_dir', 'gui.select_dir', (['"""多个图片目录,取消代表则选择完成"""'], {}), "('多个图片目录,取消代表则选择完成')\n", (15650, 15670), False, 'from common import gui\n'), ((15800, 15835), 'common.gui.select_file', 'gui.select_file', (['"""多个音乐文件,取消代表则选择完成"""'], {}), "('多个音乐文件,取消代表则选择完成')\n", (15815, 15835), False, 'from common import gui\n'), ((1753, 1777), 're.sub', 're.sub', (['""".mp4"""', '""""""', 'file'], {}), "('.mp4', '', file)\n", (1759, 1777), False, 'import re\n'), ((3398, 3414), 'moviepy.video.fx.speedx.speedx', 'speedx', (['c', 'speed'], {}), '(c, speed)\n', (3404, 3414), False, 'from moviepy.video.fx.speedx import speedx\n'), ((5123, 5156), 'math.fabs', 'math.fabs', (['(total - audio_duration)'], {}), '(total - audio_duration)\n', (5132, 5156), False, 'import math\n'), ((6783, 6804), 'numpy.arange', 'np.arange', (['(0)', 'nframes'], {}), '(0, nframes)\n', (6792, 6804), True, 'import numpy as np\n'), ((6936, 6972), 'common.tools.plot_list', 'tools.plot_list', (['waveData', 'audioTime'], {}), '(waveData, 
audioTime)\n', (6951, 6972), False, 'from common import tools\n'), ((7589, 7625), 'numpy.mean', 'np.mean', (['wave_data[f:f + f_duration]'], {}), '(wave_data[f:f + f_duration])\n', (7596, 7625), True, 'import numpy as np\n'), ((7676, 7697), 'numpy.append', 'np.append', (['np_time', 't'], {}), '(np_time, t)\n', (7685, 7697), True, 'import numpy as np\n'), ((7721, 7747), 'numpy.append', 'np.append', (['np_speed', 'speed'], {}), '(np_speed, speed)\n', (7730, 7747), True, 'import numpy as np\n'), ((7796, 7813), 'numpy.mean', 'np.mean', (['np_speed'], {}), '(np_speed)\n', (7803, 7813), True, 'import numpy as np\n'), ((8109, 8126), 'numpy.mean', 'np.mean', (['np_speed'], {}), '(np_speed)\n', (8116, 8126), True, 'import numpy as np\n'), ((9243, 9264), 'numpy.append', 'np.append', (['np_time', 't'], {}), '(np_time, t)\n', (9252, 9264), True, 'import numpy as np\n'), ((466, 477), 'time.time', 'time.time', ([], {}), '()\n', (475, 477), False, 'import time\n'), ((627, 650), 're.sub', 're.sub', (['"""mp4"""', '""""""', 'file'], {}), "('mp4', '', file)\n", (633, 650), False, 'import re\n'), ((6267, 6313), 'common.python_box.dir_list', 'python_box.dir_list', (['pic_dir', '"""jpg"""'], {'walk': '(True)'}), "(pic_dir, 'jpg', walk=True)\n", (6286, 6313), False, 'from common import python_box\n'), ((9370, 9390), 'common.python_box.FileSys', 'python_box.FileSys', ([], {}), '()\n', (9388, 9390), False, 'from common import python_box\n'), ((1248, 1272), 're.sub', 're.sub', (['""".mp3"""', '""""""', 'file'], {}), "('.mp3', '', file)\n", (1254, 1272), False, 'import re\n'), ((14796, 14838), 'moviepy.video.fx.speedx.speedx', 'speedx', (['c', '(video_duration / audio_duration)'], {}), '(c, video_duration / audio_duration)\n', (14802, 14838), False, 'from moviepy.video.fx.speedx import speedx\n'), ((13614, 13634), 'common.python_box.FileSys', 'python_box.FileSys', ([], {}), '()\n', (13632, 13634), False, 'from common import python_box\n'), ((13848, 13868), 'common.python_box.FileSys', 
'python_box.FileSys', ([], {}), '()\n', (13866, 13868), False, 'from common import python_box\n')]
|
import decimal
import math
import warnings
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from decimal import Decimal, localcontext
from itertools import repeat
from pathlib import Path
from time import time
from typing import List, Optional, Union
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
from .config import get_global_config
from .types import FilenameType
def python_hash(SSN: int) -> float:
    """
    A pythonic implementation of COBOL code using floating-point arithmetic. Note that
    this will differ ever-so-slightly from the cobol_hash due to the differing rounding
    conventions.

    Fix: the original annotation claimed ``-> int``, but the function returns
    a float in [0, 1) (the top 10 decimal digits of L_SD / C_M).
    """
    # Constants determined by DoIT
    C_Q = 127773  # 3^2 * 14197
    C_A = 16807  # 7^5
    C_R = 2836  # 2^2 * 709
    C_M = 2147483647  # prime (In fact, 2^{2^5 - 1} - 1, double Mersenne)

    # Schrage-style step: split the seed by quotient/remainder mod C_Q, then
    # recombine.  (N.B. C_A is a power of 7 whereas C_R is almost prime.)
    w_hi, w_lo = divmod(SSN, C_Q)
    l_sd = C_A * w_lo - C_R * w_hi
    # l_sd lies in (-C_M, C_M) for any valid SSN, so a single correction
    # keeps the result in [0, C_M).
    if l_sd <= 0:
        warnings.warn("L_SD is negative")
        l_sd += C_M
    # Truncate (not round) to the top 10 decimal digits of the mantissa.
    return math.floor(l_sd / C_M * 1e10) / 1e10
def cobol_hash(SSN: int) -> float:
    """
    A python implementation of COBOL's fixed-point arithmetic.

    Returns the L_RAND value as a ``Decimal`` truncated to 10 decimal
    places. (The ``float`` annotation is historical; callers use the value
    numerically either way.)
    """
    # Bug fix: the original set ``ctx.rounding = decimal.ROUND_DOWN`` but the
    # ``decimal`` module itself was never imported (only ``Decimal`` and
    # ``localcontext`` were), so every call raised NameError.
    from decimal import ROUND_DOWN

    with localcontext() as ctx:
        # Constants determined by DoIT: 10 significant digits with
        # truncating rounding to mirror COBOL fixed-point behaviour.
        ctx.prec = 10
        ctx.rounding = ROUND_DOWN
        L_SD = Decimal(SSN)
        C_A = Decimal("0000016807")  # 7^5
        C_M = Decimal("2147483647")  # prime, 2^31 - 1
        C_Q = Decimal("0000127773")
        C_R = Decimal("0000002836")
        # Translated: quantizing to 1E0 truncates the quotient.
        W_HI = (L_SD / C_Q).quantize(Decimal("1E0"))  # L_SD // C_Q
        W_LO = L_SD - C_Q * W_HI  # L_SD % C_Q
        L_SD = C_A * W_LO - C_R * W_HI
        if L_SD <= 0:
            L_SD += C_M
        # Truncate to the top 10 digits of the mantissa.
        L_RAND = (L_SD / C_M).quantize(Decimal("1E-10"))
        if L_RAND == 0:
            warnings.warn("L_RAND is zero")
            L_SD += C_M  # NOTE: dead store — L_SD is not read again
        return L_RAND
def generate_outcomes(
    input_list: Optional[List[int]] = None,
    process_type: str = "cobol",
    low: Optional[int] = None,
    high: Optional[int] = None,
    size: Optional[int] = None,
    all_values: Optional[bool] = False,
    generate_rand_whole: Optional[bool] = False,
) -> pd.DataFrame:
    """
    Helper function that generates L_RAND outcomes with the option for
    pythonic or cobol implementations.

    Args:
        input_list: explicit SSNs to hash; when omitted a pool is generated.
        process_type: "python" (floating-point) or "cobol" (fixed-point).
        low, high, size: pool parameters — a seeded random sample when
            ``all_values`` is falsy, the full ``[low, high)`` range otherwise.
        all_values: hash every SSN in ``[low, high)`` instead of sampling.
        generate_rand_whole: also emit L_RAND scaled by 1e10.

    Returns:
        DataFrame with columns ``SSN``, ``L_RAND`` and, optionally,
        ``L_RAND_WHOLE``.

    Raises:
        ValueError: on an unsupported ``process_type`` (previously this fell
            through and crashed with ``NameError`` on ``ssn_outcomes``).
    """
    if input_list is not None:
        # Normalize to an ndarray so the cobol branch's ``astype(str)``
        # also works when callers pass a plain Python list.
        ssn_pool = np.asarray(input_list)
    elif not all_values:
        # Setting seed to ensure replicability; sort to verify monotonicity
        # of the SSN -> L_RAND relationship.
        np.random.seed(0)
        ssn_pool = np.random.randint(low=low, high=high, size=size)
        ssn_pool.sort()
    else:
        ssn_pool = np.arange(low, high)
    # Apply the selected random number generator to the SSN pool.
    if process_type == "python":
        with ThreadPoolExecutor() as executor:
            ssn_outcomes = list(
                tqdm(executor.map(python_hash, ssn_pool), total=len(ssn_pool))
            )
    elif process_type == "cobol":
        with ThreadPoolExecutor() as executor:
            ssn_outcomes = list(
                tqdm(
                    executor.map(cobol_hash, ssn_pool.astype(str)), total=len(ssn_pool)
                )
            )
    else:
        raise ValueError(f"Unsupported process_type: {process_type!r}")
    df = pd.DataFrame(ssn_outcomes, columns=["L_RAND"])
    final_df = pd.concat([pd.Series(ssn_pool, name="SSN"), df], axis=1)
    if generate_rand_whole:
        final_df["L_RAND_WHOLE"] = final_df["L_RAND"] * 10_000_000_000
    return final_df
def chunk_using_generators(lst, n):
    """Yield successive ``n``-sized slices of ``lst`` (last may be shorter)."""
    total = len(lst)
    start = 0
    while start < total:
        yield lst[start : start + n]
        start += n
def generate_all_L_RAND(
    filepath: Optional[FilenameType] = None,
    filename: FilenameType = "ssn_output.csv.gz",
    ssn_min: int = 1_01_0001,
    ssn_max: int = 899_99_9999,
    chunksize: int = 10_0000,
):
    """
    A function that calculates L_RAND values for all possible SSN from 001_01_0001 to 899_99_9999.
    This exercise was necessary to ensure that the maximum value attainable from all reasonable SSNs
    would result in an L_RAND value less than 9_999_999_999.

    Writes a gzip-compressed CSV (columns SSN, L_RAND, sorted by L_RAND
    descending) to ``filepath / filename``.
    """
    if filepath is None:
        # default to the DATA_DIR / reference
        filepath = Path(get_global_config().DATA_DIR) / "reference"
    # Total list of valid SSNs
    # NOTE(review): np.arange excludes the endpoint, so ssn_max itself is
    # never hashed — confirm that is intended.
    list_of_ssn = np.arange(ssn_min, ssn_max)
    # Divide the total list into manageable chunks
    list_of_list_of_ssn = list(chunk_using_generators(list_of_ssn, chunksize))
    # Process each list using COBOL
    # Each worker runs generate_outcomes(chunk, "cobol") and returns a
    # DataFrame; repeat() supplies the constant second argument.
    with ProcessPoolExecutor() as executor:
        ssn_outcomes = list(
            tqdm(
                executor.map(generate_outcomes, list_of_list_of_ssn, repeat("cobol")),
                total=len(list_of_list_of_ssn),
            )
        )
    # Output data into a gzip dataframe.
    pd.DataFrame(pd.concat(ssn_outcomes)).sort_values(
        by="L_RAND", ascending=False
    ).reset_index(drop=True).to_csv(
        filepath / filename, compression="gzip", index=False
    )
def add_ms_to_seed(ssn: int, ms: int = None):
    """
    A good-enough solution to resolve local-randomization issues with the
    current DoIT algorithm.

    Adds the current microsecond count (or an explicit ``ms``) to ``ssn``
    and reverses the decimal digits of the sum.
    """
    if ms is None:
        ms = int(round(time(), 6) * 1e6) % 1_000_000
    reversed_digits = str(ssn + ms)[::-1]
    return int(reversed_digits)
|
[
"pandas.DataFrame",
"numpy.random.seed",
"decimal.Decimal",
"concurrent.futures.ProcessPoolExecutor",
"math.floor",
"time.time",
"numpy.random.randint",
"numpy.arange",
"decimal.localcontext",
"pandas.Series",
"warnings.warn",
"concurrent.futures.ThreadPoolExecutor",
"pandas.concat",
"itertools.repeat"
] |
[((4358, 4404), 'pandas.DataFrame', 'pd.DataFrame', (['ssn_outcomes'], {'columns': "['L_RAND']"}), "(ssn_outcomes, columns=['L_RAND'])\n", (4370, 4404), True, 'import pandas as pd\n'), ((5383, 5410), 'numpy.arange', 'np.arange', (['ssn_min', 'ssn_max'], {}), '(ssn_min, ssn_max)\n', (5392, 5410), True, 'import numpy as np\n'), ((1939, 1972), 'warnings.warn', 'warnings.warn', (['"""L_SD is negative"""'], {}), "('L_SD is negative')\n", (1952, 1972), False, 'import warnings\n'), ((2129, 2167), 'math.floor', 'math.floor', (['(L_SD / C_M * 10000000000.0)'], {}), '(L_SD / C_M * 10000000000.0)\n', (2139, 2167), False, 'import math\n'), ((2309, 2323), 'decimal.localcontext', 'localcontext', ([], {}), '()\n', (2321, 2323), False, 'from decimal import Decimal, localcontext\n'), ((2451, 2463), 'decimal.Decimal', 'Decimal', (['SSN'], {}), '(SSN)\n', (2458, 2463), False, 'from decimal import Decimal, localcontext\n'), ((2478, 2499), 'decimal.Decimal', 'Decimal', (['"""0000016807"""'], {}), "('0000016807')\n", (2485, 2499), False, 'from decimal import Decimal, localcontext\n'), ((2514, 2535), 'decimal.Decimal', 'Decimal', (['"""2147483647"""'], {}), "('2147483647')\n", (2521, 2535), False, 'from decimal import Decimal, localcontext\n'), ((2550, 2571), 'decimal.Decimal', 'Decimal', (['"""0000127773"""'], {}), "('0000127773')\n", (2557, 2571), False, 'from decimal import Decimal, localcontext\n'), ((2586, 2607), 'decimal.Decimal', 'Decimal', (['"""0000002836"""'], {}), "('0000002836')\n", (2593, 2607), False, 'from decimal import Decimal, localcontext\n'), ((5586, 5607), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {}), '()\n', (5605, 5607), False, 'from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor\n'), ((2667, 2681), 'decimal.Decimal', 'Decimal', (['"""1E0"""'], {}), "('1E0')\n", (2674, 2681), False, 'from decimal import Decimal, localcontext\n'), ((2870, 2886), 'decimal.Decimal', 'Decimal', (['"""1E-10"""'], {}), "('1E-10')\n", (2877, 
2886), False, 'from decimal import Decimal, localcontext\n'), ((2925, 2956), 'warnings.warn', 'warnings.warn', (['"""L_RAND is zero"""'], {}), "('L_RAND is zero')\n", (2938, 2956), False, 'import warnings\n'), ((3667, 3684), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3681, 3684), True, 'import numpy as np\n'), ((3704, 3752), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'low', 'high': 'high', 'size': 'size'}), '(low=low, high=high, size=size)\n', (3721, 3752), True, 'import numpy as np\n'), ((3933, 3953), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {}), '()\n', (3951, 3953), False, 'from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor\n'), ((4139, 4159), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {}), '()\n', (4157, 4159), False, 'from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor\n'), ((4431, 4462), 'pandas.Series', 'pd.Series', (['ssn_pool'], {'name': '"""SSN"""'}), "(ssn_pool, name='SSN')\n", (4440, 4462), True, 'import pandas as pd\n'), ((3817, 3837), 'numpy.arange', 'np.arange', (['low', 'high'], {}), '(low, high)\n', (3826, 3837), True, 'import numpy as np\n'), ((5737, 5752), 'itertools.repeat', 'repeat', (['"""cobol"""'], {}), "('cobol')\n", (5743, 5752), False, 'from itertools import repeat\n'), ((6269, 6275), 'time.time', 'time', ([], {}), '()\n', (6273, 6275), False, 'from time import time\n'), ((5886, 5909), 'pandas.concat', 'pd.concat', (['ssn_outcomes'], {}), '(ssn_outcomes)\n', (5895, 5909), True, 'import pandas as pd\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021-2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration tuning module."""
from typing import Any, Dict, List, Optional, Union
from neural_compressor.ux.utils.exceptions import ClientErrorException
from neural_compressor.ux.utils.json_serializer import JsonSerializer
from neural_compressor.ux.utils.utils import (
parse_bool_value,
parse_to_float_list,
parse_to_string_list,
)
class Strategy(JsonSerializer):
    """Configuration Strategy class."""

    def __init__(self, data: Optional[Dict[str, Any]] = None) -> None:
        """Initialize configuration Strategy class.

        Args:
            data: optional configuration dictionary; an empty configuration
                is used when omitted.
        """
        super().__init__()
        # Avoid the shared-mutable-default pitfall of ``data={}``.
        if data is None:
            data = {}
        # [Required] One of neural_compressor.strategy.STRATEGIES
        self.name: str = data.get("name", "basic")
        self.sigopt_api_token: Optional[str] = data.get("sigopt_api_token", None)
        self.accuracy_weight: Optional[float] = data.get("accuracy_weight", None)
        self.latency_weight: Optional[float] = data.get("latency_weight", None)
class MultiObjectives(JsonSerializer):
    """Configuration MultiObjectives class."""

    def __init__(self, data: Optional[Dict[str, Any]] = None) -> None:
        """Initialize configuration MultiObjectives class.

        Args:
            data: optional configuration dictionary; an empty configuration
                is used when omitted.
        """
        super().__init__()
        # Avoid the shared-mutable-default pitfall of ``data={}``.
        if data is None:
            data = {}
        self._objective: List[str] = data.get("objective", [])
        self._weight: List[float] = data.get("weight", [])

    @property
    def objective(self) -> List[str]:
        """Get objectives."""
        return self._objective

    @objective.setter
    def objective(self, value: Union[None, str, List[str]]) -> None:
        """Set objectives value."""
        self._objective = parse_to_string_list(value)

    @property
    def weight(self) -> List[float]:
        """Get weights."""
        return self._weight

    @weight.setter
    def weight(self, value: Union[None, float, List[float]]) -> None:
        """Set weights value."""
        self._weight = parse_to_float_list(value)
class AccCriterion(JsonSerializer):
    """Configuration AccCriterion class."""

    def __init__(self, data: Optional[Dict[str, Any]] = None) -> None:
        """Initialize configuration AccCriterion class.

        Args:
            data: optional configuration dictionary; an empty configuration
                is used when omitted.
        """
        super().__init__()
        # Avoid the shared-mutable-default pitfall of ``data={}``.
        if data is None:
            data = {}
        self.relative: Optional[float] = data.get(
            "relative",
            None,
        )  # [Optional] (INT8-FP32)/FP32
        self.absolute: Optional[float] = data.get(
            "absolute",
            None,
        )  # [Optional] INT8-FP32
        # Set default accuracy criterion to relative
        if self.relative is None and self.absolute is None:
            self.relative = 0.1
class ExitPolicy(JsonSerializer):
    """Configuration ExitPolicy class."""

    def __init__(self, data: Optional[Dict[str, Any]] = None) -> None:
        """Initialize Configuration ExitPolicy class.

        Args:
            data: optional configuration dictionary; an empty configuration
                is used when omitted.
        """
        super().__init__()
        # Avoid the shared-mutable-default pitfall of ``data={}``.
        if data is None:
            data = {}
        self.timeout: Optional[int] = data.get("timeout", None)
        self.max_trials: Optional[int] = data.get("max_trials", None)
        self.performance_only: Optional[bool] = data.get("performance_only", None)
class Workspace(JsonSerializer):
    """Configuration Workspace class."""

    def __init__(self, data: Optional[Dict[str, Any]] = None) -> None:
        """Initialize Configuration Workspace class.

        Args:
            data: optional configuration dictionary; an empty configuration
                is used when omitted.
        """
        super().__init__()
        # Avoid the shared-mutable-default pitfall of ``data={}``.
        if data is None:
            data = {}
        self.path: Optional[str] = data.get("path", None)  # [Optional]
        self.resume: Optional[str] = data.get("resume", None)  # [Optional]
class Tuning(JsonSerializer):
    """Configuration Tuning class."""

    def __init__(self, data: Optional[Dict[str, Any]] = None) -> None:
        """Initialize Configuration Tuning class.

        Args:
            data: optional configuration dictionary; an empty configuration
                is used when omitted.
        """
        super().__init__()
        # Avoid the shared-mutable-default pitfall of ``data={}``.
        if data is None:
            data = {}
        self.strategy: Strategy = Strategy()
        if data.get("strategy"):
            self.strategy = Strategy(data.get("strategy", {}))
        self.accuracy_criterion: AccCriterion = AccCriterion(
            data.get("accuracy_criterion", {}),
        )
        self.multi_objectives: Optional[MultiObjectives] = None
        if data.get("multi_objectives"):
            self.multi_objectives = MultiObjectives(data.get("multi_objectives", {}))
        self.exit_policy: Optional[ExitPolicy] = None
        if data.get("exit_policy"):
            self.exit_policy = ExitPolicy(data.get("exit_policy", {}))
        self.random_seed: Optional[int] = data.get("random_seed", None)
        self.tensorboard: Optional[bool] = data.get("tensorboard", None)
        self.workspace: Optional[Workspace] = None
        if data.get("workspace"):
            self.workspace = Workspace(data.get("workspace", {}))

    def set_timeout(self, timeout: int) -> None:
        """Update tuning timeout in config.

        Raises:
            ClientErrorException: if timeout is not a non negative integer.
        """
        try:
            timeout = int(timeout)
            if timeout < 0:
                raise ValueError
        except ValueError:
            raise ClientErrorException(
                "The timeout value is not valid. " "Timeout should be non negative integer.",
            )
        if self.exit_policy:
            self.exit_policy.timeout = timeout
        else:
            self.exit_policy = ExitPolicy({"timeout": timeout})

    def set_max_trials(self, max_trials: int) -> None:
        """Update max tuning trials in config.

        Raises:
            ClientErrorException: if max_trials is not a non negative integer.
        """
        try:
            max_trials = int(max_trials)
            if max_trials < 0:
                raise ValueError
        except ValueError:
            raise ClientErrorException(
                "The max trials value is not valid. " "Max trials should be non negative integer.",
            )
        if self.exit_policy:
            self.exit_policy.max_trials = max_trials
        else:
            self.exit_policy = ExitPolicy({"max_trials": max_trials})

    def set_performance_only(self, performance_only: Any) -> None:
        """Update performance only flag in config.

        Raises:
            ClientErrorException: if the flag cannot be parsed as a boolean.
        """
        try:
            performance_only = parse_bool_value(performance_only)
        except ValueError:
            # Message typo fixed: "Performance_ony" -> "Performance_only".
            raise ClientErrorException(
                "The performance_only flag value is not valid. "
                "Performance_only should be a boolean.",
            )
        if self.exit_policy:
            self.exit_policy.performance_only = performance_only
        else:
            self.exit_policy = ExitPolicy({"performance_only": performance_only})

    def set_random_seed(self, random_seed: int) -> None:
        """Update random seed value in config.

        Raises:
            ClientErrorException: if random_seed is not an integer.
        """
        try:
            random_seed = int(random_seed)
        except ValueError:
            raise ClientErrorException(
                "The random seed value is not valid. " "Random seed should be an integer.",
            )
        self.random_seed = random_seed

    def set_workspace(self, path: str) -> None:
        """Update tuning workspace path in config."""
        if self.workspace is None:
            self.workspace = Workspace()
        self.workspace.path = path
|
[
"neural_compressor.ux.utils.utils.parse_bool_value",
"neural_compressor.ux.utils.exceptions.ClientErrorException",
"neural_compressor.ux.utils.utils.parse_to_string_list",
"neural_compressor.ux.utils.utils.parse_to_float_list"
] |
[((2165, 2192), 'neural_compressor.ux.utils.utils.parse_to_string_list', 'parse_to_string_list', (['value'], {}), '(value)\n', (2185, 2192), False, 'from neural_compressor.ux.utils.utils import parse_bool_value, parse_to_float_list, parse_to_string_list\n'), ((2446, 2472), 'neural_compressor.ux.utils.utils.parse_to_float_list', 'parse_to_float_list', (['value'], {}), '(value)\n', (2465, 2472), False, 'from neural_compressor.ux.utils.utils import parse_bool_value, parse_to_float_list, parse_to_string_list\n'), ((6318, 6352), 'neural_compressor.ux.utils.utils.parse_bool_value', 'parse_bool_value', (['performance_only'], {}), '(performance_only)\n', (6334, 6352), False, 'from neural_compressor.ux.utils.utils import parse_bool_value, parse_to_float_list, parse_to_string_list\n'), ((5297, 5397), 'neural_compressor.ux.utils.exceptions.ClientErrorException', 'ClientErrorException', (['"""The timeout value is not valid. Timeout should be non negative integer."""'], {}), "(\n 'The timeout value is not valid. Timeout should be non negative integer.')\n", (5317, 5397), False, 'from neural_compressor.ux.utils.exceptions import ClientErrorException\n'), ((5850, 5961), 'neural_compressor.ux.utils.exceptions.ClientErrorException', 'ClientErrorException', (['"""The max trials value is not valid. Max trials should be non negative integer."""'], {}), "(\n 'The max trials value is not valid. Max trials should be non negative integer.'\n )\n", (5870, 5961), False, 'from neural_compressor.ux.utils.exceptions import ClientErrorException\n'), ((6398, 6514), 'neural_compressor.ux.utils.exceptions.ClientErrorException', 'ClientErrorException', (['"""The performance_only flag value is not valid. Performance_ony should be a boolean."""'], {}), "(\n 'The performance_only flag value is not valid. 
Performance_ony should be a boolean.'\n )\n", (6418, 6514), False, 'from neural_compressor.ux.utils.exceptions import ClientErrorException\n'), ((6954, 7052), 'neural_compressor.ux.utils.exceptions.ClientErrorException', 'ClientErrorException', (['"""The random seed value is not valid. Random seed should be an integer."""'], {}), "(\n 'The random seed value is not valid. Random seed should be an integer.')\n", (6974, 7052), False, 'from neural_compressor.ux.utils.exceptions import ClientErrorException\n')]
|
import socket
import base64
from random import sample,shuffle
import pickle
import time
def name_generator(_len_ = 16, onlyText = False):
    """Return a random string of ``_len_`` distinct characters.

    With ``onlyText`` the pool is ASCII letters only; otherwise digits and
    a few special characters are mixed in as well.
    """
    letters = list("abcdefghijklmnopqrstuvwxyz") + list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    if onlyText:
        pool = letters
    else:
        pool = letters + list("!@#$%&*?") + list("0123456789")
    shuffle(pool)
    # sample() draws without replacement, so every character is unique.
    return "".join(sample(pool, _len_))
# Stress test: repeatedly connect to the chat server as a fresh client and
# send one framed message, counting successful iterations.
count = 0
print("Test Started...")
while True:
    # NOTE(review): a new socket is opened every iteration and never closed,
    # so this loop leaks file descriptors until the OS limit is reached.
    s = socket.socket()
    s.connect(("192.168.43.206",9600))
    name = name_generator(_len_ = 8, onlyText = True)
    # Handshake: pickled, base64-encoded client name, preceded by its length
    # centered in a 32-byte dash-padded header.
    ini = base64.b64encode(pickle.dumps(name))
    s.send(bytes(str(len(ini)).center(32,"-"),"utf-8"))
    s.send(ini)
    prepare_send_data = {
        "channel" : "test",
        "sender_name" : name,
        "target_name" : "SERVER",
        "data" : "Hello World"
    }
    # Payload uses the same length-prefix + base64(pickle) framing.
    prepare_for_send = base64.b64encode(pickle.dumps(prepare_send_data))
    s.send(bytes(str(len(prepare_for_send)).center(32,"-"),"utf-8"))
    s.send(prepare_for_send)
    count += 1
    print(count)
    # time.sleep(1)
# C@C/piBsKTAP9?C
|
[
"random.sample",
"random.shuffle",
"socket.socket",
"pickle.dumps"
] |
[((446, 460), 'random.shuffle', 'shuffle', (['_all_'], {}), '(_all_)\n', (453, 460), False, 'from random import sample, shuffle\n'), ((557, 572), 'socket.socket', 'socket.socket', ([], {}), '()\n', (570, 572), False, 'import socket\n'), ((480, 500), 'random.sample', 'sample', (['_all_', '_len_'], {}), '(_all_, _len_)\n', (486, 500), False, 'from random import sample, shuffle\n'), ((695, 713), 'pickle.dumps', 'pickle.dumps', (['name'], {}), '(name)\n', (707, 713), False, 'import pickle\n'), ((984, 1015), 'pickle.dumps', 'pickle.dumps', (['prepare_send_data'], {}), '(prepare_send_data)\n', (996, 1015), False, 'import pickle\n')]
|
#!/usr/bin/env python
# The Expat License
#
# Copyright (c) 2017, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import sys
from subprocess import Popen, PIPE
# On Python 3 the Python 2 builtins ``long`` and ``xrange`` are gone; alias
# them so the code below runs under both major versions.
if sys.version_info > (3,):
    long = int
    xrange = range
# Memo table for calc_num_chains, keyed by the comma-joined signature string;
# the empty signature (n == 1) has exactly one chain.
cache = {'': long(1)}
def factor_sig(n):
    """Return the sorted multiset of prime-factor exponents of ``n``.

    Shells out to GNU coreutils ``factor`` and parses its ``n: p p q ...``
    output, counting how many times each prime appears.
    """
    pipe = Popen(['factor', str(n)], shell=False, stdout=PIPE).stdout
    # Bug fix: on Python 3 the pipe yields bytes, and the original passed
    # them to str regex patterns, raising TypeError. Decode first.
    line = pipe.readline().decode()
    pipe.close()
    # Strip the leading "n:" echo of the input number.
    factors_s = re.sub('^[^:]*:', '', line)
    sig = {}
    # Keys are the primes as strings; values are their multiplicities.
    for x in re.findall('[0-9]+', factors_s):
        if x not in sig:
            sig[x] = 0
        sig[x] += 1
    return sorted(sig.values())
def check_factor_sig(n, good):
    """Sanity-check that factor_sig(n) matches the expected signature."""
    actual = factor_sig(n)
    if actual != good:
        print("%d is not good" % (n))
        raise BaseException
    return
# Smoke tests: 24 = 2^3 * 3, 100 = 2^2 * 5^2, 1000 = 2^3 * 5^3.
check_factor_sig(24, [1, 3])
check_factor_sig(100, [2, 2])
check_factor_sig(1000, [3, 3])
# sig must be sorted.
def real_calc_num_chains(sig):
    """Compute the chain count for a number with exponent signature ``sig``.

    ``helper`` walks the exponents: for each ``sig[x]`` it chooses how much
    to subtract (``c``), keeping the surviving non-zero exponents in
    ``so_far``.  At the end it sums ``calc_num_chains`` over every reduced
    signature except the unreduced one (``all_zeros`` excludes the case
    where no exponent was lowered, i.e. the number itself).
    """
    def helper(so_far, x, all_zeros):
        # so_far: reduced exponents collected so far (zeros dropped)
        # x: index of the exponent currently being reduced
        # all_zeros: True while every choice so far was c == 0
        if x == len(sig):
            return 0 if all_zeros else calc_num_chains(sorted(so_far))
        ret = 0
        n = sig[x]
        for c in xrange(n+1):
            ret += helper(so_far + [n - c] if c < n else so_far,
                          x+1,
                          (all_zeros and (c == 0)))
        return ret
    return helper([], 0, True)
# sig must be sorted.
def calc_num_chains(sig):
    """Memoized front-end for real_calc_num_chains (keyed by the signature)."""
    key = ','.join(str(part) for part in sig)
    if key not in cache:
        cache[key] = real_calc_num_chains(sig)
    return cache[key]
def calc_g(n):
    """Return the chain count of ``n`` based on its factor signature."""
    sig = factor_sig(n)
    return calc_num_chains(sig)
def check_num_chains(n, good):
    """Sanity-check that calc_g(n) matches the expected chain count."""
    actual = calc_g(n)
    if actual != good:
        print("calc_num_chains %d is not good" % (n))
        raise BaseException
    return
# Sanity values for the chain-count function.
check_num_chains(12, 8)
check_num_chains(48, 48)
check_num_chains(120, 132)
# Search bound: 10^16, built as a string to stay a ``long`` on Python 2.
LIM = long('1' + ('0' * 16))
# Fixed points found so far; 1 is one trivially (g(1) = 1).
found = set()
found.add(long(1))
def iter_over_sigs(length):
    """Search all non-increasing exponent signatures of the given length.

    For each signature whose chain count ``ret`` stays within ``LIM``, record
    ``ret`` in ``found`` when it is a fixed point (``ret == calc_g(ret)``).
    Returns False when even the cheapest signature ``[1] * length`` exceeds
    ``LIM``, signalling the caller to stop growing ``length``.
    """
    if calc_num_chains([1] * length) > LIM:
        return False
    def helper(so_far):
        # so_far is a non-increasing prefix of exponents (largest first);
        # calc_num_chains expects ascending order, hence the reversed().
        if len(so_far) == length:
            ret = calc_num_chains(list(reversed(so_far)))
            if ret > LIM:
                return False
            if (ret == calc_g(ret)):
                found.add(ret)
            return True
        # Extend with exponents x <= so_far[-1]; when even x == 1 blows the
        # limit, report False so the caller stops raising earlier exponents.
        for x in xrange(1, so_far[-1]+1):
            if not helper(so_far + [x]):
                if x == 1:
                    return False
        return True
    first = 1
    # Grow the leading exponent until the minimal completion exceeds LIM.
    while True:
        if not helper([first]):
            break
        first += 1
    return True
# Try ever longer signatures until none of that length fits under LIM, then
# report the sum of all fixed points found.
length = 1
while (iter_over_sigs(length)):
    print("Finished len = %d" % (length))
    length += 1
print("Result = %d" % (sum(found)))
|
[
"re.findall"
] |
[((1464, 1495), 're.findall', 're.findall', (['"""[0-9]+"""', 'factors_s'], {}), "('[0-9]+', factors_s)\n", (1474, 1495), False, 'import re\n')]
|
# Once for All: Train One Network and Specialize it for Efficient Deployment
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# International Conference on Learning Representations (ICLR), 2020.
import os
import torch
import argparse
from ofa.stereo_matching.data_providers.stereo import StereoDataProvider
from ofa.stereo_matching.run_manager import StereoRunConfig, RunManager
from ofa.stereo_matching.elastic_nn.networks.ofa_aanet import OFAAANet
from ofa.stereo_matching.elastic_nn.training.progressive_shrinking import load_models
import numpy as np
from ofa.utils.pytorch_utils import get_net_info
# Command-line options for the sub-network extraction / latency benchmark.
parser = argparse.ArgumentParser()
parser.add_argument(
    '-g',
    '--gpu',
    help='The gpu(s) to use',
    type=str,
    default='0')
parser.add_argument(
    '-n',
    '--net',
    metavar='OFAAANet',
    default='ofa_aanet',
    choices=['ofa_aanet_d234_e346_k357_w1.0',
             'ofa_aanet'],
    help='OFA AANet networks')
args = parser.parse_args()
# 'all' expands to every visible CUDA device; otherwise parse a comma list.
if args.gpu == 'all':
    device_list = range(torch.cuda.device_count())
    args.gpu = ','.join(str(_) for _ in device_list)
else:
    device_list = [int(_) for _ in args.gpu.split(',')]
# Restrict CUDA to the selected devices before any GPU work happens.
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
# Elastic once-for-all AANet supernet covering kernel sizes {3,5,7}, expand
# ratios {2,4,6,8}, depths {2,3,4} and scales {2,3,4}.
ofa_network = OFAAANet(ks_list=[3,5,7], expand_ratio_list=[2,4,6,8], depth_list=[2,3,4], scale_list=[2,3,4])
model_file = 'ofa_stereo_checkpoints/ofa_stereo_D234_E2468_K357_S4'
init = torch.load(model_file, map_location='cpu')
model_dict = init['state_dict']
ofa_network.load_state_dict(model_dict)
""" Randomly sample a sub-network,
you can also manually set the sub-network using:
ofa_network.set_active_subnet(ks=7, e=6, d=4)
"""
#ofa_network.sample_active_subnet()
#ofa_network.set_max_net()
# Select the largest sub-network configuration and extract it with the
# supernet's trained weights preserved.
d = 4
e = 8
ks = 7
s = 4
ofa_network.set_active_subnet(ks=ks, d=d, e=e, s=s)
subnet = ofa_network.get_active_subnet(preserve_weight=True)
#subnet = ofa_network
save_path = "ofa_stereo_checkpoints/aanet_D%d_E%d_K%d_S%d" % (d, e, ks, s)
torch.save(subnet.state_dict(), save_path)
net = subnet
net.eval()
net = net.cuda()
#net = net.get_tensorrt_model()
#torch.save(net.state_dict(), 'models/mobilefadnet_trt.pth')
get_net_info(net, input_shape=(3, 540, 960))
# fake input data
dummy_left = torch.randn(1, 3, 576, 960, dtype=torch.float).cuda()
dummy_right = torch.randn(1, 3, 576, 960, dtype=torch.float).cuda()
# INIT LOGGERS
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
repetitions = 30
timings=np.zeros((repetitions,1))
#GPU-WARM-UP
for _ in range(10):
    _ = net(dummy_left, dummy_right)
# MEASURE PERFORMANCE
with torch.no_grad():
    # Iterations with rep < 0 are extra warm-up and are not recorded.
    for rep in range(-3, repetitions):
        starter.record()
        _ = net(dummy_left, dummy_right)
        ender.record()
        # WAIT FOR GPU SYNC
        torch.cuda.synchronize()
        if rep >= 0:
            curr_time = starter.elapsed_time(ender)
            timings[rep] = curr_time
            print(rep, curr_time)
# Mean/stddev of the measured latencies (CUDA events report milliseconds).
mean_syn = np.sum(timings) / repetitions
std_syn = np.std(timings)
print(mean_syn)
|
[
"ofa.stereo_matching.elastic_nn.networks.ofa_aanet.OFAAANet",
"torch.cuda.synchronize",
"torch.cuda.Event",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.std",
"torch.load",
"numpy.zeros",
"torch.randn",
"torch.cuda.device_count",
"ofa.utils.pytorch_utils.get_net_info",
"torch.no_grad"
] |
[((608, 633), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (631, 633), False, 'import argparse\n'), ((1213, 1321), 'ofa.stereo_matching.elastic_nn.networks.ofa_aanet.OFAAANet', 'OFAAANet', ([], {'ks_list': '[3, 5, 7]', 'expand_ratio_list': '[2, 4, 6, 8]', 'depth_list': '[2, 3, 4]', 'scale_list': '[2, 3, 4]'}), '(ks_list=[3, 5, 7], expand_ratio_list=[2, 4, 6, 8], depth_list=[2, \n 3, 4], scale_list=[2, 3, 4])\n', (1221, 1321), False, 'from ofa.stereo_matching.elastic_nn.networks.ofa_aanet import OFAAANet\n'), ((1384, 1426), 'torch.load', 'torch.load', (['model_file'], {'map_location': '"""cpu"""'}), "(model_file, map_location='cpu')\n", (1394, 1426), False, 'import torch\n'), ((2125, 2169), 'ofa.utils.pytorch_utils.get_net_info', 'get_net_info', (['net'], {'input_shape': '(3, 540, 960)'}), '(net, input_shape=(3, 540, 960))\n', (2137, 2169), False, 'from ofa.utils.pytorch_utils import get_net_info\n'), ((2457, 2483), 'numpy.zeros', 'np.zeros', (['(repetitions, 1)'], {}), '((repetitions, 1))\n', (2465, 2483), True, 'import numpy as np\n'), ((2986, 3001), 'numpy.std', 'np.std', (['timings'], {}), '(timings)\n', (2992, 3001), True, 'import numpy as np\n'), ((2357, 2393), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (2373, 2393), False, 'import torch\n'), ((2395, 2431), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (2411, 2431), False, 'import torch\n'), ((2584, 2599), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2597, 2599), False, 'import torch\n'), ((2946, 2961), 'numpy.sum', 'np.sum', (['timings'], {}), '(timings)\n', (2952, 2961), True, 'import numpy as np\n'), ((1010, 1035), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1033, 1035), False, 'import torch\n'), ((2202, 2248), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(576)', '(960)'], {'dtype': 'torch.float'}), '(1, 3, 576, 960, 
dtype=torch.float)\n', (2213, 2248), False, 'import torch\n'), ((2270, 2316), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(576)', '(960)'], {'dtype': 'torch.float'}), '(1, 3, 576, 960, dtype=torch.float)\n', (2281, 2316), False, 'import torch\n'), ((2765, 2789), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (2787, 2789), False, 'import torch\n')]
|
import pytest
from dddpy.domain.book import Book, Isbn
class TestBook:
    """Unit tests for the Book entity."""

    TITLE = "Domain-Driven Design: Tackling Complexity in the Heart of Softwares"

    def _make_book(self, id="book_01", read_page=0):
        """Build a Book with the canonical fixture values (deduplicates the
        construction boilerplate repeated across tests)."""
        return Book(
            id=id,
            isbn=Isbn("978-0321125217"),
            title=self.TITLE,
            page=560,
            read_page=read_page,
        )

    def test_constructor_should_create_instance(self):
        """The constructor stores all fields and defaults read_page to 0."""
        # Built inline (not via _make_book) so the read_page default is
        # actually exercised.
        book = Book(
            id="book_01",
            isbn=Isbn("978-0321125217"),
            title=self.TITLE,
            page=560,
        )
        assert book.id == "book_01"
        assert book.isbn == Isbn("978-0321125217")
        assert book.title == self.TITLE
        assert book.page == 560
        assert book.read_page == 0

    def test_book_entity_should_be_identified_by_id(self):
        """Entity equality is based on id alone, not on mutable state."""
        book_1 = self._make_book(id="book_01", read_page=50)
        book_2 = self._make_book(id="book_01", read_page=120)
        book_3 = self._make_book(id="book_02", read_page=50)
        assert book_1 == book_2
        assert book_1 != book_3

    @pytest.mark.parametrize(
        "read_page",
        [
            (0),
            (1),
            (320),
        ],
    )
    def test_read_page_setter_should_update_value(self, read_page):
        """The read_page setter stores the given progress value."""
        book = self._make_book()
        book.read_page = read_page
        assert book.read_page == read_page

    @pytest.mark.parametrize(
        "read_page, expected",
        [
            (0, False),
            (559, False),
            (560, True),
        ],
    )
    def test_is_already_read_should_true_when_read_page_has_reached_last_page(
        self, read_page, expected
    ):
        """is_already_read is True only once read_page reaches page."""
        book = self._make_book()
        book.read_page = read_page
        assert book.is_already_read() == expected
|
[
"pytest.mark.parametrize",
"dddpy.domain.book.Isbn"
] |
[((1473, 1522), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""read_page"""', '[0, 1, 320]'], {}), "('read_page', [0, 1, 320])\n", (1496, 1522), False, 'import pytest\n'), ((1962, 2054), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""read_page, expected"""', '[(0, False), (559, False), (560, True)]'], {}), "('read_page, expected', [(0, False), (559, False), (\n 560, True)])\n", (1985, 2054), False, 'import pytest\n'), ((403, 425), 'dddpy.domain.book.Isbn', 'Isbn', (['"""978-0321125217"""'], {}), "('978-0321125217')\n", (407, 425), False, 'from dddpy.domain.book import Book, Isbn\n'), ((193, 215), 'dddpy.domain.book.Isbn', 'Isbn', (['"""978-0321125217"""'], {}), "('978-0321125217')\n", (197, 215), False, 'from dddpy.domain.book import Book, Isbn\n'), ((754, 776), 'dddpy.domain.book.Isbn', 'Isbn', (['"""978-0321125217"""'], {}), "('978-0321125217')\n", (758, 776), False, 'from dddpy.domain.book import Book, Isbn\n'), ((992, 1014), 'dddpy.domain.book.Isbn', 'Isbn', (['"""978-0321125217"""'], {}), "('978-0321125217')\n", (996, 1014), False, 'from dddpy.domain.book import Book, Isbn\n'), ((1231, 1253), 'dddpy.domain.book.Isbn', 'Isbn', (['"""978-0321125217"""'], {}), "('978-0321125217')\n", (1235, 1253), False, 'from dddpy.domain.book import Book, Isbn\n'), ((1731, 1753), 'dddpy.domain.book.Isbn', 'Isbn', (['"""978-0321125217"""'], {}), "('978-0321125217')\n", (1735, 1753), False, 'from dddpy.domain.book import Book, Isbn\n'), ((2304, 2326), 'dddpy.domain.book.Isbn', 'Isbn', (['"""978-0321125217"""'], {}), "('978-0321125217')\n", (2308, 2326), False, 'from dddpy.domain.book import Book, Isbn\n')]
|
import os
import sys
from PIL import Image
import numpy as np
import random
import matplotlib.pyplot as plt
size_image = (256, 256)
class LSB:
    """4-bit least-significant-bit steganography for RGB images.

    Each merged pixel keeps the cover pixel's 4 high bits and stores the
    secret pixel's 4 high bits in its 4 low bits.
    """

    def int2bin(self, image):
        """Convert an (r, g, b) integer triple to 8-bit binary strings."""
        r, g, b = image
        return format(r, '08b'), format(g, '08b'), format(b, '08b')

    def bin2int(self, image):
        """Convert an (r, g, b) triple of binary strings back to integers."""
        r_bits, g_bits, b_bits = image
        return int(r_bits, 2), int(g_bits, 2), int(b_bits, 2)

    def encryption(self, original, secret):
        """Hide *secret* inside *original* and return the merged image."""
        cover_px = original.load()
        secret_px = secret.load()
        merged = Image.new(original.mode, original.size)
        merged_px = merged.load()
        width, height = size_image
        for col in range(width):
            for row in range(height):
                r1, g1, b1 = self.int2bin(cover_px[col, row])
                r2, g2, b2 = self.int2bin(secret_px[col, row])
                # High nibble of the cover followed by the secret's high nibble.
                merged_px[col, row] = self.bin2int(
                    (r1[:4] + r2[:4], g1[:4] + g2[:4], b1[:4] + b2[:4])
                )
        return merged

    def decryption(self, image):
        """Recover the hidden image from a merged image."""
        merged_px = image.load()
        revealed = Image.new(image.mode, image.size)
        revealed_px = revealed.load()
        width, height = size_image
        for col in range(width):
            for row in range(height):
                r, g, b = self.int2bin(merged_px[col, row])
                # The low nibble holds the secret's high nibble; pad low bits
                # with zeros.
                revealed_px[col, row] = self.bin2int(
                    (r[4:] + '0000', g[4:] + '0000', b[4:] + '0000')
                )
        return revealed
if __name__ == '__main__':
    # Load every test image, resized to the fixed working resolution.
    test_images = []
    for imgnames in os.listdir("./images_test/"):
        test_images.append(Image.open("./images_test/" + imgnames).resize(size_image, Image.ANTIALIAS))
    np.random.shuffle(test_images)
    lsb_implementation = LSB()
    # First 12 images act as covers, the next 12 as secrets.
    test_original = test_images[0:12]
    test_secret = test_images[12:24]
    test_merge = []
    test_reveal = []
    for i in range(12):
        test_merge.append(lsb_implementation.encryption(test_original[i], test_secret[i]))
        test_reveal.append(lsb_implementation.decryption(test_merge[-1]))
    # Number of secret and cover pairs to show.
    n = 12
    def show_image(img, n_rows, n_col, idx, gray=False, first_row=False, title=None):
        # Draw one image in a grid cell; only the first row gets a title.
        ax = plt.subplot(n_rows, n_col, idx)
        plt.imshow(img)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        if first_row:
            plt.title(title)
    # One row per pair: cover | secret | merged | revealed.
    plt.figure(figsize=(4, 12))
    for i in range(12):
        n_col = 4
        show_image(test_original[i], n, n_col, i * n_col + 1, first_row=i == 0, title='Cover')
        show_image(test_secret[i], n, n_col, i * n_col + 2, first_row=i == 0, title='Secret')
        show_image(test_merge[i], n, n_col, i * n_col + 3, first_row=i == 0, title='Merge')
        show_image(test_reveal[i], n, n_col, i * n_col + 4, first_row=i == 0, title='Reveal')
    plt.savefig('./result_1.jpg')
    plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"PIL.Image.new",
"matplotlib.pyplot.show",
"numpy.random.shuffle",
"matplotlib.pyplot.imshow",
"PIL.Image.open",
"matplotlib.pyplot.figure",
"os.listdir",
"matplotlib.pyplot.savefig"
] |
[((1580, 1608), 'os.listdir', 'os.listdir', (['"""./images_test/"""'], {}), "('./images_test/')\n", (1590, 1608), False, 'import os\n'), ((1722, 1752), 'numpy.random.shuffle', 'np.random.shuffle', (['test_images'], {}), '(test_images)\n', (1739, 1752), True, 'import numpy as np\n'), ((2470, 2497), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 12)'}), '(figsize=(4, 12))\n', (2480, 2497), True, 'import matplotlib.pyplot as plt\n'), ((2936, 2965), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./result_1.jpg"""'], {}), "('./result_1.jpg')\n", (2947, 2965), True, 'import matplotlib.pyplot as plt\n'), ((2971, 2981), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2979, 2981), True, 'import matplotlib.pyplot as plt\n'), ((626, 665), 'PIL.Image.new', 'Image.new', (['original.mode', 'original.size'], {}), '(original.mode, original.size)\n', (635, 665), False, 'from PIL import Image\n'), ((1159, 1192), 'PIL.Image.new', 'Image.new', (['image.mode', 'image.size'], {}), '(image.mode, image.size)\n', (1168, 1192), False, 'from PIL import Image\n'), ((2265, 2296), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_rows', 'n_col', 'idx'], {}), '(n_rows, n_col, idx)\n', (2276, 2296), True, 'import matplotlib.pyplot as plt\n'), ((2306, 2321), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2316, 2321), True, 'import matplotlib.pyplot as plt\n'), ((2444, 2460), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2453, 2460), True, 'import matplotlib.pyplot as plt\n'), ((1638, 1677), 'PIL.Image.open', 'Image.open', (["('./images_test/' + imgnames)"], {}), "('./images_test/' + imgnames)\n", (1648, 1677), False, 'from PIL import Image\n')]
|
import os, sys
from PIL import Image, ImageDraw, ImageFont
import random, time
import telebot
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton
from telebot import types
TELEGRAM_TOKEN = '<KEY>'
bot = telebot.TeleBot(TELEGRAM_TOKEN)
channelId = -1001390673326
user_dict = {}
msgDict = [
'اللَّهُمَّ صَلِّ عَلَى مُحَمَّدٍ وَعَلَى آلِ مُحَمَّدٍ.\nАллоҳумма солли ъалаа муҳаммадив-ва ъалаа аали муҳаммад.',
'صَلَّى اللهُ عَلَى مُحَمَّدٍ.\nСоллаллоҳу ъалаа муҳаммад.',
'صَلَّى اللهُ عَلَيْهِ وَسَلَّمَ.\nСоллаллоҳу ъалайҳи ва саллам.',
'أَللَّهُمَّ صَلِّ وَسَلِّمْ وَبَارِكْ عَلَيْهِ.\nАллоҳумма солли ва саллим ва баарик ъалайҳ.',
'اللَّهُمَّ صَلِّ عَلَى مُحَمَّدٍ وَعَلَى آلِهِ وَسَلِّمْ.\nАллоҳумма солли ъалаа муҳаммадив-ва ъалаа аалиҳий ва саллим.',
'صَلَّى اللهُ وَسَلَّمَ عَلَى نَبِيِّنَا مُحَمَّدٍ وَعَلَى آلِهِ وَأَصْحَابِهِ أَجْمَعِينَ.\nСоллаллоҳу ва саллама ъалаа набиййинаа муҳаммад, ва ъалаа аалиҳий ва асҳаабиҳий ажмаъийн.'
]
msgOne = random.choice(msgDict)
def UImgTextWriter(ext):
    """Render `ext` (upper-cased) onto a random greeting background and
    save the result as '<stem>.png', where <stem> is a random int in 30-45.

    Returns the integer stem so the caller (process_uname_step) can reopen
    the saved PNG. Backgrounds are juma01.jpg .. juma011.jpg.
    """
    IMAGES = [
        'juma01.jpg',
        'juma02.jpg',
        'juma03.jpg',
        'juma04.jpg',
        'juma05.jpg',
        'juma06.jpg',
        'juma07.jpg',
        'juma08.jpg',
        'juma09.jpg',
        'juma010.jpg',
        'juma011.jpg',
    ]
    # random.choice on a constant non-empty list cannot raise, so the old
    # try/except-with-retry around it was dead code and has been removed.
    img = random.choice(IMAGES)
    # Background image, converted to RGBA so it can be alpha-composited.
    base = Image.open(img).convert("RGBA")
    text = ext.upper()
    # Fully transparent overlay the text is drawn onto.
    txt = Image.new("RGBA", base.size, (255, 255, 255, 0))
    fnt = ImageFont.truetype("OpenSans-Italic.ttf", 40)
    d = ImageDraw.Draw(txt)
    # Draw the name at (200, 330), anchored middle-baseline.
    d.text((800 / 4, 330), text, font=fnt, fill=(231, 195, 113, 255), anchor='mb')
    out = Image.alpha_composite(base, txt)
    filename = random.randint(30, 45)
    # BUGFIX: save under the random stem that is returned; previously the
    # file was written to a fixed name, so the '<stem>.png' that the caller
    # opens never existed.
    out.save(f'{filename}.png')
    return filename
def ImgTextWriter(ext):
    """Render `ext` (upper-cased) onto a random greeting background and
    save the result as '<stem>.png', where <stem> is a random int in 1-15.

    Returns the integer stem so the caller (process_name_step) can reopen
    the saved PNG. Backgrounds are juma1.jpg .. juma11.jpg.
    """
    IMAGES = [
        'juma1.jpg',
        'juma2.jpg',
        'juma3.jpg',
        'juma4.jpg',
        'juma5.jpg',
        'juma6.jpg',
        'juma7.jpg',
        'juma8.jpg',
        'juma9.jpg',
        'juma10.jpg',
        'juma11.jpg',
    ]
    # random.choice on a constant non-empty list cannot raise, so the old
    # try/except-with-retry around it was dead code and has been removed.
    img = random.choice(IMAGES)
    # Background image, converted to RGBA so it can be alpha-composited.
    base = Image.open(img).convert("RGBA")
    text = ext.upper()
    # Fully transparent overlay the text is drawn onto.
    txt = Image.new("RGBA", base.size, (255, 255, 255, 0))
    fnt = ImageFont.truetype("OpenSans-Italic.ttf", 40)
    d = ImageDraw.Draw(txt)
    # Draw the name at (200, 330), anchored middle-baseline.
    d.text((800 / 4, 330), text, font=fnt, fill=(231, 195, 113, 255), anchor='mb')
    out = Image.alpha_composite(base, txt)
    filename = random.randint(1, 15)
    # BUGFIX: save under the random stem that is returned; previously the
    # file was written to a fixed name, so the '<stem>.png' that the caller
    # opens never existed.
    out.save(f'{filename}.png')
    return filename
def gen_markup():
    """Build the one-column inline keyboard asking the user to join the
    channel and then confirm membership."""
    join_button = InlineKeyboardButton("Azo bo'ling", callback_data="cb_yes", url='t.me/onideal')
    confirm_button = InlineKeyboardButton("Tasdiqlash", callback_data="cb_no")
    markup = InlineKeyboardMarkup()
    markup.row_width = 1
    markup.add(join_button, confirm_button)
    return markup
def getUserFromChannel(userId):
    """Return the Telegram membership status of `userId` in the bot channel
    (e.g. 'member')."""
    return bot.get_chat_member(channelId, userId).status
@bot.callback_query_handler(func=lambda call: True)
def callback_query(call):
    # Handle presses on the inline keyboard created by gen_markup().
    if call.data == "cb_yes":
        # "Join" button: just acknowledge the callback.
        bot.answer_callback_query(call.id, "Answer is Yes")
    elif call.data == "cb_no":
        # "Confirm" button: re-check channel membership.
        u = getUserFromChannel(call.from_user.id)
        if u == 'member':
            # Member: ask for the friend's name and route the next message
            # to process_name_step.
            msg = bot.send_message(call.from_user.id, """\
Juda soz!!!, Do'stingizni ismini yozing
            """)
            bot.register_next_step_handler(msg, process_name_step)
        else:
            # Not a member yet: show the join/confirm keyboard again.
            bot.send_message(call.from_user.id, f"Salom {call.from_user.first_name}, Kanalimizga a'zo bo'ling va A'zolikni tekshirish buyrug'ini tanlang", reply_markup=gen_markup())
def process_name_step(message):
    """Next-step handler: render the friend's name onto a greeting image
    and send it back with a caption."""
    try:
        name = message.text
        name = name.upper()
        # ImgTextWriter returns a random filename stem; the rendered PNG
        # is expected at '<stem>.png'.
        myfile = ImgTextWriter(name)
        photoSend = open(f'{myfile}.png', 'rb')
        caption = f"{name} : <NAME> muborak aziz dindoshim🕌🌙\
\nSizni Sayyid-ul Ayyom bilan qutlayman🌙\n{msgOne}\
\nO'zingiz yaxshi ko'rgan, jannatda xam birga bo'lishni istagan insonlaringizni O'z ismimlari bilan tabriklang. \n@JumaTabriklarbot"
        bot.send_photo(message.chat.id, photoSend, caption=caption)
    except Exception as e:
        # Best-effort: any failure (bad text, missing file, API error)
        # gets a generic apology reply instead of crashing the handler.
        bot.reply_to(message, 'oooops')
def process_uname_step(message):
    """Next-step handler for the "for myself" flow: render the user's own
    text onto a greeting image and send it back with a generic caption."""
    try:
        name = message.text
        name = name.upper()
        # UImgTextWriter returns a random filename stem; the rendered PNG
        # is expected at '<stem>.png'.
        myfile = UImgTextWriter(name)
        photoSend = open(f'{myfile}.png', 'rb')
        caption = f"Juma Ayyom muborak aziz dindoshlarim🕌🌙\
\nSizni Sayyid-ul Ayyom bilan qutlayman🌙,\n{msgOne}\
\nO'zingiz yaxshi ko'rgan, jannatda xam birga bo'lishni istagan insonlaringizga yuboring \n@JumaTabriklarbot"
        bot.send_photo(message.chat.id, photoSend, caption=caption)
    except Exception as e:
        # Best-effort: any failure (bad text, missing file, API error)
        # gets a generic apology reply instead of crashing the handler.
        bot.reply_to(message, 'oooops')
@bot.message_handler(commands=['start','help'])
def start(message):
    """Entry point for /start and /help: channel members get the main
    reply keyboard, everyone else gets the join/confirm inline keyboard."""
    us = getUserFromChannel(message.chat.id)
    if us == 'member':
        # Two-button reply keyboard: greet a friend or yourself.
        markup = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=2)
        btn1 = types.KeyboardButton("Do'stimga")
        btn2 = types.KeyboardButton("O'zimga")
        markup.add(btn1, btn2)
        bot.send_message(message.chat.id, "Ass<NAME> Do'stim", reply_markup=markup)
    else:
        bot.send_message(message.chat.id, f"Salom {message.from_user.first_name}, Kanalimizga a'zo bo'ling va A'zolikni tekshirish buyrug'ini tasdiqlang", reply_markup=gen_markup())
@bot.message_handler(func=lambda message: True)
def message_handler(message):
    """Catch-all text handler: for members, dispatch the two keyboard
    buttons to the matching next-step handler; otherwise ask to join."""
    us = getUserFromChannel(message.chat.id)
    if us == 'member':
        msg = bot.send_message(message.chat.id, """\
Juda soz!!!, Do'stingizni ismini yozing. \nYoki /start /help ni bosing
        """)
        # Route the next message to the handler matching the pressed button.
        if message.text == "Do'stimga":
            bot.register_next_step_handler(msg, process_name_step)
        elif message.text == "O'zimga":
            bot.register_next_step_handler(msg, process_uname_step)
    else:
        bot.send_message(message.chat.id, f"Salom {message.from_user.first_name}, kanallarga a'zo bo'ling va A'zolikni tekshirish buyrug'ini tanlang", reply_markup=gen_markup())
bot.polling(none_stop=True)
|
[
"PIL.Image.new",
"random.randint",
"telebot.types.InlineKeyboardButton",
"telebot.types.ReplyKeyboardMarkup",
"telebot.types.KeyboardButton",
"random.choice",
"time.sleep",
"PIL.ImageFont.truetype",
"PIL.Image.open",
"PIL.Image.alpha_composite",
"telebot.types.InlineKeyboardMarkup",
"PIL.ImageDraw.Draw",
"telebot.TeleBot"
] |
[((231, 262), 'telebot.TeleBot', 'telebot.TeleBot', (['TELEGRAM_TOKEN'], {}), '(TELEGRAM_TOKEN)\n', (246, 262), False, 'import telebot\n'), ((1013, 1035), 'random.choice', 'random.choice', (['msgDict'], {}), '(msgDict)\n', (1026, 1035), False, 'import random, time\n'), ((1654, 1702), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'base.size', '(255, 255, 255, 0)'], {}), "('RGBA', base.size, (255, 255, 255, 0))\n", (1663, 1702), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1729, 1774), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""OpenSans-Italic.ttf"""', '(40)'], {}), "('OpenSans-Italic.ttf', 40)\n", (1747, 1774), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1813, 1832), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['txt'], {}), '(txt)\n', (1827, 1832), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1960, 1992), 'PIL.Image.alpha_composite', 'Image.alpha_composite', (['base', 'txt'], {}), '(base, txt)\n', (1981, 1992), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2015, 2037), 'random.randint', 'random.randint', (['(30)', '(45)'], {}), '(30, 45)\n', (2029, 2037), False, 'import random, time\n'), ((2701, 2749), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'base.size', '(255, 255, 255, 0)'], {}), "('RGBA', base.size, (255, 255, 255, 0))\n", (2710, 2749), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2776, 2821), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""OpenSans-Italic.ttf"""', '(40)'], {}), "('OpenSans-Italic.ttf', 40)\n", (2794, 2821), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2860, 2879), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['txt'], {}), '(txt)\n', (2874, 2879), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3007, 3039), 'PIL.Image.alpha_composite', 'Image.alpha_composite', (['base', 'txt'], {}), '(base, txt)\n', (3028, 3039), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3062, 3083), 'random.randint', 'random.randint', (['(1)', '(15)'], 
{}), '(1, 15)\n', (3076, 3083), False, 'import random, time\n'), ((3176, 3198), 'telebot.types.InlineKeyboardMarkup', 'InlineKeyboardMarkup', ([], {}), '()\n', (3196, 3198), False, 'from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((1367, 1388), 'random.choice', 'random.choice', (['IMAGES'], {}), '(IMAGES)\n', (1380, 1388), False, 'import random, time\n'), ((2414, 2435), 'random.choice', 'random.choice', (['IMAGES'], {}), '(IMAGES)\n', (2427, 2435), False, 'import random, time\n'), ((3241, 3320), 'telebot.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""Azo bo\'ling"""'], {'callback_data': '"""cb_yes"""', 'url': '"""t.me/onideal"""'}), '("Azo bo\'ling", callback_data=\'cb_yes\', url=\'t.me/onideal\')\n', (3261, 3320), False, 'from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((3354, 3411), 'telebot.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""Tasdiqlash"""'], {'callback_data': '"""cb_no"""'}), "('Tasdiqlash', callback_data='cb_no')\n", (3374, 3411), False, 'from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((5561, 5621), 'telebot.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'resize_keyboard': '(True)', 'row_width': '(2)'}), '(resize_keyboard=True, row_width=2)\n', (5586, 5621), False, 'from telebot import types\n'), ((5638, 5671), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""Do\'stimga"""'], {}), '("Do\'stimga")\n', (5658, 5671), False, 'from telebot import types\n'), ((5688, 5719), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""O\'zimga"""'], {}), '("O\'zimga")\n', (5708, 5719), False, 'from telebot import types\n'), ((1411, 1424), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1421, 1424), False, 'import random, time\n'), ((1440, 1461), 'random.choice', 'random.choice', (['IMAGES'], {}), '(IMAGES)\n', (1453, 1461), False, 'import random, time\n'), ((1494, 1509), 'PIL.Image.open', 'Image.open', (['img'], {}), 
'(img)\n', (1504, 1509), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2458, 2471), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2468, 2471), False, 'import random, time\n'), ((2487, 2508), 'random.choice', 'random.choice', (['IMAGES'], {}), '(IMAGES)\n', (2500, 2508), False, 'import random, time\n'), ((2541, 2556), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (2551, 2556), False, 'from PIL import Image, ImageDraw, ImageFont\n')]
|
def kwargs_remover(f, kwargs, check_list = None, clone = True):
    '''Removes all the keys from a kwargs-dict that a given function does not understand.

    The keys removed can optionally be restricted, so only keys from
    check_list are removed.

    f          -- function whose signature the kwargs are checked against
                  (Python 2 only: uses f.func_code)
    kwargs     -- the keyword-argument dict to filter
    check_list -- optional iterable of keys eligible for removal;
                  defaults to all keys of kwargs
    clone      -- if True (default) work on a copy, otherwise mutate
                  kwargs in place
    '''
    # Fixes: identity comparison with None instead of '== None', and the
    # unused 'import inspect' has been removed.
    if check_list is None:
        check_list = kwargs.keys()
    if clone:
        kwargs = kwargs.copy()
    # Flag 8 (CO_VARKEYWORDS) means f accepts **kwargs and therefore
    # understands every key, so nothing needs to be removed in that case.
    if not f.func_code.co_flags & 8:
        args, varargs, keywords, defaults = getargspec(f)
        for c in set(check_list).intersection(kwargs.keys()):
            if c not in args:
                del kwargs[c]
    return kwargs
def getargs(co):
    """Get information about the arguments accepted by a code object.
    Three things are returned: (args, varargs, varkw), where 'args' is
    a list of argument names (possibly containing nested lists), and
    'varargs' and 'varkw' are the names of the * and ** arguments or None.

    NOTE(review): Python 2 only -- it indexes co.co_code as a byte string
    (ord(...)) and decodes the 2-byte little-endian argument encoding;
    tuple parameters "def f((a, b))" do not exist in Python 3."""
    import dis
    # Code-object flag constants (only CO_VARARGS / CO_VARKEYWORDS are used).
    CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 0x1, 0x2, 0x4, 0x8
    CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
    nargs = co.co_argcount
    names = co.co_varnames
    args = list(names[:nargs])
    step = 0
    # The following acrobatics are for anonymous (tuple) arguments:
    # they appear in co_varnames as '' or '.N', so the bytecode is scanned
    # for the UNPACK_*/STORE_FAST sequence that unpacks them, in order to
    # recover the nested name structure.
    for i in range(nargs):
        if args[i][:1] in ('', '.'):
            stack, remain, count = [], [], []
            while step < len(co.co_code):
                op = ord(co.co_code[step])
                step = step + 1
                if op >= dis.HAVE_ARGUMENT:
                    opname = dis.opname[op]
                    # 2-byte operand, low byte first (pre-3.6 wordcode).
                    value = ord(co.co_code[step]) + ord(co.co_code[step+1])*256
                    step = step + 2
                    if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
                        remain.append(value)
                        count.append(value)
                    elif opname == 'STORE_FAST':
                        stack.append(names[value])
                        # Special case for sublists of length 1: def foo((bar))
                        # doesn't generate the UNPACK_TUPLE bytecode, so if
                        # `remain` is empty here, we have such a sublist.
                        if not remain:
                            stack[0] = [stack[0]]
                            break
                        else:
                            # Count down the slots of the innermost tuple;
                            # when one closes, fold its names into a sublist.
                            remain[-1] = remain[-1] - 1
                            while remain[-1] == 0:
                                remain.pop()
                                size = count.pop()
                                stack[-size:] = [stack[-size:]]
                                if not remain: break
                                remain[-1] = remain[-1] - 1
                            if not remain: break
            args[i] = stack[0]
    varargs = None
    if co.co_flags & CO_VARARGS:
        # *args name immediately follows the positional names.
        varargs = co.co_varnames[nargs]
        nargs = nargs + 1
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        # **kwargs name follows *args (if any).
        varkw = co.co_varnames[nargs]
    return [args, varargs, varkw]
def getargspec(func):
    # Returns [args, varargs, varkw, defaults]; unlike inspect.getargspec,
    # defaults is [] rather than None when the function has no defaults.
    # NOTE(review): Python 2 only -- func.func_code / func.func_defaults
    # are __code__ / __defaults__ in Python 3.
    return getargs(func.func_code) + [func.func_defaults if func.func_defaults else []]
def method_signature(f):
    '''Returns the method signature for a function, e.g. "f(a, b = 1, *c)".

    NOTE(review): Python 2 only -- relies on f.func_name and the Python 2
    getargspec defined above.'''
    spec = getargspec(f)
    args = []
    # Render a possibly-nested (tuple) argument as "(a, (b, c))".
    def simple_arg(a):
        if isinstance(a, list):
            return '(' + ', '.join(map(simple_arg, a)) + ')'
        return str(a)
    # Build the argument list back-to-front: **kwargs first, then *args,
    # then positionals (pairing the trailing ones with their defaults);
    # the list is reversed when joined below.
    if spec[2] != None:
        args.append('**' + spec[2])
    if spec[1] != None:
        args.append('*' + spec[1])
    for n in range(len(spec[0])):
        cur = spec[0][len(spec[0])-n-1]
        if n < len(spec[3]):
            args.append(str(cur) + ' = ' + repr(spec[3][len(spec[3])-n-1]))
        else:
            args.append(simple_arg(cur))
    return f.func_name + '(' + ', '.join(reversed(args)) + ')'
def ewraps(wrapped):
    '''Extended version of functools.wraps.

    Besides copying metadata like functools.wraps, it prepends the original
    call signature (via method_signature) to the wrapper's docstring, and
    marks the function so the signature is only inserted once.'''
    def deco(wrapper):
        import functools
        fixed = functools.wraps(wrapped)(wrapper)
        if wrapped.__dict__.get('signature_added', False):
            # Signature already recorded on the wrapped function -- done.
            return fixed
        fixed.__doc__ = method_signature(wrapped) + '\n\n' + (fixed.__doc__ or '')
        fixed.__dict__['signature_added'] = True
        return fixed
    return deco
# Copied from <NAME>'s blog:
# http://eli.thegreenplace.net/2009/08/29/co-routines-as-an-alternative-to-state-machines/
def coroutine(func):
    # Decorator: create the generator and advance it to its first yield so
    # it is immediately ready to receive .send() calls.
    # NOTE(review): Python 2 only -- cr.next() would be next(cr) in Python 3.
    def start(*args,**kwargs):
        cr = func(*args,**kwargs)
        cr.next()
        return cr
    return start
def memleaker(func):
    '''Create an information leak object.

    Wraps `func` in a leak.MemLeak helper; presumably `func` is a leak
    primitive the helper calls on demand. NOTE(review): the `leak` module
    is not visible here -- confirm its MemLeak contract.'''
    import leak
    return leak.MemLeak(func)
|
[
"leak.MemLeak",
"functools.wraps"
] |
[((4655, 4673), 'leak.MemLeak', 'leak.MemLeak', (['func'], {}), '(func)\n', (4667, 4673), False, 'import leak\n'), ((4010, 4034), 'functools.wraps', 'functools.wraps', (['wrapped'], {}), '(wrapped)\n', (4025, 4034), False, 'import functools\n')]
|
import datetime
import itertools
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils import dateparse, timezone
from furl import furl
from rest_framework.authtoken.models import Token
from rest_framework.test import APIRequestFactory, force_authenticate
from .. import views
from .. import models
class ViewTestCase(TestCase):
    """Shared base for view tests: loads the common fixture and prepares a
    DRF request factory, a plain GET request and the fixture user."""
    fixtures = ['preferences/tests/fixtures/common.yaml']
    def setUp(self):
        self.factory = APIRequestFactory()
        self.get_request = self.factory.get('/')
        # The user with pk=1 is expected to exist in common.yaml.
        self.user = get_user_model().objects.get(pk=1)
class ProfileViewTestCase(ViewTestCase):
    """Tests for views.ProfileView: anonymity flag, username and the
    display-name fallback chain (full name -> partial name -> username)."""
    def setUp(self):
        super().setUp()
        self.view = views.ProfileView().as_view()
        self.expected_display_name = self.user.get_full_name()
    def test_anonymous(self):
        """An anonymous user should have is_anonymous set to True."""
        response = self.view(self.get_request)
        self.assertTrue(response.data['is_anonymous'])
    def test_authenticated(self):
        """A non-anonymous user should have is_anonymous set to False and username set."""
        force_authenticate(self.get_request, user=self.user)
        response = self.view(self.get_request)
        self.assertFalse(response.data['is_anonymous'])
        self.assertEqual(response.data['username'], self.user.username)
        self.assertEqual(response.data['display_name'], self.expected_display_name)
    def test_token_authenticated(self):
        """A token-authenticated user should get expected media back."""
        token = Token.objects.create(user=self.user)
        # Authenticate via the DRF token header rather than force_authenticate.
        token_get_request = self.factory.get('/', HTTP_AUTHORIZATION=f'Token {token.key}')
        response = self.view(token_get_request)
        self.assertFalse(response.data['is_anonymous'])
        self.assertEqual(response.data['username'], self.user.username)
        self.assertEqual(response.data['display_name'], self.expected_display_name)
    def test_last_name_only(self):
        """A last name only user should have that as the display name."""
        self.user.first_name = ''
        self.user.last_name = 'GHJ'
        force_authenticate(self.get_request, user=self.user)
        response = self.view(self.get_request)
        self.assertEqual(response.data['display_name'], 'GHJ')
    def test_first_name_only(self):
        """A first name only user should have that as the display name."""
        self.user.first_name = 'GHJ'
        self.user.last_name = ''
        force_authenticate(self.get_request, user=self.user)
        response = self.view(self.get_request)
        self.assertEqual(response.data['display_name'], 'GHJ')
    def test_no_name(self):
        """A user with no name should fall back to the username as a display name."""
        self.user.first_name = ''
        self.user.last_name = ''
        force_authenticate(self.get_request, user=self.user)
        response = self.view(self.get_request)
        self.assertEqual(response.data['display_name'], self.user.username)
class PreferenceListViewTestCase(ViewTestCase):
    """Tests for views.PreferenceListView: listing, pagination, filtering,
    ordering, the expressed_at range query and POST creation rules."""
    def setUp(self):
        super().setUp()
        self.view = views.PreferenceListView().as_view()
        # Some common querysets: every preference, and only the most
        # recently expressed preference per user.
        self.all_qs = models.Preference.objects.all()
        self.most_recent_qs = models.Preference.objects.all().filter_most_recent_expressed_at()
    def test_basic_functionality(self):
        """A basic GET returns all the user preferences."""
        results = self._request_all()['results']
        self.assertEqual(len(results), self.most_recent_qs.count())
    def test_pagination(self):
        """A page size of one takes as many pages as there are results."""
        response = self._request_all(data={'page_size': 1})
        self.assertGreater(response['page_count'], 1)
        self.assertEqual(len(response['results']), response['page_count'])
    def test_user_filtering(self):
        """Filtering by user returns correct result."""
        user_pref = self.most_recent_qs.filter(user=self.user).first()
        self.assertIsNotNone(user_pref)
        results = self._request_all({'user': self.user.username})['results']
        self.assertEqual(len(results), 1)
        self._assert_preference_dict_matches(results[0], user_pref)
    def test_ordering_by_expressed_at_descending(self):
        """Ordering by descending expressed_at gives correct result."""
        expected_prefs = self.most_recent_qs.order_by('-expressed_at')
        results = self._request_all({'ordering': '-expressed_at'})['results']
        self.assertEqual(len(results), expected_prefs.count())
        for pref_dict, pref in zip(results, expected_prefs):
            self._assert_preference_dict_matches(pref_dict, pref)
    def test_ordering_by_expressed_at_ascending(self):
        """Ordering by ascending expressed_at gives correct result."""
        expected_prefs = self.most_recent_qs.order_by('expressed_at')
        results = self._request_all({'ordering': 'expressed_at'})['results']
        self.assertEqual(len(results), expected_prefs.count())
        for pref_dict, pref in zip(results, expected_prefs):
            self._assert_preference_dict_matches(pref_dict, pref)
    def test_expressed_at_query(self):
        """Can query list by expressed_at range."""
        minimum = self.most_recent_qs.order_by('expressed_at')[0]
        maximum = self.most_recent_qs.order_by('-expressed_at')[0]
        self.assertGreater(maximum.expressed_at, minimum.expressed_at)
        self.assertGreater(self.most_recent_qs.count(), 2)
        # Get the expected preferences strictly between the earliest and
        # latest expressed_at values.
        expected_prefs = self.most_recent_qs.filter(
            expressed_at__gt=minimum.expressed_at,
            expressed_at__lt=maximum.expressed_at).order_by('-expressed_at')
        self.assertTrue(expected_prefs.exists())
        # Get list returned by query; the 0.1s offsets nudge the bounds
        # inside the endpoints to make the range exclusive.
        results = self._request_all({
            'ordering': '-expressed_at',
            'expressed_at_after':
                (minimum.expressed_at + datetime.timedelta(seconds=0.1)).isoformat(),
            'expressed_at_before':
                (maximum.expressed_at - datetime.timedelta(seconds=0.1)).isoformat(),
        })['results']
        self.assertEqual(len(results), expected_prefs.count())
        for pref_dict, pref in zip(results, expected_prefs):
            self._assert_preference_dict_matches(pref_dict, pref)
    def test_creation(self):
        """POST-ing preferences updates preferences for user"""
        # Exercise all four allow_capture/request_hold combinations.
        for allow_capture, request_hold in itertools.product([True, False], [True, False]):
            # Update user preference
            request = self.factory.post('/', {
                'allow_capture': allow_capture, 'request_hold': request_hold
            })
            force_authenticate(request, user=self.user)
            response = self.view(request)
            self.assertEqual(response.status_code, 201) # created
            # Most recent preference is updated
            pref = self.most_recent_qs.filter(user=self.user).first()
            self.assertIsNotNone(pref)
            self.assertEqual(pref.allow_capture, allow_capture)
            self.assertEqual(pref.request_hold, request_hold)
    def test_anonymous_creation(self):
        """POST-ing preferences updates preferences for anonymous user fails"""
        request = self.factory.post('/', {
            'allow_capture': True, 'request_hold': False
        })
        response = self.view(request)
        self.assertEqual(response.status_code, 403) # Forbidden
    def test_creation_ignores_expressed_at(self):
        """POST-ing preferences updates preferences for user and ignores any expressed_at"""
        # Update user preference with a client-supplied (past) expressed_at.
        expressed_at_request = timezone.now() - datetime.timedelta(days=34)
        request = self.factory.post('/', {
            'allow_capture': True, 'request_hold': False,
            'expressed_at': expressed_at_request.isoformat()
        })
        force_authenticate(request, user=self.user)
        prev_pref = self.most_recent_qs.filter(user=self.user).first()
        response = self.view(request)
        self.assertEqual(response.status_code, 201) # created
        pref = self.most_recent_qs.filter(user=self.user).first()
        self.assertNotEqual(prev_pref.id, pref.id)
        self.assertNotEqual(pref.expressed_at, expressed_at_request)
    def _assert_preference_dict_matches(self, pref_dict, pref):
        """
        Assert that a preference returned from the API matches a database object.
        """
        self.assertEqual(pref_dict['user']['username'], pref.user.username)
        self.assertEqual(pref_dict['allow_capture'], pref.allow_capture)
        self.assertEqual(pref_dict['request_hold'], pref.request_hold)
        self.assertEqual(dateparse.parse_datetime(pref_dict['expressed_at']), pref.expressed_at)
    def _request_all(self, data=None, page_count_max=20):
        """
        Fetch all preference objects from the API. Returns an object of the form
        {'results': [...], 'page_count': number }
        """
        results = []
        page_count = 0
        # Use the furl library so that it's easy to merge query arguments in.
        url = furl('/')
        if data is not None:
            url.args.update(data)
        while True:
            request = self.factory.get(url.url)
            response = self.view(request)
            self.assertEqual(response.status_code, 200)
            page_count += 1
            results.extend(response.data['results'])
            # We're done if we've run out of pages
            if response.data.get('next') is None:
                break
            # Update the URL from the "next" field in the response
            url = furl(response.data.get('next'))
            if page_count > page_count_max:
                assert False, f'Exceeded maximum page count of {page_count_max}'
        return {'results': results, 'page_count': page_count}
|
[
"rest_framework.authtoken.models.Token.objects.create",
"django.utils.timezone.now",
"rest_framework.test.APIRequestFactory",
"furl.furl",
"django.contrib.auth.get_user_model",
"datetime.timedelta",
"rest_framework.test.force_authenticate",
"itertools.product",
"django.utils.dateparse.parse_datetime"
] |
[((480, 499), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (497, 499), False, 'from rest_framework.test import APIRequestFactory, force_authenticate\n'), ((1142, 1194), 'rest_framework.test.force_authenticate', 'force_authenticate', (['self.get_request'], {'user': 'self.user'}), '(self.get_request, user=self.user)\n', (1160, 1194), False, 'from rest_framework.test import APIRequestFactory, force_authenticate\n'), ((1584, 1620), 'rest_framework.authtoken.models.Token.objects.create', 'Token.objects.create', ([], {'user': 'self.user'}), '(user=self.user)\n', (1604, 1620), False, 'from rest_framework.authtoken.models import Token\n'), ((2160, 2212), 'rest_framework.test.force_authenticate', 'force_authenticate', (['self.get_request'], {'user': 'self.user'}), '(self.get_request, user=self.user)\n', (2178, 2212), False, 'from rest_framework.test import APIRequestFactory, force_authenticate\n'), ((2513, 2565), 'rest_framework.test.force_authenticate', 'force_authenticate', (['self.get_request'], {'user': 'self.user'}), '(self.get_request, user=self.user)\n', (2531, 2565), False, 'from rest_framework.test import APIRequestFactory, force_authenticate\n'), ((2866, 2918), 'rest_framework.test.force_authenticate', 'force_authenticate', (['self.get_request'], {'user': 'self.user'}), '(self.get_request, user=self.user)\n', (2884, 2918), False, 'from rest_framework.test import APIRequestFactory, force_authenticate\n'), ((6586, 6633), 'itertools.product', 'itertools.product', (['[True, False]', '[True, False]'], {}), '([True, False], [True, False])\n', (6603, 6633), False, 'import itertools\n'), ((8028, 8071), 'rest_framework.test.force_authenticate', 'force_authenticate', (['request'], {'user': 'self.user'}), '(request, user=self.user)\n', (8046, 8071), False, 'from rest_framework.test import APIRequestFactory, force_authenticate\n'), ((9272, 9281), 'furl.furl', 'furl', (['"""/"""'], {}), "('/')\n", (9276, 9281), False, 'from furl import 
furl\n'), ((6823, 6866), 'rest_framework.test.force_authenticate', 'force_authenticate', (['request'], {'user': 'self.user'}), '(request, user=self.user)\n', (6841, 6866), False, 'from rest_framework.test import APIRequestFactory, force_authenticate\n'), ((7802, 7816), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (7814, 7816), False, 'from django.utils import dateparse, timezone\n'), ((7819, 7846), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(34)'}), '(days=34)\n', (7837, 7846), False, 'import datetime\n'), ((8847, 8898), 'django.utils.dateparse.parse_datetime', 'dateparse.parse_datetime', (["pref_dict['expressed_at']"], {}), "(pref_dict['expressed_at'])\n", (8871, 8898), False, 'from django.utils import dateparse, timezone\n'), ((569, 585), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (583, 585), False, 'from django.contrib.auth import get_user_model\n'), ((6069, 6100), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(0.1)'}), '(seconds=0.1)\n', (6087, 6100), False, 'import datetime\n'), ((6190, 6221), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(0.1)'}), '(seconds=0.1)\n', (6208, 6221), False, 'import datetime\n')]
|
#!/usr/bin/python
import sys
import os
fn = "pileup.txt"
coverage_thresh = 5

# Bail out early if the pileup file is missing.
if not os.path.isfile(fn):
    print("File not found...")
    sys.exit()

with open(fn) as fp:
    hotspot_count = 0        # number of hotspots seen
    hotspot_read_count = 0   # sum of the peak coverage of each hotspot
    in_hotspot = False
    max_coverage = 0
    hotspot_chr = ""
    hotspot_start = 0
    hotspot_end = 0
    for line in fp:
        # parse each line (chr, loc, base, coverage, codes, quality)
        fields = line.split()
        coverage = int(fields[3])
        if not in_hotspot :
            if coverage > coverage_thresh :
                # Entering a hotspot: remember where it starts.
                in_hotspot = True
                hotspot_chr = str(fields[0])
                hotspot_start = str(fields[1])
                hotspot_count = hotspot_count + 1
                max_coverage = coverage
        else :
            # Track the peak coverage inside the hotspot.
            if coverage > max_coverage:
                max_coverage = coverage
        if coverage < coverage_thresh and in_hotspot:
            # Coverage fell below threshold: close the hotspot and report
            # it as "chr<TAB>start<TAB>end".
            hotspot_read_count = hotspot_read_count + max_coverage
            hotspot_end = str(fields[1])
            print(hotspot_chr + "\t" + hotspot_start + "\t" + hotspot_end)
            max_coverage = 0
            in_hotspot = False
    # BUGFIX: a hotspot still open at end-of-file was previously dropped
    # (never printed or counted); close it at the last position seen.
    if in_hotspot:
        hotspot_read_count = hotspot_read_count + max_coverage
        hotspot_end = str(fields[1])
        print(hotspot_chr + "\t" + hotspot_start + "\t" + hotspot_end)
        in_hotspot = False
    #print("Num hotspots: ", hotspot_count)
    #print("Hotspot reads: ", hotspot_read_count)
|
[
"os.path.isfile",
"sys.exit"
] |
[((87, 105), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (101, 105), False, 'import os\n'), ((142, 152), 'sys.exit', 'sys.exit', ([], {}), '()\n', (150, 152), False, 'import sys\n')]
|
import numpy as np
import torch
import torch.nn as nn
def conv(in_channels, out_channels, kernel_size, bias=True):
    """'Same'-padded 2D convolution: a padding of kernel_size // 2 keeps
    the spatial size unchanged for odd kernel sizes."""
    pad = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size, padding=pad, bias=bias)
class MappingNet(nn.Module):
    """MLP mapping a latent vector of size opt.latent_dim to a style vector
    of size opt.style_size ** 2, with opt.depth hidden Linear+ReLU blocks
    of width opt.hidden_dim."""

    def __init__(self, opt):
        super().__init__()
        in_dim = opt.latent_dim
        out_dim = opt.style_size**2
        hid = opt.hidden_dim
        # Input projection, `depth` hidden blocks, then the output layer.
        blocks = [nn.Linear(in_dim, hid), nn.ReLU()]
        for _ in range(opt.depth):
            blocks.extend([nn.Linear(hid, hid), nn.ReLU()])
        blocks.append(nn.Linear(hid, out_dim))
        self.net = nn.Sequential(*blocks)

    def forward(self, z):
        """Map latent codes z to style vectors."""
        return self.net(z)
class Net(nn.Module):
    """Convolutional decoder that upsamples its input by opt.up_factor
    (assumed a power of two), with opt.Nr extra conv blocks before and
    after each upsampling stage."""

    def __init__(self, opt):
        super().__init__()
        width = opt.ndf
        use_bias = opt.need_bias
        n_stages = int(np.log2(opt.up_factor))

        def block(ch_in, ch_out):
            # conv -> batch-norm -> in-place ReLU
            return [conv(ch_in, ch_out, 3, bias=use_bias),
                    nn.BatchNorm2d(ch_out),
                    nn.ReLU(True)]

        modules = block(opt.input_nch, width)
        for _ in range(opt.Nr):
            modules += block(width, width)
        for _ in range(n_stages):
            # 2x upsample followed by Nr+1 refinement blocks per stage.
            modules.append(nn.Upsample(scale_factor=2, mode=opt.upsample_mode))
            modules += block(width, width)
            for _ in range(opt.Nr):
                modules += block(width, width)
        modules.append(conv(width, opt.output_nch, 3, bias=use_bias))
        self.net = nn.Sequential(*modules)

    def forward(self, z, s=None):
        """Decode z; the style argument s is accepted but unused."""
        return self.net(z)
|
[
"torch.nn.ReLU",
"torch.nn.Sequential",
"numpy.log2",
"torch.nn.Conv2d",
"torch.nn.Upsample",
"torch.nn.BatchNorm2d",
"torch.nn.Linear"
] |
[((129, 219), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', 'kernel_size'], {'padding': '(kernel_size // 2)', 'bias': 'bias'}), '(in_channels, out_channels, kernel_size, padding=kernel_size // 2,\n bias=bias)\n', (138, 219), True, 'import torch.nn as nn\n'), ((782, 804), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (795, 804), True, 'import torch.nn as nn\n'), ((2044, 2066), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (2057, 2066), True, 'import torch.nn as nn\n'), ((513, 546), 'torch.nn.Linear', 'nn.Linear', (['latent_dim', 'hidden_dim'], {}), '(latent_dim, hidden_dim)\n', (522, 546), True, 'import torch.nn as nn\n'), ((567, 576), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (574, 576), True, 'import torch.nn as nn\n'), ((720, 752), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'style_dim'], {}), '(hidden_dim, style_dim)\n', (729, 752), True, 'import torch.nn as nn\n'), ((1083, 1105), 'numpy.log2', 'np.log2', (['opt.up_factor'], {}), '(opt.up_factor)\n', (1090, 1105), True, 'import numpy as np\n'), ((1263, 1282), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ndf'], {}), '(ndf)\n', (1277, 1282), True, 'import torch.nn as nn\n'), ((1302, 1315), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1309, 1315), True, 'import torch.nn as nn\n'), ((632, 665), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (641, 665), True, 'import torch.nn as nn\n'), ((690, 699), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (697, 699), True, 'import torch.nn as nn\n'), ((1435, 1454), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ndf'], {}), '(ndf)\n', (1449, 1454), True, 'import torch.nn as nn\n'), ((1479, 1492), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1486, 1492), True, 'import torch.nn as nn\n'), ((1551, 1598), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': 'upsample_mode'}), '(scale_factor=2, 
mode=upsample_mode)\n', (1562, 1598), True, 'import torch.nn as nn\n'), ((1722, 1741), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ndf'], {}), '(ndf)\n', (1736, 1741), True, 'import torch.nn as nn\n'), ((1766, 1779), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1773, 1779), True, 'import torch.nn as nn\n'), ((1902, 1921), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ndf'], {}), '(ndf)\n', (1916, 1921), True, 'import torch.nn as nn\n'), ((1950, 1963), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1957, 1963), True, 'import torch.nn as nn\n')]
|
# Generated by Django 4.0 on 2021-12-22 04:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('practice_app', '0009_alter_museumapicsv_additionalimages_and_more'),
]
operations = [
migrations.AlterField(
model_name='museumapicsv',
name='accessionNumber',
field=models.CharField(max_length=50),
),
]
|
[
"django.db.models.CharField"
] |
[((380, 411), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (396, 411), False, 'from django.db import migrations, models\n')]
|
#!usr/bin/env python
# -*-coding:utf8-*-
from bank import Bank
from bank import Account
import socket
import time
from server import Server
from server_logger import log
__doc__ = """
* This module provide bcs_server class to access the bcs server.
* This extends the Server class.
"""
class BcsServer(Server):
def __init__(self):
super(BcsServer, self).__init__()
Bank()
def start(self, conn, addr):
self.respond(conn, "connecting...", str("type=valid"))
log.info('Session started with %s' % addr)
login_params = None
debug = None
try:
while True:
request_message, request_params = self.receive(conn, addr)
# Get response message and parameters
response_params = None
response_msg = None
debug = False
log.info('Request from %s - %s' % (addr, request_message))
if request_message == "authenticate":
login_params = request_params
response_msg, account_type = Bank().login(
request_params['email'],
request_params['password'])
response_params = str("type=" + account_type)
elif request_message == "logout":
response_msg = "Logout Successful"
del Bank().logged_ins[login_params['email']]
else:
response_msg, response_params, debug = self.bank_operation(
request_message,
request_params)
# Respond to client
self.respond(conn, response_msg, response_params)
if debug:
log.debug('Response to %s - %s' % (addr, response_msg))
log.info('Passbook sent to %s' % (addr))
else:
log.info('Response to %s - %s' % (addr, response_msg))
# Close connection if authentication failed or logout
if ("Login Unsuccessful" in response_msg or
response_msg == "Logout Successful"):
conn.close()
break
except Exception as e:
if login_params['email'] in Bank().logged_ins:
del Bank().logged_ins[login_params['email']]
log.error(e)
log.error('Error after menu ' + str(addr))
finally:
self.count -= 1
conn.close()
def bank_operation(self, request_message, request_params):
response_msg = None
response_params = None
debug = False
if request_message == "addAccount":
response_msg = Bank().addAccount(Account(request_params['name'],
request_params['email'],
request_params['password'],
request_params['type']))
elif request_message == "deleteAccount":
response_msg = Bank().deleteAccount(request_params['email'])
elif request_message == "changePassword":
response_msg = Bank().changePassword(request_params['email'],
request_params['password'])
elif request_message == "withdraw":
log.debug('withDraw: %s' % str(request_params))
response_msg = Bank().withDraw(request_params['email'],
request_params['amount'])
elif request_message == "deposit":
response_msg = Bank().deposit(request_params['email'],
request_params['amount'])
elif request_message == "getPassbook":
response_msg = Bank().getPassbook(request_params['email'])
debug = True
return response_msg, response_params, debug
if __name__ == '__main__':
server_app = BcsServer()
try:
server_app.listen()
except KeyboardInterrupt:
log.info('Keyboard Interupt, Shutting down the server')
|
[
"server_logger.log.debug",
"bank.Account",
"server_logger.log.info",
"bank.Bank",
"server_logger.log.error"
] |
[((398, 404), 'bank.Bank', 'Bank', ([], {}), '()\n', (402, 404), False, 'from bank import Bank\n'), ((510, 552), 'server_logger.log.info', 'log.info', (["('Session started with %s' % addr)"], {}), "('Session started with %s' % addr)\n", (518, 552), False, 'from server_logger import log\n'), ((4208, 4263), 'server_logger.log.info', 'log.info', (['"""Keyboard Interupt, Shutting down the server"""'], {}), "('Keyboard Interupt, Shutting down the server')\n", (4216, 4263), False, 'from server_logger import log\n'), ((889, 948), 'server_logger.log.info', 'log.info', (["('Request from %s - %s' % (addr, request_message))"], {}), "('Request from %s - %s' % (addr, request_message))\n", (897, 948), False, 'from server_logger import log\n'), ((2522, 2534), 'server_logger.log.error', 'log.error', (['e'], {}), '(e)\n', (2531, 2534), False, 'from server_logger import log\n'), ((2894, 3007), 'bank.Account', 'Account', (["request_params['name']", "request_params['email']", "request_params['password']", "request_params['type']"], {}), "(request_params['name'], request_params['email'], request_params[\n 'password'], request_params['type'])\n", (2901, 3007), False, 'from bank import Account\n'), ((1890, 1945), 'server_logger.log.debug', 'log.debug', (["('Response to %s - %s' % (addr, response_msg))"], {}), "('Response to %s - %s' % (addr, response_msg))\n", (1899, 1945), False, 'from server_logger import log\n'), ((1966, 2004), 'server_logger.log.info', 'log.info', (["('Passbook sent to %s' % addr)"], {}), "('Passbook sent to %s' % addr)\n", (1974, 2004), False, 'from server_logger import log\n'), ((2053, 2107), 'server_logger.log.info', 'log.info', (["('Response to %s - %s' % (addr, response_msg))"], {}), "('Response to %s - %s' % (addr, response_msg))\n", (2061, 2107), False, 'from server_logger import log\n'), ((2876, 2882), 'bank.Bank', 'Bank', ([], {}), '()\n', (2880, 2882), False, 'from bank import Bank\n'), ((2430, 2436), 'bank.Bank', 'Bank', ([], {}), '()\n', (2434, 2436), 
False, 'from bank import Bank\n'), ((3215, 3221), 'bank.Bank', 'Bank', ([], {}), '()\n', (3219, 3221), False, 'from bank import Bank\n'), ((1102, 1108), 'bank.Bank', 'Bank', ([], {}), '()\n', (1106, 1108), False, 'from bank import Bank\n'), ((2469, 2475), 'bank.Bank', 'Bank', ([], {}), '()\n', (2473, 2475), False, 'from bank import Bank\n'), ((3338, 3344), 'bank.Bank', 'Bank', ([], {}), '()\n', (3342, 3344), False, 'from bank import Bank\n'), ((1462, 1468), 'bank.Bank', 'Bank', ([], {}), '()\n', (1466, 1468), False, 'from bank import Bank\n'), ((3593, 3599), 'bank.Bank', 'Bank', ([], {}), '()\n', (3597, 3599), False, 'from bank import Bank\n'), ((3773, 3779), 'bank.Bank', 'Bank', ([], {}), '()\n', (3777, 3779), False, 'from bank import Bank\n'), ((3955, 3961), 'bank.Bank', 'Bank', ([], {}), '()\n', (3959, 3961), False, 'from bank import Bank\n')]
|
""" This script uploads created music files in directories to youtube music library """
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from ytmusicapi import YTMusic
import music_tag
from datetime import date
import os
directories = ["D:\Kwan\Desktop", "D:\Kwan\Music"]
# torrent folder is not allowed due to slow download (> 60 s)
fill_empty_tag = True # Fill empty music tags or not
ytmusic = YTMusic("ytmusic_auth.json") # Authentication file
filetypes = [".mp3", "flac", ".wma", ".m4a", ".ogg"] # only last four elements
def set_tag(fn):
""" This function sets music tags if empty """
f = music_tag.load_file(fn)
title = os.path.splitext(os.path.basename(fn))[0]
title = title.split("-",1) # Assumes 'artist - song name' format
if f["year"].value == 0:
f["year"] = int(date.today().strftime("%Y"))
if f["title"].value == "":
f["title"] = title[-1]
if f["artist"].value == "":
f["artist"] = title[0]
f.save()
def on_created(event):
""" This function gets executed when a file is created in directories being monitored """
fn = event.src_path
print(f"fn is {fn} and extension is {fn[-4:]}")
if fn[-4:] in filetypes:
time.sleep(30) # Wait until download is done
try:
if fill_empty_tag:
set_tag(fn)
ytmusic.upload_song(fn)
except:
print("File does not exist")
pass
if __name__ == "__main__":
patterns = "*"
ignore_patterns = ""
ignore_directories = False
case_sensitive = True
my_event_handler = PatternMatchingEventHandler(
patterns, ignore_patterns, ignore_directories, case_sensitive
)
my_event_handler.on_created = on_created
my_observer = Observer()
for path in directories:
my_observer.schedule(my_event_handler, path, recursive=True)
my_observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
my_observer.stop()
my_observer.join()
|
[
"music_tag.load_file",
"os.path.basename",
"datetime.date.today",
"time.sleep",
"ytmusicapi.YTMusic",
"watchdog.events.PatternMatchingEventHandler",
"watchdog.observers.Observer"
] |
[((458, 486), 'ytmusicapi.YTMusic', 'YTMusic', (['"""ytmusic_auth.json"""'], {}), "('ytmusic_auth.json')\n", (465, 486), False, 'from ytmusicapi import YTMusic\n'), ((668, 691), 'music_tag.load_file', 'music_tag.load_file', (['fn'], {}), '(fn)\n', (687, 691), False, 'import music_tag\n'), ((1654, 1748), 'watchdog.events.PatternMatchingEventHandler', 'PatternMatchingEventHandler', (['patterns', 'ignore_patterns', 'ignore_directories', 'case_sensitive'], {}), '(patterns, ignore_patterns, ignore_directories,\n case_sensitive)\n', (1681, 1748), False, 'from watchdog.events import PatternMatchingEventHandler\n'), ((1824, 1834), 'watchdog.observers.Observer', 'Observer', ([], {}), '()\n', (1832, 1834), False, 'from watchdog.observers import Observer\n'), ((1272, 1286), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (1282, 1286), False, 'import time\n'), ((721, 741), 'os.path.basename', 'os.path.basename', (['fn'], {}), '(fn)\n', (737, 741), False, 'import os\n'), ((2000, 2013), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2010, 2013), False, 'import time\n'), ((870, 882), 'datetime.date.today', 'date.today', ([], {}), '()\n', (880, 882), False, 'from datetime import date\n')]
|
import numpy as np
from copy import copy
from .utils.thresholdcurator import ThresholdCurator
from .quality_metric import QualityMetric
import spiketoolkit as st
import spikemetrics.metrics as metrics
from spikemetrics.utils import printProgressBar
from spikemetrics.metrics import find_neighboring_channels
from collections import OrderedDict
from sklearn.neighbors import NearestNeighbors
from .parameter_dictionaries import update_all_param_dicts_with_kwargs
class NoiseOverlap(QualityMetric):
installed = True # check at class level if installed or not
installation_mesg = "" # err
params = OrderedDict([('num_channels_to_compare', 13),
('max_spikes_per_unit_for_noise_overlap', 1000),
('num_features', 10),
('num_knn', 6)])
curator_name = "ThresholdNoiseOverlaps"
def __init__(self, metric_data):
QualityMetric.__init__(self, metric_data, metric_name="noise_overlap")
if not metric_data.has_recording():
raise ValueError("MetricData object must have a recording")
def compute_metric(self, num_channels_to_compare, max_spikes_per_unit_for_noise_overlap,
num_features, num_knn, **kwargs):
# Make sure max_spikes_per_unit_for_noise_overlap is not None
assert max_spikes_per_unit_for_noise_overlap is not None, "'max_spikes_per_unit_for_noise_overlap' must be an integer."
# update keyword arg in case it's already specified to something
kwargs['max_spikes_per_unit'] = max_spikes_per_unit_for_noise_overlap
params_dict = update_all_param_dicts_with_kwargs(kwargs)
save_property_or_features = params_dict['save_property_or_features']
seed = params_dict['seed']
# set random seed
if seed is not None:
np.random.seed(seed)
# first, get waveform snippets of every unit (at most n spikes)
# waveforms = List (units,) of np.array (n_spikes, n_channels, n_timepoints)
waveforms = st.postprocessing.get_unit_waveforms(
self._metric_data._recording,
self._metric_data._sorting,
unit_ids=self._metric_data._unit_ids,
**kwargs)
n_waveforms_per_unit = np.array([len(wf) for wf in waveforms])
n_spikes_per_unit = np.array([len(self._metric_data._sorting.get_unit_spike_train(u)) for u in self._metric_data._unit_ids])
if np.all(n_waveforms_per_unit < max_spikes_per_unit_for_noise_overlap):
# in this case it means that waveforms have been computed on
# less spikes than max_spikes_per_unit_for_noise_overlap --> recompute
kwargs['recompute_info'] = True
waveforms = st.postprocessing.get_unit_waveforms(
self._metric_data._recording,
self._metric_data._sorting,
unit_ids = self._metric_data._unit_ids,
# max_spikes_per_unit = max_spikes_per_unit_for_noise_overlap,
**kwargs)
elif np.all(n_waveforms_per_unit >= max_spikes_per_unit_for_noise_overlap):
# waveforms computed on more spikes than needed --> sample
for i_w, wfs in enumerate(waveforms):
if len(wfs) > max_spikes_per_unit_for_noise_overlap:
selecte_idxs = np.random.permutation(len(wfs))[:max_spikes_per_unit_for_noise_overlap]
waveforms[i_w] = wfs[selecte_idxs]
# get channel idx and locations
channel_idx = np.arange(self._metric_data._recording.get_num_channels())
channel_locations = self._metric_data._channel_locations
if num_channels_to_compare > len(channel_idx):
num_channels_to_compare = len(channel_idx)
# get noise snippets
min_time = min([self._metric_data._sorting.get_unit_spike_train(unit_id=unit)[0]
for unit in self._metric_data._sorting.get_unit_ids()])
max_time = max([self._metric_data._sorting.get_unit_spike_train(unit_id=unit)[-1]
for unit in self._metric_data._sorting.get_unit_ids()])
max_spikes = np.max([len(self._metric_data._sorting.get_unit_spike_train(u)) for u in self._metric_data._unit_ids])
if max_spikes < max_spikes_per_unit_for_noise_overlap:
max_spikes_per_unit_for_noise_overlap = max_spikes
times_control = np.random.choice(np.arange(min_time, max_time),
size=max_spikes_per_unit_for_noise_overlap, replace=False)
clip_size = waveforms[0].shape[-1]
# np.array, (n_spikes, n_channels, n_timepoints)
clips_control_max = np.stack(self._metric_data._recording.get_snippets(snippet_len=clip_size,
reference_frames=times_control))
noise_overlaps = []
for i_u, unit in enumerate(self._metric_data._unit_ids):
# show progress bar
if self._metric_data.verbose:
printProgressBar(i_u + 1, len(self._metric_data._unit_ids))
# get spike and noise snippets
# np.array, (n_spikes, n_channels, n_timepoints)
clips = waveforms[i_u]
clips_control = clips_control_max
# make noise snippets size equal to number of spikes
if len(clips) < max_spikes_per_unit_for_noise_overlap:
selected_idxs = np.random.choice(np.arange(max_spikes_per_unit_for_noise_overlap),
size=len(clips), replace=False)
clips_control = clips_control[selected_idxs]
else:
selected_idxs = np.random.choice(np.arange(len(clips)),
size=max_spikes_per_unit_for_noise_overlap,
replace=False)
clips = clips[selected_idxs]
num_clips = len(clips)
# compute weight for correcting noise snippets
template = np.median(clips, axis=0)
chmax, tmax = np.unravel_index(np.argmax(np.abs(template)), template.shape)
max_val = template[chmax, tmax]
weighted_clips_control = np.zeros(clips_control.shape)
weights = np.zeros(num_clips)
for j in range(num_clips):
clip0 = clips_control[j, :, :]
val0 = clip0[chmax, tmax]
weight0 = val0 * max_val
weights[j] = weight0
weighted_clips_control[j, :, :] = clip0 * weight0
noise_template = np.sum(weighted_clips_control, axis=0)
noise_template = noise_template / np.sum(np.abs(noise_template)) * np.sum(np.abs(template))
# subtract it out
for j in range(num_clips):
clips[j, :, :] = _subtract_clip_component(clips[j, :, :], noise_template)
clips_control[j, :, :] = _subtract_clip_component(clips_control[j, :, :], noise_template)
# use only subsets of channels that are closest to peak channel
channels_to_use = find_neighboring_channels(chmax, channel_idx,
num_channels_to_compare, channel_locations)
channels_to_use = np.sort(channels_to_use)
clips = clips[:,channels_to_use,:]
clips_control = clips_control[:,channels_to_use,:]
all_clips = np.concatenate([clips, clips_control], axis=0)
num_channels_wfs = all_clips.shape[1]
num_samples_wfs = all_clips.shape[2]
all_features = _compute_pca_features(all_clips.reshape((num_clips * 2,
num_channels_wfs * num_samples_wfs)), num_features)
num_all_clips=len(all_clips)
distances, indices = NearestNeighbors(n_neighbors=min(num_knn + 1, num_all_clips - 1), algorithm='auto').fit(
all_features.T).kneighbors()
group_id = np.zeros((num_clips * 2))
group_id[0:num_clips] = 1
group_id[num_clips:] = 2
num_match = 0
total = 0
for j in range(num_clips * 2):
for k in range(1, min(num_knn + 1, num_all_clips - 1)):
ind = indices[j][k]
if group_id[j] == group_id[ind]:
num_match = num_match + 1
total = total + 1
pct_match = num_match / total
noise_overlap = 1 - pct_match
noise_overlaps.append(noise_overlap)
noise_overlaps = np.asarray(noise_overlaps)
if save_property_or_features:
self.save_property_or_features(self._metric_data._sorting, noise_overlaps, self._metric_name)
return noise_overlaps
def threshold_metric(self, threshold, threshold_sign, num_channels_to_compare,
max_spikes_per_unit_for_noise_overlap,
num_features, num_knn, **kwargs):
noise_overlaps = self.compute_metric(num_channels_to_compare,
max_spikes_per_unit_for_noise_overlap,
num_features, num_knn, **kwargs)
threshold_curator = ThresholdCurator(sorting=self._metric_data._sorting, metric=noise_overlaps)
threshold_curator.threshold_sorting(threshold=threshold, threshold_sign=threshold_sign)
return threshold_curator
def _compute_pca_features(X, num_components):
u, s, vt = np.linalg.svd(X)
return u[:, :num_components].T
def _subtract_clip_component(clip1, component):
V1 = clip1.flatten()
V2 = component.flatten()
V1 = V1 - np.mean(V1)
V2 = V2 - np.mean(V2)
V1 = V1 - V2 * np.dot(V1, V2) / np.dot(V2, V2)
return V1.reshape(clip1.shape)
|
[
"spikemetrics.metrics.find_neighboring_channels",
"numpy.random.seed",
"numpy.sum",
"numpy.concatenate",
"numpy.abs",
"numpy.median",
"numpy.asarray",
"spiketoolkit.postprocessing.get_unit_waveforms",
"numpy.zeros",
"numpy.sort",
"numpy.linalg.svd",
"numpy.mean",
"numpy.arange",
"collections.OrderedDict",
"numpy.dot",
"numpy.all"
] |
[((611, 754), 'collections.OrderedDict', 'OrderedDict', (["[('num_channels_to_compare', 13), ('max_spikes_per_unit_for_noise_overlap',\n 1000), ('num_features', 10), ('num_knn', 6)]"], {}), "([('num_channels_to_compare', 13), (\n 'max_spikes_per_unit_for_noise_overlap', 1000), ('num_features', 10), (\n 'num_knn', 6)])\n", (622, 754), False, 'from collections import OrderedDict\n'), ((9602, 9618), 'numpy.linalg.svd', 'np.linalg.svd', (['X'], {}), '(X)\n', (9615, 9618), True, 'import numpy as np\n'), ((2048, 2195), 'spiketoolkit.postprocessing.get_unit_waveforms', 'st.postprocessing.get_unit_waveforms', (['self._metric_data._recording', 'self._metric_data._sorting'], {'unit_ids': 'self._metric_data._unit_ids'}), '(self._metric_data._recording, self.\n _metric_data._sorting, unit_ids=self._metric_data._unit_ids, **kwargs)\n', (2084, 2195), True, 'import spiketoolkit as st\n'), ((2457, 2525), 'numpy.all', 'np.all', (['(n_waveforms_per_unit < max_spikes_per_unit_for_noise_overlap)'], {}), '(n_waveforms_per_unit < max_spikes_per_unit_for_noise_overlap)\n', (2463, 2525), True, 'import numpy as np\n'), ((8666, 8692), 'numpy.asarray', 'np.asarray', (['noise_overlaps'], {}), '(noise_overlaps)\n', (8676, 8692), True, 'import numpy as np\n'), ((9772, 9783), 'numpy.mean', 'np.mean', (['V1'], {}), '(V1)\n', (9779, 9783), True, 'import numpy as np\n'), ((9798, 9809), 'numpy.mean', 'np.mean', (['V2'], {}), '(V2)\n', (9805, 9809), True, 'import numpy as np\n'), ((1849, 1869), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1863, 1869), True, 'import numpy as np\n'), ((2751, 2898), 'spiketoolkit.postprocessing.get_unit_waveforms', 'st.postprocessing.get_unit_waveforms', (['self._metric_data._recording', 'self._metric_data._sorting'], {'unit_ids': 'self._metric_data._unit_ids'}), '(self._metric_data._recording, self.\n _metric_data._sorting, unit_ids=self._metric_data._unit_ids, **kwargs)\n', (2787, 2898), True, 'import spiketoolkit as st\n'), ((3073, 3142), 
'numpy.all', 'np.all', (['(n_waveforms_per_unit >= max_spikes_per_unit_for_noise_overlap)'], {}), '(n_waveforms_per_unit >= max_spikes_per_unit_for_noise_overlap)\n', (3079, 3142), True, 'import numpy as np\n'), ((4446, 4475), 'numpy.arange', 'np.arange', (['min_time', 'max_time'], {}), '(min_time, max_time)\n', (4455, 4475), True, 'import numpy as np\n'), ((6082, 6106), 'numpy.median', 'np.median', (['clips'], {'axis': '(0)'}), '(clips, axis=0)\n', (6091, 6106), True, 'import numpy as np\n'), ((6276, 6305), 'numpy.zeros', 'np.zeros', (['clips_control.shape'], {}), '(clips_control.shape)\n', (6284, 6305), True, 'import numpy as np\n'), ((6328, 6347), 'numpy.zeros', 'np.zeros', (['num_clips'], {}), '(num_clips)\n', (6336, 6347), True, 'import numpy as np\n'), ((6650, 6688), 'numpy.sum', 'np.sum', (['weighted_clips_control'], {'axis': '(0)'}), '(weighted_clips_control, axis=0)\n', (6656, 6688), True, 'import numpy as np\n'), ((7166, 7259), 'spikemetrics.metrics.find_neighboring_channels', 'find_neighboring_channels', (['chmax', 'channel_idx', 'num_channels_to_compare', 'channel_locations'], {}), '(chmax, channel_idx, num_channels_to_compare,\n channel_locations)\n', (7191, 7259), False, 'from spikemetrics.metrics import find_neighboring_channels\n'), ((7322, 7346), 'numpy.sort', 'np.sort', (['channels_to_use'], {}), '(channels_to_use)\n', (7329, 7346), True, 'import numpy as np\n'), ((7482, 7528), 'numpy.concatenate', 'np.concatenate', (['[clips, clips_control]'], {'axis': '(0)'}), '([clips, clips_control], axis=0)\n', (7496, 7528), True, 'import numpy as np\n'), ((8063, 8086), 'numpy.zeros', 'np.zeros', (['(num_clips * 2)'], {}), '(num_clips * 2)\n', (8071, 8086), True, 'import numpy as np\n'), ((9846, 9860), 'numpy.dot', 'np.dot', (['V2', 'V2'], {}), '(V2, V2)\n', (9852, 9860), True, 'import numpy as np\n'), ((9829, 9843), 'numpy.dot', 'np.dot', (['V1', 'V2'], {}), '(V1, V2)\n', (9835, 9843), True, 'import numpy as np\n'), ((5482, 5530), 'numpy.arange', 
'np.arange', (['max_spikes_per_unit_for_noise_overlap'], {}), '(max_spikes_per_unit_for_noise_overlap)\n', (5491, 5530), True, 'import numpy as np\n'), ((6160, 6176), 'numpy.abs', 'np.abs', (['template'], {}), '(template)\n', (6166, 6176), True, 'import numpy as np\n'), ((6775, 6791), 'numpy.abs', 'np.abs', (['template'], {}), '(template)\n', (6781, 6791), True, 'import numpy as np\n'), ((6742, 6764), 'numpy.abs', 'np.abs', (['noise_template'], {}), '(noise_template)\n', (6748, 6764), True, 'import numpy as np\n')]
|
"""
FLAME - Fuzzy clustering by Local Approximation of MEmbership
"""
from __future__ import print_function
import numpy as np
from scipy import sparse
from scipy.sparse import csr_matrix, lil_matrix
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.utils import check_array
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.preprocessing import normalize
from math import sqrt
# __author__ = "<NAME>"
'''
IMPORTANT!!!
I DID NOT DO THIS!! CREDIT GOES TO Matthew Billson github link: https://github.com/yclicc/FLAME-python
'''
class FLAME(BaseEstimator, ClusterMixin):
def __init__(self, metric="euclidean", cluster_neighbors=5, iteration_neighbors=5, max_iter=np.inf, eps=1e-10, thd=-2, verbose=0):
self.metric = metric
self.cluster_neighbors = cluster_neighbors
self.iteration_neighbors = iteration_neighbors
self.max_iter = max_iter
self.eps = eps
self.thd = thd
self.verbose = verbose
def _get_nearest(self, distances, n_neighbors, n_samples):
# Make a numpy arange for iteration purposes.
sample_range = np.arange(n_samples)[:, None]
# Do an introsort on each row of the distances matrix to put the nth smallest distance in the nth position and all
# smaller elements before it. Then keep only the first n+1 elements (including the element itself which will have
# distance 0 from itself and is removed later).
nearest_np = np.argpartition(distances, n_neighbors, axis=1)
nearest_np = nearest_np[:, :n_neighbors + 1]
# Find the largest distance of the kth closest points.
largest_distance = distances[sample_range, nearest_np[sample_range, -1]]
# Make two arrays of sets the first containing only the n nearest other elements to each element not
# including the element itself and the second containing the same plus any other elements tied for nth nearest
# again excluding the element itself (though if there are k other elements all 0 distance away other problems
# will result).
nearest = []
nearest_with_ties = []
for i in range(n_samples):
ties_for_largest_distance = np.where(distances[i] == largest_distance[i])
nearest.append(set(nearest_np[i, :].tolist()))
print(nearest)
print(i)
print(nearest_np[i])
nearest[-1].remove(i)
ties_for_largest_distance = set(ties_for_largest_distance[0].tolist())
ties_for_largest_distance.discard(i)
nearest_with_ties.append(nearest[i] | ties_for_largest_distance)
return nearest, nearest_with_ties
def _get_densities(self, distances, nearest, n_samples):
# Make a numpy arange for iteration purposes.
sample_range = np.arange(n_samples)[:, None]
nearest_np = np.array([list(s) for s in nearest])
n_shortest_distances = distances[sample_range, nearest_np]
local_distance_sums = n_shortest_distances.sum(axis=1)
largest_local_sum = local_distance_sums.max(axis=0)
densities = np.asarray(largest_local_sum / local_distance_sums)
return densities
def _get_supports(self, densities, nearest_with_ties, n_samples):
density_sum = densities.sum()
density_mean = density_sum / n_samples
density_sum2 = (densities * densities).sum()
thd = density_mean + self.thd * sqrt(density_sum2 / n_samples - density_mean * density_mean)
csos = []
outliers = []
remaining = []
for i in range(n_samples):
if densities[i] < thd:
outliers.append(i)
elif densities[i] > densities[list(nearest_with_ties[i])].max():
csos.append(i)
else:
remaining.append(i)
return csos, outliers, remaining
def _get_weights(self, distances, nearest_with_ties, fixed, n_samples):
nearest_with_ties = [sorted(list(s)) for s in nearest_with_ties]
weights = lil_matrix((n_samples, n_samples))
for i in range(n_samples):
if i in fixed:
weights[i, i] = 1
else:
for j in nearest_with_ties[i]:
weights[i, j] = distances[i, j]
if self.verbose: print("Assigned weights {0}.".format(i))
weights = weights.tocsr()
weights = normalize(weights, norm='l1', axis=1, copy=False)
return weights
def _get_starting_membership(self, csos, outliers, fixed, n_samples):
M = len(csos) + 1
starting_membership = np.zeros(shape=(n_samples, M))
general_row = np.ndarray(shape=(1, M))
general_row.fill(1. / M)
for i in range(n_samples):
if i not in fixed:
starting_membership[i, :] = general_row
for index, value in enumerate(csos):
starting_membership[value, index] = 1
for i in outliers:
starting_membership[i, -1] = 1
return starting_membership
def _flame(self, X):
"""
Pass Numpy or Pandas array of data as X. As metric pass any string as in sklearn.metrics.pairwise.pairwise_distances
or a callable on pairs of members of X. FLAME is computed with n_neighbors until max_iter or convergence up to eps.
thd is the threshold for outliers: Any element which has less than mean(density) + thd * std(density) will be an outlier.
"""
if sparse.issparse(X) and self.metric not in {"precomputed", "cityblock", "cosine", "euclidean", "l1", "l2",
"manhattan"} and not callable(self.metric):
raise TypeError("The metric {0} does not support sparse data.".format(self.metric))
# Convert pandas objects to numpy arrays.
if 'pandas' in str(X.__class__):
X = X.values
X = check_array(X, accept_sparse="csr", dtype=None)
# Get the number of samples. We use this a lot.
n_samples, _ = X.shape
distances = pairwise_distances(X, metric=self.metric)
nearest, nearest_with_ties = self._get_nearest(distances, self.cluster_neighbors, n_samples)
if self.verbose: print("Got distances and nearest.")
densities = self._get_densities(distances, nearest, n_samples)
if self.verbose: print("Got densities.")
csos, outliers, remaining = self._get_supports(densities, nearest_with_ties, n_samples)
if self.verbose: print("Got suppports.")
if self.verbose: print("There are {0} clusters and {1} outliers.".format(len(csos), len(outliers)))
fixed = set(csos) | set(outliers)
_, nearest_with_ties_for_iteration = self._get_nearest(distances, self.iteration_neighbors, n_samples)
weights = self._get_weights(distances, nearest_with_ties_for_iteration, fixed, n_samples)
if self.verbose: print("Got weights.")
membership_proba = self._get_starting_membership(csos, outliers, fixed, n_samples)
if self.verbose: print("Got starting memberships.")
i = 0
while i < self.max_iter:
lastMembership = membership_proba.copy()
membership_proba = weights.dot(membership_proba)
delta = np.absolute(membership_proba - lastMembership).max()
i += 1
if self.verbose: print("Done iteration {0}.".format(i))
if delta < self.eps:
break
num_clusters = membership_proba.shape[1] - 1
# Get cluster assignment.
pred = np.argmax(membership_proba, axis=1)
# Replace predictions of the outlier group with -1.
pred[pred == num_clusters] = -1
return membership_proba, pred, csos, outliers, densities
def fit(self, X):
self.membership_proba_, self.labels_, self.csos_, self.outliers_, self.densities_ = \
self._flame(X)
return self
def fit_predict(self, X, y=None):
y = self.fit(X).labels_
return y
def fit_predict_proba(self, X, y=None):
y = self.fit(X).membership_proba_
return y
if __name__== "__main__":
X = np.array(
[[0, 0, 0], [1.1, 0, 0], [0, 0.8, 0], [0, 0, 1.3], [10, 10, 10], [11.1, 10, 10], [10, 10.8, 10], [10, 11, 12]])
print(X)
model = FLAME(cluster_neighbors=3, iteration_neighbors=3,verbose=1)
membership = model.fit_predict(X)
print(membership
)
|
[
"numpy.absolute",
"sklearn.metrics.pairwise.pairwise_distances",
"math.sqrt",
"numpy.argmax",
"sklearn.utils.check_array",
"scipy.sparse.issparse",
"numpy.asarray",
"numpy.zeros",
"numpy.argpartition",
"scipy.sparse.lil_matrix",
"numpy.where",
"numpy.array",
"sklearn.preprocessing.normalize",
"numpy.arange",
"numpy.ndarray"
] |
[((7213, 7338), 'numpy.array', 'np.array', (['[[0, 0, 0], [1.1, 0, 0], [0, 0.8, 0], [0, 0, 1.3], [10, 10, 10], [11.1, 10,\n 10], [10, 10.8, 10], [10, 11, 12]]'], {}), '([[0, 0, 0], [1.1, 0, 0], [0, 0.8, 0], [0, 0, 1.3], [10, 10, 10], [\n 11.1, 10, 10], [10, 10.8, 10], [10, 11, 12]])\n', (7221, 7338), True, 'import numpy as np\n'), ((1404, 1451), 'numpy.argpartition', 'np.argpartition', (['distances', 'n_neighbors'], {'axis': '(1)'}), '(distances, n_neighbors, axis=1)\n', (1419, 1451), True, 'import numpy as np\n'), ((2862, 2913), 'numpy.asarray', 'np.asarray', (['(largest_local_sum / local_distance_sums)'], {}), '(largest_local_sum / local_distance_sums)\n', (2872, 2913), True, 'import numpy as np\n'), ((3647, 3681), 'scipy.sparse.lil_matrix', 'lil_matrix', (['(n_samples, n_samples)'], {}), '((n_samples, n_samples))\n', (3657, 3681), False, 'from scipy.sparse import csr_matrix, lil_matrix\n'), ((3933, 3982), 'sklearn.preprocessing.normalize', 'normalize', (['weights'], {'norm': '"""l1"""', 'axis': '(1)', 'copy': '(False)'}), "(weights, norm='l1', axis=1, copy=False)\n", (3942, 3982), False, 'from sklearn.preprocessing import normalize\n'), ((4116, 4146), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_samples, M)'}), '(shape=(n_samples, M))\n', (4124, 4146), True, 'import numpy as np\n'), ((4163, 4187), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(1, M)'}), '(shape=(1, M))\n', (4173, 4187), True, 'import numpy as np\n'), ((5228, 5275), 'sklearn.utils.check_array', 'check_array', (['X'], {'accept_sparse': '"""csr"""', 'dtype': 'None'}), "(X, accept_sparse='csr', dtype=None)\n", (5239, 5275), False, 'from sklearn.utils import check_array\n'), ((5365, 5406), 'sklearn.metrics.pairwise.pairwise_distances', 'pairwise_distances', (['X'], {'metric': 'self.metric'}), '(X, metric=self.metric)\n', (5383, 5406), False, 'from sklearn.metrics.pairwise import pairwise_distances\n'), ((6695, 6730), 'numpy.argmax', 'np.argmax', (['membership_proba'], {'axis': '(1)'}), 
'(membership_proba, axis=1)\n', (6704, 6730), True, 'import numpy as np\n'), ((1076, 1096), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (1085, 1096), True, 'import numpy as np\n'), ((2077, 2122), 'numpy.where', 'np.where', (['(distances[i] == largest_distance[i])'], {}), '(distances[i] == largest_distance[i])\n', (2085, 2122), True, 'import numpy as np\n'), ((2594, 2614), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (2603, 2614), True, 'import numpy as np\n'), ((4875, 4893), 'scipy.sparse.issparse', 'sparse.issparse', (['X'], {}), '(X)\n', (4890, 4893), False, 'from scipy import sparse\n'), ((3155, 3215), 'math.sqrt', 'sqrt', (['(density_sum2 / n_samples - density_mean * density_mean)'], {}), '(density_sum2 / n_samples - density_mean * density_mean)\n', (3159, 3215), False, 'from math import sqrt\n'), ((6455, 6501), 'numpy.absolute', 'np.absolute', (['(membership_proba - lastMembership)'], {}), '(membership_proba - lastMembership)\n', (6466, 6501), True, 'import numpy as np\n')]
|
import json
import os
import subprocess
import time
# Prompt for session parameters and launch the selected expfactory tasks,
# one subprocess per task.  (Python 2 script: uses raw_input.)
subid = raw_input('Enter subject id (i.e. s999): ')
training = raw_input('Enter 0 for training, 1 for main tasks: ')
if training == '1':
    run_file = 'scanner_tasks_order1'
else:
    run_file = 'practice_tasks'
taskset = raw_input('Enter task group (1, 2 or 3): ')
if taskset == '1':
    tasks = ['stop_signal', 'attention_network_task', 'twobytwo']
elif taskset == '2':
    tasks = ['motor_selective_stop_signal', 'stroop', 'discount_fixed']
elif taskset == '3':
    tasks = ['dot_pattern_expectancy', 'columbia_card_task_hot',
             'ward_and_allport']
else:
    # The prompt asks for a task group; the original message incorrectly
    # said "session number".
    raise ValueError('Invalid task group')
print('\n'.join(tasks))
# Use a context manager so the file handle is closed; the original leaked
# the handle returned by open().
with open('temp_tasklist.json', 'w') as task_file:
    json.dump(tasks, task_file)
for task in tasks:
    print('***************************************************************')
    if os.name == 'posix':
        subprocess.call("expfactory --run --folder {0} --battery expfactory-battery/ "
            "--experiments {1} --subid {2} &".format(run_file, task, subid), shell=True)
    else:
        subprocess.call("start expfactory --run --folder {0} --battery expfactory-battery/ "
            "--experiments {1} --subid {2}".format(run_file, task, subid), shell=True)
    time.sleep(1)  # stagger launches so each task window comes up cleanly
|
[
"time.sleep"
] |
[((1264, 1277), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1274, 1277), False, 'import time\n')]
|
'''OpenGL extension ARB.sync
This module customises the behaviour of the
OpenGL.raw.GL.ARB.sync to provide a more
Python-friendly API
Overview (from the spec)
This extension introduces the concept of "sync objects". Sync
objects are a synchronization primitive - a representation of events
whose completion status can be tested or waited upon. One specific
type of sync object, the "fence sync object", is supported in this
extension, and additional types can easily be added in the future.
Fence sync objects have corresponding fences, which are inserted
into the OpenGL command stream at the time the sync object is
created. A sync object can be queried for a given condition. The
only condition supported for fence sync objects is completion of the
corresponding fence command. Fence completion allows applications to
request a partial Finish, wherein all commands prior to the fence
will be forced to complete before control is returned to the calling
process.
These new mechanisms allow for synchronization between the host CPU
and the GPU, which may be accessing the same resources (typically
memory), as well as between multiple GL contexts bound to multiple
threads in the host CPU.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/sync.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ARB.sync import *
### END AUTOGENERATED SECTION
from OpenGL.constants import GLint
from OpenGL.arrays import GLintArray
from OpenGL.lazywrapper import lazy
def glGetSync( sync, pname, bufSize=1,length=None,values=None ):
    """Query a sync object, allocating output storage on demand.

    sync -- GLsync pointer identifying the sync object
    pname -- parameter constant to query (see glGetSynciv)
    bufSize -- maximum number of values to fetch; all currently defined
        constants yield exactly one value, hence the default of 1
    length -- optional GLint() instance (ONLY!) that receives the count
        of values actually written; allocated automatically when omitted
    values -- optional output array with at least bufSize entries;
        a zero-filled GLintArray is created when omitted

    Returns the slice of ``values`` that glGetSynciv actually filled,
    currently always a single-element array.
    """
    if length is None:
        length = GLint()
    if values is None:
        values = GLintArray.zeros( (bufSize,) )
    glGetSynciv( sync, pname, bufSize, length, values )
    return values[:length.value]
|
[
"OpenGL.constants.GLint",
"OpenGL.arrays.GLintArray.zeros"
] |
[((2478, 2506), 'OpenGL.arrays.GLintArray.zeros', 'GLintArray.zeros', (['(bufSize,)'], {}), '((bufSize,))\n', (2494, 2506), False, 'from OpenGL.arrays import GLintArray\n'), ((2549, 2556), 'OpenGL.constants.GLint', 'GLint', ([], {}), '()\n', (2554, 2556), False, 'from OpenGL.constants import GLint\n')]
|
# -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
#--- Resource declaration ---
@attr.s
class ResourceShare(Resource):
    """
    AWS Object Type = "AWS::RAM::ResourceShare"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html
    Property Document:
    - ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-name
    - ``p_AllowExternalPrincipals``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-allowexternalprincipals
    - ``p_PermissionArns``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-permissionarns
    - ``p_Principals``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-principals
    - ``p_ResourceArns``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-resourcearns
    - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-tags
    """
    AWS_OBJECT_TYPE = "AWS::RAM::ResourceShare"

    # Naming convention used throughout this generated module:
    #   rp_* = required CloudFormation property, p_* = optional property.
    # Required property: the name of the resource share.
    rp_Name: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Name"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-name"""
    # Optional: whether principals outside the AWS organization may join.
    p_AllowExternalPrincipals: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "AllowExternalPrincipals"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-allowexternalprincipals"""
    # Optional: ARNs of the RAM permissions to associate with the share.
    p_PermissionArns: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "PermissionArns"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-permissionarns"""
    # Optional: principals (account IDs, OU/organization ARNs) to share with.
    p_Principals: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Principals"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-principals"""
    # Optional: ARNs of the resources being shared.
    p_ResourceArns: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "ResourceArns"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-resourcearns"""
    # Optional: resource tags; plain dicts are converted via Tag.from_list.
    p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
        default=None,
        converter=Tag.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Tags"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-tags"""

    @property
    def rv_Arn(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#aws-resource-ram-resourceshare-return-values"""
        return GetAtt(resource=self, attr_name="Arn")
|
[
"attr.validators.instance_of"
] |
[((1601, 1658), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (1628, 1658), False, 'import attr\n'), ((1969, 2002), 'attr.validators.instance_of', 'attr.validators.instance_of', (['bool'], {}), '(bool)\n', (1996, 2002), False, 'import attr\n'), ((2421, 2478), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (2448, 2478), False, 'import attr\n'), ((2499, 2532), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (2526, 2532), False, 'import attr\n'), ((2930, 2987), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (2957, 2987), False, 'import attr\n'), ((3008, 3041), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (3035, 3041), False, 'import attr\n'), ((3433, 3490), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (3460, 3490), False, 'import attr\n'), ((3511, 3544), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (3538, 3544), False, 'import attr\n'), ((3966, 3998), 'attr.validators.instance_of', 'attr.validators.instance_of', (['Tag'], {}), '(Tag)\n', (3993, 3998), False, 'import attr\n'), ((4019, 4052), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (4046, 4052), False, 'import attr\n')]
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import os
import time
import shutil
import bpy
import mathutils
def write_objc(filepath, context):
    """Export a mesh from the current Blender scene as Objective-C vertex data.

    The mesh is triangulated in place (quads converted to tris), then a
    static C array of per-vertex data is written to *filepath* together
    with commented-out OpenGL ES client drawing code.  The array layout
    depends on the mesh: textured (position/normal/UV), coloured
    (position/normal/RGBA) or plain (position/normal).

    NOTE(review): if the scene contains several MESH objects, only the last
    one found is exported, and the triangulation permanently modifies it.
    """
    out = open(filepath, 'w')
    current_scene = bpy.context.scene
    objs = current_scene.objects
    # Pick a mesh object; when several exist the last one wins.
    for next_obj in objs:
        if next_obj.type == 'MESH':
            mesh = next_obj

    print("Writing Object")
    # Make the chosen mesh the only selected, active object, then
    # triangulate it via the edit-mode operators.
    for i in current_scene.objects:
        i.select = False  # deselect all objects
    mesh.select = True
    current_scene.objects.active = mesh  # set the mesh object to current
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_all(action='SELECT')  # select all faces/vertices/edges
    bpy.ops.mesh.quads_convert_to_tris()
    current_scene.update()
    bpy.ops.object.mode_set(mode='OBJECT')

    mesh = mesh.data
    objectname = mesh.name
    basename = objectname.capitalize()
    out.write('#import "OpenGLCommon.h"\n\n\n')
    if len(mesh.uv_textures) > 0:
        # Textured mesh: position + normal + UV per vertex.
        out.write('static const TexturedVertexData3D %sVertexData[] = {\n' % basename)
        uv_layer = mesh.active_uv_texture
        for face in mesh.faces:
            faceUV = uv_layer.data[face.index]
            i = 0
            for index in face.vertices:
                if len(face.vertices) == 3:
                    vert = mesh.vertices[index]
                    out.write('\t{/*v:*/{%f, %f, %f}, ' % (vert.co.x, vert.co.y, vert.co.z))
                    out.write('/*n:*/{%f, %f, %f}, ' % (vert.normal.x, vert.normal.y, vert.normal.z))
                    out.write('/*t:*/{%f, %f}' % (faceUV.uv[i][0], faceUV.uv[i][1]))
                    out.write('},\n')
                    i += 1
        out.write('};\n\n')
    elif len(mesh.vertex_colors) > 0:
        # Coloured mesh: position + normal + RGBA per vertex.
        out.write('static const ColoredVertexData3D %sVertexData[] = {\n' % basename)
        color_layer = mesh.active_vertex_color
        for face in mesh.faces:
            if len(face.vertices) == 3:
                faceC = color_layer.data[face.index]
                i = 0
                for index in face.vertices:
                    vert = mesh.vertices[index]
                    out.write('\t{/*v:*/{%f, %f, %f}, ' % (vert.co.x, vert.co.y, vert.co.z))
                    out.write('/*n:*/{%f, %f, %f}, ' % (vert.normal.x, vert.normal.y, vert.normal.z))
                    out.write('/*c:*/{%f, %f, %f, %f}' % (faceC.color1[i], faceC.color2[i], faceC.color3[i], faceC.color4[i]))
                    out.write('},\n')
                    i += 1
        out.write('};\n\n')
    else:
        # Plain mesh: position + normal per vertex.
        # (The original had a bare ``out.write`` here -- an attribute access
        # with no call, i.e. a dead no-op statement -- which was removed.)
        out.write('static const VertexData3D %sVertexData[] = {\n' % basename)
        for face in mesh.faces:
            if len(face.vertices) == 3:
                for index in face.vertices:
                    vert = mesh.vertices[index]
                    out.write('\t{/*v:*/{%f, %f, %f}, ' % (vert.co.x, vert.co.y, vert.co.z))
                    out.write('/*n:*/{%f, %f, %f} ' % (vert.normal.x, vert.normal.y, vert.normal.z))
                    out.write('},\n')
        out.write('};\n\n')

    # Vertex count plus commented-out client drawing code for the consumer.
    out.write('#define k%sNumberOfVertices\t%i\n' % (basename, len(mesh.faces) * 3))
    out.write('// Drawing Code:\n')
    out.write('// glEnableClientState(GL_VERTEX_ARRAY);\n')
    if len(mesh.uv_textures) > 0:
        out.write('// glEnableClientState(GL_TEXTURE_COORD_ARRAY);\n')
    elif len(mesh.vertex_colors) > 0:
        out.write('// glEnableClientState(GL_COLOR_ARRAY);\n')
        out.write('// glEnable(GL_COLOR_MATERIAL)\n')
    out.write('// glEnableClientState(GL_NORMAL_ARRAY);\n')
    out.write('// glVertexPointer(3, GL_FLOAT, sizeof(')
    if len(mesh.uv_textures) > 0:
        out.write('TexturedVertexData3D')
    elif len(mesh.vertex_colors) > 0:
        out.write('ColoredVertexData3D')
    else:
        out.write('VertexData3D')
    out.write('), &%sVertexData[0].vertex);\n' % basename)
    out.write('// glNormalPointer(GL_FLOAT, sizeof(')
    if len(mesh.uv_textures) > 0:
        out.write('TexturedVertexData3D')
    elif len(mesh.vertex_colors) > 0:
        out.write('ColoredVertexData3D')
    else:
        out.write('VertexData3D')
    out.write('), &%sVertexData[0].normal);\n' % basename)
    if len(mesh.uv_textures) > 0:
        out.write('// glTexCoordPointer(2, GL_FLOAT, sizeof(TexturedVertexData3D), &%sVertexData[0].texCoord);\n' % basename)
    elif len(mesh.vertex_colors) > 0:
        out.write('// glColorPointer(4, GL_FLOAT, sizeof(ColoredVertexData3D), &%sVertexData[0].color);\n' % basename)
    out.write('// glDrawArrays(GL_TRIANGLES, 0, k%sNumberOfVertices);\n' % basename)
    out.write('// glDisableClientState(GL_VERTEX_ARRAY);\n')
    if len(mesh.uv_textures) > 0:
        out.write('// glDisableClientState(GL_TEXTURE_COORD_ARRAY);\n')
    elif len(mesh.vertex_colors) > 0:
        out.write('// glDisableClientState(GL_NORMAL_ARRAY);\n')
        out.write('// glDisable(GL_COLOR_MATERIAL);\n')
    out.write('// glDisableClientState(GL_NORMAL_ARRAY);\n\n\n')
    out.close()
out.close()
def save(operator, context, filepath="",
         use_triangles=False, use_edges=True, use_normals=False,
         use_hq_normals=False, use_uvs=True, use_materials=True,
         copy_images=False, use_modifiers=True, use_rotate_x90=True,
         use_blen_objects=True, group_by_object=False,
         group_by_material=False, keep_vertex_order=False,
         use_vertex_groups=False, use_nurbs=True, use_selection=True,
         use_all_scenes=False, use_animation=False):
    """Operator entry point: export the scene through :func:`write_objc`.

    Only ``filepath`` and ``context`` are consumed; the remaining keyword
    options exist for exporter-interface compatibility and are currently
    ignored.
    """
    write_objc(filepath, context)
    return {'FINISHED'}
|
[
"bpy.ops.mesh.select_all",
"bpy.ops.mesh.quads_convert_to_tris",
"bpy.ops.object.mode_set"
] |
[((1420, 1456), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""EDIT"""'}), "(mode='EDIT')\n", (1443, 1456), False, 'import bpy\n'), ((1472, 1512), 'bpy.ops.mesh.select_all', 'bpy.ops.mesh.select_all', ([], {'action': '"""SELECT"""'}), "(action='SELECT')\n", (1495, 1512), False, 'import bpy\n'), ((1549, 1585), 'bpy.ops.mesh.quads_convert_to_tris', 'bpy.ops.mesh.quads_convert_to_tris', ([], {}), '()\n', (1583, 1585), False, 'import bpy\n'), ((1628, 1666), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""OBJECT"""'}), "(mode='OBJECT')\n", (1651, 1666), False, 'import bpy\n')]
|
import copy
import numpy as np
import tensorflow as tf
from ammf.utils.wavedata.tools.obj_detection import obj_utils
from ammf.utils.wavedata.tools.obj_detection import evaluation
from ammf.core import anchor_projector
from ammf.core import box_3d_encoder
# Colour triples used when visualising each ground-truth difficulty bucket
# and the model predictions.  NOTE(review): channel order (RGB vs BGR)
# depends on the drawing utility consuming this map -- confirm before use.
COLOUR_SCHEME_PREDICTIONS = {
    "Easy GT": (255, 255, 0),     # Yellow
    "Medium GT": (255, 128, 0),   # Orange
    "Hard GT": (255, 0, 0),       # Red
    "Prediction": (50, 255, 50),  # Green
}
def get_gts_based_on_difficulty(dataset, img_idx):
    """Load ground-truth labels for one sample, bucketed by difficulty.

    Returns:
        Tuple (easy_gt_objs, medium_gt_objs, hard_gt_objs, all_gt_objs):
        the first three hold deep-copied, class-filtered labels kept at
        difficulty 0/1/2 and re-tagged 'Easy GT'/'Medium GT'/'Hard GT';
        all_gt_objs is every label read for the sample.
    """
    # Get all ground truth labels
    all_gt_objs = obj_utils.read_labels(dataset.label_dir, img_idx)

    # Restrict to the classes this dataset is configured for.
    gt_objs = dataset.kitti_utils.filter_labels(all_gt_objs)

    # Build one bucket per difficulty level, re-tagging each object so the
    # visualisation colour scheme can distinguish the buckets.
    buckets = []
    for difficulty, tag in enumerate(('Easy GT', 'Medium GT', 'Hard GT')):
        bucket = dataset.kitti_utils.filter_labels(
            copy.deepcopy(gt_objs), difficulty=difficulty)
        for gt_obj in bucket:
            gt_obj.type = tag
        buckets.append(bucket)

    easy_gt_objs, medium_gt_objs, hard_gt_objs = buckets
    return easy_gt_objs, medium_gt_objs, hard_gt_objs, all_gt_objs
def get_max_ious_3d(all_gt_boxes_3d, pred_boxes_3d):
    """For every ground-truth box, find its best 3D IoU with any prediction.

    Args:
        all_gt_boxes_3d: ground-truth boxes in box_3d format.
        pred_boxes_3d: predicted boxes in box_3d format (may be empty).

    Returns:
        A numpy array of length len(all_gt_boxes_3d) holding the maximum
        IoU of each ground-truth box against all predictions; all zeros
        when there are no predictions.
    """
    max_ious_3d = np.zeros(len(all_gt_boxes_3d))

    # With no detections every ground-truth box trivially scores IoU 0.
    if not pred_boxes_3d:
        return max_ious_3d

    # Convert both sets to the iou evaluation format once, up front.
    gt_objs_iou_fmt = box_3d_encoder.box_3d_to_3d_iou_format(all_gt_boxes_3d)
    pred_objs_iou_fmt = box_3d_encoder.box_3d_to_3d_iou_format(pred_boxes_3d)

    for gt_idx in range(len(all_gt_boxes_3d)):
        ious_3d = evaluation.three_d_iou(gt_objs_iou_fmt[gt_idx],
                                         pred_objs_iou_fmt)
        max_ious_3d[gt_idx] = np.amax(ious_3d)
    return max_ious_3d
def tf_project_to_image_space(anchors, calib_p2, image_shape, img_idx):
    """Convert inputs to tensors and project anchors into image space.

    Args:
        anchors: anchor boxes to project.
        calib_p2: camera calibration used by the projection op.
        image_shape: image shape used by the projection op.
        img_idx: image index (unused; kept for interface compatibility).

    Returns:
        The projected boxes evaluated to a concrete (numpy) value.
    """
    anchors_tensor = tf.convert_to_tensor(anchors, tf.float32)
    calib_p2_tensor = tf.convert_to_tensor(calib_p2, tf.float32)
    image_shape_tensor = tf.convert_to_tensor(image_shape, tf.float32)

    projected_boxes_tensor, _ = \
        anchor_projector.tf_project_to_image_space(
            anchors_tensor,
            calib_p2_tensor,
            image_shape_tensor)

    # Use the session as a context manager so its resources are released;
    # the original created a Session and never closed it.
    with tf.Session() as sess:
        projected_boxes = projected_boxes_tensor.eval(session=sess)
    return projected_boxes
|
[
"ammf.core.box_3d_encoder.box_3d_to_3d_iou_format",
"copy.deepcopy",
"tensorflow.convert_to_tensor",
"ammf.core.anchor_projector.tf_project_to_image_space",
"tensorflow.Session",
"ammf.utils.wavedata.tools.obj_detection.evaluation.three_d_iou",
"numpy.amax",
"ammf.utils.wavedata.tools.obj_detection.obj_utils.read_labels"
] |
[((632, 681), 'ammf.utils.wavedata.tools.obj_detection.obj_utils.read_labels', 'obj_utils.read_labels', (['dataset.label_dir', 'img_idx'], {}), '(dataset.label_dir, img_idx)\n', (653, 681), False, 'from ammf.utils.wavedata.tools.obj_detection import obj_utils\n'), ((2679, 2720), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['anchors', 'tf.float32'], {}), '(anchors, tf.float32)\n', (2699, 2720), True, 'import tensorflow as tf\n'), ((2743, 2785), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['calib_p2', 'tf.float32'], {}), '(calib_p2, tf.float32)\n', (2763, 2785), True, 'import tensorflow as tf\n'), ((2811, 2856), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image_shape', 'tf.float32'], {}), '(image_shape, tf.float32)\n', (2831, 2856), True, 'import tensorflow as tf\n'), ((2900, 2999), 'ammf.core.anchor_projector.tf_project_to_image_space', 'anchor_projector.tf_project_to_image_space', (['anchors_tensor', 'calib_p2_tensor', 'image_shape_tensor'], {}), '(anchors_tensor, calib_p2_tensor,\n image_shape_tensor)\n', (2942, 2999), False, 'from ammf.core import anchor_projector\n'), ((3044, 3056), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3054, 3056), True, 'import tensorflow as tf\n'), ((882, 904), 'copy.deepcopy', 'copy.deepcopy', (['gt_objs'], {}), '(gt_objs)\n', (895, 904), False, 'import copy\n'), ((984, 1006), 'copy.deepcopy', 'copy.deepcopy', (['gt_objs'], {}), '(gt_objs)\n', (997, 1006), False, 'import copy\n'), ((1084, 1106), 'copy.deepcopy', 'copy.deepcopy', (['gt_objs'], {}), '(gt_objs)\n', (1097, 1106), False, 'import copy\n'), ((1819, 1874), 'ammf.core.box_3d_encoder.box_3d_to_3d_iou_format', 'box_3d_encoder.box_3d_to_3d_iou_format', (['all_gt_boxes_3d'], {}), '(all_gt_boxes_3d)\n', (1857, 1874), False, 'from ammf.core import box_3d_encoder\n'), ((1916, 1969), 'ammf.core.box_3d_encoder.box_3d_to_3d_iou_format', 'box_3d_encoder.box_3d_to_3d_iou_format', (['pred_boxes_3d'], {}), '(pred_boxes_3d)\n', (1954, 1969), 
False, 'from ammf.core import box_3d_encoder\n'), ((2173, 2230), 'ammf.utils.wavedata.tools.obj_detection.evaluation.three_d_iou', 'evaluation.three_d_iou', (['gt_obj_iou_fmt', 'pred_objs_iou_fmt'], {}), '(gt_obj_iou_fmt, pred_objs_iou_fmt)\n', (2195, 2230), False, 'from ammf.utils.wavedata.tools.obj_detection import evaluation\n'), ((2315, 2331), 'numpy.amax', 'np.amax', (['ious_3d'], {}), '(ious_3d)\n', (2322, 2331), True, 'import numpy as np\n')]
|
import argparse
import pprint
from colorama import Fore
from classroom_tools import github_utils
# Command-line interface: all four options are required.  The token must
# carry repo-scope permissions so branch protection can be modified.
parser = argparse.ArgumentParser(
    'Create a protected branch to freeze assignment submissions using the latest commit on master')
parser.add_argument(
    '--token',
    required=True,
    help='GitHub personal access token with repo permissions'
)
parser.add_argument(
    '--org_name',
    required=True,
    help='GitHub organization name'
)
parser.add_argument(
    '--repo_filter',
    required=True,
    help='Prefix to filter repositories for a given assignment or exercise'
)
parser.add_argument(
    '--branch',
    required=True,
    help='Name of protected branch'
)
def create_or_update_ref(repo, branch_name):
    """Point ``branch_name`` in ``repo`` at the current tip of master.

    If the branch already exists, any protection is removed and the ref is
    force-updated; otherwise the branch is created fresh from master.
    """
    master_branch = repo.get_branch('master')
    try:
        branch = repo.get_branch(branch_name)
        if branch.protected:
            branch.remove_protection()
        ref = repo.get_git_ref(f'heads/{branch_name}')
        ref.edit(sha=master_branch.commit.sha, force=True)
    except Exception:
        # get_branch raises when the branch does not exist yet; fall back
        # to creating it.  (The original used a bare ``except:``, which
        # would also swallow KeyboardInterrupt/SystemExit.)
        repo.create_git_ref(f'refs/heads/{branch_name}', sha=master_branch.commit.sha)
def add_push_restrictions(repo, branch_name):
    """Protect ``branch_name`` in ``repo`` so that users cannot push to it.

    Enabling protection with an empty-user push restriction effectively
    freezes the branch for collaborators.
    """
    target = repo.get_branch(branch_name)
    target.edit_protection(user_push_restrictions=[''])
def main(args):
    """Create a frozen, protected branch in every matching student repo.

    Parses *args* with the module-level parser, then iterates over the
    organization's repositories, reporting each success in green and each
    failure in red.  Raises if any repository could not be processed.
    """
    print('\n\n' + 'Creating protected branches'.center(80, '='))
    args = parser.parse_args(args)
    print('Args:\n' + ''.join(f'\t{k}: {v}\n' for k, v in vars(args).items()))
    github_utils.verify_token(args.token)
    repositories = github_utils.get_students_repositories(
        token=args.token,
        org_name=args.org_name,
        repo_filter=args.repo_filter
    )
    failures = 0
    for repo in repositories:
        try:
            create_or_update_ref(repo=repo, branch_name=args.branch)
            add_push_restrictions(repo=repo, branch_name=args.branch)
        except Exception as e:
            # Dump the repository state to ease debugging, then carry on
            # with the remaining repositories.
            print(f'{Fore.RED}Repo: {repo.full_name}')
            pprint.pprint(vars(repo))
            print(f'{Fore.RED}{e}')
            failures += 1
        else:
            print(f'{Fore.GREEN}Repo: {repo.full_name}')
    print('\nSummary:')
    print(f'\tTotal number of repositories: {len(repositories)}')
    print(f'\tTotal number failed: {failures}')
    if failures > 0:
        raise Exception(f'{Fore.RED}Couldn\'t create protected branches')
if __name__ == '__main__':
    import sys
    # Forward the command-line arguments (excluding the program name).
    main(sys.argv[1:])
|
[
"classroom_tools.github_utils.get_students_repositories",
"classroom_tools.github_utils.verify_token",
"argparse.ArgumentParser"
] |
[((109, 238), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Create a protected branch to freeze assignment submissions using the latest commit on master"""'], {}), "(\n 'Create a protected branch to freeze assignment submissions using the latest commit on master'\n )\n", (132, 238), False, 'import argparse\n'), ((1473, 1510), 'classroom_tools.github_utils.verify_token', 'github_utils.verify_token', (['args.token'], {}), '(args.token)\n', (1498, 1510), False, 'from classroom_tools import github_utils\n'), ((1530, 1645), 'classroom_tools.github_utils.get_students_repositories', 'github_utils.get_students_repositories', ([], {'token': 'args.token', 'org_name': 'args.org_name', 'repo_filter': 'args.repo_filter'}), '(token=args.token, org_name=args.\n org_name, repo_filter=args.repo_filter)\n', (1568, 1645), False, 'from classroom_tools import github_utils\n')]
|
from datetime import datetime
from pytz import timezone
from elastalert.enhancements import BaseEnhancement
from elastalert.util import ts_to_dt, pretty_ts, elastalert_logger
"""
This Class will convert the incoming Timezone object of UTC offset to Taiwan/India Standard Timezone
"""
class ConvertTzInfo(BaseEnhancement):
    """Match enhancement rewriting ``@timestamp`` from UTC into a combined
    India Standard Time / Taiwan Standard Time pretty-printed string.

    ElastAlert runs :meth:`process` against every match of a rule that
    lists this enhancement; the match dict may be modified in place.
    """

    def process(self, match):
        utc_ts = match['@timestamp']
        elastalert_logger.info("Received UTC Time %s" % (utc_ts))
        # Timestamps may arrive as strings; normalise to datetime first.
        if not isinstance(utc_ts, datetime):
            utc_ts = ts_to_dt(utc_ts)
        # Render the same instant in both target zones, India first.
        rendered = [
            pretty_ts(utc_ts.astimezone(timezone(zone)), False)
            for zone in ('Asia/Kolkata', 'Asia/Taipei')
        ]
        match['@timestamp'] = ' Or '.join(rendered)
|
[
"elastalert.util.pretty_ts",
"elastalert.util.elastalert_logger.info",
"pytz.timezone",
"elastalert.util.ts_to_dt"
] |
[((566, 634), 'elastalert.util.elastalert_logger.info', 'elastalert_logger.info', (["('Received UTC Time %s' % match['@timestamp'])"], {}), "('Received UTC Time %s' % match['@timestamp'])\n", (588, 634), False, 'from elastalert.util import ts_to_dt, pretty_ts, elastalert_logger\n'), ((778, 801), 'pytz.timezone', 'timezone', (['"""Asia/Taipei"""'], {}), "('Asia/Taipei')\n", (786, 801), False, 'from pytz import timezone\n'), ((821, 845), 'pytz.timezone', 'timezone', (['"""Asia/Kolkata"""'], {}), "('Asia/Kolkata')\n", (829, 845), False, 'from pytz import timezone\n'), ((960, 984), 'elastalert.util.pretty_ts', 'pretty_ts', (['ist_tz', '(False)'], {}), '(ist_tz, False)\n', (969, 984), False, 'from elastalert.util import ts_to_dt, pretty_ts, elastalert_logger\n'), ((1006, 1030), 'elastalert.util.pretty_ts', 'pretty_ts', (['tst_tz', '(False)'], {}), '(tst_tz, False)\n', (1015, 1030), False, 'from elastalert.util import ts_to_dt, pretty_ts, elastalert_logger\n'), ((740, 756), 'elastalert.util.ts_to_dt', 'ts_to_dt', (['utc_ts'], {}), '(utc_ts)\n', (748, 756), False, 'from elastalert.util import ts_to_dt, pretty_ts, elastalert_logger\n')]
|
import ipaddress
import socket
import json
from . import ovsdb_query
from .bridge import OvsBridge
from .port import OvsPort
from datetime import datetime, timedelta
from . import ovspy_error
import sys
import time
class OvsClient:
    """JSON-RPC client for an OVSDB server (Open vSwitch database).

    A fresh TCP connection is opened per transaction.  While waiting for a
    reply, server ``echo`` keep-alive requests (RFC 7047) are answered and
    the wait continues.  Convenience helpers inspect and mutate bridges
    and ports.
    """

    # Set to True to dump outgoing / incoming JSON-RPC traffic on stderr.
    SEND_DEBUG = False
    RECV_DEBUG = False

    def __init__(self, ovsdb_port, ovsdb_ip="127.0.0.1", timeout=5, buffer_size=4096):
        """Store connection parameters; no connection is made here."""
        self._ovsdb_ip = ipaddress.ip_address(ovsdb_ip)
        self._ovsdb_port = int(ovsdb_port)
        self._query_timeout = timeout
        self._buffer_size = buffer_size

    def _send(self, query):
        """Run one JSON-RPC transaction and return the decoded reply dict.

        Raises ovspy_error.TransactionError on timeout or when the reply
        carries an error.  The socket is always closed, even on failure
        (the original leaked it when the timeout fired).
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((str(self._ovsdb_ip), self._ovsdb_port))
            payload = json.dumps(query)
            if self.SEND_DEBUG:
                sys.stderr.write("[SEND] %s\n" % payload.encode())
            s.send(payload.encode())

            buf = bytes()
            deadline = datetime.now() + timedelta(seconds=self._query_timeout)
            while True:
                if datetime.now() >= deadline:
                    raise ovspy_error.TransactionError("Timeout")
                buf += s.recv(self._buffer_size)
                try:
                    query_result = json.loads(buf.decode())
                except json.JSONDecodeError:
                    # Partial message: keep receiving until it parses.
                    continue
                # The server may interleave "echo" keep-alives
                # (https://tools.ietf.org/html/rfc7047): answer and wait on.
                if "method" in query_result.keys() and query_result["method"] == "echo":
                    echo_reply = {
                        "method": "echo",
                        "params": query_result["params"],
                        "id": query_result["id"]
                    }
                    # BUG FIX: the original called json.loads() on this
                    # dict, which raises TypeError; json.dumps is intended.
                    s.send(json.dumps(echo_reply).encode())
                    buf = bytes()
                    continue
                break
        finally:
            s.close()
        if self.RECV_DEBUG:
            sys.stderr.write("[RECV] %s\n" % query_result)
        self._check_error(query_result)
        return query_result

    @staticmethod
    def _check_error(query_result_json):
        """Raise TransactionError if the OVSDB reply reports any error."""
        if "result" in query_result_json.keys():
            for item in query_result_json["result"]:
                if "error" in item.keys():
                    raise ovspy_error.TransactionError("[QueryError] %s" % item["details"])
        elif len(query_result_json["error"]) != 0:
            raise ovspy_error.TransactionError("[QueryError] %s" % query_result_json["error"])

    def get_ovs_raw(self):
        """Return the raw query result for the Open_vSwitch table."""
        query = ovsdb_query.Generator.get_ovs()
        return self._send(query)

    def get_uuid(self):
        """Return the UUID of the (single) Open_vSwitch table entry."""
        return self.get_ovs_raw()["result"][0]["rows"][0]["_uuid"][1]

    def get_bridge_raw(self, bridge_id=None):
        """Return all raw bridge rows, or the row matching *bridge_id* (None if absent)."""
        query = ovsdb_query.Generator.get_bridges()
        result = self._send(query)
        if bridge_id is None:
            return result["result"][0]["rows"]
        for br in result["result"][0]["rows"]:
            if br['_uuid'][1] == bridge_id:
                return br
        return None

    def get_bridges(self):
        """Return every bridge as an OvsBridge bound to this client."""
        bridges = []
        for br in self.get_bridge_raw():
            bridge = OvsBridge(br['_uuid'][1])
            bridge.set_client(self)
            bridges.append(bridge)
        return bridges

    def find_bridge(self, bridge_name):
        """Return the OvsBridge named *bridge_name*, or None."""
        for br in self.get_bridges():
            if br.get_name() == bridge_name:
                return br
        return None

    def find_port(self, port_name):
        """Return the raw port row named *port_name*, or None."""
        for p in self.get_port_raw():
            if p["name"] == port_name:
                return p
        return None

    def get_port_raw(self, port_id=None):
        """Return all raw port rows, or the row matching *port_id* (None if absent)."""
        if port_id is None:
            query = ovsdb_query.Generator.get_ports()
            result = self._send(query)
            return result["result"][0]["rows"]
        query = ovsdb_query.Generator.get_port(port_id)
        result = self._send(query)
        for p in result["result"][0]["rows"]:
            if p['_uuid'][1] == port_id:
                return p
        return None

    def add_port_to_bridge(self, bridge, port_name, vlan=None):
        """Create a port named *port_name* on *bridge*, optionally VLAN-tagged.

        Raises NotFound when the bridge row is missing and Duplicate when a
        port with that name already exists.
        """
        if bridge.get_raw() is None:
            raise ovspy_error.NotFound("bridge is not found")
        if self.find_port(port_name) is not None:
            raise ovspy_error.Duplicate("port is already exist")
        exist_ports = [p.get_uuid() for p in bridge.get_ports()]
        query = ovsdb_query.Generator.add_port(bridge.get_uuid(), exist_ports, port_name, vlan=vlan)
        self._send(query)

    def del_port_from_bridge(self, bridge, port_name):
        """Delete the port named *port_name* from *bridge*.

        Raises NotFound when the port does not belong to the bridge.
        """
        target_port = bridge.find_port(port_name)
        exist_ports = list(set(p.get_uuid() for p in bridge.get_ports()))
        if target_port is None or target_port.get_uuid() not in exist_ports:
            raise ovspy_error.NotFound(
                "Specified port(%s) is not exist in bridge(%s)." % (port_name, bridge.get_name()))
        query = ovsdb_query.Generator.del_port(bridge.get_uuid(), exist_ports, target_port.get_uuid())
        self._send(query)

    def add_bridge(self, bridge_name):
        """Create a new bridge named *bridge_name*.

        Raises Duplicate when a bridge with that name already exists.
        """
        exist_bridges = []
        for br in self.get_bridges():
            if bridge_name == br.get_name():
                raise ovspy_error.Duplicate("Bridge(%s) is already exist." % bridge_name)
            exist_bridges.append(br.get_uuid())
        exist_bridges = list(set(exist_bridges))
        query = ovsdb_query.Generator.add_bridge(self.get_uuid(), bridge_name, exist_bridges)
        self._send(query)

    def del_bridge(self, bridge_name):
        """Delete the bridge named *bridge_name*.

        Raises NotFound when no such bridge exists.
        """
        target_bridge = self.find_bridge(bridge_name)
        exist_bridges = [br.get_uuid() for br in self.get_bridges()]
        if target_bridge is None or target_bridge.get_uuid() not in exist_bridges:
            raise ovspy_error.NotFound("Bridge(%s) is not exist." % bridge_name)
        query = ovsdb_query.Generator.del_bridge(self.get_uuid(), exist_bridges, target_bridge.get_uuid())
        self._send(query)
|
[
"json.loads",
"socket.socket",
"ipaddress.ip_address",
"json.dumps",
"datetime.timedelta",
"sys.stderr.write",
"datetime.datetime.now"
] |
[((396, 426), 'ipaddress.ip_address', 'ipaddress.ip_address', (['ovsdb_ip'], {}), '(ovsdb_ip)\n', (416, 426), False, 'import ipaddress\n'), ((593, 642), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (606, 642), False, 'import socket\n'), ((951, 965), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (963, 965), False, 'from datetime import datetime, timedelta\n'), ((968, 1006), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self._query_timeout'}), '(seconds=self._query_timeout)\n', (977, 1006), False, 'from datetime import datetime, timedelta\n'), ((1968, 2014), 'sys.stderr.write', 'sys.stderr.write', (["('[RECV] %s\\n' % query_result)"], {}), "('[RECV] %s\\n' % query_result)\n", (1984, 2014), False, 'import sys\n'), ((1042, 1056), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1054, 1056), False, 'from datetime import datetime, timedelta\n'), ((827, 844), 'json.dumps', 'json.dumps', (['query'], {}), '(query)\n', (837, 844), False, 'import json\n'), ((784, 801), 'json.dumps', 'json.dumps', (['query'], {}), '(query)\n', (794, 801), False, 'import json\n'), ((1695, 1717), 'json.loads', 'json.loads', (['echo_reply'], {}), '(echo_reply)\n', (1705, 1717), False, 'import json\n')]
|
"""
Utilities based on building baseline machine learning models.
"""
from typing import Union, Optional
from pandas import DataFrame, Series
from numpy import mean, tile, empty, std, square, sqrt, log as nplog, reciprocal
from scipy.stats import boxcox, normaltest, mode
from sklearn.compose import ColumnTransformer
from sklearn.exceptions import ConvergenceWarning, DataConversionWarning
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.metrics import mean_squared_error, roc_auc_score
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import (FunctionTransformer, OneHotEncoder,
RobustScaler, StandardScaler,
label_binarize)
from sklearn.utils._testing import ignore_warnings
from .auxiliary import infer_dtypes
from .enum import PredictionTask
# Default estimator pipelines shared by the baseline-model helpers below.
BASELINE_CLASSIFIER = Pipeline([
    ('imputer', SimpleImputer()),
    ('classifier', LogisticRegression())
])
BASELINE_REGRESSION = Pipeline([
    ('imputer', SimpleImputer()),
    ('classifier', LinearRegression())
])
# Mean-impute then z-score numerical columns.
NUMERIC_TRANSFORMER = Pipeline([
    ('imputer', SimpleImputer()),
    ('scaler', StandardScaler())])
# Mode-impute then one-hot encode categorical columns.
CATEGORICAL_TRANSFORMER = Pipeline([
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('encoder', OneHotEncoder(handle_unknown='ignore'))])
ORDINAL_TRANSFORMER = None # Not implemented
def get_prediction_task(df: DataFrame, label: str):
    """Heuristics to infer prediction task (classification/regression).

    A target with exactly two distinct values is treated as binary
    classification; any other cardinality is treated as regression.
    """
    unique_labels = set(df[label])
    return 'classification' if len(unique_labels) == 2 else 'regression'
@ignore_warnings(category=ConvergenceWarning)
def baseline_predictions(df: DataFrame, label: str, task='classification'):
    """Train a baseline model on *df* and predict for a held-out test set.

    Note: the *task* argument is immediately overwritten by
    get_prediction_task(); it is kept for interface compatibility.

    Returns:
        (y_pred, x_test, y_test): predictions plus the held-out split so the
        caller can compute performance metrics.
    """
    # 0. Infer the prediction task
    task = get_prediction_task(df=df, label=label)
    # 1. Define the baseline model
    model = BASELINE_CLASSIFIER if task == 'classification' else BASELINE_REGRESSION
    # 2. Train overall model
    # NOTE(review): classes=list(set(...)) has no guaranteed order across runs;
    # for the binary case label_binarize still yields a single 0/1 column, but
    # which label maps to 1 may vary -- confirm this is acceptable upstream.
    x_orig, y_orig = df.drop(label, axis=1), label_binarize(df[label], classes=list(set(df[label])))
    x_train, x_test, y_train, y_test = train_test_split(x_orig, y_orig, test_size=0.3, random_state=42)
    # Only numeric columns are fed to the estimator.
    model.fit(x_train.select_dtypes('number'), y_train)
    # 3. Predict
    if task == 'regression':
        y_pred = model.predict(x_test.select_dtypes('number'))
    elif task == 'classification':
        y_pred = model.predict_proba(x_test.select_dtypes('number'))[:, 1]
    # 4. Return both the predictions and x_test, y_test to analyze the performances
    return y_pred, x_test, y_test
@ignore_warnings(category=DataConversionWarning)
def baseline_performance(df: DataFrame, label: str,
                         task: PredictionTask = PredictionTask.CLASSIFICATION,
                         adjusted_metric: bool = False):
    """Train a baseline model, predict for a test set and return the performance.

    Args:
        - df (DataFrame): original dataset
        - label (str): name of target feature column
        - task (PredictionTask): classification, regression (overridden by the
          task inferred from the label column)
        - adjusted_metric (bool): if True, return metric as percentage of max achievable performance
    """
    task = get_prediction_task(df=df, label=label)
    # ROC AUC for classification, MSE for regression.
    metric = roc_auc_score if task == 'classification' else mean_squared_error
    y_pred, _, y_test = baseline_predictions(df=df, label=label, task=task)
    if adjusted_metric:
        return adjusted_performance(y_test, y_pred, task=task, metric=metric)
    return metric(y_test, y_pred)
def adjusted_performance(y_true, y_pred, task: PredictionTask, metric: callable):
    """Calculates the adjusted metric as ratio of real to maximum performance.

    Returns the percentage to the best achievable performance starting from a baseline.
    """
    task = PredictionTask(task)
    # Constant-prediction baseline: mean score for classification, mode otherwise.
    if task == PredictionTask.CLASSIFICATION:
        y_default = mean(y_true)
    else:
        y_default = mode(y_true).mode[0]
    y_base = tile(y_default, (len(y_true), 1))
    best_perf = metric(y_true, y_true)
    base_perf = metric(y_true, y_base)
    real_perf = metric(y_true, y_pred)
    # Share of the gap between the constant baseline and a perfect model.
    return (real_perf - base_perf) / (best_perf - base_perf)
@ignore_warnings(category=DataConversionWarning)
def performance_per_feature_values(df: DataFrame, feature: str, label: str, task='classification'):
    """Performance achieved per each value of a groupby feature."""
    task = get_prediction_task(df=df, label=label)
    metric = roc_auc_score if task == 'classification' else mean_squared_error
    y_pred, x_test, y_test = baseline_predictions(df=df, label=label, task=task)
    results = {}
    # Score the baseline predictions separately for every category of *feature*.
    for value in set(x_test[feature]):
        value_mask = x_test[feature] == value
        try:
            results[value] = metric(y_test[value_mask], y_pred[value_mask])
        except ValueError as exc:
            results[value] = f'[ERROR] Failed performance metric with message: {exc}'
    return results
def performance_per_missing_value(df: DataFrame, feature: str, label: str, task='classification'):
    """Performance difference between valued and missing values in feature."""
    task = get_prediction_task(df=df, label=label)
    metric = roc_auc_score if task == 'classification' else mean_squared_error
    y_pred, x_test, y_test = baseline_predictions(df=df, label=label, task=task)
    # Split the held-out rows by whether *feature* is missing and score each side.
    missing_mask = x_test[feature].isna()
    return {
        'missing': metric(y_test[missing_mask], y_pred[missing_mask]),
        'valued': metric(y_test[~missing_mask], y_pred[~missing_mask]),
    }
@ignore_warnings(category=ConvergenceWarning)
def predict_missingness(df: DataFrame, feature: str):
    """Train a baseline model to predict the missingness of a feature value.

    Returns the ROC AUC of a classifier predicting whether *feature* is NaN.
    """
    df = df.copy()  # avoid altering the original DataFrame
    target = f'is_missing_{feature}'
    df[target] = df[feature].isna()  # binary target: is the value missing?
    model = BASELINE_CLASSIFIER
    x_orig, y_orig = df.drop([feature, target], axis=1), df[target]
    x_train, x_test, y_train, y_test = train_test_split(x_orig, y_orig, test_size=0.3, random_state=42)
    # Only numeric columns are fed to the estimator.
    model.fit(x_train.select_dtypes('number'), y_train)
    y_pred = model.predict_proba(x_test.select_dtypes('number'))[:, 1]
    return roc_auc_score(y_test, y_pred)
def standard_transform(df, dtypes, skip: Optional[list] = None, robust=False):
    """Applies standard transformation to the dataset (imputation, centering and scaling), returns transformed data
    and the fitted transformer.
    Numerical data is imputed with mean, centered and scaled by 4 standard deviations.
    Categorical data is imputed with mode. Encoding is not performed in this stage to preserve the same columns.
    If robust is passed as True, will truncate numerical data before computing statistics.
    [1]From 1997 <NAME>; Martinez, <NAME>. -
    Improved Heterogeneous Distance Functions https://arxiv.org/pdf/cs/9701101.pdf
    """
    skip = [] if skip is None else skip
    # Partition the columns by inferred dtype, leaving skipped ones untouched.
    numerical_features = [key for key, value in dtypes.items() if value == 'numerical' and key not in skip]
    categorical_features = [key for key, value in dtypes.items() if value == 'categorical' and key not in skip]
    assert len(numerical_features + categorical_features +
               skip) == len(df.columns), 'the union of dtypes keys with skip should be the same as the df columns'
    if robust:
        # Robust variant: median/IQR scaling computed on the central 90% of the data.
        numeric_transformer = Pipeline([
            ('imputer', SimpleImputer()),
            ('scaler', RobustScaler(quantile_range=(5.0, 95.0)))])
    else:
        numeric_transformer = NUMERIC_TRANSFORMER
    preprocessor = ColumnTransformer(
        transformers=[ # Numerical vars are scaled by 4sd so that most of the data are fit in the [-1, 1] range
            ('num', Pipeline(numeric_transformer.steps + \
                [('divby4', FunctionTransformer(lambda x: x / 4))]), numerical_features),
            ('cat', Pipeline([('impute', SimpleImputer(strategy='most_frequent'))]), categorical_features)],
        remainder='passthrough')
    # ColumnTransformer emits transformed groups in transformer order
    # (numerical, categorical, passthrough), so relabel the columns to match.
    new_column_order = numerical_features + categorical_features + skip
    tdf = DataFrame(preprocessor.fit_transform(df), index=df.index, columns=new_column_order)
    return tdf, preprocessor
def performance_one_vs_rest(df: DataFrame, label_feat: str, _class: str, dtypes=None):
    """Train a classifier to predict a class in binary fashion against all other classes.

    A normalized dataframe should be passed for best results."""
    df = df.copy()  # avoid altering the original DataFrame
    if not dtypes:
        dtypes = infer_dtypes(df)
    categorical_features = [key for key, value in dtypes.items() if value == 'categorical' and key != label_feat]
    # One-hot encode categorical variables; numeric columns pass through.
    preprocessor = ColumnTransformer(
        transformers=[('cat', CATEGORICAL_TRANSFORMER, categorical_features)])
    model = Pipeline([('preprocessing', preprocessor), ('classifier', LogisticRegression())])
    # Binary target: 1 for rows belonging to *_class*, 0 otherwise.
    x_orig = df.drop(label_feat, axis=1)
    y_orig = label_binarize(df[label_feat], classes=[_class]).squeeze()
    x_train, x_test, y_train, y_test = train_test_split(x_orig, y_orig, test_size=0.3, random_state=24)
    model.fit(x_train, y_train)
    y_pred = model.predict_proba(x_test)[:, 1]
    return roc_auc_score(y_test, y_pred)
def center_of_mass_statistic(column: Series, col_dtype: str) -> Union[float, int, str]:
    """Returns a center of mass statistic of a column based on its dtype.

    Mean for numerical columns, first mode for everything else."""
    if col_dtype == 'numerical':
        return column.mean()
    return column.mode()[0]  # only first mode
def estimate_centroid(df: DataFrame, dtypes: dict = None):
    """Makes a centroid estimation for a given dataframe.

    Will use provided dtypes or infer in order to use best statistic columnwise.
    """
    if dtypes:
        # Bug fix: the previous code did `dtypes = dtypes.update(...)`, which
        # rebinds dtypes to None (dict.update returns None), and it accessed
        # `dtypes.columns`, which does not exist on a dict.  Infer dtypes only
        # for the missing columns and merge them in place instead.
        if not all(col in dtypes for col in df.columns):
            dtypes.update(infer_dtypes(df, skip=list(dtypes)))
    else:
        dtypes = infer_dtypes(df)
    centroid = Series(df.iloc[0])
    # Replace each seed value with the column's center-of-mass statistic.
    for col in centroid.index:
        centroid[col] = center_of_mass_statistic(df[col], dtypes[col])
    return centroid
def heom(x_df: DataFrame, y_df, dtypes):
    """Implements the Heterogeneous Euclidean-Overlap Metric between a sample x and a reference y.

    The data is assumed to already be preprocessed (normalized and imputed).
    [1]From 1997 <NAME>; <NAME>. -
    Improved Heterogeneous Distance Functions https://arxiv.org/pdf/cs/9701101.pdf
    """
    # Overlap (0/1) distance for categoricals; absolute difference for
    # numericals, which are assumed to be pre-scaled.
    def _cat_dist(a, b):
        return 0 if a == b else 1

    def _num_dist(a, b):
        return abs(a - b)

    per_dtype = {'categorical': _cat_dist, 'numerical': _num_dist}
    distances = DataFrame(empty(x_df.shape), index=x_df.index, columns=x_df.columns)
    for col_idx, column in enumerate(distances.columns):
        distances[column] = x_df[column].apply(per_dtype[dtypes[column]], args=[y_df[col_idx]])
    return distances
def estimate_sd(sample: DataFrame, reference=None, dtypes=None):
    """Estimates the standard deviation of a sample of records.

    A reference can be passed in order to avoid new computation of mean or to use distances to another reference point.
    The reference is expected as a (1, N) array where N is the number of columns in the sample.
    Returns:
        std_dev: the standard deviation of the distance vectors of the sample to the reference point
        std_distances: the distances of the sample points to the reference point scaled by std_dev
    """
    if dtypes:  # Ensure dtypes are compatible with sample
        # Bug fix: `dtypes = dtypes.update(...)` rebound dtypes to None
        # (dict.update returns None) and `dtypes.columns` is not a dict
        # attribute.  Infer only the missing columns and merge in place.
        if not all(col in dtypes for col in sample.columns):
            dtypes.update(infer_dtypes(sample, skip=list(dtypes)))
    else:
        dtypes = infer_dtypes(sample)
    if reference is None:
        reference = estimate_centroid(sample, dtypes)
    else:
        assert len(reference) == len(
            sample.columns), "The provided reference point does not have the same dimension as the sample records"
    distances = heom(x_df=sample, y_df=reference, dtypes=dtypes)
    # Root-mean-square over columns yields one scalar distance per record.
    euclidean_distances = (distances.apply(square).sum(axis=1) / len(sample.columns)).apply(sqrt)
    std_dev = std(euclidean_distances)
    std_distances = euclidean_distances / std_dev
    return std_dev, std_distances
def gmm_clustering(data, n_gaussians):
    """Produces a GMM model with n_gaussians to cluster provided data.

    Returns (cluster labels, AIC score) of the fitted mixture."""
    model = GaussianMixture(n_components=n_gaussians)
    model.fit(data)
    return model.predict(data), model.aic(data)
def normality_test(data, suite='full', p_th=5e-3):
    """Performs a normality test on the data. Null hypothesis, data comes from normal distribution.

    A transformation taken from a suite is applied to the data before each run of the normal test.
    The first transformation in the suite that passes the normalcy test is returned.

    Args:
        data: 1D array of observations.
        suite: 'full' for every known transform, a single transform name, or an
            iterable of names among (None, 'inverse', 'square root', 'log', 'Box Cox').
        p_th: p-value threshold below which normality is rejected.

    Returns:
        result: True if any transformation led to a positive normal test, False otherwise
        test: The first transform in the suite to lead to a positive normal test
        p_stat: its p-value (None when no transform passed)
    """
    transforms = {None: lambda x: x,
                  'inverse': reciprocal,
                  'square root': sqrt,
                  'log': nplog,
                  'Box Cox': boxcox}
    if suite == 'full':
        suite = transforms.keys()
    elif isinstance(suite, str):
        # Bug fix: the previous `list(suite)` split a single transform name
        # into characters (e.g. 'log' -> ['l', 'o', 'g']), causing a KeyError.
        suite = [suite]
    for transform in suite:
        try:
            transformed_data = transforms[transform](data)
            _, p_stat = normaltest(transformed_data, nan_policy='raise')
        except (AttributeError, TypeError, ZeroDivisionError, ValueError):
            # Transform not applicable to this data; try the next one.
            continue
        if p_stat > p_th:
            return True, transform, p_stat
    return False, None, None
|
[
"sklearn.preprocessing.FunctionTransformer",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"numpy.empty",
"sklearn.mixture.GaussianMixture",
"sklearn.compose.ColumnTransformer",
"numpy.mean",
"sklearn.impute.SimpleImputer",
"numpy.std",
"scipy.stats.normaltest",
"scipy.stats.mode",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.preprocessing.label_binarize",
"sklearn.metrics.roc_auc_score",
"sklearn.linear_model.LinearRegression",
"sklearn.linear_model.LogisticRegression",
"pandas.Series",
"sklearn.preprocessing.RobustScaler",
"sklearn.utils._testing.ignore_warnings"
] |
[((1737, 1781), 'sklearn.utils._testing.ignore_warnings', 'ignore_warnings', ([], {'category': 'ConvergenceWarning'}), '(category=ConvergenceWarning)\n', (1752, 1781), False, 'from sklearn.utils._testing import ignore_warnings\n'), ((2755, 2802), 'sklearn.utils._testing.ignore_warnings', 'ignore_warnings', ([], {'category': 'DataConversionWarning'}), '(category=DataConversionWarning)\n', (2770, 2802), False, 'from sklearn.utils._testing import ignore_warnings\n'), ((4551, 4598), 'sklearn.utils._testing.ignore_warnings', 'ignore_warnings', ([], {'category': 'DataConversionWarning'}), '(category=DataConversionWarning)\n', (4566, 4598), False, 'from sklearn.utils._testing import ignore_warnings\n'), ((6380, 6424), 'sklearn.utils._testing.ignore_warnings', 'ignore_warnings', ([], {'category': 'ConvergenceWarning'}), '(category=ConvergenceWarning)\n', (6395, 6424), False, 'from sklearn.utils._testing import ignore_warnings\n'), ((2292, 2356), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_orig', 'y_orig'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(x_orig, y_orig, test_size=0.3, random_state=42)\n', (2308, 2356), False, 'from sklearn.model_selection import train_test_split\n'), ((6948, 7012), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_orig', 'y_orig'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(x_orig, y_orig, test_size=0.3, random_state=42)\n', (6964, 7012), False, 'from sklearn.model_selection import train_test_split\n'), ((7215, 7244), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7228, 7244), False, 'from sklearn.metrics import mean_squared_error, roc_auc_score\n'), ((9747, 9839), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', ([], {'transformers': "[('cat', CATEGORICAL_TRANSFORMER, categorical_features)]"}), "(transformers=[('cat', CATEGORICAL_TRANSFORMER,\n categorical_features)])\n", (9764, 9839), False, 'from sklearn.compose import 
ColumnTransformer\n'), ((10146, 10210), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_orig', 'y_orig'], {'test_size': '(0.3)', 'random_state': '(24)'}), '(x_orig, y_orig, test_size=0.3, random_state=24)\n', (10162, 10210), False, 'from sklearn.model_selection import train_test_split\n'), ((10365, 10394), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (10378, 10394), False, 'from sklearn.metrics import mean_squared_error, roc_auc_score\n'), ((11061, 11079), 'pandas.Series', 'Series', (['df.iloc[0]'], {}), '(df.iloc[0])\n', (11067, 11079), False, 'from pandas import DataFrame, Series\n'), ((13238, 13262), 'numpy.std', 'std', (['euclidean_distances'], {}), '(euclidean_distances)\n', (13241, 13262), False, 'from numpy import mean, tile, empty, std, square, sqrt, log as nplog, reciprocal\n'), ((4183, 4195), 'numpy.mean', 'mean', (['y_true'], {}), '(y_true)\n', (4187, 4195), False, 'from numpy import mean, tile, empty, std, square, sqrt, log as nplog, reciprocal\n'), ((11577, 11594), 'numpy.empty', 'empty', (['x_df.shape'], {}), '(x_df.shape)\n', (11582, 11594), False, 'from numpy import mean, tile, empty, std, square, sqrt, log as nplog, reciprocal\n'), ((1058, 1073), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {}), '()\n', (1071, 1073), False, 'from sklearn.impute import SimpleImputer\n'), ((1095, 1115), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1113, 1115), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1170, 1185), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {}), '()\n', (1183, 1185), False, 'from sklearn.impute import SimpleImputer\n'), ((1207, 1225), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1223, 1225), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1280, 1295), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], 
{}), '()\n', (1293, 1295), False, 'from sklearn.impute import SimpleImputer\n'), ((1313, 1329), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1327, 1329), False, 'from sklearn.preprocessing import FunctionTransformer, OneHotEncoder, RobustScaler, StandardScaler, label_binarize\n'), ((1387, 1426), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""most_frequent"""'}), "(strategy='most_frequent')\n", (1400, 1426), False, 'from sklearn.impute import SimpleImputer\n'), ((1445, 1483), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (1458, 1483), False, 'from sklearn.preprocessing import FunctionTransformer, OneHotEncoder, RobustScaler, StandardScaler, label_binarize\n'), ((13473, 13514), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'n_gaussians'}), '(n_components=n_gaussians)\n', (13488, 13514), False, 'from sklearn.mixture import GaussianMixture\n'), ((14531, 14579), 'scipy.stats.normaltest', 'normaltest', (['transformed_data'], {'nan_policy': '"""raise"""'}), "(transformed_data, nan_policy='raise')\n", (14541, 14579), False, 'from scipy.stats import boxcox, normaltest, mode\n'), ((4242, 4254), 'scipy.stats.mode', 'mode', (['y_true'], {}), '(y_true)\n', (4246, 4254), False, 'from scipy.stats import boxcox, normaltest, mode\n'), ((9944, 9964), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (9962, 9964), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((10048, 10096), 'sklearn.preprocessing.label_binarize', 'label_binarize', (['df[label_feat]'], {'classes': '[_class]'}), '(df[label_feat], classes=[_class])\n', (10062, 10096), False, 'from sklearn.preprocessing import FunctionTransformer, OneHotEncoder, RobustScaler, StandardScaler, label_binarize\n'), ((8420, 8435), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {}), '()\n', (8433, 
8435), False, 'from sklearn.impute import SimpleImputer\n'), ((8461, 8501), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {'quantile_range': '(5.0, 95.0)'}), '(quantile_range=(5.0, 95.0))\n', (8473, 8501), False, 'from sklearn.preprocessing import FunctionTransformer, OneHotEncoder, RobustScaler, StandardScaler, label_binarize\n'), ((8903, 8942), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""most_frequent"""'}), "(strategy='most_frequent')\n", (8916, 8942), False, 'from sklearn.impute import SimpleImputer\n'), ((8800, 8836), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', (['(lambda x: x / 4)'], {}), '(lambda x: x / 4)\n', (8819, 8836), False, 'from sklearn.preprocessing import FunctionTransformer, OneHotEncoder, RobustScaler, StandardScaler, label_binarize\n')]
|
# Copyright 2019-2020 by <NAME>, MGLAND animation studio. All rights reserved.
# This file is part of IUTest, and is released under the "MIT License Agreement".
# Please see the LICENSE file that should have been included as part of this package.
import logging
import os
from iutest.core import appsettings
from iutest.core import constants
from iutest.qt import QtCore, Signal
logger = logging.getLogger(__name__)
class CodeLineVisitor(QtCore.QObject):
    """Launches the user-configured external editor to open a file at a line.

    The editor command template is read from the application settings and may
    contain CODE_FILE_VAR / CODE_LINE_VAR placeholders.
    """

    _editorSetting = None
    errorIssued = Signal(str)

    @classmethod
    def initEditorSetting(cls):
        """Read and cache the goToCode command template from the app settings."""
        cls._editorSetting = appsettings.get().simpleConfigStrValue(
            constants.CONFIG_KEY_CODE_EDITOR, constants.CONFIG_KEY_CODE_EDITOR_DEFAULT
        )

    @classmethod
    def config(cls):
        """Return the cached command template, initialising it on first use."""
        if not cls._editorSetting:
            cls.initEditorSetting()
        return cls._editorSetting

    def __init__(self, parent=None):
        QtCore.QObject.__init__(self, parent=parent)
        self._lastCmd = None
        self._process = QtCore.QProcess(self)
        self._process.error.connect(self._onGoToCodeError)
        self._process.readyReadStandardError.connect(self._onReadyReadStandardError)

    @staticmethod
    def _goToCmd(template, filePath, lineNumber):
        """Substitute the file path and line number into the command template."""
        cmd = template.replace(constants.CODE_FILE_VAR, filePath)
        return cmd.replace(constants.CODE_LINE_VAR, str(lineNumber))

    def goTo(self, filePath, lineNumber=0):
        """Open *filePath* at *lineNumber* in the configured external editor."""
        if not os.path.isfile(filePath):
            # Bug fix: previously only a warning was logged and the editor was
            # launched anyway; abort instead of running the command on a
            # non-existent file.
            logger.warning("%s is not a valid file.", filePath)
            return
        self._lastCmd = self._goToCmd(self.config(), filePath, lineNumber)
        logger.debug(self._lastCmd)
        self._process.start(self._lastCmd)

    def _onGoToCodeError(self, err):
        """Translate a QProcess error code into a user-facing rich-text message."""
        msg = "<font color=red><b>Error: </b></font>"
        if err == self._process.FailedToStart:
            msg = (
                msg
                + "Failed to launch the program as it was either missing or insufficient permissions.<br><br>"
            )
            msg = (
                msg
                + "You might need to change the goToCode setting in Preference Dialog, e.g.<br>Specify full path to the program, etc."
            )
        elif err == self._process.Crashed:
            msg = msg + "The program to browse the code has crashed."
        elif err == self._process.Timedout:
            msg = msg + "The last goToCodeProcess.waitFor...() function timed out."
        elif err == self._process.WriteError:
            msg = (
                msg
                + "An error occurred when attempting to write to the goToCode process."
            )
        elif err == self._process.ReadError:
            msg = (
                msg
                + "An error occurred when attempting to read to the goToCode process."
            )
        else:
            msg = msg + "An unknown error occurred when attempting to go to the code."
        msg = msg + "<hr><font color=red><b>Failed Command:</b></font><br>{}".format(
            self._lastCmd
        )
        self.errorIssued.emit(msg)

    def _onReadyReadStandardError(self):
        # Bug fix: readAllStandardError() is a QProcess method, not a QObject
        # one -- the previous self.readAllStandardError() raised AttributeError.
        logger.error(self._process.readAllStandardError())
|
[
"iutest.core.appsettings.get",
"iutest.qt.QtCore.QProcess",
"iutest.qt.Signal",
"os.path.isfile",
"iutest.qt.QtCore.QObject.__init__",
"logging.getLogger"
] |
[((390, 417), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (407, 417), False, 'import logging\n'), ((504, 515), 'iutest.qt.Signal', 'Signal', (['str'], {}), '(str)\n', (510, 515), False, 'from iutest.qt import QtCore, Signal\n'), ((922, 966), 'iutest.qt.QtCore.QObject.__init__', 'QtCore.QObject.__init__', (['self'], {'parent': 'parent'}), '(self, parent=parent)\n', (945, 966), False, 'from iutest.qt import QtCore, Signal\n'), ((1020, 1041), 'iutest.qt.QtCore.QProcess', 'QtCore.QProcess', (['self'], {}), '(self)\n', (1035, 1041), False, 'from iutest.qt import QtCore, Signal\n'), ((1450, 1474), 'os.path.isfile', 'os.path.isfile', (['filePath'], {}), '(filePath)\n', (1464, 1474), False, 'import os\n'), ((595, 612), 'iutest.core.appsettings.get', 'appsettings.get', ([], {}), '()\n', (610, 612), False, 'from iutest.core import appsettings\n')]
|
from pathlib import Path
import numpy as np
from PIL import ImageFont
from scipy.ndimage import convolve
from scipy.spatial import cKDTree
resource_dir = (Path(__file__) / "../resources").absolute()
class Particle:
    """A point mass that steers toward a target position.

    `arrive` implements the classic steering behaviour: full speed when far
    from the target, decelerating within 100px of it, with the steering force
    capped at `max_force`.
    """

    def __init__(self, x, y, color, ball_size=1):
        self.pos = np.array([x, y]).astype(float)
        self.vel = np.zeros(2)
        self.acc = np.zeros(2)
        # Bug fix: copy instead of aliasing -- `self.target = self.pos` made
        # the in-place `self.pos += self.vel` in update() move the target too.
        self.target = self.pos.copy()
        self.radius = ball_size
        self.max_speed = 10
        self.max_force = 0.6
        self.color = np.array(color, dtype=np.uint8)

    def update(self):
        """Advance one Euler step and clear the accumulated acceleration."""
        self.pos += self.vel
        self.vel += self.acc
        self.acc *= 0

    def arrive(self):
        """Return a steering force toward self.target, capped at max_force."""
        offset = self.target - self.pos
        dist = np.linalg.norm(offset)
        if dist == 0:
            # Bug fix: already at the target -- the original divided by zero
            # here, producing NaNs; no steering is needed.
            return np.zeros(2)
        desired = offset / dist  # unit direction toward the target
        # if we are less than 100px away from our target, start to slow down;
        # otherwise go at full speed
        speed = dist / 100 * self.max_speed if dist < 100 else self.max_speed
        desired *= speed
        steer = desired - self.vel
        steer_mag = np.linalg.norm(steer)
        if steer_mag > self.max_force:
            steer = steer / steer_mag * self.max_force
        return steer
def render_text_perimeter_balls(
    txt, pos=(0, 0), scale=16, color=(235, 64, 52), ball_size=4.5
):
    """Rasterise *txt* with the bundled font, trace its outline and return an
    (N, 2) array of non-overlapping ball centre positions on that outline."""
    # place particles on the text outline without overlapping them.
    font = ImageFont.truetype(
        (resource_dir / "VCR_OSD_MONO_1.001.ttf").as_posix(), scale
    )
    a = font.getmask(txt)
    # NOTE(review): a.size appears to be (width, height); the loops below fill
    # the array column-major and np.rot90 then restores row/column orientation
    # -- confirm against PIL's ImagingCore.size before changing.
    out = np.empty(a.size)
    for y in range(a.size[0]):
        for x in range(a.size[1]):
            out[y, x] = a.getpixel((y, x))
    out = out / 255
    out = np.where(out > 0, 1, 0)  # binarise the glyph mask
    out = np.rot90(out)
    # 8-neighbour count (centre excluded); pixels whose neighbourhood sums to
    # exactly 5 are taken as the outline -- presumably tuned for this font.
    kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
    out = convolve(out, kernel, mode="constant")
    outline = np.where(out == 5, 1, 0)
    indices = np.transpose(outline.nonzero()) + np.array(pos)
    particles = []
    for xy in indices:
        particles.append(Particle(xy[1], xy[0], color, ball_size))
    quadTree = cKDTree([p.pos for p in particles])
    # loop over particles. remove all touching particles (those within
    # 2*radius of a kept particle); already-removed ones are skipped so their
    # neighbours can survive.
    to_remove = set()
    for particle in particles:
        if particle in to_remove:
            continue
        colliding_particles = [
            particles[i]
            for i in quadTree.query_ball_point(particle.pos, particle.radius * 2)
        ]
        for p in colliding_particles:
            if p != particle:  # identity comparison: Particle defines no __eq__
                to_remove.add(p)
    for particle in to_remove:
        particles.remove(particle)
    out = np.array([p.pos for p in particles])
    # out = out/np.linalg.norm(out)
    return out
if __name__ == "__main__":
    # generate the particles with their target position
    # NOTE(review): the returned position arrays are discarded here, so this
    # block only exercises the renderer (font/resource smoke test).
    render_text_perimeter_balls("Hey!", scale=300, pos=(75, 250), color=(226, 53, 31))
    render_text_perimeter_balls(
        "#show-your-projects",
        scale=70,
        pos=(10, 150),
        color=(231, 201, 49),
        ball_size=2,
    )
|
[
"numpy.empty",
"numpy.zeros",
"scipy.ndimage.convolve",
"pathlib.Path",
"numpy.where",
"numpy.array",
"numpy.rot90",
"numpy.linalg.norm",
"scipy.spatial.cKDTree"
] |
[((1668, 1684), 'numpy.empty', 'np.empty', (['a.size'], {}), '(a.size)\n', (1676, 1684), True, 'import numpy as np\n'), ((1825, 1848), 'numpy.where', 'np.where', (['(out > 0)', '(1)', '(0)'], {}), '(out > 0, 1, 0)\n', (1833, 1848), True, 'import numpy as np\n'), ((1859, 1872), 'numpy.rot90', 'np.rot90', (['out'], {}), '(out)\n', (1867, 1872), True, 'import numpy as np\n'), ((1887, 1930), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 0, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, 0, 1], [1, 1, 1]])\n', (1895, 1930), True, 'import numpy as np\n'), ((1941, 1979), 'scipy.ndimage.convolve', 'convolve', (['out', 'kernel'], {'mode': '"""constant"""'}), "(out, kernel, mode='constant')\n", (1949, 1979), False, 'from scipy.ndimage import convolve\n'), ((1994, 2018), 'numpy.where', 'np.where', (['(out == 5)', '(1)', '(0)'], {}), '(out == 5, 1, 0)\n', (2002, 2018), True, 'import numpy as np\n'), ((2208, 2243), 'scipy.spatial.cKDTree', 'cKDTree', (['[p.pos for p in particles]'], {}), '([p.pos for p in particles])\n', (2215, 2243), False, 'from scipy.spatial import cKDTree\n'), ((2738, 2774), 'numpy.array', 'np.array', (['[p.pos for p in particles]'], {}), '([p.pos for p in particles])\n', (2746, 2774), True, 'import numpy as np\n'), ((339, 350), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (347, 350), True, 'import numpy as np\n'), ((370, 381), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (378, 381), True, 'import numpy as np\n'), ((527, 558), 'numpy.array', 'np.array', (['color'], {'dtype': 'np.uint8'}), '(color, dtype=np.uint8)\n', (535, 558), True, 'import numpy as np\n'), ((734, 772), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.target - self.pos)'], {}), '(self.target - self.pos)\n', (748, 772), True, 'import numpy as np\n'), ((1218, 1239), 'numpy.linalg.norm', 'np.linalg.norm', (['steer'], {}), '(steer)\n', (1232, 1239), True, 'import numpy as np\n'), ((2067, 2080), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (2075, 2080), True, 'import numpy as 
np\n'), ((158, 172), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (162, 172), False, 'from pathlib import Path\n'), ((289, 305), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (297, 305), True, 'import numpy as np\n')]
|
import os.path
from flask import Flask, render_template, jsonify, request
from pywhale.whale import PyWhale
# Resolve paths relative to this file so the app works from any working dir.
curr_file = os.path.abspath(os.path.dirname(__file__))
app_path = os.path.join(curr_file)  # NOTE(review): single-arg join is a no-op; app_path == curr_file
static_path = os.path.join(curr_file, 'static')  # NOTE(review): unused below -- verify whether it was meant to be passed to Flask
template_path = os.path.join(curr_file, 'templates')
app = Flask("PyWhale", root_path=app_path, template_folder=template_path)
@app.route('/', methods=['GET'])
def main():  # pragma: no cover
    """Serve the single-page application shell."""
    return render_template("app.html")
@app.route('/api/process', methods=['POST'])
def process():  # pragma: no cover
    """Run PyWhale over the posted 'body' form field and return the result as JSON."""
    body = request.form.get("body")
    return jsonify(PyWhale.process(body))
def start(host="127.0.0.1", port=3333):
    """Run the Flask development server on the given host and port."""
    app.run(host=host, port=port)
|
[
"flask.jsonify",
"flask.Flask",
"flask.render_template",
"flask.request.form.get"
] |
[((309, 376), 'flask.Flask', 'Flask', (['"""PyWhale"""'], {'root_path': 'app_path', 'template_folder': 'template_path'}), "('PyWhale', root_path=app_path, template_folder=template_path)\n", (314, 376), False, 'from flask import Flask, render_template, jsonify, request\n'), ((455, 482), 'flask.render_template', 'render_template', (['"""app.html"""'], {}), "('app.html')\n", (470, 482), False, 'from flask import Flask, render_template, jsonify, request\n'), ((630, 644), 'flask.jsonify', 'jsonify', (['whale'], {}), '(whale)\n', (637, 644), False, 'from flask import Flask, render_template, jsonify, request\n'), ((593, 617), 'flask.request.form.get', 'request.form.get', (['"""body"""'], {}), "('body')\n", (609, 617), False, 'from flask import Flask, render_template, jsonify, request\n')]
|
from natch.core import Decoration
from natch.rules import Eq
# `eq` is a ready-to-use decorator built from the Eq rule via the
# Decoration factory.
eq = Decoration.make_rule_decorator(Eq)
|
[
"natch.core.Decoration.make_rule_decorator"
] |
[((68, 102), 'natch.core.Decoration.make_rule_decorator', 'Decoration.make_rule_decorator', (['Eq'], {}), '(Eq)\n', (98, 102), False, 'from natch.core import Decoration\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from config_file import *
class Identity_TransformerBlock(nn.Module):
    """No-op stand-in for TransformerBlock: forward() returns Q untouched."""

    def __init__(self):
        super().__init__()

    def forward(self, Q, K, V, episilon=1e-8):
        """Ignore K, V and episilon; pass the query tensor straight through."""
        return Q
class TransformerBlock(nn.Module):
    """Single-head scaled dot-product attention followed by a two-layer FFN.

    Args:
        input_size: embedding dimension of Q/K/V.
        is_layer_norm: if True, apply residual LayerNorm around the attention
            output and the FFN output.
    """

    def __init__(self, input_size, is_layer_norm=False):
        super(TransformerBlock, self).__init__()
        self.is_layer_norm = is_layer_norm
        if is_layer_norm:
            self.layer_norm1 = nn.LayerNorm(normalized_shape=input_size)
            self.layer_norm2 = nn.LayerNorm(normalized_shape=input_size)
        self.relu = nn.ReLU()
        self.linear1 = nn.Linear(input_size, input_size)
        self.linear2 = nn.Linear(input_size, input_size)
        self.init_weights()

    def init_weights(self):
        """Xavier-init the FFN linears; reset the LayerNorms when present."""
        init.xavier_normal_(self.linear1.weight)
        init.xavier_normal_(self.linear2.weight)
        init.constant_(self.linear1.bias, 0.0)
        init.constant_(self.linear2.bias, 0.0)
        # BUG FIX: layer_norm1/2 only exist when is_layer_norm=True; the
        # original unconditional init raised AttributeError for the default
        # is_layer_norm=False construction.
        if self.is_layer_norm:
            init.constant_(self.layer_norm1.weight, 1.)
            init.constant_(self.layer_norm1.bias, 0.)
            init.constant_(self.layer_norm2.weight, 1.)
            init.constant_(self.layer_norm2.bias, 0.)

    def FFN(self, X):
        """Position-wise feed-forward network: linear -> ReLU -> linear."""
        return self.linear2(self.relu(self.linear1(X)))

    def forward(self, Q, K, V, attention_mask=None, episilon=1e-8):
        """
        :param Q: (batch_size, max_r_words, embedding_dim)
        :param K: (batch_size, max_u_words, embedding_dim)
        :param V: (batch_size, max_u_words, embedding_dim)
        :param attention_mask: additive mask added to the raw score matrix
            (large negative values suppress positions); defaults to zeros.
        :return: output: (batch_size, max_r_words, embedding_dim) same size as Q
        """
        # BUG FIX: the original read self.args.device, but this class never
        # stores args; derive the device from the input tensor instead.
        device = Q.device
        if attention_mask is None:
            attention_mask = torch.zeros(Q.size(0), Q.size(1), K.size(1), device=device)
        else:
            attention_mask = attention_mask.to(device)
        dk = torch.Tensor([max(1.0, Q.size(-1))]).to(device)
        Q_K = Q.bmm(K.permute(0, 2, 1)) / (torch.sqrt(dk) + episilon)
        Q_K = Q_K + attention_mask  # mask some scores
        # (batch_size, max_r_words, max_u_words)
        Q_K_score = F.softmax(Q_K, dim=-1)
        V_att = Q_K_score.bmm(V)
        if self.is_layer_norm:
            # (batch_size, max_r_words, embedding_dim)
            X = self.layer_norm1(Q + V_att)
            output = self.layer_norm2(self.FFN(X) + X)
        else:
            X = Q + V_att
            output = self.FFN(X) + X
        return output
class AttentionBlock(nn.Module):
    """Masked dot-product attention + residual LayerNorm + position-wise FFN.

    Adapted from the DGMN code provided by Zhao. `args` must supply
    `emb_size` and `device`.
    """

    def __init__(self, args):
        self.args = args
        super(AttentionBlock, self).__init__()
        self.layernorm = nn.LayerNorm(normalized_shape=args.emb_size)
        self.layernorm_ffn = nn.LayerNorm(normalized_shape=args.emb_size)
        self.ffn = nn.Sequential(
            nn.Linear(args.emb_size, args.emb_size, bias=True),
            nn.ReLU(),
            nn.Linear(args.emb_size, args.emb_size, bias=True),
        )
        self.init_weight()

    def init_weight(self):
        """Reset the norms to identity and Xavier-init the FFN linears."""
        for ln in (self.layernorm, self.layernorm_ffn):
            init.constant_(ln.weight, 1.0)
            init.constant_(ln.bias, 0.0)
        init.xavier_uniform_(self.ffn[0].weight)
        init.xavier_uniform_(self.ffn[2].weight)

    def attention_dot(self, queries, keys, query_masks, key_masks, episilon=1e-8):
        """Scaled dot-product attention of `queries` over `keys`.

        :param query_masks: (B, L_q), 1 for real tokens, 0 for padding
        :param key_masks: (B, L_k) e.g. [[1,1,1,0],[1,1,1,1]]
        :return: (B, L_q, emb_size)
        """
        scores = torch.einsum('bik,bjk->bij', queries, keys)  # (B, L_q, L_k)
        scale = torch.Tensor([max(1.0, queries.size(-1))]).to(self.args.device)
        scores = scores / (torch.sqrt(scale) + episilon)
        # Suppress padded key positions with a huge negative before softmax.
        expanded_key_masks = key_masks.unsqueeze(1).repeat(1, queries.shape[1], 1)
        neg_inf = (torch.ones_like(scores) * (-2 ** 32 + 1)).to(self.args.device)
        scores = torch.where(expanded_key_masks == 0, neg_inf, scores)
        scores = torch.softmax(scores, dim=-1)
        # Zero out rows that belong to padded query positions.
        scores = scores * query_masks.unsqueeze(-1)
        return torch.einsum('bij,bjk->bik', scores, keys)

    def feedforward(self, inputs):
        """Position-wise FFN with residual connection and LayerNorm."""
        return self.layernorm_ffn(self.ffn(inputs) + inputs)

    def forward(self, queries, keys, query_masks, key_masks, residual=True, epsilon=1e-8):
        attended = self.attention_dot(queries, keys, query_masks, key_masks, epsilon)
        normed = self.layernorm(attended + queries) if residual else self.layernorm(attended)
        return self.feedforward(normed)
class NNSubmulti(nn.Module):
    """Attend queries onto keys with an MLP-scored attention, then combine
    element-wise product and squared-difference ("submult") features through
    a final linear layer."""
    def __init__(self, args):
        # args must provide emb_size (embedding dimension).
        self.args = args
        super(NNSubmulti, self).__init__()
        # MLP scoring a (query, key) pair from their concatenation.
        self.linear_ff_sim = nn.Sequential(
            nn.Linear(in_features=args.emb_size * 2, out_features=100, bias=True),
            nn.Tanh(),
            nn.Linear(in_features=100, out_features=1, bias=False)
        )
        self.linear_last = nn.Linear(in_features=args.emb_size * 2, out_features=args.emb_size, bias=True)
        self.init_weight()
    def init_weight(self):
        """Xavier-uniform init for all linear weights."""
        init.xavier_uniform_(self.linear_ff_sim[0].weight)
        init.xavier_uniform_(self.linear_ff_sim[2].weight)
        init.xavier_uniform_(self.linear_last.weight)
    def ff_sim(self, queries, keys):
        """MLP similarity between every query/key pair -> (B, T_q, T_k)."""
        T_q = queries.shape[1]
        T_k = keys.shape[1]
        expand_queries = queries.unsqueeze(2).repeat(1, 1, T_k, 1)
        expand_keys = keys.unsqueeze(1).repeat(1, T_q, 1, 1)
        # TODO: add a vector >> ref: source codes of Xueliang Zhao
        features = torch.cat([expand_queries, expand_keys], dim=-1)
        outputs = self.linear_ff_sim(features)
        outputs = outputs.view(-1, T_q, T_k)
        return outputs
    def attention_fc(self, queries, keys, query_masks, key_masks):
        """Attend queries over keys using MLP scores; masks are 1 for real
        tokens, 0 for padding."""
        sim = self.ff_sim(queries, keys)  # [B, L_q, L_k]
        # Key Masking
        masks = key_masks.unsqueeze(1).repeat(1, queries.shape[1], 1)  # (B, L_q, L_k)
        paddings = torch.ones_like(sim) * (-2 ** 32 + 1)
        sim = torch.where(masks == 0, paddings, sim)  # (B, L_q, L_k)
        # Activation
        sim = torch.softmax(sim, dim=-1)  # (B, L_q, L_k)
        # Query Masking
        sim = sim * query_masks.unsqueeze(-1)
        # Weighted sum
        outputs = torch.einsum('bij,bjk->bik', sim, keys)  # (B, T_q, C)
        return outputs
    def forward(self, queries, keys, query_masks, key_masks):
        keys_attn = self.attention_fc(keys, queries, key_masks, query_masks)  # TODO: check whether the queries/keys argument order is swapped here
        feature_mul = keys_attn * keys
        feature_sub = (keys_attn - keys) * (keys_attn - keys)
        feature_last = torch.cat([feature_mul, feature_sub], dim=-1)
        feature_last = torch.relu(self.linear_last(feature_last))
        return feature_last
class HierarchicalNNSubmulti(nn.Module):
    """Two-level attention (word level inside each of N key sequences, then
    sequence level across the N results), followed by submult feature
    combination as in NNSubmulti."""
    def __init__(self, args):
        # args must provide emb_size (embedding dimension).
        self.args = args
        super(HierarchicalNNSubmulti, self).__init__()
        self.linear_last = nn.Linear(in_features=args.emb_size * 2, out_features=args.emb_size, bias=True)
        self.init_weight()
    def init_weight(self):
        """Xavier-uniform init for the output projection."""
        init.xavier_uniform_(self.linear_last.weight)
    def hierarchical_attention(self, queries, keys, query_masks, key_masks):
        """queries: (B, L_q, E); keys: (B, N, L_k, E); masks are 1/0.
        Returns (B, L_q, E)."""
        L_q = queries.shape[1]
        N = keys.shape[1]
        sim1 = torch.einsum('bik,bnjk->binj', queries, keys)  # [B, L_q, N, L_k]
        # scale = torch.Tensor([max(1.0, queries.size(-1))]).to(self.args.device)
        # scale = torch.sqrt(scale)
        # sim1 = sim1 / scale
        masks = key_masks.unsqueeze(1).repeat(1, L_q, 1, 1)  # [B, L_q, N, L_k]
        paddings = torch.ones_like(sim1) * (-2 ** 32 + 1)
        sim1 = torch.where(masks == 0, paddings, sim1)  # [B, L_q, N, L_k]
        sim1 = torch.softmax(sim1, dim=-1)
        masks = query_masks.unsqueeze(2).repeat(1, 1, N)
        sim1 = sim1 * masks.unsqueeze(-1)  # [B, L_q, N, L_k]
        # Word-level read-out from each key sequence.
        outputs1 = torch.einsum('binj,bnjk->bink', sim1, keys)
        sim2 = torch.einsum('bik,bink->bin', queries, outputs1)  # [B, L_k, N]
        # # Scale
        # scale = torch.Tensor([max(1.0, queries.size(-1))]).to(self.args.device)
        # scale = torch.sqrt(scale)
        # sim2 = sim2 / scale
        # Mask out entirely-padded key sequences at the sequence level.
        masks = torch.sign(torch.sum(key_masks, dim=-1))  # [B, N]
        masks = masks.unsqueeze(1).repeat(1, L_q, 1)  # [B, L_q, N]
        paddings = torch.ones_like(sim2) * (-2 ** 32 + 1)
        sim2 = torch.where(masks == 0, paddings, sim2)
        sim2 = torch.softmax(sim2, dim=-1)
        sim2 = sim2 * query_masks.unsqueeze(-1)
        outputs2 = torch.einsum('bin,bink->bik', sim2, outputs1)
        return outputs2
    def forward(self, queries, keys, query_masks, key_masks):
        keys_attn = self.hierarchical_attention(keys, queries, key_masks, query_masks)  # TODO: check whether the queries/keys argument order is swapped here
        feature_mul = keys_attn * keys
        feature_sub = (keys_attn - keys) * (keys_attn - keys)
        feature_last = torch.cat([feature_mul, feature_sub], dim=-1)
        feature_last = torch.relu(self.linear_last(feature_last))
        return feature_last
class FusionBlock(nn.Module):
    """Cross attention between every (turn, doc) pair plus a two-layer FFN.

    Args:
        input_size: embedding dimension of Q/K/V.
        is_layer_norm: if True, apply LayerNorm around the fused output and
            the FFN output.
    """

    def __init__(self, input_size, is_layer_norm=False):
        super(FusionBlock, self).__init__()
        self.is_layer_norm = is_layer_norm
        if is_layer_norm:
            self.layer_norm1 = nn.LayerNorm(normalized_shape=input_size)
            self.layer_norm2 = nn.LayerNorm(normalized_shape=input_size)
        self.relu = nn.ReLU()
        self.linear1 = nn.Linear(input_size, input_size)
        self.linear2 = nn.Linear(input_size, input_size)
        self.init_weights()

    def init_weights(self):
        """Xavier-init the FFN linears; reset the LayerNorms when present."""
        init.xavier_normal_(self.linear1.weight)
        init.xavier_normal_(self.linear2.weight)
        init.constant_(self.linear1.bias, 0.0)
        init.constant_(self.linear2.bias, 0.0)
        # BUG FIX: layer_norm1/2 only exist when is_layer_norm=True; the
        # original unconditional init raised AttributeError for the default
        # is_layer_norm=False construction.
        if self.is_layer_norm:
            init.constant_(self.layer_norm1.weight, 1.)
            init.constant_(self.layer_norm1.bias, 0.)
            init.constant_(self.layer_norm2.weight, 1.)
            init.constant_(self.layer_norm2.bias, 0.)

    def FFN(self, X):
        """Position-wise feed-forward network: linear -> ReLU -> linear."""
        return self.linear2(self.relu(self.linear1(X)))

    def forward(self, Q, K, V, attention_mask=None, episilon=1e-8, output_score=False):
        """
        :param Q: (batch size, n_turn, max_u_words, embedding_dim)
        :param K: (batch size, n_doc, max_d_words, embedding_dim)
        :param V: (batch size, n_doc, max_d_words, embedding_dim)
        :param attention_mask: additive mask on the 5-D score tensor; zeros
            by default.
        :param output_score: also return the attention scores when True.
        :return: output: (batch size, n_turn, n_doc, max_u_words, embedding_dim)
        """
        # BUG FIX: the original read self.args.device, but this class never
        # stores args; derive the device from the input tensor instead.
        device = Q.device
        if attention_mask is None:
            attention_mask = torch.zeros(Q.size(0), Q.size(1), K.size(1), Q.size(2), K.size(2), device=device)
        else:
            attention_mask = attention_mask.to(device)
        batch_size, n_turn, max_u_words, embedding_dim = Q.shape
        batch_size, n_doc, max_d_words, embedding_dim = K.shape
        dk = torch.Tensor([max(1.0, Q.size(-1))]).to(device)
        Q_K = torch.einsum('btue,bdpe->btdup', Q, K) / (torch.sqrt(dk) + episilon)
        Q_K = Q_K + attention_mask
        Q_K_score = F.softmax(Q_K, dim=-1)
        V_att = torch.einsum('btdup,bdpe->btdue', Q_K_score, V)
        Q_repeat = Q.view(batch_size, n_turn, 1, max_u_words, embedding_dim).repeat(1, 1, n_doc, 1, 1)
        X = Q_repeat + V_att
        if self.is_layer_norm:
            X = self.layer_norm1(X)
            output = self.layer_norm2(self.FFN(X) + X)
        else:
            output = self.FFN(X) + X
        if output_score:
            return output, Q_K_score
        else:
            return output
class MLP_Attention(nn.Module):
    """Additive (MLP-scored) attention that pools X into one vector per
    reference position: X (B, n_X, E), ref (B, n_ref, E) -> (B, n_ref, E)."""

    def __init__(self, input_size, hidden_size):
        super(MLP_Attention, self).__init__()
        self.linear_X = nn.Linear(input_size, hidden_size, bias=True)
        self.linear_ref = nn.Linear(input_size, hidden_size, bias=True)
        self.v = nn.Linear(hidden_size, out_features=1)

    def init_weight(self):
        """Xavier-init the weights and zero the biases.

        BUG FIX: the original body referenced self.linear1/self.linear2,
        which this class never defines (copy-paste from TransformerBlock),
        so any call raised AttributeError. NOTE(review): __init__ still does
        not call this method, matching the original behavior.
        """
        init.xavier_normal_(self.linear_X.weight)
        init.xavier_normal_(self.linear_ref.weight)
        init.xavier_normal_(self.v.weight)
        init.constant_(self.linear_X.bias, 0.0)
        init.constant_(self.linear_ref.bias, 0.0)
        init.constant_(self.v.bias, 0.0)

    def forward(self, X, ref):
        """Score every (X, ref) pair with v(tanh(WX + Wref)), softmax over
        the X axis, and return the weighted sums (B, n_ref, E)."""
        batch_size, n_X, _ = X.shape
        _, n_ref, _ = ref.shape
        stacking_X = self.linear_X(X).view(batch_size, n_X, 1, -1).repeat(1, 1, n_ref, 1)
        stacking_ref = self.linear_ref(ref).view(batch_size, 1, n_ref, -1).repeat(1, n_X, 1, 1)
        # NOTE(review): .squeeze() drops ALL size-1 dims, so batch_size == 1
        # or n_X == 1 would collapse the wrong axis -- confirm callers avoid that.
        out = self.v(torch.tanh(stacking_X + stacking_ref)).squeeze()
        attention_scores = torch.softmax(out, dim=1)
        weighted_X = torch.einsum('bxe,bxr->bre', X, attention_scores)
        return weighted_X
if __name__ == '__main__':
    # Smoke test: pool a random (16, 25, 300) batch against an equally
    # sized reference and print the pooled output shape.
    mlp_attention = MLP_Attention(300, 128)
    X = torch.rand(16, 25, 300)
    ref = torch.rand(16, 25, 300)
    out = mlp_attention(X, ref)
    print(out.shape)
|
[
"torch.ones_like",
"torch.nn.ReLU",
"torch.rand",
"torch.where",
"torch.nn.Tanh",
"torch.sqrt",
"torch.nn.init.xavier_uniform_",
"torch.nn.init.xavier_normal_",
"torch.cat",
"torch.nn.functional.softmax",
"torch.softmax",
"torch.nn.LayerNorm",
"torch.einsum",
"torch.nn.init.constant_",
"torch.nn.Linear",
"torch.sum",
"torch.tanh"
] |
[((13178, 13201), 'torch.rand', 'torch.rand', (['(16)', '(25)', '(300)'], {}), '(16, 25, 300)\n', (13188, 13201), False, 'import torch\n'), ((13212, 13235), 'torch.rand', 'torch.rand', (['(16)', '(25)', '(300)'], {}), '(16, 25, 300)\n', (13222, 13235), False, 'import torch\n'), ((742, 751), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (749, 751), True, 'import torch.nn as nn\n'), ((775, 808), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'input_size'], {}), '(input_size, input_size)\n', (784, 808), True, 'import torch.nn as nn\n'), ((832, 865), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'input_size'], {}), '(input_size, input_size)\n', (841, 865), True, 'import torch.nn as nn\n'), ((931, 971), 'torch.nn.init.xavier_normal_', 'init.xavier_normal_', (['self.linear1.weight'], {}), '(self.linear1.weight)\n', (950, 971), True, 'import torch.nn.init as init\n'), ((980, 1020), 'torch.nn.init.xavier_normal_', 'init.xavier_normal_', (['self.linear2.weight'], {}), '(self.linear2.weight)\n', (999, 1020), True, 'import torch.nn.init as init\n'), ((1029, 1067), 'torch.nn.init.constant_', 'init.constant_', (['self.linear1.bias', '(0.0)'], {}), '(self.linear1.bias, 0.0)\n', (1043, 1067), True, 'import torch.nn.init as init\n'), ((1076, 1114), 'torch.nn.init.constant_', 'init.constant_', (['self.linear2.bias', '(0.0)'], {}), '(self.linear2.bias, 0.0)\n', (1090, 1114), True, 'import torch.nn.init as init\n'), ((1124, 1168), 'torch.nn.init.constant_', 'init.constant_', (['self.layer_norm1.weight', '(1.0)'], {}), '(self.layer_norm1.weight, 1.0)\n', (1138, 1168), True, 'import torch.nn.init as init\n'), ((1176, 1218), 'torch.nn.init.constant_', 'init.constant_', (['self.layer_norm1.bias', '(0.0)'], {}), '(self.layer_norm1.bias, 0.0)\n', (1190, 1218), True, 'import torch.nn.init as init\n'), ((1226, 1270), 'torch.nn.init.constant_', 'init.constant_', (['self.layer_norm2.weight', '(1.0)'], {}), '(self.layer_norm2.weight, 1.0)\n', (1240, 1270), True, 'import torch.nn.init as 
init\n'), ((1278, 1320), 'torch.nn.init.constant_', 'init.constant_', (['self.layer_norm2.bias', '(0.0)'], {}), '(self.layer_norm2.bias, 0.0)\n', (1292, 1320), True, 'import torch.nn.init as init\n'), ((2204, 2226), 'torch.nn.functional.softmax', 'F.softmax', (['Q_K'], {'dim': '(-1)'}), '(Q_K, dim=-1)\n', (2213, 2226), True, 'import torch.nn.functional as F\n'), ((2747, 2791), 'torch.nn.LayerNorm', 'nn.LayerNorm', ([], {'normalized_shape': 'args.emb_size'}), '(normalized_shape=args.emb_size)\n', (2759, 2791), True, 'import torch.nn as nn\n'), ((2823, 2867), 'torch.nn.LayerNorm', 'nn.LayerNorm', ([], {'normalized_shape': 'args.emb_size'}), '(normalized_shape=args.emb_size)\n', (2835, 2867), True, 'import torch.nn as nn\n'), ((3127, 3169), 'torch.nn.init.constant_', 'init.constant_', (['self.layernorm.weight', '(1.0)'], {}), '(self.layernorm.weight, 1.0)\n', (3141, 3169), True, 'import torch.nn.init as init\n'), ((3177, 3217), 'torch.nn.init.constant_', 'init.constant_', (['self.layernorm.bias', '(0.0)'], {}), '(self.layernorm.bias, 0.0)\n', (3191, 3217), True, 'import torch.nn.init as init\n'), ((3225, 3271), 'torch.nn.init.constant_', 'init.constant_', (['self.layernorm_ffn.weight', '(1.0)'], {}), '(self.layernorm_ffn.weight, 1.0)\n', (3239, 3271), True, 'import torch.nn.init as init\n'), ((3279, 3323), 'torch.nn.init.constant_', 'init.constant_', (['self.layernorm_ffn.bias', '(0.0)'], {}), '(self.layernorm_ffn.bias, 0.0)\n', (3293, 3323), True, 'import torch.nn.init as init\n'), ((3331, 3371), 'torch.nn.init.xavier_uniform_', 'init.xavier_uniform_', (['self.ffn[0].weight'], {}), '(self.ffn[0].weight)\n', (3351, 3371), True, 'import torch.nn.init as init\n'), ((3380, 3420), 'torch.nn.init.xavier_uniform_', 'init.xavier_uniform_', (['self.ffn[2].weight'], {}), '(self.ffn[2].weight)\n', (3400, 3420), True, 'import torch.nn.init as init\n'), ((3729, 3772), 'torch.einsum', 'torch.einsum', (['"""bik,bjk->bij"""', 'queries', 'keys'], {}), "('bik,bjk->bij', queries, 
keys)\n", (3741, 3772), False, 'import torch\n'), ((4126, 4164), 'torch.where', 'torch.where', (['(masks == 0)', 'paddings', 'sim'], {}), '(masks == 0, paddings, sim)\n', (4137, 4164), False, 'import torch\n'), ((4218, 4244), 'torch.softmax', 'torch.softmax', (['sim'], {'dim': '(-1)'}), '(sim, dim=-1)\n', (4231, 4244), False, 'import torch\n'), ((4335, 4374), 'torch.einsum', 'torch.einsum', (['"""bij,bjk->bik"""', 'sim', 'keys'], {}), "('bij,bjk->bik', sim, keys)\n", (4347, 4374), False, 'import torch\n'), ((5337, 5416), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(args.emb_size * 2)', 'out_features': 'args.emb_size', 'bias': '(True)'}), '(in_features=args.emb_size * 2, out_features=args.emb_size, bias=True)\n', (5346, 5416), True, 'import torch.nn as nn\n'), ((5480, 5530), 'torch.nn.init.xavier_uniform_', 'init.xavier_uniform_', (['self.linear_ff_sim[0].weight'], {}), '(self.linear_ff_sim[0].weight)\n', (5500, 5530), True, 'import torch.nn.init as init\n'), ((5539, 5589), 'torch.nn.init.xavier_uniform_', 'init.xavier_uniform_', (['self.linear_ff_sim[2].weight'], {}), '(self.linear_ff_sim[2].weight)\n', (5559, 5589), True, 'import torch.nn.init as init\n'), ((5598, 5643), 'torch.nn.init.xavier_uniform_', 'init.xavier_uniform_', (['self.linear_last.weight'], {}), '(self.linear_last.weight)\n', (5618, 5643), True, 'import torch.nn.init as init\n'), ((5956, 6004), 'torch.cat', 'torch.cat', (['[expand_queries, expand_keys]'], {'dim': '(-1)'}), '([expand_queries, expand_keys], dim=-1)\n', (5965, 6004), False, 'import torch\n'), ((6428, 6466), 'torch.where', 'torch.where', (['(masks == 0)', 'paddings', 'sim'], {}), '(masks == 0, paddings, sim)\n', (6439, 6466), False, 'import torch\n'), ((6520, 6546), 'torch.softmax', 'torch.softmax', (['sim'], {'dim': '(-1)'}), '(sim, dim=-1)\n', (6533, 6546), False, 'import torch\n'), ((6677, 6716), 'torch.einsum', 'torch.einsum', (['"""bij,bjk->bik"""', 'sim', 'keys'], {}), "('bij,bjk->bik', sim, keys)\n", (6689, 6716), 
False, 'import torch\n'), ((7041, 7086), 'torch.cat', 'torch.cat', (['[feature_mul, feature_sub]'], {'dim': '(-1)'}), '([feature_mul, feature_sub], dim=-1)\n', (7050, 7086), False, 'import torch\n'), ((7361, 7440), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(args.emb_size * 2)', 'out_features': 'args.emb_size', 'bias': '(True)'}), '(in_features=args.emb_size * 2, out_features=args.emb_size, bias=True)\n', (7370, 7440), True, 'import torch.nn as nn\n'), ((7504, 7549), 'torch.nn.init.xavier_uniform_', 'init.xavier_uniform_', (['self.linear_last.weight'], {}), '(self.linear_last.weight)\n', (7524, 7549), True, 'import torch.nn.init as init\n'), ((7700, 7745), 'torch.einsum', 'torch.einsum', (['"""bik,bnjk->binj"""', 'queries', 'keys'], {}), "('bik,bnjk->binj', queries, keys)\n", (7712, 7745), False, 'import torch\n'), ((8069, 8108), 'torch.where', 'torch.where', (['(masks == 0)', 'paddings', 'sim1'], {}), '(masks == 0, paddings, sim1)\n', (8080, 8108), False, 'import torch\n'), ((8145, 8172), 'torch.softmax', 'torch.softmax', (['sim1'], {'dim': '(-1)'}), '(sim1, dim=-1)\n', (8158, 8172), False, 'import torch\n'), ((8312, 8355), 'torch.einsum', 'torch.einsum', (['"""binj,bnjk->bink"""', 'sim1', 'keys'], {}), "('binj,bnjk->bink', sim1, keys)\n", (8324, 8355), False, 'import torch\n'), ((8371, 8419), 'torch.einsum', 'torch.einsum', (['"""bik,bink->bin"""', 'queries', 'outputs1'], {}), "('bik,bink->bin', queries, outputs1)\n", (8383, 8419), False, 'import torch\n'), ((8811, 8850), 'torch.where', 'torch.where', (['(masks == 0)', 'paddings', 'sim2'], {}), '(masks == 0, paddings, sim2)\n', (8822, 8850), False, 'import torch\n'), ((8867, 8894), 'torch.softmax', 'torch.softmax', (['sim2'], {'dim': '(-1)'}), '(sim2, dim=-1)\n', (8880, 8894), False, 'import torch\n'), ((8963, 9008), 'torch.einsum', 'torch.einsum', (['"""bin,bink->bik"""', 'sim2', 'outputs1'], {}), "('bin,bink->bik', sim2, outputs1)\n", (8975, 9008), False, 'import torch\n'), ((9327, 9372), 
'torch.cat', 'torch.cat', (['[feature_mul, feature_sub]'], {'dim': '(-1)'}), '([feature_mul, feature_sub], dim=-1)\n', (9336, 9372), False, 'import torch\n'), ((9836, 9845), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9843, 9845), True, 'import torch.nn as nn\n'), ((9869, 9902), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'input_size'], {}), '(input_size, input_size)\n', (9878, 9902), True, 'import torch.nn as nn\n'), ((9926, 9959), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'input_size'], {}), '(input_size, input_size)\n', (9935, 9959), True, 'import torch.nn as nn\n'), ((10025, 10065), 'torch.nn.init.xavier_normal_', 'init.xavier_normal_', (['self.linear1.weight'], {}), '(self.linear1.weight)\n', (10044, 10065), True, 'import torch.nn.init as init\n'), ((10074, 10114), 'torch.nn.init.xavier_normal_', 'init.xavier_normal_', (['self.linear2.weight'], {}), '(self.linear2.weight)\n', (10093, 10114), True, 'import torch.nn.init as init\n'), ((10123, 10161), 'torch.nn.init.constant_', 'init.constant_', (['self.linear1.bias', '(0.0)'], {}), '(self.linear1.bias, 0.0)\n', (10137, 10161), True, 'import torch.nn.init as init\n'), ((10170, 10208), 'torch.nn.init.constant_', 'init.constant_', (['self.linear2.bias', '(0.0)'], {}), '(self.linear2.bias, 0.0)\n', (10184, 10208), True, 'import torch.nn.init as init\n'), ((10218, 10262), 'torch.nn.init.constant_', 'init.constant_', (['self.layer_norm1.weight', '(1.0)'], {}), '(self.layer_norm1.weight, 1.0)\n', (10232, 10262), True, 'import torch.nn.init as init\n'), ((10270, 10312), 'torch.nn.init.constant_', 'init.constant_', (['self.layer_norm1.bias', '(0.0)'], {}), '(self.layer_norm1.bias, 0.0)\n', (10284, 10312), True, 'import torch.nn.init as init\n'), ((10320, 10364), 'torch.nn.init.constant_', 'init.constant_', (['self.layer_norm2.weight', '(1.0)'], {}), '(self.layer_norm2.weight, 1.0)\n', (10334, 10364), True, 'import torch.nn.init as init\n'), ((10372, 10414), 'torch.nn.init.constant_', 'init.constant_', 
(['self.layer_norm2.bias', '(0.0)'], {}), '(self.layer_norm2.bias, 0.0)\n', (10386, 10414), True, 'import torch.nn.init as init\n'), ((11457, 11479), 'torch.nn.functional.softmax', 'F.softmax', (['Q_K'], {'dim': '(-1)'}), '(Q_K, dim=-1)\n', (11466, 11479), True, 'import torch.nn.functional as F\n'), ((11496, 11543), 'torch.einsum', 'torch.einsum', (['"""btdup,bdpe->btdue"""', 'Q_K_score', 'V'], {}), "('btdup,bdpe->btdue', Q_K_score, V)\n", (11508, 11543), False, 'import torch\n'), ((12106, 12151), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {'bias': '(True)'}), '(input_size, hidden_size, bias=True)\n', (12115, 12151), True, 'import torch.nn as nn\n'), ((12178, 12223), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {'bias': '(True)'}), '(input_size, hidden_size, bias=True)\n', (12187, 12223), True, 'import torch.nn as nn\n'), ((12241, 12279), 'torch.nn.Linear', 'nn.Linear', (['hidden_size'], {'out_features': '(1)'}), '(hidden_size, out_features=1)\n', (12250, 12279), True, 'import torch.nn as nn\n'), ((12316, 12357), 'torch.nn.init.xavier_normal_', 'init.xavier_normal_', (['self.linear_X.weight'], {}), '(self.linear_X.weight)\n', (12335, 12357), True, 'import torch.nn.init as init\n'), ((12366, 12409), 'torch.nn.init.xavier_normal_', 'init.xavier_normal_', (['self.linear_ref.weight'], {}), '(self.linear_ref.weight)\n', (12385, 12409), True, 'import torch.nn.init as init\n'), ((12418, 12452), 'torch.nn.init.xavier_normal_', 'init.xavier_normal_', (['self.v.weight'], {}), '(self.v.weight)\n', (12437, 12452), True, 'import torch.nn.init as init\n'), ((12461, 12499), 'torch.nn.init.constant_', 'init.constant_', (['self.linear1.bias', '(0.0)'], {}), '(self.linear1.bias, 0.0)\n', (12475, 12499), True, 'import torch.nn.init as init\n'), ((12508, 12546), 'torch.nn.init.constant_', 'init.constant_', (['self.linear2.bias', '(0.0)'], {}), '(self.linear2.bias, 0.0)\n', (12522, 12546), True, 'import torch.nn.init as init\n'), ((12555, 12587), 
'torch.nn.init.constant_', 'init.constant_', (['self.v.bias', '(0.0)'], {}), '(self.v.bias, 0.0)\n', (12569, 12587), True, 'import torch.nn.init as init\n'), ((12974, 12999), 'torch.softmax', 'torch.softmax', (['out'], {'dim': '(1)'}), '(out, dim=1)\n', (12987, 12999), False, 'import torch\n'), ((13021, 13070), 'torch.einsum', 'torch.einsum', (['"""bxe,bxr->bre"""', 'X', 'attention_scores'], {}), "('bxe,bxr->bre', X, attention_scores)\n", (13033, 13070), False, 'import torch\n'), ((606, 647), 'torch.nn.LayerNorm', 'nn.LayerNorm', ([], {'normalized_shape': 'input_size'}), '(normalized_shape=input_size)\n', (618, 647), True, 'import torch.nn as nn\n'), ((679, 720), 'torch.nn.LayerNorm', 'nn.LayerNorm', ([], {'normalized_shape': 'input_size'}), '(normalized_shape=input_size)\n', (691, 720), True, 'import torch.nn as nn\n'), ((2916, 2966), 'torch.nn.Linear', 'nn.Linear', (['args.emb_size', 'args.emb_size'], {'bias': '(True)'}), '(args.emb_size, args.emb_size, bias=True)\n', (2925, 2966), True, 'import torch.nn as nn\n'), ((2980, 2989), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2987, 2989), True, 'import torch.nn as nn\n'), ((3003, 3053), 'torch.nn.Linear', 'nn.Linear', (['args.emb_size', 'args.emb_size'], {'bias': '(True)'}), '(args.emb_size, args.emb_size, bias=True)\n', (3012, 3053), True, 'import torch.nn as nn\n'), ((5139, 5208), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(args.emb_size * 2)', 'out_features': '(100)', 'bias': '(True)'}), '(in_features=args.emb_size * 2, out_features=100, bias=True)\n', (5148, 5208), True, 'import torch.nn as nn\n'), ((5222, 5231), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (5229, 5231), True, 'import torch.nn as nn\n'), ((5245, 5299), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(100)', 'out_features': '(1)', 'bias': '(False)'}), '(in_features=100, out_features=1, bias=False)\n', (5254, 5299), True, 'import torch.nn as nn\n'), ((6376, 6396), 'torch.ones_like', 'torch.ones_like', (['sim'], {}), '(sim)\n', 
(6391, 6396), False, 'import torch\n'), ((8015, 8036), 'torch.ones_like', 'torch.ones_like', (['sim1'], {}), '(sim1)\n', (8030, 8036), False, 'import torch\n'), ((8630, 8658), 'torch.sum', 'torch.sum', (['key_masks'], {'dim': '(-1)'}), '(key_masks, dim=-1)\n', (8639, 8658), False, 'import torch\n'), ((8757, 8778), 'torch.ones_like', 'torch.ones_like', (['sim2'], {}), '(sim2)\n', (8772, 8778), False, 'import torch\n'), ((9700, 9741), 'torch.nn.LayerNorm', 'nn.LayerNorm', ([], {'normalized_shape': 'input_size'}), '(normalized_shape=input_size)\n', (9712, 9741), True, 'import torch.nn as nn\n'), ((9773, 9814), 'torch.nn.LayerNorm', 'nn.LayerNorm', ([], {'normalized_shape': 'input_size'}), '(normalized_shape=input_size)\n', (9785, 9814), True, 'import torch.nn as nn\n'), ((11333, 11371), 'torch.einsum', 'torch.einsum', (['"""btue,bdpe->btdup"""', 'Q', 'K'], {}), "('btue,bdpe->btdup', Q, K)\n", (11345, 11371), False, 'import torch\n'), ((2050, 2064), 'torch.sqrt', 'torch.sqrt', (['dk'], {}), '(dk)\n', (2060, 2064), False, 'import torch\n'), ((3892, 3909), 'torch.sqrt', 'torch.sqrt', (['scale'], {}), '(scale)\n', (3902, 3909), False, 'import torch\n'), ((11375, 11389), 'torch.sqrt', 'torch.sqrt', (['dk'], {}), '(dk)\n', (11385, 11389), False, 'import torch\n'), ((4052, 4072), 'torch.ones_like', 'torch.ones_like', (['sim'], {}), '(sim)\n', (4067, 4072), False, 'import torch\n'), ((12898, 12935), 'torch.tanh', 'torch.tanh', (['(stacking_X + stacking_ref)'], {}), '(stacking_X + stacking_ref)\n', (12908, 12935), False, 'import torch\n')]
|
from django.shortcuts import reverse
from apps.notifications.models import Notification
def connection_notifications(backend, user, response, *args, **kwargs):
    """Social-auth pipeline step run on connect.

    Dismisses the stale connect/disconnect prompts for this backend and
    records a notification that the user connected.
    """
    if backend.name not in ['sharemyhealth']:
        return
    begin_url = reverse('social:begin', args=[backend.name])
    action_url = reverse('social:disconnect', args=[backend.name])
    # Dismiss the notification prompting the user to connect
    to_dismiss = Notification.objects.filter(
        notify_id=user.id,
        actor_id=user.id,
        actions__contains=f'''"url": "{begin_url}"''',
    )
    for item in to_dismiss:
        item.dismissed = True
        item.save()
    # Dismiss any notifications related to this backend
    to_dismiss = Notification.objects.filter(
        notify_id=user.id,
        actor_id=user.id,
        actions__contains=f'''"url": "{action_url}"''',
    )
    for item in to_dismiss:
        item.dismissed = True
        item.save()
    # Create a notification that the user connected to the backend
    Notification.objects.create(
        notify=user,
        actor=user,
        actions=[{'url': action_url, 'text': 'Disconnect'}],
        message='You connected to <b>HIXNY</b>',
    )
def disconnection_notifications(backend, user, *args, **kwargs):
    """Social-auth pipeline step run on disconnect.

    Dismisses every notification whose actions reference this backend's
    disconnect URL.
    """
    if backend.name not in ['sharemyhealth']:
        return
    action_url = reverse('social:disconnect', args=[backend.name])
    # Dismiss any notifications related to this backend
    stale = Notification.objects.filter(
        notify_id=user.id,
        actor_id=user.id,
        actions__contains=f'''"url": "{action_url}"''',
    )
    for item in stale:
        item.dismissed = True
        item.save()
|
[
"apps.notifications.models.Notification.objects.filter",
"django.shortcuts.reverse",
"apps.notifications.models.Notification.objects.create"
] |
[((687, 736), 'django.shortcuts.reverse', 'reverse', (['"""social:disconnect"""'], {'args': '[backend.name]'}), "('social:disconnect', args=[backend.name])\n", (694, 736), False, 'from django.shortcuts import reverse\n'), ((761, 873), 'apps.notifications.models.Notification.objects.filter', 'Notification.objects.filter', ([], {'notify_id': 'user.id', 'actor_id': 'user.id', 'actions__contains': 'f""""url": "{action_url}\\""""'}), '(notify_id=user.id, actor_id=user.id,\n actions__contains=f\'"url": "{action_url}"\')\n', (788, 873), False, 'from apps.notifications.models import Notification\n'), ((1118, 1273), 'apps.notifications.models.Notification.objects.create', 'Notification.objects.create', ([], {'notify': 'user', 'actor': 'user', 'actions': "[{'url': action_url, 'text': 'Disconnect'}]", 'message': '"""You connected to <b>HIXNY</b>"""'}), "(notify=user, actor=user, actions=[{'url':\n action_url, 'text': 'Disconnect'}], message='You connected to <b>HIXNY</b>'\n )\n", (1145, 1273), False, 'from apps.notifications.models import Notification\n'), ((1514, 1563), 'django.shortcuts.reverse', 'reverse', (['"""social:disconnect"""'], {'args': '[backend.name]'}), "('social:disconnect', args=[backend.name])\n", (1521, 1563), False, 'from django.shortcuts import reverse\n'), ((1588, 1700), 'apps.notifications.models.Notification.objects.filter', 'Notification.objects.filter', ([], {'notify_id': 'user.id', 'actor_id': 'user.id', 'actions__contains': 'f""""url": "{action_url}\\""""'}), '(notify_id=user.id, actor_id=user.id,\n actions__contains=f\'"url": "{action_url}"\')\n', (1615, 1700), False, 'from apps.notifications.models import Notification\n'), ((427, 471), 'django.shortcuts.reverse', 'reverse', (['"""social:begin"""'], {'args': '[backend.name]'}), "('social:begin', args=[backend.name])\n", (434, 471), False, 'from django.shortcuts import reverse\n')]
|
#!/usr/bin/python3
"""Calculate IoU of part segmentation task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import argparse
import data_utils
import numpy as np
def main():
    """Evaluate part-segmentation predictions against ground truth and print IoU.

    Both --folder_gt and --folder_pred must contain one sub-folder per
    category, each holding per-sample label files (one integer label per
    point, readable by np.loadtxt).  Prints a per-category IoU and the
    overall IoU (averaged over all samples).  With --part_avg the IoU of a
    sample is averaged over its ground-truth parts; otherwise it is plain
    per-point accuracy.  If --folder_data is given, a PLY file marking
    correct/incorrect points is written per sample.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--folder_gt', '-g', help='Path to ground truth folder', required=True)
    parser.add_argument('--folder_pred', '-p', help='Path to prediction folder', required=True)
    parser.add_argument('--folder_data', '-d', help='Path to point cloud data folder')
    parser.add_argument('--part_avg', '-a', action='store_true', help='Use part level average')
    args = parser.parse_args()
    print(args)
    # ShapeNet-part synset ids -> human-readable category names
    # (used only when the category folder name is numeric).
    category_id_to_name = {
        2691156: 'Airplane',
        2773838: 'Bag',
        2954340: 'Cap',
        2958343: 'Car',
        3001627: 'Chair',
        3261776: 'Earphone',
        3467517: 'Guitar',
        3624134: 'Knife',
        3636649: 'Lamp',
        3642806: 'Laptop',
        3790512: 'Motorbike',
        3797390: 'Mug',
        3948459: 'Pistol',
        4099429: 'Rocket',
        4225987: 'Skateboard',
        4379243: 'Table'}
    categories = sorted(os.listdir(args.folder_gt))
    # First pass: ground-truth labels are not guaranteed to start at 0;
    # find the global minimum so labels can be shifted to a 0-based range.
    label_min = sys.maxsize
    for category in categories:
        category_folder_gt = os.path.join(args.folder_gt, category)
        filenames = sorted(os.listdir(category_folder_gt))
        for filename in filenames:
            filepath_gt = os.path.join(category_folder_gt, filename)
            label_gt = np.loadtxt(filepath_gt).astype(np.int32)
            label_min = min(label_min, np.amin(label_gt))
    # Second pass: accumulate per-sample IoU, per category and overall.
    IoU = 0.0
    total_num = 0
    for category in categories:
        category_folder_gt = os.path.join(args.folder_gt, category)
        category_folder_pred = os.path.join(args.folder_pred, category)
        if args.folder_data:
            category_folder_data = os.path.join(args.folder_data, category)
            category_folder_err = os.path.join(args.folder_pred+'_err_ply', category)
        IoU_category = 0.0
        filenames = sorted(os.listdir(category_folder_gt))
        for filename in filenames:
            filepath_gt = os.path.join(category_folder_gt, filename)
            filepath_pred = os.path.join(category_folder_pred, filename)
            # Shift ground truth to a 0-based range; predictions are
            # assumed to be 0-based already.
            label_gt = np.loadtxt(filepath_gt).astype(np.int32) - label_min
            label_pred = np.loadtxt(filepath_pred).astype(np.int32)
            if args.folder_data:
                # Visualization: write a PLY colored by per-point
                # correctness (prediction == ground truth).
                filepath_data = os.path.join(category_folder_data, filename[:-3]+'pts')
                filepath_err = os.path.join(category_folder_err, filename[:-3] + 'ply')
                coordinates = [[float(value) for value in xyz.split(' ')]
                               for xyz in open(filepath_data, 'r') if len(xyz.split(' ')) == 3]
                assert (label_gt.shape[0] == len(coordinates))
                data_utils.save_ply_property(np.array(coordinates), (label_gt == label_pred), 6, filepath_err)
            if args.part_avg:
                # Part-averaged IoU: mean IoU over all parts that appear
                # in the ground truth of this sample.
                label_max = np.amax(label_gt)
                IoU_part = 0.0
                for label_idx in range(label_max+1):
                    locations_gt = (label_gt == label_idx)
                    locations_pred = (label_pred == label_idx)
                    I_locations = np.logical_and(locations_gt, locations_pred)
                    U_locations = np.logical_or(locations_gt, locations_pred)
                    # eps guards against 0/0 for parts absent in both.
                    I = np.sum(I_locations) + np.finfo(np.float32).eps
                    U = np.sum(U_locations) + np.finfo(np.float32).eps
                    IoU_part = IoU_part + I/U
                IoU_sample = IoU_part / (label_max+1)
            else:
                # Plain per-point accuracy.
                label_correct_locations = (label_gt == label_pred)
                IoU_sample = np.sum(label_correct_locations) / label_gt.size
            IoU_category = IoU_category + IoU_sample
        IoU = IoU + IoU_category
        IoU_category = IoU_category / len(filenames)
        if category.isdigit():
            print("IoU of %s: " % (category_id_to_name[int(category)]), IoU_category)
        else:
            print("IoU of %s: " % category, IoU_category)
        total_num = total_num + len(filenames)
    IoU = IoU / total_num
    print("IoU: ", IoU)
# Script entry point: run the evaluation when executed directly.
if __name__ == '__main__':
    main()
|
[
"numpy.sum",
"numpy.amin",
"argparse.ArgumentParser",
"numpy.logical_and",
"numpy.amax",
"numpy.finfo",
"numpy.array",
"numpy.loadtxt",
"numpy.logical_or",
"os.path.join",
"os.listdir"
] |
[((293, 318), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (316, 318), False, 'import argparse\n'), ((1247, 1273), 'os.listdir', 'os.listdir', (['args.folder_gt'], {}), '(args.folder_gt)\n', (1257, 1273), False, 'import os\n'), ((1369, 1407), 'os.path.join', 'os.path.join', (['args.folder_gt', 'category'], {}), '(args.folder_gt, category)\n', (1381, 1407), False, 'import os\n'), ((1797, 1835), 'os.path.join', 'os.path.join', (['args.folder_gt', 'category'], {}), '(args.folder_gt, category)\n', (1809, 1835), False, 'import os\n'), ((1868, 1908), 'os.path.join', 'os.path.join', (['args.folder_pred', 'category'], {}), '(args.folder_pred, category)\n', (1880, 1908), False, 'import os\n'), ((1436, 1466), 'os.listdir', 'os.listdir', (['category_folder_gt'], {}), '(category_folder_gt)\n', (1446, 1466), False, 'import os\n'), ((1531, 1573), 'os.path.join', 'os.path.join', (['category_folder_gt', 'filename'], {}), '(category_folder_gt, filename)\n', (1543, 1573), False, 'import os\n'), ((1975, 2015), 'os.path.join', 'os.path.join', (['args.folder_data', 'category'], {}), '(args.folder_data, category)\n', (1987, 2015), False, 'import os\n'), ((2051, 2104), 'os.path.join', 'os.path.join', (["(args.folder_pred + '_err_ply')", 'category'], {}), "(args.folder_pred + '_err_ply', category)\n", (2063, 2104), False, 'import os\n'), ((2161, 2191), 'os.listdir', 'os.listdir', (['category_folder_gt'], {}), '(category_folder_gt)\n', (2171, 2191), False, 'import os\n'), ((2256, 2298), 'os.path.join', 'os.path.join', (['category_folder_gt', 'filename'], {}), '(category_folder_gt, filename)\n', (2268, 2298), False, 'import os\n'), ((2328, 2372), 'os.path.join', 'os.path.join', (['category_folder_pred', 'filename'], {}), '(category_folder_pred, filename)\n', (2340, 2372), False, 'import os\n'), ((1679, 1696), 'numpy.amin', 'np.amin', (['label_gt'], {}), '(label_gt)\n', (1686, 1696), True, 'import numpy as np\n'), ((2588, 2645), 'os.path.join', 'os.path.join', 
(['category_folder_data', "(filename[:-3] + 'pts')"], {}), "(category_folder_data, filename[:-3] + 'pts')\n", (2600, 2645), False, 'import os\n'), ((2676, 2732), 'os.path.join', 'os.path.join', (['category_folder_err', "(filename[:-3] + 'ply')"], {}), "(category_folder_err, filename[:-3] + 'ply')\n", (2688, 2732), False, 'import os\n'), ((3143, 3160), 'numpy.amax', 'np.amax', (['label_gt'], {}), '(label_gt)\n', (3150, 3160), True, 'import numpy as np\n'), ((1598, 1621), 'numpy.loadtxt', 'np.loadtxt', (['filepath_gt'], {}), '(filepath_gt)\n', (1608, 1621), True, 'import numpy as np\n'), ((2476, 2501), 'numpy.loadtxt', 'np.loadtxt', (['filepath_pred'], {}), '(filepath_pred)\n', (2486, 2501), True, 'import numpy as np\n'), ((3015, 3036), 'numpy.array', 'np.array', (['coordinates'], {}), '(coordinates)\n', (3023, 3036), True, 'import numpy as np\n'), ((3406, 3450), 'numpy.logical_and', 'np.logical_and', (['locations_gt', 'locations_pred'], {}), '(locations_gt, locations_pred)\n', (3420, 3450), True, 'import numpy as np\n'), ((3486, 3529), 'numpy.logical_or', 'np.logical_or', (['locations_gt', 'locations_pred'], {}), '(locations_gt, locations_pred)\n', (3499, 3529), True, 'import numpy as np\n'), ((3894, 3925), 'numpy.sum', 'np.sum', (['label_correct_locations'], {}), '(label_correct_locations)\n', (3900, 3925), True, 'import numpy as np\n'), ((2397, 2420), 'numpy.loadtxt', 'np.loadtxt', (['filepath_gt'], {}), '(filepath_gt)\n', (2407, 2420), True, 'import numpy as np\n'), ((3555, 3574), 'numpy.sum', 'np.sum', (['I_locations'], {}), '(I_locations)\n', (3561, 3574), True, 'import numpy as np\n'), ((3627, 3646), 'numpy.sum', 'np.sum', (['U_locations'], {}), '(U_locations)\n', (3633, 3646), True, 'import numpy as np\n'), ((3577, 3597), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (3585, 3597), True, 'import numpy as np\n'), ((3649, 3669), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (3657, 3669), True, 'import numpy as np\n')]
|
GAME_SIZE = 4
SCORE_TO_WIN = 2048

from game2048.game import Game
from game2048.agents import ExpectiMaxAgent

# save the dataset: (16 board cells + expert move) records, bucketed into
# three files by the largest tile currently on the board.
f_256 = open("dataset_256.txt", "w")
f_512 = open("dataset_512.txt", "w")
f_1024 = open("dataset_1024.txt", "w")

for episode in range(30000):
    # BUG FIX: the episode counter used to be named `i` and was shadowed
    # by the inner board loops, so this print always showed 3.
    print("i = ", episode)
    game = Game(size=GAME_SIZE)
    agent = ExpectiMaxAgent(game=game)
    while True:
        direction = agent.step()
        # NOTE(review): `end` may be non-boolean (e.g. 0/1/2); the
        # original `== True` comparison is kept to preserve behavior.
        if game.end == True:
            break
        # Largest tile on the 4x4 board selects the output bucket.
        max_tile = 0
        for row in range(4):
            for col in range(4):
                if game.board[row, col] > max_tile:
                    max_tile = game.board[row, col]
        if max_tile == 2048:  # target reached, start the next episode
            break
        if max_tile <= 256:
            out_file = f_256
        elif max_tile == 512:
            out_file = f_512
        elif max_tile == 1024:
            out_file = f_1024
        else:
            out_file = None  # unreachable for power-of-two tiles, kept for safety
        if out_file is not None:
            # One board cell per line, then the expert's move.
            for row in range(4):
                for col in range(4):
                    print(game.board[row, col], file=out_file)
            print(direction, file=out_file)
        game.move(direction)

# BUG FIX: the files were never closed, risking lost buffered data.
f_256.close()
f_512.close()
f_1024.close()
|
[
"game2048.agents.ExpectiMaxAgent",
"game2048.game.Game"
] |
[((299, 319), 'game2048.game.Game', 'Game', ([], {'size': 'GAME_SIZE'}), '(size=GAME_SIZE)\n', (303, 319), False, 'from game2048.game import Game\n'), ((332, 358), 'game2048.agents.ExpectiMaxAgent', 'ExpectiMaxAgent', ([], {'game': 'game'}), '(game=game)\n', (347, 358), False, 'from game2048.agents import ExpectiMaxAgent\n')]
|
# Implementations of approval-based multi-winner voting rules
from __future__ import print_function
import math
import sys
from itertools import combinations
try:
from gmpy2 import mpq as Fraction
except ImportError:
from fractions import Fraction
from rules_approval_ilp import compute_monroe_ilp, compute_thiele_methods_ilp,\
compute_optphragmen_ilp, compute_minimaxav_ilp
from committees import sort_committees,\
enough_approved_candidates,\
print_committees
import score_functions as sf
########################################################################
# Registry of all implemented multi-winner rules: maps the short rule
# identifier (as accepted by compute_rule) to a human-readable description.
MWRULES = {
    "av": "Approval Voting",
    "sav": "Satisfaction Approval Voting",
    "pav-ilp": "Proportional Approval Voting (PAV) via ILP",
    "pav-noilp": "Proportional Approval Voting (PAV) via branch-and-bound",
    "seqpav": "Sequential Proportional Approval Voting (seq-PAV)",
    "revseqpav": "Reverse Sequential Prop. Approval Voting (revseq-PAV)",
    "slav-ilp": "Sainte-Lague Approval Voting (SLAV) via ILP",
    "slav-noilp": "Sainte-Lague Approval Voting (SLAV) via branch-and-bound",
    "seqslav": "Sequential Sainte-Lague Approval Voting (seq-SLAV)",
    "phrag": "Phragmen's sequential rule (seq-Phragmen)",
    "optphrag": "Phragmen's optimization rule (opt-Phragmen)",
    "monroe-ilp": "Monroe's rule via ILP",
    "monroe-noilp": "Monroe's rule via flow algorithm",
    "greedy-monroe": "Greedy Monroe rule",
    "cc-ilp": "Chamberlin-Courant (CC) via ILP",
    "cc-noilp": "Chamberlin-Courant (CC) via branch-and-bound",
    "seqcc": "Sequential Chamberlin-Courant (seq-CC)",
    "revseqcc": "Reverse Sequential Chamberlin-Courant (revseq-CC)",
    "minimaxav-noilp": "Minimax Approval Voting via brute-force",
    "minimaxav-ilp": "Minimax Approval Voting via ILP",
    "rule-x": "Rule X",
    # typo fixed: "Enestroeom" -> "Enestroem"
    "phragmen-enestroem": "Phragmen's first method / Enestroem’s method",
}
def compute_rule(name, profile, committeesize, resolute=False):
    """Returns the list of winning committees according to the named rule.

    Parameters
    ----------
    name : str
        rule identifier, one of the keys of MWRULES
    profile : Profile
        an approval profile
    committeesize : int
        the desired committee size k
    resolute : bool, optional
        if True, compute only one (tie-broken) winning committee

    Raises
    ------
    NotImplementedError
        if `name` does not identify a known rule
    """
    # Fixed: the "seqcc" branch used to be a bare `if` that broke the
    # elif chain (harmless only because every branch returns); the chain
    # is now one consistent cascade.
    if name == "seqpav":
        return compute_seqpav(profile, committeesize, resolute=resolute)
    elif name == "revseqpav":
        return compute_revseqpav(profile, committeesize, resolute=resolute)
    elif name == "av":
        return compute_av(profile, committeesize, resolute=resolute)
    elif name == "sav":
        return compute_sav(profile, committeesize, resolute=resolute)
    elif name == "pav-ilp":
        return compute_pav(profile, committeesize,
                           ilp=True, resolute=resolute)
    elif name == "pav-noilp":
        return compute_pav(profile, committeesize,
                           ilp=False, resolute=resolute)
    elif name == "seqslav":
        return compute_seqslav(profile, committeesize, resolute=resolute)
    elif name == "slav-ilp":
        return compute_slav(profile, committeesize,
                            ilp=True, resolute=resolute)
    elif name == "slav-noilp":
        return compute_slav(profile, committeesize,
                            ilp=False, resolute=resolute)
    elif name == "phrag":
        return compute_seqphragmen(profile, committeesize, resolute=resolute)
    elif name == "monroe-ilp":
        return compute_monroe(profile, committeesize,
                              ilp=True, resolute=resolute)
    elif name == "monroe-noilp":
        return compute_monroe(profile, committeesize,
                              ilp=False, resolute=resolute)
    elif name == "greedy-monroe":
        # greedy-monroe is inherently resolute; no resolute parameter
        return compute_greedy_monroe(profile, committeesize)
    elif name == "cc-ilp":
        return compute_cc(profile, committeesize,
                          ilp=True, resolute=resolute)
    elif name == "cc-noilp":
        return compute_cc(profile, committeesize,
                          ilp=False, resolute=resolute)
    elif name == "seqcc":
        return compute_seqcc(profile, committeesize, resolute=resolute)
    elif name == "revseqcc":
        return compute_revseqcc(profile, committeesize, resolute=resolute)
    elif name == "minimaxav-noilp":
        return compute_minimaxav(profile, committeesize,
                                 ilp=False, resolute=resolute)
    elif name == "minimaxav-ilp":
        return compute_minimaxav(profile, committeesize,
                                 ilp=True, resolute=resolute)
    elif name == "optphrag":
        return compute_optphragmen_ilp(profile, committeesize,
                                       resolute=resolute)
    elif name == "rule-x":
        return compute_rule_x(profile, committeesize, resolute=resolute)
    elif name == "phragmen-enestroem":
        return compute_phragmen_enestroem(profile, committeesize,
                                          resolute=resolute)
    else:
        raise NotImplementedError("voting method " + str(name)
                                  + " not known")
def allrules(profile, committeesize, ilp=True, include_resolute=False):
    """Print the winning committees of every implemented rule.

    ILP-based rules are skipped when `ilp` is False; with
    `include_resolute` the tie-broken (resolute) variant of each rule is
    printed as well.
    """
    for rule, description in MWRULES.items():
        if "-ilp" in rule and not ilp:
            continue
        print(description + ":")
        print_committees(compute_rule(rule, profile, committeesize))
        if include_resolute:
            print(description + " (with tie-breaking):")
            print_committees(
                compute_rule(rule, profile, committeesize, resolute=True))
########################################################################
# computes arbitrary Thiele methods via branch-and-bound
def compute_thiele_methods_branchandbound(profile, committeesize,
                                          scorefct_str, resolute=False):
    """Exact Thiele method (e.g. 'pav', 'slav', 'cc') via branch-and-bound.

    Enumerates committees in lexicographic order (candidates are only
    added in increasing index order), pruning partial committees whose
    optimistic upper bound cannot beat the best score found so far.
    Returns all optimal committees, or just one if `resolute` is True.
    """
    enough_approved_candidates(profile, committeesize)
    scorefct = sf.get_scorefct(scorefct_str, committeesize)
    best_committees = []
    # Seed the bound with the greedy (sequential) solution, which is a
    # feasible committee and hence a valid lower bound.
    init_com = compute_seq_thiele_resolute(profile, committeesize,
                                           scorefct_str)
    best_score = sf.thiele_score(profile, init_com[0], scorefct_str)
    part_coms = [[]]  # stack of partial committees (depth-first via insert(0,...))
    while part_coms:
        part_com = part_coms.pop(0)
        # potential committee, check if at least as good
        # as previous best committee
        if len(part_com) == committeesize:
            score = sf.thiele_score(profile, part_com, scorefct_str)
            if score == best_score:
                best_committees.append(part_com)
            elif score > best_score:
                best_committees = [part_com]
                best_score = score
        else:
            # Only candidates with a larger index than the last member
            # may still be added (lexicographic enumeration).
            if len(part_com) > 0:
                largest_cand = part_com[-1]
            else:
                largest_cand = -1
            missing = committeesize - len(part_com)
            marg_util_cand = sf.additional_thiele_scores(profile, part_com,
                                                         scorefct)
            # Optimistic bound: current score plus the `missing` largest
            # marginal scores among the still-addable candidates.
            upper_bound = (
                sum(sorted(marg_util_cand[largest_cand + 1:])[-missing:])
                + sf.thiele_score(profile, part_com, scorefct_str))
            if upper_bound >= best_score:
                for c in range(largest_cand + 1,
                               profile.num_cand - missing + 1):
                    part_coms.insert(0, part_com + [c])
    committees = sort_committees(best_committees)
    if resolute:
        return [committees[0]]
    else:
        return committees
# Sequential PAV
def compute_seqpav(profile, committeesize, resolute=False):
    """Sequential Proportional Approval Voting (seq-PAV): the greedy
    sequential Thiele procedure with PAV scores; resolute picks one
    committee, otherwise all tie-equivalent committees are returned."""
    seq_variant = (compute_seq_thiele_resolute if resolute
                   else compute_seq_thiele_methods)
    return seq_variant(profile, committeesize, 'pav')
# Sequential SLAV
def compute_seqslav(profile, committeesize, resolute=False):
    """Sequential Sainte-Lague Approval Voting (seq-SLAV): the greedy
    sequential Thiele procedure with SLAV scores; resolute picks one
    committee, otherwise all tie-equivalent committees are returned."""
    seq_variant = (compute_seq_thiele_resolute if resolute
                   else compute_seq_thiele_methods)
    return seq_variant(profile, committeesize, 'slav')
# Reverse Sequential PAV
def compute_revseqpav(profile, committeesize, resolute=False):
    """Reverse sequential PAV (revseq-PAV): start from all candidates and
    repeatedly drop the least relevant one, using PAV scores."""
    rev_variant = (compute_revseq_thiele_methods_resolute if resolute
                   else compute_revseq_thiele_methods)
    return rev_variant(profile, committeesize, 'pav')
# Sequential Chamberlin-Courant
def compute_seqcc(profile, committeesize, resolute=False):
    """Sequential Chamberlin-Courant (seq-CC): the greedy sequential
    Thiele procedure with CC scores; resolute picks one committee,
    otherwise all tie-equivalent committees are returned."""
    seq_variant = (compute_seq_thiele_resolute if resolute
                   else compute_seq_thiele_methods)
    return seq_variant(profile, committeesize, 'cc')
# Reverse Sequential Chamberlin-Courant
def compute_revseqcc(profile, committeesize, resolute=False):
    """Reverse sequential Chamberlin-Courant (revseq-CC): start from all
    candidates and repeatedly drop the least relevant one, using CC
    scores."""
    rev_variant = (compute_revseq_thiele_methods_resolute if resolute
                   else compute_revseq_thiele_methods)
    return rev_variant(profile, committeesize, 'cc')
# Satisfaction Approval Voting (SAV)
def compute_sav(profile, committeesize, resolute=False):
    """Satisfaction Approval Voting: Approval Voting where each ballot's
    weight is split evenly among its approved candidates."""
    return compute_av(profile, committeesize, resolute=resolute, sav=True)
# Approval Voting (AV)
def compute_av(profile, committeesize, resolute=False, sav=False):
    """Approval Voting: the committeesize candidates with the largest
    approval scores win; with sav=True each ballot contributes
    weight/|approved| per approved candidate instead of its full weight.
    Ties at the cutoff score yield all completions (or the
    lexicographically first one if resolute)."""
    enough_approved_candidates(profile, committeesize)

    # Accumulate each candidate's (possibly fractional) approval score.
    score = [0] * profile.num_cand
    for pref in profile.preferences:
        if sav:
            # Satisfaction Approval Voting: split the ballot weight
            # evenly among all approved candidates.
            share = Fraction(pref.weight, len(pref.approved))
        else:
            # (Classic) Approval Voting.
            share = pref.weight
        for cand in pref.approved:
            score[cand] += share

    # The committeesize-th largest score separates sure winners from
    # candidates tied at the boundary.
    cutoff = sorted(score)[-committeesize]
    certain_cand = [c for c in range(profile.num_cand) if score[c] > cutoff]
    possible_cand = [c for c in range(profile.num_cand) if score[c] == cutoff]
    missing = committeesize - len(certain_cand)

    if resolute:
        return sort_committees([certain_cand + possible_cand[:missing]])
    return sort_committees([certain_cand + list(extra)
                            for extra in combinations(possible_cand, missing)])
# Sequential Thiele methods (irresolute: returns every committee that can
# result from the greedy procedure under some tie-breaking)
def compute_seq_thiele_methods(profile, committeesize, scorefct_str):
    """Greedily build committees for a Thiele method ('pav', 'slav', 'cc'),
    keeping all tied branches; returns the sorted list of all committees
    with maximal accumulated score."""
    enough_approved_candidates(profile, committeesize)
    scorefct = sf.get_scorefct(scorefct_str, committeesize)
    # maps each partial committee (sorted tuple) to its accumulated score
    comm_scores = {(): 0}
    # build committees starting with the empty set
    for _ in range(0, committeesize):
        comm_scores_next = {}
        for committee, score in comm_scores.items():
            # marginal utility gained by adding candidate to the committee
            additional_score_cand = sf.additional_thiele_scores(
                profile, committee, scorefct)
            for c in range(profile.num_cand):
                if additional_score_cand[c] >= max(additional_score_cand):
                    next_comm = tuple(sorted(committee + (c,)))
                    comm_scores_next[next_comm] = (comm_scores[committee]
                                                   + additional_score_cand[c])
        # remove suboptimal committees
        comm_scores = {}
        cutoff = max(comm_scores_next.values())
        for com, score in comm_scores_next.items():
            if score >= cutoff:
                comm_scores[com] = score
    return sort_committees(list(comm_scores.keys()))
# Sequential Thiele methods with resolute (single committee)
def compute_seq_thiele_resolute(profile, committeesize, scorefct_str):
    """Greedy (resolute) sequential Thiele method: repeatedly add the
    candidate with the largest marginal Thiele score, breaking ties in
    favor of the lowest candidate index, until the committee is full."""
    enough_approved_candidates(profile, committeesize)
    scorefct = sf.get_scorefct(scorefct_str, committeesize)

    committee = []
    while len(committee) < committeesize:
        marginal = sf.additional_thiele_scores(profile, committee, scorefct)
        # max(...) with a key picks the first maximum, which matches the
        # original list.index(max(...)) tie-breaking.
        best = max(range(len(marginal)), key=marginal.__getitem__)
        committee.append(best)
    return [sorted(committee)]
# required for computing Reverse Sequential Thiele methods
def __least_relevant_cands(profile, comm, utilityfct):
    """Return a pair (cands, score): the members of `comm` that contribute
    the least Thiele score (per `utilityfct`) and that minimal score.

    Candidates not in `comm` are assigned a sentinel above the maximum so
    they are never selected for removal.
    """
    # per-candidate contribution to the committee's Thiele score
    marg_util_cand = [0] * profile.num_cand
    for pref in profile.preferences:
        for c in pref.approved:
            satisfaction = len(pref.approved.intersection(comm))
            marg_util_cand[c] += pref.weight * utilityfct(satisfaction)
    for c in range(profile.num_cand):
        if c not in comm:
            # do not choose candidates that already have been removed
            marg_util_cand[c] = max(marg_util_cand) + 1
    # find smallest elements in marg_util_cand and return indices
    return ([cand for cand in range(profile.num_cand)
             if marg_util_cand[cand] == min(marg_util_cand)],
            min(marg_util_cand))
# Reverse Sequential Thiele methods without resolute
def compute_revseq_thiele_methods(profile, committeesize, scorefct_str):
    """Reverse sequential Thiele method: start from the full candidate set
    and repeatedly remove a least relevant candidate, keeping all tied
    branches; returns all committees with maximal remaining score."""
    enough_approved_candidates(profile, committeesize)
    scorefct = sf.get_scorefct(scorefct_str, committeesize)
    # maps each partial committee (tuple) to its current Thiele score
    allcandcomm = tuple(range(profile.num_cand))
    comm_scores = {allcandcomm: sf.thiele_score(profile, allcandcomm,
                                                scorefct_str)}
    for _ in range(0, profile.num_cand - committeesize):
        comm_scores_next = {}
        for committee, score in comm_scores.items():
            # all candidates whose removal loses the least score
            cands_to_remove, score_reduction = \
                __least_relevant_cands(profile, committee, scorefct)
            for c in cands_to_remove:
                next_comm = tuple(set(committee) - set([c]))
                comm_scores_next[next_comm] = score - score_reduction
        # remove suboptimal committees
        comm_scores = {}
        cutoff = max(comm_scores_next.values())
        for com, score in comm_scores_next.items():
            if score >= cutoff:
                comm_scores[com] = score
    return sort_committees(list(comm_scores.keys()))
# Reverse Sequential Thiele methods with resolute (single committee)
def compute_revseq_thiele_methods_resolute(profile, committeesize,
                                           scorefct_str):
    """Resolute reverse-sequential Thiele method: start from the set of
    all candidates and greedily drop a least relevant one until only
    `committeesize` candidates remain."""
    enough_approved_candidates(profile, committeesize)
    scorefct = sf.get_scorefct(scorefct_str, committeesize)

    remaining = set(range(profile.num_cand))
    while len(remaining) > committeesize:
        least_relevant, _ = __least_relevant_cands(profile, remaining,
                                                   scorefct)
        remaining.remove(least_relevant[0])
    return [sorted(remaining)]
# Phragmen's Sequential Rule
def compute_seqphragmen(profile, committeesize, resolute=False):
    """Returns the list of winning committees according to sequential
    Phragmen (seq-Phragmen).

    Each voter carries a "load"; in every round the candidate whose
    election minimizes the maximal voter load is added, with the new load
    shared among the candidate's supporters.  All tied branches are kept
    unless `resolute` is True.
    """
    enough_approved_candidates(profile, committeesize)
    load = {v: 0 for v in profile.preferences}
    # maps each committee (tuple) to the per-voter load dict it induces
    comm_loads = {(): load}
    approvers_weight = {}
    for c in range(profile.num_cand):
        approvers_weight[c] = sum(v.weight
                                  for v in profile.preferences
                                  if c in v.approved)
    # build committees starting with the empty set
    for _ in range(0, committeesize):
        comm_loads_next = {}
        for committee, load in comm_loads.items():
            approvers_load = {}
            for c in range(profile.num_cand):
                approvers_load[c] = sum(v.weight * load[v]
                                        for v in profile.preferences
                                        if c in v.approved)
            # maximal load among c's supporters if c were elected; a
            # candidate with no supporters gets a sentinel above any
            # feasible load so it is never chosen
            new_maxload = [Fraction(approvers_load[c] + 1, approvers_weight[c])
                           if approvers_weight[c] > 0 else committeesize + 1
                           for c in range(profile.num_cand)]
            for c in range(profile.num_cand):
                if c in committee:
                    # already elected candidates are excluded
                    new_maxload[c] = sys.maxsize
            for c in range(profile.num_cand):
                if new_maxload[c] <= min(new_maxload):
                    # elect c: all its supporters' loads rise to the new
                    # maximum, other loads are unchanged
                    new_load = {}
                    for v in profile.preferences:
                        if c in v.approved:
                            new_load[v] = new_maxload[c]
                        else:
                            new_load[v] = load[v]
                    comm_loads_next[tuple(sorted(committee + (c,)))] = new_load
        # remove suboptimal committees
        comm_loads = {}
        cutoff = min([max(load.values()) for load in comm_loads_next.values()])
        for com, load in comm_loads_next.items():
            if max(load.values()) <= cutoff:
                comm_loads[com] = load
    if resolute:
        # keep only the lexicographically first committee
        committees = sort_committees(list(comm_loads.keys()))
        comm = tuple(committees[0])
        comm_loads = {comm: comm_loads[comm]}
    committees = sort_committees(list(comm_loads.keys()))
    if resolute:
        return [committees[0]]
    else:
        return committees
# Minimax Approval Voting
def compute_minimaxav(profile, committeesize, ilp=True, resolute=False):
    """Returns the list of winning committees according to Minimax AV.

    Minimax AV selects committees minimizing the largest Hamming
    distance between the committee and any ballot.  With ilp=True the
    ILP formulation is used; otherwise all committees of the given size
    are enumerated (exponential in committeesize).
    """
    if ilp:
        return compute_minimaxav_ilp(profile, committeesize, resolute)

    def hamming(a, b, elements):
        # number of elements contained in exactly one of a and b
        diffs = 0
        for x in elements:
            if (x in a and x not in b) or (x in b and x not in a):
                diffs += 1
        return diffs

    def mavscore(committee, profile):
        # largest Hamming distance between the committee and any ballot
        score = 0
        for vote in profile.preferences:
            hamdistance = hamming(vote.approved, committee,
                                  list(range(profile.num_cand)))
            if hamdistance > score:
                score = hamdistance
        return score

    enough_approved_candidates(profile, committeesize)

    opt_committees = []
    opt_mavscore = profile.num_cand + 1
    for comm in combinations(list(range(profile.num_cand)), committeesize):
        score = mavscore(comm, profile)
        if score < opt_mavscore:
            opt_committees = [comm]
            opt_mavscore = score
        elif score == opt_mavscore:
            # fixed: previously recomputed mavscore(comm, profile) here
            # although `score` already holds it
            opt_committees.append(comm)

    opt_committees = sort_committees(opt_committees)
    if resolute:
        return [opt_committees[0]]
    # fixed: no need to sort a second time, opt_committees is sorted
    return opt_committees
# Proportional Approval Voting
def compute_pav(profile, committeesize, ilp=True, resolute=False):
    """Proportional Approval Voting (PAV), solved exactly either via ILP
    or via branch-and-bound."""
    solver = (compute_thiele_methods_ilp if ilp
              else compute_thiele_methods_branchandbound)
    return solver(profile, committeesize, 'pav', resolute)
# Sainte-Lague Approval Voting
def compute_slav(profile, committeesize, ilp=True, resolute=False):
    """Sainte-Lague Approval Voting (SLAV), solved exactly either via ILP
    or via branch-and-bound."""
    solver = (compute_thiele_methods_ilp if ilp
              else compute_thiele_methods_branchandbound)
    return solver(profile, committeesize, 'slav', resolute)
# Chamberlin-Courant
def compute_cc(profile, committeesize, ilp=True, resolute=False):
    """Chamberlin-Courant (CC), solved exactly either via ILP or via
    branch-and-bound."""
    solver = (compute_thiele_methods_ilp if ilp
              else compute_thiele_methods_branchandbound)
    return solver(profile, committeesize, 'cc', resolute)
# Monroe's rule
def compute_monroe(profile, committeesize, ilp=True, resolute=False):
    """Monroe's rule, solved via ILP or by brute-force enumeration."""
    if ilp:
        return compute_monroe_ilp(profile, committeesize, resolute)
    return compute_monroe_bruteforce(profile, committeesize, resolute)
# Monroe's rule, computed via (brute-force) matching
def compute_monroe_bruteforce(profile, committeesize,
                              resolute=False, flowbased=True):
    """Returns the list of winning committees via brute-force Monroe's rule.

    Enumerates all committees and scores each one with a Monroe
    assignment score; the flow-based scorer is required whenever the
    total weight is not divisible by the committee size.
    """
    enough_approved_candidates(profile, committeesize)
    if not profile.has_unit_weights():
        raise Exception("Monroe is only defined for unit weights (weight=1)")
    # matching-based scoring only works for evenly divisible profiles
    if profile.totalweight() % committeesize != 0 or flowbased:
        monroescore = sf.monroescore_flowbased
    else:
        monroescore = sf.monroescore_matching
    opt_committees = []
    opt_monroescore = -1
    for comm in combinations(list(range(profile.num_cand)), committeesize):
        score = monroescore(profile, comm)
        if score > opt_monroescore:
            opt_committees = [comm]
            opt_monroescore = score
        elif score == opt_monroescore:
            # fixed: previously recomputed the (expensive) monroescore
            # here although `score` already holds it
            opt_committees.append(comm)
    opt_committees = sort_committees(opt_committees)
    if resolute:
        return [opt_committees[0]]
    return opt_committees
def compute_greedy_monroe(profile, committeesize):
    """Returns the winning committee of Greedy Monroe.

    In each of the k rounds, selects the candidate approved by the most
    still-unsatisfied voters, then removes roughly n/k of that
    candidate's supporters (the first ones in a fixed order by sorted
    approval sets).  Only defined for unit weights.
    """
    enough_approved_candidates(profile, committeesize)
    if not profile.has_unit_weights():
        raise Exception("Greedy Monroe is only defined for unit weights"
                        + " (weight=1)")
    v = list(enumerate(list(profile.preferences)))
    # list of tuples (nr, Preferences)
    # sorted by sorted approved list of preferences
    voters = sorted(v, key=lambda p: sorted(p[1].approved))
    n = len(voters)  # number of voters
    cands = set(range(profile.num_cand))
    not_s, committee = (voters, set())  # not_s .. not satisfied voters
    for t in range(1, committeesize+1):
        remaining_cands = cands - committee
        # approval counts among the not-yet-satisfied voters only
        approval = {c: 0 for c in remaining_cands}
        for nr, voter in not_s:
            for c in voter.approved:
                if c in remaining_cands:
                    approval[c] += 1
        max_approval = max(approval.values())
        # first (lowest-index) candidate with maximal approval wins
        winner = [c for c in remaining_cands
                  if approval[c] == max_approval][0]
        # round how many are removed, either up or down: the first
        # n mod k rounds remove ceil(n/k) voters, the rest floor(n/k)
        if t <= n - committeesize * math.floor(n / committeesize):
            to_remove = math.ceil(float(n) / committeesize)
        else:
            to_remove = math.floor(n / committeesize)
        # not more than the voters that approve
        # the candidate can be removed
        to_remove = min(max_approval, to_remove)
        next_voters = []
        for nr, voter in not_s:
            if to_remove > 0 and winner in voter.approved:
                to_remove -= 1
            else:
                next_voters.append((nr, voter))
        not_s = next_voters
        committee.add(winner)
    return sort_committees([committee])
def compute_rule_x(profile, committeesize, resolute=False):
    """Returns the list of winning candidates according to rule x.
    But rule x does stop if not enough budget is there to finance a
    candidate. As this is not optimal the committee is filled with the
    candidates that have the most remaining budget as support
    (see fill_remaining_committee).
    Rule from:
    https://arxiv.org/pdf/1911.11747.pdf (Page 7)"""
    enough_approved_candidates(profile, committeesize)
    if not profile.has_unit_weights():
        raise Exception("Rule X is only defined \
                        for unit weights (weight=1)")
    num_voters = len(profile.preferences)
    # each candidate "costs" n/k; every voter starts with budget 1
    price = Fraction(num_voters, committeesize)
    start_budget = {v: Fraction(1, 1) for v in range(num_voters)}
    cands = range(profile.num_cand)
    # branches are (per-voter budget dict, committee set) pairs
    committees = [(start_budget, set())]
    final_committees = []
    for _ in range(committeesize):
        next_committees = []
        for committee in committees:
            budget = committee[0]
            # q_affordability[c]: smallest per-supporter payment q with
            # which c's supporters can jointly afford the price
            q_affordability = {}
            curr_cands = set(cands) - committee[1]
            for c in curr_cands:
                approved_by = set()
                for v, vote in enumerate(profile.preferences):
                    if c in vote.approved and budget[v] > 0.0:
                        approved_by.add(v)
                too_poor = set()
                already_available = Fraction(0)
                rich = set(approved_by)
                q = 0.0
                # iteratively drop supporters who cannot pay an equal
                # share; their whole budget is collected instead
                while already_available < price and q == 0.0 and len(rich) > 0:
                    fair_split = Fraction(price-already_available, len(rich))
                    still_rich = set()
                    for v in rich:
                        if budget[v] <= fair_split:
                            too_poor.add(v)
                            already_available += budget[v]
                        else:
                            still_rich.add(v)
                    if len(still_rich) == len(rich):
                        # everyone can pay the equal share
                        q = fair_split
                        q_affordability[c] = q
                    elif already_available == price:
                        q = fair_split
                        q_affordability[c] = q
                    else:
                        rich = still_rich
            if len(q_affordability) > 0:
                # elect every candidate with minimal q (keep all ties)
                min_q = min(q_affordability.values())
                cheapest_split = [c for c in q_affordability
                                  if q_affordability[c] == min_q]
                for c in cheapest_split:
                    b = dict(committee[0])
                    for v, vote in enumerate(profile.preferences):
                        if c in vote.approved:
                            b[v] -= min(budget[v], min_q)
                    comm = set(committee[1])
                    comm.add(c)
                    next_committees.append((b, comm))
            else:  # no affordable candidate remains
                comms = fill_remaining_committee(committee, curr_cands,
                                                 committeesize, profile)
                # after filling the remaining spots these committees
                # have size committeesize
                for b, comm in comms:
                    final_committees.append(comm)
        if resolute:
            if len(next_committees) > 0:
                committees = [next_committees[0]]
            else:
                committees = []
        else:
            committees = next_committees
    # The committees that could be fully filled with Rule X:
    for b, comm in committees:  # budget and committee
        final_committees.append(comm)
    committees = sort_committees(final_committees)
    if resolute:
        if len(committees) > 0:
            return [committees[0]]
        else:
            return []
    else:
        return committees
def fill_remaining_committee(committee, curr_cands, committee_size,
                             profile):
    """
    Rule X has no definition of how to fill remaining committee spots.
    This function repeatedly adds a candidate with the most remaining
    budget among its supporters; selecting a candidate depletes the
    entire budget of every voter that approves it.
    This can produce multiple possible committees (all ties are kept).
    """
    missing = committee_size - len(committee[1])
    committees = [committee]
    for _ in range(missing):
        next_comms = []
        for comm in committees:
            budget, appr_set = comm
            remaining_cands = curr_cands - appr_set
            # budget_support[cand]: total remaining budget of cand's supporters
            budget_support = {}
            for cand in remaining_cands:
                budget_support[cand] = 0
                for v, vote in enumerate(profile.preferences):
                    if cand in vote.approved:
                        budget_support[cand] += budget[v]
            max_support = max(budget_support.values())
            winners = [c for c in remaining_cands
                       if budget_support[c] == max_support]
            for c in winners:
                # electing c zeroes the budget of all its supporters
                budget_c = {}
                for voter, value in budget.items():
                    if c in profile.preferences[voter].approved:
                        budget_c[voter] = 0
                    else:
                        budget_c[voter] = value
                next_comms.append((budget_c, appr_set.union([c])))
        committees = next_comms
    return committees
def compute_phragmen_enestroem(profile, committeesize, resolute=False):
    """Return the winning committees of Phragmen's first method
    (Enestroem's method), i.e. STV with unordered approval ballots.

    Every voter starts with a budget equal to their weight.  In each
    round the candidate whose supporters hold the largest combined
    budget joins the committee, and the supporters pay (in proportion
    to their budgets) a fixed per-seat price of
    total weight / committeesize.  Ties in support branch into
    multiple committees unless ``resolute`` is True.

    Method from: https://arxiv.org/pdf/1611.08826.pdf (18.5, Page 59)
    """
    enough_approved_candidates(profile, committeesize)
    num_voters = len(profile.preferences)
    # Exact rational arithmetic: each voter's budget starts at their weight.
    start_budget = {v: Fraction(profile.preferences[v].weight) for v in range(num_voters)}
    # Price of one committee seat: total voter weight / committee size.
    price = Fraction(sum(start_budget.values()), committeesize)
    cands = range(profile.num_cand)
    # Partial committees are (budget, committee) pairs; start from empty.
    committees = [(start_budget, set())]
    for i in range(committeesize):
        # here the committees with i+1 candidates are
        # stored (together with budget)
        next_committees = []
        # loop in case multiple possible committees
        # with i filled candidates
        for committee in committees:
            budget, comm = committee
            curr_cands = set(cands) - comm
            support = {c: 0 for c in curr_cands}
            # Combined remaining budget behind each candidate not yet seated.
            for nr, pref in enumerate(profile.preferences):
                voting_power = budget[nr]
                if voting_power <= 0:
                    continue
                for cand in pref.approved:
                    if cand in curr_cands:
                        support[cand] += voting_power
            max_support = max(support.values())
            # All candidates tied for maximum support win this round.
            winners = [c for c, s in support.items()
                       if s == max_support]
            for cand in winners:
                b = dict(budget)  # new copy of budget
                if max_support > price:  # supporters can afford it
                    # Each supporter keeps the fraction
                    # (max_support - price) / max_support of their budget.
                    multiplier = Fraction(max_support - price,
                                          max_support)
                else:  # supporters cannot afford the seat: zero their budgets
                    multiplier = 0
                for nr, pref in enumerate(profile.preferences):
                    if cand in pref.approved:
                        b[nr] *= multiplier
                c = comm.union([cand])  # new committee with candidate
                next_committees.append((b, c))
        if resolute:  # only one is requested
            if len(next_committees) > 0:
                committees = [next_committees[0]]
            else:  # should not happen
                committees = []
                raise Exception("phragmen enestroem failed to find "
                                + "next candidate for", committees)
        else:
            committees = next_committees
    # Drop the budgets; keep only the candidate sets.
    committees = [comm for b, comm in committees]
    committees = sort_committees(committees)
    if resolute:
        if len(committees) > 0:
            return [committees[0]]
        else:
            return []
    else:
        return committees
|
[
"committees.enough_approved_candidates",
"score_functions.additional_thiele_scores",
"rules_approval_ilp.compute_minimaxav_ilp",
"rules_approval_ilp.compute_monroe_ilp",
"committees.print_committees",
"rules_approval_ilp.compute_optphragmen_ilp",
"rules_approval_ilp.compute_thiele_methods_ilp",
"math.floor",
"score_functions.thiele_score",
"itertools.combinations",
"score_functions.get_scorefct",
"committees.sort_committees",
"fractions.Fraction"
] |
[((5779, 5829), 'committees.enough_approved_candidates', 'enough_approved_candidates', (['profile', 'committeesize'], {}), '(profile, committeesize)\n', (5805, 5829), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((5845, 5889), 'score_functions.get_scorefct', 'sf.get_scorefct', (['scorefct_str', 'committeesize'], {}), '(scorefct_str, committeesize)\n', (5860, 5889), True, 'import score_functions as sf\n'), ((6057, 6108), 'score_functions.thiele_score', 'sf.thiele_score', (['profile', 'init_com[0]', 'scorefct_str'], {}), '(profile, init_com[0], scorefct_str)\n', (6072, 6108), True, 'import score_functions as sf\n'), ((7333, 7365), 'committees.sort_committees', 'sort_committees', (['best_committees'], {}), '(best_committees)\n', (7348, 7365), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((9462, 9512), 'committees.enough_approved_candidates', 'enough_approved_candidates', (['profile', 'committeesize'], {}), '(profile, committeesize)\n', (9488, 9512), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((10638, 10688), 'committees.enough_approved_candidates', 'enough_approved_candidates', (['profile', 'committeesize'], {}), '(profile, committeesize)\n', (10664, 10688), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((10704, 10748), 'score_functions.get_scorefct', 'sf.get_scorefct', (['scorefct_str', 'committeesize'], {}), '(scorefct_str, committeesize)\n', (10719, 10748), True, 'import score_functions as sf\n'), ((11907, 11957), 'committees.enough_approved_candidates', 'enough_approved_candidates', (['profile', 'committeesize'], {}), '(profile, committeesize)\n', (11933, 11957), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((11973, 12017), 'score_functions.get_scorefct', 'sf.get_scorefct', (['scorefct_str', 
'committeesize'], {}), '(scorefct_str, committeesize)\n', (11988, 12017), True, 'import score_functions as sf\n'), ((13369, 13419), 'committees.enough_approved_candidates', 'enough_approved_candidates', (['profile', 'committeesize'], {}), '(profile, committeesize)\n', (13395, 13419), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((13435, 13479), 'score_functions.get_scorefct', 'sf.get_scorefct', (['scorefct_str', 'committeesize'], {}), '(scorefct_str, committeesize)\n', (13450, 13479), True, 'import score_functions as sf\n'), ((14562, 14612), 'committees.enough_approved_candidates', 'enough_approved_candidates', (['profile', 'committeesize'], {}), '(profile, committeesize)\n', (14588, 14612), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((14628, 14672), 'score_functions.get_scorefct', 'sf.get_scorefct', (['scorefct_str', 'committeesize'], {}), '(scorefct_str, committeesize)\n', (14643, 14672), True, 'import score_functions as sf\n'), ((15179, 15229), 'committees.enough_approved_candidates', 'enough_approved_candidates', (['profile', 'committeesize'], {}), '(profile, committeesize)\n', (15205, 15229), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((18131, 18181), 'committees.enough_approved_candidates', 'enough_approved_candidates', (['profile', 'committeesize'], {}), '(profile, committeesize)\n', (18157, 18181), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((18581, 18612), 'committees.sort_committees', 'sort_committees', (['opt_committees'], {}), '(opt_committees)\n', (18596, 18612), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((20720, 20770), 'committees.enough_approved_candidates', 'enough_approved_candidates', (['profile', 'committeesize'], {}), '(profile, committeesize)\n', (20746, 20770), False, 'from 
committees import sort_committees, enough_approved_candidates, print_committees\n'), ((21456, 21487), 'committees.sort_committees', 'sort_committees', (['opt_committees'], {}), '(opt_committees)\n', (21471, 21487), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((21903, 21953), 'committees.enough_approved_candidates', 'enough_approved_candidates', (['profile', 'committeesize'], {}), '(profile, committeesize)\n', (21929, 21953), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((23562, 23590), 'committees.sort_committees', 'sort_committees', (['[committee]'], {}), '([committee])\n', (23577, 23590), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((23994, 24044), 'committees.enough_approved_candidates', 'enough_approved_candidates', (['profile', 'committeesize'], {}), '(profile, committeesize)\n', (24020, 24044), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((24246, 24281), 'fractions.Fraction', 'Fraction', (['num_voters', 'committeesize'], {}), '(num_voters, committeesize)\n', (24254, 24281), False, 'from fractions import Fraction\n'), ((27263, 27296), 'committees.sort_committees', 'sort_committees', (['final_committees'], {}), '(final_committees)\n', (27278, 27296), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((29441, 29491), 'committees.enough_approved_candidates', 'enough_approved_candidates', (['profile', 'committeesize'], {}), '(profile, committeesize)\n', (29467, 29491), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((31788, 31815), 'committees.sort_committees', 'sort_committees', (['committees'], {}), '(committees)\n', (31803, 31815), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((5281, 5302), 
'committees.print_committees', 'print_committees', (['com'], {}), '(com)\n', (5297, 5302), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((10268, 10325), 'committees.sort_committees', 'sort_committees', (['[certain_cand + possible_cand[:missing]]'], {}), '([certain_cand + possible_cand[:missing]])\n', (10283, 10325), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((12160, 12217), 'score_functions.additional_thiele_scores', 'sf.additional_thiele_scores', (['profile', 'committee', 'scorefct'], {}), '(profile, committee, scorefct)\n', (12187, 12217), True, 'import score_functions as sf\n'), ((13562, 13613), 'score_functions.thiele_score', 'sf.thiele_score', (['profile', 'allcandcomm', 'scorefct_str'], {}), '(profile, allcandcomm, scorefct_str)\n', (13577, 13613), True, 'import score_functions as sf\n'), ((17560, 17615), 'rules_approval_ilp.compute_minimaxav_ilp', 'compute_minimaxav_ilp', (['profile', 'committeesize', 'resolute'], {}), '(profile, committeesize, resolute)\n', (17581, 17615), False, 'from rules_approval_ilp import compute_monroe_ilp, compute_thiele_methods_ilp, compute_optphragmen_ilp, compute_minimaxav_ilp\n'), ((18690, 18721), 'committees.sort_committees', 'sort_committees', (['opt_committees'], {}), '(opt_committees)\n', (18705, 18721), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((18927, 18994), 'rules_approval_ilp.compute_thiele_methods_ilp', 'compute_thiele_methods_ilp', (['profile', 'committeesize', '"""pav"""', 'resolute'], {}), "(profile, committeesize, 'pav', resolute)\n", (18953, 18994), False, 'from rules_approval_ilp import compute_monroe_ilp, compute_thiele_methods_ilp, compute_optphragmen_ilp, compute_minimaxav_ilp\n'), ((19400, 19468), 'rules_approval_ilp.compute_thiele_methods_ilp', 'compute_thiele_methods_ilp', (['profile', 'committeesize', '"""slav"""', 'resolute'], {}), "(profile, 
committeesize, 'slav', resolute)\n", (19426, 19468), False, 'from rules_approval_ilp import compute_monroe_ilp, compute_thiele_methods_ilp, compute_optphragmen_ilp, compute_minimaxav_ilp\n'), ((19871, 19937), 'rules_approval_ilp.compute_thiele_methods_ilp', 'compute_thiele_methods_ilp', (['profile', 'committeesize', '"""cc"""', 'resolute'], {}), "(profile, committeesize, 'cc', resolute)\n", (19897, 19937), False, 'from rules_approval_ilp import compute_monroe_ilp, compute_thiele_methods_ilp, compute_optphragmen_ilp, compute_minimaxav_ilp\n'), ((20327, 20379), 'rules_approval_ilp.compute_monroe_ilp', 'compute_monroe_ilp', (['profile', 'committeesize', 'resolute'], {}), '(profile, committeesize, resolute)\n', (20345, 20379), False, 'from rules_approval_ilp import compute_monroe_ilp, compute_thiele_methods_ilp, compute_optphragmen_ilp, compute_minimaxav_ilp\n'), ((24306, 24320), 'fractions.Fraction', 'Fraction', (['(1)', '(1)'], {}), '(1, 1)\n', (24314, 24320), False, 'from fractions import Fraction\n'), ((29558, 29597), 'fractions.Fraction', 'Fraction', (['profile.preferences[v].weight'], {}), '(profile.preferences[v].weight)\n', (29566, 29597), False, 'from fractions import Fraction\n'), ((5480, 5501), 'committees.print_committees', 'print_committees', (['com'], {}), '(com)\n', (5496, 5501), False, 'from committees import sort_committees, enough_approved_candidates, print_committees\n'), ((6344, 6392), 'score_functions.thiele_score', 'sf.thiele_score', (['profile', 'part_com', 'scorefct_str'], {}), '(profile, part_com, scorefct_str)\n', (6359, 6392), True, 'import score_functions as sf\n'), ((6820, 6876), 'score_functions.additional_thiele_scores', 'sf.additional_thiele_scores', (['profile', 'part_com', 'scorefct'], {}), '(profile, part_com, scorefct)\n', (6847, 6876), True, 'import score_functions as sf\n'), ((11060, 11117), 'score_functions.additional_thiele_scores', 'sf.additional_thiele_scores', (['profile', 'committee', 'scorefct'], {}), '(profile, committee, 
scorefct)\n', (11087, 11117), True, 'import score_functions as sf\n'), ((23112, 23141), 'math.floor', 'math.floor', (['(n / committeesize)'], {}), '(n / committeesize)\n', (23122, 23141), False, 'import math\n'), ((7054, 7102), 'score_functions.thiele_score', 'sf.thiele_score', (['profile', 'part_com', 'scorefct_str'], {}), '(profile, part_com, scorefct_str)\n', (7069, 7102), True, 'import score_functions as sf\n'), ((24979, 24990), 'fractions.Fraction', 'Fraction', (['(0)'], {}), '(0)\n', (24987, 24990), False, 'from fractions import Fraction\n'), ((10484, 10520), 'itertools.combinations', 'combinations', (['possible_cand', 'missing'], {}), '(possible_cand, missing)\n', (10496, 10520), False, 'from itertools import combinations\n'), ((15994, 16046), 'fractions.Fraction', 'Fraction', (['(approvers_load[c] + 1)', 'approvers_weight[c]'], {}), '(approvers_load[c] + 1, approvers_weight[c])\n', (16002, 16046), False, 'from fractions import Fraction\n'), ((22983, 23012), 'math.floor', 'math.floor', (['(n / committeesize)'], {}), '(n / committeesize)\n', (22993, 23012), False, 'import math\n'), ((30883, 30925), 'fractions.Fraction', 'Fraction', (['(max_support - price)', 'max_support'], {}), '(max_support - price, max_support)\n', (30891, 30925), False, 'from fractions import Fraction\n'), ((4448, 4514), 'rules_approval_ilp.compute_optphragmen_ilp', 'compute_optphragmen_ilp', (['profile', 'committeesize'], {'resolute': 'resolute'}), '(profile, committeesize, resolute=resolute)\n', (4471, 4514), False, 'from rules_approval_ilp import compute_monroe_ilp, compute_thiele_methods_ilp, compute_optphragmen_ilp, compute_minimaxav_ilp\n')]
|
"""Setup xorca."""
from setuptools import setup
setup(name='xorca',
description='Work on the ORCA grid with XGCM and Xarray',
packages=['xorca'],
package_dir={'xorca': 'xorca'},
install_requires=['setuptools', ],
zip_safe=False)
|
[
"setuptools.setup"
] |
[((50, 241), 'setuptools.setup', 'setup', ([], {'name': '"""xorca"""', 'description': '"""Work on the ORCA grid with XGCM and Xarray"""', 'packages': "['xorca']", 'package_dir': "{'xorca': 'xorca'}", 'install_requires': "['setuptools']", 'zip_safe': '(False)'}), "(name='xorca', description=\n 'Work on the ORCA grid with XGCM and Xarray', packages=['xorca'],\n package_dir={'xorca': 'xorca'}, install_requires=['setuptools'],\n zip_safe=False)\n", (55, 241), False, 'from setuptools import setup\n')]
|
"""Import tasks for the Nearby Supernova Factory.
"""
import csv
import os
from glob import glob
from astrocats.catalog.utils import jd_to_mjd, pbar, pretty_num, uniq_cdl
from astropy.time import Time as astrotime
from decimal import Decimal
from ..supernova import SUPERNOVA
def do_snf_aliases(catalog):
    """Read the SNF alias CSV and record each alias on its catalog entry."""
    aliases_csv = os.path.join(
        catalog.get_current_task_repo(), 'SNF/snf-aliases.csv')
    with open(aliases_csv, 'r') as alias_file:
        rows = [line.split(',') for line in alias_file.read().splitlines()]
    for row in rows:
        # Column 0 is the event name, column 1 the alias to attach.
        name, source = catalog.new_entry(
            row[0], bibcode=catalog.OSC_BIBCODE, srcname=catalog.OSC_NAME,
            url=catalog.OSC_URL, secondary=True)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[1], source)
    catalog.journal_entries()
    return
def do_snf_spectra(catalog):
    """Import Nearby Supernova Factory spectra into the catalog.

    Walks the per-event folders under the task repo's ``SNFactory``
    directory, parses every ``*.dat`` spectrum file (space-delimited
    columns, with ``#``-prefixed ``FIELD = VALUE`` header lines), and
    adds each spectrum to the matching entry with both the primary
    publication source and the secondary SNfactory source.
    """
    task_str = catalog.get_current_task_str()
    # Primary publication per event; events not listed here will raise KeyError.
    bibcodes = {'SN2005gj': '2006ApJ...650..510A',
                'SN2006D': '2007ApJ...654L..53T',
                'SN2007if': '2010ApJ...713.1073S',
                'SN2011fe': '2013A&A...554A..27P'}
    oldname = ''
    snfcnt = 0
    # One subfolder per event inside the SNFactory task repo.
    eventfolders = next(os.walk(os.path.join(
        catalog.get_current_task_repo(), 'SNFactory')))[1]
    for eventfolder in pbar(eventfolders, task_str):
        oname = eventfolder
        name = catalog.get_preferred_name(oname)
        # Flush the previous event's data before switching entries.
        if oldname and name != oldname:
            catalog.journal_entries()
        oldname = name
        name = catalog.add_entry(name)
        sec_reference = 'Nearby Supernova Factory'
        sec_refurl = 'http://snfactory.lbl.gov/'
        sec_bibcode = '2002SPIE.4836...61A'
        sec_source = catalog.entries[name].add_source(
            name=sec_reference, url=sec_refurl, bibcode=sec_bibcode,
            secondary=True)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, oname, sec_source)
        bibcode = bibcodes[oname]
        source = catalog.entries[name].add_source(bibcode=bibcode)
        sources = uniq_cdl([source, sec_source])
        use_path = os.path.join(
            catalog.get_current_task_repo(), 'SNFactory', eventfolder, '*.dat')
        eventspectra = glob(use_path)
        for spectrum in pbar(eventspectra, task_str):
            filename = os.path.basename(spectrum)
            # Space-delimited spectrum file; drop empty rows.
            with open(spectrum) as spec_file:
                specdata = list(csv.reader(
                    spec_file, delimiter=' ', skipinitialspace=True))
            specdata = list(filter(None, specdata))
            newspec = []
            time = ''
            telescope = ''
            instrument = ''
            observer = ''
            observatory = ''
            # A few known files lack usable time headers; hard-code their MJDs.
            if 'Keck_20060202_R' in spectrum:
                time = '53768.23469'
            elif 'Spectrum05_276' in spectrum:
                time = pretty_num(astrotime('2005-10-03').mjd, sig=5)
            elif 'Spectrum05_329' in spectrum:
                time = pretty_num(astrotime('2005-11-25').mjd, sig=5)
            elif 'Spectrum05_336' in spectrum:
                time = pretty_num(astrotime('2005-12-02').mjd, sig=5)
            # Split header lines ('# FIELD = VALUE / comment') from data rows.
            for row in specdata:
                if row[0][0] == '#':
                    joinrow = (' '.join(row)).split('=')
                    if len(joinrow) < 2:
                        continue
                    field = joinrow[0].strip('# ')
                    # Strip FITS-style trailing comments and quotes.
                    value = joinrow[1].split('/')[0].strip('\' ')
                    if not time:
                        if field == 'JD':
                            time = str(jd_to_mjd(Decimal(value)))
                        elif field == 'MJD':
                            time = value
                        elif field == 'MJD-OBS':
                            time = value
                    if field == 'OBSERVER':
                        observer = value.capitalize()
                    if field == 'OBSERVAT':
                        observatory = value.capitalize()
                    if field == 'TELESCOP':
                        telescope = value.capitalize()
                    if field == 'INSTRUME':
                        instrument = value.capitalize()
                else:
                    newspec.append(row)
            if not time:
                raise ValueError('Time missing from spectrum.')
            specdata = newspec
            # A third column holds errors unless empty or 'NaN'.
            haserrors = len(specdata[0]) == 3 and specdata[
                0][2] and specdata[0][2] != 'NaN'
            # Transpose rows into per-quantity columns.
            specdata = [list(i) for i in zip(*specdata)]
            wavelengths = specdata[0]
            fluxes = specdata[1]
            errors = ''
            if haserrors:
                errors = specdata[2]
            # SN2011fe errors are variances; all others are flux errors.
            unit_err = ('Variance' if oldname == 'SN2011fe' else
                        'erg/s/cm^2/Angstrom')
            unit_flx = 'erg/s/cm^2/Angstrom'
            catalog.entries[name].add_spectrum(
                u_wavelengths='Angstrom', u_fluxes=unit_flx, u_time='MJD',
                time=time,
                wavelengths=wavelengths, fluxes=fluxes, errors=errors,
                observer=observer, observatory=observatory,
                telescope=telescope, instrument=instrument, u_errors=unit_err,
                source=sources, filename=filename)
            snfcnt = snfcnt + 1
            # Keep CI runs short by capping the number of imported spectra.
            if (catalog.args.travis and
                    snfcnt % catalog.TRAVIS_QUERY_LIMIT == 0):
                break
    catalog.journal_entries()
    return
|
[
"csv.reader",
"os.path.basename",
"decimal.Decimal",
"astropy.time.Time",
"astrocats.catalog.utils.uniq_cdl",
"astrocats.catalog.utils.pbar",
"glob.glob"
] |
[((1246, 1274), 'astrocats.catalog.utils.pbar', 'pbar', (['eventfolders', 'task_str'], {}), '(eventfolders, task_str)\n', (1250, 1274), False, 'from astrocats.catalog.utils import jd_to_mjd, pbar, pretty_num, uniq_cdl\n'), ((1987, 2017), 'astrocats.catalog.utils.uniq_cdl', 'uniq_cdl', (['[source, sec_source]'], {}), '([source, sec_source])\n', (1995, 2017), False, 'from astrocats.catalog.utils import jd_to_mjd, pbar, pretty_num, uniq_cdl\n'), ((2154, 2168), 'glob.glob', 'glob', (['use_path'], {}), '(use_path)\n', (2158, 2168), False, 'from glob import glob\n'), ((2193, 2221), 'astrocats.catalog.utils.pbar', 'pbar', (['eventspectra', 'task_str'], {}), '(eventspectra, task_str)\n', (2197, 2221), False, 'from astrocats.catalog.utils import jd_to_mjd, pbar, pretty_num, uniq_cdl\n'), ((2246, 2272), 'os.path.basename', 'os.path.basename', (['spectrum'], {}), '(spectrum)\n', (2262, 2272), False, 'import os\n'), ((2351, 2410), 'csv.reader', 'csv.reader', (['spec_file'], {'delimiter': '""" """', 'skipinitialspace': '(True)'}), "(spec_file, delimiter=' ', skipinitialspace=True)\n", (2361, 2410), False, 'import csv\n'), ((2806, 2829), 'astropy.time.Time', 'astrotime', (['"""2005-10-03"""'], {}), "('2005-10-03')\n", (2815, 2829), True, 'from astropy.time import Time as astrotime\n'), ((2923, 2946), 'astropy.time.Time', 'astrotime', (['"""2005-11-25"""'], {}), "('2005-11-25')\n", (2932, 2946), True, 'from astropy.time import Time as astrotime\n'), ((3040, 3063), 'astropy.time.Time', 'astrotime', (['"""2005-12-02"""'], {}), "('2005-12-02')\n", (3049, 3063), True, 'from astropy.time import Time as astrotime\n'), ((3518, 3532), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (3525, 3532), False, 'from decimal import Decimal\n')]
|
from __future__ import print_function
import os
import numpy as np
import torch
from torchvision import datasets, transforms
from .smallnorb_dataset_helper import smallnorb, smallnorb_equivariance
from .utils import random_split, CustomDataset
def get_dataset(args):
    """Build dataloaders (and transforms) for the dataset named by ``args.dataset``.

    Supported values: "cifar10", "Fashion-MNIST", "svhn", "smallnorb",
    "smallNORB_48_azimuth", "smallNORB_48_elevation"; anything else
    prints a message and exits the process.

    For the first three datasets the return value is a 5-tuple
    (train_loader, test_loader, valid_mode_train_loader,
    valid_mode_valid_loader, train_transform), where the valid-mode
    loaders come from re-splitting the original training set (1000
    samples per class held out for validation).  The equivariance
    variants return (train, test_novel, test_familiar, transform).

    NOTE(review): ``valid_transform`` is only assigned inside the
    ``args.valid_mode`` branches, yet it is used unconditionally when
    building the valid-mode datasets — calling this with
    ``args.valid_mode`` false appears to raise NameError; confirm
    intended usage.
    """
    if args.dataset == "cifar10":
        train_transform = transforms.Compose([
            transforms.ColorJitter(brightness=.2, contrast=.2),
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
        ])
        test_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
        ])
        if args.valid_mode:
            # Raw numpy/tensor samples must become PIL images before the
            # PIL-based transforms can run on the re-split data.
            train_transform.transforms.insert(0, transforms.ToPILImage())
            test_transform.transforms.insert(0, transforms.ToPILImage())
            valid_transform = test_transform
        train_dataset = datasets.CIFAR10('./data', train=True, download=True, transform=train_transform)
        test_dataset = datasets.CIFAR10('./data', train=False, transform=test_transform)
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, num_workers=8, pin_memory=True, shuffle=True)
        test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size, num_workers=8, pin_memory=True, shuffle=False)
        # Split the original training set into a new training set and a
        # validation set (valid_mode_train + valid_mode_valid == original
        # training set), holding out 1000 samples per class.
        data, labels = random_split(data=train_dataset.data,
                                     labels=np.array(train_dataset.targets),
                                     n_classes=10,
                                     n_samples_per_class=np.repeat(1000, 10).reshape(-1))
        # dataloader
        valid_mode_train_dataset = CustomDataset(data=data['valid_mode_train'], labels=labels['valid_mode_train'], transform=train_transform)
        valid_mode_valid_dataset = CustomDataset(data=data['valid_mode_valid'], labels=labels['valid_mode_valid'], transform=valid_transform)
        valid_mode_train_loader = torch.utils.data.DataLoader(valid_mode_train_dataset, batch_size=args.batch_size, shuffle=True)
        valid_mode_valid_loader = torch.utils.data.DataLoader(valid_mode_valid_dataset, batch_size=args.test_batch_size, shuffle=False)
        return train_loader, test_loader, valid_mode_train_loader, valid_mode_valid_loader, train_transform
    elif args.dataset == "Fashion-MNIST":
        train_transform = transforms.Compose([
            transforms.ColorJitter(brightness=.2, contrast=.2),
            transforms.RandomCrop(32, padding=4),
            #transforms.RandomAffine(degrees=0, translate=[0.2, 0.2]),
            #transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.2862,), (0.3529,))
        ])
        test_transform = transforms.Compose([
            # Pad 28x28 test images to 32x32 to match the random-cropped
            # training images.
            transforms.Pad(padding=2),
            transforms.ToTensor(),
            transforms.Normalize((0.2862,), (0.3529,))
        ])
        if args.valid_mode:
            train_transform.transforms.insert(0, transforms.ToPILImage())
            test_transform.transforms.insert(0, transforms.ToPILImage())
            valid_transform = test_transform
        train_dataset = datasets.FashionMNIST('./data', train=True, download=True, transform=train_transform)
        test_dataset = datasets.FashionMNIST('./data', train=False, transform=test_transform)
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
        test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=False)
        # Split the original training set into a new training set and a
        # validation set (valid_mode_train + valid_mode_valid == original
        # training set), holding out 1000 samples per class.
        # NOTE(review): ``train_data``/``train_labels`` are deprecated
        # torchvision aliases for ``data``/``targets`` (used in the
        # cifar10 branch above) — consider unifying.
        data, labels = random_split(data=train_dataset.train_data, labels=train_dataset.train_labels,
                                     n_classes=10, n_samples_per_class=np.repeat(1000, 10).reshape(-1))
        # dataloader
        valid_mode_train_dataset = CustomDataset(data=data['valid_mode_train'], labels=labels['valid_mode_train'], transform=train_transform)
        valid_mode_valid_dataset = CustomDataset(data=data['valid_mode_valid'], labels=labels['valid_mode_valid'], transform=valid_transform)
        valid_mode_train_loader = torch.utils.data.DataLoader(valid_mode_train_dataset, batch_size=args.batch_size, shuffle=True)
        valid_mode_valid_loader = torch.utils.data.DataLoader(valid_mode_valid_dataset, batch_size=args.test_batch_size, shuffle=False)
        return train_loader, test_loader, valid_mode_train_loader, valid_mode_valid_loader, train_transform
    elif args.dataset == "svhn":
        train_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            #transforms.ColorJitter(brightness=.2, contrast=.2),
            transforms.ToTensor(),
            transforms.Normalize((0.4376821, 0.4437697, 0.47280442), (0.19803012, 0.20101562, 0.19703614))
        ])
        print("train_transform", train_transform)
        test_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4376821, 0.4437697, 0.47280442), (0.19803012, 0.20101562, 0.19703614))
        ])
        if args.valid_mode:
            train_transform.transforms.insert(0, transforms.ToPILImage())
            test_transform.transforms.insert(0, transforms.ToPILImage())
            valid_transform = test_transform
        # NOTE: the SVHN 'extra' split could be concatenated into training
        # (https://arxiv.org/pdf/1605.07146.pdf); intentionally disabled here.
        train_dataset = datasets.SVHN(
            './data', split='train', transform=train_transform, download=True)
        test_dataset = datasets.SVHN(
            './data', split='test', transform=test_transform, download=True)
        train_loader = torch.utils.data.DataLoader(
            dataset=train_dataset, num_workers=8, pin_memory=True,
            batch_size=args.batch_size, shuffle=True)
        test_loader = torch.utils.data.DataLoader(
            dataset=test_dataset, num_workers=8, pin_memory=True,
            batch_size=args.test_batch_size, shuffle=True)
        # Split the original training set into a new training set and a
        # validation set (valid_mode_train + valid_mode_valid == original
        # training set), holding out 1000 samples per class.
        data, labels = random_split(data=train_dataset.data,
                                     labels=train_dataset.labels,
                                     n_classes=10,
                                     n_samples_per_class=np.repeat(1000, 10).reshape(-1))
        # make channels last and convert to np arrays
        data['valid_mode_train'] = np.moveaxis(np.array(data['valid_mode_train']), 1, -1)
        data['valid_mode_valid'] = np.moveaxis(np.array(data['valid_mode_valid']), 1, -1)
        print("data['valid_mode_train'].shape", data['valid_mode_train'].shape)
        # dataloader
        valid_mode_train_dataset = CustomDataset(data=data['valid_mode_train'], labels=labels['valid_mode_train'], transform=train_transform)
        valid_mode_valid_dataset = CustomDataset(data=data['valid_mode_valid'], labels=labels['valid_mode_valid'], transform=valid_transform)
        valid_mode_train_loader = torch.utils.data.DataLoader(valid_mode_train_dataset, batch_size=args.batch_size, shuffle=True)
        valid_mode_valid_loader = torch.utils.data.DataLoader(valid_mode_valid_dataset, batch_size=args.test_batch_size, shuffle=False)
        return train_loader, test_loader, valid_mode_train_loader, valid_mode_valid_loader, train_transform
    elif args.dataset == "smallnorb":
        # Splitting/loading is delegated to the smallnorb helper; it returns
        # ready-made dataloaders keyed by split name.
        working_dir = args.working_dir
        dataset_paths = {'train': os.path.join(working_dir, 'train'),
                         'test': os.path.join(working_dir, 'test')}
        dataloaders, train_transf = smallnorb(args, dataset_paths)
        train_loader = dataloaders['train_valid']
        test_loader = dataloaders['test']
        valid_mode_train_loader = dataloaders['train']
        valid_mode_valid_loader = dataloaders['valid']
        return train_loader, test_loader, valid_mode_train_loader, valid_mode_valid_loader, train_transf
    elif args.dataset == "smallNORB_48_azimuth" or args.dataset == "smallNORB_48_elevation":
        # Equivariance benchmarks: separate "novel" and "familiar" test
        # viewpoints instead of a validation split.
        working_dir = args.working_dir
        dataset_paths = {'train': os.path.join(working_dir, 'train'),
                         'test_novel': os.path.join(working_dir, 'test_novel'),
                         'test_familiar': os.path.join(working_dir, 'test_familiar')}
        dataloaders, train_transform = smallnorb_equivariance(args, dataset_paths)
        train_loader = dataloaders['train']
        test_novel_loader = dataloaders['test_novel']
        test_familiar_loader = dataloaders['test_familiar']
        print("len(train_loader.dataset)", len(train_loader.dataset))
        print("len(test_novel_loader.dataset)", len(test_novel_loader.dataset))
        print("len(test_familiar_loader.dataset)", len(test_familiar_loader.dataset))
        return train_loader, test_novel_loader, test_familiar_loader, train_transform
    else:
        print("Unsupported dataset.")
        quit()
    # NOTE(review): unreachable — every branch above returns or exits, and
    # these names are unbound here.
    return train_loader, test_loader
|
[
"torchvision.transforms.ColorJitter",
"torchvision.datasets.FashionMNIST",
"torch.utils.data.DataLoader",
"torchvision.transforms.RandomHorizontalFlip",
"os.path.join",
"torchvision.transforms.ToPILImage",
"torchvision.datasets.CIFAR10",
"torchvision.transforms.Pad",
"numpy.array",
"numpy.repeat",
"torchvision.transforms.Normalize",
"torchvision.datasets.SVHN",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.ToTensor"
] |
[((1265, 1350), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""./data"""'], {'train': '(True)', 'download': '(True)', 'transform': 'train_transform'}), "('./data', train=True, download=True, transform=train_transform\n )\n", (1281, 1350), False, 'from torchvision import datasets, transforms\n'), ((1369, 1434), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""./data"""'], {'train': '(False)', 'transform': 'test_transform'}), "('./data', train=False, transform=test_transform)\n", (1385, 1434), False, 'from torchvision import datasets, transforms\n'), ((1458, 1578), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'num_workers': '(8)', 'pin_memory': '(True)', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size,\n num_workers=8, pin_memory=True, shuffle=True)\n', (1485, 1578), False, 'import torch\n'), ((1597, 1722), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'args.test_batch_size', 'num_workers': '(8)', 'pin_memory': '(True)', 'shuffle': '(False)'}), '(test_dataset, batch_size=args.test_batch_size,\n num_workers=8, pin_memory=True, shuffle=False)\n', (1624, 1722), False, 'import torch\n'), ((2904, 3004), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_mode_train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(valid_mode_train_dataset, batch_size=args.\n batch_size, shuffle=True)\n', (2931, 3004), False, 'import torch\n'), ((3034, 3140), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_mode_valid_dataset'], {'batch_size': 'args.test_batch_size', 'shuffle': '(False)'}), '(valid_mode_valid_dataset, batch_size=args.\n test_batch_size, shuffle=False)\n', (3061, 3140), False, 'import torch\n'), ((4324, 4414), 'torchvision.datasets.FashionMNIST', 'datasets.FashionMNIST', (['"""./data"""'], {'train': '(True)', 'download': '(True)', 'transform': 
'train_transform'}), "('./data', train=True, download=True, transform=\n train_transform)\n", (4345, 4414), False, 'from torchvision import datasets, transforms\n'), ((4434, 4504), 'torchvision.datasets.FashionMNIST', 'datasets.FashionMNIST', (['"""./data"""'], {'train': '(False)', 'transform': 'test_transform'}), "('./data', train=False, transform=test_transform)\n", (4455, 4504), False, 'from torchvision import datasets, transforms\n'), ((4528, 4616), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size,\n shuffle=True)\n', (4555, 4616), False, 'import torch\n'), ((4635, 4728), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'args.test_batch_size', 'shuffle': '(False)'}), '(test_dataset, batch_size=args.test_batch_size,\n shuffle=False)\n', (4662, 4728), False, 'import torch\n'), ((5906, 6006), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_mode_train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(valid_mode_train_dataset, batch_size=args.\n batch_size, shuffle=True)\n', (5933, 6006), False, 'import torch\n'), ((6036, 6142), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_mode_valid_dataset'], {'batch_size': 'args.test_batch_size', 'shuffle': '(False)'}), '(valid_mode_valid_dataset, batch_size=args.\n test_batch_size, shuffle=False)\n', (6063, 6142), False, 'import torch\n'), ((400, 452), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.2)', 'contrast': '(0.2)'}), '(brightness=0.2, contrast=0.2)\n', (422, 452), False, 'from torchvision import datasets, transforms\n'), ((485, 521), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (506, 521), False, 'from torchvision import datasets, transforms\n'), ((556, 589), 
'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (587, 589), False, 'from torchvision import datasets, transforms\n'), ((624, 645), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (643, 645), False, 'from torchvision import datasets, transforms\n'), ((680, 751), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (700, 751), False, 'from torchvision import datasets, transforms\n'), ((864, 885), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (883, 885), False, 'from torchvision import datasets, transforms\n'), ((920, 991), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (940, 991), False, 'from torchvision import datasets, transforms\n'), ((1102, 1125), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (1123, 1125), False, 'from torchvision import datasets, transforms\n'), ((1175, 1198), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (1196, 1198), False, 'from torchvision import datasets, transforms\n'), ((2084, 2115), 'numpy.array', 'np.array', (['train_dataset.targets'], {}), '(train_dataset.targets)\n', (2092, 2115), True, 'import numpy as np\n'), ((7546, 7631), 'torchvision.datasets.SVHN', 'datasets.SVHN', (['"""./data"""'], {'split': '"""train"""', 'transform': 'train_transform', 'download': '(True)'}), "('./data', split='train', transform=train_transform, download=True\n )\n", (7559, 7631), False, 'from torchvision import datasets, transforms\n'), ((7663, 7741), 'torchvision.datasets.SVHN', 'datasets.SVHN', (['"""./data"""'], {'split': '"""test"""', 'transform': 'test_transform', 'download': '(True)'}), "('./data', split='test', 
transform=test_transform, download=True)\n", (7676, 7741), False, 'from torchvision import datasets, transforms\n'), ((7778, 7906), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_dataset', 'num_workers': '(8)', 'pin_memory': '(True)', 'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(dataset=train_dataset, num_workers=8,\n pin_memory=True, batch_size=args.batch_size, shuffle=True)\n', (7805, 7906), False, 'import torch\n'), ((7950, 8083), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'test_dataset', 'num_workers': '(8)', 'pin_memory': '(True)', 'batch_size': 'args.test_batch_size', 'shuffle': '(True)'}), '(dataset=test_dataset, num_workers=8, pin_memory\n =True, batch_size=args.test_batch_size, shuffle=True)\n', (7977, 8083), False, 'import torch\n'), ((9288, 9388), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_mode_train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(valid_mode_train_dataset, batch_size=args.\n batch_size, shuffle=True)\n', (9315, 9388), False, 'import torch\n'), ((9418, 9524), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_mode_valid_dataset'], {'batch_size': 'args.test_batch_size', 'shuffle': '(False)'}), '(valid_mode_valid_dataset, batch_size=args.\n test_batch_size, shuffle=False)\n', (9445, 9524), False, 'import torch\n'), ((3366, 3418), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.2)', 'contrast': '(0.2)'}), '(brightness=0.2, contrast=0.2)\n', (3388, 3418), False, 'from torchvision import datasets, transforms\n'), ((3451, 3487), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (3472, 3487), False, 'from torchvision import datasets, transforms\n'), ((3683, 3704), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3702, 3704), False, 'from torchvision import 
datasets, transforms\n'), ((3739, 3781), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.2862,)', '(0.3529,)'], {}), '((0.2862,), (0.3529,))\n', (3759, 3781), False, 'from torchvision import datasets, transforms\n'), ((3893, 3918), 'torchvision.transforms.Pad', 'transforms.Pad', ([], {'padding': '(2)'}), '(padding=2)\n', (3907, 3918), False, 'from torchvision import datasets, transforms\n'), ((3953, 3974), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3972, 3974), False, 'from torchvision import datasets, transforms\n'), ((4009, 4051), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.2862,)', '(0.3529,)'], {}), '((0.2862,), (0.3529,))\n', (4029, 4051), False, 'from torchvision import datasets, transforms\n'), ((4161, 4184), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (4182, 4184), False, 'from torchvision import datasets, transforms\n'), ((4234, 4257), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (4255, 4257), False, 'from torchvision import datasets, transforms\n'), ((8724, 8758), 'numpy.array', 'np.array', (["data['valid_mode_train']"], {}), "(data['valid_mode_train'])\n", (8732, 8758), True, 'import numpy as np\n'), ((8814, 8848), 'numpy.array', 'np.array', (["data['valid_mode_valid']"], {}), "(data['valid_mode_valid'])\n", (8822, 8848), True, 'import numpy as np\n'), ((2215, 2234), 'numpy.repeat', 'np.repeat', (['(1000)', '(10)'], {}), '(1000, 10)\n', (2224, 2234), True, 'import numpy as np\n'), ((6347, 6383), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (6368, 6383), False, 'from torchvision import datasets, transforms\n'), ((6462, 6483), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6481, 6483), False, 'from torchvision import datasets, transforms\n'), ((6497, 6596), 'torchvision.transforms.Normalize', 'transforms.Normalize', 
(['(0.4376821, 0.4437697, 0.47280442)', '(0.19803012, 0.20101562, 0.19703614)'], {}), '((0.4376821, 0.4437697, 0.47280442), (0.19803012, \n 0.20101562, 0.19703614))\n', (6517, 6596), False, 'from torchvision import datasets, transforms\n'), ((6711, 6732), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6730, 6732), False, 'from torchvision import datasets, transforms\n'), ((6746, 6845), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4376821, 0.4437697, 0.47280442)', '(0.19803012, 0.20101562, 0.19703614)'], {}), '((0.4376821, 0.4437697, 0.47280442), (0.19803012, \n 0.20101562, 0.19703614))\n', (6766, 6845), False, 'from torchvision import datasets, transforms\n'), ((6929, 6952), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (6950, 6952), False, 'from torchvision import datasets, transforms\n'), ((7002, 7025), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (7023, 7025), False, 'from torchvision import datasets, transforms\n'), ((9739, 9773), 'os.path.join', 'os.path.join', (['working_dir', '"""train"""'], {}), "(working_dir, 'train')\n", (9751, 9773), False, 'import os\n'), ((9808, 9841), 'os.path.join', 'os.path.join', (['working_dir', '"""test"""'], {}), "(working_dir, 'test')\n", (9820, 9841), False, 'import os\n'), ((5163, 5182), 'numpy.repeat', 'np.repeat', (['(1000)', '(10)'], {}), '(1000, 10)\n', (5172, 5182), True, 'import numpy as np\n'), ((10785, 10819), 'os.path.join', 'os.path.join', (['working_dir', '"""train"""'], {}), "(working_dir, 'train')\n", (10797, 10819), False, 'import os\n'), ((10860, 10899), 'os.path.join', 'os.path.join', (['working_dir', '"""test_novel"""'], {}), "(working_dir, 'test_novel')\n", (10872, 10899), False, 'import os\n'), ((10943, 10985), 'os.path.join', 'os.path.join', (['working_dir', '"""test_familiar"""'], {}), "(working_dir, 'test_familiar')\n", (10955, 10985), False, 'import os\n'), ((8590, 8609), 
'numpy.repeat', 'np.repeat', (['(1000)', '(10)'], {}), '(1000, 10)\n', (8599, 8609), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from scipy.fft import fft, fftfreq

from Creating_Synthetic_Dataset import x_train, y_train
#global variables
l = 50000
low_lim = 100
high_lim = 150
fs = 512
sep_ind = int(0.8*l)
length_of_input = 60
# Size of FFT analysis
N = 60
def fir_freqz(b, n=None, sample_rate=None):
    """Return the peak-normalized magnitude frequency response of FIR coefficients.

    Args:
        b: FIR filter coefficients (array-like).
        n: FFT size (zero-padded). Defaults to the module-level ``N``.
        sample_rate: sampling rate in Hz. Defaults to the module-level ``fs``.

    Returns:
        Tuple ``(Xdb, f)``: magnitude response in dB, normalized so the
        peak is 0 dB, and the corresponding frequency vector in Hz
        (bin index * sample_rate / n, i.e. 0 .. sample_rate, unshifted).
    """
    if n is None:
        n = N  # fall back to module-level FFT size
    if sample_rate is None:
        sample_rate = fs  # fall back to module-level sampling rate
    # Frequency response via zero-padded FFT
    X = np.fft.fft(b, n)
    # Take the magnitude
    Xm = np.abs(X)
    # Convert the magnitude to decibel scale (0 dB at the peak)
    Xdb = 20*np.log10(Xm/Xm.max())
    # Frequency vector
    f = np.arange(n)*sample_rate/n
    return Xdb, f
def plot(coeffs,high_lim,low_lim):
# FIR filter coefficients
#b = np.array(list(reversed(coeffs)))
b = np.array(coeffs)
# Window to be used
win = np.kaiser(len(b), 15)
# Windowed filter coefficients
b_win = win*b
# Get frequency response of filter
Xdb, f = fir_freqz(b)
# ... and it mirrored version
Xdb_win, f = fir_freqz(b_win)
# Plot the impulse response
plt.subplot(211)
plt.stem(b, linefmt='b-', markerfmt='bo', basefmt='k-', label='Orig. coeff.')
plt.grid(True)
plt.title('Impulse reponse')
plt.xlabel('Sample')
plt.ylabel('Amplitude')
# Plot the frequency response
plt.subplot(212)
plt.plot(f, Xdb, 'b', label='Orig. coeff.')
plt.grid(True)
plt.title('Frequency reponse for range {} - {} Hz'.format(low_lim,high_lim))
plt.xlabel('Frequency [Hz]')
plt.ylabel('Amplitude [dB]')
plt.xlim((0, fs/2)) # Set the frequency limit - being lazy
plt.tight_layout()
plt.show()
#creating and training the CNN
model = tf.keras.Sequential()
model.add(tf.keras.layers.Input(shape=(60,1)))
model.add(tf.keras.layers.Conv1D(filters=1,kernel_size=6, use_bias=False))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(16,activation='relu'))
model.add(tf.keras.layers.Dense(1,activation='sigmoid'))
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
model.fit(x_train, y_train, batch_size = 100, epochs=5)
#getting the convulting filters weights and plotting the frequency and step response
coeffs = []
for j in model.trainable_variables[0]:
coeffs.append(float(j[0]))
plot(coeffs,high_lim,low_lim)
|
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.layers.Input",
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.Flatten"
] |
[((1568, 1589), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (1587, 1589), True, 'import tensorflow as tf\n'), ((1600, 1636), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(60, 1)'}), '(shape=(60, 1))\n', (1621, 1636), True, 'import tensorflow as tf\n'), ((1647, 1711), 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', ([], {'filters': '(1)', 'kernel_size': '(6)', 'use_bias': '(False)'}), '(filters=1, kernel_size=6, use_bias=False)\n', (1669, 1711), True, 'import tensorflow as tf\n'), ((1722, 1747), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (1745, 1747), True, 'import tensorflow as tf\n'), ((1759, 1803), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (1780, 1803), True, 'import tensorflow as tf\n'), ((1814, 1860), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1835, 1860), True, 'import tensorflow as tf\n')]
|
from ChessRender.RenderFsmCommon.button_fsm import ButtonFsm
from ChessRender.RenderFsmCommon.screen_states import ScreenState
from ChessRender.RenderFsmCommon.screen_text_fsm import ScreenTextFsm
from ChessRender.RenderFsmCommon.text_field_fsm import TextFieldFsm
class FsmStateRegistration(ScreenState):
def __init__(self, process_login):
ScreenState.__init__(self)
self.screen_atributes.buttons["but:Confirm"] = ButtonFsm("Confirm", (0, 0, -0.5))
self.screen_atributes.buttons["but:Back"] = ButtonFsm("Back", (0, 0, -0.8))
self.screen_atributes.text_fields["text_field:Login"] = TextFieldFsm("text_field_login", (-0.5, 0, 0.5), False)
self.screen_atributes.text_fields["text_field:Email"] = TextFieldFsm("text_field_email", (-0.5, 0, 0.3), False)
self.screen_atributes.text_fields["text_field:Password"] = TextFieldFsm("text_field_password", (-0.5, 0, 0.1), True)
self.screen_atributes.screen_texts["scrtext:Login"] = ScreenTextFsm("Login: ", (-0.7, 0.5))
self.screen_atributes.screen_texts["scrtext:Email"] = ScreenTextFsm("Email:", (-0.7, 0.3))
self.screen_atributes.screen_texts["scrtext:Password"] = ScreenTextFsm("Password:", (-0.7, 0.1))
self.initialize_button_links()
self.login_field = None
self.email = None
self.password_field = None
self.process_login = process_login
def initialize_button_links(self):
self.screen_atributes.buttons["but:Confirm"].add_command(self.confirm_command)
self.screen_atributes.buttons["but:Back"].add_link("fsm:Multiplayer")
def confirm_command(self):
process_login_arg = {"Login": self.gui_text_fields["text_field_login"].get(),
"Email": self.gui_text_fields["text_field_email"].get(),
"Password": self.gui_text_fields["text_field_password"].get()}
self.process_login(process_login_arg)
|
[
"ChessRender.RenderFsmCommon.screen_text_fsm.ScreenTextFsm",
"ChessRender.RenderFsmCommon.text_field_fsm.TextFieldFsm",
"ChessRender.RenderFsmCommon.button_fsm.ButtonFsm",
"ChessRender.RenderFsmCommon.screen_states.ScreenState.__init__"
] |
[((355, 381), 'ChessRender.RenderFsmCommon.screen_states.ScreenState.__init__', 'ScreenState.__init__', (['self'], {}), '(self)\n', (375, 381), False, 'from ChessRender.RenderFsmCommon.screen_states import ScreenState\n'), ((438, 472), 'ChessRender.RenderFsmCommon.button_fsm.ButtonFsm', 'ButtonFsm', (['"""Confirm"""', '(0, 0, -0.5)'], {}), "('Confirm', (0, 0, -0.5))\n", (447, 472), False, 'from ChessRender.RenderFsmCommon.button_fsm import ButtonFsm\n'), ((525, 556), 'ChessRender.RenderFsmCommon.button_fsm.ButtonFsm', 'ButtonFsm', (['"""Back"""', '(0, 0, -0.8)'], {}), "('Back', (0, 0, -0.8))\n", (534, 556), False, 'from ChessRender.RenderFsmCommon.button_fsm import ButtonFsm\n'), ((622, 677), 'ChessRender.RenderFsmCommon.text_field_fsm.TextFieldFsm', 'TextFieldFsm', (['"""text_field_login"""', '(-0.5, 0, 0.5)', '(False)'], {}), "('text_field_login', (-0.5, 0, 0.5), False)\n", (634, 677), False, 'from ChessRender.RenderFsmCommon.text_field_fsm import TextFieldFsm\n'), ((742, 797), 'ChessRender.RenderFsmCommon.text_field_fsm.TextFieldFsm', 'TextFieldFsm', (['"""text_field_email"""', '(-0.5, 0, 0.3)', '(False)'], {}), "('text_field_email', (-0.5, 0, 0.3), False)\n", (754, 797), False, 'from ChessRender.RenderFsmCommon.text_field_fsm import TextFieldFsm\n'), ((865, 922), 'ChessRender.RenderFsmCommon.text_field_fsm.TextFieldFsm', 'TextFieldFsm', (['"""text_field_password"""', '(-0.5, 0, 0.1)', '(True)'], {}), "('text_field_password', (-0.5, 0, 0.1), True)\n", (877, 922), False, 'from ChessRender.RenderFsmCommon.text_field_fsm import TextFieldFsm\n'), ((986, 1025), 'ChessRender.RenderFsmCommon.screen_text_fsm.ScreenTextFsm', 'ScreenTextFsm', (['"""Login: """', '(-0.7, 0.5)'], {}), "('Login: ', (-0.7, 0.5))\n", (999, 1025), False, 'from ChessRender.RenderFsmCommon.screen_text_fsm import ScreenTextFsm\n'), ((1088, 1124), 'ChessRender.RenderFsmCommon.screen_text_fsm.ScreenTextFsm', 'ScreenTextFsm', (['"""Email:"""', '(-0.7, 0.3)'], {}), "('Email:', (-0.7, 0.3))\n", (1101, 
1124), False, 'from ChessRender.RenderFsmCommon.screen_text_fsm import ScreenTextFsm\n'), ((1190, 1229), 'ChessRender.RenderFsmCommon.screen_text_fsm.ScreenTextFsm', 'ScreenTextFsm', (['"""Password:"""', '(-0.7, 0.1)'], {}), "('Password:', (-0.7, 0.1))\n", (1203, 1229), False, 'from ChessRender.RenderFsmCommon.screen_text_fsm import ScreenTextFsm\n')]
|