| code (string, 22-1.05M chars) | apis (list, 1-3.31k items) | extract_api (string, 75-3.25M chars) |
|---|---|---|
from application.TDG import PatientTDG
from passlib.hash import sha256_crypt
import datetime
# Returns True if patient exists
def patientExists(hcnumber):
return PatientTDG.find(hcnumber=hcnumber) is not None
# Returns Patient if found
def getPatient(hcnumber):
patient = PatientTDG.find(hcnumber=hcnumber)
if patient is None:
return None
else:
return dict(patient)
# Returns true if patient is authenticated
def authenticate(hcnumber, password):
verified = False
user = getPatient(hcnumber)
if user is not None:
verified = sha256_crypt.verify(password, user['password_hash'])
return verified
# Returns True if patient is created
def createPatient(hcnumber, fname, lname, birthday, gender, phone, email, address, password, lastAnnual):
    response = False
    if patientExists(hcnumber):
        response = False  # if the patient already exists, return False
else:
# hash password
password_hash = sha256_crypt.hash(password)
# format the dates
if lastAnnual:
lastannualSplit = lastAnnual.split("-")
lastAnnual = datetime.datetime.strptime(lastannualSplit[0] + lastannualSplit[1] + lastannualSplit[2], '%Y%m%d').date()
else:
lastAnnual = None
bdaySplit = birthday.split("-")
birthday = datetime.datetime.strptime(bdaySplit[0] + bdaySplit[1] + bdaySplit[2], '%Y%m%d').date()
PatientTDG.create(hcnumber=hcnumber, fname=fname, lname=lname, birthday=birthday, gender=gender, phone=phone, email=email, address=address, password_hash=password_hash, lastAnnual=lastAnnual)
        response = True
    return response
# Returns True if the patient can book an annual appointment, False otherwise.
# Checks when the last annual was (it must be at least a year ago).
def canBookAnnual(hcnumber):
    lastAnnual = getPatient(hcnumber)['lastAnnual']
    if lastAnnual is None:
        return True
    # normalize to a date so the subtraction below is date - date
    # (datetime.now() - date would raise a TypeError)
    if isinstance(lastAnnual, datetime.datetime):
        lastAnnual = lastAnnual.date()
    return (datetime.date.today() - lastAnnual).days >= 365
# Returns True if the patient's annual has been changed
# TODO: update this method to change lastAnnual to the day of the appointment, not the day of the booking
def updateAnnual(hcnumber, date):
if getPatient(hcnumber) is None:
return False
else:
if date is not None:
lastannualSplit = date.split("-")
date = datetime.datetime.strptime(lastannualSplit[0] + lastannualSplit[1] + lastannualSplit[2], '%Y%m%d').date()
PatientTDG.update(hcnumber=hcnumber, date=date)
return True
else:
PatientTDG.update(hcnumber=hcnumber, date=None)
return True
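# Illustrative usage (an editorial addition, not part of the original module):
# assuming PatientTDG is wired to a real database, a create-then-authenticate
# flow might look like this; the health-card number and field values are made up.
if __name__ == '__main__':
    if createPatient('HC1234', 'Ada', 'Lovelace', '1990-01-01', 'F', '555-0100',
                     'ada@example.com', '1 Main St', 's3cret', None):
        assert authenticate('HC1234', 's3cret')
        print(canBookAnnual('HC1234'))  # True: no previous annual on record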
|
[
"application.TDG.PatientTDG.create",
"application.TDG.PatientTDG.find",
"datetime.datetime.strptime",
"application.TDG.PatientTDG.update",
"passlib.hash.sha256_crypt.hash",
"passlib.hash.sha256_crypt.verify",
"datetime.datetime.now"
] |
[((276, 310), 'application.TDG.PatientTDG.find', 'PatientTDG.find', ([], {'hcnumber': 'hcnumber'}), '(hcnumber=hcnumber)\n', (291, 310), False, 'from application.TDG import PatientTDG\n'), ((164, 198), 'application.TDG.PatientTDG.find', 'PatientTDG.find', ([], {'hcnumber': 'hcnumber'}), '(hcnumber=hcnumber)\n', (179, 198), False, 'from application.TDG import PatientTDG\n'), ((540, 592), 'passlib.hash.sha256_crypt.verify', 'sha256_crypt.verify', (['password', "user['password_hash']"], {}), "(password, user['password_hash'])\n", (559, 592), False, 'from passlib.hash import sha256_crypt\n'), ((928, 955), 'passlib.hash.sha256_crypt.hash', 'sha256_crypt.hash', (['password'], {}), '(password)\n', (945, 955), False, 'from passlib.hash import sha256_crypt\n'), ((1391, 1591), 'application.TDG.PatientTDG.create', 'PatientTDG.create', ([], {'hcnumber': 'hcnumber', 'fname': 'fname', 'lname': 'lname', 'birthday': 'birthday', 'gender': 'gender', 'phone': 'phone', 'email': 'email', 'address': 'address', 'password_hash': 'password_hash', 'lastAnnual': 'lastAnnual'}), '(hcnumber=hcnumber, fname=fname, lname=lname, birthday=\n birthday, gender=gender, phone=phone, email=email, address=address,\n password_hash=password_hash, lastAnnual=lastAnnual)\n', (1408, 1591), False, 'from application.TDG import PatientTDG\n'), ((1924, 1947), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1945, 1947), False, 'import datetime\n'), ((2447, 2494), 'application.TDG.PatientTDG.update', 'PatientTDG.update', ([], {'hcnumber': 'hcnumber', 'date': 'date'}), '(hcnumber=hcnumber, date=date)\n', (2464, 2494), False, 'from application.TDG import PatientTDG\n'), ((2521, 2568), 'application.TDG.PatientTDG.update', 'PatientTDG.update', ([], {'hcnumber': 'hcnumber', 'date': 'None'}), '(hcnumber=hcnumber, date=None)\n', (2538, 2568), False, 'from application.TDG import PatientTDG\n'), ((1294, 1379), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['(bdaySplit[0] + bdaySplit[1] + bdaySplit[2])', '"""%Y%m%d"""'], {}), "(bdaySplit[0] + bdaySplit[1] + bdaySplit[2], '%Y%m%d'\n )\n", (1320, 1379), False, 'import datetime\n'), ((1084, 1186), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['(lastannualSplit[0] + lastannualSplit[1] + lastannualSplit[2])', '"""%Y%m%d"""'], {}), "(lastannualSplit[0] + lastannualSplit[1] +\n lastannualSplit[2], '%Y%m%d')\n", (1110, 1186), False, 'import datetime\n'), ((2338, 2440), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['(lastannualSplit[0] + lastannualSplit[1] + lastannualSplit[2])', '"""%Y%m%d"""'], {}), "(lastannualSplit[0] + lastannualSplit[1] +\n lastannualSplit[2], '%Y%m%d')\n", (2364, 2440), False, 'import datetime\n')]
|
from django.urls import path, include
urlpatterns = [
# API
path('', include('backend.api.v2.urls')),
]
|
[
"django.urls.include"
] |
[((78, 108), 'django.urls.include', 'include', (['"""backend.api.v2.urls"""'], {}), "('backend.api.v2.urls')\n", (85, 108), False, 'from django.urls import path, include\n')]
|
'''
@brief Leg-Rest Position Recommendation with DecisionTree Regressor
@author <NAME> <<EMAIL>>
@date 2021. 05. 21
'''
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import progressbar
'''
Presets & Hyper-parameters
'''
CONFIGURATION_FILE_PATH = "./data/train/data_config.csv"
DATASET_PATH = "./data/train/"
pd.set_option('display.width', 200) # for display width
# FEATURE_LENGTH = 30 # n-dimensional data feature only use
# NUMBER_OF_SAMPLES = 299 # number of augmented data
# FEATURE_MAX_LENGTH = 115 # Maximum feature length
# NUMBER_OF_RANDOM_SELECTION = 5
# MAX_TRAIN_ITERATION = -1 # infinity
'''
1. Load configuration file
'''
data_config = pd.read_csv(CONFIGURATION_FILE_PATH, header=0, index_col=0)
'''
2. data extraction
'''
X = data_config.loc[:, ['user_height', 'user_weight', 'user_age']]
bmr = 66.47+(13.75*X['user_weight'])+(5*X['user_height'])-(6.76*X['user_age'])
bmi = X['user_weight']/(X['user_height']/100*X['user_height']/100)
X["bmr"] = bmr
X["bmi"] = bmi
ys = data_config.loc[:, ['bestfit_angle_standard']]
yr = data_config.loc[:, ['bestfit_angle_relax']]
del X["user_age"]
'''
DecisionTree Regression Model
'''
X_train, X_test, ys_train, ys_test, yr_train, yr_test = train_test_split(X, np.ravel(ys), np.ravel(yr), test_size=0.33, shuffle=True)
print("------ Regression Model Evaluation (@standard) ------")
model_standard = DecisionTreeRegressor(
criterion = "mse",
max_depth=50,
min_samples_leaf=1,
    random_state=1).fit(X_train, ys_train)
print("* R2 Score with Trainset (@standard) :", model_standard.score(X_train, ys_train))
print("* R2 Score with Testset (@standard) :", model_standard.score(X_test, ys_test))
print("* Feature Importances (@standard) :")
for name, value in zip(X_train.columns, model_standard.feature_importances_):
print(' - {0}: {1:.3f}'.format(name, value))
print("------ Regression Model Evaluation (@relax) ------")
model_relax = DecisionTreeRegressor(
criterion = "mse", # mean square error
max_depth=50,
min_samples_leaf=1,
    random_state=1).fit(X_train, yr_train)  # fit the relax model to the relax targets
print("* R-squared Score with Trainset (@relax) :", model_relax.score(X_train, yr_train))
print("* R-squared Score with Testset (@relax) :", model_relax.score(X_test, yr_test))
print("* Feature Importances (@relax) :")
for name, value in zip(X_train.columns, model_relax.feature_importances_):
print(' - {0}: {1:.3f}'.format(name, value))
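# Editorial aside (an addition, not in the original script): with a dataset
# this small, a k-fold cross-validated R2 is less split-dependent than the
# single holdout above; cross_val_score is standard scikit-learn.
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(
    DecisionTreeRegressor(criterion="mse", max_depth=50, min_samples_leaf=1, random_state=1),
    X, np.ravel(ys), cv=5, scoring='r2')
print("* 5-fold CV R2 (@standard) : mean={0:.3f}, std={1:.3f}".format(cv_scores.mean(), cv_scores.std()))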
'''
Output File Generation
'''
# min_age = 20
# max_age = 80
# ages = np.array([min_age+i for i in range(max_age-min_age+1)])
ages = np.arange(20, 80, step=10)
# min_height = 150
# max_height = 190
# heights = np.array([min_height+i for i in range(max_height-min_height+1)])
heights = np.arange(150, 190, step=10)
# min_weight = 40
# max_weight = 100
# weights = np.array([min_weight+i for i in range(max_weight-min_weight+1)])
weights = np.arange(40, 100, step=10)
print(X.head())
bar = progressbar.ProgressBar(maxval=len(ages)*len(heights)*len(weights), widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
bar.start()
output_standard = pd.DataFrame(columns=['height','weight','legrest'])
output_relax = pd.DataFrame(columns=['height','weight','legrest'])
count = 0
for a in ages:
for h in heights:
for w in weights:
bmr = 66.47+(13.75*w)+(5*h)-(6.76*a)
bmi = w/(h/100*h/100)
pvs = model_standard.predict([[h,w,bmr,bmi]])
pvr = model_relax.predict([[h,w,bmr,bmi]])
output_standard = output_standard.append({'height':h, 'weight':w, 'legrest':pvs[0]}, ignore_index=True)
output_relax = output_relax.append({'height':h, 'weight':w, 'legrest':pvr[0]}, ignore_index=True)
count = count+1
bar.update(count)
bar.finish()
output_standard.to_csv('result_standard.csv', index=False)
output_relax.to_csv('result_relax.csv', index=False)
print("saved results")
|
[
"pandas.DataFrame",
"sklearn.tree.DecisionTreeRegressor",
"numpy.ravel",
"pandas.read_csv",
"progressbar.Bar",
"progressbar.Percentage",
"numpy.arange",
"pandas.set_option"
] |
[((404, 439), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(200)'], {}), "('display.width', 200)\n", (417, 439), True, 'import pandas as pd\n'), ((748, 807), 'pandas.read_csv', 'pd.read_csv', (['CONFIGURATION_FILE_PATH'], {'header': '(0)', 'index_col': '(0)'}), '(CONFIGURATION_FILE_PATH, header=0, index_col=0)\n', (759, 807), True, 'import pandas as pd\n'), ((2602, 2628), 'numpy.arange', 'np.arange', (['(20)', '(80)'], {'step': '(10)'}), '(20, 80, step=10)\n', (2611, 2628), True, 'import numpy as np\n'), ((2755, 2783), 'numpy.arange', 'np.arange', (['(150)', '(190)'], {'step': '(10)'}), '(150, 190, step=10)\n', (2764, 2783), True, 'import numpy as np\n'), ((2909, 2936), 'numpy.arange', 'np.arange', (['(40)', '(100)'], {'step': '(10)'}), '(40, 100, step=10)\n', (2918, 2936), True, 'import numpy as np\n'), ((3132, 3185), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['height', 'weight', 'legrest']"}), "(columns=['height', 'weight', 'legrest'])\n", (3144, 3185), True, 'import pandas as pd\n'), ((3199, 3252), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['height', 'weight', 'legrest']"}), "(columns=['height', 'weight', 'legrest'])\n", (3211, 3252), True, 'import pandas as pd\n'), ((1294, 1306), 'numpy.ravel', 'np.ravel', (['ys'], {}), '(ys)\n', (1302, 1306), True, 'import numpy as np\n'), ((1419, 1511), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'criterion': '"""mse"""', 'max_depth': '(50)', 'min_samples_leaf': '(1)', 'random_state': '(1)'}), "(criterion='mse', max_depth=50, min_samples_leaf=1,\n random_state=1)\n", (1440, 1511), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((1973, 2065), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'criterion': '"""mse"""', 'max_depth': '(50)', 'min_samples_leaf': '(1)', 'random_state': '(1)'}), "(criterion='mse', max_depth=50, min_samples_leaf=1,\n random_state=1)\n", (1994, 2065), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((3038, 3068), 'progressbar.Bar', 'progressbar.Bar', (['"""="""', '"""["""', '"""]"""'], {}), "('=', '[', ']')\n", (3053, 3068), False, 'import progressbar\n'), ((3075, 3099), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (3097, 3099), False, 'import progressbar\n')]
|
import os
import os.path as osp
import re
import time
import shutil
import argparse
import subprocess
import multiprocessing
import cv2
import numpy as np
import pandas as pd
from selenium import webdriver
def check_banner(args):
valid = False
stage_dir = args[0]
banner_dir = args[1]
# Read banners to check
banners = [ cv2.imread(osp.join(banner_dir, banner))
for banner in os.listdir(banner_dir)
if not banner.startswith('.') ]
count = len(banners)
# Check downloaded images one by one
for path in [ osp.join(stage_dir, f) for f in os.listdir(stage_dir) ]:
# Read image
img = cv2.imread(path)
if img is None:
continue
# Match with banner
for banner in banners:
img = cv2.resize(img, (banner.shape[1], banner.shape[0]))
ref = banner.astype('float')
tar = img.astype('float')
# Determine image volume
volume = 1
for v in img.shape:
volume *= v
# Perform difference between two image
diff = np.sum(np.abs(ref-tar)) / volume
if diff < 10:
count -= 1
# Early stopping
if count <= 0:
valid = True
break
return (osp.basename(stage_dir), valid)
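# Note (editorial): diff above is the mean absolute per-element difference over
# all pixels and channels; for 8-bit images it lies in [0, 255], so "diff < 10"
# accepts near-identical images while tolerating resize and compression noise.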
def main(args):
# Read target sellers to check their banner
with open(args['input'], 'r') as f:
sellers = [ line.strip('\n') for line in f.readlines() ]
seller_names = [ osp.basename(seller) for seller in sellers ]
# Instantiate chrome webdriver with default page google.com opened
mobile_emulation = { "deviceName": "iPhone X" }
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--headless")
# chrome_options.add_experimental_option("mobileEmulation", mobile_emulation)
driver = webdriver.Chrome(args['driver'], options=chrome_options)
driver.get('http://google.com')
# Load every seller pages
for name, seller in zip(seller_names, sellers):
print(f"Open page '{name}'")
driver.execute_script(f"window.open('about:blank', '{name}');")
driver.switch_to.window(name)
driver.get(seller)
time.sleep(3)
# Parse every opened pages
pattern = r"https://cf.shopee.tw/file/[\d\w]+"
for name in seller_names:
# Create Staging directory for each seller
stage_dir = osp.join(args['stage'], name)
shutil.rmtree(stage_dir, ignore_errors=True)
os.makedirs(stage_dir)
# Extract links of each loaded images
driver.switch_to.window(name)
html = driver.page_source
imgs = re.findall(pattern, html)
# Download each loaded images
print(f"Download images in '{driver.current_url}'")
procs = []
for img in imgs:
cmdline = f'wget -O {osp.join(stage_dir, osp.basename(img))} {img}'
proc = subprocess.Popen(
cmdline,
shell=True,
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL)
procs.append(proc)
# Wait for download completion
for proc in procs:
proc.wait()
proc.terminate()
# Exit the driver
driver.quit()
# Check banners with multiple workers
stages = [
osp.join(args['stage'], seller)
for seller in os.listdir(args['stage'])
if not seller.startswith('.')
]
banners = [ args['banner'] ]*len(stages)
tasks = list(zip(stages, banners))
pool = multiprocessing.Pool(multiprocessing.cpu_count())
results = pool.map(check_banner, tasks)
data = { 'seller': [], 'result': [] }
for result in results:
data['seller'].append(result[0])
data['result'].append(result[1])
df = pd.DataFrame(data, columns=['seller', 'result'])
df.to_csv(args['output'], index=False)
print(f"Export result to {args['output']}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input", required=True, help="list of urls of target sellers")
parser.add_argument("--output", default="report.txt", help="report file")
parser.add_argument("--banner", default="banner", help="directory containing banners need to check")
parser.add_argument("--stage", default="stage", help="staging directories to hold download images")
parser.add_argument("--driver", default="driver/chromedriver")
args = vars(parser.parse_args())
main(args)
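# Example invocation (editorial; the script and file names are placeholders,
# the flags are the ones defined by the argparse setup above):
#   python check_banners.py --input sellers.txt --banner banner --stage stage \
#       --driver driver/chromedriver --output report.csv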
|
[
"pandas.DataFrame",
"subprocess.Popen",
"numpy.abs",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.basename",
"time.sleep",
"cv2.imread",
"re.findall",
"selenium.webdriver.ChromeOptions",
"selenium.webdriver.Chrome",
"shutil.rmtree",
"os.path.join",
"os.listdir",
"cv2.resize",
"multiprocessing.cpu_count"
] |
[((1762, 1787), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (1785, 1787), False, 'from selenium import webdriver\n'), ((1929, 1985), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (["args['driver']"], {'options': 'chrome_options'}), "(args['driver'], options=chrome_options)\n", (1945, 1985), False, 'from selenium import webdriver\n'), ((3956, 4004), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['seller', 'result']"}), "(data, columns=['seller', 'result'])\n", (3968, 4004), True, 'import pandas as pd\n'), ((4138, 4163), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4161, 4163), False, 'import argparse\n'), ((603, 625), 'os.path.join', 'osp.join', (['stage_dir', 'f'], {}), '(stage_dir, f)\n', (611, 625), True, 'import os.path as osp\n'), ((695, 711), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (705, 711), False, 'import cv2\n'), ((1345, 1368), 'os.path.basename', 'osp.basename', (['stage_dir'], {}), '(stage_dir)\n', (1357, 1368), True, 'import os.path as osp\n'), ((2287, 2300), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2297, 2300), False, 'import time\n'), ((2485, 2514), 'os.path.join', 'osp.join', (["args['stage']", 'name'], {}), "(args['stage'], name)\n", (2493, 2514), True, 'import os.path as osp\n'), ((2523, 2567), 'shutil.rmtree', 'shutil.rmtree', (['stage_dir'], {'ignore_errors': '(True)'}), '(stage_dir, ignore_errors=True)\n', (2536, 2567), False, 'import shutil\n'), ((2576, 2598), 'os.makedirs', 'os.makedirs', (['stage_dir'], {}), '(stage_dir)\n', (2587, 2598), False, 'import os\n'), ((2732, 2757), 're.findall', 're.findall', (['pattern', 'html'], {}), '(pattern, html)\n', (2742, 2757), False, 'import re\n'), ((3477, 3508), 'os.path.join', 'osp.join', (["args['stage']", 'seller'], {}), "(args['stage'], seller)\n", (3485, 3508), True, 'import os.path as osp\n'), ((3721, 3748), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3746, 3748), False, 'import multiprocessing\n'), ((387, 415), 'os.path.join', 'osp.join', (['banner_dir', 'banner'], {}), '(banner_dir, banner)\n', (395, 415), True, 'import os.path as osp\n'), ((447, 469), 'os.listdir', 'os.listdir', (['banner_dir'], {}), '(banner_dir)\n', (457, 469), False, 'import os\n'), ((635, 656), 'os.listdir', 'os.listdir', (['stage_dir'], {}), '(stage_dir)\n', (645, 656), False, 'import os\n'), ((834, 885), 'cv2.resize', 'cv2.resize', (['img', '(banner.shape[1], banner.shape[0])'], {}), '(img, (banner.shape[1], banner.shape[0]))\n', (844, 885), False, 'import cv2\n'), ((1572, 1592), 'os.path.basename', 'osp.basename', (['seller'], {}), '(seller)\n', (1584, 1592), True, 'import os.path as osp\n'), ((2999, 3095), 'subprocess.Popen', 'subprocess.Popen', (['cmdline'], {'shell': '(True)', 'stderr': 'subprocess.DEVNULL', 'stdout': 'subprocess.DEVNULL'}), '(cmdline, shell=True, stderr=subprocess.DEVNULL, stdout=\n subprocess.DEVNULL)\n', (3015, 3095), False, 'import subprocess\n'), ((3531, 3556), 'os.listdir', 'os.listdir', (["args['stage']"], {}), "(args['stage'])\n", (3541, 3556), False, 'import os\n'), ((1162, 1179), 'numpy.abs', 'np.abs', (['(ref - tar)'], {}), '(ref - tar)\n', (1168, 1179), True, 'import numpy as np\n'), ((2953, 2970), 'os.path.basename', 'osp.basename', (['img'], {}), '(img)\n', (2965, 2970), True, 'import os.path as osp\n')]
|
#!/usr/bin/python
import argparse
import glob
import re
def recog_file(filename, ground_truth_path):
# read ground truth
gt_file = ground_truth_path + re.sub('.*/','/',filename) + '.txt'
    with open(gt_file, 'r') as f:
        ground_truth = f.read().split('\n')[0:-1]
# read recognized sequence
    with open(filename, 'r') as f:
        recognized = f.read().split('\n')[5].split()  # frame-level recognition is in the 6th line of the file
n_frame_errors = 0
for i in range(len(recognized)):
if not recognized[i] == ground_truth[i]:
n_frame_errors += 1
return n_frame_errors, len(recognized)
### MAIN #######################################################################
### arguments ###
### --recog_dir: the directory where the recognition files from inference.py are placed
### --ground_truth_dir: the directory where the framelevel ground truth can be found
parser = argparse.ArgumentParser()
parser.add_argument('--recog_dir', default='results')
parser.add_argument('--ground_truth_dir', default='data/groundTruth')
args = parser.parse_args()
filelist = glob.glob(args.recog_dir + '/*')
print('Evaluate %d video files...' % len(filelist))
n_frames = 0
n_errors = 0
# loop over all recognition files and evaluate the frame error
for filename in filelist:
errors, frames = recog_file(filename, args.ground_truth_dir)
n_errors += errors
n_frames += frames
# print frame accuracy (1.0 - frame error rate)
print('frame accuracy: %f' % (1.0 - float(n_errors) / n_frames))
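# Worked example (editorial, illustrative numbers only): 200 frame errors over
# 10000 frames gives a frame error rate of 0.02 and a frame accuracy of 0.98.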
|
[
"re.sub",
"argparse.ArgumentParser",
"glob.glob"
] |
[((957, 982), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (980, 982), False, 'import argparse\n'), ((1146, 1178), 'glob.glob', 'glob.glob', (["(args.recog_dir + '/*')"], {}), "(args.recog_dir + '/*')\n", (1155, 1178), False, 'import glob\n'), ((163, 191), 're.sub', 're.sub', (['""".*/"""', '"""/"""', 'filename'], {}), "('.*/', '/', filename)\n", (169, 191), False, 'import re\n')]
|
import argparse
import numpy as np
import struct
from matplotlib import gridspec
import matplotlib.pyplot as plt
from glob import glob
import os
from os.path import join
from natsort import natsorted
from skimage.transform import resize
import re
from tqdm import tqdm
""" Code to process depth/image/pose binaries the ios DepthBundleRecorder app into more useable .npz files.
Usage: python ConvertBinaries.py -d data_folder_with_binaries
Output: a folder data_processed_folder_with_binaries containing the processed depth bundles
"""
def read_header(header):
    h = re.sub(r"\[|\]|\(|\)|\s|\'", "", str(header))  # Strip all delims but <> and commas
h = h.split("<ENDHEADER>")[0] # Snip empty end of header
timestamp = float(h.split("Time:")[1].split(",")[0])
euler_angles = np.array(h.split("EulerAngles:SIMD3<Float>")[1].split(",")[0:3], dtype=np.float32)
world_pose = np.array(h.split("WorldPose:simd_float4x4")[1].split(",")[0:16], dtype=np.float32).reshape((4,4))
intrinsics = np.array(h.split("Intrinsics:Optionalsimd_float3x3")[1].split(",")[0:9], dtype=np.float32).reshape((3,3))
world_to_camera = np.array(h.split("WorldToCamera:Optionalsimd_float4x4")[1].split(",")[0:16], dtype=np.float32).reshape((4,4))
return {'timestamp' : timestamp,
'euler_angles' : euler_angles,
'world_pose' : world_pose.T,
'intrinsics' : intrinsics.T,
'world_to_camera' : world_to_camera.T}
def load_info(info_name):
with open(info_name, mode='rb') as file:
file_content = file.read()
        header = file_content[:1024]  # 1024-byte header
return read_header(header)
def load_depth(depth_name):
with open(depth_name, mode='rb') as file:
file_content = file.read()
        header = file_content[:1024]  # 1024-byte header
file_content = file_content[1024:]
file_content = struct.unpack('f'* ((len(file_content)) // 4), file_content)
depth = np.reshape(file_content, (192,256))
depth = np.flip(depth.T, 1).astype(np.float32)
return depth, header
def load_conf(conf_name):
with open(conf_name, mode='rb') as file:
file_content = file.read()
file_content = struct.unpack('B'* ((len(file_content))), file_content)
conf = np.reshape(file_content, (192,256))
conf = np.flip(conf.T, 1).astype(np.uint8)
return conf
def load_img(img_name):
with open(img_name, mode='rb') as file:
file_content = file.read()
Y = file_content[:1920*1440]
Y = struct.unpack('B' * ((len(Y))), Y)
Y = np.reshape(Y, (1440,1920))
Y = np.flip(Y.T, 1)
UV = file_content[1920*1440:]
UV = struct.unpack('B' * ((len(UV))), UV)
U,V = UV[0::2], UV[1::2]
U,V = np.reshape(U, (720,960)), np.reshape(V, (720,960))
U,V = np.flip(U.T, 1), np.flip(V.T, 1)
# Re-Center U,V channels
Y,U,V = Y.astype(np.float32), (U.astype(np.float32) - 128), (V.astype(np.float32) - 128)
U,V = resize(U, (1920,1440), order=0), resize(V, (1920,1440), order=0)
# Convert YUV 420 to RGB
R = Y + (V*1/0.6350)
B = Y + (U*1/0.5389)
G = (Y - 0.2126*R - 0.0722*B)*(1/0.7152)
img = np.stack((R,G,B), axis=-1)
img[img<0] = 0
img[img>255] = 255
img = img.astype(np.uint8)
return img
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-d', default=None, type=str, required=True, help='Data directory')
args = parser.parse_args()
bundle_names = natsorted(glob(join(args.d, "*")))
for bundle_name in bundle_names:
print("Processing {0}.".format(bundle_name.split("/")[-1]))
if "-poses" not in bundle_name:
# Process image + depth bundle
depth_names = natsorted(glob(join(bundle_name, "depth*.bin")))
img_names = natsorted(glob(join(bundle_name, "image*.bin")))
conf_names = natsorted(glob(join(bundle_name, "conf*.bin")))
save_path = bundle_name.replace("data", "data_processed")
os.makedirs(save_path, exist_ok=True)
npz_file = {}
for i, (img_name, depth_name, conf_name) in tqdm(enumerate(zip(img_names, depth_names, conf_names))):
img = load_img(img_name)
depth, header = load_depth(depth_name)
info = read_header(header)
conf = load_conf(conf_name)
if i == 0:
ref_time = info['timestamp']
info['timestamp'] -= ref_time
npz_file["img_{0}".format(i)] = img
npz_file["depth_{0}".format(i)] = depth
npz_file["conf_{0}".format(i)] = conf
npz_file["info_{0}".format(i)] = info
npz_file["num_frames"] = len(img_names)
# Save first frame preview
fig = plt.figure(figsize=(14, 30))
gs = gridspec.GridSpec(1, 3, wspace=0.0, hspace=0.0, width_ratios=[1,1,1.12])
ax1 = plt.subplot(gs[0,0])
ax1.imshow(npz_file['img_0'])
ax1.axis('off')
ax1.set_title("Image")
ax2 = plt.subplot(gs[0,1])
ax2.imshow(npz_file['conf_0'], cmap="gray")
ax2.axis('off')
ax2.set_title("Confidence")
ax3 = plt.subplot(gs[0,2])
d = ax3.imshow(npz_file['depth_0'], cmap="Spectral", vmin=0, vmax=7)
ax3.axis('off')
ax3.set_title("Depth")
fig.colorbar(d, fraction=0.055, label="Depth [m]")
plt.savefig(join(save_path, "frame_first.png"), bbox_inches='tight', pad_inches=0.05, facecolor='white')
plt.close()
# Save last frame preview
fig = plt.figure(figsize=(14, 30))
gs = gridspec.GridSpec(1, 3, wspace=0.0, hspace=0.0, width_ratios=[1,1,1.12])
ax1 = plt.subplot(gs[0,0])
ax1.imshow(npz_file['img_{0}'.format(len(img_names) - 1)])
ax1.axis('off')
ax1.set_title("Image")
ax2 = plt.subplot(gs[0,1])
ax2.imshow(npz_file['conf_{0}'.format(len(img_names) - 1)], cmap="gray")
ax2.axis('off')
ax2.set_title("Confidence")
ax3 = plt.subplot(gs[0,2])
d = ax3.imshow(npz_file['depth_{0}'.format(len(img_names) - 1)], cmap="Spectral", vmin=0, vmax=7)
ax3.axis('off')
ax3.set_title("Depth")
fig.colorbar(d, fraction=0.055, label="Depth [m]")
plt.savefig(join(save_path, "frame_last.png"), bbox_inches='tight', pad_inches=0.05, facecolor='white')
plt.close()
# Save bundle
np.savez(join(save_path, "frame_bundle"), **npz_file)
else:
# Process only poses + info bundle
info_names = natsorted(glob(join(bundle_name, "info*.bin")))
save_path = bundle_name.replace("data", "data_processed")
os.makedirs(save_path, exist_ok=True)
npz_file = {}
for i, info_name in tqdm(enumerate(info_names)):
info = load_info(info_name)
if i == 0:
ref_time = info['timestamp']
info['timestamp'] -= ref_time
npz_file["info_{0}".format(i)] = info
npz_file["num_frames"] = len(info_names)
# Save bundle
np.savez(join(save_path, "info_bundle"), **npz_file)
if __name__ == '__main__':
main()
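# Hedged usage sketch (editorial, not part of the original tool): reading a
# processed bundle back; the path is a placeholder, and allow_pickle=True is
# needed because the per-frame info entries are stored as Python dicts.
#   bundle = np.load("data_processed_example/frame_bundle.npz", allow_pickle=True)
#   img0, depth0 = bundle["img_0"], bundle["depth_0"]
#   info0 = bundle["info_0"].item()  # recover the dict from the 0-d object array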
|
[
"numpy.stack",
"matplotlib.pyplot.subplot",
"numpy.flip",
"argparse.ArgumentParser",
"os.makedirs",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"skimage.transform.resize",
"numpy.reshape",
"matplotlib.gridspec.GridSpec",
"os.path.join"
] |
[((1955, 1991), 'numpy.reshape', 'np.reshape', (['file_content', '(192, 256)'], {}), '(file_content, (192, 256))\n', (1965, 1991), True, 'import numpy as np\n'), ((2260, 2296), 'numpy.reshape', 'np.reshape', (['file_content', '(192, 256)'], {}), '(file_content, (192, 256))\n', (2270, 2296), True, 'import numpy as np\n'), ((2552, 2579), 'numpy.reshape', 'np.reshape', (['Y', '(1440, 1920)'], {}), '(Y, (1440, 1920))\n', (2562, 2579), True, 'import numpy as np\n'), ((2587, 2602), 'numpy.flip', 'np.flip', (['Y.T', '(1)'], {}), '(Y.T, 1)\n', (2594, 2602), True, 'import numpy as np\n'), ((3150, 3178), 'numpy.stack', 'np.stack', (['(R, G, B)'], {'axis': '(-1)'}), '((R, G, B), axis=-1)\n', (3158, 3178), True, 'import numpy as np\n'), ((3291, 3316), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3314, 3316), False, 'import argparse\n'), ((2723, 2748), 'numpy.reshape', 'np.reshape', (['U', '(720, 960)'], {}), '(U, (720, 960))\n', (2733, 2748), True, 'import numpy as np\n'), ((2749, 2774), 'numpy.reshape', 'np.reshape', (['V', '(720, 960)'], {}), '(V, (720, 960))\n', (2759, 2774), True, 'import numpy as np\n'), ((2784, 2799), 'numpy.flip', 'np.flip', (['U.T', '(1)'], {}), '(U.T, 1)\n', (2791, 2799), True, 'import numpy as np\n'), ((2801, 2816), 'numpy.flip', 'np.flip', (['V.T', '(1)'], {}), '(V.T, 1)\n', (2808, 2816), True, 'import numpy as np\n'), ((2949, 2981), 'skimage.transform.resize', 'resize', (['U', '(1920, 1440)'], {'order': '(0)'}), '(U, (1920, 1440), order=0)\n', (2955, 2981), False, 'from skimage.transform import resize\n'), ((2982, 3014), 'skimage.transform.resize', 'resize', (['V', '(1920, 1440)'], {'order': '(0)'}), '(V, (1920, 1440), order=0)\n', (2988, 3014), False, 'from skimage.transform import resize\n'), ((2003, 2022), 'numpy.flip', 'np.flip', (['depth.T', '(1)'], {}), '(depth.T, 1)\n', (2010, 2022), True, 'import numpy as np\n'), ((2307, 2325), 'numpy.flip', 'np.flip', (['conf.T', '(1)'], {}), '(conf.T, 1)\n', (2314, 2325), True, 'import numpy as np\n'), ((3479, 3496), 'os.path.join', 'join', (['args.d', '"""*"""'], {}), "(args.d, '*')\n", (3483, 3496), False, 'from os.path import join\n'), ((4018, 4055), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (4029, 4055), False, 'import os\n'), ((4832, 4860), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 30)'}), '(figsize=(14, 30))\n', (4842, 4860), True, 'import matplotlib.pyplot as plt\n'), ((4879, 4953), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(3)'], {'wspace': '(0.0)', 'hspace': '(0.0)', 'width_ratios': '[1, 1, 1.12]'}), '(1, 3, wspace=0.0, hspace=0.0, width_ratios=[1, 1, 1.12])\n', (4896, 4953), False, 'from matplotlib import gridspec\n'), ((4970, 4991), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 0]'], {}), '(gs[0, 0])\n', (4981, 4991), True, 'import matplotlib.pyplot as plt\n'), ((5114, 5135), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 1]'], {}), '(gs[0, 1])\n', (5125, 5135), True, 'import matplotlib.pyplot as plt\n'), ((5277, 5298), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 2]'], {}), '(gs[0, 2])\n', (5288, 5298), True, 'import matplotlib.pyplot as plt\n'), ((5634, 5645), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5643, 5645), True, 'import matplotlib.pyplot as plt\n'), ((5703, 5731), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 30)'}), '(figsize=(14, 30))\n', (5713, 5731), True, 'import matplotlib.pyplot as plt\n'), ((5750, 5824), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(3)'], {'wspace': '(0.0)', 'hspace': '(0.0)', 'width_ratios': '[1, 1, 1.12]'}), '(1, 3, wspace=0.0, hspace=0.0, width_ratios=[1, 1, 1.12])\n', (5767, 5824), False, 'from matplotlib import gridspec\n'), ((5841, 5862), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 0]'], {}), '(gs[0, 0])\n', (5852, 5862), True, 'import matplotlib.pyplot as plt\n'), ((6014, 6035), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 1]'], {}), '(gs[0, 1])\n', (6025, 6035), True, 'import matplotlib.pyplot as plt\n'), ((6206, 6227), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 2]'], {}), '(gs[0, 2])\n', (6217, 6227), True, 'import matplotlib.pyplot as plt\n'), ((6591, 6602), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6600, 6602), True, 'import matplotlib.pyplot as plt\n'), ((6934, 6971), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (6945, 6971), False, 'import os\n'), ((5529, 5563), 'os.path.join', 'join', (['save_path', '"""frame_first.png"""'], {}), "(save_path, 'frame_first.png')\n", (5533, 5563), False, 'from os.path import join\n'), ((6487, 6520), 'os.path.join', 'join', (['save_path', '"""frame_last.png"""'], {}), "(save_path, 'frame_last.png')\n", (6491, 6520), False, 'from os.path import join\n'), ((6651, 6682), 'os.path.join', 'join', (['save_path', '"""frame_bundle"""'], {}), "(save_path, 'frame_bundle')\n", (6655, 6682), False, 'from os.path import join\n'), ((7396, 7426), 'os.path.join', 'join', (['save_path', '"""info_bundle"""'], {}), "(save_path, 'info_bundle')\n", (7400, 7426), False, 'from os.path import join\n'), ((3755, 3786), 'os.path.join', 'join', (['bundle_name', '"""depth*.bin"""'], {}), "(bundle_name, 'depth*.bin')\n", (3759, 3786), False, 'from os.path import join\n'), ((3828, 3859), 'os.path.join', 'join', (['bundle_name', '"""image*.bin"""'], {}), "(bundle_name, 'image*.bin')\n", (3832, 3859), False, 'from os.path import join\n'), ((3902, 3932), 'os.path.join', 'join', (['bundle_name', '"""conf*.bin"""'], {}), "(bundle_name, 'conf*.bin')\n", (3906, 3932), False, 'from os.path import join\n'), ((6819, 6849), 'os.path.join', 'join', (['bundle_name', '"""info*.bin"""'], {}), "(bundle_name, 'info*.bin')\n", (6823, 6849), False, 'from os.path import join\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 <NAME>
# http://www.codeatcpp.com
#
# Licensed under the BSD 3-Clause license.
# See LICENSE file in the project root for full license information.
#
""" Convert Zeus Z80 assembler file to a plain text """
import argparse
import logging
import io
from zxtools import CHUNK_SIZE
from zxtools.common import default_main
CODE_ALIGN_WIDTH = 35
def show_info(*parsed_args):
"""Show some statistic about Zeus file"""
# TODO Implement this function
return parsed_args
def read_file(src_file):
"""Read source file for future processing"""
with src_file:
while True:
chunk = src_file.read(CHUNK_SIZE)
if chunk:
for cur_char in chunk:
yield cur_char
else:
break
ASM_FIRST_TOKEN = 128
ASM_META = [
"A", "ADC ", "ADD ", "AF'", "AF", "AND ", "B", "BC", "BIT ", "C",
"CALL ", "CCF", "CP ", "CPD", "CPDR", "CPI", "CPIR", "CPL", "D", "DAA",
"DE", "DEC ", "DEFB ", "DEFM ", "DEFS ", "DEFW ", "DI", "DISP ", "DJNZ ",
"E", "EI", "ENT", "EQU ", "EX ", "EXX", "H", "HALT", "HL", "I", "IM ",
"IN ", "INC ", "IND", "INDR", "INI", "INIR", "IX", "IY", "JP ", "JR ",
"L", "LD ", "LDD", "LDDR", "LDI", "LDIR", "M", "NC", "NEG", "NOP", "NV",
"NZ", "OR ", "ORG ", "OTDR", "OTIR", "OUT ", "OUTD", "OUTI", "P", "PE",
"PO", "POP ", "PUSH ", "R", "RES ", "RET", "RETI", "RETN", "RL ", "RLA",
"RLC ", "RLCA", "RLD", "RR ", "RRA", "RRC ", "RRCA", "RRD", "RST ",
"SBC ", "SCF", "SET ", "SLA ", "SP", "SRA ", "SRL ", "SUB ", "V", "XOR ",
"Z"]
def convert_file(parsed_args):
""" Convert Zeus Z80 assembler file specified in zeus_file to the plain
text and print it to the output_file """
logger = logging.getLogger('convert_file')
process_string = False
strnum_lo = False, 0
tab = False
output = parsed_args.output_file
strnum = 0
cur_buffer = ""
cur_line = io.StringIO()
for cur_char in read_file(parsed_args.zeus_file):
if process_string:
cur_buffer += "0x%02X " % cur_char
if not cur_char: # End of string
process_string = False
strnum_lo = False, 0
cur_str = cur_line.getvalue()
print(cur_str, end="", file=output)
if parsed_args.include_code:
print(" "*(CODE_ALIGN_WIDTH-len(cur_str))+";",
"0x%04X " % strnum + cur_buffer, file=output)
else:
print(file=output)
continue
if tab:
print(" "*cur_char, end="", file=cur_line)
tab = False
continue
if cur_char == 0x0A:
tab = True
continue
if cur_char < ASM_FIRST_TOKEN: # Printable character
print(chr(cur_char), end="", file=cur_line)
continue
try:
print(ASM_META[cur_char-ASM_FIRST_TOKEN], end="", file=cur_line)
except IndexError:
logger.warning("Token not defined: 0x%02X (%d), at line %05d. "
"Skipped.", cur_char, cur_char, strnum)
else:
if not strnum_lo[0]:
strnum_lo = True, cur_char
else:
strnum = strnum_lo[1] + cur_char*256
if strnum == 0xFFFF: # End of file
print(file=output)
break
                cur_line = io.StringIO()
cur_buffer = ""
print("%05d" % strnum, end=" ", file=cur_line)
process_string = True
output.close()
def create_parser():
""" Parse command line arguments """
parser = argparse.ArgumentParser(
description="Zeus Z80 assembler files converter")
parser.add_argument(
'-v', '--verbose', help="Increase output verbosity",
action='store_true')
subparsers = parser.add_subparsers(help="Available commands")
subparsers.required = False
info_parser = subparsers.add_parser(
'info',
help="Show information about the specified Zeus Z80 assembler file")
info_parser.add_argument(
'zeus_file', metavar='zeus-file', type=argparse.FileType('rb', 0),
help="Input file with Zeus Z80 assembler (usually FILENAME.$C)")
info_parser.set_defaults(func=show_info)
convert_parser = subparsers.add_parser(
'convert', help="Convert Zeus Z80 assembler file to a plain text file")
convert_parser.add_argument(
'zeus_file', metavar='zeus-file', type=argparse.FileType('rb', 0),
help="Input file with Zeus Z80 assembler (usually FILENAME.$C)")
convert_parser.add_argument(
'output_file', metavar='output-file',
type=argparse.FileType('w'), help="Path to the output file")
convert_parser.add_argument(
'--include-code', dest='include_code',
action='store_true', help="Include original code in the output file")
convert_parser.set_defaults(func=convert_file)
return parser
def main():
"""Entry point"""
default_main(create_parser())
if __name__ == '__main__':
main()
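# Example invocations (editorial; the script name is a placeholder, the
# subcommands and flags are the ones defined in create_parser above):
#   python zeus2txt.py info 'SOURCE.$C'
#   python zeus2txt.py convert 'SOURCE.$C' listing.txt --include-code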
|
[
"io.StringIO",
"argparse.ArgumentParser",
"logging.getLogger",
"argparse.FileType"
] |
[((1812, 1845), 'logging.getLogger', 'logging.getLogger', (['"""convert_file"""'], {}), "('convert_file')\n", (1829, 1845), False, 'import logging\n'), ((2002, 2015), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2013, 2015), False, 'import io\n'), ((3867, 3940), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Zeus Z80 assembler files converter"""'}), "(description='Zeus Z80 assembler files converter')\n", (3890, 3940), False, 'import argparse\n'), ((4376, 4402), 'argparse.FileType', 'argparse.FileType', (['"""rb"""', '(0)'], {}), "('rb', 0)\n", (4393, 4402), False, 'import argparse\n'), ((4727, 4753), 'argparse.FileType', 'argparse.FileType', (['"""rb"""', '(0)'], {}), "('rb', 0)\n", (4744, 4753), False, 'import argparse\n'), ((4920, 4942), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (4937, 4942), False, 'import argparse\n'), ((3587, 3600), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3598, 3600), False, 'import io\n')]
|
from django import forms
class ContactForm(forms.Form):
nombre = forms.CharField(max_length=100)
    email = forms.CharField(max_length=100)
mensaje = forms.CharField(widget=forms.Textarea)
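# Hedged usage sketch (editorial, not part of the original module): binding and
# validating the form; the field values are made up.
#   form = ContactForm(data={'nombre': 'Ana', 'email': 'ana@example.com',
#                            'mensaje': 'Hola'})
#   assert form.is_valid()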
|
[
"django.forms.CharField"
] |
[((70, 101), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (85, 101), False, 'from django import forms\n'), ((114, 145), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (129, 145), False, 'from django import forms\n'), ((161, 199), 'django.forms.CharField', 'forms.CharField', ([], {'widget': 'forms.Textarea'}), '(widget=forms.Textarea)\n', (176, 199), False, 'from django import forms\n')]
|
from tinyalign import edit_distance, hamming_distance
import random
import pytest
STRING_PAIRS = [
('', ''),
('', 'A'),
('A', 'A'),
('AB', ''),
('AB', 'ABC'),
('TGAATCCC', 'CCTGAATC'),
('ANANAS', 'BANANA'),
('SISSI', 'MISSISSIPPI'),
('GGAATCCC', 'TGAGGGATAAATATTTAGAATTTAGTAGTAGTGTT'),
('TCTGTTCCCTCCCTGTCTCA', 'TTTTAGGAAATACGCC'),
('TGAGACACGCAACATGGGAAAGGCAAGGCACACAGGGGATAGG', 'AATTTATTTTATTGTGATTTTTTGGAGGTTTGGAAGCCACTAAGCTATACTGAGACACGCAACAGGGGAAAGGCAAGGCACA'),
('TCCATCTCATCCCTGCGTGTCCCATCTGTTCCCTCCCTGTCTCA', 'TTTTAGGAAATACGCCTGGTGGGGTTTGGAGTATAGTGAAAGATAGGTGAGTTGGTCGGGTG'),
('A', 'TCTGCTCCTGGCCCATGATCGTATAACTTTCAAATTT'),
('GCGCGGACT', 'TAAATCCTGG'),
]
def py_edit_distance(s, t):
"""
Pure-Python edit distance
"""
m = len(s)
n = len(t)
costs = list(range(m + 1))
for j in range(1, n + 1):
prev = costs[0]
costs[0] += 1
for i in range(1, m + 1):
c = min(
prev + int(s[i-1] != t[j-1]),
costs[i] + 1,
costs[i-1] + 1,
)
prev = costs[i]
costs[i] = c
return costs[-1]
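# Worked example (editorial): py_edit_distance('BA', 'AB') == 2, e.g. by
# substituting both characters; this matches the edit_distance('BA', 'AB') == 2
# assertion below.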
def random_string():
return ''.join(random.choice('AC') for _ in range(random.randint(0, 20)))
RANDOM_STRING_PAIRS = [(random_string(), random_string()) for _ in range(10000)]
def test_edit_distance():
assert edit_distance('', '') == 0
assert edit_distance('', 'A') == 1
assert edit_distance('A', 'B') == 1
assert edit_distance('A', 'A') == 0
assert edit_distance('A', 'AB') == 1
assert edit_distance('BA', 'AB') == 2
for s, t in STRING_PAIRS + RANDOM_STRING_PAIRS:
assert edit_distance(s, '') == len(s)
assert edit_distance('', s) == len(s)
assert edit_distance(s, t) == edit_distance(t, s)
assert edit_distance(s, t) == py_edit_distance(s, t)
def assert_banded(s, t, maxdiff):
banded_dist = edit_distance(s, t, maxdiff=maxdiff)
true_dist = edit_distance(s, t)
if true_dist > maxdiff:
assert banded_dist > maxdiff
else:
assert banded_dist == true_dist
def test_edit_distance_banded():
for maxdiff in range(5):
assert_banded('ABC', '', maxdiff)
for s, t in STRING_PAIRS:
assert_banded(s, '', maxdiff)
assert_banded('', s, maxdiff)
assert_banded(s, t, maxdiff)
assert_banded(t, s, maxdiff)
def test_hamming_distance():
assert hamming_distance('', '') == 0
assert hamming_distance('A', 'A') == 0
assert hamming_distance('HELLO', 'HELLO') == 0
assert hamming_distance('ABC', 'DEF') == 3
assert hamming_distance('ABCXDEF', 'ABCYDEF') == 1
def test_hamming_distance_incorrect_length():
with pytest.raises(IndexError):
hamming_distance('A', 'BC')
|
[
"random.randint",
"tinyalign.hamming_distance",
"random.choice",
"pytest.raises",
"tinyalign.edit_distance"
] |
[((1960, 1996), 'tinyalign.edit_distance', 'edit_distance', (['s', 't'], {'maxdiff': 'maxdiff'}), '(s, t, maxdiff=maxdiff)\n', (1973, 1996), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((2013, 2032), 'tinyalign.edit_distance', 'edit_distance', (['s', 't'], {}), '(s, t)\n', (2026, 2032), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((1414, 1435), 'tinyalign.edit_distance', 'edit_distance', (['""""""', '""""""'], {}), "('', '')\n", (1427, 1435), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((1452, 1474), 'tinyalign.edit_distance', 'edit_distance', (['""""""', '"""A"""'], {}), "('', 'A')\n", (1465, 1474), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((1491, 1514), 'tinyalign.edit_distance', 'edit_distance', (['"""A"""', '"""B"""'], {}), "('A', 'B')\n", (1504, 1514), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((1531, 1554), 'tinyalign.edit_distance', 'edit_distance', (['"""A"""', '"""A"""'], {}), "('A', 'A')\n", (1544, 1554), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((1571, 1595), 'tinyalign.edit_distance', 'edit_distance', (['"""A"""', '"""AB"""'], {}), "('A', 'AB')\n", (1584, 1595), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((1612, 1637), 'tinyalign.edit_distance', 'edit_distance', (['"""BA"""', '"""AB"""'], {}), "('BA', 'AB')\n", (1625, 1637), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((2496, 2520), 'tinyalign.hamming_distance', 'hamming_distance', (['""""""', '""""""'], {}), "('', '')\n", (2512, 2520), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((2537, 2563), 'tinyalign.hamming_distance', 'hamming_distance', (['"""A"""', '"""A"""'], {}), "('A', 'A')\n", (2553, 2563), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((2580, 2614), 'tinyalign.hamming_distance', 'hamming_distance', (['"""HELLO"""', '"""HELLO"""'], {}), "('HELLO', 'HELLO')\n", (2596, 2614), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((2631, 2661), 'tinyalign.hamming_distance', 'hamming_distance', (['"""ABC"""', '"""DEF"""'], {}), "('ABC', 'DEF')\n", (2647, 2661), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((2678, 2716), 'tinyalign.hamming_distance', 'hamming_distance', (['"""ABCXDEF"""', '"""ABCYDEF"""'], {}), "('ABCXDEF', 'ABCYDEF')\n", (2694, 2716), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((2779, 2804), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (2792, 2804), False, 'import pytest\n'), ((2814, 2841), 'tinyalign.hamming_distance', 'hamming_distance', (['"""A"""', '"""BC"""'], {}), "('A', 'BC')\n", (2830, 2841), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((1233, 1252), 'random.choice', 'random.choice', (['"""AC"""'], {}), "('AC')\n", (1246, 1252), False, 'import random\n'), ((1710, 1730), 'tinyalign.edit_distance', 'edit_distance', (['s', '""""""'], {}), "(s, '')\n", (1723, 1730), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((1756, 1776), 'tinyalign.edit_distance', 'edit_distance', (['""""""', 's'], {}), "('', s)\n", (1769, 1776), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((1802, 1821), 'tinyalign.edit_distance', 'edit_distance', (['s', 't'], {}), '(s, t)\n', (1815, 1821), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((1825, 1844), 'tinyalign.edit_distance', 'edit_distance', (['t', 's'], {}), '(t, s)\n', (1838, 1844), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((1860, 1879), 'tinyalign.edit_distance', 'edit_distance', (['s', 't'], {}), '(s, t)\n', (1873, 1879), False, 'from tinyalign import edit_distance, hamming_distance\n'), ((1268, 1289), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (1282, 1289), False, 'import random\n')]
|
import time
from datetime import datetime, timedelta
from ledger.util import F
from plenum.common.txn import TXN_TIME
from sovrin.persistence.identity_graph import IdentityGraph
def testMakeResultTxnTimeString():
oRecordData = {
F.seqNo.name: 1,
TXN_TIME: 'some-datetime'
}
assert TXN_TIME not in IdentityGraph.makeResult(0, oRecordData)
def testMakeResultTxnTimeDatetime():
dt = datetime.now()
oRecordData = {
F.seqNo.name: 1,
TXN_TIME: dt
}
assert IdentityGraph.makeResult(0, oRecordData)[TXN_TIME] == int(time.mktime(dt.timetuple()))
def testMakeResultTxnTimeDatetimeInvalidPast():
dt = datetime(1999, 1, 1)
oRecordData = {
F.seqNo.name: 1,
TXN_TIME: dt
}
assert TXN_TIME not in IdentityGraph.makeResult(0, oRecordData)
def testMakeResultTxnTimeDatetimeInvalidFuture():
dt = datetime.now() + timedelta(1)
oRecordData = {
F.seqNo.name: 1,
TXN_TIME: dt
}
assert TXN_TIME not in IdentityGraph.makeResult(0, oRecordData)
def testMakeResultTxnTimeNone():
oRecordData = {
F.seqNo.name: 1,
}
assert TXN_TIME not in IdentityGraph.makeResult(0, oRecordData)
|
[
"sovrin.persistence.identity_graph.IdentityGraph.makeResult",
"datetime.datetime.now",
"datetime.timedelta",
"datetime.datetime"
] |
[((418, 432), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (430, 432), False, 'from datetime import datetime\n'), ((662, 682), 'datetime.datetime', 'datetime', (['(1999)', '(1)', '(1)'], {}), '(1999, 1, 1)\n', (670, 682), False, 'from datetime import datetime\n'), ((1132, 1146), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1144, 1146), False, 'from datetime import datetime\n'), ((329, 369), 'sovrin.persistence.identity_graph.IdentityGraph.makeResult', 'IdentityGraph.makeResult', (['(0)', 'oRecordData'], {}), '(0, oRecordData)\n', (353, 369), False, 'from sovrin.persistence.identity_graph import IdentityGraph\n'), ((782, 822), 'sovrin.persistence.identity_graph.IdentityGraph.makeResult', 'IdentityGraph.makeResult', (['(0)', 'oRecordData'], {}), '(0, oRecordData)\n', (806, 822), False, 'from sovrin.persistence.identity_graph import IdentityGraph\n'), ((884, 898), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (896, 898), False, 'from datetime import datetime\n'), ((901, 913), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (910, 913), False, 'from datetime import datetime, timedelta\n'), ((1013, 1053), 'sovrin.persistence.identity_graph.IdentityGraph.makeResult', 'IdentityGraph.makeResult', (['(0)', 'oRecordData'], {}), '(0, oRecordData)\n', (1037, 1053), False, 'from sovrin.persistence.identity_graph import IdentityGraph\n'), ((1225, 1265), 'sovrin.persistence.identity_graph.IdentityGraph.makeResult', 'IdentityGraph.makeResult', (['(0)', 'oRecordData'], {}), '(0, oRecordData)\n', (1249, 1265), False, 'from sovrin.persistence.identity_graph import IdentityGraph\n'), ((516, 556), 'sovrin.persistence.identity_graph.IdentityGraph.makeResult', 'IdentityGraph.makeResult', (['(0)', 'oRecordData'], {}), '(0, oRecordData)\n', (540, 556), False, 'from sovrin.persistence.identity_graph import IdentityGraph\n')]
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nisqai.layer._product_ansatz import ProductAnsatz
def test_basic():
"""Tests that a ProductAnsatz can be instantiated."""
    # create a product ansatz on four qubits
ansatz = ProductAnsatz(4)
# check that the number of qubits is correct
assert ansatz.num_qubits == 4
def test_params():
"""Tests the params attribute has the correct shape."""
# create a product ansatz
ansatz = ProductAnsatz(5, gate_depth=4)
# test if the params attribute has the correct shape
assert ansatz.params.shape() == (5, 4)
def test_correct_small():
"""Creates a small ProductAnsatz and tests if the circuit is correct."""
# create a small product ansatz
ansatz = ProductAnsatz(1)
# correct string representation of program
correct = "DECLARE q_000_g_000 REAL[1]\nDECLARE q_000_g_001 REAL[1]\n" + \
"DECLARE q_000_g_002 REAL[1]\nRX(pi/2) 0\nRZ(q_000_g_000) 0\n" + \
"RX(pi/2) 0\nRZ(q_000_g_001) 0\nRX(pi/2) 0\nRZ(q_000_g_002) 0\n"
# make sure the program is correct
assert ansatz.circuit.__str__() == correct
if __name__ == "__main__":
test_basic()
test_params()
test_correct_small()
print("All tests for ProductAnsatz passed.")
|
[
"nisqai.layer._product_ansatz.ProductAnsatz"
] |
[((755, 771), 'nisqai.layer._product_ansatz.ProductAnsatz', 'ProductAnsatz', (['(4)'], {}), '(4)\n', (768, 771), False, 'from nisqai.layer._product_ansatz import ProductAnsatz\n'), ((980, 1010), 'nisqai.layer._product_ansatz.ProductAnsatz', 'ProductAnsatz', (['(5)'], {'gate_depth': '(4)'}), '(5, gate_depth=4)\n', (993, 1010), False, 'from nisqai.layer._product_ansatz import ProductAnsatz\n'), ((1266, 1282), 'nisqai.layer._product_ansatz.ProductAnsatz', 'ProductAnsatz', (['(1)'], {}), '(1)\n', (1279, 1282), False, 'from nisqai.layer._product_ansatz import ProductAnsatz\n')]
|
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.viewsets import ModelViewSet
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django_filters.rest_framework import DjangoFilterBackend
from royal_cloud_33498.settings import SECRET_KEY
from users.models import User
from users.authentication import ExpiringTokenAuthentication
from home.permissions import IsPostOrIsAuthenticated
from home.utility import auth_token, generateOTP, send_otp_sms
from users.serializers import ChangePasswordSerializer, CustomAuthTokenSerializer, OTPSerializer, UserProfileSerializer
class UserViewSet(ModelViewSet):
serializer_class = UserProfileSerializer
permission_classes = (IsPostOrIsAuthenticated,)
authentication_classes = [ExpiringTokenAuthentication]
queryset = User.objects.all()
filter_backends = [DjangoFilterBackend]
filterset_fields = ['name', 'last_name', 'phone', "email", "flagged"]
def get_queryset(self):
return super().get_queryset().filter(is_superuser=False)
# Create User and return Token + Profile
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
token, created = Token.objects.get_or_create(user=serializer.instance)
return Response({'token': token.key, 'user': serializer.data}, status=status.HTTP_201_CREATED, headers=headers)
# Update Profile
def partial_update(self, request, *args, **kwargs):
partial = True
instance = self.get_object()
serializer = self.get_serializer(instance, data=request.data, partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
return Response(serializer.data, status=status.HTTP_200_OK)
# Send a OTP
@action(detail=False, methods=['post'])
def otp(self, request):
try:
phone = request.data.get('phone')
user = User.objects.get(phone=phone)
except ObjectDoesNotExist:
return Response({"detail": "Invalid Phone Number - Does not exist"}, status=status.HTTP_400_BAD_REQUEST)
generateOTP(phone=phone, user=user)
return Response(status=status.HTTP_200_OK)
# Verify OTP
@action(detail=False, methods=['post'])
def verify(self, request):
serializer = OTPSerializer(data=request.data)
if serializer.is_valid():
user = serializer.validated_data['user']
token = auth_token(user)
serializer = UserProfileSerializer(user)
return Response({'token': token.key, 'user': serializer.data}, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Set password
@action(detail=False, methods=['post'], permission_classes=[IsAuthenticated])
def password(self, request):
serializer = ChangePasswordSerializer(data=request.data)
if serializer.is_valid():
user = request.user
user.set_password(serializer.validated_data['<PASSWORD>'])
user.save()
return Response({'detail': "Password Updated Successfully"}, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Login a User
@action(detail=False, methods=['post'])
def login(self, request, **kwargs):
serializer = CustomAuthTokenSerializer(data=request.data, context = {'request':request})
if serializer.is_valid():
user = serializer.validated_data['user']
token = auth_token(user)
serializer = UserProfileSerializer(user, context = {'request':request})
return Response({'token': token.key, 'user': serializer.data}, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Logout a User
@action(detail=False, methods=['post'])
def logout(self, request):
try:
request.user.auth_token.delete()
except (AttributeError, ObjectDoesNotExist):
return Response({'detail': 'Authentication Token Missing or Invalid'}, status=status.HTTP_401_UNAUTHORIZED)
return Response(status=status.HTTP_200_OK)
# Admin a User
@action(detail=False, methods=['post'])
def admin(self, request):
username = request.data.get('username')
email = request.data.get('email')
password = request.data.get('password')
key = request.data.get('key')
if key != SECRET_KEY:
return Response(status=status.HTTP_400_BAD_REQUEST)
User.objects.create_superuser(username, email, password)
return Response(status=status.HTTP_200_OK)
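# Hypothetical client calls (editorial; the URL prefix depends on how this
# ViewSet is registered with a router, so the paths are placeholders):
#   POST .../otp/     with body {"phone": "+15550100"}  -> sends an OTP
#   POST .../logout/  with a valid auth token            -> deletes the token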
|
[
"users.models.User.objects.get",
"users.serializers.OTPSerializer",
"home.utility.auth_token",
"rest_framework.authtoken.models.Token.objects.get_or_create",
"users.models.User.objects.create_superuser",
"users.serializers.ChangePasswordSerializer",
"home.utility.generateOTP",
"rest_framework.response.Response",
"rest_framework.decorators.action",
"users.serializers.UserProfileSerializer",
"users.serializers.CustomAuthTokenSerializer",
"users.models.User.objects.all"
] |
[((998, 1016), 'users.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (1014, 1016), False, 'from users.models import User\n'), ((2139, 2177), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)', 'methods': "['post']"}), "(detail=False, methods=['post'])\n", (2145, 2177), False, 'from rest_framework.decorators import action\n'), ((2584, 2622), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)', 'methods': "['post']"}), "(detail=False, methods=['post'])\n", (2590, 2622), False, 'from rest_framework.decorators import action\n'), ((3091, 3167), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)', 'methods': "['post']", 'permission_classes': '[IsAuthenticated]'}), "(detail=False, methods=['post'], permission_classes=[IsAuthenticated])\n", (3097, 3167), False, 'from rest_framework.decorators import action\n'), ((3631, 3669), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)', 'methods': "['post']"}), "(detail=False, methods=['post'])\n", (3637, 3669), False, 'from rest_framework.decorators import action\n'), ((4222, 4260), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)', 'methods': "['post']"}), "(detail=False, methods=['post'])\n", (4228, 4260), False, 'from rest_framework.decorators import action\n'), ((4599, 4637), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)', 'methods': "['post']"}), "(detail=False, methods=['post'])\n", (4605, 4637), False, 'from rest_framework.decorators import action\n'), ((1558, 1611), 'rest_framework.authtoken.models.Token.objects.get_or_create', 'Token.objects.get_or_create', ([], {'user': 'serializer.instance'}), '(user=serializer.instance)\n', (1585, 1611), False, 'from rest_framework.authtoken.models import Token\n'), ((1627, 1736), 'rest_framework.response.Response', 'Response', (["{'token': token.key, 'user': serializer.data}"], {'status': 'status.HTTP_201_CREATED', 'headers': 'headers'}), "({'token': token.key, 'user': serializer.data}, status=status.\n HTTP_201_CREATED, headers=headers)\n", (1635, 1736), False, 'from rest_framework.response import Response\n'), ((2063, 2115), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_200_OK'}), '(serializer.data, status=status.HTTP_200_OK)\n', (2071, 2115), False, 'from rest_framework.response import Response\n'), ((2474, 2509), 'home.utility.generateOTP', 'generateOTP', ([], {'phone': 'phone', 'user': 'user'}), '(phone=phone, user=user)\n', (2485, 2509), False, 'from home.utility import auth_token, generateOTP, send_otp_sms\n'), ((2525, 2560), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_200_OK'}), '(status=status.HTTP_200_OK)\n', (2533, 2560), False, 'from rest_framework.response import Response\n'), ((2675, 2707), 'users.serializers.OTPSerializer', 'OTPSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (2688, 2707), False, 'from users.serializers import ChangePasswordSerializer, CustomAuthTokenSerializer, OTPSerializer, UserProfileSerializer\n'), ((3002, 3065), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (3010, 3065), False, 'from rest_framework.response import Response\n'), ((3222, 3265), 'users.serializers.ChangePasswordSerializer', 'ChangePasswordSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (3246, 3265), False, 'from users.serializers import ChangePasswordSerializer, CustomAuthTokenSerializer, OTPSerializer, UserProfileSerializer\n'), ((3542, 3605), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (3550, 3605), False, 'from rest_framework.response import Response\n'), ((3731, 3805), 'users.serializers.CustomAuthTokenSerializer', 'CustomAuthTokenSerializer', ([], {'data': 'request.data', 'context': "{'request': request}"}), "(data=request.data, context={'request': request})\n", (3756, 3805), False, 'from users.serializers import ChangePasswordSerializer, CustomAuthTokenSerializer, OTPSerializer, UserProfileSerializer\n'), ((4132, 4195), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (4140, 4195), False, 'from rest_framework.response import Response\n'), ((4538, 4573), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_200_OK'}), '(status=status.HTTP_200_OK)\n', (4546, 4573), False, 'from rest_framework.response import Response\n'), ((4946, 5002), 'users.models.User.objects.create_superuser', 'User.objects.create_superuser', (['username', 'email', 'password'], {}), '(username, email, password)\n', (4975, 5002), False, 'from users.models import User\n'), ((5018, 5053), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_200_OK'}), '(status=status.HTTP_200_OK)\n', (5026, 5053), False, 'from rest_framework.response import Response\n'), ((2284, 2313), 'users.models.User.objects.get', 'User.objects.get', ([], {'phone': 'phone'}), '(phone=phone)\n', (2300, 2313), False, 'from users.models import User\n'), ((2815, 2831), 'home.utility.auth_token', 'auth_token', (['user'], {}), '(user)\n', (2825, 2831), False, 'from home.utility import auth_token, generateOTP, send_otp_sms\n'), ((2857, 2884), 'users.serializers.UserProfileSerializer', 'UserProfileSerializer', (['user'], {}), '(user)\n', (2878, 2884), False, 'from users.serializers import ChangePasswordSerializer, CustomAuthTokenSerializer, OTPSerializer, UserProfileSerializer\n'), ((2904, 2991), 'rest_framework.response.Response', 'Response', (["{'token': token.key, 'user': serializer.data}"], {'status': 'status.HTTP_200_OK'}), "({'token': token.key, 'user': serializer.data}, status=status.\n HTTP_200_OK)\n", (2912, 2991), False, 'from rest_framework.response import Response\n'), ((3446, 3531), 'rest_framework.response.Response', 'Response', (["{'detail': 'Password Updated Successfully'}"], {'status': 'status.HTTP_200_OK'}), "({'detail': 'Password Updated Successfully'}, status=status.HTTP_200_OK\n )\n", (3454, 3531), False, 'from rest_framework.response import Response\n'), ((3914, 3930), 'home.utility.auth_token', 'auth_token', (['user'], {}), '(user)\n', (3924, 3930), False, 'from home.utility import auth_token, generateOTP, send_otp_sms\n'), ((3956, 4013), 'users.serializers.UserProfileSerializer', 'UserProfileSerializer', (['user'], {'context': "{'request': request}"}), "(user, context={'request': request})\n", (3977, 4013), False, 'from users.serializers import ChangePasswordSerializer, CustomAuthTokenSerializer, OTPSerializer, UserProfileSerializer\n'), ((4034, 4121), 'rest_framework.response.Response', 'Response', (["{'token': token.key, 'user': serializer.data}"], {'status': 'status.HTTP_200_OK'}), "({'token': token.key, 'user': serializer.data}, status=status.\n HTTP_200_OK)\n", (4042, 4121), False, 'from rest_framework.response import Response\n'), ((4893, 4937), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(status=status.HTTP_400_BAD_REQUEST)\n', (4901, 4937), False, 'from rest_framework.response import Response\n'), ((2368, 2470), 'rest_framework.response.Response', 'Response', (["{'detail': 'Invalid Phone Number - Does not exist'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'detail': 'Invalid Phone Number - Does not exist'}, status=status\n .HTTP_400_BAD_REQUEST)\n", (2376, 2470), False, 'from rest_framework.response import Response\n'), ((4422, 4527), 'rest_framework.response.Response', 'Response', (["{'detail': 'Authentication Token Missing or Invalid'}"], {'status': 'status.HTTP_401_UNAUTHORIZED'}), "({'detail': 'Authentication Token Missing or Invalid'}, status=\n status.HTTP_401_UNAUTHORIZED)\n", (4430, 4527), False, 'from rest_framework.response import Response\n')]
|
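The extractor output above comes from a DRF user viewset that issues auth tokens (Token.objects.get_or_create) and answers with {'token': ..., 'user': ...} payloads. A hedged sketch of the client side of that contract — the host, URL prefix, and /users/login/ and /users/me/ routes are illustrative assumptions, not taken from the source:

import requests

BASE = "http://localhost:8000/api/v1"   # assumed host and URL prefix

def login(username, password):
    # Assumed route; the viewset above returns {"token": ..., "user": {...}} on success.
    resp = requests.post(f"{BASE}/users/login/", data={"username": username, "password": password})
    resp.raise_for_status()
    payload = resp.json()
    return payload["token"], payload["user"]

token, user = login("alice", "s3cret")
headers = {"Authorization": f"Token {token}"}   # DRF TokenAuthentication header format
print(requests.get(f"{BASE}/users/me/", headers=headers).status_code)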
from root.config.main import rAnk, mAster_rank, cOmm
from screws.freeze.main import FrozenOnly
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
class _3dCSCG_1Trace_Visualize(FrozenOnly):
"""The visualization property/component of standard forms."""
def __init__(self, tf):
self._tf_ = tf
self._freeze_self_()
def __call__(self, **kwargs):
"""When this object is called, we call the default visualizing method: ``tecplot``."""
return self.matplot(**kwargs)
def matplot(self, density=None, i=None,
plot_type='contourf',
colormap='RdBu',
num_color_bar_ticks=5):
"""
:param density:
:param i: Plot which trace elements?
:param plot_type: Plot type?
:param colormap:
:param num_color_bar_ticks:
:return:
"""
if density is None:
if plot_type == 'quiver':
density = 500
elif plot_type == 'contourf':
density = 10000
else:
raise NotImplementedError(f'3dCSCG 1Trace plot type={plot_type} is not implemented.')
else:
pass
mesh = self._tf_.mesh
density = int(np.sqrt(density/mesh.trace.elements.GLOBAL_num)) + 1
xi = eta = sigma = np.linspace(-1, 1, density)
xyz, v = self._tf_.reconstruct(xi, eta, sigma, i=i)
xyz = cOmm.gather(xyz, root=mAster_rank)
v = cOmm.gather(v, root=mAster_rank)
if rAnk != mAster_rank: return
XYZ = list()
Vx = list()
Vy = list()
Vz = list()
for _xyz_, _v_ in zip(xyz, v):
for i in _xyz_:
xyz_i = _xyz_[i]
vx_i, vy_i, vz_i = _v_[i]
XYZ.append(xyz_i)
Vx.append(vx_i)
Vy.append(vy_i)
Vz.append(vz_i)
Vx = np.array(Vx)
Vy = np.array(Vy)
Vz = np.array(Vz)
del xyz, v
if plot_type == 'quiver': # ================= quiver plot =====================================
fig = plt.figure(figsize=(8, 7))
ax = fig.add_subplot(111, projection='3d')
for i, xyz in enumerate(XYZ):
x, y, z = xyz
ax.plot_surface(x, y, z, color=(0.5,0.5,0.5,0.5))
vx = Vx[i]
vy = Vy[i]
vz = Vz[i]
ax.quiver(x, y, z, vx, vy, vz, color='r', linewidth=0.5)
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_zlabel(r'$z$')
plt.show()
elif plot_type == 'contourf': # ================= contourf plot =====================================
cmap = getattr(cm, colormap)
fig = plt.figure(figsize=(15,6))
# x-component ----------------------------------------------------------
ax = fig.add_subplot(131, projection='3d')
ax.view_init(45, 60)
MAX = np.max(Vx)
MIN = np.min(Vx)
if MAX == MIN:
MAX += 0.0001
bounds = MAX - MIN
Vx = Vx - MIN
Vx = Vx / bounds
ticks = np.linspace(MAX, MIN, num_color_bar_ticks)
for i, xyz in enumerate(XYZ):
x, y, z = xyz
v = Vx[i]
ax.plot_surface(x, y, z, facecolors=cmap(v))
mappable = cm.ScalarMappable(cmap=cmap)
mappable.set_array(np.array(ticks))
cb = plt.colorbar(mappable, ax=ax, # ticks=np.linspace(0,1,num_ticks),
shrink=1, aspect=20,# extend='min',
orientation='vertical', )
cb.ax.tick_params() # labelsize=13.5)
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_zlabel(r'$z$')
plt.title('x-component')
# y-component -------------------------------------------------------------
ax = fig.add_subplot(132, projection='3d')
ax.view_init(45, 60)
MAX = np.max(Vy)
MIN = np.min(Vy)
if MAX == MIN:
MAX += 0.0001
bounds = MAX - MIN
Vy = Vy - MIN
Vy = Vy / bounds
ticks = np.linspace(MAX, MIN, num_color_bar_ticks)
for i, xyz in enumerate(XYZ):
x, y, z = xyz
v = Vy[i]
ax.plot_surface(x, y, z, facecolors=cmap(v))
mappable = cm.ScalarMappable(cmap=cmap)
mappable.set_array(np.array(ticks))
cb = plt.colorbar(mappable, ax=ax, # ticks=np.linspace(0,1,num_ticks),
shrink=1, aspect=20, # extend='min',
orientation='vertical', )
cb.ax.tick_params() # labelsize=13.5)
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_zlabel(r'$z$')
plt.title('y-component')
# z-component -------------------------------------------------------
ax = fig.add_subplot(133, projection='3d')
ax.view_init(45, 60)
MAX = np.max(Vz)
MIN = np.min(Vz)
if MAX == MIN:
MAX += 0.0001
bounds = MAX - MIN
Vz = Vz - MIN
Vz = Vz / bounds
ticks = np.linspace(MAX, MIN, num_color_bar_ticks)
for i, xyz in enumerate(XYZ):
x, y, z = xyz
v = Vz[i]
ax.plot_surface(x, y, z, facecolors=cmap(v))
mappable = cm.ScalarMappable(cmap=cmap)
mappable.set_array(np.array(ticks))
cb = plt.colorbar(mappable, ax=ax, # ticks=np.linspace(0,1,num_ticks),
shrink=1, aspect=20, # extend='min',
orientation='vertical', )
cb.ax.tick_params() # labelsize=13.5)
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_zlabel(r'$z$')
plt.title('z-component')
plt.show()
else:
raise NotImplementedError(f'3dCSCG 1Trace plot type={plot_type} is not implemented.')
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.cm.ScalarMappable",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.array",
"numpy.min",
"numpy.linspace",
"root.config.main.cOmm.gather",
"numpy.sqrt"
] |
[((1384, 1411), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'density'], {}), '(-1, 1, density)\n', (1395, 1411), True, 'import numpy as np\n'), ((1486, 1520), 'root.config.main.cOmm.gather', 'cOmm.gather', (['xyz'], {'root': 'mAster_rank'}), '(xyz, root=mAster_rank)\n', (1497, 1520), False, 'from root.config.main import rAnk, mAster_rank, cOmm\n'), ((1533, 1565), 'root.config.main.cOmm.gather', 'cOmm.gather', (['v'], {'root': 'mAster_rank'}), '(v, root=mAster_rank)\n', (1544, 1565), False, 'from root.config.main import rAnk, mAster_rank, cOmm\n'), ((1974, 1986), 'numpy.array', 'np.array', (['Vx'], {}), '(Vx)\n', (1982, 1986), True, 'import numpy as np\n'), ((2000, 2012), 'numpy.array', 'np.array', (['Vy'], {}), '(Vy)\n', (2008, 2012), True, 'import numpy as np\n'), ((2026, 2038), 'numpy.array', 'np.array', (['Vz'], {}), '(Vz)\n', (2034, 2038), True, 'import numpy as np\n'), ((2181, 2207), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 7)'}), '(figsize=(8, 7))\n', (2191, 2207), True, 'import matplotlib.pyplot as plt\n'), ((2672, 2682), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2680, 2682), True, 'import matplotlib.pyplot as plt\n'), ((1304, 1353), 'numpy.sqrt', 'np.sqrt', (['(density / mesh.trace.elements.GLOBAL_num)'], {}), '(density / mesh.trace.elements.GLOBAL_num)\n', (1311, 1353), True, 'import numpy as np\n'), ((2855, 2882), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 6)'}), '(figsize=(15, 6))\n', (2865, 2882), True, 'import matplotlib.pyplot as plt\n'), ((3074, 3084), 'numpy.max', 'np.max', (['Vx'], {}), '(Vx)\n', (3080, 3084), True, 'import numpy as np\n'), ((3103, 3113), 'numpy.min', 'np.min', (['Vx'], {}), '(Vx)\n', (3109, 3113), True, 'import numpy as np\n'), ((3277, 3319), 'numpy.linspace', 'np.linspace', (['MAX', 'MIN', 'num_color_bar_ticks'], {}), '(MAX, MIN, num_color_bar_ticks)\n', (3288, 3319), True, 'import numpy as np\n'), ((3504, 3532), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap'}), '(cmap=cmap)\n', (3521, 3532), False, 'from matplotlib import cm\n'), ((3598, 3672), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['mappable'], {'ax': 'ax', 'shrink': '(1)', 'aspect': '(20)', 'orientation': '"""vertical"""'}), "(mappable, ax=ax, shrink=1, aspect=20, orientation='vertical')\n", (3610, 3672), True, 'import matplotlib.pyplot as plt\n'), ((3951, 3975), 'matplotlib.pyplot.title', 'plt.title', (['"""x-component"""'], {}), "('x-component')\n", (3960, 3975), True, 'import matplotlib.pyplot as plt\n'), ((4171, 4181), 'numpy.max', 'np.max', (['Vy'], {}), '(Vy)\n', (4177, 4181), True, 'import numpy as np\n'), ((4200, 4210), 'numpy.min', 'np.min', (['Vy'], {}), '(Vy)\n', (4206, 4210), True, 'import numpy as np\n'), ((4374, 4416), 'numpy.linspace', 'np.linspace', (['MAX', 'MIN', 'num_color_bar_ticks'], {}), '(MAX, MIN, num_color_bar_ticks)\n', (4385, 4416), True, 'import numpy as np\n'), ((4601, 4629), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap'}), '(cmap=cmap)\n', (4618, 4629), False, 'from matplotlib import cm\n'), ((4695, 4769), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['mappable'], {'ax': 'ax', 'shrink': '(1)', 'aspect': '(20)', 'orientation': '"""vertical"""'}), "(mappable, ax=ax, shrink=1, aspect=20, orientation='vertical')\n", (4707, 4769), True, 'import matplotlib.pyplot as plt\n'), ((5051, 5075), 'matplotlib.pyplot.title', 'plt.title', (['"""y-component"""'], {}), "('y-component')\n", (5060, 5075), True, 'import matplotlib.pyplot as plt\n'), ((5265, 5275), 'numpy.max', 'np.max', (['Vz'], {}), '(Vz)\n', (5271, 5275), True, 'import numpy as np\n'), ((5294, 5304), 'numpy.min', 'np.min', (['Vz'], {}), '(Vz)\n', (5300, 5304), True, 'import numpy as np\n'), ((5468, 5510), 'numpy.linspace', 'np.linspace', (['MAX', 'MIN', 'num_color_bar_ticks'], {}), '(MAX, MIN, num_color_bar_ticks)\n', (5479, 5510), True, 'import numpy as np\n'), ((5695, 5723), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap'}), '(cmap=cmap)\n', (5712, 5723), False, 'from matplotlib import cm\n'), ((5789, 5863), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['mappable'], {'ax': 'ax', 'shrink': '(1)', 'aspect': '(20)', 'orientation': '"""vertical"""'}), "(mappable, ax=ax, shrink=1, aspect=20, orientation='vertical')\n", (5801, 5863), True, 'import matplotlib.pyplot as plt\n'), ((6145, 6169), 'matplotlib.pyplot.title', 'plt.title', (['"""z-component"""'], {}), "('z-component')\n", (6154, 6169), True, 'import matplotlib.pyplot as plt\n'), ((6183, 6193), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6191, 6193), True, 'import matplotlib.pyplot as plt\n'), ((3564, 3579), 'numpy.array', 'np.array', (['ticks'], {}), '(ticks)\n', (3572, 3579), True, 'import numpy as np\n'), ((4661, 4676), 'numpy.array', 'np.array', (['ticks'], {}), '(ticks)\n', (4669, 4676), True, 'import numpy as np\n'), ((5755, 5770), 'numpy.array', 'np.array', (['ticks'], {}), '(ticks)\n', (5763, 5770), True, 'import numpy as np\n')]
|
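The contourf branch above normalizes each velocity component to [0, 1] before mapping it through a colormap, then attaches a color bar via a ScalarMappable that carries the original value range. A minimal standalone sketch of that same pattern, using synthetic data rather than the CSCG mesh:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

x, y = np.meshgrid(np.linspace(0, 1, 30), np.linspace(0, 1, 30))
z = np.zeros_like(x)
v = np.sin(2 * np.pi * x) * np.cos(2 * np.pi * y)   # synthetic field to color by

cmap = cm.RdBu
vmin, vmax = v.min(), v.max()
normed = (v - vmin) / ((vmax - vmin) or 1.0)        # guard the flat-field case

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x, y, z, facecolors=cmap(normed))
mappable = cm.ScalarMappable(cmap=cmap)
mappable.set_array(np.linspace(vmin, vmax, 5))     # color bar ticks span the true range
fig.colorbar(mappable, ax=ax, shrink=1, aspect=20)
plt.show()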
"""
Topic handler definition
"""
import os
from distutils.util import strtobool
from topics.utils import TopicHandler
from .handler import handler
EXAMPLE_HANDLER = TopicHandler(
handle=handler,
topic="/example",
enabled=strtobool(os.environ.get("EXAMPLE_STREAMING", "false")),
)
|
[
"os.environ.get"
] |
[((250, 294), 'os.environ.get', 'os.environ.get', (['"""EXAMPLE_STREAMING"""', '"""false"""'], {}), "('EXAMPLE_STREAMING', 'false')\n", (264, 294), False, 'import os\n')]
|
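For reference, strtobool accepts the usual truthy/falsy spellings ("y", "yes", "t", "true", "on", "1" and their negatives) and returns 1 or 0, so the enabled flag above ends up as an int rather than a bool. Note that distutils (and strtobool with it) was removed from the standard library in Python 3.12, so newer code usually inlines the mapping. A quick check:

from distutils.util import strtobool

assert strtobool("true") == 1 and strtobool("False") == 0
assert bool(strtobool("on")) is True
# strtobool("maybe") raises ValueError - unrecognized values are not silently false.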
import itertools
class Solution:
def permute(self, nums: [int]) -> [[int]]:
return list(itertools.permutations(nums))
|
[
"itertools.permutations"
] |
[((100, 128), 'itertools.permutations', 'itertools.permutations', (['nums'], {}), '(nums)\n', (122, 128), False, 'import itertools\n')]
|
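itertools.permutations yields tuples in lexicographic order relative to the input positions, so the list(...) wrapper above actually returns a list of tuples, not a list of lists:

import itertools

print(list(itertools.permutations([1, 2, 3])))
# [(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)]
# If genuine inner lists are required: [list(p) for p in itertools.permutations(nums)]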
# package org.apache.helix.util
#from org.apache.helix.util import *
#from java.util import Arrays
#from java.util import HashMap
#from java.util import Map
#from java.util.regex import Matcher
#from java.util.regex import Pattern
#from org.apache.log4j import Logger
from org.apache.helix.util.logger import get_logger
from org.apache.helix.util.UserExceptions import IllegalArgumentException
import re
class StringTemplate:
"""
Java modifiers:
private static
Type:
Logger
"""
# LOG = Logger.getLogger(StringTemplate.class)
LOG = get_logger(__name__)
"""
Java modifiers:
static
Type:
Pattern
"""
def __init__(self):
self.templateMap = {}
self.pattern = re.compile("({.+?})")
def addEntry(self, type, numKeys, template):
"""
Returns void
Parameters:
type: EnumnumKeys: inttemplate: String
"""
if not self.templateMap.__contains__(type):
self.templateMap.__setitem__(type, {})
# self.templateMap.__setitem__(type, HashMap<Integer, String>())
# import pdb; pdb.set_trace()
# dzhang:may need to pass in name from the caller
self.LOG.trace("Add template for type: " + str(type) + ", arguments: " + str(numKeys) + ", template: " + template)
self.templateMap.get(type).__setitem__(numKeys, template)
def instantiate(self, type, keys):
"""
Returns String
Parameters:
type: Enumkeys: String
"""
        if keys is None:
keys = []
# keys = new String[] {}
# String
template = None
if self.templateMap.__contains__(type):
template = self.templateMap.get(type).get(len(keys))
# String
result = None
        if template is not None:
result = template
# Matcher
matches = self.pattern.findall(template)
# int
count = 0
# while (matcher.find():
for var in matches:
# String
# var = matcher.group()
result = result.replace(var, keys[count])
                count += 1
if result == None or result.find('{') > -1 or result.find('}') > -1:
# String
errMsg = "Unable to instantiate template: " + template + " using keys: " + keys
# errMsg = "Unable to instantiate template: " + template + " using keys: " + Arrays.toString(keys)
self.LOG.error(errMsg)
raise IllegalArgumentException(errMsg)
return result
|
[
"org.apache.helix.util.UserExceptions.IllegalArgumentException",
"re.compile",
"org.apache.helix.util.logger.get_logger"
] |
[((574, 594), 'org.apache.helix.util.logger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (584, 594), False, 'from org.apache.helix.util.logger import get_logger\n'), ((753, 774), 're.compile', 're.compile', (['"""({.+?})"""'], {}), "('({.+?})')\n", (763, 774), False, 'import re\n'), ((2577, 2609), 'org.apache.helix.util.UserExceptions.IllegalArgumentException', 'IllegalArgumentException', (['errMsg'], {}), '(errMsg)\n', (2601, 2609), False, 'from org.apache.helix.util.UserExceptions import IllegalArgumentException\n')]
|
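A hedged usage sketch for the class above, assuming the helix-port modules it imports resolve; the enum-like type is stood in for by a plain string key, and the template text is made up:

tmpl = StringTemplate()
tmpl.addEntry("ADDRESS", 2, "/{cluster}/instances/{instance}")
path = tmpl.instantiate("ADDRESS", ["mycluster", "node-1"])
print(path)  # /mycluster/instances/node-1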
from django import forms
class AddDocuments(forms.Form):
doc = forms.FileField(required=True)
description = forms.CharField(label='Description', max_length=100,
widget=forms.TextInput(
attrs={'placeholder': 'Enter Description'}))
def __init__(self, *args, **kwargs):
super(AddDocuments, self).__init__(*args, **kwargs)
self.fields['doc'].widget.attrs.update({'accept': '.pdf,.doc,.docx,.ppt,.pptx,.xls,.xlsx'})
class AddVideos(forms.Form):
vid = forms.FileField(required=True)
description = forms.CharField(label='Description', max_length=100,
widget=forms.TextInput(
attrs={'placeholder': 'Enter Description'}))
def __init__(self, *args, **kwargs):
super(AddVideos, self).__init__(*args, **kwargs)
self.fields['vid'].widget.attrs.update({'accept': '.mp4,.3gp,.mpg,.mkv,.amv'})
|
[
"django.forms.TextInput",
"django.forms.FileField"
] |
[((69, 99), 'django.forms.FileField', 'forms.FileField', ([], {'required': '(True)'}), '(required=True)\n', (84, 99), False, 'from django import forms\n'), ((555, 585), 'django.forms.FileField', 'forms.FileField', ([], {'required': '(True)'}), '(required=True)\n', (570, 585), False, 'from django import forms\n'), ((212, 271), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'Enter Description'}"}), "(attrs={'placeholder': 'Enter Description'})\n", (227, 271), False, 'from django import forms\n'), ((698, 757), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'Enter Description'}"}), "(attrs={'placeholder': 'Enter Description'})\n", (713, 757), False, 'from django import forms\n')]
|
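A hedged sketch of validating the upload form above in a view or test; the file payload uses Django's in-memory upload class, and Django settings must already be configured:

from django.core.files.uploadedfile import SimpleUploadedFile

data = {'description': 'Quarterly report'}
files = {'doc': SimpleUploadedFile('report.pdf', b'%PDF-1.4 ...')}
form = AddDocuments(data, files)
if form.is_valid():
    uploaded = form.cleaned_data['doc']   # an UploadedFile ready to hand to storage
else:
    print(form.errors)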
# GridGain Community Edition Licensing
# Copyright 2019 GridGain Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License") modified with Commons Clause
# Restriction; you may not use this file except in compliance with the License. You may obtain a
# copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
# Commons Clause Restriction
#
# The Software is provided to you by the Licensor under the License, as defined below, subject to
# the following condition.
#
# Without limiting other conditions in the License, the grant of rights under the License will not
# include, and the License does not grant to you, the right to Sell the Software.
# For purposes of the foregoing, “Sell” means practicing any or all of the rights granted to you
# under the License to provide to third parties, for a fee or other consideration (including without
# limitation fees for hosting or consulting/ support services related to the Software), a product or
# service whose value derives, entirely or substantially, from the functionality of the Software.
# Any license notice or attribution required by the License must also include this Commons Clause
# License Condition notice.
#
# For purposes of the clause above, the “Licensor” is Copyright 2019 GridGain Systems, Inc.,
# the “License” is the Apache License, Version 2.0, and the Software is the GridGain Community
# Edition software provided with this notice.
from pyignite import Client
from pyignite.datatypes.cache_config import CacheMode
from pyignite.datatypes.prop_codes import *
from pyignite.exceptions import SocketError
nodes = [
('127.0.0.1', 10800),
('127.0.0.1', 10801),
('127.0.0.1', 10802),
]
client = Client(timeout=4.0)
client.connect(nodes)
print('Connected to {}'.format(client))
my_cache = client.get_or_create_cache({
PROP_NAME: 'my_cache',
PROP_CACHE_MODE: CacheMode.REPLICATED,
})
my_cache.put('test_key', 0)
# abstract main loop
while True:
try:
# do the work
test_value = my_cache.get('test_key')
my_cache.put('test_key', test_value + 1)
except (OSError, SocketError) as e:
# recover from error (repeat last command, check data
        # consistency or just continue - depends on the task)
print('Error: {}'.format(e))
print('Last value: {}'.format(my_cache.get('test_key')))
print('Reconnected to {}'.format(client))
# Connected to 127.0.0.1:10800
# Error: [Errno 104] Connection reset by peer
# Last value: 6999
# Reconnected to 127.0.0.1:10801
# Error: Socket connection broken.
# Last value: 12302
# Reconnected to 127.0.0.1:10802
# Error: [Errno 111] Connection refused
# Traceback (most recent call last):
# ...
# pyignite.exceptions.ReconnectError: Can not reconnect: out of nodes
|
[
"pyignite.Client"
] |
[((2069, 2088), 'pyignite.Client', 'Client', ([], {'timeout': '(4.0)'}), '(timeout=4.0)\n', (2075, 2088), False, 'from pyignite import Client\n')]
|
import sympy as sy
from sympy.physics import mechanics as mc
import numpy as np
from sympy import sympify, nsimplify
from forward_kinematics import forward
from sympy import Integral, Matrix, pi, pprint
def Inverse_kin(T0_4, T0_3, T0_2, T0_1, X):
    # Builds the manipulator Jacobian used for inverse (velocity) kinematics
f=T0_4[:3,3]
J_half=f.jacobian(X)
# J_otherhalf=T0_1[:3,2].row_join(T0_2[:3,2].row_join(T0_3[:3,2].row_join(T0_4[:3,2].row_join(T0_5[:3,2]))))
J_otherhalf=T0_1[:3,2].row_join(T0_2[:3,2].row_join(T0_3[:3,2].row_join(T0_4[:3,2])))
J=J_half.col_join(J_otherhalf)
J=nsimplify(J,tolerance=1e-3,rational=True)
# print(J)
return J
if __name__ == "__main__" :
R, theta, alpha, a, d, theta1, theta2, theta3, theta4, theta5, d1, d2, d3, d4, d5 = sy.symbols('R, theta, alpha, a, d, theta1, theta2, theta3, theta4, theta5, d1, d2, d3, d4, d5')
pi=np.pi
X = [theta1, theta2, theta3, theta4]
# Solution 0 0 550
X_sub = [0,0,0,0]
T0_4, T0_3, T0_2, T0_1 = forward()
T0_f=T0_4.subs({theta1:X_sub[0],theta2:X_sub[1],theta3:X_sub[2],theta4:X_sub[3], d1:150, d2:0, d3:0, d4:400})
f_x, f_y, f_z = T0_f[0,3], T0_f[1,3], T0_f[2,3]
print(f'Locations : {f_x}, {f_y}, {f_z}')
print('Location calculated from input theta value it is validated using thetas.Using location values we validate joint angles')
J = Inverse_kin(T0_4, T0_3, T0_2, T0_1, X)
J_val=J.subs({theta1:X_sub[0],theta2:X_sub[1],theta3:X_sub[2],theta4:X_sub[3], d1:150, d2:0, d3:0, d4:400})
J_val= nsimplify(J_val,tolerance=1e-3,rational=True)
J_val=np.array(J_val,dtype='float')
# print(f'Jacobian for joint angles: {X_sub}')
# pprint(J_val)
J_inv=np.linalg.pinv(J_val)
J_inv= nsimplify(J_inv,tolerance=1e-3,rational=True)
print("Inverse kinematics Validation")
print(f'Location of end effector {[f_x, f_y, f_z, 0, 0, 0]}')
pos = np.matrix([f_x, f_y, f_z, 0, 0, 0])
# pos = np.matrix([0, 0, -150, 0, 0, 0])
    j_a = (J_inv @ pos.T) * pi
print('Joint Angles')
print(f'Theta1 : {j_a[0][0].flatten()}')
print(f'Theta2 : {j_a[1][0].flatten()}')
print(f'Theta3 : {j_a[2][0].flatten()}')
print(f'Theta4 : {j_a[3][0].flatten()}')
print(f'Theta5 : [[0]]')
|
[
"sympy.symbols",
"forward_kinematics.forward",
"numpy.matrix",
"numpy.array",
"sympy.nsimplify",
"numpy.linalg.pinv"
] |
[((572, 616), 'sympy.nsimplify', 'nsimplify', (['J'], {'tolerance': '(0.001)', 'rational': '(True)'}), '(J, tolerance=0.001, rational=True)\n', (581, 616), False, 'from sympy import sympify, nsimplify\n'), ((760, 865), 'sympy.symbols', 'sy.symbols', (['"""R, theta, alpha, a, d, theta1, theta2, theta3, theta4, theta5, d1, d2, d3, d4, d5"""'], {}), "(\n 'R, theta, alpha, a, d, theta1, theta2, theta3, theta4, theta5, d1, d2, d3, d4, d5'\n )\n", (770, 865), True, 'import sympy as sy\n'), ((991, 1000), 'forward_kinematics.forward', 'forward', ([], {}), '()\n', (998, 1000), False, 'from forward_kinematics import forward\n'), ((1516, 1564), 'sympy.nsimplify', 'nsimplify', (['J_val'], {'tolerance': '(0.001)', 'rational': '(True)'}), '(J_val, tolerance=0.001, rational=True)\n', (1525, 1564), False, 'from sympy import sympify, nsimplify\n'), ((1572, 1602), 'numpy.array', 'np.array', (['J_val'], {'dtype': '"""float"""'}), "(J_val, dtype='float')\n", (1580, 1602), True, 'import numpy as np\n'), ((1688, 1709), 'numpy.linalg.pinv', 'np.linalg.pinv', (['J_val'], {}), '(J_val)\n', (1702, 1709), True, 'import numpy as np\n'), ((1721, 1769), 'sympy.nsimplify', 'nsimplify', (['J_inv'], {'tolerance': '(0.001)', 'rational': '(True)'}), '(J_inv, tolerance=0.001, rational=True)\n', (1730, 1769), False, 'from sympy import sympify, nsimplify\n'), ((1888, 1923), 'numpy.matrix', 'np.matrix', (['[f_x, f_y, f_z, 0, 0, 0]'], {}), '([f_x, f_y, f_z, 0, 0, 0])\n', (1897, 1923), True, 'import numpy as np\n')]
|
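The joint-rate recovery above hinges on the Moore-Penrose pseudoinverse: for a non-square Jacobian J, np.linalg.pinv gives the least-squares / minimum-norm solution of J dq = dx. A small numeric check, independent of this particular robot model (the Jacobian below is made up):

import numpy as np

J = np.array([[1.0, 0.0, 0.5, 0.0],
              [0.0, 1.0, 0.0, 0.5],
              [0.0, 0.0, 1.0, 1.0]])   # 3 task DOF, 4 joints
dx = np.array([0.1, -0.2, 0.05])          # desired end-effector velocity (linear part)

dq = np.linalg.pinv(J) @ dx              # minimum-norm joint rates
assert np.allclose(J @ dq, dx)               # exact here because J has full row rank
print(dq)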
import doublemetaphone
def match(value1, value2):
value1metaphone = doublemetaphone.doublemetaphone(value1)
value2metaphone = doublemetaphone.doublemetaphone(value2)
possibilities = [
value1metaphone[0] == value2metaphone[0],
value1metaphone[0] == value2metaphone[1],
value1metaphone[1] == value2metaphone[0],
value1metaphone[1] == value2metaphone[1] != ''
]
return 1.0 if True in possibilities else 0.0
|
[
"doublemetaphone.doublemetaphone"
] |
[((73, 112), 'doublemetaphone.doublemetaphone', 'doublemetaphone.doublemetaphone', (['value1'], {}), '(value1)\n', (104, 112), False, 'import doublemetaphone\n'), ((135, 174), 'doublemetaphone.doublemetaphone', 'doublemetaphone.doublemetaphone', (['value2'], {}), '(value2)\n', (166, 174), False, 'import doublemetaphone\n')]
|
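The matcher above treats two strings as phonetically equal when any primary/secondary Double Metaphone code pair coincides (requiring non-empty codes on the secondary-secondary comparison). The classic Smith/Smyth pair collapses to the same codes:

print(match("Smith", "Smyth"))   # 1.0 - both encode to ('SM0', 'XMT')
print(match("Smith", "Jones"))   # 0.0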
import builtins as __builtin__
import json
import os
import time
import torch
from models import get_iou_types
from utils import misc_util
from utils.coco_eval_util import CocoEvaluator
from utils.coco_util import get_coco_api_from_dataset
def overwrite_dict(org_dict, sub_dict):
for sub_key, sub_value in sub_dict.items():
if sub_key in org_dict:
if isinstance(sub_value, dict):
overwrite_dict(org_dict[sub_key], sub_value)
else:
org_dict[sub_key] = sub_value
else:
org_dict[sub_key] = sub_value
def overwrite_config(config, json_str):
overwrite_dict(config, json.loads(json_str))
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def init_distributed_mode(world_size=1, dist_url='env://'):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
rank = int(os.environ['RANK'])
world_size = int(os.environ['WORLD_SIZE'])
device_id = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
rank = int(os.environ['SLURM_PROCID'])
device_id = rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
return False, None
torch.cuda.set_device(device_id)
dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(rank, dist_url), flush=True)
torch.distributed.init_process_group(backend=dist_backend, init_method=dist_url,
world_size=world_size, rank=rank)
torch.distributed.barrier()
setup_for_distributed(rank == 0)
return True, [device_id]
def warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor):
def f(x):
if x >= warmup_iters:
return 1
alpha = float(x) / warmup_iters
return warmup_factor * (1 - alpha) + alpha
return torch.optim.lr_scheduler.LambdaLR(optimizer, f)
@torch.no_grad()
def evaluate(model, data_loader, device):
n_threads = torch.get_num_threads()
# FIXME remove this and make paste_masks_in_image run on the GPU
torch.set_num_threads(1)
cpu_device = torch.device('cpu')
model.eval()
metric_logger = misc_util.MetricLogger(delimiter=' ')
header = 'Test:'
coco = get_coco_api_from_dataset(data_loader.dataset)
iou_types = get_iou_types(model)
coco_evaluator = CocoEvaluator(coco, iou_types)
for id, (image, targets) in enumerate(metric_logger.log_every(data_loader, 100, header)):
image = list(img.to(device) for img in image)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
torch.cuda.synchronize()
model_time = time.time()
outputs = model(image)
outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
model_time = time.time() - model_time
# print(targets, outputs)
res = {target['image_id'].item(): output for target, output in zip(targets, outputs)}
evaluator_time = time.time()
coco_evaluator.update(res)
evaluator_time = time.time() - evaluator_time
metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)
# exit()
#
# if id > 30:
# break
# gather the stats from all processes
metric_logger.synchronize_between_processes()
# print('Averaged stats:', metric_logger)
coco_evaluator.synchronize_between_processes()
# accumulate predictions from all images
coco_evaluator.accumulate()
coco_evaluator.summarize()
torch.set_num_threads(n_threads)
return coco_evaluator
|
[
"utils.misc_util.MetricLogger",
"torch.cuda.synchronize",
"torch.distributed.init_process_group",
"json.loads",
"utils.coco_util.get_coco_api_from_dataset",
"models.get_iou_types",
"torch.distributed.barrier",
"time.time",
"torch.cuda.device_count",
"torch.set_num_threads",
"torch.optim.lr_scheduler.LambdaLR",
"torch.device",
"torch.cuda.set_device",
"utils.coco_eval_util.CocoEvaluator",
"torch.no_grad",
"torch.get_num_threads"
] |
[((2189, 2204), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2202, 2204), False, 'import torch\n'), ((1505, 1537), 'torch.cuda.set_device', 'torch.cuda.set_device', (['device_id'], {}), '(device_id)\n', (1526, 1537), False, 'import torch\n'), ((1649, 1768), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': 'dist_backend', 'init_method': 'dist_url', 'world_size': 'world_size', 'rank': 'rank'}), '(backend=dist_backend, init_method=\n dist_url, world_size=world_size, rank=rank)\n', (1685, 1768), False, 'import torch\n'), ((1809, 1836), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (1834, 1836), False, 'import torch\n'), ((2138, 2185), 'torch.optim.lr_scheduler.LambdaLR', 'torch.optim.lr_scheduler.LambdaLR', (['optimizer', 'f'], {}), '(optimizer, f)\n', (2171, 2185), False, 'import torch\n'), ((2263, 2286), 'torch.get_num_threads', 'torch.get_num_threads', ([], {}), '()\n', (2284, 2286), False, 'import torch\n'), ((2360, 2384), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (2381, 2384), False, 'import torch\n'), ((2402, 2421), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2414, 2421), False, 'import torch\n'), ((2459, 2497), 'utils.misc_util.MetricLogger', 'misc_util.MetricLogger', ([], {'delimiter': '""" """'}), "(delimiter=' ')\n", (2481, 2497), False, 'from utils import misc_util\n'), ((2530, 2576), 'utils.coco_util.get_coco_api_from_dataset', 'get_coco_api_from_dataset', (['data_loader.dataset'], {}), '(data_loader.dataset)\n', (2555, 2576), False, 'from utils.coco_util import get_coco_api_from_dataset\n'), ((2593, 2613), 'models.get_iou_types', 'get_iou_types', (['model'], {}), '(model)\n', (2606, 2613), False, 'from models import get_iou_types\n'), ((2635, 2665), 'utils.coco_eval_util.CocoEvaluator', 'CocoEvaluator', (['coco', 'iou_types'], {}), '(coco, iou_types)\n', (2648, 2665), False, 'from utils.coco_eval_util import CocoEvaluator\n'), ((3827, 3859), 'torch.set_num_threads', 'torch.set_num_threads', (['n_threads'], {}), '(n_threads)\n', (3848, 3859), False, 'import torch\n'), ((658, 678), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (668, 678), False, 'import json\n'), ((2900, 2924), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (2922, 2924), False, 'import torch\n'), ((2946, 2957), 'time.time', 'time.time', ([], {}), '()\n', (2955, 2957), False, 'import time\n'), ((3270, 3281), 'time.time', 'time.time', ([], {}), '()\n', (3279, 3281), False, 'import time\n'), ((3091, 3102), 'time.time', 'time.time', ([], {}), '()\n', (3100, 3102), False, 'import time\n'), ((3342, 3353), 'time.time', 'time.time', ([], {}), '()\n', (3351, 3353), False, 'import time\n'), ((1393, 1418), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1416, 1418), False, 'import torch\n')]
|
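The warmup factory above returns a LambdaLR whose multiplier ramps linearly from warmup_factor to 1 over warmup_iters steps. A hedged, self-contained sketch of driving it, assuming warmup_lr_scheduler from the record above is in scope:

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = warmup_lr_scheduler(optimizer, warmup_iters=100, warmup_factor=0.001)

for step in range(5):
    optimizer.step()     # optimizer first, scheduler second (PyTorch >= 1.1 ordering)
    scheduler.step()
    print(step, optimizer.param_groups[0]['lr'])   # multiplier ramps linearly toward 1 over 100 iters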
#!/usr/bin/env python
from configparser import ConfigParser
from sys import argv
from autofit.tools import edenise
def main(
root_directory
):
try:
config = ConfigParser()
config.read(
f"{root_directory}/eden.ini"
)
eden_dependencies = [
dependency.strip()
for dependency
in config.get(
"eden",
"eden_dependencies"
).split(",")
]
edenise.edenise(
root_directory=root_directory,
name=config.get("eden", "name"),
prefix=config.get("eden", "prefix"),
eden_prefix=config.get("eden", "eden_prefix"),
eden_dependencies=eden_dependencies,
should_rename_modules=config.get("eden", "should_rename_modules").lower().startswith("t"),
should_remove_type_annotations=config.get("eden", "should_remove_type_annotations").lower().startswith("t"),
)
except ValueError:
print("Usage: ./edenise.py root_directory")
exit(1)
if __name__ == "__main__":
main(
argv[1]
)
|
[
"configparser.ConfigParser"
] |
[((193, 207), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (205, 207), False, 'from configparser import ConfigParser\n')]
|
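The script above expects an eden.ini at the project root with an [eden] section; a hedged sketch of a minimal file that satisfies every get() call it makes (all values are made-up placeholders):

from configparser import ConfigParser

config = ConfigParser()
config['eden'] = {
    'name': 'autofit',
    'prefix': 'af',
    'eden_prefix': 'eden',
    'eden_dependencies': 'autoconf, autoarray',
    'should_rename_modules': 'true',
    'should_remove_type_annotations': 'false',
}
with open('eden.ini', 'w') as f:
    config.write(f)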
global sdc
try:
sdc.importLock()
import time
from datetime import datetime, timedelta
import sys
import os
sys.path.append(os.path.join(os.environ['SDC_DIST'], 'python-libs'))
import requests
finally:
sdc.importUnlock()
def get_interval():
return int(sdc.userParams['INTERVAL_IN_SECONDS'])
def get_now_with_delay():
return int(time.time()) - int(sdc.userParams['DELAY_IN_SECONDS'])
def to_timestamp(date):
epoch = datetime(1970, 1, 1)
return int((date - epoch).total_seconds())
def _filter(list_):
return list(filter(lambda x: bool(x), list_))
def csv_to_json(csv_data, last_timestamp):
if not str(csv_data).strip():
return []
results = _filter(csv_data.split('\r\n\r\n'))
data = []
for result in results:
rows = result.split('\r\n')
header = _filter(rows.pop(0).split(','))
for row in rows:
rec = dict(zip(header, _filter(row.split(','))))
rec['last_timestamp'] = last_timestamp
data.append(rec)
return data
entityName = ''
interval = timedelta(seconds=get_interval())
if sdc.lastOffsets.containsKey(entityName):
offset = int(float(sdc.lastOffsets.get(entityName)))
elif sdc.userParams['INITIAL_OFFSET']:
offset = to_timestamp(datetime.strptime(sdc.userParams['INITIAL_OFFSET'], '%d/%m/%Y %H:%M'))
else:
offset = to_timestamp(datetime.utcnow().replace(second=0, microsecond=0) - interval)
sdc.log.info('OFFSET: ' + str(offset))
N_REQUESTS_TRIES = 3
while True:
if sdc.isStopped():
break
now_with_delay = get_now_with_delay() - interval.total_seconds()
if offset > now_with_delay:
time.sleep(offset - now_with_delay)
start = int(offset)
stop = int(offset + interval.total_seconds())
session = requests.Session()
session.headers = sdc.userParams['HEADERS']
for i in range(1, N_REQUESTS_TRIES + 1):
try:
res = session.post(
sdc.userParams['URL'],
data=sdc.userParams['QUERY'].format(start, stop),
timeout=sdc.userParams['TIMEOUT']
)
res.raise_for_status()
except requests.HTTPError as e:
requests.post(sdc.userParams['MONITORING_URL'] + str(res.status_code))
sdc.log.error(str(e))
if i == N_REQUESTS_TRIES:
raise
time.sleep(2 ** i)
cur_batch = sdc.createBatch()
for obj in csv_to_json(res.text, int(offset)):
record = sdc.createRecord('record created ' + str(datetime.now()))
record.value = obj
cur_batch.add(record)
if cur_batch.size() == sdc.batchSize:
cur_batch.process(entityName, str(offset))
cur_batch = sdc.createBatch()
if sdc.isStopped():
break
cur_batch.process(entityName, str(offset))
offset += interval.total_seconds()
|
[
"requests.Session",
"datetime.datetime.now",
"time.time",
"time.sleep",
"datetime.datetime",
"datetime.datetime.strptime",
"datetime.datetime.utcnow",
"os.path.join"
] |
[((465, 485), 'datetime.datetime', 'datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (473, 485), False, 'from datetime import datetime, timedelta\n'), ((1803, 1821), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1819, 1821), False, 'import requests\n'), ((148, 199), 'os.path.join', 'os.path.join', (["os.environ['SDC_DIST']", '"""python-libs"""'], {}), "(os.environ['SDC_DIST'], 'python-libs')\n", (160, 199), False, 'import os\n'), ((1678, 1713), 'time.sleep', 'time.sleep', (['(offset - now_with_delay)'], {}), '(offset - now_with_delay)\n', (1688, 1713), False, 'import time\n'), ((372, 383), 'time.time', 'time.time', ([], {}), '()\n', (381, 383), False, 'import time\n'), ((1290, 1359), 'datetime.datetime.strptime', 'datetime.strptime', (["sdc.userParams['INITIAL_OFFSET']", '"""%d/%m/%Y %H:%M"""'], {}), "(sdc.userParams['INITIAL_OFFSET'], '%d/%m/%Y %H:%M')\n", (1307, 1359), False, 'from datetime import datetime, timedelta\n'), ((2393, 2411), 'time.sleep', 'time.sleep', (['(2 ** i)'], {}), '(2 ** i)\n', (2403, 2411), False, 'import time\n'), ((2556, 2570), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2568, 2570), False, 'from datetime import datetime, timedelta\n'), ((1393, 1410), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1408, 1410), False, 'from datetime import datetime, timedelta\n')]
|
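csv_to_json above splits the payload on blank-line-separated result blocks, zips each data row against its block's header, and stamps every record with last_timestamp. A standalone check of that parsing, assuming _filter and csv_to_json are lifted out of the StreamSets sandbox:

csv_data = 'host,value\r\nweb-1,42\r\nweb-2,17\r\n\r\n'
print(csv_to_json(csv_data, 1700000000))
# [{'host': 'web-1', 'value': '42', 'last_timestamp': 1700000000},
#  {'host': 'web-2', 'value': '17', 'last_timestamp': 1700000000}]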
from __future__ import print_function, division
import logging
from time import time
import numpy as np
from ...core.exceptions import IncompatibleAttribute
from ...core.util import Pointer, split_component_view
from ...utils import view_shape, stack_view, color2rgb
from ...clients.image_client import ImageClient
from ...clients.layer_artist import (LayerArtistBase,
ImageLayerBase, SubsetImageLayerBase)
from ginga.util import wcsmod
from ginga.misc import Bunch
wcsmod.use('astropy')
from ginga import AstroImage, BaseImage
class GingaClient(ImageClient):
def __init__(self, data, canvas=None, artist_container=None):
super(GingaClient, self).__init__(data, artist_container)
self._setup_ginga(canvas)
def _setup_ginga(self, canvas):
if canvas is None:
raise ValueError("GingaClient needs a canvas")
self._canvas = canvas
self._wcs = None
self._crosshair_id = '_crosshair'
def _new_rgb_layer(self, layer):
raise NotImplementedError()
def _new_subset_image_layer(self, layer):
return GingaSubsetImageLayer(layer, self._canvas)
def _new_image_layer(self, layer):
return GingaImageLayer(layer, self._canvas)
def _new_scatter_layer(self, layer):
raise NotImplementedError()
def _update_axis_labels(self):
pass
def set_cmap(self, cmap):
self._canvas.set_cmap(cmap)
def show_crosshairs(self, x, y):
self.clear_crosshairs()
c = self._canvas.viewer.getDrawClass('point')(x, y, 6, color='red',
style='plus')
self._canvas.add(c, tag=self._crosshair_id, redraw=True)
def clear_crosshairs(self):
try:
self._canvas.deleteObjectsByTag([self._crosshair_id], redraw=False)
except:
pass
class GingaLayerArtist(LayerArtistBase):
zorder = Pointer('_zorder')
visible = Pointer('_visible')
def __init__(self, layer, canvas):
super(GingaLayerArtist, self).__init__(layer)
self._canvas = canvas
self._visible = True
def redraw(self, whence=0):
self._canvas.redraw(whence=whence)
class GingaImageLayer(GingaLayerArtist, ImageLayerBase):
# unused by Ginga
cmap = None
norm = None
def __init__(self, layer, canvas):
super(GingaImageLayer, self).__init__(layer, canvas)
self._override_image = None
self._tag = "layer%s_%s" % (layer.label, time())
self._img = None # DataImage instance
self._enabled = True
@property
def visible(self):
return self._visible
@visible.setter
def visible(self, value):
if self._visible == value:
return
self._visible = value
if not value:
self.clear()
elif self._img:
self._canvas.set_image(self._img)
@property
def zorder(self):
return self._zorder
@zorder.setter
def zorder(self, value):
self._zorder = value
try:
canvas_img = self._canvas.getObjectByTag('_image')
canvas_img.set_zorder(value)
except KeyError:
# object does not yet exist on canvas
pass
def set_norm(self, **kwargs):
# NOP for ginga
pass
def clear_norm(self):
# NOP for ginga
pass
def override_image(self, image):
"""Temporarily show a different image"""
self._override_image = image
def clear_override(self):
self._override_image = None
def clear(self):
# remove previously added image
try:
self._canvas.deleteObjectsByTag(['_image'], redraw=False)
except:
pass
@property
def enabled(self):
return self._enabled
def update(self, view, transpose=False):
if not self.visible:
return
# update ginga model
comp, view = split_component_view(view)
if self._img is None:
self._img = DataImage(self.layer, comp, view, transpose)
self._canvas.set_image(self._img)
self._img.data = self.layer
self._img.component = comp
self._img.view = view
self._img.transpose = transpose
self._img.override_image = self._override_image
self.redraw()
class GingaSubsetImageLayer(GingaLayerArtist, SubsetImageLayerBase):
def __init__(self, layer, canvas):
super(GingaSubsetImageLayer, self).__init__(layer, canvas)
self._img = None
self._cimg = None
self._tag = "layer%s_%s" % (layer.label, time())
self._enabled = True
@property
def visible(self):
return self._visible
@property
def enabled(self):
return self._enabled
@visible.setter
def visible(self, value):
if value is self._visible:
return
self._visible = value
if not value:
self.clear()
elif self._cimg:
self._canvas.add(self._cimg, tag=self._tag, redraw=True)
@property
def zorder(self):
return self._zorder
@zorder.setter
def zorder(self, value):
self._zorder = value
try:
canvas_img = self._canvas.getObjectByTag(self._tag)
canvas_img.set_zorder(value)
except KeyError:
# object does not yet exist on canvas
pass
def clear(self):
try:
self._canvas.deleteObjectsByTag([self._tag], redraw=True)
except:
pass
def _update_ginga_models(self, view, transpose=False):
subset = self.layer
logging.getLogger(__name__).debug("View into subset %s is %s", self.layer, view)
_, view = split_component_view(view) # discard ComponentID
r, g, b = color2rgb(self.layer.style.color)
if self._img is None:
self._img = SubsetImage(subset, view)
if self._cimg is None:
# SubsetImages can't be added to canvases directly. Need
# to wrap into a ginga canvas type.
Image = self._canvas.getDrawClass('image')
self._cimg = Image(0, 0, self._img, alpha=0.5, flipy=False)
self._img.view = view
self._img.color = (r, g, b)
self._img.transpose = transpose
def _check_enabled(self):
"""
Sync the enabled/disabled status, based on whether
mask is computable
"""
self._enabled = True
try:
# the first pixel
view = tuple(0 for _ in self.layer.data.shape)
self.layer.to_mask(view)
except IncompatibleAttribute as exc:
self._enabled = False
self.disable_invalid_attributes(*exc.args)
return self._enabled
def _ensure_added(self):
""" Add artist to canvas if needed """
try:
self._canvas.getObjectByTag(self._tag)
except KeyError:
self._canvas.add(self._cimg, tag=self._tag, redraw=False)
def update(self, view, transpose=False):
self._check_enabled()
self._update_ginga_models(view, transpose)
if self._enabled and self._visible:
self._ensure_added()
else:
self.clear()
self.redraw(whence=0)
def forbidden(*args):
raise ValueError("Forbidden")
class DataImage(AstroImage.AstroImage):
"""
A Ginga image subclass to interface with Glue Data objects
"""
get_data = _get_data = copy_data = set_data = get_array = transfer = forbidden
def __init__(self, data, component, view, transpose=False,
override_image=None, **kwargs):
"""
Parameters
----------
data : glue.core.data.Data
The data to image
component : glue.core.data.ComponentID
The ComponentID in the data to image
view : numpy-style view
The view into the data to image. Must produce a 2D array
transpose : bool
Whether to transpose the view
override_image : numpy array (optional)
Whether to show override_image instead of the view into the data.
The override image must have the same shape as the 2D view into
the data.
kwargs : dict
Extra kwargs are passed to the superclass
"""
self.transpose = transpose
self.view = view
self.data = data
self.component = component
self.override_image = None
super(DataImage, self).__init__(**kwargs)
@property
def shape(self):
"""
The shape of the 2D view into the data
"""
result = view_shape(self.data.shape, self.view)
if self.transpose:
result = result[::-1]
return result
def _get_fast_data(self):
return self._slice((slice(None, None, 10), slice(None, None, 10)))
def _slice(self, view):
"""
Extract a view from the 2D image.
"""
if self.override_image is not None:
return self.override_image[view]
# Combining multiple views: First a 2D slice into an ND array, then
# the requested view from this slice
if self.transpose:
views = [self.view, 'transpose', view]
else:
views = [self.view, view]
view = stack_view(self.data.shape, *views)
return self.data[self.component, view]
class SubsetImage(BaseImage.BaseImage):
"""
A Ginga image subclass to interface with Glue subset objects
"""
get_data = _get_data = copy_data = set_data = get_array = transfer = forbidden
def __init__(self, subset, view, color=(0, 1, 0), transpose=False, **kwargs):
"""
Parameters
----------
subset : glue.core.subset.Subset
The subset to image
view : numpy-style view
The view into the subset to image. Must produce a 2D array
color : tuple of 3 floats in range [0, 1]
The color to image the subset as
transpose : bool
Whether to transpose the view
kwargs : dict
Extra kwargs are passed to the ginga superclass
"""
super(SubsetImage, self).__init__(**kwargs)
self.subset = subset
self.view = view
self.transpose = transpose
self.color = color
self.order = 'RGBA'
@property
def shape(self):
"""
Shape of the 2D view into the subset mask
"""
result = view_shape(self.subset.data.shape, self.view)
if self.transpose:
result = result[::-1]
return tuple(list(result) + [4]) # 4th dim is RGBA channels
def _rgb_from_mask(self, mask):
"""
Turn a boolean mask into a 4-channel RGBA image
"""
r, g, b = self.color
ones = mask * 0 + 255
alpha = mask * 127
result = np.dstack((ones * r, ones * g, ones * b, alpha)).astype(np.uint8)
return result
def _get_fast_data(self):
return self._slice((slice(None, None, 10), slice(None, None, 10)))
def _slice(self, view):
"""
Extract a view from the 2D subset mask.
"""
# Combining multiple views: First a 2D slice into an ND array, then
# the requested view from this slice
if self.transpose:
views = [self.view, 'transpose', view]
else:
views = [self.view, view]
view = stack_view(self.subset.data.shape, *views)
mask = self.subset.to_mask(view)
return self._rgb_from_mask(mask)
def _set_minmax(self):
# we already know the data bounds
self.minval = 0
self.maxval = 256
self.minval_noinf = self.minval
self.maxval_noinf = self.maxval
def get_scaled_cutout_wdht(self, x1, y1, x2, y2, new_wd, new_ht):
doit = getattr(self, '_doit', False)
self._doit = not doit
# default implementation if downsampling
if doit or new_wd <= (x2 - x1 + 1) or new_ht <= (y2 - y1 + 1):
return super(SubsetImage, self).get_scaled_cutout_wdht(x1, y1, x2, y2, new_wd, new_ht)
# if upsampling, prevent extra to_mask() computation
        # np.int was removed in NumPy 1.24; the builtin int behaves identically here
        x1, x2 = np.clip([x1, x2], 0, self.width - 2).astype(int)
        y1, y2 = np.clip([y1, y2], 0, self.height - 2).astype(int)
        result = self._slice(np.s_[y1:y2 + 1, x1:x2 + 1])
        yi = np.linspace(0, result.shape[0], new_ht).astype(int).reshape(-1, 1).clip(0, result.shape[0] - 1)
        xi = np.linspace(0, result.shape[1], new_wd).astype(int).reshape(1, -1).clip(0, result.shape[1] - 1)
yi, xi = [np.array(a) for a in np.broadcast_arrays(yi, xi)]
result = result[yi, xi]
scale_x = 1.0 * result.shape[1] / (x2 - x1 + 1)
scale_y = 1.0 * result.shape[0] / (y2 - y1 + 1)
return Bunch.Bunch(data=result, scale_x=scale_x, scale_y=scale_y)
|
[
"numpy.dstack",
"ginga.util.wcsmod.use",
"numpy.clip",
"time.time",
"numpy.array",
"ginga.misc.Bunch.Bunch",
"numpy.linspace",
"numpy.broadcast_arrays",
"logging.getLogger"
] |
[((508, 529), 'ginga.util.wcsmod.use', 'wcsmod.use', (['"""astropy"""'], {}), "('astropy')\n", (518, 529), False, 'from ginga.util import wcsmod\n'), ((12980, 13038), 'ginga.misc.Bunch.Bunch', 'Bunch.Bunch', ([], {'data': 'result', 'scale_x': 'scale_x', 'scale_y': 'scale_y'}), '(data=result, scale_x=scale_x, scale_y=scale_y)\n', (12991, 13038), False, 'from ginga.misc import Bunch\n'), ((12769, 12780), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (12777, 12780), True, 'import numpy as np\n'), ((2535, 2541), 'time.time', 'time', ([], {}), '()\n', (2539, 2541), False, 'from time import time\n'), ((4680, 4686), 'time.time', 'time', ([], {}), '()\n', (4684, 4686), False, 'from time import time\n'), ((5715, 5742), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (5732, 5742), False, 'import logging\n'), ((11013, 11061), 'numpy.dstack', 'np.dstack', (['(ones * r, ones * g, ones * b, alpha)'], {}), '((ones * r, ones * g, ones * b, alpha))\n', (11022, 11061), True, 'import numpy as np\n'), ((12345, 12381), 'numpy.clip', 'np.clip', (['[x1, x2]', '(0)', '(self.width - 2)'], {}), '([x1, x2], 0, self.width - 2)\n', (12352, 12381), True, 'import numpy as np\n'), ((12414, 12451), 'numpy.clip', 'np.clip', (['[y1, y2]', '(0)', '(self.height - 2)'], {}), '([y1, y2], 0, self.height - 2)\n', (12421, 12451), True, 'import numpy as np\n'), ((12790, 12817), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['yi', 'xi'], {}), '(yi, xi)\n', (12809, 12817), True, 'import numpy as np\n'), ((12540, 12579), 'numpy.linspace', 'np.linspace', (['(0)', 'result.shape[0]', 'new_ht'], {}), '(0, result.shape[0], new_ht)\n', (12551, 12579), True, 'import numpy as np\n'), ((12652, 12691), 'numpy.linspace', 'np.linspace', (['(0)', 'result.shape[1]', 'new_wd'], {}), '(0, result.shape[1], new_wd)\n', (12663, 12691), True, 'import numpy as np\n')]
|
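SubsetImage._rgb_from_mask above turns a boolean mask into a half-transparent, solid-color RGBA overlay. The same trick, standalone:

import numpy as np

def rgb_from_mask(mask, color=(0.0, 1.0, 0.0)):
    r, g, b = color
    ones = mask * 0 + 255       # full-intensity base channel everywhere
    alpha = mask * 127          # half-opaque where True, fully transparent where False
    return np.dstack((ones * r, ones * g, ones * b, alpha)).astype(np.uint8)

mask = np.array([[True, False], [False, True]])
overlay = rgb_from_mask(mask)
print(overlay.shape)    # (2, 2, 4)
print(overlay[0, 0])    # [  0 255   0 127] -> green at half alpha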
import codecs
import json
import os
import sys
sys.path.append("../")
sys.path.append("../transformers/src")
import copy
import gc
import torch
import pickle
from tqdm import tqdm
from utils import set_seed, get_task_data, random_split_train_and_dev
from dataset import PairSentenceClassificationDataset
from transformers import AutoTokenizer, BertConfig
from tokenizer import TransfomerTokenizer
from sklearn.model_selection import KFold
from model import Bert, TMPredictor
from finetune import SequenceClassificationTask
class TMDataset(PairSentenceClassificationDataset):
def __init__(self, *args, **kwargs):
super(TMDataset, self).__init__(*args, **kwargs)
self.categories_b = sorted(list(set([data['label_b'] for data in self.dataset])))
self.cat2id_b = dict(zip(self.categories_b, range(len(self.categories_b))))
self.id2cat_b = dict(zip(range(len(self.categories_b)), self.categories_b))
def _convert_to_transfomer_ids(self, bert_tokenizer):
features = []
for (index_, row_) in tqdm(enumerate(self.dataset)):
input_ids = bert_tokenizer.sequence_to_ids(row_['text_a'], row_['text_b'])
input_ids, input_mask, segment_ids, speaker_ids, e1_mask = input_ids
# input_a_length = self._get_input_length(row_['text_a'], bert_tokenizer)
# input_b_length = self._get_input_length(row_['text_b'], bert_tokenizer)
feature = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': segment_ids,
'speaker_ids': speaker_ids,
'e1_mask': e1_mask
}
if not self.is_test:
label_ids = self.cat2id[row_['label']]
label_ids_b = self.cat2id_b[row_['label_b']]
feature['label_ids'] = label_ids
feature['label_ids_b'] = label_ids_b
features.append(feature)
return features
def freeze_params(model):
"""Set requires_grad=False for each of model.parameters()"""
for par in model.parameters():
par.requires_grad = False
if __name__ == "__main__":
set_seed(2021)
model_name_or_path = "../pretrained_models/medbert"
max_seq_length = 256
data_df = get_task_data('../data/source_datasets/train.jsonl')
train_data_df, dev_data_df = random_split_train_and_dev(data_df, split_rate=0.8)
tm_train_dataset = TMDataset(train_data_df)
tm_dev_dataset = TMDataset(dev_data_df, categories=tm_train_dataset.categories)
bert_vocab = AutoTokenizer.from_pretrained(model_name_or_path)
bert_vocab.add_special_tokens({'additional_special_tokens': ["[unused1]", "[unused2]", "|"]})
tokenizer = TransfomerTokenizer(bert_vocab, max_seq_length)
if os.path.exists("../cache/tm_dataset.pkl"):
tm_dataset = pickle.load(open("../cache/tm_dataset.pkl", "rb"))
else:
tm_dataset = TMDataset(data_df)
tm_dataset.convert_to_ids(tokenizer)
pickle.dump(tm_dataset, open("../cache/tm_dataset.pkl", "wb"))
# kf = KFold(5, shuffle=True, random_state=42)
# examples = copy.deepcopy(tm_dataset.dataset)
# for fold_, (train_ids, dev_ids) in enumerate(kf.split(examples)):
# print(f"start fold{fold_}")
# tm_train_dataset.dataset = [examples[_idx] for _idx in train_ids]
# tm_dev_dataset.dataset = [examples[_idx] for _idx in dev_ids]
#
# bert_config = BertConfig.from_pretrained(model_name_or_path,
# num_labels=len(tm_train_dataset.cat2id))
# bert_config.gradient_checkpointing = True
# dl_module = Bert.from_pretrained(model_name_or_path,
# config=bert_config)
# # freeze_params(dl_module.bert.embeddings)
# param_optimizer = list(dl_module.named_parameters())
# param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
# no_decay = ["bias", "LayerNorm.weight"]
# optimizer_grouped_parameters = [
# {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
# 'weight_decay': 0.01},
# {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
# ]
#
# model = SequenceClassificationTask(dl_module, 'adamw', 'lsce', cuda_device=0, ema_decay=0.995)
#
# save_module_path = '../checkpoint/medbert2/'
# os.makedirs(save_module_path, exist_ok=True)
# model.fit(tm_train_dataset,
# tm_dev_dataset,
# lr=2e-5,
# epochs=1,
# batch_size=64,
# params=optimizer_grouped_parameters,
# evaluate_save=True,
# save_module_path=save_module_path + str(fold_) + '.pth'
# )
#
# del dl_module
# del model
# gc.collect()
# torch.cuda.empty_cache()
# predict
ensemble_dl_modules = []
bert_config = BertConfig.from_pretrained(model_name_or_path,
num_labels=len(tm_dataset.cat2id))
for file_name_ in os.listdir('../checkpoint/medbert2/'):
if file_name_.startswith('.'):
continue
ensemble_dl_module = Bert(config=bert_config)
ensemble_dl_module.load_state_dict(torch.load('../checkpoint/medbert2/' + file_name_))
ensemble_dl_module.eval()
ensemble_dl_module.to('cuda:0')
ensemble_dl_modules.append(ensemble_dl_module)
tm_predictor_instance = TMPredictor(ensemble_dl_modules, tokenizer, tm_dataset.cat2id)
submit_result = []
with codecs.open('../data/source_datasets/testa.txt', mode='r', encoding='utf8') as f:
        reader = f.readlines()
data_list = []
for dialogue_ in tqdm(reader):
dialogue_ = json.loads(dialogue_)
for content_idx_, contents_ in enumerate(dialogue_['dialog_info']):
terms_ = contents_['ner']
if len(terms_) != 0:
idx_ = 0
for _ner_idx, term_ in enumerate(terms_):
entity_ = dict()
entity_['dialogue'] = dialogue_
_text = dialogue_['dialog_info'][content_idx_]['text']
_text_list = list(_text)
_text_list.insert(term_['range'][0], '[unused1]')
_text_list.insert(term_['range'][1] + 1, '[unused2]')
_text = ''.join(_text_list)
if content_idx_ - 1 >= 0 and len(dialogue_['dialog_info'][content_idx_ - 1]) < 40:
forward_text = dialogue_['dialog_info'][content_idx_ - 1]['sender'] + ':' + \
dialogue_['dialog_info'][content_idx_ - 1]['text'] + ';'
else:
forward_text = ''
                        if contents_['sender'] == '医生':  # '医生' = doctor
if content_idx_ + 1 >= len(dialogue_['dialog_info']):
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_][
'sender'] + ':' + _text
else:
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_][
'sender'] + ':' + _text + ';'
temp_index = copy.deepcopy(content_idx_) + 1
speaker_flag = False
sen_counter = 0
while True:
if dialogue_['dialog_info'][temp_index]['sender'] == '患者':
sen_counter += 1
speaker_flag = True
entity_['text_a'] += dialogue_['dialog_info'][temp_index]['sender'] + ':' + \
dialogue_['dialog_info'][temp_index]['text'] + ';'
if sen_counter > 3:
break
temp_index += 1
if temp_index >= len(dialogue_['dialog_info']):
break
elif contents_['sender'] == '患者':
if content_idx_ + 1 >= len(dialogue_['dialog_info']):
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_][
'sender'] + ':' + _text
else:
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_][
'sender'] + ':' + _text + ';'
temp_index = copy.deepcopy(content_idx_) + 1
speaker_flag = False
sen_counter = 0
while True:
sen_counter += 1
speaker_flag = True
entity_['text_a'] += dialogue_['dialog_info'][temp_index]['sender'] + ':' + \
dialogue_['dialog_info'][temp_index]['text'] + ';'
if sen_counter > 3:
break
temp_index += 1
if temp_index >= len(dialogue_['dialog_info']):
break
else:
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_][
'sender'] + ':' + _text
if term_['name'] == 'undefined':
add_text = '|没有标准化'
else:
add_text = '|标准化为' + term_['name']
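                    # add_text is '|没有标准化' ("|not normalized") or '|标准化为<name>' ("|normalized as <name>")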
entity_['text_b'] = term_['mention'] + add_text
entity_['start_idx'] = term_['range'][0]
entity_['end_idx'] = term_['range'][1] - 1
entity_['label'] = term_['attr']
idx_ += 1
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx][
'attr'] = tm_predictor_instance.predict_one_sample([entity_['text_a'], entity_['text_b']])
submit_result.append(dialogue_)
with open('../CHIP-MDCFNPC_test.jsonl', 'w', encoding="utf-8") as output_data:
for json_content in submit_result:
output_data.write(json.dumps(json_content, ensure_ascii=False) + '\n')
|
[
"sys.path.append",
"tqdm.tqdm",
"copy.deepcopy",
"codecs.open",
"json.loads",
"utils.set_seed",
"torch.load",
"tokenizer.TransfomerTokenizer",
"os.path.exists",
"json.dumps",
"model.TMPredictor",
"transformers.AutoTokenizer.from_pretrained",
"utils.get_task_data",
"model.Bert",
"utils.random_split_train_and_dev",
"os.listdir"
] |
[((47, 69), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (62, 69), False, 'import sys\n'), ((70, 108), 'sys.path.append', 'sys.path.append', (['"""../transformers/src"""'], {}), "('../transformers/src')\n", (85, 108), False, 'import sys\n'), ((2179, 2193), 'utils.set_seed', 'set_seed', (['(2021)'], {}), '(2021)\n', (2187, 2193), False, 'from utils import set_seed, get_task_data, random_split_train_and_dev\n'), ((2290, 2342), 'utils.get_task_data', 'get_task_data', (['"""../data/source_datasets/train.jsonl"""'], {}), "('../data/source_datasets/train.jsonl')\n", (2303, 2342), False, 'from utils import set_seed, get_task_data, random_split_train_and_dev\n'), ((2376, 2427), 'utils.random_split_train_and_dev', 'random_split_train_and_dev', (['data_df'], {'split_rate': '(0.8)'}), '(data_df, split_rate=0.8)\n', (2402, 2427), False, 'from utils import set_seed, get_task_data, random_split_train_and_dev\n'), ((2577, 2626), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name_or_path'], {}), '(model_name_or_path)\n', (2606, 2626), False, 'from transformers import AutoTokenizer, BertConfig\n'), ((2741, 2788), 'tokenizer.TransfomerTokenizer', 'TransfomerTokenizer', (['bert_vocab', 'max_seq_length'], {}), '(bert_vocab, max_seq_length)\n', (2760, 2788), False, 'from tokenizer import TransfomerTokenizer\n'), ((2796, 2837), 'os.path.exists', 'os.path.exists', (['"""../cache/tm_dataset.pkl"""'], {}), "('../cache/tm_dataset.pkl')\n", (2810, 2837), False, 'import os\n'), ((5245, 5282), 'os.listdir', 'os.listdir', (['"""../checkpoint/medbert2/"""'], {}), "('../checkpoint/medbert2/')\n", (5255, 5282), False, 'import os\n'), ((5651, 5713), 'model.TMPredictor', 'TMPredictor', (['ensemble_dl_modules', 'tokenizer', 'tm_dataset.cat2id'], {}), '(ensemble_dl_modules, tokenizer, tm_dataset.cat2id)\n', (5662, 5713), False, 'from model import Bert, TMPredictor\n'), ((5902, 5914), 'tqdm.tqdm', 'tqdm', (['reader'], {}), '(reader)\n', (5906, 5914), False, 'from tqdm import tqdm\n'), ((5373, 5397), 'model.Bert', 'Bert', ([], {'config': 'bert_config'}), '(config=bert_config)\n', (5377, 5397), False, 'from model import Bert, TMPredictor\n'), ((5746, 5821), 'codecs.open', 'codecs.open', (['"""../data/source_datasets/testa.txt"""'], {'mode': '"""r"""', 'encoding': '"""utf8"""'}), "('../data/source_datasets/testa.txt', mode='r', encoding='utf8')\n", (5757, 5821), False, 'import codecs\n'), ((5936, 5957), 'json.loads', 'json.loads', (['dialogue_'], {}), '(dialogue_)\n', (5946, 5957), False, 'import json\n'), ((5441, 5491), 'torch.load', 'torch.load', (["('../checkpoint/medbert2/' + file_name_)"], {}), "('../checkpoint/medbert2/' + file_name_)\n", (5451, 5491), False, 'import torch\n'), ((10649, 10693), 'json.dumps', 'json.dumps', (['json_content'], {'ensure_ascii': '(False)'}), '(json_content, ensure_ascii=False)\n', (10659, 10693), False, 'import json\n'), ((7491, 7518), 'copy.deepcopy', 'copy.deepcopy', (['content_idx_'], {}), '(content_idx_)\n', (7504, 7518), False, 'import copy\n'), ((8874, 8901), 'copy.deepcopy', 'copy.deepcopy', (['content_idx_'], {}), '(content_idx_)\n', (8887, 8901), False, 'import copy\n')]
|
from pyspark.ml.feature import HashingTF, IDF, Tokenizer, StopWordsRemover, CountVectorizer, RegexTokenizer, Word2Vec
from pyspark.sql import SparkSession
from pyspark.ml.clustering import LDA
spark = SparkSession.builder.appName("tokenizer").getOrCreate()
# Loads data.
raw = spark.read.load("data/libguides_txt.parquet")
# Purge Null / None data
nullified = raw.na.drop()
guidesDFrame = nullified.select("guide_id","guide_name","page_id","page_name","words")
tokenizer = Tokenizer(inputCol="words", outputCol="word_tokens")
TokenizerData = tokenizer.transform(guidesDFrame)
guidesDFrame = TokenizerData
# Remove Stop Words
# https://spark.apache.org/docs/latest/ml-features.html#stopwordsremover
remover = StopWordsRemover(inputCol="word_tokens", outputCol="stop_removed")
my_sw = ["guide", "books", "database", "meta", "results", "https", "login", "updated", "david", "dillard", "use", "guide", "www", "search", "edu", "guides", "eric", "library", "find", "check", "doc", "check", "administration", "want", "ebsco", "http", "r", "f", "google", "com", "less", "tinyurl", "isbn", "call", "number", "date", "c", "paley", "temple", "research"]
sw = remover.loadDefaultStopWords("english")
remover.setStopWords(sw + my_sw)
StopWordsRemoverData = remover.transform(guidesDFrame)
guidesDFrame = StopWordsRemoverData
cv = CountVectorizer(inputCol="stop_removed", outputCol="CountVectorizer", vocabSize=1000, minDF=1.0, minTF=10.0)
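# Note: vocabSize caps the vocabulary at the 1000 highest-frequency terms; minTF=10.0
# ignores terms that occur fewer than 10 times within a given document, and minDF=1.0
# requires a term to appear in at least one document.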
transformer = cv.fit(guidesDFrame)
print(" ----------- ", transformer.vocabulary)
vocabulary = transformer.vocabulary
CountVectorizerData = transformer.transform(guidesDFrame)
guidesDFrame = CountVectorizerData
# Trains a LDA model.
lda = LDA(k=10, maxIter=15, featuresCol="CountVectorizer")
model = lda.fit(guidesDFrame)
print("------------")
model.vocabSize()
print("------------")
model.describeTopics(maxTermsPerTopic=20).show()
topics = model.describeTopics(maxTermsPerTopic=20).collect()
print(topics)
for topic in topics:
print(topic["topic"])
for word_id in topic["termIndices"]:
        print(word_id, " - ", vocabulary[word_id])
print("------------")
ldaData = model.transform(guidesDFrame)
guidesDFrame = ldaData
guidesDFrame.show(10)
|
[
"pyspark.ml.clustering.LDA",
"pyspark.ml.feature.StopWordsRemover",
"pyspark.ml.feature.CountVectorizer",
"pyspark.ml.feature.Tokenizer",
"pyspark.sql.SparkSession.builder.appName"
] |
[((479, 531), 'pyspark.ml.feature.Tokenizer', 'Tokenizer', ([], {'inputCol': '"""words"""', 'outputCol': '"""word_tokens"""'}), "(inputCol='words', outputCol='word_tokens')\n", (488, 531), False, 'from pyspark.ml.feature import HashingTF, IDF, Tokenizer, StopWordsRemover, CountVectorizer, RegexTokenizer, Word2Vec\n'), ((716, 782), 'pyspark.ml.feature.StopWordsRemover', 'StopWordsRemover', ([], {'inputCol': '"""word_tokens"""', 'outputCol': '"""stop_removed"""'}), "(inputCol='word_tokens', outputCol='stop_removed')\n", (732, 782), False, 'from pyspark.ml.feature import HashingTF, IDF, Tokenizer, StopWordsRemover, CountVectorizer, RegexTokenizer, Word2Vec\n'), ((1328, 1440), 'pyspark.ml.feature.CountVectorizer', 'CountVectorizer', ([], {'inputCol': '"""stop_removed"""', 'outputCol': '"""CountVectorizer"""', 'vocabSize': '(1000)', 'minDF': '(1.0)', 'minTF': '(10.0)'}), "(inputCol='stop_removed', outputCol='CountVectorizer',\n vocabSize=1000, minDF=1.0, minTF=10.0)\n", (1343, 1440), False, 'from pyspark.ml.feature import HashingTF, IDF, Tokenizer, StopWordsRemover, CountVectorizer, RegexTokenizer, Word2Vec\n'), ((1679, 1731), 'pyspark.ml.clustering.LDA', 'LDA', ([], {'k': '(10)', 'maxIter': '(15)', 'featuresCol': '"""CountVectorizer"""'}), "(k=10, maxIter=15, featuresCol='CountVectorizer')\n", (1682, 1731), False, 'from pyspark.ml.clustering import LDA\n'), ((202, 243), 'pyspark.sql.SparkSession.builder.appName', 'SparkSession.builder.appName', (['"""tokenizer"""'], {}), "('tokenizer')\n", (230, 243), False, 'from pyspark.sql import SparkSession\n')]
|
from unittest import TestCase, main as run_tests
from src.pyetllib.etllib import filtertruefalse
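# filtertruefalse(pred, iterable) is expected to return a pair of iterators
# (items matching pred, items not matching pred), as exercised below.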
class TestFilter(TestCase):
def test_filter_1(self):
data = list(range(5))
_, evens = filtertruefalse(
lambda x: bool(x % 2),
data
)
self.assertListEqual(list(evens), [0, 2, 4])
def test_filter_2(self):
data = [10, 5, 6, 11, 21, 2, 7]
digits, nondigits = filtertruefalse(
lambda x: 0 <= x <= 9,
data
)
self.assertSetEqual(set(digits), {5, 6, 7, 2})
self.assertSetEqual(set(nondigits), {10, 11, 21})
if __name__ == '__main__':
run_tests(verbosity=2)
|
[
"unittest.main",
"src.pyetllib.etllib.filtertruefalse"
] |
[((664, 686), 'unittest.main', 'run_tests', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (673, 686), True, 'from unittest import TestCase, main as run_tests\n'), ((439, 483), 'src.pyetllib.etllib.filtertruefalse', 'filtertruefalse', (['(lambda x: 0 <= x <= 9)', 'data'], {}), '(lambda x: 0 <= x <= 9, data)\n', (454, 483), False, 'from src.pyetllib.etllib import filtertruefalse\n')]
|
"""
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
import numpy as np
import tensorflow as tf
from nncf.tensorflow.layers.wrapper import NNCFWrapper
from nncf.tensorflow.layers.custom_objects import NNCF_QUANTIZATION_OPERATIONS
from nncf.tensorflow.quantization.quantizers import TFQuantizerSpec
from nncf.tensorflow.quantization.quantizers import QuantizerConfig
from nncf.tensorflow.quantization.quantizers import Quantizer
from nncf.tensorflow.quantization.utils import apply_overflow_fix_to_layer
from nncf.common.quantization.structs import QuantizationMode
DIM_SPLIT = 1000
EPS = 1e-6
def check_quantized_values_equals(y_train, y_val, eps, range_len, narrow_range):
diff = np.abs(y_val - y_train)
if np.max(diff) > eps:
        # If any point lands really close to the middle of a quant,
        # it can change its quant due to rounding error
outlayers = diff[diff > eps]
quant_len = range_len / (128 - (2 if narrow_range else 1))
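        # e.g. (illustrative): with range_len = 2.0 and narrow_range=False,
        # quant_len = 2.0 / 127 ~= 0.0157; an outlier differing by exactly one
        # quant_len simply hopped to the neighboring 7-bit quantization level.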
assert (np.abs(outlayers - quant_len) < eps).all(), 'Quants are completely different'
        assert False, 'Some values moved to the neighboring quant, possibly because these values land ' \
                      'really close to the middle of the quant. ' \
                      f'Position of values: {np.where(diff > eps)[0].tolist()}'
@pytest.mark.parametrize('bits,low,range_,narrow_range,ref',
[(7, -1, 2, False, -128 / 127),
(7, -2, 2, True, -2)], ids=['full_range', 'narrow_range'])
def test_min_adj(bits, low, range_, narrow_range, ref):
res = Quantizer._min_adj(bits, low, range_, narrow_range).numpy() # pylint: disable=protected-access
assert abs(res - ref) < EPS
def get_weights_for_overflow_issue_test(low, range_len, narrow_range, init_w_as_middle_points):
if init_w_as_middle_points:
quant_len = range_len / (128 - (2 if narrow_range else 1))
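        # Place each weight exactly halfway between adjacent 7-bit quantization
        # levels, the worst case for verifying the half-range (overflow) fix.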
if low > EPS:
# Range greater than zero
mid_points = [(i + 1 / 2) * quant_len for i in range(127)]
elif low + range_len < EPS:
# Range lower than zero
mid_points = [-(i + 1 / 2) * quant_len for i in range(127)]
else:
# Range with zero
min_adj = Quantizer._min_adj(7, low, range_len, narrow_range).numpy() # pylint: disable=protected-access
mid_points = [min_adj + (i + 1 / 2) * quant_len for i in range(127)]
new_w = mid_points * int(np.round(0.5 + DIM_SPLIT / 128))
new_w = tf.reshape(tf.constant(new_w[:DIM_SPLIT], dtype=tf.float32), (1, -1))
else:
new_w = tf.reshape(tf.constant(
np.linspace(low - 0.5, low + range_len + 0.5, DIM_SPLIT),
dtype=tf.float32), (1, -1))
return new_w
@pytest.mark.parametrize('per_ch', [False, True], ids=['per_tensor', 'per_channel'])
@pytest.mark.parametrize('init_w_as_middle_points', [False, True], ids=['', 'middle_points'])
@pytest.mark.parametrize('narrow_range', [False, True], ids=['full_range', 'narrow_range'])
class TestQuantizedWeightsEqualAfterFixApplied:
@pytest.mark.parametrize('signedness_to_force', [True, False], ids=['signed', 'unsigned'])
def test_symmetric_quantized_weights_equal_after_fix_applied(self, per_ch, signedness_to_force,
init_w_as_middle_points, narrow_range):
qconfig = QuantizerConfig(
num_bits=8,
mode=QuantizationMode.SYMMETRIC,
signedness_to_force=signedness_to_force,
per_channel=per_ch)
qspec = TFQuantizerSpec.from_config(
qconfig,
narrow_range=narrow_range,
half_range=True)
op_name = 'quantizer'
weight_attr = 'kernel'
layer = tf.keras.layers.Dense(DIM_SPLIT)
layer = NNCFWrapper(layer)
quantizer_cls = NNCF_QUANTIZATION_OPERATIONS.get(qspec.mode)
quantizer = quantizer_cls(op_name, qspec)
layer.registry_weight_operation(weight_attr, quantizer)
layer.build(1)
# Set layer weights
ref_signed_var = -1 if signedness_to_force else 0
ref_scale = 1
low = ref_scale * ref_signed_var
range_len = (1 - ref_signed_var) * ref_scale
new_w = get_weights_for_overflow_issue_test(low, range_len, narrow_range, init_w_as_middle_points)
layer.get_layer_weight(weight_attr).assign(new_w)
# Check quantizer weights
ops_weights = layer.ops_weights[op_name]
assert (ops_weights['scale_var'].numpy() == ref_scale).all()
assert (ops_weights['signed_var'].numpy() == ref_signed_var).all()
w_int7 = layer(tf.ones((1, 1))).numpy()
if init_w_as_middle_points:
quant_len = range_len / (128 - (2 if narrow_range else 1))
assert (np.abs(np.abs(w_int7 - new_w) - quant_len / 2) < 1e-6).all(), 'Middle points calculated incorrectly'
apply_overflow_fix_to_layer(layer, 'kernel', quantizer)
assert not quantizer._half_range # pylint: disable=protected-access
w_int8 = layer(tf.ones((1, 1))).numpy()
check_quantized_values_equals(w_int7, w_int8, EPS, range_len, narrow_range)
@pytest.mark.parametrize('low,range_len', [(-1, 2), (-5, 4), (3, 2)],
ids=['zero_in_range', 'max_less_than_zero', 'low_greater_than_zero'])
def test_asymmetric_quantized_weights_equal_after_fix_applied(self, low, range_len, per_ch,
init_w_as_middle_points, narrow_range):
qconfig = QuantizerConfig(
num_bits=8,
mode=QuantizationMode.ASYMMETRIC,
per_channel=per_ch)
qspec = TFQuantizerSpec.from_config(
qconfig,
narrow_range=narrow_range,
half_range=True)
op_name = 'quantizer'
weight_attr = 'kernel'
layer = tf.keras.layers.Dense(DIM_SPLIT)
layer = NNCFWrapper(layer)
quantizer_cls = NNCF_QUANTIZATION_OPERATIONS.get(qspec.mode)
quantizer = quantizer_cls(op_name, qspec)
layer.registry_weight_operation(weight_attr, quantizer)
layer.build(1)
# Set layer weights
new_w = get_weights_for_overflow_issue_test(low, range_len, narrow_range, init_w_as_middle_points)
layer.get_layer_weight(weight_attr).assign(new_w)
# Set quantizer weights
if per_ch:
low = tf.repeat(tf.constant([low], dtype=tf.float32), repeats=[DIM_SPLIT])
range_len = tf.repeat(tf.constant([range_len], dtype=tf.float32), repeats=[DIM_SPLIT])
ops_weights = layer.ops_weights[op_name]
ops_weights['input_low_var'].assign(low)
ops_weights['input_range_var'].assign(range_len)
w_int7 = layer(tf.ones((1, 1))).numpy()
if init_w_as_middle_points:
quant_len = range_len / (128 - (2 if narrow_range else 1))
assert (np.abs(np.abs(w_int7 - new_w) - quant_len / 2) < EPS).all(), 'Middle points calculated incorrectly'
apply_overflow_fix_to_layer(layer, 'kernel', quantizer)
assert not quantizer._half_range # pylint: disable=protected-access
w_int8 = layer(tf.ones((1, 1))).numpy()
check_quantized_values_equals(w_int7, w_int8, EPS, range_len, narrow_range)
|
[
"nncf.tensorflow.quantization.quantizers.QuantizerConfig",
"tensorflow.ones",
"numpy.abs",
"nncf.tensorflow.quantization.quantizers.TFQuantizerSpec.from_config",
"tensorflow.keras.layers.Dense",
"nncf.tensorflow.quantization.utils.apply_overflow_fix_to_layer",
"nncf.tensorflow.layers.custom_objects.NNCF_QUANTIZATION_OPERATIONS.get",
"tensorflow.constant",
"numpy.max",
"numpy.where",
"nncf.tensorflow.layers.wrapper.NNCFWrapper",
"numpy.linspace",
"pytest.mark.parametrize",
"numpy.round",
"nncf.tensorflow.quantization.quantizers.Quantizer._min_adj"
] |
[((1862, 2021), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bits,low,range_,narrow_range,ref"""', '[(7, -1, 2, False, -128 / 127), (7, -2, 2, True, -2)]'], {'ids': "['full_range', 'narrow_range']"}), "('bits,low,range_,narrow_range,ref', [(7, -1, 2, \n False, -128 / 127), (7, -2, 2, True, -2)], ids=['full_range',\n 'narrow_range'])\n", (1885, 2021), False, 'import pytest\n'), ((3321, 3408), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""per_ch"""', '[False, True]'], {'ids': "['per_tensor', 'per_channel']"}), "('per_ch', [False, True], ids=['per_tensor',\n 'per_channel'])\n", (3344, 3408), False, 'import pytest\n'), ((3406, 3502), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""init_w_as_middle_points"""', '[False, True]'], {'ids': "['', 'middle_points']"}), "('init_w_as_middle_points', [False, True], ids=['',\n 'middle_points'])\n", (3429, 3502), False, 'import pytest\n'), ((3500, 3594), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""narrow_range"""', '[False, True]'], {'ids': "['full_range', 'narrow_range']"}), "('narrow_range', [False, True], ids=['full_range',\n 'narrow_range'])\n", (3523, 3594), False, 'import pytest\n'), ((1230, 1253), 'numpy.abs', 'np.abs', (['(y_val - y_train)'], {}), '(y_val - y_train)\n', (1236, 1253), True, 'import numpy as np\n'), ((3644, 3737), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""signedness_to_force"""', '[True, False]'], {'ids': "['signed', 'unsigned']"}), "('signedness_to_force', [True, False], ids=['signed',\n 'unsigned'])\n", (3667, 3737), False, 'import pytest\n'), ((5767, 5910), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""low,range_len"""', '[(-1, 2), (-5, 4), (3, 2)]'], {'ids': "['zero_in_range', 'max_less_than_zero', 'low_greater_than_zero']"}), "('low,range_len', [(-1, 2), (-5, 4), (3, 2)], ids=[\n 'zero_in_range', 'max_less_than_zero', 'low_greater_than_zero'])\n", (5790, 5910), False, 'import pytest\n'), ((1261, 1273), 'numpy.max', 'np.max', (['diff'], {}), '(diff)\n', (1267, 1273), True, 'import numpy as np\n'), ((3957, 4082), 'nncf.tensorflow.quantization.quantizers.QuantizerConfig', 'QuantizerConfig', ([], {'num_bits': '(8)', 'mode': 'QuantizationMode.SYMMETRIC', 'signedness_to_force': 'signedness_to_force', 'per_channel': 'per_ch'}), '(num_bits=8, mode=QuantizationMode.SYMMETRIC,\n signedness_to_force=signedness_to_force, per_channel=per_ch)\n', (3972, 4082), False, 'from nncf.tensorflow.quantization.quantizers import QuantizerConfig\n'), ((4144, 4229), 'nncf.tensorflow.quantization.quantizers.TFQuantizerSpec.from_config', 'TFQuantizerSpec.from_config', (['qconfig'], {'narrow_range': 'narrow_range', 'half_range': '(True)'}), '(qconfig, narrow_range=narrow_range, half_range=True\n )\n', (4171, 4229), False, 'from nncf.tensorflow.quantization.quantizers import TFQuantizerSpec\n'), ((4340, 4372), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['DIM_SPLIT'], {}), '(DIM_SPLIT)\n', (4361, 4372), True, 'import tensorflow as tf\n'), ((4389, 4407), 'nncf.tensorflow.layers.wrapper.NNCFWrapper', 'NNCFWrapper', (['layer'], {}), '(layer)\n', (4400, 4407), False, 'from nncf.tensorflow.layers.wrapper import NNCFWrapper\n'), ((4432, 4476), 'nncf.tensorflow.layers.custom_objects.NNCF_QUANTIZATION_OPERATIONS.get', 'NNCF_QUANTIZATION_OPERATIONS.get', (['qspec.mode'], {}), '(qspec.mode)\n', (4464, 4476), False, 'from nncf.tensorflow.layers.custom_objects import NNCF_QUANTIZATION_OPERATIONS\n'), ((5496, 5551), 'nncf.tensorflow.quantization.utils.apply_overflow_fix_to_layer', 'apply_overflow_fix_to_layer', (['layer', '"""kernel"""', 'quantizer'], {}), "(layer, 'kernel', quantizer)\n", (5523, 5551), False, 'from nncf.tensorflow.quantization.utils import apply_overflow_fix_to_layer\n'), ((6155, 6241), 'nncf.tensorflow.quantization.quantizers.QuantizerConfig', 'QuantizerConfig', ([], {'num_bits': '(8)', 'mode': 'QuantizationMode.ASYMMETRIC', 'per_channel': 'per_ch'}), '(num_bits=8, mode=QuantizationMode.ASYMMETRIC, per_channel=\n per_ch)\n', (6170, 6241), False, 'from nncf.tensorflow.quantization.quantizers import QuantizerConfig\n'), ((6290, 6375), 'nncf.tensorflow.quantization.quantizers.TFQuantizerSpec.from_config', 'TFQuantizerSpec.from_config', (['qconfig'], {'narrow_range': 'narrow_range', 'half_range': '(True)'}), '(qconfig, narrow_range=narrow_range, half_range=True\n )\n', (6317, 6375), False, 'from nncf.tensorflow.quantization.quantizers import TFQuantizerSpec\n'), ((6486, 6518), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['DIM_SPLIT'], {}), '(DIM_SPLIT)\n', (6507, 6518), True, 'import tensorflow as tf\n'), ((6535, 6553), 'nncf.tensorflow.layers.wrapper.NNCFWrapper', 'NNCFWrapper', (['layer'], {}), '(layer)\n', (6546, 6553), False, 'from nncf.tensorflow.layers.wrapper import NNCFWrapper\n'), ((6578, 6622), 'nncf.tensorflow.layers.custom_objects.NNCF_QUANTIZATION_OPERATIONS.get', 'NNCF_QUANTIZATION_OPERATIONS.get', (['qspec.mode'], {}), '(qspec.mode)\n', (6610, 6622), False, 'from nncf.tensorflow.layers.custom_objects import NNCF_QUANTIZATION_OPERATIONS\n'), ((7633, 7688), 'nncf.tensorflow.quantization.utils.apply_overflow_fix_to_layer', 'apply_overflow_fix_to_layer', (['layer', '"""kernel"""', 'quantizer'], {}), "(layer, 'kernel', quantizer)\n", (7660, 7688), False, 'from nncf.tensorflow.quantization.utils import apply_overflow_fix_to_layer\n'), ((2130, 2181), 'nncf.tensorflow.quantization.quantizers.Quantizer._min_adj', 'Quantizer._min_adj', (['bits', 'low', 'range_', 'narrow_range'], {}), '(bits, low, range_, narrow_range)\n', (2148, 2181), False, 'from nncf.tensorflow.quantization.quantizers import Quantizer\n'), ((3065, 3113), 'tensorflow.constant', 'tf.constant', (['new_w[:DIM_SPLIT]'], {'dtype': 'tf.float32'}), '(new_w[:DIM_SPLIT], dtype=tf.float32)\n', (3076, 3113), True, 'import tensorflow as tf\n'), ((3005, 3036), 'numpy.round', 'np.round', (['(0.5 + DIM_SPLIT / 128)'], {}), '(0.5 + DIM_SPLIT / 128)\n', (3013, 3036), True, 'import numpy as np\n'), ((3194, 3250), 'numpy.linspace', 'np.linspace', (['(low - 0.5)', '(low + range_len + 0.5)', 'DIM_SPLIT'], {}), '(low - 0.5, low + range_len + 0.5, DIM_SPLIT)\n', (3205, 3250), True, 'import numpy as np\n'), ((7034, 7070), 'tensorflow.constant', 'tf.constant', (['[low]'], {'dtype': 'tf.float32'}), '([low], dtype=tf.float32)\n', (7045, 7070), True, 'import tensorflow as tf\n'), ((7127, 7169), 'tensorflow.constant', 'tf.constant', (['[range_len]'], {'dtype': 'tf.float32'}), '([range_len], dtype=tf.float32)\n', (7138, 7169), True, 'import tensorflow as tf\n'), ((1529, 1558), 'numpy.abs', 'np.abs', (['(outlayers - quant_len)'], {}), '(outlayers - quant_len)\n', (1535, 1558), True, 'import numpy as np\n'), ((5234, 5249), 'tensorflow.ones', 'tf.ones', (['(1, 1)'], {}), '((1, 1))\n', (5241, 5249), True, 'import tensorflow as tf\n'), ((5651, 5666), 'tensorflow.ones', 'tf.ones', (['(1, 1)'], {}), '((1, 1))\n', (5658, 5666), True, 'import tensorflow as tf\n'), ((7372, 7387), 'tensorflow.ones', 'tf.ones', (['(1, 1)'], {}), '((1, 1))\n', (7379, 7387), True, 'import tensorflow as tf\n'), ((7788, 7803), 'tensorflow.ones', 'tf.ones', (['(1, 1)'], {}), '((1, 1))\n', (7795, 7803), True, 'import tensorflow as tf\n'), ((2795, 2846), 'nncf.tensorflow.quantization.quantizers.Quantizer._min_adj', 'Quantizer._min_adj', (['(7)', 'low', 'range_len', 'narrow_range'], {}), '(7, low, range_len, narrow_range)\n', (2813, 2846), False, 'from nncf.tensorflow.quantization.quantizers import Quantizer\n'), ((1824, 1844), 'numpy.where', 'np.where', (['(diff > eps)'], {}), '(diff > eps)\n', (1832, 1844), True, 'import numpy as np\n'), ((5393, 5415), 'numpy.abs', 'np.abs', (['(w_int7 - new_w)'], {}), '(w_int7 - new_w)\n', (5399, 5415), True, 'import numpy as np\n'), ((7531, 7553), 'numpy.abs', 'np.abs', (['(w_int7 - new_w)'], {}), '(w_int7 - new_w)\n', (7537, 7553), True, 'import numpy as np\n')]
|
#!/usr/bin/python3
import sys
import json
import os.path
if len(sys.argv) != 2:
print("Usage: python3 export_users.py <db-path>")
sys.exit(1)
db_path = sys.argv[1]
with open(os.path.join(db_path, 'users.json')) as f:
users = json.loads(f.read())
for user in users:
print(f"{user['username']},{user['password']}")
|
[
"sys.exit"
] |
[((140, 151), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (148, 151), False, 'import sys\n')]
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import functools
from pyro.params.param_store import ( # noqa: F401
_MODULE_NAMESPACE_DIVIDER,
ParamStoreDict,
)
# the global pyro stack
_PYRO_STACK = []
# the global ParamStore
_PYRO_PARAM_STORE = ParamStoreDict()
class _DimAllocator:
"""
Dimension allocator for internal use by :class:`plate`.
There is a single global instance.
Note that dimensions are indexed from the right, e.g. -1, -2.
"""
def __init__(self):
self._stack = [] # in reverse orientation of log_prob.shape
def allocate(self, name, dim):
"""
Allocate a dimension to an :class:`plate` with given name.
Dim should be either None for automatic allocation or a negative
integer for manual allocation.
"""
if name in self._stack:
raise ValueError('duplicate plate "{}"'.format(name))
if dim is None:
# Automatically designate the rightmost available dim for allocation.
dim = -1
while -dim <= len(self._stack) and self._stack[-1 - dim] is not None:
dim -= 1
elif dim >= 0:
raise ValueError('Expected dim < 0 to index from the right, actual {}'.format(dim))
# Allocate the requested dimension.
while dim < -len(self._stack):
self._stack.append(None)
if self._stack[-1 - dim] is not None:
raise ValueError('\n'.join([
'at plates "{}" and "{}", collide at dim={}'.format(name, self._stack[-1 - dim], dim),
'\nTry moving the dim of one plate to the left, e.g. dim={}'.format(dim - 1)]))
self._stack[-1 - dim] = name
return dim
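    # Illustrative (hypothetical) usage: two plates allocated with dim=None
    # receive -1 and then -2; free()-ing the plate at -1 makes -1 reusable.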
def free(self, name, dim):
"""
Free a dimension.
"""
free_idx = -1 - dim # stack index to free
assert self._stack[free_idx] == name
self._stack[free_idx] = None
while self._stack and self._stack[-1] is None:
self._stack.pop()
# Handles placement of plate dimensions
_DIM_ALLOCATOR = _DimAllocator()
class _EnumAllocator:
"""
Dimension allocator for internal use by :func:`~pyro.poutine.markov`.
There is a single global instance.
Note that dimensions are indexed from the right, e.g. -1, -2.
Note that ids are simply nonnegative integers here.
"""
def set_first_available_dim(self, first_available_dim):
"""
Set the first available dim, which should be to the left of all
:class:`plate` dimensions, e.g. ``-1 - max_plate_nesting``. This should
be called once per program. In SVI this should be called only once per
(guide,model) pair.
"""
assert first_available_dim < 0, first_available_dim
self.next_available_dim = first_available_dim
self.next_available_id = 0
self.dim_to_id = {} # only the global ids
def allocate(self, scope_dims=None):
"""
Allocate a new recyclable dim and a unique id.
If ``scope_dims`` is None, this allocates a global enumeration dim
that will never be recycled. If ``scope_dims`` is specified, this
allocates a local enumeration dim that can be reused by at any other
local site whose scope excludes this site.
:param set scope_dims: An optional set of (negative integer)
local enumeration dims to avoid when allocating this dim.
:return: A pair ``(dim, id)``, where ``dim`` is a negative integer
and ``id`` is a nonnegative integer.
:rtype: tuple
"""
id_ = self.next_available_id
self.next_available_id += 1
dim = self.next_available_dim
if dim == -float('inf'):
raise ValueError("max_plate_nesting must be set to a finite value for parallel enumeration")
if scope_dims is None:
# allocate a new global dimension
self.next_available_dim -= 1
self.dim_to_id[dim] = id_
else:
# allocate a new local dimension
while dim in scope_dims:
dim -= 1
return dim, id_
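    # Illustrative: a global allocation returns the current next_available_dim
    # and shifts it one slot to the left; a local allocation instead reuses the
    # first dim (counting leftwards) that is absent from scope_dims.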
# Handles placement of enumeration dimensions
_ENUM_ALLOCATOR = _EnumAllocator()
class NonlocalExit(Exception):
"""
Exception for exiting nonlocally from poutine execution.
Used by poutine.EscapeMessenger to return site information.
"""
def __init__(self, site, *args, **kwargs):
"""
:param site: message at a pyro site constructor.
Just stores the input site.
"""
super().__init__(*args, **kwargs)
self.site = site
def reset_stack(self):
"""
Reset the state of the frames remaining in the stack.
Necessary for multiple re-executions in poutine.queue.
"""
for frame in reversed(_PYRO_STACK):
frame._reset()
if type(frame).__name__ == "BlockMessenger" and frame.hide_fn(self.site):
break
def default_process_message(msg):
"""
Default method for processing messages in inference.
:param msg: a message to be processed
:returns: None
"""
if msg["done"] or msg["is_observed"] or msg["value"] is not None:
msg["done"] = True
return msg
msg["value"] = msg["fn"](*msg["args"], **msg["kwargs"])
# after fn has been called, update msg to prevent it from being called again.
msg["done"] = True
def apply_stack(initial_msg):
"""
Execute the effect stack at a single site according to the following scheme:
1. For each ``Messenger`` in the stack from bottom to top,
execute ``Messenger._process_message`` with the message;
if the message field "stop" is True, stop;
otherwise, continue
2. Apply default behavior (``default_process_message``) to finish remaining site execution
3. For each ``Messenger`` in the stack from top to bottom,
execute ``_postprocess_message`` to update the message and internal messenger state with the site results
4. If the message field "continuation" is not ``None``, call it with the message
:param dict initial_msg: the starting version of the trace site
:returns: ``None``
"""
stack = _PYRO_STACK
# TODO check at runtime if stack is valid
# msg is used to pass information up and down the stack
msg = initial_msg
pointer = 0
    # process the message with each messenger from the top of the stack down,
    # stopping early once a messenger sets msg["stop"]
for frame in reversed(stack):
pointer = pointer + 1
frame._process_message(msg)
if msg["stop"]:
break
default_process_message(msg)
for frame in stack[-pointer:]:
frame._postprocess_message(msg)
cont = msg["continuation"]
if cont is not None:
cont(msg)
return None
def am_i_wrapped():
"""
Checks whether the current computation is wrapped in a poutine.
:returns: bool
"""
return len(_PYRO_STACK) > 0
def effectful(fn=None, type=None):
"""
:param fn: function or callable that performs an effectful computation
:param str type: the type label of the operation, e.g. `"sample"`
Wrapper for calling :func:`~pyro.poutine.runtime.apply_stack` to apply any active effects.
"""
if fn is None:
return functools.partial(effectful, type=type)
if getattr(fn, "_is_effectful", None):
return fn
assert type is not None, "must provide a type label for operation {}".format(fn)
assert type != "message", "cannot use 'message' as keyword"
@functools.wraps(fn)
def _fn(*args, **kwargs):
name = kwargs.pop("name", None)
infer = kwargs.pop("infer", {})
value = kwargs.pop("obs", None)
is_observed = value is not None
if not am_i_wrapped():
return fn(*args, **kwargs)
else:
msg = {
"type": type,
"name": name,
"fn": fn,
"is_observed": is_observed,
"args": args,
"kwargs": kwargs,
"value": value,
"scale": 1.0,
"mask": None,
"cond_indep_stack": (),
"done": False,
"stop": False,
"continuation": None,
"infer": infer,
}
# apply the stack and return its return value
apply_stack(msg)
return msg["value"]
_fn._is_effectful = True
return _fn
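# Illustrative (hypothetical) usage: exposing a sampling primitive so that
# active messengers can intercept it through apply_stack:
#
#     @effectful(type="sample")
#     def sample(fn, *args, **kwargs):
#         return fn(*args, **kwargs)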
def _inspect():
"""
EXPERIMENTAL Inspect the Pyro stack.
.. warning:: The format of the returned message may change at any time and
does not guarantee backwards compatibility.
:returns: A message with all effects applied.
:rtype: dict
"""
msg = {
"type": "inspect",
"name": "_pyro_inspect",
"fn": lambda: True,
"is_observed": False,
"args": (),
"kwargs": {},
"value": None,
"infer": {"_do_not_trace": True},
"scale": 1.0,
"mask": None,
"cond_indep_stack": (),
"done": False,
"stop": False,
"continuation": None,
}
apply_stack(msg)
return msg
def get_mask():
"""
Records the effects of enclosing ``poutine.mask`` handlers.
This is useful for avoiding expensive ``pyro.factor()`` computations during
prediction, when the log density need not be computed, e.g.::
def model():
# ...
if poutine.get_mask() is not False:
log_density = my_expensive_computation()
pyro.factor("foo", log_density)
# ...
:returns: The mask.
:rtype: None, bool, or torch.Tensor
"""
return _inspect()["mask"]
|
[
"functools.partial",
"pyro.params.param_store.ParamStoreDict",
"functools.wraps"
] |
[((299, 315), 'pyro.params.param_store.ParamStoreDict', 'ParamStoreDict', ([], {}), '()\n', (313, 315), False, 'from pyro.params.param_store import _MODULE_NAMESPACE_DIVIDER, ParamStoreDict\n'), ((7604, 7623), 'functools.wraps', 'functools.wraps', (['fn'], {}), '(fn)\n', (7619, 7623), False, 'import functools\n'), ((7346, 7385), 'functools.partial', 'functools.partial', (['effectful'], {'type': 'type'}), '(effectful, type=type)\n', (7363, 7385), False, 'import functools\n')]
|
import datetime
from django.db import models
from django.contrib.auth.models import User
from applications.globals.models import ExtraInfo, Staff, Faculty
from applications.academic_information.models import Student
from django.utils import timezone
class HostelManagementConstants:
ROOM_STATUS = (
('Booked', 'Booked'),
('CheckedIn', 'Checked In'),
('Available', 'Available'),
('UnderMaintenance', 'Under Maintenance'),
)
DAYS_OF_WEEK = (
(0, 'Monday'),
(1, 'Tuesday'),
(2, 'Wednesday'),
(3, 'Thursday'),
(4, 'Friday'),
(5, 'Saturday'),
(6, 'Sunday')
)
BOOKING_STATUS = (
("Confirmed" , 'Confirmed'),
("Pending" , 'Pending'),
("Rejected" , 'Rejected'),
("Canceled" , 'Canceled'),
("CancelRequested" , 'Cancel Requested'),
("CheckedIn" , 'Checked In'),
("Complete", 'Complete'),
("Forward", 'Forward')
)
class Hall(models.Model):
hall_id = models.CharField(max_length=10)
hall_name = models.CharField(max_length=50)
max_accomodation = models.IntegerField(default=0)
number_students = models.PositiveIntegerField(default=0)
def __str__(self):
return self.hall_id
class HallCaretaker(models.Model):
hall = models.ForeignKey(Hall, on_delete=models.CASCADE)
staff = models.ForeignKey(Staff, on_delete=models.CASCADE)
    def __str__(self):
        return str(self.hall) + ' ' + str(self.staff)
class HallWarden(models.Model):
hall = models.ForeignKey(Hall, on_delete=models.CASCADE)
faculty = models.ForeignKey(Faculty, on_delete=models.CASCADE)
    def __str__(self):
        return str(self.hall) + ' ' + str(self.faculty)
class GuestRoomDetail(models.Model):
hall = models.ForeignKey(Hall, on_delete=models.CASCADE)
room_no = models.CharField(max_length=4, unique=True)
room_status = models.CharField(max_length=20, choices=HostelManagementConstants.ROOM_STATUS, default='Available')
def __str__(self):
return self.room_no
class GuestRoomBooking(models.Model):
hall = models.ForeignKey(Hall, on_delete=models.CASCADE)
intender = models.ForeignKey(ExtraInfo, on_delete=models.CASCADE)
guest_name = models.CharField(max_length=100)
guest_phone = models.CharField(max_length=15)
guest_email = models.CharField(max_length=40, blank=True)
guest_address = models.TextField(blank=True)
rooms_required = models.IntegerField(default=1,null=True,blank=True)
guest_room_id = models.ManyToManyField(GuestRoomDetail)
total_guest = models.IntegerField(default=1)
purpose = models.TextField()
arrival_date = models.DateField(auto_now_add=False, auto_now=False)
arrival_time = models.TimeField(auto_now_add=False, auto_now=False)
departure_date = models.DateField(auto_now_add=False, auto_now=False)
departure_time = models.TimeField(auto_now_add=False, auto_now=False)
status = models.CharField(max_length=15, choices=HostelManagementConstants.BOOKING_STATUS ,default ="Pending")
booking_date = models.DateField(auto_now_add=False, auto_now=False, default=timezone.now)
nationality = models.CharField(max_length=20, blank=True)
    def __str__(self):
        return '%s ----> %s - %s' % (self.id, self.guest_name, self.status)
class StaffSchedule(models.Model):
hall = models.ForeignKey(Hall, on_delete=models.CASCADE)
    staff_id = models.ForeignKey(Staff, on_delete=models.CASCADE)
day = models.IntegerField(choices=HostelManagementConstants.DAYS_OF_WEEK)
start_time = models.TimeField(null=True,blank=True)
end_time = models.TimeField(null=True,blank=True)
def __str__(self):
return str(self.staff_id) + str(self.start_time) + '->' + str(self.end_time)
class HostelNoticeBoard(models.Model):
hall = models.ForeignKey(Hall, on_delete=models.CASCADE)
    posted_by = models.ForeignKey(ExtraInfo, on_delete=models.CASCADE)
head_line = models.CharField(max_length=100)
content = models.FileField(upload_to='hostel_management/', blank=True, null=True)
description = models.TextField(blank=True)
def __str__(self):
return self.head_line
class HostelStudentAttendence(models.Model):
hall = models.ForeignKey(Hall, on_delete=models.CASCADE)
student_id = models.ForeignKey(Student, on_delete=models.CASCADE)
date = models.DateField()
present = models.BooleanField()
def __str__(self):
return str(self.student_id) + '->' + str(self.date) + '-' + str(self.present)
|
[
"django.db.models.FileField",
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.models.TimeField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.PositiveIntegerField",
"django.db.models.BooleanField",
"django.db.models.IntegerField",
"django.db.models.DateField"
] |
[((1034, 1065), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (1050, 1065), False, 'from django.db import models\n'), ((1082, 1113), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1098, 1113), False, 'from django.db import models\n'), ((1137, 1167), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1156, 1167), False, 'from django.db import models\n'), ((1190, 1228), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1217, 1228), False, 'from django.db import models\n'), ((1330, 1379), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Hall'], {'on_delete': 'models.CASCADE'}), '(Hall, on_delete=models.CASCADE)\n', (1347, 1379), False, 'from django.db import models\n'), ((1392, 1442), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Staff'], {'on_delete': 'models.CASCADE'}), '(Staff, on_delete=models.CASCADE)\n', (1409, 1442), False, 'from django.db import models\n'), ((1550, 1599), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Hall'], {'on_delete': 'models.CASCADE'}), '(Hall, on_delete=models.CASCADE)\n', (1567, 1599), False, 'from django.db import models\n'), ((1614, 1666), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Faculty'], {'on_delete': 'models.CASCADE'}), '(Faculty, on_delete=models.CASCADE)\n', (1631, 1666), False, 'from django.db import models\n'), ((1785, 1834), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Hall'], {'on_delete': 'models.CASCADE'}), '(Hall, on_delete=models.CASCADE)\n', (1802, 1834), False, 'from django.db import models\n'), ((1849, 1892), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'unique': '(True)'}), '(max_length=4, unique=True)\n', (1865, 1892), False, 'from django.db import models\n'), ((1912, 2016), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'choices': 'HostelManagementConstants.ROOM_STATUS', 'default': '"""Available"""'}), "(max_length=20, choices=HostelManagementConstants.\n ROOM_STATUS, default='Available')\n", (1928, 2016), False, 'from django.db import models\n'), ((2115, 2164), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Hall'], {'on_delete': 'models.CASCADE'}), '(Hall, on_delete=models.CASCADE)\n', (2132, 2164), False, 'from django.db import models\n'), ((2180, 2234), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ExtraInfo'], {'on_delete': 'models.CASCADE'}), '(ExtraInfo, on_delete=models.CASCADE)\n', (2197, 2234), False, 'from django.db import models\n'), ((2252, 2284), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2268, 2284), False, 'from django.db import models\n'), ((2303, 2334), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)'}), '(max_length=15)\n', (2319, 2334), False, 'from django.db import models\n'), ((2353, 2396), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)', 'blank': '(True)'}), '(max_length=40, blank=True)\n', (2369, 2396), False, 'from django.db import models\n'), ((2417, 2445), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (2433, 2445), False, 'from django.db import models\n'), ((2468, 2521), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'null': '(True)', 'blank': '(True)'}), '(default=1, null=True, blank=True)\n', (2487, 2521), False, 'from django.db import models\n'), ((2540, 2579), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['GuestRoomDetail'], {}), '(GuestRoomDetail)\n', (2562, 2579), False, 'from django.db import models\n'), ((2598, 2628), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)'}), '(default=1)\n', (2617, 2628), False, 'from django.db import models\n'), ((2643, 2661), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (2659, 2661), False, 'from django.db import models\n'), ((2681, 2733), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(False)', 'auto_now': '(False)'}), '(auto_now_add=False, auto_now=False)\n', (2697, 2733), False, 'from django.db import models\n'), ((2753, 2805), 'django.db.models.TimeField', 'models.TimeField', ([], {'auto_now_add': '(False)', 'auto_now': '(False)'}), '(auto_now_add=False, auto_now=False)\n', (2769, 2805), False, 'from django.db import models\n'), ((2827, 2879), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(False)', 'auto_now': '(False)'}), '(auto_now_add=False, auto_now=False)\n', (2843, 2879), False, 'from django.db import models\n'), ((2901, 2953), 'django.db.models.TimeField', 'models.TimeField', ([], {'auto_now_add': '(False)', 'auto_now': '(False)'}), '(auto_now_add=False, auto_now=False)\n', (2917, 2953), False, 'from django.db import models\n'), ((2967, 3072), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)', 'choices': 'HostelManagementConstants.BOOKING_STATUS', 'default': '"""Pending"""'}), "(max_length=15, choices=HostelManagementConstants.\n BOOKING_STATUS, default='Pending')\n", (2983, 3072), False, 'from django.db import models\n'), ((3088, 3162), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(False)', 'auto_now': '(False)', 'default': 'timezone.now'}), '(auto_now_add=False, auto_now=False, default=timezone.now)\n', (3104, 3162), False, 'from django.db import models\n'), ((3181, 3224), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)'}), '(max_length=20, blank=True)\n', (3197, 3224), False, 'from django.db import models\n'), ((3375, 3424), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Hall'], {'on_delete': 'models.CASCADE'}), '(Hall, on_delete=models.CASCADE)\n', (3392, 3424), False, 'from django.db import models\n'), ((3440, 3493), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Staff'], {'on_delete': 'models.ForeignKey'}), '(Staff, on_delete=models.ForeignKey)\n', (3457, 3493), False, 'from django.db import models\n'), ((3504, 3571), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'HostelManagementConstants.DAYS_OF_WEEK'}), '(choices=HostelManagementConstants.DAYS_OF_WEEK)\n', (3523, 3571), False, 'from django.db import models\n'), ((3589, 3628), 'django.db.models.TimeField', 'models.TimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (3605, 3628), False, 'from django.db import models\n'), ((3643, 3682), 'django.db.models.TimeField', 'models.TimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (3659, 3682), False, 'from django.db import models\n'), ((3847, 3896), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Hall'], {'on_delete': 'models.CASCADE'}), '(Hall, on_delete=models.CASCADE)\n', (3864, 3896), False, 'from django.db import models\n'), ((3913, 3970), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ExtraInfo'], {'on_delete': 'models.ForeignKey'}), '(ExtraInfo, on_delete=models.ForeignKey)\n', (3930, 3970), False, 'from django.db import models\n'), ((3987, 4019), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (4003, 4019), False, 'from django.db import models\n'), ((4034, 4105), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""hostel_management/"""', 'blank': '(True)', 'null': '(True)'}), "(upload_to='hostel_management/', blank=True, null=True)\n", (4050, 4105), False, 'from django.db import models\n'), ((4124, 4152), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (4140, 4152), False, 'from django.db import models\n'), ((4264, 4313), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Hall'], {'on_delete': 'models.CASCADE'}), '(Hall, on_delete=models.CASCADE)\n', (4281, 4313), False, 'from django.db import models\n'), ((4331, 4383), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Student'], {'on_delete': 'models.CASCADE'}), '(Student, on_delete=models.CASCADE)\n', (4348, 4383), False, 'from django.db import models\n'), ((4395, 4413), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (4411, 4413), False, 'from django.db import models\n'), ((4428, 4449), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (4447, 4449), False, 'from django.db import models\n')]
|
from pprint import pprint
import numpy as np
from collections import Counter
import itertools, copy
from more_itertools import split_before
import os, json, traceback, time, warnings, shutil, sys
import multiprocessing
from miditoolkit.midi.parser import MidiFile
from miditoolkit.midi.containers import Instrument
from miditoolkit.midi.containers import Note as mtkNote
from chorder import Dechorder
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from encoding import pit2str, pos2str, bom2str, dur2str, trk2str, ins2str, pit2alphabet
WORKERS=32
def measure_calc_chord(evt_seq):
assert evt_seq[0][1] == 'BOM', "wrong measure for chord"
bom_tick = evt_seq[0][0]
ts = min(evt_seq[0][-1], 8)
chroma = Counter()
mtknotes = []
for evt in evt_seq[1:-1]:
# if evt[1] != 'ON':
# print(evt)
# print(evt_seq[-1])
assert evt[1] == 'ON', "wrong measure for chord: " + evt[1] + evt_seq[-1][1]
if evt[3] == 128: # exclude drums
continue
o, p, d = evt[0] - bom_tick, evt[2], evt[-1]
if p < 21 or p > 108: # exclude unusual pitch
continue
if o < 8:
note = mtkNote(60, p, o, o+d if o > 0 else 8)
mtknotes.append(note)
else:
break
# if o == 0:
# d = d * 10
# if ts == 32 and o == 16:
# d = d * 3
# chroma[p%12] += d
chord, score = Dechorder.get_chord_quality(mtknotes, start=0, end=ts)
if score < 0:
return [bom_tick, 'CHR', None, None, None, None, 'NA']
return [bom_tick, 'CHR', None, None, None, None, pit2alphabet[chord.root_pc] + (chord.quality if chord.quality != '7' else 'D7')]
# intervals = [
# [4,3],
# [3,4],
# [4,4],
# [3,3]
# ]
# scores = []
# for rid, root in enumerate(pit2alphabet):
# for qid, quality in enumerate(['M', 'm', 'A', 'd']): # Major, minor, augment, diminish
# root_pitch = rid
# third_pitch = (rid + intervals[qid][0]) % 12
# fifth_pitch = (third_pitch + intervals[qid][1]) % 12
# a, b = (fifth_pitch+3)%12, (fifth_pitch+4)%12
# score = chroma[root_pitch] * 3 + chroma[third_pitch] * 2.5 + chroma[fifth_pitch] * 2 + ((chroma[a]+chroma[b])/2.0 if qid < 2 else 0)
# if qid < 2:
# score = score * 1.3
# scores.append((root+quality, score))
# scores.sort(key=lambda x: (-x[1], pit2alphabet.index(x[0][0])))
# #print(chroma)
# #print(scores)
# for k, _ in itertools.groupby(scores, key=lambda x:x[1]):
# #print(k)
# candidates = []
# for x in _:
# candidates.append(x)
# len_c = sum(1 for x in candidates)
# if len_c > 2:
# return [bom_tick, 'CHR', None, None, None, None, 'NA']
# if len_c == 2 and candidates[0][0][0] == candidates[1][0][0]:
# return [bom_tick, 'CHR', None, None, None, None, candidates[0][0][0] + candidates[1][0][0]]
# return [bom_tick, 'CHR', None, None, None, None,''.join([x[0] for x in candidates])]
# for item in candidates:
# print(item)
# #print(candidates)
# break
#assert False, "program end"
def merge_drums(p_midi):  # merge all percussion tracks into a single drum track
drum_0_lst = []
new_instruments= []
for instrument in p_midi.instruments:
if not len(instrument.notes) == 0:
# --------------------
if instrument.is_drum:
for note in instrument.notes:
drum_0_lst.append(note)
else:
new_instruments.append(instrument)
if len(drum_0_lst) > 0:
drum_0_lst.sort(key=lambda x: x.start)
# remove duplicate
drum_0_lst = list(k for k, _ in itertools.groupby(drum_0_lst))
drum_0_instrument = Instrument(program=0, is_drum=True, name="drum")
drum_0_instrument.notes = drum_0_lst
new_instruments.append(drum_0_instrument)
p_midi.instruments = new_instruments
def merge_sparse_track(p_midi, CANDI_THRES=50, MIN_THRES=5):  # merge tracks that have too few notes
good_instruments = []
bad_instruments = []
good_instruments_idx = []
for instrument in p_midi.instruments:
if len(instrument.notes) < CANDI_THRES:
bad_instruments.append(instrument)
else:
good_instruments.append(instrument)
good_instruments_idx.append((instrument.program, instrument.is_drum))
for bad_instrument in bad_instruments:
if (bad_instrument.program, bad_instrument.is_drum) in good_instruments_idx:
# find one track to merge
for instrument in good_instruments:
if bad_instrument.program == instrument.program and \
bad_instrument.is_drum == instrument.is_drum:
instrument.notes.extend(bad_instrument.notes)
break
# no track to merge
else:
if len(bad_instrument.notes) > MIN_THRES:
good_instruments.append(bad_instrument)
p_midi.instruments = good_instruments
def limit_max_track(p_midi, MAX_TRACK=40):  # merge the tracks with the fewest notes and cap the track count at MAX_TRACK
good_instruments = p_midi.instruments
good_instruments.sort(key=lambda x: (not x.is_drum, -len(x.notes))) # place drum track or the most note track at first
assert good_instruments[0].is_drum == True or len(good_instruments[0].notes) >= len(good_instruments[1].notes), tuple(len(x.notes) for x in good_instruments[:3])
track_idx_lst = list(range(len(good_instruments)))
if len(good_instruments) > MAX_TRACK:
new_good_instruments = copy.deepcopy(good_instruments[:MAX_TRACK])
#print(midi_file_path)
for id in track_idx_lst[MAX_TRACK:]:
cur_ins = good_instruments[id]
merged = False
new_good_instruments.sort(key=lambda x: len(x.notes))
for nid, ins in enumerate(new_good_instruments):
if cur_ins.program == ins.program and cur_ins.is_drum == ins.is_drum:
new_good_instruments[nid].notes.extend(cur_ins.notes)
merged = True
break
if not merged:
pass#print('Track {:d} deprecated, program {:d}, note count {:d}'.format(id, cur_ins.program, len(cur_ins.notes)))
good_instruments = new_good_instruments
#print(trks, probs, chosen)
assert len(good_instruments) <= MAX_TRACK, len(good_instruments)
for idx, good_instrument in enumerate(good_instruments):
if good_instrument.is_drum:
good_instruments[idx].program = 128
good_instruments[idx].is_drum = False
# for i, note in enumerate(good_instruments.notes):
# good_instruments.notes[i].pitch += 128
p_midi.instruments = good_instruments
def get_init_note_events(p_midi): # extract all notes in midi file
note_events, note_on_ticks, note_dur_lst = [], [], []
for track_idx, instrument in enumerate(p_midi.instruments):
#track_idx_lst.append(track_idx)
for note in instrument.notes:
note_dur = note.end - note.start
# special case: note_dur too long
max_dur = 4 * p_midi.ticks_per_beat
if note_dur / max_dur > 1:
total_dur = note_dur
start = note.start
while total_dur != 0:
if total_dur > max_dur:
note_events.extend([[start, "ON", note.pitch, instrument.program,
instrument.is_drum, track_idx, max_dur]])
note_on_ticks.append(start)
note_dur_lst.append(max_dur)
start += max_dur
total_dur -= max_dur
else:
note_events.extend([[start, "ON", note.pitch, instrument.program,
instrument.is_drum, track_idx, total_dur]])
note_on_ticks.append(start)
note_dur_lst.append(total_dur)
total_dur = 0
else:
note_events.extend([[note.start, "ON", note.pitch, instrument.program, instrument.is_drum, track_idx, note_dur]])
# for score analysis and beat estimating when score has no time signature
note_on_ticks.append(note.start)
note_dur_lst.append(note.end - note.start)
note_events.sort(key=lambda x: (x[0], x[1] == "ON", x[5], x[4], x[3], x[2], x[-1]))
note_events = list(k for k, _ in itertools.groupby(note_events))
return note_events, note_on_ticks, note_dur_lst
def calculate_measure(p_midi, first_event_tick, last_event_tick): # calculate measures and append measure symbol to event_seq
measure_events = []
time_signature_changes = p_midi.time_signature_changes
    if not time_signature_changes:  # no time signature changes: measures cannot be derived, give up
        raise AssertionError("No time_signature_changes")
else:
if time_signature_changes[0].time != 0 and \
time_signature_changes[0].time > first_event_tick:
raise AssertionError("First time signature start with None zero tick")
# clean duplicate time_signature_changes
temp_sig = []
for idx, time_sig in enumerate(time_signature_changes):
if idx == 0:
temp_sig.append(time_sig)
else:
previous_timg_sig = time_signature_changes[idx - 1]
if not (previous_timg_sig.numerator == time_sig.numerator
and previous_timg_sig.denominator == time_sig.denominator):
temp_sig.append(time_sig)
time_signature_changes = temp_sig
# print("time_signature_changes", time_signature_changes)
for idx in range(len(time_signature_changes)):
# calculate measures, eg: how many ticks per measure
numerator = time_signature_changes[idx].numerator
denominator = time_signature_changes[idx].denominator
ticks_per_measure = p_midi.ticks_per_beat * (4 / denominator) * numerator
cur_tick = time_signature_changes[idx].time
if idx < len(time_signature_changes) - 1:
next_tick = time_signature_changes[idx + 1].time
else:
next_tick = last_event_tick + int(ticks_per_measure)
if ticks_per_measure.is_integer():
for measure_start_tick in range(cur_tick, next_tick, int(ticks_per_measure)):
if measure_start_tick + int(ticks_per_measure) > next_tick:
measure_events.append([measure_start_tick, "BOM", None, None, None, None, 0])
measure_events.append([next_tick, "EOM", None, None, None, None, 0])
else:
measure_events.append([measure_start_tick, "BOM", None, None, None, None, 0])
measure_events.append([measure_start_tick + int(ticks_per_measure), "EOM", None, None, None, None, 0])
else:
assert False, "ticks_per_measure Error"
return measure_events
def quantize_by_nth(nth_tick, note_events):
    # e.g. quantize to the 32nd-note grid (nth_tick is the tick length of one grid step)
half = nth_tick / 2
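    # e.g. (illustrative): with ticks_per_beat = 480 a 32nd note spans
    # 480 / 8 = 60 ticks, so half = 30 and each onset or duration snaps to
    # the nearest 60-tick grid line.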
split_score = list(split_before(note_events, lambda x: x[1] == "BOM"))
measure_durs = []
eom_tick = 0
for measure_id, measure in enumerate(split_score):
bom_tick = measure[0][0]
        assert bom_tick == eom_tick, f'measure time error {bom_tick} {eom_tick}'
eom_tick = measure[-1][0]
mea_dur = eom_tick - bom_tick
if mea_dur < nth_tick: # measure duration need to be quantized
measure_durs.append(1)
else:
if mea_dur % nth_tick < half: # quantize to left
measure_durs.append(mea_dur // nth_tick)
else:
measure_durs.append(mea_dur // nth_tick + 1)
for evt in measure[1:-1]:
assert evt[1] == 'ON', f'measure structure error {evt[1]}'
rel_tick = evt[0] - bom_tick
if rel_tick % nth_tick <= half:
rel_tick = min(rel_tick // nth_tick, measure_durs[-1] - 1)
else:
rel_tick = min(rel_tick // nth_tick + 1, measure_durs[-1] - 1)
evt[0] = rel_tick
final_events = []
lasteom = 0
for measure_id, measure in enumerate(split_score):
measure[0][0] = lasteom
measure[-1][0] = measure[0][0] + measure_durs[measure_id]
lasteom = measure[-1][0]
for event in measure[1:-1]:
event[0] += measure[0][0]
if event[-1] < nth_tick: # duration too short, quantize to 1
event[-1] = 1
else:
if event[-1] % nth_tick <= half:
event[-1] = event[-1] // nth_tick
else:
event[-1] = event[-1] // nth_tick + 1
final_events.extend(measure)
return final_events
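# Illustrative sketch (not part of the original pipeline): the per-event
# rounding rule above snaps a relative tick onto the nth-note grid. With
# nth_tick = 60 (a 32nd note at 480 ticks per beat), half = 30, so
# rel_tick = 95 rounds up to slot 95 // 60 + 1 = 2 and rel_tick = 25 rounds
# down to slot 25 // 60 = 0.
def _demo_round_to_grid(rel_tick, nth_tick):
    half = nth_tick / 2
    if rel_tick % nth_tick <= half:
        return rel_tick // nth_tick
    return rel_tick // nth_tick + 1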
def prettify(note_events, ticks_per_beat):
    first_event_idx = next(i for i in range(len(note_events)) if note_events[i][1] == "ON")
    last_event_idx = next(i for i in reversed(range(len(note_events))) if note_events[i][1] == "ON")
    assert note_events[first_event_idx - 1][1] == "BOM", "measure_start Error"
    assert note_events[last_event_idx + 1][1] == "EOM", "measure_end Error"
    # remove invalid measures on both sides
    note_events = note_events[first_event_idx - 1: last_event_idx + 2]
# check again
assert note_events[0][1] == "BOM", "measure_start Error"
assert note_events[-1][1] == "EOM", "measure_end Error"
# -------------- zero start tick -----------------
start_tick = note_events[0][0]
if start_tick != 0:
for event in note_events:
event[0] -= start_tick
from fractions import Fraction
ticks_32th = Fraction(ticks_per_beat, 8)
note_events = quantize_by_nth(ticks_32th, note_events)
note_events.sort(key=lambda x: (x[0], x[1] == "ON", x[1] == "BOM", x[1] == "EOM",
x[5], x[4], x[3], x[2], x[-1]))
note_events = list(k for k, _ in itertools.groupby(note_events))
# -------------------------check measure duration----------------------------------------------
note_events.sort(key=lambda x: (x[0], x[1] == "ON", x[1] == "BOM", x[1] == "EOM",
x[5], x[4], x[3], x[2], x[-1]))
split_score = list(split_before(note_events, lambda x: x[1] == "BOM"))
    check_measure_dur = [0]  # durations considered invalid; a zero-length measure is rejected below
for measure_idx, measure in enumerate(split_score):
first_tick = measure[0][0]
last_tick = measure[-1][0]
measure_dur = last_tick - first_tick
if measure_dur > 100:
raise AssertionError("Measure duration error")
split_score[measure_idx][0][-1] = measure_dur
if measure_dur in check_measure_dur:
#print(measure_dur)
raise AssertionError("Measure duration error")
return split_score
def get_pos_and_cc(split_score):
new_event_seq = []
for measure_idx, measure in enumerate(split_score):
measure.sort(key=lambda x: (x[1] == "EOM", x[1] == "ON", x[1] == 'CHR', x[1] == "BOM", x[-2]))
bom_tick = measure[0][0]
# split measure by track
track_nmb = set(map(lambda x: x[-2], measure[2:-1]))
tracks = [[y for y in measure if y[-2] == x] for x in track_nmb]
# ---------- calculate POS for each track / add CC
new_measure = []
for track_idx, track in enumerate(tracks):
pos_lst = []
trk_abs_num = -1
for event in track:
if event[1] == "ON":
assert trk_abs_num == -1 or trk_abs_num == event[-2], "Error: found inconsistent trackid within same track"
trk_abs_num = event[-2]
mypos = event[0] - bom_tick
pos_lst.append(mypos)
pos_lst = list(set(pos_lst))
for pos in pos_lst:
tracks[track_idx].append([pos + bom_tick, "POS", None, None, None, None, pos])
tracks[track_idx].insert(0, [bom_tick, "CC", None, None, None, None, trk_abs_num])
tracks[track_idx].sort(key=lambda x: (x[0], x[1] == "ON", x[1] == "POS", x[1] == "CC", x[5], x[4], x[3], x[2]))
new_measure.append(measure[0])
new_measure.append(measure[1])
for track in tracks:
for idx, event in enumerate(track):
new_measure.append(event)
# new_measure = new_measure[:-1]
#new_measure.append(measure[-1])
new_event_seq.extend(new_measure)
return new_event_seq
def event_seq_to_str(new_event_seq):
char_events = []
chord_cnt = Counter()
for evt in new_event_seq:
if evt[1] == 'ON':
char_events.append(pit2str(evt[2])) # pitch
char_events.append(dur2str(evt[-1])) # duration
char_events.append(trk2str(evt[-2])) # track
char_events.append(ins2str(evt[3])) # instrument
#dur = evt[-1]
elif evt[1] == 'POS':
char_events.append(pos2str(evt[-1])) # type (time position)
char_events.append('RZ')
char_events.append('TZ')
char_events.append('YZ')
#onset = evt[-1]
elif evt[1] == 'BOM':
char_events.append(bom2str(evt[-1]))
char_events.append('RZ')
char_events.append('TZ')
char_events.append('YZ')
#mea_len = evt[-1]
elif evt[1] == 'CC':
char_events.append('NT')
char_events.append('RZ')
char_events.append('TZ')
char_events.append('YZ')
elif evt[1] == 'CHR':
#print(evt[-1])
chord_cnt[evt[-1]] += 1
char_events.append('H'+evt[-1])
char_events.append('RZ')
char_events.append('TZ')
char_events.append('YZ')
else:
assert False, ("evt type error", evt[1])
return char_events, chord_cnt
# abs_pos type pitch program is_drum track_id duration/rela_pos
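# e.g. (illustrative values only) an "ON" event is
#     [480, "ON", 60, 0, False, 2, 120]   # pitch 60, program 0, track 2, 120 ticks
# and a measure boundary is [480, "BOM", None, None, None, None, 0]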
def midi_to_event_seq_str(midi_file_path, readonly=False):
p_midi = MidiFile(midi_file_path)
for ins in p_midi.instruments:
ins.remove_invalid_notes(verbose=False)
merge_drums(p_midi)
if not readonly:
merge_sparse_track(p_midi)
limit_max_track(p_midi)
note_events, note_on_ticks, _ = get_init_note_events(p_midi)
measure_events = calculate_measure(p_midi, min(note_on_ticks), max(note_on_ticks))
note_events.extend(measure_events)
note_events.sort(key=lambda x: (x[0], x[1] == "ON", x[1] == "BOM", x[1] == "EOM",
x[5], x[4], x[3], x[2]))
split_score = prettify(note_events, p_midi.ticks_per_beat)
for measure_idx, measure in enumerate(split_score): # calculate chord for every measure
chord_evt = measure_calc_chord(measure)
split_score[measure_idx].insert(1, chord_evt)
new_event_seq = get_pos_and_cc(split_score)
# new_event_seq[0:0] = [[0, "BOS", None, None, None, None, 0]]
# new_event_seq.append([new_event_seq[-1][0], "EOS", None, None, None, None, 0])
char_events, chord_cnt = event_seq_to_str(new_event_seq)
return char_events, chord_cnt
def mp_worker(file_path):
try:
event_seq = midi_to_event_seq_str(file_path)
return event_seq
except (OSError, EOFError, ValueError, KeyError) as e:
print(file_path)
traceback.print_exc(limit=0)
print()
# shutil.move(file_path, "/ai/fzc/fzc/Dataset_original_file/pop/invalid/")
return "errortouch"
except AssertionError as e:
if str(e) == "No time_signature_changes":
print("Moving no time sig to folder..........")
# shutil.move(file_path, "/ai/fzc/fzc/Dataset_original_file/midi_mono/notimesig/")
return "error"
elif str(e) == "Measure duration error":
#print("Measure duration error", file_path)
# shutil.move(file_path, "/Users/bytedance/Desktop/measure_error")
return "error"
# elif str(e) == "Track length error":
# print("-----", file_path)
# shutil.move(file_path, "/ai/fzc/fzc/Dataset_original_file/pop/too_many_tracks/")
# return "error"
else:
print("Other Assertion Error", str(e), file_path)
return "error"
except Exception as e:
print(file_path)
traceback.print_exc(limit=0)
print()
return "error"
def mp_handler(file_paths):
start = time.time()
broken_counter = 0
good_counter = 0
event_seq_res = []
chord_cnter = Counter()
print(f'starts processing midis with {WORKERS} processes')
with multiprocessing.Pool(WORKERS) as p:
for ret in p.imap(mp_worker, file_paths):
if isinstance(ret, str) and ret == "error":
broken_counter += 1
# (filename, count) tuples from worker
else:
                try:
                    event_seq, chord_cnt = ret
                except (TypeError, ValueError):
                    print(ret)
                    continue  # skip malformed worker results
if len(event_seq) > 0:
event_seq_res.append(event_seq)
chord_cnter += chord_cnt
good_counter += 1
print("Process data takes: ", time.time() - start)
print(good_counter, broken_counter)
pprint(chord_cnter)
# drum_program_cnter = sorted(list(drum_program_cnter.items()))
# for i in range(1, len(drum_program_cnter)):
# tmp = drum_program_cnter[i]
# tmp = (tmp[0], tmp[1] + drum_program_cnter[i-1][1])
# drum_program_cnter[i] = tmp
# for k, v in drum_program_cnter:
# print("{:d} {:.3f}".format(k, v * 1. / good_counter))
# ----------------------------------------------------------------------------------
txt_start = time.time()
if not os.path.exists('../../data/preprocessed/'):
os.makedirs('../../data/preprocessed/')
with open("../../data/preprocessed/linear_4096_chord.txt", "w", encoding="utf-8") as f:
for idx, piece in enumerate(event_seq_res):
f.write(' '.join(piece) + '\n')
print("Create txt file takes: ", time.time() - txt_start)
# ----------------------------------------------------------------------------------
if __name__ == '__main__':
# test_file = "/ai/fzc/fzc/Dataset_original_file/pop/国内流行_周杰伦_周杰伦-夜的第七章.mid"
# test_file = "/ai/fzc/fzc/Dataset_original_file/kunstderfuge_nolive/anonymous_anonymous_duet_1_(c)icking-archive.mid"
# test_file = "/ai/fzc/fzc/Dataset_original_file/unzip/kunstderfuge_unzip/rachmaninov/rachmaninov_concerto_2_1_(c)galimberti.mid"
# test_file = "/ai/fzc/fzc/Dataset_original_file/unzip/kunstderfuge_unzip/rachmaninov/rachmaninov_concerto_2_2_(c)galimberti.mid"
# test_file = "/ai/fzc/fzc/Dataset_original_file/pop/国内流行_吉他音乐_King_Henry_VIII_Fantazy_a_3.mid"
# test_file = "/ai/fzc/fzc/Dataset_original_file/pop/港台歌曲_邓丽君_無言的結局.mid"
# test_file = "/ai/fzc/fzc/Dataset_original_file/midi_mono/NLB133686_01.mid"
# test_file = "/ai/fzc/fzc/Dataset_original_file/melody/我是一只小小鸟.mid"
# test_file = "../../full_data/mozart_die_zauber_floete_620_21a_(c)icking-archive.mid"
# # test_file = "/Users/bytedance/Desktop/selected_symphony/rachmaninov_concerto_2_1_(c)galimberti.mid"
# #print('starts')
# evnt_seq, _ = midi_to_event_seq_str(test_file, read_only=True)
# # for trk in evnt_seq:
# # print(trk)
# #print(' '.join(evnt_seq))
# exit()
# # for evt in evnt_seq:
# if evt[1] == 'POS':
#print(evnt_seq[1])
# for trk in evnt_seq:
# print(len(trk.split()))
#print(len(event_seq[1]))
warnings.filterwarnings('ignore')
# kunstderfuge_folder_path = "/ai/fzc/fzc/Dataset_original_file/kunstderfuge_nolive"
pop_folder_path = "../../data/midis"
file_paths = []
for path, directories, files in os.walk(pop_folder_path):
if path.split("/")[-1] != "invalid" \
and path.split("/")[-1] != "notimesig" \
and path.split("/")[-1] != "too_many_tracks" \
and path.split("/")[-1] != "measure_error" \
and path.split("/")[-1] != "delete":
for file in files:
                passed = True
                # NOTE: this numeric-filename filter is effectively disabled:
                # 'passed' stays True whether or not the name parses as an int.
                try:
                    num = int(file.replace('.mid', ''))
                except ValueError:
                    passed = True
                if not passed:
                    continue
#print(file)
if file.endswith(".mid") or file.endswith(".MID"):
file_path = path + "/" + file
file_paths.append(file_path)
# run multi-processing midi extractor
mp_handler(file_paths[:])
|
[
"os.walk",
"pprint.pprint",
"fractions.Fraction",
"more_itertools.split_before",
"os.path.abspath",
"traceback.print_exc",
"encoding.bom2str",
"os.path.exists",
"encoding.pos2str",
"collections.Counter",
"encoding.ins2str",
"copy.deepcopy",
"miditoolkit.midi.parser.MidiFile",
"chorder.Dechorder.get_chord_quality",
"encoding.dur2str",
"encoding.trk2str",
"multiprocessing.Pool",
"itertools.groupby",
"miditoolkit.midi.containers.Note",
"encoding.pit2str",
"os.makedirs",
"warnings.filterwarnings",
"time.time",
"miditoolkit.midi.containers.Instrument"
] |
[((764, 773), 'collections.Counter', 'Counter', ([], {}), '()\n', (771, 773), False, 'from collections import Counter\n'), ((1479, 1533), 'chorder.Dechorder.get_chord_quality', 'Dechorder.get_chord_quality', (['mtknotes'], {'start': '(0)', 'end': 'ts'}), '(mtknotes, start=0, end=ts)\n', (1506, 1533), False, 'from chorder import Dechorder\n'), ((14221, 14248), 'fractions.Fraction', 'Fraction', (['ticks_per_beat', '(8)'], {}), '(ticks_per_beat, 8)\n', (14229, 14248), False, 'from fractions import Fraction\n'), ((17132, 17141), 'collections.Counter', 'Counter', ([], {}), '()\n', (17139, 17141), False, 'from collections import Counter\n'), ((18606, 18630), 'miditoolkit.midi.parser.MidiFile', 'MidiFile', (['midi_file_path'], {}), '(midi_file_path)\n', (18614, 18630), False, 'from miditoolkit.midi.parser import MidiFile\n'), ((21060, 21071), 'time.time', 'time.time', ([], {}), '()\n', (21069, 21071), False, 'import os, json, traceback, time, warnings, shutil, sys\n'), ((21159, 21168), 'collections.Counter', 'Counter', ([], {}), '()\n', (21166, 21168), False, 'from collections import Counter\n'), ((21882, 21901), 'pprint.pprint', 'pprint', (['chord_cnter'], {}), '(chord_cnter)\n', (21888, 21901), False, 'from pprint import pprint\n'), ((22365, 22376), 'time.time', 'time.time', ([], {}), '()\n', (22374, 22376), False, 'import os, json, traceback, time, warnings, shutil, sys\n'), ((24222, 24255), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (24245, 24255), False, 'import os, json, traceback, time, warnings, shutil, sys\n'), ((24443, 24467), 'os.walk', 'os.walk', (['pop_folder_path'], {}), '(pop_folder_path)\n', (24450, 24467), False, 'import sys, os\n'), ((3928, 3976), 'miditoolkit.midi.containers.Instrument', 'Instrument', ([], {'program': '(0)', 'is_drum': '(True)', 'name': '"""drum"""'}), "(program=0, is_drum=True, name='drum')\n", (3938, 3976), False, 'from miditoolkit.midi.containers import Instrument\n'), ((5875, 5918), 'copy.deepcopy', 'copy.deepcopy', (['good_instruments[:MAX_TRACK]'], {}), '(good_instruments[:MAX_TRACK])\n', (5888, 5918), False, 'import itertools, copy\n'), ((11625, 11675), 'more_itertools.split_before', 'split_before', (['note_events', "(lambda x: x[1] == 'BOM')"], {}), "(note_events, lambda x: x[1] == 'BOM')\n", (11637, 11675), False, 'from more_itertools import split_before\n'), ((14816, 14866), 'more_itertools.split_before', 'split_before', (['note_events', "(lambda x: x[1] == 'BOM')"], {}), "(note_events, lambda x: x[1] == 'BOM')\n", (14828, 14866), False, 'from more_itertools import split_before\n'), ((21242, 21271), 'multiprocessing.Pool', 'multiprocessing.Pool', (['WORKERS'], {}), '(WORKERS)\n', (21262, 21271), False, 'import multiprocessing\n'), ((22388, 22430), 'os.path.exists', 'os.path.exists', (['"""../../data/preprocessed/"""'], {}), "('../../data/preprocessed/')\n", (22402, 22430), False, 'import sys, os\n'), ((22440, 22479), 'os.makedirs', 'os.makedirs', (['"""../../data/preprocessed/"""'], {}), "('../../data/preprocessed/')\n", (22451, 22479), False, 'import sys, os\n'), ((466, 491), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (481, 491), False, 'import sys, os\n'), ((1222, 1262), 'miditoolkit.midi.containers.Note', 'mtkNote', (['(60)', 'p', 'o', '(o + d if o > 0 else 8)'], {}), '(60, p, o, o + d if o > 0 else 8)\n', (1229, 1262), True, 'from miditoolkit.midi.containers import Note as mtkNote\n'), ((19938, 19966), 'traceback.print_exc', 'traceback.print_exc', ([], {'limit': '(0)'}), '(limit=0)\n', (19957, 19966), False, 'import os, json, traceback, time, warnings, shutil, sys\n'), ((20950, 20978), 'traceback.print_exc', 'traceback.print_exc', ([], {'limit': '(0)'}), '(limit=0)\n', (20969, 20978), False, 'import os, json, traceback, time, warnings, shutil, sys\n'), ((21817, 21828), 'time.time', 'time.time', ([], {}), '()\n', (21826, 21828), False, 'import os, json, traceback, time, warnings, shutil, sys\n'), ((22706, 22717), 'time.time', 'time.time', ([], {}), '()\n', (22715, 22717), False, 'import os, json, traceback, time, warnings, shutil, sys\n'), ((8885, 8915), 'itertools.groupby', 'itertools.groupby', (['note_events'], {}), '(note_events)\n', (8902, 8915), False, 'import itertools, copy\n'), ((14505, 14535), 'itertools.groupby', 'itertools.groupby', (['note_events'], {}), '(note_events)\n', (14522, 14535), False, 'import itertools, copy\n'), ((17230, 17245), 'encoding.pit2str', 'pit2str', (['evt[2]'], {}), '(evt[2])\n', (17237, 17245), False, 'from encoding import pit2str, pos2str, bom2str, dur2str, trk2str, ins2str, pit2alphabet\n'), ((17286, 17302), 'encoding.dur2str', 'dur2str', (['evt[-1]'], {}), '(evt[-1])\n', (17293, 17302), False, 'from encoding import pit2str, pos2str, bom2str, dur2str, trk2str, ins2str, pit2alphabet\n'), ((17346, 17362), 'encoding.trk2str', 'trk2str', (['evt[-2]'], {}), '(evt[-2])\n', (17353, 17362), False, 'from encoding import pit2str, pos2str, bom2str, dur2str, trk2str, ins2str, pit2alphabet\n'), ((17403, 17418), 'encoding.ins2str', 'ins2str', (['evt[3]'], {}), '(evt[3])\n', (17410, 17418), False, 'from encoding import pit2str, pos2str, bom2str, dur2str, trk2str, ins2str, pit2alphabet\n'), ((3868, 3897), 'itertools.groupby', 'itertools.groupby', (['drum_0_lst'], {}), '(drum_0_lst)\n', (3885, 3897), False, 'import itertools, copy\n'), ((17521, 17537), 'encoding.pos2str', 'pos2str', (['evt[-1]'], {}), '(evt[-1])\n', (17528, 17537), False, 'from encoding import pit2str, pos2str, bom2str, dur2str, trk2str, ins2str, pit2alphabet\n'), ((17777, 17793), 'encoding.bom2str', 'bom2str', (['evt[-1]'], {}), '(evt[-1])\n', (17784, 17793), False, 'from encoding import pit2str, pos2str, bom2str, dur2str, trk2str, ins2str, pit2alphabet\n')]
|
import torch
import cv2
import os
import numpy as np
from torch.utils.data import DataLoader, SubsetRandomSampler
def one_hot_encode(index, num):
vector = [0 for _ in range(num)]
vector[index] = 1
return torch.Tensor(vector)
def extract_frames(video_path, save_path, fps=5):
video_name = video_path.split('/')[-1].split('.')[0]
extracted_path = os.path.join(save_path, video_name)
if not os.path.isdir(extracted_path):
os.mkdir(extracted_path)
else:
raise IOError(f'Folder {extracted_path} already exists')
cap = cv2.VideoCapture(video_path)
video_fps = cap.get(cv2.CAP_PROP_FPS)
frame_interval = video_fps // fps
if not cap.isOpened():
raise IOError(f'Cannot read {video_path}. The file is an invalid video or does not exist.')
count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            count += 1
            if count % frame_interval == 0:
                cv2.imwrite(os.path.join(extracted_path, f'{video_name}{count:003}.jpg'), frame)
        else:
            break
    cap.release()  # release the capture handle before returning
    return extracted_path
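if __name__ == '__main__':
    # Minimal usage sketch; the video path below is hypothetical.
    print(one_hot_encode(2, num=4))  # -> tensor([0., 0., 1., 0.])
    # extract_frames('videos/clip.mp4', 'frames/', fps=5)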
|
[
"os.mkdir",
"os.path.isdir",
"cv2.VideoCapture",
"torch.Tensor",
"os.path.join"
] |
[((217, 237), 'torch.Tensor', 'torch.Tensor', (['vector'], {}), '(vector)\n', (229, 237), False, 'import torch\n'), ((367, 402), 'os.path.join', 'os.path.join', (['save_path', 'video_name'], {}), '(save_path, video_name)\n', (379, 402), False, 'import os\n'), ((565, 593), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (581, 593), False, 'import cv2\n'), ((415, 444), 'os.path.isdir', 'os.path.isdir', (['extracted_path'], {}), '(extracted_path)\n', (428, 444), False, 'import os\n'), ((454, 478), 'os.mkdir', 'os.mkdir', (['extracted_path'], {}), '(extracted_path)\n', (462, 478), False, 'import os\n'), ((990, 1050), 'os.path.join', 'os.path.join', (['extracted_path', 'f"""{video_name}{count:003}.jpg"""'], {}), "(extracted_path, f'{video_name}{count:003}.jpg')\n", (1002, 1050), False, 'import os\n')]
|
# This is used by Environment to populate its env
# Due to circular dependencies it cannot reference other parts of bldr
import toml
import os
import platform
import shutil
from pathlib import Path
def default(dotbldr_path: str) -> dict:
"""
Load the config by merging the local config on top of included deps config
The load order - Last in wins
* .bldr/brick/*/config/config.toml
* .bldr/config/config.toml
* .bldr/brick/*/config/{BLDR_ENV}.toml
* .bldr/config/{BLDR_ENV}.toml
"""
bldr_env = os.getenv('BLDR_ENV')
full_config = {
'bldr': {
}
}
bldr_path = shutil.which('bldr')
    if bldr_path is not None:
if platform.system() == 'Windows':
bldr_path = to_mingw_path(bldr_path)
full_config['bldr']['cmd'] = bldr_path
    deps_config_files = Path(dotbldr_path).glob("./brick/*/config/config.toml")
for dep_config_file in deps_config_files:
dep_env = load_if_exists(dep_config_file)
full_config.update(dep_env)
local_config = load_if_exists(f"{dotbldr_path}/config/config.toml")
full_config.update(local_config)
    if bldr_env is not None:
e_deps_config_files = Path(dotbldr_path).glob(f"./brick/*/config/{bldr_env}.toml")
for e_dep_config_file in e_deps_config_files:
e_dep_env = load_if_exists(e_dep_config_file)
full_config.update(e_dep_env)
e_env = load_if_exists(f"{dotbldr_path}/config/{bldr_env}.toml")
full_config.update(e_env)
return full_config
def load_if_exists(path_str: str) -> dict:
path = Path(path_str)
if path.exists():
return toml.load(path)
else:
return {}
def to_mingw_path(win_path: str):
# c:\some\nested\path -> /c/some/nested/path
# 012345
win_path = win_path.replace('\\','/')
return f"/{win_path[0].lower()}/{win_path[3:]}"
|
[
"shutil.which",
"pathlib.Path",
"toml.load",
"platform.system",
"os.getenv"
] |
[((550, 571), 'os.getenv', 'os.getenv', (['"""BLDR_ENV"""'], {}), "('BLDR_ENV')\n", (559, 571), False, 'import os\n'), ((665, 685), 'shutil.which', 'shutil.which', (['"""bldr"""'], {}), "('bldr')\n", (677, 685), False, 'import shutil\n'), ((1671, 1685), 'pathlib.Path', 'Path', (['path_str'], {}), '(path_str)\n', (1675, 1685), False, 'from pathlib import Path\n'), ((1725, 1740), 'toml.load', 'toml.load', (['path'], {}), '(path)\n', (1734, 1740), False, 'import toml\n'), ((727, 744), 'platform.system', 'platform.system', ([], {}), '()\n', (742, 744), False, 'import platform\n'), ((888, 906), 'pathlib.Path', 'Path', (['dotbldr_path'], {}), '(dotbldr_path)\n', (892, 906), False, 'from pathlib import Path\n'), ((1256, 1274), 'pathlib.Path', 'Path', (['dotbldr_path'], {}), '(dotbldr_path)\n', (1260, 1274), False, 'from pathlib import Path\n')]
|
from django.core.management.base import BaseCommand
from playstore_review_crawler.crawler.crawler import Crawler
from config.settings.base import (
APP_ID,
AMOUNT_REVIEWS_TO_SAVE,
REVIEWS_LANGUAGE,
REVIEWS_COUNTRY,
)
class Command(BaseCommand):
help = "Stores app reviews in the database."
def handle(self, *args, **options):
crawler = Crawler(app_id=APP_ID)
crawler.save_reviews(
amount=AMOUNT_REVIEWS_TO_SAVE,
language=REVIEWS_LANGUAGE,
country=REVIEWS_COUNTRY,
)
|
[
"playstore_review_crawler.crawler.crawler.Crawler"
] |
[((372, 394), 'playstore_review_crawler.crawler.crawler.Crawler', 'Crawler', ([], {'app_id': 'APP_ID'}), '(app_id=APP_ID)\n', (379, 394), False, 'from playstore_review_crawler.crawler.crawler import Crawler\n')]
|
#!/usr/bin/env python
"""A script to scrape items from an Amazon wishlist. The script only works for
wishlists which are "Public". You can change the settings by following the
instruction in:
http://www.amazon.com/gp/help/customer/display.html?nodeId=501094
Copyright 2014 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
from collections import namedtuple
import logging
import os
import sys
import urlparse
import requests
from lxml import html
from sorno import loggingutil
from sorno import consoleutil
_LOG = logging.getLogger(__name__)
_PLAIN_LOGGER = None # will be created in main()
_PLAIN_ERROR_LOGGER = None # will be created in main()
Item = namedtuple('Item', 'id title url')
class App(object):
WISHLIST_PAGE_TEMPLATE = (
"https://www.amazon.com/gp/registry/wishlist"
+ "/{wishlist_id}/?page={page_number}"
)
HEADERS = {
'user-agent': 'Mozilla/5.0 ;Windows NT 6.1; WOW64; Trident/7.0; rv:11.0; like Gecko',
}
def __init__(self, wishlist_id):
self.wishlist_id = wishlist_id
def run(self):
# stores the id's of items
seen_items = set()
page_number = 1
item_number = 1
num_of_empty_page_reached = 0
while True:
items = self.get_items_from_page_num(page_number)
rows = []
for item in items:
if item.id in seen_items:
_LOG.debug("Seen title %s, skip it", item.title)
else:
seen_items.add(item.id)
rows.append(
{
'no.': str(item_number),
'title': item.title,
'url': item.url if item.url else "",
}
)
item_number += 1
num_of_empty_page_reached = 0
if not rows:
num_of_empty_page_reached += 1
if num_of_empty_page_reached >= 3:
# All items are seen in the fetch, so we are done
                    # Sometimes amazon returns 0 items even when we haven't reached
# to the end, so give it a few trials
break
else:
continue
data_printer = consoleutil.DataPrinter(
rows,
headers=('no.', 'title', 'url'),
delimiter='\t',
print_func=_PLAIN_LOGGER.info,
)
data_printer.print_result(
style=consoleutil.DataPrinter.PRINT_STYLE_PLAIN
)
page_number += 1
def get_items_from_page_num(self, num):
url = self.WISHLIST_PAGE_TEMPLATE.format(
wishlist_id=self.wishlist_id,
page_number=num,
)
_LOG.debug("Fetch from: %s", url)
        wishlist_page = requests.get(url, headers=self.HEADERS)
wishlist_page_html = wishlist_page.text
_PLAIN_ERROR_LOGGER.debug(wishlist_page_html)
tree = html.fromstring(wishlist_page_html)
all_h5_nodes = tree.xpath("//div[@class='a-row a-size-small']/h5")
items = []
for h5_node in all_h5_nodes:
try:
item = self._get_item_from_idea_h5_node(h5_node)
if not item:
item = self._get_item_from_amazon_item_h5_node(h5_node)
if item:
items.append(item)
else:
_LOG.warn("Fail to retrieve an item for snippet")
_PLAIN_ERROR_LOGGER.warn("===== Start of snippet =====")
_PLAIN_ERROR_LOGGER.warn(html.tostring(h5_node))
_PLAIN_ERROR_LOGGER.warn("===== End of snippet =====")
except ValueError as ex:
_LOG.exception("Fail to retrieve an item: %s", ex)
_PLAIN_ERROR_LOGGER.warn("===== Start of snippet =====")
_PLAIN_ERROR_LOGGER.warn(html.tostring(h5_node))
_PLAIN_ERROR_LOGGER.warn("===== End of snippet =====")
return items
def _get_item_from_idea_h5_node(self, h5_node):
"""
Gets the item in a H5 html node that contains an Idea. Returns
None if an Idea cannot be found.
The H5 html node supposes to be like the following, "{param}" denotes
the parameters of the item:
<h5>
...
<span id="itemName_{item id}">{item title}</span>
...
</h5>
"""
span_nodes = h5_node.xpath(
".//span[contains(@id, 'itemName_')]"
)
if not span_nodes:
return None
span_node = span_nodes[0]
item_title = self.get_text_from_element(span_node)
item_id = span_node.attrib['id'].split('itemName_')[1]
return Item(id=item_id, title=item_title, url=None)
def _get_item_from_amazon_item_h5_node(self, h5_node):
"""
Gets the item in a H5 html node that contains an Amazon item. Returns
None if an Amazon item cannot be found. An Amazon item is an item in
wishlish that is sold in Amazon.
The H5 html node supposes to be like the following, "{param}" denotes
the parameters of the item:
<h5>
...
<a id="itemName_{item id}" href="{item url}">{item title}</a>
...
</h5>
"""
anchor_nodes = h5_node.xpath(".//a[contains(@id, 'itemName_')]")
if anchor_nodes:
# This is an Amazon item node
anchor_node = anchor_nodes[0]
item_url = "http://www.amazon.com" + anchor_node.attrib['href']
item_title = self.get_text_from_element(anchor_node).strip()
item_id = anchor_node.attrib['id'].split('itemName_')[1]
return Item(id=item_id, title=item_title, url=item_url)
return None
def same_item_lists(self, prev_items, items):
if prev_items is None or len(prev_items) != len(items):
return False
for prev, cur in zip(prev_items, items):
prev_query = urlparse.urlparse(prev.attrib['href']).query
cur_query = urlparse.urlparse(cur.attrib['href']).query
if prev_query != cur_query:
return False
return True
def get_text_from_element(self, node):
"""
Return a plain text representation of an html node.
"""
text_segments = []
self._collect_text_from_element(node, text_segments)
return "".join(text_segments)
def _collect_text_from_element(self, node, text_segments):
"""
Collect text from node and all its children recursively and put into
text_segments as a list of strings.
"""
if node.tag.lower() == "br":
text_segments.append(os.linesep)
if node.text:
text_segments.append(node.text)
for child in node:
self._collect_text_from_element(child, text_segments)
if node.tail:
text_segments.append(node.tail)
def parse_args(cmd_args):
description = """
A script to scrape items from an Amazon wishlist. The script only
works for wishlists which are "Public". You can change the settings by
following the instruction in:
http://www.amazon.com/gp/help/customer/display.html?nodeId=501094
"""
parser = argparse.ArgumentParser(
description=description,
)
parser.add_argument(
"--debug",
action="store_true",
)
parser.add_argument(
"wishlist_id",
help="When you look at the URL of your wishlist, it's something like"
+ " https://www.amazon.com/gp/registry/wishlist/<wishlist id>/ref=cm_wl_list_o_0?"
+ ", so just copy the wishlist id for this argument",
)
args = parser.parse_args(cmd_args)
return args
def main():
global _PLAIN_LOGGER, _PLAIN_ERROR_LOGGER
args = parse_args(sys.argv[1:])
loggingutil.setup_logger(_LOG, debug=args.debug)
_PLAIN_LOGGER = loggingutil.create_plain_logger("PLAIN")
_PLAIN_ERROR_LOGGER = loggingutil.create_plain_logger(
"PLAIN_ERROR",
stdout=False,
)
app = App(args.wishlist_id)
app.run()
if __name__ == '__main__':
main()
|
[
"sorno.loggingutil.setup_logger",
"lxml.html.tostring",
"argparse.ArgumentParser",
"sorno.consoleutil.DataPrinter",
"lxml.html.fromstring",
"urlparse.urlparse",
"collections.namedtuple",
"requests.get",
"sorno.loggingutil.create_plain_logger",
"logging.getLogger"
] |
[((1212, 1239), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1229, 1239), False, 'import logging\n'), ((1354, 1388), 'collections.namedtuple', 'namedtuple', (['"""Item"""', '"""id title url"""'], {}), "('Item', 'id title url')\n", (1364, 1388), False, 'from collections import namedtuple\n'), ((8117, 8165), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (8140, 8165), False, 'import argparse\n'), ((8703, 8751), 'sorno.loggingutil.setup_logger', 'loggingutil.setup_logger', (['_LOG'], {'debug': 'args.debug'}), '(_LOG, debug=args.debug)\n', (8727, 8751), False, 'from sorno import loggingutil\n'), ((8772, 8812), 'sorno.loggingutil.create_plain_logger', 'loggingutil.create_plain_logger', (['"""PLAIN"""'], {}), "('PLAIN')\n", (8803, 8812), False, 'from sorno import loggingutil\n'), ((8839, 8899), 'sorno.loggingutil.create_plain_logger', 'loggingutil.create_plain_logger', (['"""PLAIN_ERROR"""'], {'stdout': '(False)'}), "('PLAIN_ERROR', stdout=False)\n", (8870, 8899), False, 'from sorno import loggingutil\n'), ((3600, 3617), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3612, 3617), False, 'import requests\n'), ((3736, 3771), 'lxml.html.fromstring', 'html.fromstring', (['wishlist_page_html'], {}), '(wishlist_page_html)\n', (3751, 3771), False, 'from lxml import html\n'), ((3020, 3134), 'sorno.consoleutil.DataPrinter', 'consoleutil.DataPrinter', (['rows'], {'headers': "('no.', 'title', 'url')", 'delimiter': '"""\t"""', 'print_func': '_PLAIN_LOGGER.info'}), "(rows, headers=('no.', 'title', 'url'), delimiter=\n '\\t', print_func=_PLAIN_LOGGER.info)\n", (3043, 3134), False, 'from sorno import consoleutil\n'), ((6805, 6843), 'urlparse.urlparse', 'urlparse.urlparse', (["prev.attrib['href']"], {}), "(prev.attrib['href'])\n", (6822, 6843), False, 'import urlparse\n'), ((6874, 6911), 'urlparse.urlparse', 'urlparse.urlparse', (["cur.attrib['href']"], {}), "(cur.attrib['href'])\n", (6891, 6911), False, 'import urlparse\n'), ((4370, 4392), 'lxml.html.tostring', 'html.tostring', (['h5_node'], {}), '(h5_node)\n', (4383, 4392), False, 'from lxml import html\n'), ((4687, 4709), 'lxml.html.tostring', 'html.tostring', (['h5_node'], {}), '(h5_node)\n', (4700, 4709), False, 'from lxml import html\n')]
|
import cupy as cp
from SpaceSim.BackEndSources.DataStructures import MathList, TypeCounter
from SpaceSim.BackEndSources.Utils import TableToText
class _Component:
def __init__(self, *Modules):
self.Name = 'NONE'
self.Types = []
self.Stats = {}
self.Define('Armor', 0)
self.Define('Endurance', 0)
self.Define('Shielding', MathList(0, 0))
self.Define('Energy', 0)
self.Define('Power', 0)
self.Define('Tick', 0)
self.Define('Tock', 0)
self.Define('Energy Storage', 0)
self.Define('Cargo Storage', 0)
self.Define('Bandwidth', 0)
self.Define('Mass', 0)
self.Define('Thrust', 0)
self.Define('Ammunition', TypeCounter())
ModuleCounter = {}
for Item in Modules:
self = self + Item
if Item.Name not in ModuleCounter:
ModuleCounter[Item.Name] = 0
ModuleCounter[Item.Name] += 1
if len(Modules) > 0:
            self.Name = '{} ({})'.format(self.Name, ', '.join(map(lambda Key: '{}x{}'.format(ModuleCounter[Key], Key), ModuleCounter))).replace('(, ', '(')
def Define(self, Stat, Value=None):
        if Value is not None:
self.Stats[Stat] = Value
def Finish(self):
for Ammo in self.Stats['Ammunition']:
self.Stats['Mass'] += Ammo().Mass * self.Stats['Ammunition'][Ammo]
def __add__(self, Other):
for Key in self.Stats:
if type(self.Stats[Key]) == dict:
for SubKey in Other.Stats[Key]:
if SubKey in self.Stats[Key]:
self.Stats[Key][SubKey] += Other.Stats[Key][SubKey]
else:
self.Stats[Key][SubKey] = Other.Stats[Key][SubKey]
else:
self.Stats[Key] = self.Stats[Key] + Other.Stats[Key]
return self
def __repr__(self):
Output = ''
Output += '{}\n'.format(self.Name)
Output += TableToText(self.Stats) + '\n'
for Ammo in self.Stats['Ammunition']:
Output += str(Ammo()) + '\n'
return Output.rstrip()
|
[
"SpaceSim.BackEndSources.DataStructures.MathList",
"SpaceSim.BackEndSources.DataStructures.TypeCounter",
"SpaceSim.BackEndSources.Utils.TableToText"
] |
[((374, 388), 'SpaceSim.BackEndSources.DataStructures.MathList', 'MathList', (['(0)', '(0)'], {}), '(0, 0)\n', (382, 388), False, 'from SpaceSim.BackEndSources.DataStructures import MathList, TypeCounter\n'), ((732, 745), 'SpaceSim.BackEndSources.DataStructures.TypeCounter', 'TypeCounter', ([], {}), '()\n', (743, 745), False, 'from SpaceSim.BackEndSources.DataStructures import MathList, TypeCounter\n'), ((1994, 2017), 'SpaceSim.BackEndSources.Utils.TableToText', 'TableToText', (['self.Stats'], {}), '(self.Stats)\n', (2005, 2017), False, 'from SpaceSim.BackEndSources.Utils import TableToText\n')]
|
# -*- coding: utf-8 -*-
from pywechat.services.wechat_shake import ShakeService
from pywechat.services.wechat_card import CardService
from pywechat.excepts import CodeBuildError
class WechatService(object):
"""This class is a role of factory.
Attributes:
app_id: the app id of a wechat account.
app_secret: the app secret of a wechat account.
"""
def __init__(self, app_id, app_secret):
"""Initializes the class."""
self.__app_id = app_id
self.__app_secret = app_secret
def init_service(self, service_name):
"""Init the service of wechat by service_name.
Args:
service_name: the name of wechat's service.
Returns:
the service of wechat
        Raises:
SystemError
"""
services = {
'Shake': ShakeService,
'Card': CardService
}
        if service_name not in services:
raise CodeBuildError('Service name wrong')
return services[service_name](self.__app_id, self.__app_secret)
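# Usage sketch (credentials below are placeholders, not real values):
# service = WechatService('my_app_id', 'my_app_secret').init_service('Shake')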
|
[
"pywechat.excepts.CodeBuildError"
] |
[((970, 1006), 'pywechat.excepts.CodeBuildError', 'CodeBuildError', (['"""Service name wrong"""'], {}), "('Service name wrong')\n", (984, 1006), False, 'from pywechat.excepts import CodeBuildError\n')]
|
import copy
import logging
from disco.extensions.pydss_simulation.pydss_configuration import \
PyDssConfiguration
from disco.extensions.pydss_simulation.pydss_inputs import PyDssInputs
from disco.pydss.common import ConfigType
from jade.utils.utils import load_data
logger = logging.getLogger(__name__)
def auto_config(inputs, **kwargs):
"""Create a configuration from all available inputs."""
if isinstance(inputs, str):
inputs = PyDssInputs(inputs)
config = PyDssConfiguration(inputs, **kwargs)
for job in config.inputs.iter_jobs():
config.add_job(job)
#exports = load_data(exports_filename)
#config.set_pydss_config(ConfigType.EXPORTS, exports)
return config
|
[
"disco.extensions.pydss_simulation.pydss_configuration.PyDssConfiguration",
"disco.extensions.pydss_simulation.pydss_inputs.PyDssInputs",
"logging.getLogger"
] |
[((283, 310), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (300, 310), False, 'import logging\n'), ((490, 526), 'disco.extensions.pydss_simulation.pydss_configuration.PyDssConfiguration', 'PyDssConfiguration', (['inputs'], {}), '(inputs, **kwargs)\n', (508, 526), False, 'from disco.extensions.pydss_simulation.pydss_configuration import PyDssConfiguration\n'), ((457, 476), 'disco.extensions.pydss_simulation.pydss_inputs.PyDssInputs', 'PyDssInputs', (['inputs'], {}), '(inputs)\n', (468, 476), False, 'from disco.extensions.pydss_simulation.pydss_inputs import PyDssInputs\n')]
|
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
import os
import sys
import platform
kratos_benchmarking_path = '../../../benchmarking'
sys.path.append(kratos_benchmarking_path)
swimming_dem_scripts_path = 'hydrodynamic_forces'
sys.path.append(swimming_dem_scripts_path)
import benchmarking
os.chdir(swimming_dem_scripts_path)
def Run():
print("\nStarting swimming_DEM Benchmarks..............\n")
Text=""
if platform.system()=="Windows":
os.system("python hydrodynamic_forces.py " + " > BenchTemp.txt")
else:
if sys.version_info >= (3, 0):
os.system("python3 hydrodynamic_forces.py " + " > BenchTemp.txt")
else:
os.system("python -3 hydrodynamic_forces.py " + " > BenchTemp.txt")
os.remove("BenchTemp.txt")
f = open("hydrodynamic_forces.txt")
file_contents = f.read()
f.close()
Text += file_contents.rstrip("\n")
Text += "\n\n\n"
return Text
if __name__ == '__main__':
print(Run())
|
[
"sys.path.append",
"os.remove",
"os.system",
"platform.system",
"os.chdir"
] |
[((224, 265), 'sys.path.append', 'sys.path.append', (['kratos_benchmarking_path'], {}), '(kratos_benchmarking_path)\n', (239, 265), False, 'import sys\n'), ((316, 358), 'sys.path.append', 'sys.path.append', (['swimming_dem_scripts_path'], {}), '(swimming_dem_scripts_path)\n', (331, 358), False, 'import sys\n'), ((379, 414), 'os.chdir', 'os.chdir', (['swimming_dem_scripts_path'], {}), '(swimming_dem_scripts_path)\n', (387, 414), False, 'import os\n'), ((885, 911), 'os.remove', 'os.remove', (['"""BenchTemp.txt"""'], {}), "('BenchTemp.txt')\n", (894, 911), False, 'import os\n'), ((540, 557), 'platform.system', 'platform.system', ([], {}), '()\n', (555, 557), False, 'import platform\n'), ((578, 642), 'os.system', 'os.system', (["('python hydrodynamic_forces.py ' + ' > BenchTemp.txt')"], {}), "('python hydrodynamic_forces.py ' + ' > BenchTemp.txt')\n", (587, 642), False, 'import os\n'), ((704, 769), 'os.system', 'os.system', (["('python3 hydrodynamic_forces.py ' + ' > BenchTemp.txt')"], {}), "('python3 hydrodynamic_forces.py ' + ' > BenchTemp.txt')\n", (713, 769), False, 'import os\n'), ((796, 863), 'os.system', 'os.system', (["('python -3 hydrodynamic_forces.py ' + ' > BenchTemp.txt')"], {}), "('python -3 hydrodynamic_forces.py ' + ' > BenchTemp.txt')\n", (805, 863), False, 'import os\n')]
|
"""
User classes & helpers
~~~~~~~~~~~~~~~~~~~~~~
"""
import os
import json
import binascii
import hashlib
import sqlite3
from functools import wraps
from flask import current_app
from flask_login import current_user
from config import USER_DIR
class UserManager(object):
"""A very simple user Manager, that saves it's data as json."""
def __init__(self, path):
self.file = os.path.join(USER_DIR, 'users.json')
self.dbConnection = sqlite3.connect(USER_DIR + '/Users.sqlite')
def read(self):
if not os.path.exists(self.file):
return {}
with open(self.file) as f:
data = json.loads(f.read())
return data
def write(self, data):
with open(self.file, 'w') as f:
f.write(json.dumps(data, indent=2))
def add_user(self, name, password,
active=True, roles=[], authentication_method=None):
users = self.read()
if authentication_method is None:
authentication_method = get_default_authentication_method()
"""
This is the Only information that is stored in users.json now. The passwords are only in the database now.
Removing this information from the JSON and putting it in the database was causing a lot of errors with Jinja, so I kept
this Json here to keep the login functionality working. Ideally everything would just be in the database, but then the
entire login system would have to be completely overhauled, so I left it as is for simplicity.
"""
new_user = {
'active': active,
'roles': roles,
'authenticated': False
}
users[name] = new_user
self.write(users)
userdata = users.get(name)
"""
This opens a connection to the database, and inserts the new user.
The new user is not inserted into the database if someone has the same
username.
"""
        try:
            dbCur = self.dbConnection.cursor()
            dbCur.execute("""
            INSERT INTO users (username,password)
            VALUES( (?) , (?));
            """, (name, password))
            self.dbConnection.commit()
            dbCur.close()
            dbCon = sqlite3.connect(USER_DIR + '/Users.sqlite')
            dbCur = dbCon.cursor()
            # query parameters must be a sequence, hence the one-element tuple
            dbCur.execute("SELECT username FROM users WHERE username = ?", (name,))
            userdata = dbCur.fetchone()
            return User(self, name, userdata)
        except sqlite3.Error:
            # e.g. the username already exists
            return None
def get_user(self, name):
users = self.read()
userdata = users.get(name)
if not userdata:
return None
return User(self, name, userdata)
def delete_user(self, name):
dbCur = self.dbConnection.cursor()
dbCur.execute("""
DELETE FROM users
WHERE username = ?
""", (name,))
self.dbConnection.commit()
dbCur.close()
def update(self, name, userdata):
data = self.read()
data[name] = userdata
self.write(data)
class User(object):
def __init__(self, manager, name, data):
self.manager = manager
self.name = name
self.data = data
def get(self, option):
return self.data.get(option)
def set(self, option, value):
self.data[option] = value
self.save()
def save(self):
self.manager.update(self.name, self.data)
def is_authenticated(self):
return self.data.get('authenticated')
def is_active(self):
return self.data.get('active')
def is_anonymous(self):
return False
def get_id(self):
return self.name
"""Not Used"""
def check_password(self, password):
"""Return True, return False, or raise NotImplementedError if the
authentication_method is missing or unknown."""
authentication_method = self.data.get('authentication_method', None)
if authentication_method is None:
authentication_method = get_default_authentication_method()
# See comment in UserManager.add_user about authentication_method.
if authentication_method == 'hash':
result = check_hashed_password(password, self.get('hash'))
elif authentication_method == 'cleartext':
result = (self.get('password') == password)
else:
raise NotImplementedError(authentication_method)
return result
def get_default_authentication_method():
return current_app.config.get('DEFAULT_AUTHENTICATION_METHOD', 'cleartext')
def make_salted_hash(password, salt=None):
    if not salt:
        salt = os.urandom(64)
    d = hashlib.sha512()
    d.update(salt[:32])
    d.update(password.encode('utf-8'))  # hashlib digests bytes, not str
    d.update(salt[32:])
    return binascii.hexlify(salt).decode('ascii') + d.hexdigest()
def check_hashed_password(password, salted_hash):
salt = binascii.unhexlify(salted_hash[:128])
return make_salted_hash(password, salt) == salted_hash
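# Round-trip sketch of the scheme above (the password is illustrative): the
# first 128 hex characters of a salted hash store the 64-byte salt, and the
# remainder is the SHA-512 digest, so verification re-hashes the password
# with the recovered salt.
def _demo_salted_hash_roundtrip():
    salted = make_salted_hash('hunter2')
    return check_hashed_password('hunter2', salted)  # True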
def protect(f):
@wraps(f)
def wrapper(*args, **kwargs):
if current_app.config.get('PRIVATE') and not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
return f(*args, **kwargs)
return wrapper
|
[
"flask.current_app.login_manager.unauthorized",
"flask.current_app.config.get",
"binascii.hexlify",
"os.path.exists",
"json.dumps",
"binascii.unhexlify",
"sqlite3.connect",
"functools.wraps",
"hashlib.sha512",
"os.path.join",
"os.urandom"
] |
[((4557, 4625), 'flask.current_app.config.get', 'current_app.config.get', (['"""DEFAULT_AUTHENTICATION_METHOD"""', '"""cleartext"""'], {}), "('DEFAULT_AUTHENTICATION_METHOD', 'cleartext')\n", (4579, 4625), False, 'from flask import current_app\n'), ((4726, 4742), 'hashlib.sha512', 'hashlib.sha512', ([], {}), '()\n', (4740, 4742), False, 'import hashlib\n'), ((4927, 4964), 'binascii.unhexlify', 'binascii.unhexlify', (['salted_hash[:128]'], {}), '(salted_hash[:128])\n', (4945, 4964), False, 'import binascii\n'), ((5047, 5055), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (5052, 5055), False, 'from functools import wraps\n'), ((404, 440), 'os.path.join', 'os.path.join', (['USER_DIR', '"""users.json"""'], {}), "(USER_DIR, 'users.json')\n", (416, 440), False, 'import os\n'), ((469, 512), 'sqlite3.connect', 'sqlite3.connect', (["(USER_DIR + '/Users.sqlite')"], {}), "(USER_DIR + '/Users.sqlite')\n", (484, 512), False, 'import sqlite3\n'), ((4703, 4717), 'os.urandom', 'os.urandom', (['(64)'], {}), '(64)\n', (4713, 4717), False, 'import os\n'), ((4825, 4847), 'binascii.hexlify', 'binascii.hexlify', (['salt'], {}), '(salt)\n', (4841, 4847), False, 'import binascii\n'), ((549, 574), 'os.path.exists', 'os.path.exists', (['self.file'], {}), '(self.file)\n', (563, 574), False, 'import os\n'), ((2268, 2311), 'sqlite3.connect', 'sqlite3.connect', (["(USER_DIR + '/Users.sqlite')"], {}), "(USER_DIR + '/Users.sqlite')\n", (2283, 2311), False, 'import sqlite3\n'), ((5101, 5134), 'flask.current_app.config.get', 'current_app.config.get', (['"""PRIVATE"""'], {}), "('PRIVATE')\n", (5123, 5134), False, 'from flask import current_app\n'), ((5193, 5233), 'flask.current_app.login_manager.unauthorized', 'current_app.login_manager.unauthorized', ([], {}), '()\n', (5231, 5233), False, 'from flask import current_app\n'), ((781, 807), 'json.dumps', 'json.dumps', (['data'], {'indent': '(2)'}), '(data, indent=2)\n', (791, 807), False, 'import json\n')]
|
from __future__ import division
import sys
from mmtbx.validation.molprobity import mp_geo
if __name__ == "__main__":
mp_geo.run(sys.argv[1:])
|
[
"mmtbx.validation.molprobity.mp_geo.run"
] |
[((121, 145), 'mmtbx.validation.molprobity.mp_geo.run', 'mp_geo.run', (['sys.argv[1:]'], {}), '(sys.argv[1:])\n', (131, 145), False, 'from mmtbx.validation.molprobity import mp_geo\n')]
|
# -*- coding: UTF-8 -*-
# Copyright 2009-2016 <NAME>
# License: BSD (see file COPYING for details)
"""Adds the default Lino user interface based on ExtJS.
It is being automatically included by every Lino application unless
you disable it (e.g. by overriding your :meth:`get_apps_modifiers
<lino.core.site.Site.get_apps_modifiers>` or your
:meth:`get_installed_apps <lino.core.site.Site.get_installed_apps>`
method).
When your Lino application uses the ExtJS user interface, then you may
need a `commercial license from Sencha
<https://www.sencha.com/store/extjs/>`__. Summary without warranty of
correctness: you need a commercial license if (1) your application is
not available under the GPL **and** (2) your site is used by other
people than the employees of the company who is the copyright holder of
your application.
.. autosummary::
:toctree:
views
ext_renderer
"""
from __future__ import unicode_literals
from __future__ import print_function
from lino.api.ad import Plugin
from django.utils.translation import ugettext_lazy as _
# raise Exception("20160528")
class Plugin(Plugin):
"""Extends :class:`lino.core.plugin.Plugin`.
"""
needs_plugins = ['lino.modlib.bootstrap3']
enter_submits_form = False
"""Whether the :kbd:`ENTER` key (or :kbd:`CTRL+ENTER` when in a
textarea field) should submit the form.
The default is `False`. For sites that were in production before
October 2015, we recommend to explain to the users that a simple
:kbd:`ENTER` no longer submits a form, and that :kbd:`Ctrl-S` is
the preferred keyboard shortcut for submitting a
form. Alternatively for backwards compatibility you can set it to
`True` using something like this::
def setup_plugins(self):
super(Site, self).setup_plugins()
if self.is_installed('extjs'):
self.plugins.extjs.configure(enter_submits_form=False)
When you set this to `True` :xfile:`linoweb.js` adds a special
mapping for :kbd:`ENTER`. The problem then is that the
:kbd:`ENTER` key won't work in a plain textarea field because we
didn't find a way to restore the default behaviour.
"""
ui_label = _("Admin")
use_statusbar = False
"""
Whether to use a status bar to display certain messages to the user.
Default is `False` since currently this is not really useful.
"""
url_prefix = "ext"
media_name = 'ext-3.3.1'
# media_base_url = "http://extjs-public.googlecode.com/" + \
# "svn/tags/extjs-3.3.1/release/"
"""The URL from where to include the ExtJS library files.
The default value points to the `extjs-public
<http://code.google.com/p/extjs-public/>`_ repository and thus
requires the clients to have an internet connection. This
relieves newcomers from the burden of having to specify a download
location in their :xfile:`settings.py`.
On a production site you'll probably want to download and serve
these files yourself by setting this to `None` and setting
:attr:`extjs_root` (or a symbolic link "extjs" in your
:xfile:`media` directory) to point to the local directory where
ExtJS 3.3.1 is installed).
"""
autorefresh_seconds = 0
# autorefresh_seconds = 60
"""Number of seconds to wait between two refreshes when autorefresh is
activated. Default is 60. Set this to 0 in order to deactivate
the autorefresh button.
"""
media_root = None
"""
Path to the ExtJS root directory. Only used when
:attr:`media_base_url` is None, and when the `media` directory has
no symbolic link named `extjs` pointing to the ExtJS root
directory.
"""
ui_handle_attr_name = 'extjs_handle'
def on_ui_init(self, kernel):
# logger.info("20140227 extjs.Plugin.on_ui_init() a")
from .ext_renderer import ExtRenderer
self.renderer = ExtRenderer(self)
kernel.extjs_renderer = self.renderer
# added 20160329
for fl in self.renderer.param_panels:
fl.get_layout_handle(self)
# logger.info("20140227 extjs.Plugin.on_ui_init() b")
def get_row_edit_lines(self, e, panel):
from lino.core.elems import (
GridElement, HtmlBoxElement, FieldElement, form_field_name)
from lino.core import constants
master_field = panel.layout_handle.layout._datasource.master_field
if isinstance(e, GridElement):
yield "%s.on_master_changed();" % e.as_ext()
elif isinstance(e, HtmlBoxElement):
yield "%s.refresh();" % e.as_ext()
elif isinstance(e, FieldElement):
if not panel.layout_handle.layout.editable:
return
holder = panel.layout_handle.layout.get_chooser_holder()
chooser = holder.get_chooser_for_field(e.field.name)
if not chooser:
return
for f in chooser.context_fields:
if master_field and master_field.name == f.name:
yield "var bp = this.get_base_params();"
yield "%s.setContextValue('%s',bp['%s']);" % (
e.as_ext(), constants.URL_PARAM_MASTER_PK,
constants.URL_PARAM_MASTER_PK)
yield "%s.setContextValue('%s',bp['%s']);" % (
e.as_ext(), constants.URL_PARAM_MASTER_TYPE,
constants.URL_PARAM_MASTER_TYPE)
else:
yield (
"%s.setContextValue('%s', record ? record."
"data['%s'] : undefined);" % (
e.as_ext(), f.name, form_field_name(f)))
def get_css_includes(self, site):
yield self.build_lib_url('resources/css/ext-all.css')
def get_js_includes(self, settings, language):
return []
def get_head_lines(self, site, request):
yield "<style>"
from lino.core.constants import ICON_NAMES
tpl = ".x-tbar-{0}{{ background-image: url({1}) !important; }}"
for n in ICON_NAMES:
url = site.build_static_url('images', 'mjames', n + '.png')
yield tpl.format(n, url)
yield """
.x-tbar-done{ background-image: url(/static/images/mjames/accept.png) !important; }
.x-tbar-parameters{ background-image: url(/static/images/mjames/database_gear.png) !important; }
"""
yield "</style>"
def get_used_libs(self, html=False):
if html is not None:
# version = '<script type="text/javascript">\
# document.write(Ext.version);</script>'
onclick = "alert('ExtJS client version is ' + Ext.version);"
tip = "Click to see ExtJS client version"
text = "(version)"
version = html.a(text, href='#', onclick=onclick, title=tip)
yield ("ExtJS", version, "http://www.sencha.com")
yield ("Silk Icons", '1.3',
"http://www.famfamfam.com/lab/icons/silk/")
def get_index_view(self):
from . import views
return views.AdminIndex.as_view()
def get_patterns(self):
from django.conf import settings
from django.conf.urls import url # patterns
from . import views
self.renderer.build_site_cache()
rx = '^'
urlpatterns = [
# url(rx + '/?$', views.AdminIndex.as_view()),
url(rx + '$', views.AdminIndex.as_view()),
url(rx + r'api/main_html$', views.MainHtml.as_view()),
# url(rx + r'auth$', views.Authenticate.as_view()),
url(rx + r'grid_config/(?P<app_label>\w+)/(?P<actor>\w+)$',
views.GridConfig.as_view()),
url(rx + r'api/(?P<app_label>\w+)/(?P<actor>\w+)$',
views.ApiList.as_view()),
url(rx + r'api/(?P<app_label>\w+)/(?P<actor>\w+)/(?P<pk>.+)$',
views.ApiElement.as_view()),
url(rx + r'restful/(?P<app_label>\w+)/(?P<actor>\w+)$',
views.Restful.as_view()),
url(rx + r'restful/(?P<app_label>\w+)/(?P<actor>\w+)/(?P<pk>.+)$',
views.Restful.as_view()),
url(rx + r'choices/(?P<app_label>\w+)/(?P<rptname>\w+)$',
views.Choices.as_view()),
            url(rx + r'choices/(?P<app_label>\w+)/(?P<rptname>\w+)/'
                r'(?P<fldname>\w+)$',
                views.Choices.as_view()),
            url(rx + r'apchoices/(?P<app_label>\w+)/(?P<actor>\w+)/'
                r'(?P<an>\w+)/(?P<field>\w+)$',
                views.ActionParamChoices.as_view()),
            # the thread_id can be a negative number:
            url(rx + r'callbacks/(?P<thread_id>[\-0-9a-zA-Z]+)/'
                r'(?P<button_id>\w+)$',
                views.Callbacks.as_view())
]
if settings.SITE.use_eid_applet:
urlpatterns.append(
url(rx + r'eid-applet-service$',
views.EidAppletService.as_view()))
if settings.SITE.use_jasmine:
urlpatterns.append(
url(rx + r'run-jasmine$', views.RunJasmine.as_view()))
return urlpatterns
|
[
"django.utils.translation.ugettext_lazy",
"lino.core.elems.form_field_name"
] |
[((2201, 2211), 'django.utils.translation.ugettext_lazy', '_', (['"""Admin"""'], {}), "('Admin')\n", (2202, 2211), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5698, 5716), 'lino.core.elems.form_field_name', 'form_field_name', (['f'], {}), '(f)\n', (5713, 5716), False, 'from lino.core.elems import GridElement, HtmlBoxElement, FieldElement, form_field_name\n')]
|
import pandas as pd
import os.path
import csv
clair = '/Users/malcolmorian/Documents/Bioinformatics/Projects2021/Guppy3Guppy5/NOSC/nosc_clair/2022.01.02/clair_vcfData'
pepper = '/Users/malcolmorian/Documents/Bioinformatics/Projects2021/Guppy3Guppy5/NOSC/nosc_pepper/2022.01.02/pepper_vcfData'
gatk = '/Users/malcolmorian/Documents/Bioinformatics/Projects2021/Guppy3Guppy5/NOSC/nosc_gatk/2022.01.05/gatk_vcfData'
venned ='/Users/malcolmorian/Documents/Bioinformatics/Projects2021/Guppy3Guppy5/NOSC/vennTextFiles'
# os.mkdir(venned)
def write(data_df, toWrite):
    # print(toWrite)
    # if os.path.exists(toWrite):
    #     print('NOT WRITING ....THE FILE ALREADY EXISTS')
    # to_csv opens and writes the target path itself
    data_df.to_csv(toWrite, sep=' ', index=False, header=False)
def read_file(directory):
files = os.listdir(directory)
print(files)
for file in files:
if file.endswith('.xlsx'):
path = f'{directory}/{file}'
print(path)
data_df = pd.read_excel(path, engine='openpyxl')
data_df = data_df[['#CHROM','POS']]
write(data_df, f'{venned}/{file}.csv')
else:
print('NOT THIS ONE!!!!!!!!!!!')
continue
read_file(clair)
read_file(gatk)
read_file(pepper)
|
[
"pandas.read_excel"
] |
[((1013, 1051), 'pandas.read_excel', 'pd.read_excel', (['path'], {'engine': '"""openpyxl"""'}), "(path, engine='openpyxl')\n", (1026, 1051), True, 'import pandas as pd\n')]
|
"""
Script containing various utilities related to data processing and cleaning. Includes tokenization,
text cleaning, feature extractor (token type IDs & attention masks) for BERT, and IMDBDataset.
"""
import logging
import torch
from torch.utils.data import Dataset
import os
import pickle
import re
import numpy as np
from tqdm import trange
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
# Setup stopwords list & word (noun, adjective, and verb) lemmatizer
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
def clean_text(text):
"""Function to clean text using RegEx operations, removal of stopwords, and lemmatization."""
    text = re.sub(r'[^\w\s]', '', text, flags=re.UNICODE)
text = text.lower()
text = [lemmatizer.lemmatize(token) for token in text.split(' ')]
text = [lemmatizer.lemmatize(token, 'v') for token in text]
text = [word for word in text if word not in stop_words]
text = ' '.join(text)
text = text.lstrip().rstrip()
text = re.sub(' +', ' ', text)
return text
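# For example (assuming the standard NLTK wordnet and stopwords data):
#     clean_text("Dogs are running!!")  ->  "dog run"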
def tokenize_and_encode(text, tokenizer, apply_cleaning=False, max_tokenization_length=512,
truncation_method='head-only', split_head_density=0.5):
"""
Function to tokenize & encode a given text.
@param (str) text: a sequence of words to be tokenized in raw string format
@param (pytorch_transformers.BertTokenizer) tokenizer: tokenizer with pre-figured mappings
@param (bool) apply_cleaning: whether or not to perform common cleaning operations on texts;
note that enabling only makes sense if language of the task is English (default: False)
@param (int) max_tokenization_length: maximum number of positional embeddings, or the sequence
length of an example that will be fed to BERT model (default: 512)
@param (str) truncation_method: method that will be applied in case the text exceeds
@max_tokenization_length; currently implemented methods include 'head-only', 'tail-only',
and 'head+tail' (default: 'head-only')
@param (float) split_head_density: weight on head when splitting between head and tail, only
applicable if @truncation_method='head+tail' (default: 0.5)
@return (list) input_ids: the encoded integer indexes of the given text; note that
get_data_iterators() function converts this to a Tensor under the hood
"""
if apply_cleaning:
text = clean_text(text=text)
# Tokenize and encode
tokenized_text = tokenizer.tokenize(text)
input_ids = tokenizer.convert_tokens_to_ids(tokenized_text)
    # Subtract 2 ([CLS] and [SEP] tokens) to get the actual text tokenization length
text_tokenization_length = max_tokenization_length - 2
# Truncate sequences with the specified approach
if len(input_ids) > text_tokenization_length:
# i) Head-Only Approach: Keep the first N tokens
if truncation_method == 'head-only':
input_ids = input_ids[:text_tokenization_length]
# ii) Tail-Only Approach: Keep the last N tokens
elif truncation_method == 'tail-only':
input_ids = input_ids[-text_tokenization_length:]
# iii) Head+Tail Approach: Keep the first F tokens and last L tokens where F + L = N
elif truncation_method == 'head+tail':
head_tokenization_length = int(text_tokenization_length * split_head_density)
tail_tokenization_length = text_tokenization_length - head_tokenization_length
input_head_ids = input_ids[:head_tokenization_length]
input_tail_ids = input_ids[-tail_tokenization_length:]
input_ids = input_head_ids + input_tail_ids
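            # e.g. with max_tokenization_length=512 and split_head_density=0.5,
            # this keeps the first 255 and the last 255 tokens (510 text tokens,
            # leaving room for [CLS] and [SEP])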
# Plug in CLS & SEP special tokens for identification of start & end points of sequences
cls_id = tokenizer.convert_tokens_to_ids('[CLS]')
sep_id = tokenizer.convert_tokens_to_ids('[SEP]')
input_ids = [cls_id] + input_ids + [sep_id]
# Pad sequences & corresponding masks and features
pad_id = tokenizer.convert_tokens_to_ids('[PAD]')
if len(input_ids) < max_tokenization_length:
padding_length = max_tokenization_length - len(input_ids)
input_ids = input_ids + ([pad_id] * padding_length)
# Check if input is in correct length
# assert len(input_ids) == max_tokenization_length
return input_ids
def get_features(input_ids, tokenizer, device):
"""
Function to get BERT-related features, and helps to build the total input representation.
@param (Tensor) input_ids: the encoded integer indexes of a batch, with shape: (B, P)
@param (pytorch_transformers.BertTokenizer) tokenizer: tokenizer with pre-figured mappings
@param (torch.device) device: 'cpu' or 'gpu', decides where to store the outputted tensors
@return (Tensor, Tensor) token_type_ids, attention_mask: features describe token type with
a 0 for the first sentence and a 1 for the pair sentence; enable attention on a
particular token with a 1 or disable it with a 0
"""
token_type_ids, attention_mask = [], []
# Iterate over batch
for input_ids_example in input_ids:
# Convert tensor to a 1D list
input_ids_example = input_ids_example.squeeze().tolist()
# Set example to whole input when batch size is 1
if input_ids.shape[0] == 1:
input_ids_example = input_ids.squeeze().tolist()
# Get padding information
padding_token_id = tokenizer.convert_tokens_to_ids('[PAD]')
padding_length = input_ids_example.count(padding_token_id)
text_length = len(input_ids_example) - padding_length
# Get segment IDs -> all 0s for one sentence, which is the case for sequence classification
token_type_ids_example = [0] * len(input_ids_example)
# Get input mask -> 1 for real tokens, 0 for padding tokens
attention_mask_example = ([1] * text_length) + ([0] * padding_length)
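        # e.g. a 512-long row with 500 real tokens yields 500 ones followed by 12 zeros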
# Check if features are in correct length
assert len(token_type_ids_example) == len(input_ids_example)
assert len(attention_mask_example) == len(input_ids_example)
token_type_ids.append(token_type_ids_example)
attention_mask.append(attention_mask_example)
# Convert lists to tensors
token_type_ids = torch.tensor(data=token_type_ids, device=device)
attention_mask = torch.tensor(data=attention_mask, device=device)
return token_type_ids, attention_mask
class IMDBDataset(Dataset):
"""
IMDB Dataset for easily iterating over and performing common operations.
@param (str) input_directory: path of directory where the desired data exists
@param (pytorch_transformers.BertTokenizer) tokenizer: tokenizer with pre-figured mappings
@param (bool) apply_cleaning: whether or not to perform common cleaning operations on texts;
note that enabling only makes sense if language of the task is English
@param (int) max_tokenization_length: maximum number of positional embeddings, or the sequence
length of an example that will be fed to BERT model (default: 512)
@param (str) truncation_method: method that will be applied in case the text exceeds
@max_tokenization_length; currently implemented methods include 'head-only', 'tail-only',
and 'head+tail' (default: 'head-only')
@param (float) split_head_density: weight on head when splitting between head and tail, only
applicable if @truncation_method='head+tail' (default: 0.5)
@param (torch.device) device: 'cpu' or 'gpu', decides where to store the data tensors
"""
def __init__(self, input_directory, tokenizer, apply_cleaning, max_tokenization_length,
truncation_method='head-only', split_head_density=0.5, device='cpu'):
        super(IMDBDataset, self).__init__()
self.positive_path = os.path.join(input_directory, 'pos')
self.positive_files = [f for f in os.listdir(self.positive_path)
if os.path.isfile(os.path.join(self.positive_path, f))]
self.num_positive_examples = len(self.positive_files)
self.positive_label = 1
self.negative_path = os.path.join(input_directory, 'neg')
self.negative_files = [f for f in os.listdir(self.negative_path)
if os.path.isfile(os.path.join(self.negative_path, f))]
self.num_negative_examples = len(self.negative_files)
self.negative_label = 0
self.tokenizer = tokenizer
self.apply_cleaning = apply_cleaning
self.max_tokenization_length = max_tokenization_length
self.truncation_method = truncation_method
self.split_head_density = split_head_density
self.device = device
# Pre-tokenize & encode examples
self.pre_tokenize_and_encode_examples()
def pre_tokenize_and_encode_examples(self):
"""
Function to tokenize & encode examples and save the tokenized versions to a separate folder.
This way, we won't have to perform the same tokenization and encoding ops every epoch.
"""
if not os.path.exists(os.path.join(self.positive_path, 'tokenized_and_encoded')):
os.mkdir(os.path.join(self.positive_path, 'tokenized_and_encoded'))
# Clean & tokenize positive reviews
for i in trange(len(self.positive_files), desc='Tokenizing & Encoding Positive Reviews',
leave=True):
file = self.positive_files[i]
with open(os.path.join(self.positive_path, file), mode='r', encoding='utf8') as f:
example = f.read()
example = re.sub(r'<br />', '', example)
example = example.lstrip().rstrip()
example = re.sub(' +', ' ', example)
example = tokenize_and_encode(text=example,
tokenizer=self.tokenizer,
apply_cleaning=self.apply_cleaning,
max_tokenization_length=self.max_tokenization_length,
truncation_method=self.truncation_method,
split_head_density=self.split_head_density)
with open(os.path.join(self.positive_path, 'tokenized_and_encoded', file), mode='wb') as f:
pickle.dump(obj=example, file=f)
else:
logging.warning('Tokenized positive reviews directory already exists!')
if not os.path.exists(os.path.join(self.negative_path, 'tokenized_and_encoded')):
os.mkdir(os.path.join(self.negative_path, 'tokenized_and_encoded'))
# Clean & tokenize negative reviews
for i in trange(len(self.negative_files), desc='Tokenizing & Encoding Negative Reviews',
leave=True):
file = self.negative_files[i]
with open(os.path.join(self.negative_path, file), mode='r', encoding='utf8') as f:
example = f.read()
example = re.sub(r'<br />', '', example)
example = example.lstrip().rstrip()
example = re.sub(' +', ' ', example)
example = tokenize_and_encode(text=example,
tokenizer=self.tokenizer,
apply_cleaning=self.apply_cleaning,
max_tokenization_length=self.max_tokenization_length,
truncation_method=self.truncation_method,
split_head_density=self.split_head_density)
with open(os.path.join(self.negative_path, 'tokenized_and_encoded', file), mode='wb') as f:
pickle.dump(obj=example, file=f)
else:
logging.warning('Tokenized negative reviews directory already exists!')
def __len__(self):
return len(self.positive_files) + len(self.negative_files)
def __getitem__(self, index):
if index < self.num_positive_examples:
file = self.positive_files[index]
label = torch.tensor(data=self.positive_label, dtype=torch.long).to(self.device)
with open(os.path.join(self.positive_path, 'tokenized_and_encoded', file), mode='rb') as f:
example = pickle.load(file=f)
elif index >= self.num_positive_examples:
file = self.negative_files[index-self.num_positive_examples]
label = torch.tensor(data=self.negative_label, dtype=torch.long).to(self.device)
with open(os.path.join(self.negative_path, 'tokenized_and_encoded', file), mode='rb') as f:
example = pickle.load(file=f)
else:
raise ValueError('Out of range index while accessing dataset')
return torch.from_numpy(np.array(example)).long().to(self.device), label
|
[
"pickle.dump",
"nltk.stem.WordNetLemmatizer",
"logging.warning",
"pickle.load",
"numpy.array",
"nltk.corpus.stopwords.words",
"re.sub",
"os.path.join",
"os.listdir",
"torch.tensor"
] |
[((549, 568), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (566, 568), False, 'from nltk.stem import WordNetLemmatizer\n'), ((508, 534), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (523, 534), False, 'from nltk.corpus import stopwords\n'), ((702, 743), 're.sub', 're.sub', (['"""[^\\\\w\\\\s]"""', '""""""', 'text', 're.UNICODE'], {}), "('[^\\\\w\\\\s]', '', text, re.UNICODE)\n", (708, 743), False, 'import re\n'), ((1033, 1056), 're.sub', 're.sub', (['""" +"""', '""" """', 'text'], {}), "(' +', ' ', text)\n", (1039, 1056), False, 'import re\n'), ((6325, 6373), 'torch.tensor', 'torch.tensor', ([], {'data': 'token_type_ids', 'device': 'device'}), '(data=token_type_ids, device=device)\n', (6337, 6373), False, 'import torch\n'), ((6395, 6443), 'torch.tensor', 'torch.tensor', ([], {'data': 'attention_mask', 'device': 'device'}), '(data=attention_mask, device=device)\n', (6407, 6443), False, 'import torch\n'), ((7888, 7924), 'os.path.join', 'os.path.join', (['input_directory', '"""pos"""'], {}), "(input_directory, 'pos')\n", (7900, 7924), False, 'import os\n'), ((8208, 8244), 'os.path.join', 'os.path.join', (['input_directory', '"""neg"""'], {}), "(input_directory, 'neg')\n", (8220, 8244), False, 'import os\n'), ((10523, 10594), 'logging.warning', 'logging.warning', (['"""Tokenized positive reviews directory already exists!"""'], {}), "('Tokenized positive reviews directory already exists!')\n", (10538, 10594), False, 'import logging\n'), ((11984, 12055), 'logging.warning', 'logging.warning', (['"""Tokenized negative reviews directory already exists!"""'], {}), "('Tokenized negative reviews directory already exists!')\n", (11999, 12055), False, 'import logging\n'), ((7967, 7997), 'os.listdir', 'os.listdir', (['self.positive_path'], {}), '(self.positive_path)\n', (7977, 7997), False, 'import os\n'), ((8287, 8317), 'os.listdir', 'os.listdir', (['self.negative_path'], {}), '(self.negative_path)\n', (8297, 8317), False, 'import os\n'), ((9165, 9222), 'os.path.join', 'os.path.join', (['self.positive_path', '"""tokenized_and_encoded"""'], {}), "(self.positive_path, 'tokenized_and_encoded')\n", (9177, 9222), False, 'import os\n'), ((9246, 9303), 'os.path.join', 'os.path.join', (['self.positive_path', '"""tokenized_and_encoded"""'], {}), "(self.positive_path, 'tokenized_and_encoded')\n", (9258, 9303), False, 'import os\n'), ((9707, 9736), 're.sub', 're.sub', (['"""<br />"""', '""""""', 'example'], {}), "('<br />', '', example)\n", (9713, 9736), False, 'import re\n'), ((9816, 9842), 're.sub', 're.sub', (['""" +"""', '""" """', 'example'], {}), "(' +', ' ', example)\n", (9822, 9842), False, 'import re\n'), ((10626, 10683), 'os.path.join', 'os.path.join', (['self.negative_path', '"""tokenized_and_encoded"""'], {}), "(self.negative_path, 'tokenized_and_encoded')\n", (10638, 10683), False, 'import os\n'), ((10707, 10764), 'os.path.join', 'os.path.join', (['self.negative_path', '"""tokenized_and_encoded"""'], {}), "(self.negative_path, 'tokenized_and_encoded')\n", (10719, 10764), False, 'import os\n'), ((11168, 11197), 're.sub', 're.sub', (['"""<br />"""', '""""""', 'example'], {}), "('<br />', '', example)\n", (11174, 11197), False, 'import re\n'), ((11277, 11303), 're.sub', 're.sub', (['""" +"""', '""" """', 'example'], {}), "(' +', ' ', example)\n", (11283, 11303), False, 'import re\n'), ((12498, 12517), 'pickle.load', 'pickle.load', ([], {'file': 'f'}), '(file=f)\n', (12509, 12517), False, 'import pickle\n'), ((8047, 8082), 'os.path.join', 'os.path.join', (['self.positive_path', 'f'], {}), '(self.positive_path, f)\n', (8059, 8082), False, 'import os\n'), ((8367, 8402), 'os.path.join', 'os.path.join', (['self.negative_path', 'f'], {}), '(self.negative_path, f)\n', (8379, 8402), False, 'import os\n'), ((10464, 10496), 'pickle.dump', 'pickle.dump', ([], {'obj': 'example', 'file': 'f'}), '(obj=example, file=f)\n', (10475, 10496), False, 'import pickle\n'), ((11925, 11957), 'pickle.dump', 'pickle.dump', ([], {'obj': 'example', 'file': 'f'}), '(obj=example, file=f)\n', (11936, 11957), False, 'import pickle\n'), ((12295, 12351), 'torch.tensor', 'torch.tensor', ([], {'data': 'self.positive_label', 'dtype': 'torch.long'}), '(data=self.positive_label, dtype=torch.long)\n', (12307, 12351), False, 'import torch\n'), ((12390, 12453), 'os.path.join', 'os.path.join', (['self.positive_path', '"""tokenized_and_encoded"""', 'file'], {}), "(self.positive_path, 'tokenized_and_encoded', file)\n", (12402, 12453), False, 'import os\n'), ((12864, 12883), 'pickle.load', 'pickle.load', ([], {'file': 'f'}), '(file=f)\n', (12875, 12883), False, 'import pickle\n'), ((9568, 9606), 'os.path.join', 'os.path.join', (['self.positive_path', 'file'], {}), '(self.positive_path, file)\n', (9580, 9606), False, 'import os\n'), ((10362, 10425), 'os.path.join', 'os.path.join', (['self.positive_path', '"""tokenized_and_encoded"""', 'file'], {}), "(self.positive_path, 'tokenized_and_encoded', file)\n", (10374, 10425), False, 'import os\n'), ((11029, 11067), 'os.path.join', 'os.path.join', (['self.negative_path', 'file'], {}), '(self.negative_path, file)\n', (11041, 11067), False, 'import os\n'), ((11823, 11886), 'os.path.join', 'os.path.join', (['self.negative_path', '"""tokenized_and_encoded"""', 'file'], {}), "(self.negative_path, 'tokenized_and_encoded', file)\n", (11835, 11886), False, 'import os\n'), ((12661, 12717), 'torch.tensor', 'torch.tensor', ([], {'data': 'self.negative_label', 'dtype': 'torch.long'}), '(data=self.negative_label, dtype=torch.long)\n', (12673, 12717), False, 'import torch\n'), ((12756, 12819), 'os.path.join', 'os.path.join', (['self.negative_path', '"""tokenized_and_encoded"""', 'file'], {}), "(self.negative_path, 'tokenized_and_encoded', file)\n", (12768, 12819), False, 'import os\n'), ((13006, 13023), 'numpy.array', 'np.array', (['example'], {}), '(example)\n', (13014, 13023), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
class DCA(nn.Module):
def __init__(self, no_channels=1):
super(DCA, self).__init__()
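        # The conv stack must produce a 64x9x9 (= 5184) feature map, which is
        # flattened into a 1000-d bottleneck and expanded back to 5184;
        # forward() reshapes that vector to (N, 64, 9, 9) for the decoder.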
self.encoder = nn.Sequential(
nn.Conv2d(no_channels, 16, 7, stride=3, padding=1),
nn.ReLU(),
nn.Conv2d(16, 32, 7, stride=3, padding=1),
nn.ReLU(),
nn.Conv2d(32, 64, 7),
nn.Flatten(),
nn.Linear(5184, 1000),
nn.BatchNorm1d(1000),
nn.Linear(1000, 5184)
)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(64, 32, 7),
nn.ReLU(),
nn.ConvTranspose2d(32, 16, 7, stride=3, padding=1, output_padding=2),
nn.ReLU(),
nn.ConvTranspose2d(16, no_channels, 6, stride=3, padding=1, output_padding=2),
nn.Tanh()
)
def forward(self, x):
x = self.encoder(x)
x = x.reshape(-1,64,9,9)
x = self.decoder(x)
return x
|
[
"torch.nn.ReLU",
"torch.nn.ConvTranspose2d",
"torch.nn.Tanh",
"torch.nn.BatchNorm1d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.Flatten"
] |
[((252, 302), 'torch.nn.Conv2d', 'nn.Conv2d', (['no_channels', '(16)', '(7)'], {'stride': '(3)', 'padding': '(1)'}), '(no_channels, 16, 7, stride=3, padding=1)\n', (261, 302), True, 'import torch.nn as nn\n'), ((316, 325), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (323, 325), True, 'import torch.nn as nn\n'), ((339, 380), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)', '(7)'], {'stride': '(3)', 'padding': '(1)'}), '(16, 32, 7, stride=3, padding=1)\n', (348, 380), True, 'import torch.nn as nn\n'), ((394, 403), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (401, 403), True, 'import torch.nn as nn\n'), ((417, 437), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)', '(7)'], {}), '(32, 64, 7)\n', (426, 437), True, 'import torch.nn as nn\n'), ((451, 463), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (461, 463), True, 'import torch.nn as nn\n'), ((477, 498), 'torch.nn.Linear', 'nn.Linear', (['(5184)', '(1000)'], {}), '(5184, 1000)\n', (486, 498), True, 'import torch.nn as nn\n'), ((512, 532), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(1000)'], {}), '(1000)\n', (526, 532), True, 'import torch.nn as nn\n'), ((546, 567), 'torch.nn.Linear', 'nn.Linear', (['(1000)', '(5184)'], {}), '(1000, 5184)\n', (555, 567), True, 'import torch.nn as nn\n'), ((629, 658), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(64)', '(32)', '(7)'], {}), '(64, 32, 7)\n', (647, 658), True, 'import torch.nn as nn\n'), ((672, 681), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (679, 681), True, 'import torch.nn as nn\n'), ((695, 763), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(32)', '(16)', '(7)'], {'stride': '(3)', 'padding': '(1)', 'output_padding': '(2)'}), '(32, 16, 7, stride=3, padding=1, output_padding=2)\n', (713, 763), True, 'import torch.nn as nn\n'), ((777, 786), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (784, 786), True, 'import torch.nn as nn\n'), ((800, 877), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(16)', 'no_channels', '(6)'], {'stride': '(3)', 'padding': '(1)', 'output_padding': '(2)'}), '(16, no_channels, 6, stride=3, padding=1, output_padding=2)\n', (818, 877), True, 'import torch.nn as nn\n'), ((891, 900), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (898, 900), True, 'import torch.nn as nn\n')]
|
import json
import logging
import os
import random
import spacy
from spacy.training import Example
from tqdm.auto import tqdm
from label_studio_ml.model import LabelStudioMLBase
logging.basicConfig(level=logging.INFO)
class SimpleNER(LabelStudioMLBase):
def __init__(self, **kwargs):
# don't forget to initialize base class...
super(SimpleNER, self).__init__(**kwargs)
# then collect all keys from config which will be used to extract data from task and to form prediction
# Parsed label config contains only one output of <Labels> type
assert len(self.parsed_label_config) == 1
self.from_name, self.info = list(self.parsed_label_config.items())[0]
assert self.info['type'] == 'Labels'
# the model has only one textual input
assert len(self.info['to_name']) == 1
assert len(self.info['inputs']) == 1
assert self.info['inputs'][0]['type'] == 'Text'
self.to_name = self.info['to_name'][0]
self.value = self.info['inputs'][0]['value']
if not self.train_output:
# If there is no trainings, define cold-started the simple spaCy NER model
self.reset_model()
# This is an array of <Labels> labels
self.labels = self.info['labels']
# Initialized the ner model with labels
list(map(self.ner.add_label, self.labels))
print('Initialized with from_name={from_name}, to_name={to_name}, labels={labels}'.format(
from_name=self.from_name, to_name=self.to_name, labels=str(
self.labels)
))
else:
# otherwise load the model from the latest training results
self.model_file = self.train_output['model_file']
self.model = spacy.load(self.model_file)
# and use the labels from training outputs
self.labels = self.train_output['labels']
print('Loaded from train output with from_name={from_name}, to_name={to_name}, labels={labels}'.format(
from_name=self.from_name, to_name=self.to_name, labels=str(
self.labels)
))
def reset_model(self):
self.model = spacy.blank("en")
self.model.add_pipe("ner")
self.ner = self.model.get_pipe("ner")
self.new_model = True
def predict(self, tasks, **kwargs):
# collect input texts
predictions = []
for task in tasks:
doc = self.model(task['data'][self.value])
# get named entities
result = [{
'from_name': self.from_name,
'to_name': self.to_name,
'type': 'labels',
'value': {"start": ent.start_char, "end": ent.end_char, "text": ent.text, 'labels': [ent.label_]}
} for ent in doc.ents]
predictions.append({'result': result})
print(predictions)
return predictions
def fit(self, completions, workdir=None, **kwargs):
train_data = []
_labels = []
# train the model
self.reset_model()
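        # reset_model() always rebuilds a blank pipeline, so each fit() trains
        # from scratch and the resume_training() branch below is effectively dead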
if self.new_model:
optimizer = self.model.begin_training()
else:
optimizer = self.model.resume_training()
for completion in completions:
# get input text from task data
if completion['annotations'][0].get('skipped') or completion['annotations'][0].get('was_cancelled'):
continue
# get an annotation
output_labels = []
for annotation in completion['annotations']:
for result in annotation['result']:
start = result['value']['start']
end = result['value']['end']
for label in result['value']['labels']:
output_labels.append((start, end, label))
_labels.append(label)
train_data.append((completion['data'][self.value], {
'entities': output_labels}))
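        # each train_data entry uses spaCy's dict format:
        # ("raw text", {"entities": [(start, end, "LABEL"), ...]})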
new_labels = set(_labels)
if len(new_labels) != len(self.labels):
self.labels = list(sorted(new_labels))
print('Label set has been changed:' + str(self.labels))
# Training for 30 iterations
for _ in tqdm(range(30)):
random.shuffle(train_data)
for raw_text, entities in train_data:
doc = self.model.make_doc(raw_text)
example = Example.from_dict(doc, entities)
self.model.update([example], sgd=optimizer)
# save spaCy pipeline to model file
model_file = os.path.join(workdir, 'model')
self.model.to_disk(model_file)
train_output = {
'labels': self.labels,
'model_file': model_file
}
return train_output
|
[
"logging.basicConfig",
"random.shuffle",
"spacy.training.Example.from_dict",
"spacy.load",
"spacy.blank",
"os.path.join"
] |
[((181, 220), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (200, 220), False, 'import logging\n'), ((2231, 2248), 'spacy.blank', 'spacy.blank', (['"""en"""'], {}), "('en')\n", (2242, 2248), False, 'import spacy\n'), ((4661, 4691), 'os.path.join', 'os.path.join', (['workdir', '"""model"""'], {}), "(workdir, 'model')\n", (4673, 4691), False, 'import os\n'), ((1804, 1831), 'spacy.load', 'spacy.load', (['self.model_file'], {}), '(self.model_file)\n', (1814, 1831), False, 'import spacy\n'), ((4347, 4373), 'random.shuffle', 'random.shuffle', (['train_data'], {}), '(train_data)\n', (4361, 4373), False, 'import random\n'), ((4502, 4534), 'spacy.training.Example.from_dict', 'Example.from_dict', (['doc', 'entities'], {}), '(doc, entities)\n', (4519, 4534), False, 'from spacy.training import Example\n')]
|
import pytest
from pyball import PyBall
from pyball.models.config import Platform
@pytest.fixture(scope='module')
def test_platform():
pyball = PyBall()
return pyball.get_platforms()
def test_get_platform_returns_platform(test_platform):
assert isinstance(test_platform, list)
assert isinstance(test_platform[0], Platform)
|
[
"pytest.fixture",
"pyball.PyBall"
] |
[((85, 115), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (99, 115), False, 'import pytest\n'), ((150, 158), 'pyball.PyBall', 'PyBall', ([], {}), '()\n', (156, 158), False, 'from pyball import PyBall\n')]
|
from flask import g, session
from SoftLayer import TokenAuthentication, Client
def get_client():
    if not hasattr(g, 'client'):
        # default to None so an anonymous request gets a clear "no client"
        # value instead of raising AttributeError on the return below
        g.client = None
        if session.get('sl_user_id'):
            auth = TokenAuthentication(session['sl_user_id'],
                                       session['sl_user_hash'])
            g.client = Client(auth=auth)
    return g.client
|
[
"flask.session.get",
"SoftLayer.Client",
"SoftLayer.TokenAuthentication"
] |
[((144, 169), 'flask.session.get', 'session.get', (['"""sl_user_id"""'], {}), "('sl_user_id')\n", (155, 169), False, 'from flask import g, session\n'), ((190, 257), 'SoftLayer.TokenAuthentication', 'TokenAuthentication', (["session['sl_user_id']", "session['sl_user_hash']"], {}), "(session['sl_user_id'], session['sl_user_hash'])\n", (209, 257), False, 'from SoftLayer import TokenAuthentication, Client\n'), ((345, 362), 'SoftLayer.Client', 'Client', ([], {'auth': 'auth'}), '(auth=auth)\n', (351, 362), False, 'from SoftLayer import TokenAuthentication, Client\n')]
|
import unittest
import requests
import bom_water.bom_water as bm
import os
from pathlib import Path
import shapely
from bom_water.spatial_util import spatail_utilty
class test_core(unittest.TestCase):
# def __init__(self):
# super(test_core, self).__init__(self)
# self.setUp()
@classmethod
def setUpClass(self):
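        # remove any cached GetCapabilities response so each test run
        # exercises the live BoM service rather than a stale cache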
        # a leading '/' made the second component absolute, so Path.home() was
        # silently discarded by os.path.join; the path must be relative
        remove_file = os.path.join(Path.home(), 'bom_water/cache/waterML_GetCapabilities.json')
if os.path.exists(remove_file):
os.remove(remove_file)
# def test_user_path(self):
# from pathlib import Path
# print(Path.home())
def test_bom_service(self):
'''Test that the service is up
:rtype: None
'''
_bm = bm.BomWater()
try:
response = _bm.request(_bm.actions.GetCapabilities)
if response.status_code == 200:
assert True, "Test BoM service passed"
else:
assert False, f'Test BoM service failed with status_code: {response.status_code}'
except requests.exceptions.RequestException as e:
assert False, f'Test BoM service failed with RequestException: {e}'
except requests.exceptions.ConnectionError as ece:
assert False, f'Test BoM service failed with ConnectionError: {ece}'
except requests.exceptions.Timeout as et:
assert False, f'Test BoM service failed with Timeout: {et}'
def test_get_capabilities(self):
'''Get Capabilities test'''
_bm = bm.BomWater()
response = _bm.request(_bm.actions.GetCapabilities)
test_json = _bm.xml_to_json(response.text)#, f'test_GetCapabilities.json')
actions = test_json['sos:Capabilities']['ows:OperationsMetadata']['ows:Operation']
        for action in actions:
            if action['@name'] == 'DescribeSensor':
                continue
            # every advertised action (except DescribeSensor) must exist as an
            # attribute of _bm.actions; the old loop asserted False on the first
            # non-matching attribute, failing even for valid capabilities
            assert action['@name'] in vars(_bm.actions), \
                f"Test GetCapabilities failed to get action: expected {action['@name']}"
def test_get_feature_of_interest(self):
'''Get Feature of interest test'''
_bm = bm.BomWater()
'''Todo: Need a small bounding box with known stations contained'''
response = _bm.request(_bm.actions.GetFeatureOfInterest,
"http://bom.gov.au/waterdata/services/stations/GW036501.2.2")
test_json = _bm.xml_to_json(response.text)#, f'test_GetFeatureOfInterest.json')
features = test_json['soap12:Envelope']['soap12:Body']['sos:GetFeatureOfInterestResponse'][
'sos:featureMember']
        long_station_id = features['wml2:MonitoringPoint']['gml:identifier']['#text']
        if os.path.basename(long_station_id) == 'GW036501.2.2':
            assert True, "Test GetFeatureOfInterest passed"
        else:
            assert False, "Test GetFeatureOfInterest failed"
def test_get_data_availability(self):
'''Get Data availability test'''
_bm = bm.BomWater()
def test_get_observation(self):
'''Get Observation test'''
_bm = bm.BomWater()
def test_create_feature_geojson_list(self):
_bom = bm.BomWater()
response = _bom.request(_bom.actions.GetFeatureOfInterest, None, None, None, None, None, "-37.505032 140.999283", "-28.157021 153.638824" )
response_json = _bom.xml_to_json(response.text)
folder = f'C:\\Users\\fre171\\Documents\\pyBOMwater_dummyData\\test_stations.json'
_bom.create_feature_list(response_json, folder )
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"os.remove",
"pathlib.Path.home",
"os.path.basename",
"os.path.exists",
"bom_water.bom_water.BomWater"
] |
[((3746, 3761), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3759, 3761), False, 'import unittest\n'), ((452, 479), 'os.path.exists', 'os.path.exists', (['remove_file'], {}), '(remove_file)\n', (466, 479), False, 'import os\n'), ((734, 747), 'bom_water.bom_water.BomWater', 'bm.BomWater', ([], {}), '()\n', (745, 747), True, 'import bom_water.bom_water as bm\n'), ((1528, 1541), 'bom_water.bom_water.BomWater', 'bm.BomWater', ([], {}), '()\n', (1539, 1541), True, 'import bom_water.bom_water as bm\n'), ((2317, 2330), 'bom_water.bom_water.BomWater', 'bm.BomWater', ([], {}), '()\n', (2328, 2330), True, 'import bom_water.bom_water as bm\n'), ((3169, 3182), 'bom_water.bom_water.BomWater', 'bm.BomWater', ([], {}), '()\n', (3180, 3182), True, 'import bom_water.bom_water as bm\n'), ((3269, 3282), 'bom_water.bom_water.BomWater', 'bm.BomWater', ([], {}), '()\n', (3280, 3282), True, 'import bom_water.bom_water as bm\n'), ((3347, 3360), 'bom_water.bom_water.BomWater', 'bm.BomWater', ([], {}), '()\n', (3358, 3360), True, 'import bom_water.bom_water as bm\n'), ((379, 390), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (388, 390), False, 'from pathlib import Path\n'), ((493, 515), 'os.remove', 'os.remove', (['remove_file'], {}), '(remove_file)\n', (502, 515), False, 'import os\n'), ((2885, 2916), 'os.path.basename', 'os.path.basename', (['long_statioId'], {}), '(long_statioId)\n', (2901, 2916), False, 'import os\n')]
|
from __future__ import unicode_literals
from mock import Mock
from world.weather.models import WeatherType, WeatherEmit
from server.utils.test_utils import ArxCommandTest
from world.weather import weather_commands, weather_script, utils
from evennia.server.models import ServerConfig
class TestWeatherCommands(ArxCommandTest):
def setUp(self):
        super(TestWeatherCommands, self).setUp()
self.weather1 = WeatherType.objects.create(name='Test', gm_notes='Test weather')
self.emit1 = WeatherEmit.objects.create(weather=self.weather1,
text='Test1 weather happens.')
self.weather2 = WeatherType.objects.create(name='Test2', gm_notes='Test weather')
self.emit2 = WeatherEmit.objects.create(weather=self.weather2,
text='Test2 weather happens.')
ServerConfig.objects.conf('weather_type_current', value=1)
ServerConfig.objects.conf('weather_intensity_current', value=5)
ServerConfig.objects.conf('weather_type_target', value=2)
ServerConfig.objects.conf('weather_intensity_target', value=5)
def test_cmd_adminweather(self):
self.setup_cmd(weather_commands.CmdAdminWeather, self.char1)
self.call_cmd("", "Weather pattern is Test (intensity 5), moving towards Test2 (intensity 5).")
self.call_cmd("/lock", "Weather is now locked and will not change.")
self.call_cmd("/unlock", "Weather is now unlocked and will change again as normal.")
self.call_cmd("/set Pigs soar through the sky.", "Custom weather emit set. "
"Remember to @admin_weather/announce if you want the "
"players to know.")
self.call_cmd("/set", "Custom weather message cleared. Remember to @admin_weather/announce if you want the "
"players to see a new weather emit.")
def test_weather_utils(self):
new_weather, new_intensity = utils.advance_weather()
assert(new_intensity < 5)
|
[
"world.weather.utils.advance_weather",
"evennia.server.models.ServerConfig.objects.conf",
"world.weather.models.WeatherType.objects.create",
"world.weather.models.WeatherEmit.objects.create"
] |
[((423, 487), 'world.weather.models.WeatherType.objects.create', 'WeatherType.objects.create', ([], {'name': '"""Test"""', 'gm_notes': '"""Test weather"""'}), "(name='Test', gm_notes='Test weather')\n", (449, 487), False, 'from world.weather.models import WeatherType, WeatherEmit\n'), ((509, 594), 'world.weather.models.WeatherEmit.objects.create', 'WeatherEmit.objects.create', ([], {'weather': 'self.weather1', 'text': '"""Test1 weather happens."""'}), "(weather=self.weather1, text='Test1 weather happens.'\n )\n", (535, 594), False, 'from world.weather.models import WeatherType, WeatherEmit\n'), ((662, 727), 'world.weather.models.WeatherType.objects.create', 'WeatherType.objects.create', ([], {'name': '"""Test2"""', 'gm_notes': '"""Test weather"""'}), "(name='Test2', gm_notes='Test weather')\n", (688, 727), False, 'from world.weather.models import WeatherType, WeatherEmit\n'), ((749, 834), 'world.weather.models.WeatherEmit.objects.create', 'WeatherEmit.objects.create', ([], {'weather': 'self.weather2', 'text': '"""Test2 weather happens."""'}), "(weather=self.weather2, text='Test2 weather happens.'\n )\n", (775, 834), False, 'from world.weather.models import WeatherType, WeatherEmit\n'), ((886, 944), 'evennia.server.models.ServerConfig.objects.conf', 'ServerConfig.objects.conf', (['"""weather_type_current"""'], {'value': '(1)'}), "('weather_type_current', value=1)\n", (911, 944), False, 'from evennia.server.models import ServerConfig\n'), ((953, 1016), 'evennia.server.models.ServerConfig.objects.conf', 'ServerConfig.objects.conf', (['"""weather_intensity_current"""'], {'value': '(5)'}), "('weather_intensity_current', value=5)\n", (978, 1016), False, 'from evennia.server.models import ServerConfig\n'), ((1025, 1082), 'evennia.server.models.ServerConfig.objects.conf', 'ServerConfig.objects.conf', (['"""weather_type_target"""'], {'value': '(2)'}), "('weather_type_target', value=2)\n", (1050, 1082), False, 'from evennia.server.models import ServerConfig\n'), ((1091, 1153), 'evennia.server.models.ServerConfig.objects.conf', 'ServerConfig.objects.conf', (['"""weather_intensity_target"""'], {'value': '(5)'}), "('weather_intensity_target', value=5)\n", (1116, 1153), False, 'from evennia.server.models import ServerConfig\n'), ((2068, 2091), 'world.weather.utils.advance_weather', 'utils.advance_weather', ([], {}), '()\n', (2089, 2091), False, 'from world.weather import weather_commands, weather_script, utils\n')]
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import textwrap
import jinja2
from aip_site.jinja.ext.tab import TabExtension
def test_tab():
t = jinja2.Template(textwrap.dedent("""
{% tab proto %}
Something something
More more more
{% endtabs %}
"""), extensions=[TabExtension])
rendered = t.render()
assert '=== "Protocol buffers"' in rendered
assert ' Something something\n' in rendered
assert ' More more more\n' in rendered
def test_multiple_tabs():
t = jinja2.Template(textwrap.dedent("""
{% tab proto %}
Something something
{% tab oas %}
Something else
{% endtabs %}
"""), extensions=[TabExtension])
rendered = t.render()
assert '=== "Protocol buffers"' in rendered
assert '=== "OpenAPI 3.0"' in rendered
|
[
"textwrap.dedent"
] |
[((697, 832), 'textwrap.dedent', 'textwrap.dedent', (['"""\n {% tab proto %}\n Something something\n More more more\n {% endtabs %}\n """'], {}), '(\n """\n {% tab proto %}\n Something something\n More more more\n {% endtabs %}\n """\n )\n', (712, 832), False, 'import textwrap\n'), ((1070, 1227), 'textwrap.dedent', 'textwrap.dedent', (['"""\n {% tab proto %}\n Something something\n {% tab oas %}\n Something else\n {% endtabs %}\n """'], {}), '(\n """\n {% tab proto %}\n Something something\n {% tab oas %}\n Something else\n {% endtabs %}\n """\n )\n', (1085, 1227), False, 'import textwrap\n')]
|
from typing import Union, Tuple, Callable
import pygame
from schafkopf.game_modes import *
from schafkopf.pygame_gui.Button import Button
from schafkopf.pygame_gui.colors import WHITE, BLACK, RED
class GameModeWidget(Button):
def __init__(
self,
topleft: Tuple[int, int] = (0, 0),
bidding_option: Tuple[int, Union[int, None]] = (NO_GAME, None),
callback: Callable = None,
font_size: int = 40,
clickable: bool = True
):
margin = 10
font = pygame.font.Font(None, font_size)
text = font.render(get_bidding_option_as_text(bidding_option), True, BLACK)
height = font_size
width = text.get_width() + 2 * margin
image = pygame.Surface((width, height))
image.fill(WHITE)
image.set_alpha(180)
image.blit(text, (margin, margin))
if clickable:
button_down_image = pygame.Surface((width, height))
button_down_image.fill(pygame.Color('grey'))
button_down_image.set_alpha(180)
button_down_image.blit(text, (margin, margin))
hover_image = pygame.Surface((width + 5, height + 5))
hover_image.fill(pygame.Color("lightgrey"))
hover_image.set_alpha(180)
hover_image.blit(text, (margin + 2, margin + 2))
else:
button_down_image = image
hover_image = image
super().__init__(
topleft=topleft,
image=image,
button_down_image=button_down_image,
hover_image=hover_image,
callback=callback
)
def get_bidding_option_as_text(option: tuple[int, Union[int, None]]):
if option[1] is None:
return game_mode_dict[option[0]]
else:
return game_mode_dict[option[0]] + " " + suit_dict[option[1]]
game_mode_dict: dict[int, str] = {
NO_GAME: "Weiter",
PARTNER_MODE: "Sauspiel",
WENZ: "Wenz",
SOLO: "Solo",
}
suit_dict: dict[int, str] = {
ACORNS: "Eichel",
LEAVES: "Gras",
HEARTS: "Herz",
BELLS: "Schellen"
}
|
[
"pygame.Color",
"pygame.Surface",
"pygame.font.Font"
] |
[((514, 547), 'pygame.font.Font', 'pygame.font.Font', (['None', 'font_size'], {}), '(None, font_size)\n', (530, 547), False, 'import pygame\n'), ((721, 752), 'pygame.Surface', 'pygame.Surface', (['(width, height)'], {}), '((width, height))\n', (735, 752), False, 'import pygame\n'), ((905, 936), 'pygame.Surface', 'pygame.Surface', (['(width, height)'], {}), '((width, height))\n', (919, 936), False, 'import pygame\n'), ((1124, 1163), 'pygame.Surface', 'pygame.Surface', (['(width + 5, height + 5)'], {}), '((width + 5, height + 5))\n', (1138, 1163), False, 'import pygame\n'), ((972, 992), 'pygame.Color', 'pygame.Color', (['"""grey"""'], {}), "('grey')\n", (984, 992), False, 'import pygame\n'), ((1193, 1218), 'pygame.Color', 'pygame.Color', (['"""lightgrey"""'], {}), "('lightgrey')\n", (1205, 1218), False, 'import pygame\n')]
|
# Modules
import pygame
import numpy as np
import random
from pygame.constants import KEYDOWN
import settings as s
# Initialize pygame
pygame.init()
# screen
screen = pygame.display.set_mode((s.WIDTH,s.HEIGHT))
# Title and Icon
pygame.display.set_caption('TIC TAC TOE')
icon = pygame.image.load('icon.png')
pygame.display.set_icon(icon)
screen.fill(s.BG_COLOR)
# console board
board = np.zeros((3,3))
# Functions
def drawLines(): # Drawing lines function
# horizontal lines
pygame.draw.line(screen, s.LINE_COLOR, (0,s.SQUARE_SIZE), (500,s.SQUARE_SIZE), s.LINE_WIDTH)
pygame.draw.line(screen, s.LINE_COLOR, (0, 332), (500, 332), s.LINE_WIDTH)
# vertical lines
pygame.draw.line(screen, s.LINE_COLOR, (s.SQUARE_SIZE, 0), (s.SQUARE_SIZE, 500), s.LINE_WIDTH)
pygame.draw.line(screen, s.LINE_COLOR, (332, 0), (332, 500), s.LINE_WIDTH)
def playerEquals(x, y, z):
return x!=0 and x==y and y==z
def checkDraw():
emp = 0
for row in range (s.ROWS):
for col in range (s.COLS):
if availableSquare(row, col):
emp += 1
if emp==0:
return 'Draw'
def checkWinner():
winner = None
# check for tie
winner = checkDraw()
# vertical win
for col in range (s.COLS):
if playerEquals(board[0][col], board[1][col], board[2][col]):
winner = board[0][col]
# horizontal win
for row in range (s.ROWS):
if playerEquals(board[row][0], board[row][1], board[row][2]):
winner = board[row][0]
# ascending diagonal win
if playerEquals(board[2][0], board[1][1], board[0][2]):
winner = board[2][0]
# descending diagonal win
if playerEquals(board[0][0], board[1][1], board[2][2]):
winner = board[0][0]
return winner
# functions for drawing winning lines
def vertical_winline(col, winner):
posX = col * s.SQUARE_SIZE + s.SQUARE_SIZE//2 # column is constant
if winner == 1:
color = s.O_COLOR
elif winner == 2:
color = s.X_COLOR
pygame.draw.line(screen, color, (posX, 15), (posX, s.HEIGHT-15), 15)
def horizontal_winline(row, winner):
posY = row * s.SQUARE_SIZE + s.SQUARE_SIZE//2 # row is constant
if winner == 1:
color = s.O_COLOR
else:
color = s.X_COLOR
pygame.draw.line(screen, color, (15, posY), (s.WIDTH-15, posY), 15)
def asc_diagonal_winline(winner):
if winner == 1:
color = s.O_COLOR
else:
color = s.X_COLOR
pygame.draw.line(screen, color, (15, s.HEIGHT-15), (s.WIDTH-15, 15), 15)
def desc_diagonal_winline(winner):
if winner == 1:
color = s.O_COLOR
else:
color = s.X_COLOR
pygame.draw.line(screen, color, (15, 15), (s.WIDTH-15, s.HEIGHT-15), 15)
# function for drawing Os and Xs
def figures():
for row in range(3):
for col in range(3):
if board[row][col] == 1:
pygame.draw.circle(screen, s.O_COLOR, ( int(col * s.SQUARE_SIZE + 83), int(row * s.SQUARE_SIZE + 83)), s.C_RADIUS, s.C_WIDTH)
elif board[row][col] == 2:
pygame.draw.line(screen, s.X_COLOR, (col * s.SQUARE_SIZE + s.SPACE, row * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE ), (col * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE, row * s.SQUARE_SIZE + s.SPACE), s.CROSS_WIDTH)
pygame.draw.line(screen, s.X_COLOR, (col * s.SQUARE_SIZE + s.SPACE, row * s.SQUARE_SIZE + s.SPACE ), (col * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE, row * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE), s.CROSS_WIDTH)
def markSquare(row, col, player):
board[row][col] = player
def availableSquare(row, col):
return board[row][col] == 0
def isBoardFull():
for row in range (3):
for col in range (3):
if board[row][col] == 0:
return False
return True
def restart():
screen.fill(s.BG_COLOR)
drawLines()
player = 1
for row in range (s.ROWS):
for col in range (s.COLS):
board[row][col] = 0
def render():
x = checkWinner()
if x != None and x != 'Draw':
# vertical win
for col in range (s.COLS):
if playerEquals(board[0][col], board[1][col], board[2][col]):
winner = board[0][col]
vertical_winline(col, winner)
# horizontal win
for row in range (s.ROWS):
if playerEquals(board[row][0], board[row][1], board[row][2]):
winner = board[row][0]
horizontal_winline(row, winner)
# ascending diagonal win
if playerEquals(board[2][0], board[1][1], board[0][2]):
winner = board[2][0]
asc_diagonal_winline(winner)
# descending diagonal win
if playerEquals(board[0][0], board[1][1], board[2][2]):
winner = board[0][0]
desc_diagonal_winline(winner)
display(x)
def display(x):
if x == 1:
text = "O WINS!!! Press 'R' to play again!"
drawTexttoScreen (screen, text, 250, 250, 'GREEN')
elif x == 2:
text = "X WINS!!! Press 'R' to play again!"
drawTexttoScreen (screen, text, 250, 250)
elif x == 'Draw':
text = "DRAW!!! Press 'R' to play again!"
drawTexttoScreen (screen, text, 250, 250)
def drawTexttoScreen (screen, text, x, y, color = (250, 0, 0)):
font = pygame.font.SysFont('chalkduster.ttf', 30)
textSurface = font.render(text, True, color)
textRect = textSurface.get_rect()
textRect.centerx = x
textRect.centery = y
screen.blit(textSurface, textRect)
def playerMove(row, col, player):
markSquare(row, col, player)
return
def compMove():
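    # try every empty square for the computer (player 1), score the resulting
    # position with minimax, and play the highest-scoring move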
bestScore = float('-inf')
new_r = new_c = None
for row in range(s.ROWS):
for col in range(s.COLS):
if availableSquare(row, col):
markSquare(row, col, 1)
score = minimax(0, float('-inf'), float('inf'), False)
markSquare(row, col, 0)
if score > bestScore:
bestScore = score
new_r, new_c = row, col
markSquare(new_r, new_c, 1)
return
# Minimax function
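# NOTE: s.score comes from settings (not shown here) and is assumed to map
# winners to minimax values, e.g. {1: +1, 2: -1, 'Draw': 0}, so that player 1
# (the computer, playing O) maximizes and player 2 (X) minimizes.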
def minimax(depth, alpha, beta, is_maximizing):
winner = checkWinner()
if winner != None:
return s.score[winner]
if is_maximizing:
bestScore = float('-inf')
for row in range(s.ROWS):
for col in range(s.COLS):
if availableSquare(row, col):
markSquare(row, col, 1)
score = minimax(depth + 1, alpha, beta, False)
markSquare(row, col, 0)
bestScore = max(score, bestScore)
alpha = max(alpha, bestScore) # pruning
if beta <= alpha:
return bestScore
return bestScore
else:
bestScore = float('inf')
for row in range(3):
for col in range(3):
if availableSquare(row, col):
markSquare(row, col, 2)
score = minimax(depth + 1, alpha, beta, True)
markSquare(row, col, 0)
bestScore = min(score, bestScore)
beta = min(beta, bestScore) # pruning
if beta <= alpha:
return bestScore
return bestScore
drawLines()
player = random.choice(s.p) # initializing player
gameOver = False
# game loop
run = True
while run:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
        if event.type == pygame.MOUSEBUTTONDOWN and not gameOver:
            mouseX = event.pos[0] # x coordinate
            mouseY = event.pos[1] # y coordinate
            clicked_row = int(mouseY // s.SQUARE_SIZE)
            clicked_col = int(mouseX // s.SQUARE_SIZE)
            # for player move
            if availableSquare (clicked_row, clicked_col):
                if player == 2:
                    playerMove(clicked_row, clicked_col, 2)
                    winner = checkWinner()
                    if winner != None:
                        gameOver = True
                    player = 1
                    figures()
                    render()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_r:
                restart()
                gameOver = False # changing gameOver to False for the next game
    # for comp move: runs once per frame, outside the event loop, so the
    # computer still plays even when no input events arrive
    if player == 1 and not gameOver:
        compMove()
        winner = checkWinner()
        if winner != None:
            gameOver = True
        player = 2
        figures()
        render()
    pygame.display.update()
|
[
"pygame.draw.line",
"pygame.display.set_icon",
"pygame.font.SysFont",
"pygame.event.get",
"pygame.display.set_mode",
"numpy.zeros",
"random.choice",
"pygame.init",
"pygame.display.update",
"pygame.image.load",
"pygame.display.set_caption"
] |
[((136, 149), 'pygame.init', 'pygame.init', ([], {}), '()\n', (147, 149), False, 'import pygame\n'), ((169, 213), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(s.WIDTH, s.HEIGHT)'], {}), '((s.WIDTH, s.HEIGHT))\n', (192, 213), False, 'import pygame\n'), ((230, 271), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""TIC TAC TOE"""'], {}), "('TIC TAC TOE')\n", (256, 271), False, 'import pygame\n'), ((279, 308), 'pygame.image.load', 'pygame.image.load', (['"""icon.png"""'], {}), "('icon.png')\n", (296, 308), False, 'import pygame\n'), ((309, 338), 'pygame.display.set_icon', 'pygame.display.set_icon', (['icon'], {}), '(icon)\n', (332, 338), False, 'import pygame\n'), ((389, 405), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (397, 405), True, 'import numpy as np\n'), ((7387, 7405), 'random.choice', 'random.choice', (['s.p'], {}), '(s.p)\n', (7400, 7405), False, 'import random\n'), ((489, 588), 'pygame.draw.line', 'pygame.draw.line', (['screen', 's.LINE_COLOR', '(0, s.SQUARE_SIZE)', '(500, s.SQUARE_SIZE)', 's.LINE_WIDTH'], {}), '(screen, s.LINE_COLOR, (0, s.SQUARE_SIZE), (500, s.\n SQUARE_SIZE), s.LINE_WIDTH)\n', (505, 588), False, 'import pygame\n'), ((586, 660), 'pygame.draw.line', 'pygame.draw.line', (['screen', 's.LINE_COLOR', '(0, 332)', '(500, 332)', 's.LINE_WIDTH'], {}), '(screen, s.LINE_COLOR, (0, 332), (500, 332), s.LINE_WIDTH)\n', (602, 660), False, 'import pygame\n'), ((686, 785), 'pygame.draw.line', 'pygame.draw.line', (['screen', 's.LINE_COLOR', '(s.SQUARE_SIZE, 0)', '(s.SQUARE_SIZE, 500)', 's.LINE_WIDTH'], {}), '(screen, s.LINE_COLOR, (s.SQUARE_SIZE, 0), (s.SQUARE_SIZE, \n 500), s.LINE_WIDTH)\n', (702, 785), False, 'import pygame\n'), ((785, 859), 'pygame.draw.line', 'pygame.draw.line', (['screen', 's.LINE_COLOR', '(332, 0)', '(332, 500)', 's.LINE_WIDTH'], {}), '(screen, s.LINE_COLOR, (332, 0), (332, 500), s.LINE_WIDTH)\n', (801, 859), False, 'import pygame\n'), ((2026, 2096), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'color', '(posX, 15)', '(posX, s.HEIGHT - 15)', '(15)'], {}), '(screen, color, (posX, 15), (posX, s.HEIGHT - 15), 15)\n', (2042, 2096), False, 'import pygame\n'), ((2289, 2358), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'color', '(15, posY)', '(s.WIDTH - 15, posY)', '(15)'], {}), '(screen, color, (15, posY), (s.WIDTH - 15, posY), 15)\n', (2305, 2358), False, 'import pygame\n'), ((2479, 2555), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'color', '(15, s.HEIGHT - 15)', '(s.WIDTH - 15, 15)', '(15)'], {}), '(screen, color, (15, s.HEIGHT - 15), (s.WIDTH - 15, 15), 15)\n', (2495, 2555), False, 'import pygame\n'), ((2677, 2753), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'color', '(15, 15)', '(s.WIDTH - 15, s.HEIGHT - 15)', '(15)'], {}), '(screen, color, (15, 15), (s.WIDTH - 15, s.HEIGHT - 15), 15)\n', (2693, 2753), False, 'import pygame\n'), ((5331, 5373), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""chalkduster.ttf"""', '(30)'], {}), "('chalkduster.ttf', 30)\n", (5350, 5373), False, 'import pygame\n'), ((7497, 7515), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (7513, 7515), False, 'import pygame\n'), ((8717, 8740), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (8738, 8740), False, 'import pygame\n'), ((3089, 3309), 'pygame.draw.line', 'pygame.draw.line', (['screen', 's.X_COLOR', '(col * s.SQUARE_SIZE + s.SPACE, row * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE)', '(col * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE, row * s.SQUARE_SIZE + s.SPACE)', 's.CROSS_WIDTH'], {}), '(screen, s.X_COLOR, (col * s.SQUARE_SIZE + s.SPACE, row * s\n .SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE), (col * s.SQUARE_SIZE + s.\n SQUARE_SIZE - s.SPACE, row * s.SQUARE_SIZE + s.SPACE), s.CROSS_WIDTH)\n', (3105, 3309), False, 'import pygame\n'), ((3317, 3536), 'pygame.draw.line', 'pygame.draw.line', (['screen', 's.X_COLOR', '(col * s.SQUARE_SIZE + s.SPACE, row * s.SQUARE_SIZE + s.SPACE)', '(col * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE, row * s.SQUARE_SIZE + s.\n SQUARE_SIZE - s.SPACE)', 's.CROSS_WIDTH'], {}), '(screen, s.X_COLOR, (col * s.SQUARE_SIZE + s.SPACE, row * s\n .SQUARE_SIZE + s.SPACE), (col * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE,\n row * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE), s.CROSS_WIDTH)\n', (3333, 3536), False, 'import pygame\n')]
|
"""
Aggregation of all application routes into a single router. All created routers are imported here and
added to a single router for access from the app.api.server file.
router:
- Initial instantiation of a router
- All routers are aggregated to this router
- All routers are given a name (to appear in the URL) and a tag (for documentation)
"""
from fastapi import APIRouter
# Import routes
from app.api.routes.dummy import router as dummy_router
from app.api.routes.forecast import router as forecast_router
from app.api.routes.viz import router as viz_router
from app.api.routes.users import router as users_router
# Access all routes with this router
router = APIRouter()
# Include all routes
router.include_router(dummy_router, prefix="/dummy", tags=["dummy"])
router.include_router(forecast_router, prefix="/forecast", tags=["forecast"])
# viz_router was imported but never mounted; assuming it follows the same
# prefix/tag convention as the routers above:
router.include_router(viz_router, prefix="/viz", tags=["viz"])
router.include_router(users_router, prefix="/users", tags=["users"])
|
[
"fastapi.APIRouter"
] |
[((681, 692), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (690, 692), False, 'from fastapi import APIRouter\n')]
|
from django.db import models
# Create your models here.
class New(models.Model):
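    # Each article stores six repeated blocks of (heading, up to three
    # paragraphs, image, optional attachment); only the first paragraph of
    # each block is required.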
heading_one = models.CharField(max_length=500)
h1_paragraph1 = models.TextField()
h1_paragraph2 = models.TextField(blank=True)
h1_paragraph3 = models.TextField(blank=True)
image_one = models.ImageField(upload_to='news/news_photos')
file_one = models.FileField(upload_to='news/news_files', blank=True)
heading_two = models.CharField(max_length=500)
h2_paragraph1 = models.TextField()
h2_paragraph2 = models.TextField(blank=True)
h2_paragraph3 = models.TextField(blank=True)
image_two = models.ImageField(upload_to='news/news_photos')
file_two = models.FileField(upload_to='news/news_files', blank=True)
heading_three = models.CharField(max_length=500)
h3_paragraph1 = models.TextField()
h3_paragraph2 = models.TextField(blank=True)
h3_paragraph3 = models.TextField(blank=True)
image_three = models.ImageField(upload_to='news/news_photos')
file_three = models.FileField(upload_to='news/news_files', blank=True)
heading_four = models.CharField(max_length=500)
h4_paragraph1 = models.TextField()
h4_paragraph2 = models.TextField(blank=True)
h4_paragraph3 = models.TextField(blank=True)
image_four = models.ImageField(upload_to='news/news_photos')
file_four = models.FileField(upload_to='news/news_files', blank=True)
heading_five = models.CharField(max_length=500)
h5_paragraph1 = models.TextField()
h5_paragraph2 = models.TextField(blank=True)
h5_paragraph3 = models.TextField(blank=True)
image_five = models.ImageField(upload_to='news/news_photos')
file_five = models.FileField(upload_to='news/news_files', blank=True)
heading_six = models.CharField(max_length=500)
h6_paragraph1 = models.TextField()
h6_paragraph2 = models.TextField(blank=True)
h6_paragraph3 = models.TextField(blank=True)
image_six = models.ImageField(upload_to='news/news_photos')
file_six = models.FileField(upload_to='news/news_files', blank=True)
pub_date = models.DateTimeField('Date Published', auto_now_add=True, auto_now=False)
def __str__(self):
return str(self.pub_date)
|
[
"django.db.models.FileField",
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ImageField",
"django.db.models.DateTimeField"
] |
[((101, 133), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (117, 133), False, 'from django.db import models\n'), ((154, 172), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (170, 172), False, 'from django.db import models\n'), ((193, 221), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (209, 221), False, 'from django.db import models\n'), ((242, 270), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (258, 270), False, 'from django.db import models\n'), ((287, 334), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""news/news_photos"""'}), "(upload_to='news/news_photos')\n", (304, 334), False, 'from django.db import models\n'), ((351, 408), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""news/news_files"""', 'blank': '(True)'}), "(upload_to='news/news_files', blank=True)\n", (367, 408), False, 'from django.db import models\n'), ((428, 460), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (444, 460), False, 'from django.db import models\n'), ((481, 499), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (497, 499), False, 'from django.db import models\n'), ((520, 548), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (536, 548), False, 'from django.db import models\n'), ((569, 597), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (585, 597), False, 'from django.db import models\n'), ((614, 661), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""news/news_photos"""'}), "(upload_to='news/news_photos')\n", (631, 661), False, 'from django.db import models\n'), ((678, 735), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""news/news_files"""', 'blank': '(True)'}), "(upload_to='news/news_files', blank=True)\n", (694, 735), False, 'from django.db import models\n'), ((757, 789), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (773, 789), False, 'from django.db import models\n'), ((810, 828), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (826, 828), False, 'from django.db import models\n'), ((849, 877), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (865, 877), False, 'from django.db import models\n'), ((898, 926), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (914, 926), False, 'from django.db import models\n'), ((945, 992), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""news/news_photos"""'}), "(upload_to='news/news_photos')\n", (962, 992), False, 'from django.db import models\n'), ((1011, 1068), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""news/news_files"""', 'blank': '(True)'}), "(upload_to='news/news_files', blank=True)\n", (1027, 1068), False, 'from django.db import models\n'), ((1089, 1121), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (1105, 1121), False, 'from django.db import models\n'), ((1142, 1160), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1158, 1160), False, 'from django.db import models\n'), ((1181, 1209), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (1197, 1209), False, 'from django.db import models\n'), ((1230, 1258), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (1246, 1258), False, 'from django.db import models\n'), ((1276, 1323), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""news/news_photos"""'}), "(upload_to='news/news_photos')\n", (1293, 1323), False, 'from django.db import models\n'), ((1341, 1398), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""news/news_files"""', 'blank': '(True)'}), "(upload_to='news/news_files', blank=True)\n", (1357, 1398), False, 'from django.db import models\n'), ((1419, 1451), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (1435, 1451), False, 'from django.db import models\n'), ((1472, 1490), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1488, 1490), False, 'from django.db import models\n'), ((1511, 1539), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (1527, 1539), False, 'from django.db import models\n'), ((1560, 1588), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (1576, 1588), False, 'from django.db import models\n'), ((1606, 1653), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""news/news_photos"""'}), "(upload_to='news/news_photos')\n", (1623, 1653), False, 'from django.db import models\n'), ((1671, 1728), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""news/news_files"""', 'blank': '(True)'}), "(upload_to='news/news_files', blank=True)\n", (1687, 1728), False, 'from django.db import models\n'), ((1748, 1780), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (1764, 1780), False, 'from django.db import models\n'), ((1801, 1819), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1817, 1819), False, 'from django.db import models\n'), ((1840, 1868), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (1856, 1868), False, 'from django.db import models\n'), ((1889, 1917), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (1905, 1917), False, 'from django.db import models\n'), ((1934, 1981), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""news/news_photos"""'}), "(upload_to='news/news_photos')\n", (1951, 1981), False, 'from django.db import models\n'), ((1998, 2055), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""news/news_files"""', 'blank': '(True)'}), "(upload_to='news/news_files', blank=True)\n", (2014, 2055), False, 'from django.db import models\n'), ((2072, 2145), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""Date Published"""'], {'auto_now_add': '(True)', 'auto_now': '(False)'}), "('Date Published', auto_now_add=True, auto_now=False)\n", (2092, 2145), False, 'from django.db import models\n')]
|
import discord
from discord.ext import tasks, commands
from urllib.parse import quote as uriquote
import html
from utils.time import human_timedelta
from datetime import datetime
import base64
class Twitter(commands.Cog):
"""All twittery functions like subscribe and lasttweet"""
def __init__(self, bot):
self.bot = bot
self.tweet_subscriptions.start() # pylint: disable=no-member
self.last_checked = {}
def cog_unload(self):
self.tweet_subscriptions.cancel() # pylint: disable=no-member
# TODO : Subscribe/unsubscribe functions here.
# Need a different config method for subs
# Preferably something I can keep in-memory with write on add/remove
# Possibly just a simple json file?
# consider ignore-retweets on subscription?
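    # A possible shape for the subscriptions config read by the loop below
    # (illustrative sketch only, not part of the original): a JSON mapping of
    # twitter handles to lists of Discord channel IDs, e.g.
    #   {"example_handle": [123456789012345678, 234567890123456789]}
    # The handle and IDs above are hypothetical placeholders.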
@commands.command(hidden=True)
@commands.is_owner()
async def twitter_token(self, ctx):
auth = f"{self.bot.config.twitterconsumerkey}:{self.bot.config.twitterconsumersecret}"
auth = "Basic " + base64.b64encode(auth.encode()).decode()
url = "https://api.twitter.com/oauth2/token"
body = {"grant_type" : "client_credentials"}
headers = {"Authorization": auth, "Content-Type" : "application/x-www-form-urlencoded;charset=UTF-8"}
async with self.bot.session.post(url, data=body, headers=headers) as resp:
response = await resp.json()
print(response)
@commands.command(name='lasttweet')
async def last_tweet(self, ctx, *, handle: str):
"""Show the last tweet of a twitter user"""
tweet = await self.read_timeline(handle)
if tweet:
#parsed = self.parse_tweet(tweet[0])
e = self.embed_tweet(tweet[0])
await ctx.send(embed=e)
#await ctx.send("{author}: {text} ({ago})".format(**parsed))
else:
await ctx.send(f"Failed to load tweets from twitter user @{handle}")
@commands.command(hidden=True)
async def trump(self, ctx):
"""Show trump's most recent words of wisdom"""
await self.last_tweet(ctx, handle='realDonaldTrump')
# TODO Handle retweets better
def embed_tweet(self, tweet):
handle = tweet['user']['screen_name']
link = f"https://twitter.com/{handle}/status/{tweet['id']}"
e = discord.Embed(title='Tweet', url=link, color=0x1da1f2)
author = f"{tweet['user']['name']} (@{handle})"
aurl = f"https://twitter.com/{handle}"
e.set_author(name=author, url=aurl, icon_url=tweet['user']['profile_image_url_https'])
e.description = html.unescape(tweet['full_text'].strip())
ts = datetime.strptime(tweet['created_at'], "%a %b %d %H:%M:%S +0000 %Y")
e.timestamp = ts
return e
def parse_tweet(self, tweet):
print(tweet)
updated = datetime.strptime(tweet['created_at'], "%a %b %d %H:%M:%S +0000 %Y")
ago = human_timedelta(updated, brief=True)
author = tweet['user']['screen_name']
text = html.unescape(tweet['full_text'].strip())
return {'author': author, 'text': text, "ago": ago, "updated": updated}
async def read_timeline(self, user, count=1):
url = "https://api.twitter.com/1.1/statuses/user_timeline.json"
params = {"screen_name": user, "count": count, "tweet_mode": "extended"}
headers = {"Authorization": "Bearer " + self.bot.config.twitter_token}
async with self.bot.session.get(url, params=params, headers=headers) as resp:
if resp.status == 200:
return await resp.json()
else:
return None
@tasks.loop(minutes=1.0)
async def tweet_subscriptions(self):
"""Reads a twitter timeline and posts the new tweets to any channels that sub it"""
subs = self.bot.config.twitter_subscriptions
for twitter_nick in subs:
if twitter_nick not in self.last_checked:
self.last_checked[twitter_nick] = datetime.utcnow()
self.bot.logger.info(f"Starting tweet loop. Last checked: {self.last_checked}")
tweets = await self.read_timeline(twitter_nick, count=3)
self.bot.logger.debug(f"Raw tweetsdata: {tweets}")
if not tweets:
continue
text = ""
data = None
# Newest tweets first, so reverse
for tweet in reversed(tweets):
data = self.parse_tweet(tweet)
self.bot.logger.debug(f"I have data {data}")
if data['updated'] > self.last_checked[twitter_nick]:
text += data['text'] + "\n"
self.bot.logger.debug(f"I have a tweet: {text}")
for channel in subs[twitter_nick]:
# a count of 3 per minute seems to work....
if data and text.strip():
self.last_checked[twitter_nick] = data['updated']
message = f"{data['author']}: {text.strip()}"
chan = self.bot.get_channel(channel)
if chan:
await chan.send(message)
def setup(bot):
bot.add_cog(Twitter(bot))
|
[
"discord.ext.commands.command",
"discord.Embed",
"utils.time.human_timedelta",
"datetime.datetime.strptime",
"discord.ext.tasks.loop",
"datetime.datetime.utcnow",
"discord.ext.commands.is_owner"
] |
[((801, 830), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)'}), '(hidden=True)\n', (817, 830), False, 'from discord.ext import tasks, commands\n'), ((836, 855), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (853, 855), False, 'from discord.ext import tasks, commands\n'), ((1432, 1466), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""lasttweet"""'}), "(name='lasttweet')\n", (1448, 1466), False, 'from discord.ext import tasks, commands\n'), ((1941, 1970), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)'}), '(hidden=True)\n', (1957, 1970), False, 'from discord.ext import tasks, commands\n'), ((3643, 3666), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'minutes': '(1.0)'}), '(minutes=1.0)\n', (3653, 3666), False, 'from discord.ext import tasks, commands\n'), ((2315, 2368), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Tweet"""', 'url': 'link', 'color': '(1942002)'}), "(title='Tweet', url=link, color=1942002)\n", (2328, 2368), False, 'import discord\n'), ((2656, 2724), 'datetime.datetime.strptime', 'datetime.strptime', (["tweet['created_at']", '"""%a %b %d %H:%M:%S +0000 %Y"""'], {}), "(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')\n", (2673, 2724), False, 'from datetime import datetime\n'), ((2842, 2910), 'datetime.datetime.strptime', 'datetime.strptime', (["tweet['created_at']", '"""%a %b %d %H:%M:%S +0000 %Y"""'], {}), "(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')\n", (2859, 2910), False, 'from datetime import datetime\n'), ((2925, 2961), 'utils.time.human_timedelta', 'human_timedelta', (['updated'], {'brief': '(True)'}), '(updated, brief=True)\n', (2940, 2961), False, 'from utils.time import human_timedelta\n'), ((4001, 4018), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4016, 4018), False, 'from datetime import datetime\n')]
|
"""adapted from: https://gist.github.com/shivakar/82ac5c9cb17c95500db1906600e5e1ea"""
import argparse
import os
import sys
from http.server import SimpleHTTPRequestHandler, HTTPServer
from os.path import realpath, join, dirname, isdir, exists
parser = argparse.ArgumentParser(description='Start simple HTTP server supporting HTTP/1.1 requests (needed to play '
'the aligned audio in HTML5)!')
parser.add_argument('cwd', type=str, nargs='?', default='htdocs',
help='(optional) directory to serve from (default: \'htdocs\')')
parser.add_argument('port', type=int, nargs='?', default=8000, help='(optional) port to use (default: 8000)')
args = parser.parse_args()
class RangeHTTPRequestHandler(SimpleHTTPRequestHandler):
"""RangeHTTPRequestHandler is a SimpleHTTPRequestHandler
with HTTP 'Range' support"""
def send_head(self):
"""Common code for GET and HEAD commands.
Return value is either a file object or None
"""
path = self.translate_path(self.path)
ctype = self.guess_type(path)
# Handling file location
# If directory, let SimpleHTTPRequestHandler handle the request
if isdir(path):
return SimpleHTTPRequestHandler.send_head(self)
# Handle file not found
if not exists(path):
return self.send_error(404, self.responses.get(404)[0])
# Handle file request
f = open(path, 'rb')
fs = os.fstat(f.fileno())
size = fs[6]
# Parse range header
# Range headers look like 'bytes=500-1000'
start, end = 0, size - 1
print('headers', self.headers.__dict__)
if 'Range' in self.headers:
            # str.strip('bytes=') strips a character *set*, not the prefix,
            # so split on '=' instead to extract the byte range reliably.
            start, end = self.headers.get('Range').strip().split('=', 1)[1].split('-')
if start == "":
# If no start, then the request is for last N bytes
## e.g. bytes=-500
try:
end = int(end)
except ValueError as e:
                    self.send_error(400, 'invalid range')
                    f.close()
                    return None
                # A suffix range means the last N bytes of the file
                start = size - end
                end = size - 1
else:
try:
start = int(start)
except ValueError as e:
                    self.send_error(400, 'invalid range')
                    f.close()
                    return None
if start >= size:
# If requested start is greater than filesize
                    self.send_error(416, self.responses.get(416)[0])
                    f.close()
                    return None
if end == "":
# If only start is provided then serve till end
end = size - 1
else:
try:
end = int(end)
except ValueError as e:
                        self.send_error(400, 'invalid range')
                        f.close()
                        return None
# Correct the values of start and end
start = max(start, 0)
end = min(end, size - 1)
self.range = (start, end)
# Setup headers and response
l = end - start + 1
if 'Range' in self.headers:
self.send_response(206)
else:
self.send_response(200)
self.send_header('Content-type', ctype)
self.send_header('Accept-Ranges', 'bytes')
self.send_header('Content-Range',
'bytes %s-%s/%s' % (start, end, size))
self.send_header('Content-Length', str(l))
self.send_header('Last-Modified', self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def copyfile(self, infile, outfile):
"""Copies data between two file objects
If the current request is a 'Range' request then only the requested
bytes are copied.
Otherwise, the entire file is copied using SimpleHTTPServer.copyfile
"""
if 'Range' not in self.headers:
SimpleHTTPRequestHandler.copyfile(self, infile, outfile)
return
start, end = self.range
infile.seek(start)
bufsize = 64 * 1024 # 64KB
        # the inclusive range start..end covers end - start + 1 bytes
        length = end - start + 1
        remainder = length % bufsize
        times = length // bufsize
        steps = [bufsize] * times + [remainder]
for astep in steps:
            buf = infile.read(astep)
print('sending', infile, len(buf))
outfile.write(buf)
return
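# Worked example (illustrative, not part of the original): for a 2000-byte
# file and the header "Range: bytes=500-1000", send_head computes start=500,
# end=1000 and Content-Length = end - start + 1 = 501, then replies
#   206 Partial Content
#   Content-Range: bytes 500-1000/2000
# A header of "bytes=-500" requests the last 500 bytes: start = size - 500,
# end = size - 1.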
if __name__ == '__main__':
print(f'serving from {args.cwd} on port {args.port}')
os.chdir(join(dirname(realpath(__file__)), args.cwd))
server_address = ('', args.port)
HandlerClass = RangeHTTPRequestHandler
ServerClass = HTTPServer
HandlerClass.protocol_version = "HTTP/1.1"
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
sys.stderr.write(f'started MJC\n cwd={args.cwd} port={args.port}')
sys.stderr.flush()
print(f'Serving HTTP on {sa[0]}:{sa[1]} ... in background')
httpd.serve_forever()
|
[
"argparse.ArgumentParser",
"os.path.isdir",
"os.path.realpath",
"os.path.exists",
"http.server.SimpleHTTPRequestHandler.copyfile",
"http.server.SimpleHTTPRequestHandler.send_head",
"sys.stderr.write",
"sys.stderr.flush"
] |
[((253, 399), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Start simple HTTP server supporting HTTP/1.1 requests (needed to playthe aligned audio in HTML5)!"""'}), "(description=\n 'Start simple HTTP server supporting HTTP/1.1 requests (needed to playthe aligned audio in HTML5)!'\n )\n", (276, 399), False, 'import argparse\n'), ((4626, 4695), 'sys.stderr.write', 'sys.stderr.write', (['f"""started MJC\n cwd={args.cwd} port={args.port}"""'], {}), '(f"""started MJC\n cwd={args.cwd} port={args.port}""")\n', (4642, 4695), False, 'import sys\n'), ((4697, 4715), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (4713, 4715), False, 'import sys\n'), ((1222, 1233), 'os.path.isdir', 'isdir', (['path'], {}), '(path)\n', (1227, 1233), False, 'from os.path import realpath, join, dirname, isdir, exists\n'), ((1254, 1294), 'http.server.SimpleHTTPRequestHandler.send_head', 'SimpleHTTPRequestHandler.send_head', (['self'], {}), '(self)\n', (1288, 1294), False, 'from http.server import SimpleHTTPRequestHandler, HTTPServer\n'), ((1343, 1355), 'os.path.exists', 'exists', (['path'], {}), '(path)\n', (1349, 1355), False, 'from os.path import realpath, join, dirname, isdir, exists\n'), ((3761, 3817), 'http.server.SimpleHTTPRequestHandler.copyfile', 'SimpleHTTPRequestHandler.copyfile', (['self', 'infile', 'outfile'], {}), '(self, infile, outfile)\n', (3794, 3817), False, 'from http.server import SimpleHTTPRequestHandler, HTTPServer\n'), ((4343, 4361), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (4351, 4361), False, 'from os.path import realpath, join, dirname, isdir, exists\n')]
|
"""
show simplest database operation
"""
import sqlite3
sql_statements = (
"drop table if exists test",
"create table test (id, name)",
"insert into test values (1, 'abc')",
"insert into test values (2, 'def')",
"insert into test values (3, 'xyz')",
"select id, name from test",
)
def main():
""" run the sql """
conn = sqlite3.connect("dbms.db")
c = conn.cursor()
    for statement in sql_statements:
        c.execute(statement)
conn.commit()
rows = c.fetchall()
print(rows)
c.close()
conn.close()
if __name__ == "__main__":
main()
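# Variation sketch (not in the original): the same inserts written with
# parameter binding, the idiomatic sqlite3 style for passing values:
#   c.executemany("insert into test values (?, ?)",
#                 [(1, 'abc'), (2, 'def'), (3, 'xyz')])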
|
[
"sqlite3.connect"
] |
[((356, 382), 'sqlite3.connect', 'sqlite3.connect', (['"""dbms.db"""'], {}), "('dbms.db')\n", (371, 382), False, 'import sqlite3\n')]
|
#encoding: utf-8
from flask import Blueprint
admin = Blueprint('admin', __name__)
# import views
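# Usage sketch (assumption, not from the original app): the blueprint is
# typically registered on the Flask app elsewhere, e.g.
#   from .admin import admin   # hypothetical module path
#   app.register_blueprint(admin, url_prefix='/admin')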
|
[
"flask.Blueprint"
] |
[((53, 83), 'flask.Blueprint', 'Blueprint', (['"""admin"""', '"""__name__"""'], {}), "('admin', '__name__')\n", (62, 83), False, 'from flask import Blueprint\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Create Github Actions Job Matrix for building dockerfiles.
Expects one optional input as the first positional argument. This is the upstream branch name, which
the current working tree will be compared against in order to understand if a benchmark should
be labeled as changed or not. If this input is not given, then "master" will be used.
A benchmark will be labeled as changed if any of the following conditions are met:
* A core component of benchmark-wrapper has changed, known as a 'bone'. Please see $bones for a list of
regex searches.
* Any of the files underneath the benchmark's module path
The JSON output looks like this, in accordance to the GHA Job Matrix Format:
```json
{
"include": [
{
"dockerfile": "path to dockerfile relative to repo root",
"image_name": "name of the image (i.e. name of directory containing the DF)",
"benchmark": "name of the benchmark (i.e. name of directory containing the DF)",
"env_var": "environment variable where image URL will be stored (i.e. <BENCHMARK>_IMAGE)",
"tag_suffix": "suffix of the image tag that should be used (i.e. arch of the DF with a dash)",
"tags": "space separated list of tags that should be applied to the image",
"arch": "architecture that the DF should be built on",
"changed": "whether or not changes have been made which require the benchmark to be tested",
},
...
]
}
```
If the `--manifest` option is given, then GHA job matrices will be printed which can be used for
building and pushing multi-arch image manifests to quay. The output looks like this:
```json
{
"build": "build matrix from above",
"manifest": {
"include": [
{
"benchmark": "name of the benchmark associated with the image",
"image_name": "name of the image",
"dockerfile": "relative path to dockerfile of image",
"tag": "tag the manifest will be built for",
"archs": "archictectures that should be added into the image manifest, space separated",
"tag_suffixes": "tag suffixes to add into the image manifest, space separated",
"changed": "whether or not changes have been made which require the benchmark to be tested",
},
...
]
}
}
"""
import argparse
import dataclasses
import json
import pathlib
import re
import shlex
import subprocess
from typing import Dict, Iterable, List, Set, Union
ARCHS = (
"amd64",
"arm64",
)
BONES = (
r"ci/",
r".github/workflows",
r"MANIFEST.in",
r"setup.py",
r"setup.cfg",
r"snafu/benchmarks/_[a-z]*.py",
r"snafu/[a-z]*.py",
r"tox.ini",
r"version.txt",
r"requirements/",
)
IGNORES = (r"Dockerfile\.ppc64le$",)
def get_git_diff(upstream_branch: str) -> str:
"""
Run git-diff against upstream branch.
    Fetches the upstream branch first to ensure it can be compared against.
Arguments
---------
upstream_branch : str
Upstream branch to compare against.
Returns
-------
str
Output of git diff
"""
subprocess.run(shlex.split(f"git fetch origin {upstream_branch}"), check=True)
completed_process = subprocess.run(
shlex.split(f"git diff origin/{upstream_branch} --name-only"),
check=True,
stdout=subprocess.PIPE,
)
return completed_process.stdout.decode("utf-8")
def parse_git_diff(diff_str: str) -> Set[str]:
"""
Return parsed output of `git-diff --name-only`.
Arguments
---------
diff_str : str
Output of `git-diff --name-only`.
Returns
-------
set of str
Unique set of files changed, according to git-diff
"""
return set(map(str.strip, diff_str.strip().split("\n")))
def get_dockerfile_list() -> str:
"""
    Use the find command to get a list of all Dockerfiles within snafu.
Returns
-------
str
Output of find command
"""
completed_process = subprocess.run(
shlex.split("find snafu/ -name Dockerfile*"), check=True, stdout=subprocess.PIPE
)
return completed_process.stdout.decode("utf-8")
def parse_dockerfile_list(df_list: str) -> Set[str]:
"""
Parse given list of Dockerfiles into a set of str.
If a given Dockerfile path matches a regex in IGNORES, then the Dockerfile will
    not be included in the returned set.
Arguments
---------
df_list : str
Dockerfile list to parse. Should be newline-separated list of relative paths from
project root.
Returns
-------
set of str
Set of all unique dockerfile paths parsed from given input.
"""
result = []
for dockerfile in df_list.strip().split("\n"):
dockerfile = dockerfile.strip()
ignored = False
for ignore in IGNORES:
if re.search(ignore, dockerfile) is not None:
ignored = True
break
if not ignored:
result.append(dockerfile)
return set(result)
@dataclasses.dataclass
class MatrixEntry:
"""
Entry within the matrix.
See module docstring for details.
"""
dockerfile: str
image_name: str
benchmark: str
env_var: str
archs: Iterable[str]
changed: bool
tags: Iterable[str]
@classmethod
def new(cls, dockerfile: str, changed: bool, archs: Iterable[str], tags: Iterable[str]) -> "MatrixEntry":
"""
        Create a new instance of MatrixEntry.
Parameters
----------
dockerfile : str
Relative path to Dockerfile. Will be used to determine other attributes.
changed : bool
Sets the changed attribute.
archs : list of str
Sets the archs attribute.
tags : list of str
Sets the tags attribute.
"""
benchmark = str(pathlib.Path(dockerfile).parent.stem).replace("_wrapper", "")
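        # Illustrative example (hypothetical path): "snafu/fio_wrapper/Dockerfile"
        # has parent stem "fio_wrapper", so benchmark and image_name become
        # "fio" and env_var becomes "FIO_IMAGE" below.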
return cls(
dockerfile=dockerfile,
changed=changed,
archs=archs,
image_name=benchmark,
benchmark=benchmark,
env_var=f"{benchmark.upper()}_IMAGE",
tags=tags,
)
def build_json(self) -> Iterable[Dict[str, Union[str, bool]]]:
"""Convert the given MatrixEntry into series of JSON-dicts, one for each arch."""
for arch in self.archs:
tag_suffix = f"-{arch}"
yield {
"dockerfile": self.dockerfile,
"image_name": self.image_name,
"benchmark": self.benchmark,
"env_var": self.env_var,
"tag_suffix": tag_suffix,
"arch": arch,
"changed": self.changed,
"tags": " ".join([f"{tag}{tag_suffix}" for tag in self.tags]),
}
def manifest_json(self) -> Iterable[Dict[str, Union[str, bool]]]:
"""Convert the given MatrixEntry into series of JSON-dicts, one for each tag."""
for tag in self.tags:
tag_suffixes = " ".join([f"-{arch}" for arch in self.archs])
archs = " ".join(self.archs)
yield {
"benchmark": self.benchmark,
"image_name": self.image_name,
"dockerfile": self.dockerfile,
"tag": tag,
"tag_suffixes": tag_suffixes,
"changed": self.changed,
"archs": archs,
}
class MatrixBuilder:
"""
Builder for the GHA Jobs Matrix.
Parameters
----------
archs : iterable of str
List of architectures to build against. Will create a matrix entry for each architecture for each
Dockerfile.
tags : iterable of str
List of tags that will be applied to the built images.
bones : iterable of str
List of regex strings to match paths against to determine if the path is a snafu "bone".
upstream_branch : str
Upstream branch to compare changes to, in order to determine the value of "changed".
dockerfile_set : set of str
Set of dockerfiles within the snafu repository.
changed_set : set of str
Set of changed files within the snafu repository.
"""
def __init__(
self,
archs: Iterable[str],
tags: Iterable[str],
bones: Iterable[str],
upstream_branch: str,
dockerfile_set: Set[str],
changed_set: Set[str],
):
"""Contsruct the matrix builder."""
self.archs = archs
self.tags = tags
self.bones = bones
self.upstream_branch = upstream_branch
self.dockerfile_set = dockerfile_set
self.changed_set = changed_set
self.manifest_matrix: Dict[str, List[Dict[str, Union[str, bool]]]] = {}
self.build_matrix: Dict[str, List[Dict[str, Union[str, bool]]]] = {}
self.reset()
def reset(self):
"""Reset the matrix to empty starting point."""
self.build_matrix = {"include": []}
self.manifest_matrix = {"include": []}
def add_entry(self, entry: MatrixEntry):
"""Add the given MatrixEntry into the jobs matrix."""
for json_dict in entry.build_json():
self.build_matrix["include"].append(json_dict)
for json_dict in entry.manifest_json():
self.manifest_matrix["include"].append(json_dict)
def bones_changed(self) -> bool:
"""Return True if a bone has is found in the changed set."""
for bone in self.bones:
bone_regex = re.compile(bone)
for changed in self.changed_set:
if bone_regex.search(changed) is not None:
return True
return False
def benchmark_changed(self, dockerfile: str) -> bool:
"""Return True if the given dockerfile's benchmark has changed."""
dockerfile_dir = pathlib.Path(dockerfile).parent
for changed in self.changed_set:
try:
pathlib.Path(changed).relative_to(dockerfile_dir)
except ValueError:
pass
else:
return True
return False
def build(self, changed_only: bool = True):
"""
Build the GHA jobs matrix.
Parameters
----------
changed_only : bool, optional
If True, then only dockerfiles that are considered changed will be added into the matrix.
Defaults to True.
"""
bones_changed = self.bones_changed()
for dockerfile in self.dockerfile_set:
changed = bones_changed or self.benchmark_changed(dockerfile)
if (changed_only and changed) or not changed_only:
entry = MatrixEntry.new(
dockerfile=dockerfile, archs=self.archs, changed=changed, tags=self.tags
)
self.add_entry(entry)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("tags", nargs="+", help="Tags to apply to the built images")
parser.add_argument(
"--upstream",
default="master",
help="Upstream branch to compare against. Defaults to 'master'",
)
parser.add_argument("--changed-only", action="store_true", help="Only output changed Dockerfiles")
parser.add_argument(
"--manifest", action="store_true", help="Output both the build and manifest matrix JSON"
)
args = parser.parse_args()
builder = MatrixBuilder(
archs=ARCHS,
tags=args.tags,
bones=BONES,
upstream_branch=args.upstream,
dockerfile_set=parse_dockerfile_list(get_dockerfile_list()),
changed_set=parse_git_diff(get_git_diff(args.upstream)),
)
builder.build(changed_only=args.changed_only)
if args.manifest:
print(json.dumps({"build": builder.build_matrix, "manifest": builder.manifest_matrix}))
else:
print(json.dumps(builder.build_matrix))
|
[
"argparse.ArgumentParser",
"shlex.split",
"json.dumps",
"pathlib.Path",
"re.search",
"re.compile"
] |
[((11061, 11105), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (11084, 11105), False, 'import argparse\n'), ((3265, 3315), 'shlex.split', 'shlex.split', (['f"""git fetch origin {upstream_branch}"""'], {}), "(f'git fetch origin {upstream_branch}')\n", (3276, 3315), False, 'import shlex\n'), ((3377, 3438), 'shlex.split', 'shlex.split', (['f"""git diff origin/{upstream_branch} --name-only"""'], {}), "(f'git diff origin/{upstream_branch} --name-only')\n", (3388, 3438), False, 'import shlex\n'), ((4153, 4197), 'shlex.split', 'shlex.split', (['"""find snafu/ -name Dockerfile*"""'], {}), "('find snafu/ -name Dockerfile*')\n", (4164, 4197), False, 'import shlex\n'), ((9673, 9689), 're.compile', 're.compile', (['bone'], {}), '(bone)\n', (9683, 9689), False, 'import re\n'), ((10007, 10031), 'pathlib.Path', 'pathlib.Path', (['dockerfile'], {}), '(dockerfile)\n', (10019, 10031), False, 'import pathlib\n'), ((11965, 12050), 'json.dumps', 'json.dumps', (["{'build': builder.build_matrix, 'manifest': builder.manifest_matrix}"], {}), "({'build': builder.build_matrix, 'manifest': builder.manifest_matrix}\n )\n", (11975, 12050), False, 'import json\n'), ((12071, 12103), 'json.dumps', 'json.dumps', (['builder.build_matrix'], {}), '(builder.build_matrix)\n', (12081, 12103), False, 'import json\n'), ((4985, 5014), 're.search', 're.search', (['ignore', 'dockerfile'], {}), '(ignore, dockerfile)\n', (4994, 5014), False, 'import re\n'), ((10113, 10134), 'pathlib.Path', 'pathlib.Path', (['changed'], {}), '(changed)\n', (10125, 10134), False, 'import pathlib\n'), ((6009, 6033), 'pathlib.Path', 'pathlib.Path', (['dockerfile'], {}), '(dockerfile)\n', (6021, 6033), False, 'import pathlib\n')]
|
__author__ = "<NAME>"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import io
from contextlib import redirect_stdout
# RiBuild Modules
from delphin_6_automation.database_interactions.db_templates import user_entry
from delphin_6_automation.database_interactions import user_interactions
from delphin_6_automation.database_interactions.db_templates import delphin_entry
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
def test_add_user(empty_database):
user_interactions.create_account('User Test', '<EMAIL>')
assert user_entry.User.objects().first()
assert len(user_entry.User.objects()) == 1
def test_user_properties(empty_database):
u_email = '<EMAIL>'
u_name = '<NAME>'
user_interactions.create_account(u_name, u_email)
expected_user = user_entry.User.objects().first()
assert expected_user.name == u_name
assert expected_user.email == u_email
assert expected_user.simulations == []
assert not expected_user.password
def test_find_user_by_email(add_single_user):
expected_user = user_entry.User.objects().first()
found_user = user_interactions.find_account_by_email(expected_user.email)
assert expected_user.id == found_user.id
def test_add_simulation_to_user(db_one_project):
user = user_entry.User.objects().first()
simulation = delphin_entry.Delphin.objects().first()
user_interactions.add_simulation_to_user(user, simulation)
user.reload()
assert user.simulations
assert simulation.id == user.simulations[0].id
def test_user_simulations(db_one_project):
user = user_entry.User.objects().first()
simulation = delphin_entry.Delphin.objects().first()
user_interactions.add_simulation_to_user(user, simulation)
user.reload()
expected_out = f"ID: {simulation.id} - " \
f"Added: {simulation.added_date} - " \
f"With priority: {simulation.queue_priority}\n"
f = io.StringIO()
with redirect_stdout(f):
user_interactions.list_user_simulations(user)
out = f.getvalue()
assert expected_out == out
|
[
"io.StringIO",
"delphin_6_automation.database_interactions.db_templates.user_entry.User.objects",
"delphin_6_automation.database_interactions.user_interactions.find_account_by_email",
"delphin_6_automation.database_interactions.user_interactions.list_user_simulations",
"delphin_6_automation.database_interactions.db_templates.delphin_entry.Delphin.objects",
"delphin_6_automation.database_interactions.user_interactions.create_account",
"contextlib.redirect_stdout",
"delphin_6_automation.database_interactions.user_interactions.add_simulation_to_user"
] |
[((661, 717), 'delphin_6_automation.database_interactions.user_interactions.create_account', 'user_interactions.create_account', (['"""User Test"""', '"""<EMAIL>"""'], {}), "('User Test', '<EMAIL>')\n", (693, 717), False, 'from delphin_6_automation.database_interactions import user_interactions\n'), ((907, 956), 'delphin_6_automation.database_interactions.user_interactions.create_account', 'user_interactions.create_account', (['u_name', 'u_email'], {}), '(u_name, u_email)\n', (939, 956), False, 'from delphin_6_automation.database_interactions import user_interactions\n'), ((1296, 1356), 'delphin_6_automation.database_interactions.user_interactions.find_account_by_email', 'user_interactions.find_account_by_email', (['expected_user.email'], {}), '(expected_user.email)\n', (1335, 1356), False, 'from delphin_6_automation.database_interactions import user_interactions\n'), ((1562, 1620), 'delphin_6_automation.database_interactions.user_interactions.add_simulation_to_user', 'user_interactions.add_simulation_to_user', (['user', 'simulation'], {}), '(user, simulation)\n', (1602, 1620), False, 'from delphin_6_automation.database_interactions import user_interactions\n'), ((1872, 1930), 'delphin_6_automation.database_interactions.user_interactions.add_simulation_to_user', 'user_interactions.add_simulation_to_user', (['user', 'simulation'], {}), '(user, simulation)\n', (1912, 1930), False, 'from delphin_6_automation.database_interactions import user_interactions\n'), ((2130, 2143), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2141, 2143), False, 'import io\n'), ((2153, 2171), 'contextlib.redirect_stdout', 'redirect_stdout', (['f'], {}), '(f)\n', (2168, 2171), False, 'from contextlib import redirect_stdout\n'), ((2181, 2226), 'delphin_6_automation.database_interactions.user_interactions.list_user_simulations', 'user_interactions.list_user_simulations', (['user'], {}), '(user)\n', (2220, 2226), False, 'from delphin_6_automation.database_interactions import user_interactions\n'), ((730, 755), 'delphin_6_automation.database_interactions.db_templates.user_entry.User.objects', 'user_entry.User.objects', ([], {}), '()\n', (753, 755), False, 'from delphin_6_automation.database_interactions.db_templates import user_entry\n'), ((779, 804), 'delphin_6_automation.database_interactions.db_templates.user_entry.User.objects', 'user_entry.User.objects', ([], {}), '()\n', (802, 804), False, 'from delphin_6_automation.database_interactions.db_templates import user_entry\n'), ((978, 1003), 'delphin_6_automation.database_interactions.db_templates.user_entry.User.objects', 'user_entry.User.objects', ([], {}), '()\n', (1001, 1003), False, 'from delphin_6_automation.database_interactions.db_templates import user_entry\n'), ((1245, 1270), 'delphin_6_automation.database_interactions.db_templates.user_entry.User.objects', 'user_entry.User.objects', ([], {}), '()\n', (1268, 1270), False, 'from delphin_6_automation.database_interactions.db_templates import user_entry\n'), ((1466, 1491), 'delphin_6_automation.database_interactions.db_templates.user_entry.User.objects', 'user_entry.User.objects', ([], {}), '()\n', (1489, 1491), False, 'from delphin_6_automation.database_interactions.db_templates import user_entry\n'), ((1517, 1548), 'delphin_6_automation.database_interactions.db_templates.delphin_entry.Delphin.objects', 'delphin_entry.Delphin.objects', ([], {}), '()\n', (1546, 1548), False, 'from delphin_6_automation.database_interactions.db_templates import delphin_entry\n'), ((1776, 1801), 
'delphin_6_automation.database_interactions.db_templates.user_entry.User.objects', 'user_entry.User.objects', ([], {}), '()\n', (1799, 1801), False, 'from delphin_6_automation.database_interactions.db_templates import user_entry\n'), ((1827, 1858), 'delphin_6_automation.database_interactions.db_templates.delphin_entry.Delphin.objects', 'delphin_entry.Delphin.objects', ([], {}), '()\n', (1856, 1858), False, 'from delphin_6_automation.database_interactions.db_templates import delphin_entry\n')]
|
"""
A class for converting ``discretize`` meshes to OMF objects
"""
import omf
import numpy as np
import discretize
def ravel_data_array(arr, nx, ny, nz):
"""Ravel's a numpy array into proper order for passing to the OMF
specification from ``discretize``/UBC formats
"""
dim = (nz, ny, nx)
return np.reshape(arr, dim, order="C").ravel(order="F")
def unravel_data_array(arr, nx, ny, nz):
"""Unravel's a numpy array from the OMF specification to
``discretize``/UBC formats - the is the inverse of ``ravel_data_array``
"""
dim = (nz, ny, nx)
return np.reshape(arr, dim, order="F").ravel(order="C")
class InterfaceOMF(object):
def _tensor_mesh_to_omf(mesh, models=None):
"""
Constructs an :class:`omf.VolumeElement` object of this tensor mesh and
the given models as cell data of that grid.
Parameters
----------
mesh : discretize.TensorMesh
The tensor mesh to convert to a :class:`omf.VolumeElement`
models : dict(numpy.ndarray)
Name('s) and array('s). Match number of cells
"""
if models is None:
models = {}
# Make the geometry
geometry = omf.VolumeGridGeometry()
# Set tensors
tensors = mesh.h
if len(tensors) < 1:
raise RuntimeError(
"Your mesh is empty... fill it out before converting to OMF"
)
elif len(tensors) == 1:
geometry.tensor_u = tensors[0]
geometry.tensor_v = np.array(
[
0.0,
]
)
geometry.tensor_w = np.array(
[
0.0,
]
)
elif len(tensors) == 2:
geometry.tensor_u = tensors[0]
geometry.tensor_v = tensors[1]
geometry.tensor_w = np.array(
[
0.0,
]
)
elif len(tensors) == 3:
geometry.tensor_u = tensors[0]
geometry.tensor_v = tensors[1]
geometry.tensor_w = tensors[2]
else:
raise RuntimeError("This mesh is too high-dimensional for OMF")
# Set rotation axes
geometry.axis_u = mesh.axis_u
geometry.axis_v = mesh.axis_v
geometry.axis_w = mesh.axis_w
# Set the origin
geometry.origin = mesh.origin
# Make sure the geometry is built correctly
geometry.validate()
        # Make the volume element (the OMF object)
omfmesh = omf.VolumeElement(
geometry=geometry,
)
# Add model data arrays onto the cells of the mesh
omfmesh.data = []
for name, arr in models.items():
data = omf.ScalarData(
name=name,
array=ravel_data_array(arr, *mesh.shape_cells),
location="cells",
)
omfmesh.data.append(data)
# Validate to make sure a proper OMF object is returned to the user
omfmesh.validate()
return omfmesh
def _tree_mesh_to_omf(mesh, models=None):
raise NotImplementedError("Not possible until OMF v2 is released.")
def _curvilinear_mesh_to_omf(mesh, models=None):
raise NotImplementedError("Not currently possible.")
def _cyl_mesh_to_omf(mesh, models=None):
raise NotImplementedError("Not currently possible.")
def to_omf(mesh, models=None):
"""Convert this mesh object to it's proper ``omf`` data object with
the given model dictionary as the cell data of that dataset.
Parameters
----------
models : dict(numpy.ndarray)
Name('s) and array('s). Match number of cells
"""
# TODO: mesh.validate()
converters = {
# TODO: 'tree' : InterfaceOMF._tree_mesh_to_omf,
"tensor": InterfaceOMF._tensor_mesh_to_omf,
# TODO: 'curv' : InterfaceOMF._curvilinear_mesh_to_omf,
# TODO: 'CylindricalMesh' : InterfaceOMF._cyl_mesh_to_omf,
}
key = mesh._meshType.lower()
try:
convert = converters[key]
except KeyError:
raise RuntimeError(
"Mesh type `{}` is not currently supported for OMF conversion.".format(
key
)
)
# Convert the data object
return convert(mesh, models=models)
@staticmethod
def _omf_volume_to_tensor(element):
"""Convert an :class:`omf.VolumeElement` to :class:`discretize.TensorMesh`"""
geometry = element.geometry
h = [geometry.tensor_u, geometry.tensor_v, geometry.tensor_w]
mesh = discretize.TensorMesh(h)
mesh.axis_u = geometry.axis_u
mesh.axis_v = geometry.axis_v
mesh.axis_w = geometry.axis_w
mesh.origin = geometry.origin
data_dict = {}
for data in element.data:
# NOTE: this is agnostic about data location - i.e. nodes vs cells
data_dict[data.name] = unravel_data_array(
np.array(data.array), *mesh.shape_cells
)
# Return TensorMesh and data dictionary
return mesh, data_dict
@staticmethod
def from_omf(element):
"""Convert an OMF element to it's proper ``discretize`` type.
Automatically determines the output type. Returns both the mesh and a
dictionary of model arrays.
"""
element.validate()
converters = {
omf.VolumeElement.__name__: InterfaceOMF._omf_volume_to_tensor,
}
key = element.__class__.__name__
try:
convert = converters[key]
except KeyError:
raise RuntimeError(
"OMF type `{}` is not currently supported for conversion.".format(key)
)
# Convert the data object
return convert(element)
|
[
"discretize.TensorMesh",
"omf.VolumeElement",
"omf.VolumeGridGeometry",
"numpy.array",
"numpy.reshape"
] |
[((1217, 1241), 'omf.VolumeGridGeometry', 'omf.VolumeGridGeometry', ([], {}), '()\n', (1239, 1241), False, 'import omf\n'), ((2589, 2625), 'omf.VolumeElement', 'omf.VolumeElement', ([], {'geometry': 'geometry'}), '(geometry=geometry)\n', (2606, 2625), False, 'import omf\n'), ((4741, 4765), 'discretize.TensorMesh', 'discretize.TensorMesh', (['h'], {}), '(h)\n', (4762, 4765), False, 'import discretize\n'), ((320, 351), 'numpy.reshape', 'np.reshape', (['arr', 'dim'], {'order': '"""C"""'}), "(arr, dim, order='C')\n", (330, 351), True, 'import numpy as np\n'), ((591, 622), 'numpy.reshape', 'np.reshape', (['arr', 'dim'], {'order': '"""F"""'}), "(arr, dim, order='F')\n", (601, 622), True, 'import numpy as np\n'), ((1548, 1563), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (1556, 1563), True, 'import numpy as np\n'), ((1665, 1680), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (1673, 1680), True, 'import numpy as np\n'), ((5126, 5146), 'numpy.array', 'np.array', (['data.array'], {}), '(data.array)\n', (5134, 5146), True, 'import numpy as np\n'), ((1900, 1915), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (1908, 1915), True, 'import numpy as np\n')]
|
import sys
import numpy as np
def tvDenoising1D(data, lamb):
"""
This function implements a 1-D Total Variation denoising according to <NAME>. (2013) "A direct algorithm for 1-D total variation denoising."
See also: `<NAME>. (2013). A direct algorithm for 1-D total variation denoising. IEEE Signal Processing Letters, 20(11), 1054–1057. doi:10.1109/LSP.2013.2278339 <http://dx.doi.org/10.1109/LSP.2013.2278339>`_
Parameters
----------
data : array
Data to be fit
lamb : float
.. note::
**lamb** must be nonnegative. **lamb = 0** will result in **output = data**.
Returns
-------
fitData: `array`
Examples
--------
>>> import pylab as pl
>>> data = 'testdata.txt'
>>> X = pl.loadtxt(data);
>>> x = X[:,0];
>>> data = X[:,7];
>>>
>>> denoised = tvDenoising1D(data, lamb=200)
>>>
>>> pl.plot(x, data, 'b')
>>> pl.hold(True)
>>> pl.plot(x, denoised, 'r--')
>>> pl.show()
"""
N = len(data)
k = k0 = k_ = kp = 0
vmin = data[0]-lamb
vmax = data[0]+lamb
umin = lamb
umax = -lamb
x = np.zeros(len(data))
while True:
# 2:
if(k == N):
return np.array([vmin+umin])
# Break condition to avoid overflow...
if k+1 >= N:
break
# 3:
if(data[k+1]+umin < vmin-lamb):
for i in range(k0, k_+1):
x[i] = vmin
x[k0] = x[k_] = vmin
k = k0 = k_ = kp = k_+1
vmin = data[k]
vmax = data[k]+(2*lamb)
umin = lamb
umax = -lamb
# 4:
elif(data[k+1]+umax > vmax+lamb):
for i in range(k0, kp+1):
x[i] = vmax
x[k0] = x[k_] = x[kp] = vmax
k = k0 = k_ = kp = kp+1
vmin = data[k]-(2*lamb)
vmax = data[k]
umin = lamb
umax = -lamb
# 5:
else:
k = k+1
            umin = umin + data[k] - vmin
umax = umax + data[k] - vmax
# 6:
if(umin >= lamb):
vmin = vmin + ((umin -lamb)/(k-k0+1))
umin = lamb
k_ = k
if(umax <= -lamb):
vmax = vmax+((umax + lamb)/(k-k0+1))
umax = -lamb
kp = k
# 7:
if k < N:
continue
# 8:
if(umin < 0):
for i in range(k0, k_+1):
x[i] = vmin
k = k0 = k_ = k_ + 1
vmin = data[k]
umin = lamb
umax = data[k] + lamb - vmax
continue
# 9:
elif(umax > 0):
for i in range(k0, kp+1):
x[i] = vmax
k = k0 = kp = kp+1
vmax = data[k]
umax = -lamb
umin = data[k]-lamb-vmin
continue
else:
for i in range(k0, N):
x[i] = vmin+(umin/(k-k0+1))
break
return x
def fitGauss(xarray, yarray):
"""
    This function mixes a Linear Model with a Gaussian Model (LMFit).
See also: `Lmfit Documentation <http://cars9.uchicago.edu/software/python/lmfit/>`_
Parameters
----------
xarray : array
X data
yarray : array
Y data
Returns
-------
peak value: `float`
peak position: `float`
min value: `float`
min position: `float`
fwhm: `float`
fwhm positon: `float`
center of mass: `float`
fit_Y: `array`
fit_result: `ModelFit`
Examples
--------
>>> import pylab as pl
>>> data = 'testdata.txt'
>>> X = pl.loadtxt(data);
>>> x = X[:,0];
>>> y = X[:,7];
>>>
>>> pkv, pkp, minv, minp, fwhm, fwhmp, com = fitGauss(x, y)
>>> print("Peak ", pkv, " at ", pkp)
>>> print("Min ", minv, " at ", minp)
>>> print("Fwhm ", fwhm, " at ", fwhmp)
>>> print("COM = ", com)
>>>
"""
from lmfit.models import GaussianModel, LinearModel
y = yarray
x = xarray
gaussMod = GaussianModel()
linMod = LinearModel()
pars = linMod.make_params(intercept=y.min(), slope=0)
pars += linMod.guess(y, x=x)
pars += gaussMod.guess(y, x=x)
mod = gaussMod + linMod
fwhm = 0
fwhm_position = 0
try:
result = mod.fit(y, pars, x=x)
fwhm = result.values['fwhm']
fwhm_position = result.values['center']
    except Exception:
result = None
peak_position = xarray[np.argmax(y)]
peak = np.max(y)
minv_position = x[np.argmin(y)]
minv = np.min(y)
COM = (np.multiply(x,y).sum())/y.sum()
return (peak, peak_position, minv, minv_position, fwhm, fwhm_position, COM, result)
if __name__ == '__main__':
import pylab as pl
#file = '/home/ABTLUS/hugo.slepicka/devfiles/workspacePython/FIT_Test/teste'
file = "/home/ABTLUS/hugo.slepicka/SVN/Py4Syn/trunk/lab6_summed.dat"
X = np.loadtxt(file);
x = X[:,0];
y = X[:,1];
#x = np.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
#y = np.asarray([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
#peak, peak_position, minv, minv_position, fwhm, fwhm_position, COM, result = fitGauss(x, y)
#print("COM = ", result)
data = y
denoised = tvDenoising1D(data, lamb=200)
pl.plot(x, data, 'b')
pl.hold(True)
pl.plot(x, denoised, 'r--')
pl.show()
|
[
"pylab.hold",
"lmfit.models.LinearModel",
"pylab.show",
"numpy.multiply",
"numpy.argmax",
"numpy.argmin",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.loadtxt",
"lmfit.models.GaussianModel",
"pylab.plot"
] |
[((4077, 4092), 'lmfit.models.GaussianModel', 'GaussianModel', ([], {}), '()\n', (4090, 4092), False, 'from lmfit.models import GaussianModel, LinearModel\n'), ((4106, 4119), 'lmfit.models.LinearModel', 'LinearModel', ([], {}), '()\n', (4117, 4119), False, 'from lmfit.models import GaussianModel, LinearModel\n'), ((4535, 4544), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (4541, 4544), True, 'import numpy as np\n'), ((4593, 4602), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (4599, 4602), True, 'import numpy as np\n'), ((4950, 4966), 'numpy.loadtxt', 'np.loadtxt', (['file'], {}), '(file)\n', (4960, 4966), True, 'import numpy as np\n'), ((5310, 5331), 'pylab.plot', 'pl.plot', (['x', 'data', '"""b"""'], {}), "(x, data, 'b')\n", (5317, 5331), True, 'import pylab as pl\n'), ((5336, 5349), 'pylab.hold', 'pl.hold', (['(True)'], {}), '(True)\n', (5343, 5349), True, 'import pylab as pl\n'), ((5354, 5381), 'pylab.plot', 'pl.plot', (['x', 'denoised', '"""r--"""'], {}), "(x, denoised, 'r--')\n", (5361, 5381), True, 'import pylab as pl\n'), ((5386, 5395), 'pylab.show', 'pl.show', ([], {}), '()\n', (5393, 5395), True, 'import pylab as pl\n'), ((4510, 4522), 'numpy.argmax', 'np.argmax', (['y'], {}), '(y)\n', (4519, 4522), True, 'import numpy as np\n'), ((4568, 4580), 'numpy.argmin', 'np.argmin', (['y'], {}), '(y)\n', (4577, 4580), True, 'import numpy as np\n'), ((1231, 1254), 'numpy.array', 'np.array', (['[vmin + umin]'], {}), '([vmin + umin])\n', (1239, 1254), True, 'import numpy as np\n'), ((4615, 4632), 'numpy.multiply', 'np.multiply', (['x', 'y'], {}), '(x, y)\n', (4626, 4632), True, 'import numpy as np\n')]
|
#!C:\Users\user\myprojects\angello\angello-venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'gprof2dot==2016.10.13','console_scripts','gprof2dot'
__requires__ = 'gprof2dot==2016.10.13'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('gprof2dot==2016.10.13', 'console_scripts', 'gprof2dot')()
)
|
[
"pkg_resources.load_entry_point",
"re.sub"
] |
[((299, 351), 're.sub', 're.sub', (['"""(-script\\\\.pyw?|\\\\.exe)?$"""', '""""""', 'sys.argv[0]'], {}), "('(-script\\\\.pyw?|\\\\.exe)?$', '', sys.argv[0])\n", (305, 351), False, 'import re\n'), ((373, 446), 'pkg_resources.load_entry_point', 'load_entry_point', (['"""gprof2dot==2016.10.13"""', '"""console_scripts"""', '"""gprof2dot"""'], {}), "('gprof2dot==2016.10.13', 'console_scripts', 'gprof2dot')\n", (389, 446), False, 'from pkg_resources import load_entry_point\n')]
|
"""Functions and utilities used to format the databases."""
import numpy as np
import jax.numpy as jnp
from scipy.integrate import quadrature
import tools21cm as t2c
def apply_uv_coverage(Box_uv, uv_bool):
"""Apply UV coverage to the data.
Args:
Box_uv: data box in Fourier space
uv_bool: mask of measured baselines
Returns:
Box_uv
"""
Box_uv = Box_uv * uv_bool
return Box_uv
def compute_uv_coverage(redshifts, ncells=200, boxsize=300):
"""Computing UV coverage box for SKA antenna configuration.
Args:
redshifts: list of redshifts for which the UV coverage is computed.
        ncells: size of the grid in UV space (in pixels)
boxsize: size of the simulation (in Mpc)
Returns:
uv: UV coverage box
"""
uv = np.empty((ncells, ncells, len(redshifts)))
for i in range(len(redshifts)):
print(i, end=" ")
uv[..., i], _ = t2c.noise_model.get_uv_map(
            ncells=ncells, z=redshifts[i], boxsize=boxsize
)
return uv
def noise(seed, redshifts, uv, ncells=200, boxsize=300.0, obs_time=1000, N_ant=512):
"""Computing telescope thermal noise.
Args:
seed: noise seed
redshifts: list of redshifts for each slice of UV
        uv: UV coverage box
ncells: size of a box in real/UV space (in pixels)
boxsize: size of the simulation (in Mpc)
obs_time: total observation time (in hours)
N_ant: number of antennas in the configuration
Returns:
finalBox: noise in UV space
"""
redshifts = np.append(
redshifts, 2 * redshifts[-1] - redshifts[-2]
) # appending the last difference
finalBox = np.empty(uv.shape, dtype=np.complex64)
for i in range(uv.shape[-1]):
depth_mhz = t2c.cosmology.z_to_nu(redshifts[i]) - t2c.cosmology.z_to_nu(
redshifts[i + 1]
)
noise = t2c.noise_model.noise_map(
ncells=ncells,
z=redshifts[i],
depth_mhz=depth_mhz,
obs_time=obs_time,
boxsize=boxsize,
uv_map=uv[..., i],
N_ant=N_ant,
seed=10000 * seed + i,
)
noise = t2c.telescope_functions.jansky_2_kelvin(
noise, redshifts[i], boxsize=boxsize
).astype(np.complex64)
finalBox[..., i] = noise
return finalBox
def wedge_removal(
OMm,
redshifts,
HII_DIM,
cell_size,
Box_uv,
chunk_length=501,
blackman=True,
):
"""Computing horizon wedge removal. Implements "sliding" procedure
of removing the wedge for every redshift separately.
Args:
OMm: Omega matter
redshifts: list of redshifts in a lightcone
HII_DIM: size of the HII simulation box (see `21cmFASTv3`)
cell_size: size of a cell in Mpc
Box_uv: box in UV space on which wedge removal is to be computed
chunk_length: length of a sliding chunk (in number of z-slices)
blackman: either to use Blackman-Harris taper or not
Returns:
Box_final: wedge-removed box in real space
"""
def one_over_E(z, OMm):
return 1 / np.sqrt(OMm * (1.0 + z) ** 3 + (1 - OMm))
def multiplicative_factor(z, OMm):
return (
1
/ one_over_E(z, OMm)
/ (1 + z)
* quadrature(lambda x: one_over_E(x, OMm), 0, z)[0]
)
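    # For reference: one_over_E(z) is 1/E(z) with E(z) = sqrt(OMm*(1+z)^3 + 1-OMm),
    # so multiplicative_factor(z) = E(z)/(1+z) * integral_0^z dz'/E(z'), which
    # sets the slope of the horizon wedge at each redshift.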
MF = jnp.array([multiplicative_factor(z, OMm) for z in redshifts]).astype(
np.float32
)
redshifts = jnp.array(redshifts).astype(np.float32)
k = jnp.fft.fftfreq(HII_DIM, d=cell_size)
k_parallel = jnp.fft.fftfreq(chunk_length, d=cell_size)
delta_k = k_parallel[1] - k_parallel[0]
k_cube = jnp.meshgrid(k, k, k_parallel)
bm = jnp.abs(jnp.fft.fft(jnp.blackman(chunk_length))) ** 2
buffer = delta_k * (jnp.where(bm / jnp.amax(bm) <= 1e-10)[0][0] - 1)
BM = jnp.blackman(chunk_length)[jnp.newaxis, jnp.newaxis, :]
box_shape = Box_uv.shape
Box_final = np.empty(box_shape, dtype=np.float32)
empty_box = jnp.zeros(k_cube[0].shape)
Box_uv = jnp.concatenate(
(empty_box, jnp.array(Box_uv, dtype=jnp.float32), empty_box), axis=2
)
for i in range(chunk_length, box_shape[-1] + chunk_length):
t_box = Box_uv[..., i - chunk_length // 2 : i + chunk_length // 2 + 1]
W = k_cube[2] / (
jnp.sqrt(k_cube[0] ** 2 + k_cube[1] ** 2)
* MF[min(i - chunk_length // 2 - 1, box_shape[-1] - 1)]
+ buffer
)
w = jnp.logical_or(W < -1.0, W > 1.0)
# w = cp.array(W[i + chunk_length - 1])
        if blackman:
t_box = t_box * BM
Box_final[..., i - chunk_length] = jnp.real(
jnp.fft.ifftn(jnp.fft.fft(t_box, axis=-1) * w)
)[
..., chunk_length // 2
] # taking only middle slice in redshift
return Box_final.astype(np.float32)
def BoxCar3D(data, filter=(4, 4, 4)):
"""Computing BoxCar filter on the input data.
Args:
data: data to filter
filter: filter shape
Returns:
filtered data
"""
if len(data.shape) != 3:
raise AttributeError("data has to be 3D")
if len(filter) != 3:
raise AttributeError("filter has to be 3D")
s = data.shape
Nx, Ny, Nz = filter
return jnp.einsum(
"ijklmn->ikm",
data[: s[0] // Nx * Nx, : s[1] // Ny * Ny, : s[2] // Nz * Nz].reshape(
(s[0] // Nx, Nx, s[1] // Ny, Ny, s[2] // Nz, Nz)
),
) / (Nx * Ny * Nz)
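# Usage sketch (illustrative): BoxCar3D(box, filter=(4, 4, 4)) averages a
# (200, 200, 200) box down to (50, 50, 50); the slicing above truncates any
# trailing cells that do not fill a complete 4x4x4 block.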
|
[
"jax.numpy.array",
"jax.numpy.amax",
"jax.numpy.logical_or",
"jax.numpy.fft.fft",
"tools21cm.noise_model.noise_map",
"numpy.empty",
"tools21cm.telescope_functions.jansky_2_kelvin",
"jax.numpy.fft.fftfreq",
"tools21cm.noise_model.get_uv_map",
"numpy.append",
"tools21cm.cosmology.z_to_nu",
"jax.numpy.blackman",
"jax.numpy.zeros",
"jax.numpy.meshgrid",
"jax.numpy.sqrt",
"numpy.sqrt"
] |
[((1580, 1635), 'numpy.append', 'np.append', (['redshifts', '(2 * redshifts[-1] - redshifts[-2])'], {}), '(redshifts, 2 * redshifts[-1] - redshifts[-2])\n', (1589, 1635), True, 'import numpy as np\n'), ((1698, 1736), 'numpy.empty', 'np.empty', (['uv.shape'], {'dtype': 'np.complex64'}), '(uv.shape, dtype=np.complex64)\n', (1706, 1736), True, 'import numpy as np\n'), ((3566, 3603), 'jax.numpy.fft.fftfreq', 'jnp.fft.fftfreq', (['HII_DIM'], {'d': 'cell_size'}), '(HII_DIM, d=cell_size)\n', (3581, 3603), True, 'import jax.numpy as jnp\n'), ((3621, 3663), 'jax.numpy.fft.fftfreq', 'jnp.fft.fftfreq', (['chunk_length'], {'d': 'cell_size'}), '(chunk_length, d=cell_size)\n', (3636, 3663), True, 'import jax.numpy as jnp\n'), ((3721, 3751), 'jax.numpy.meshgrid', 'jnp.meshgrid', (['k', 'k', 'k_parallel'], {}), '(k, k, k_parallel)\n', (3733, 3751), True, 'import jax.numpy as jnp\n'), ((4000, 4037), 'numpy.empty', 'np.empty', (['box_shape'], {'dtype': 'np.float32'}), '(box_shape, dtype=np.float32)\n', (4008, 4037), True, 'import numpy as np\n'), ((4054, 4080), 'jax.numpy.zeros', 'jnp.zeros', (['k_cube[0].shape'], {}), '(k_cube[0].shape)\n', (4063, 4080), True, 'import jax.numpy as jnp\n'), ((936, 1003), 'tools21cm.noise_model.get_uv_map', 't2c.noise_model.get_uv_map', ([], {'ncells': '(200)', 'z': 'redshifts[i]', 'boxsize': '(300)'}), '(ncells=200, z=redshifts[i], boxsize=300)\n', (962, 1003), True, 'import tools21cm as t2c\n'), ((1907, 2085), 'tools21cm.noise_model.noise_map', 't2c.noise_model.noise_map', ([], {'ncells': 'ncells', 'z': 'redshifts[i]', 'depth_mhz': 'depth_mhz', 'obs_time': 'obs_time', 'boxsize': 'boxsize', 'uv_map': 'uv[..., i]', 'N_ant': 'N_ant', 'seed': '(10000 * seed + i)'}), '(ncells=ncells, z=redshifts[i], depth_mhz=\n depth_mhz, obs_time=obs_time, boxsize=boxsize, uv_map=uv[..., i], N_ant\n =N_ant, seed=10000 * seed + i)\n', (1932, 2085), True, 'import tools21cm as t2c\n'), ((3898, 3924), 'jax.numpy.blackman', 'jnp.blackman', (['chunk_length'], {}), '(chunk_length)\n', (3910, 3924), True, 'import jax.numpy as jnp\n'), ((4529, 4562), 'jax.numpy.logical_or', 'jnp.logical_or', (['(W < -1.0)', '(W > 1.0)'], {}), '(W < -1.0, W > 1.0)\n', (4543, 4562), True, 'import jax.numpy as jnp\n'), ((1791, 1826), 'tools21cm.cosmology.z_to_nu', 't2c.cosmology.z_to_nu', (['redshifts[i]'], {}), '(redshifts[i])\n', (1812, 1826), True, 'import tools21cm as t2c\n'), ((1829, 1868), 'tools21cm.cosmology.z_to_nu', 't2c.cosmology.z_to_nu', (['redshifts[i + 1]'], {}), '(redshifts[i + 1])\n', (1850, 1868), True, 'import tools21cm as t2c\n'), ((3154, 3195), 'numpy.sqrt', 'np.sqrt', (['(OMm * (1.0 + z) ** 3 + (1 - OMm))'], {}), '(OMm * (1.0 + z) ** 3 + (1 - OMm))\n', (3161, 3195), True, 'import numpy as np\n'), ((3517, 3537), 'jax.numpy.array', 'jnp.array', (['redshifts'], {}), '(redshifts)\n', (3526, 3537), True, 'import jax.numpy as jnp\n'), ((4131, 4167), 'jax.numpy.array', 'jnp.array', (['Box_uv'], {'dtype': 'jnp.float32'}), '(Box_uv, dtype=jnp.float32)\n', (4140, 4167), True, 'import jax.numpy as jnp\n'), ((2199, 2276), 'tools21cm.telescope_functions.jansky_2_kelvin', 't2c.telescope_functions.jansky_2_kelvin', (['noise', 'redshifts[i]'], {'boxsize': 'boxsize'}), '(noise, redshifts[i], boxsize=boxsize)\n', (2238, 2276), True, 'import tools21cm as t2c\n'), ((3782, 3808), 'jax.numpy.blackman', 'jnp.blackman', (['chunk_length'], {}), '(chunk_length)\n', (3794, 3808), True, 'import jax.numpy as jnp\n'), ((4376, 4417), 'jax.numpy.sqrt', 'jnp.sqrt', (['(k_cube[0] ** 2 + k_cube[1] ** 2)'], {}), '(k_cube[0] ** 2 + k_cube[1] ** 2)\n', (4384, 4417), True, 'import jax.numpy as jnp\n'), ((4750, 4777), 'jax.numpy.fft.fft', 'jnp.fft.fft', (['t_box'], {'axis': '(-1)'}), '(t_box, axis=-1)\n', (4761, 4777), True, 'import jax.numpy as jnp\n'), ((3855, 3867), 'jax.numpy.amax', 'jnp.amax', (['bm'], {}), '(bm)\n', (3863, 3867), True, 'import jax.numpy as jnp\n')]
|
from app.repositories.base_repo import BaseRepo
from app.models.student_event import StudentEvent
class StudentEventRepo(BaseRepo):
def __init__(self):
BaseRepo.__init__(self, StudentEvent)
def new_student_event(self, event_id, student_id):
student_event = StudentEvent(event_id=event_id, student_id=student_id)
student_event.save()
return student_event
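# Usage sketch (hypothetical IDs):
#   StudentEventRepo().new_student_event(event_id=1, student_id=2)
# saves and returns the new StudentEvent link row.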
|
[
"app.repositories.base_repo.BaseRepo.__init__",
"app.models.student_event.StudentEvent"
] |
[((158, 195), 'app.repositories.base_repo.BaseRepo.__init__', 'BaseRepo.__init__', (['self', 'StudentEvent'], {}), '(self, StudentEvent)\n', (175, 195), False, 'from app.repositories.base_repo import BaseRepo\n'), ((267, 321), 'app.models.student_event.StudentEvent', 'StudentEvent', ([], {'event_id': 'event_id', 'student_id': 'student_id'}), '(event_id=event_id, student_id=student_id)\n', (279, 321), False, 'from app.models.student_event import StudentEvent\n')]
|
import json
from datetime import date
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from django.db import transaction
from opentech.apply.categories.models import Category
from opentech.apply.funds.models import ApplicationForm, FundType, Round
from opentech.apply.funds.models.forms import ApplicationBaseForm, ApplicationBaseReviewForm
from opentech.apply.review.models import ReviewForm
from opentech.apply.home.models import ApplyHomePage
from opentech.apply.users.groups import STAFF_GROUP_NAME
FS_ROUND_TITLE = 'Fellowship (archive round)'
FS_FUND_TITLE = 'Fellowship (archive fund)'
class Command(BaseCommand):
help = "Pre-seeds the fellowship application and proposal form and fund type. Depends on the categories seed being run first."
@transaction.atomic
def handle(self, *args, **options):
        # If the archive round already exists, bail out. Avoids duplicate command runs.
if Round.objects.filter(title=FS_ROUND_TITLE).count():
self.stdout.write(self.style.WARNING('Skipping. The target Round/Fund Type and Application Form exist'))
return
application_form = self.create_fellowship_application_form()
proposal_form = self.create_fellowship_proposal_form()
application_review_form = self.create_fellowship_application_review_form()
proposal_review_form = self.create_fellowship_proposal_review_form()
fund = self.create_fellowship_fund_type(application_form, proposal_form, application_review_form, proposal_review_form)
self.create_fellowship_round(fund)
def create_fellowship_application_form(self):
focus_id = Category.objects.get(name='Focus').id
objectives_id = Category.objects.get(name='Objective(s)').id
beneficiaries_id = Category.objects.get(name='Beneficiaries').id
regions_id = Category.objects.get(name='Region(s)').id
addressed_id = Category.objects.get(name='Addressed problems').id
status_id = Category.objects.get(name='Project status').id
data = [
{"type": "text_markup", "value": "<h3>About you</h3>", "id": "ef672ec5-f24c-4e95-9f18-522a5a1e6833"},
{"type": "title", "value": {"field_label": "What is your project name?", "help_text": "", "info": None}, "id": "32c37ee8-7d5b-4fc0-b606-9697a1c7e5c2"},
{"type": "full_name", "value": {"field_label": "Your name", "help_text": "", "info": None}, "id": "3b051ef2-3c75-4a70-aae3-999d58852810"},
{"type": "email", "value": {"field_label": "E-mail", "help_text": "", "info": None}, "id": "bfc488d3-b77d-427d-825d-9000797e9576"},
{"type": "address", "value": {"field_label": "Address", "help_text": "", "info": None}, "id": "2c0db01a-b5ab-4882-aad8-8c9a2ec05e8f"},
{"type": "value", "value": {"field_label": "If you are applying for direct funding, how much do you need?", "help_text": "Amount requested should be less than 50000 USD.", "info": None}, "id": "cfae89dc-f327-45f4-80e9-f267c3bd1ec7"},
{"type": "char", "value": {"field_label": "What is your current or most recent position and employer or research institution?", "help_text": "", "required": "", "format": "", "default_value": ""}, "id": "1282223d-77f5-4047-be03-4df4c4b2148a"},
{"type": "rich_text", "value": {"field_label": "What are (or were) your roles and responsibilities there?", "help_text": "", "required": "", "default_value": ""}, "id": "9c0256e4-42e1-41fe-9880-7f621d6c3458"},
{"type": "dropdown", "value": {"field_label": "Have you ever applied or received funding through an OTF fellowship program?", "help_text": "", "required": "", "choices": ["Yes", "No"]}, "id": "f8efef0a-0632-4c81-b4db-7bc6a06caa7d"},
{"type": "text_markup", "value": "<h3>About your project</h3>", "id": "3541d1b1-afc7-4dcd-8ed9-e9af27de5f3d"},
{"type": "rich_text", "value": {"field_label": "What is your project idea?", "help_text": "", "required": "", "default_value": ""}, "id": "1eb8b4e3-e2bb-4810-a8ce-3fc82a3192c8"},
{"type": "rich_text", "value": {"field_label": "How would you do it?", "help_text": "", "required": "", "default_value": ""}, "id": "177d56e8-2df1-4ead-8e3d-4916610fbed6"},
{"type": "rich_text", "value": {"field_label": "Why are you the right person for this project?", "help_text": "", "required": "", "default_value": ""}, "id": "05ff1755-947b-4e41-8f71-aae99977c572"},
{"type": "duration", "value": {"field_label": "How long do you want to work on this fellowship?", "help_text": "", "info": None}, "id": "3ccac109-2839-4b5d-b133-0e6cfca7c766"},
{"type": "text_markup", "value": "<h3>Host organization</h3>", "id": "f4b3ae6f-a1d6-4c9d-b334-e40614167257"},
{"type": "char", "value": {"field_label": "What is your most ideal host organization?", "help_text": "", "required": "", "format": "", "default_value": ""}, "id": "0afaf4e1-4556-4e79-aa3d-4990e33620da"},
{"type": "char", "value": {"field_label": "What is your next best host organization?", "help_text": "", "required": "", "format": "", "default_value": ""}, "id": "a543b34f-ae6a-4b17-8ac3-ececc14573a0"},
{"type": "text_markup", "value": "<h3>Request specific questions</h3>", "id": "755363fa-6a1c-422f-a03f-89db07a96e17"},
{"type": "rich_text", "value": {"field_label": "Request specific questions", "help_text": "", "required": "", "default_value": ""}, "id": "57cc52e2-b3ff-4e9f-a5fe-42e7735e16c2"},
{"type": "text_markup", "value": "<h3>Descriptors</h3>", "id": "b6ee65b3-d5cd-4cb0-9d7c-6e29d86deaaf"},
{"type": "category", "value": {"field_label": "Status", "help_text": "", "required": "", "category": status_id, "multi": "true"}, "id": "ff4d12ff-7b88-4e87-bb5b-81543aef0e25"},
{"type": "category", "value": {"field_label": "Objectives", "help_text": "", "required": "true", "category": objectives_id, "multi": "true"}, "id": "30c41288-a762-4003-acce-8c12e7343d90"},
{"type": "category", "value": {"field_label": "Beneficiaries", "help_text": "", "required": "", "category": beneficiaries_id, "multi": "true"}, "id": "56833441-542b-4a06-8ad2-8e7e8fd1a334"},
{"type": "category", "value": {"field_label": "Focus", "help_text": "", "required": "", "category": focus_id, "multi": "true"}, "id": "6b404851-ce2b-494f-b9f7-62858a937469"},
{"type": "category", "value": {"field_label": "Addressed problems", "help_text": "", "required": "true", "category": addressed_id, "multi": "true"}, "id": "590e4b77-c4f4-4bd0-b5be-2ad2851da4f5"},
{"type": "category", "value": {"field_label": "Region", "help_text": "", "required": "", "category": regions_id, "multi": "true"}, "id": "81c01278-8ba4-4d84-a1da-e05a07aad874"},
{"type": "multi_file", "value": {"field_label": "Upload", "help_text": "", "required": ""}, "id": "25740b9d-0f8f-4ce1-88fa-c6ee831c6aef"},
{"type": "text_markup", "value": "<h3>I acknowledge</h3>", "id": "f69d3a56-491a-4321-89b7-4d7e34d69a1d"},
{"type": "checkbox", "value": {"field_label": "My application will be dismissed if it does not fit within OTF\'s mission, values, principles statements.", "help_text": "", "default_value": ""}, "id": "5178e15f-d442-4d36-824d-a4292ef77062"},
{"type": "text_markup", "value": "Read our <a href=\"\/about/program\">mission, values, and principles</a>.", "id": "b0c69627-d7db-4633-b46f-0e787dddc779"},
{"type": "checkbox", "value": {"field_label": "I have read and understand OTF\'s Terms and Privacy policy.", "help_text": "", "default_value": ""}, "id": "bd91e220-4cdb-4392-8054-7b7dfe667d46"},
{"type": "text_markup", "value": "Read the <a href=\"\/tos\">Terms and Privacy policy</a>.", "id": "6f6236fd-9d1d-4090-a819-72fb96205bc0"},
{"type": "checkbox", "value": {"field_label": "I am legally able to sign contracts or represent an organization that can.", "help_text": "", "default_value": ""}, "id": "8d000129-ca8b-48cf-8dc2-4651bcbe46e8"},
{"type": "checkbox", "value": {"field_label": "I understand that all intellectual property created with support for this application must be openly licensed.", "help_text": "", "default_value": ""}, "id": "92f0801e-b9dc-4edc-9716-3f1709ae1c9b"},
{"type": "checkbox", "value": {"field_label": "I understand that if my application is incomplete in any way, it will be dismissed.", "help_text": "", "default_value": ""}, "id": "3a3f2da3-4e32-4b86-9060-29c606927114"},
{"type": "checkbox", "value": {"field_label": "I understand that if my application is after a deadline, it will not be reviewed until after the next deadline.", "help_text": "", "default_value": ""}, "id": "19395179-ed9f-4556-9b6b-ab5caef4f610"},
{"type": "text_markup", "value": "<h3>I would like to</h3>", "id": "21c9a554-d0d2-4543-9ca5-f53e506fb7c4"},
{"type": "checkbox", "value": {"field_label": "Sign up to the OTF-Announce list, low traffic (funding opportunities, major alerts, etc).", "help_text": "", "default_value": ""}, "id": "1345a8eb-4dcc-4170-a5ac-edda42d4dafc"},
{"type": "checkbox", "value": {"field_label": "Sign up for OTF\'s daily newsletter (collection of news related to global internet freedom).", "help_text": "", "default_value": ""}, "id": "4ca22ebb-daba-4fb6-a4a6-b130dc6311a8"}
]
application_form, _ = ApplicationForm.objects.get_or_create(name='Fellowship application', defaults={'form_fields': json.dumps(data)})
return application_form
def create_fellowship_proposal_form(self):
data2 = [
{"type": "text_markup", "value": "<h3>Proposal information</h3>", "id": ""},
{"type": "title", "value": {"field_label": "Proposal title", "help_text": "", "info": None}, "id": ""},
{"type": "full_name", "value": {"field_label": "Your name", "help_text": "", "info": None}, "id": "c0c75948-b3c3-42be-8646-bc2a5d8521c3"},
{"type": "email", "value": {"field_label": "E-mail", "help_text": "", "info": None}, "id": "a607ec56-da2a-46d4-b0c9-7c8f3c351a6e"},
{"type": "address", "value": {"field_label": "Address", "help_text": "", "info": None}, "id": "8d3cf1ac-928f-4ee2-ad12-2e5fb16b4748"},
{"type": "value", "value": {"field_label": "If you are applying for direct funding, how much do you need?", "help_text": "Amount requested should be less than 50000 USD.", "info": None}, "id": "cfae89dc-f327-45f4-80e9-f267c3bd1ec7"},
{"type": "duration", "value": {"field_label": "How long do you want to work on this fellowship?", "help_text": "", "info": None}, "id": "08b9b5c3-e01d-41ac-95be-600a4fee7d87"},
{"type": "char", "value": {"field_label": "Host organisation", "help_text": "", "required": "", "format": "", "default_value": ""}, "id": "bc03235e-3c78-4770-9fc2-97feb93c2c8c"},
{"type": "date", "value": {"field_label": "Start date", "help_text": "", "required": "", "default_value": ""}, "id": "672cb6f1-335c-4005-a0f1-46c414feda06"},
{"type": "date", "value": {"field_label": "Completion date", "help_text": "", "required": "", "default_value": ""}, "id": "8262f209-f084-4a79-9dfa-2d18137119bb"},
{"type": "rich_text", "value": {"field_label": "Objectives", "help_text": "", "required": "", "default_value": ""}, "id": "af2c5f38-7257-4295-87fa-787060e845ef"},
{"type": "rich_text", "value": {"field_label": "Milestones and dates", "help_text": "", "required": "", "default_value": ""}, "id": "3c521847-7642-4cae-aca9-d5336ad8962d"},
{"type": "rich_text", "value": {"field_label": "Anticipated outputs and outcomes", "help_text": "", "required": "", "default_value": ""}, "id": "fd0eb7ea-e054-4bcf-9580-eb672d44745c"},
{"type": "text_markup", "value": "<h3>Request specific questions</h3>", "id": "b05a54d1-3a59-41d1-bb70-d5f0f0acd67d"},
{"type": "rich_text", "value": {"field_label": "Request specific questions", "help_text": "", "required": "", "default_value": ""}, "id": "b6d71932-98c2-4ce8-a5e6-454a1f800d21"},
{"type": "multi_file", "value": {"field_label": "Upload", "help_text": "", "required": ""}, "id": "30dfa46e-f656-46c9-9efc-bab9029f2008"}
]
proposal_form, _ = ApplicationForm.objects.get_or_create(name='Fellowship proposal', defaults={'form_fields': json.dumps(data2)})
return proposal_form
def create_fellowship_application_review_form(self):
data3 = [
{"type": "recommendation", "value": {"field_label": "Overall, do you think we should select this applicant and their project to be part of the fellowship program?", "help_text": "", "info": None}, "id": "56264b32-fa39-4c08-b41e-68e9c54b2712"},
{"type": "rich_text", "value": {"field_label": "If no, please select a reason why not.", "help_text": "", "required": "", "default_value": ""}, "id": "f0533950-57f5-4bb7-81ec-2d3813490c88"},
{"type": "rich_text", "value": {"field_label": "Request specific questions", "help_text": "", "required": "", "default_value": ""}, "id": "ba789376-e3f9-434e-8da5-330811723b30"},
{"type": "comments", "value": {"field_label": "Other comments", "help_text": "", "info": None}, "id": "e74e2581-d06c-43b1-9c0b-911407225834"}
]
application_review_form, _ = ReviewForm.objects.get_or_create(name='Fellowship application review', defaults={'form_fields': json.dumps(data3)})
return application_review_form
def create_fellowship_proposal_review_form(self):
data4 = [
{"type": "recommendation", "value": {"field_label": "Overall, do you think we should select this applicant and their project to be part of the fellowship program?", "help_text": "", "info": None}, "id": "e1ea4f9d-64e2-4f28-a68a-851ec0f2d9ad"},
{"type": "rich_text", "value": {"field_label": "If no, please select a reason why not.", "help_text": "", "required": "", "default_value": ""}, "id": "e68b6fe9-8b11-4cf0-8ae4-2ffed75e1e80"},
{"type": "rich_text", "value": {"field_label": "If yes, but you believe some changes need to be made to the proposed effort, please let us know.", "help_text": "", "required": "", "default_value": ""}, "id": "a413f3a2-b486-4bf3-9e2d-c48d19626876"},
{"type": "rich_text", "value": {"field_label": "Request specific questions", "help_text": "", "required": "", "default_value": ""}, "id": "536c963a-f183-45bc-b83f-458b46dc5542"},
{"type": "comments", "value": {"field_label": "Anything else you'd like to give us feedback on?", "help_text": "", "info": None}, "id": "cc82ba7b-b55e-4309-85f0-f68ad6f43471"}
]
proposal_review_form, _ = ReviewForm.objects.get_or_create(name='Fellowship proposal review', defaults={'form_fields': json.dumps(data4)})
return proposal_review_form
def create_fellowship_fund_type(self, application_form, proposal_form, application_review_form, proposal_review_form):
try:
fund = FundType.objects.get(title=FS_FUND_TITLE)
except FundType.DoesNotExist:
apply_home = ApplyHomePage.objects.first()
fund = FundType(title=FS_FUND_TITLE, workflow_name='double')
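            # two stages are configured below, matching the two-stage 'double' workflow (application form, then proposal form)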
apply_home.add_child(instance=fund)
fund_form = ApplicationBaseForm.objects.create(application=fund, form=application_form)
fund_form2 = ApplicationBaseForm.objects.create(application=fund, form=proposal_form)
fund.forms = [fund_form, fund_form2]
fund_review_form = ApplicationBaseReviewForm.objects.create(application=fund, form=application_review_form)
fund_review_form2 = ApplicationBaseReviewForm.objects.create(application=fund, form=proposal_review_form)
fund.review_forms = [fund_review_form, fund_review_form2]
fund.save()
return fund
def create_fellowship_round(self, fund):
User = get_user_model()
try:
lead = User.objects.get(full_name="<NAME>")
except User.DoesNotExist:
lead = User.objects.filter(groups__name=STAFF_GROUP_NAME).first()
round = Round(
title=FS_ROUND_TITLE,
lead=lead,
# The date of the original Information Controls Fellowship request type
start_date=date(2013, 1, 1),
end_date=date(2018, 8, 29)
)
round.parent_page = fund
fund.add_child(instance=round)
|
[
"opentech.apply.funds.models.Round.objects.filter",
"opentech.apply.funds.models.FundType.objects.get",
"opentech.apply.home.models.ApplyHomePage.objects.first",
"opentech.apply.categories.models.Category.objects.get",
"django.contrib.auth.get_user_model",
"datetime.date",
"json.dumps",
"opentech.apply.funds.models.forms.ApplicationBaseReviewForm.objects.create",
"opentech.apply.funds.models.forms.ApplicationBaseForm.objects.create",
"opentech.apply.funds.models.FundType"
] |
[((16090, 16106), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (16104, 16106), False, 'from django.contrib.auth import get_user_model\n'), ((1707, 1741), 'opentech.apply.categories.models.Category.objects.get', 'Category.objects.get', ([], {'name': '"""Focus"""'}), "(name='Focus')\n", (1727, 1741), False, 'from opentech.apply.categories.models import Category\n'), ((1769, 1810), 'opentech.apply.categories.models.Category.objects.get', 'Category.objects.get', ([], {'name': '"""Objective(s)"""'}), "(name='Objective(s)')\n", (1789, 1810), False, 'from opentech.apply.categories.models import Category\n'), ((1841, 1883), 'opentech.apply.categories.models.Category.objects.get', 'Category.objects.get', ([], {'name': '"""Beneficiaries"""'}), "(name='Beneficiaries')\n", (1861, 1883), False, 'from opentech.apply.categories.models import Category\n'), ((1908, 1946), 'opentech.apply.categories.models.Category.objects.get', 'Category.objects.get', ([], {'name': '"""Region(s)"""'}), "(name='Region(s)')\n", (1928, 1946), False, 'from opentech.apply.categories.models import Category\n'), ((1973, 2020), 'opentech.apply.categories.models.Category.objects.get', 'Category.objects.get', ([], {'name': '"""Addressed problems"""'}), "(name='Addressed problems')\n", (1993, 2020), False, 'from opentech.apply.categories.models import Category\n'), ((2044, 2087), 'opentech.apply.categories.models.Category.objects.get', 'Category.objects.get', ([], {'name': '"""Project status"""'}), "(name='Project status')\n", (2064, 2087), False, 'from opentech.apply.categories.models import Category\n'), ((15171, 15212), 'opentech.apply.funds.models.FundType.objects.get', 'FundType.objects.get', ([], {'title': 'FS_FUND_TITLE'}), '(title=FS_FUND_TITLE)\n', (15191, 15212), False, 'from opentech.apply.funds.models import ApplicationForm, FundType, Round\n'), ((984, 1026), 'opentech.apply.funds.models.Round.objects.filter', 'Round.objects.filter', ([], {'title': 'FS_ROUND_TITLE'}), '(title=FS_ROUND_TITLE)\n', (1004, 1026), False, 'from opentech.apply.funds.models import ApplicationForm, FundType, Round\n'), ((15276, 15305), 'opentech.apply.home.models.ApplyHomePage.objects.first', 'ApplyHomePage.objects.first', ([], {}), '()\n', (15303, 15305), False, 'from opentech.apply.home.models import ApplyHomePage\n'), ((15326, 15379), 'opentech.apply.funds.models.FundType', 'FundType', ([], {'title': 'FS_FUND_TITLE', 'workflow_name': '"""double"""'}), "(title=FS_FUND_TITLE, workflow_name='double')\n", (15334, 15379), False, 'from opentech.apply.funds.models import ApplicationForm, FundType, Round\n'), ((15453, 15528), 'opentech.apply.funds.models.forms.ApplicationBaseForm.objects.create', 'ApplicationBaseForm.objects.create', ([], {'application': 'fund', 'form': 'application_form'}), '(application=fund, form=application_form)\n', (15487, 15528), False, 'from opentech.apply.funds.models.forms import ApplicationBaseForm, ApplicationBaseReviewForm\n'), ((15554, 15626), 'opentech.apply.funds.models.forms.ApplicationBaseForm.objects.create', 'ApplicationBaseForm.objects.create', ([], {'application': 'fund', 'form': 'proposal_form'}), '(application=fund, form=proposal_form)\n', (15588, 15626), False, 'from opentech.apply.funds.models.forms import ApplicationBaseForm, ApplicationBaseReviewForm\n'), ((15707, 15800), 'opentech.apply.funds.models.forms.ApplicationBaseReviewForm.objects.create', 'ApplicationBaseReviewForm.objects.create', ([], {'application': 'fund', 'form': 'application_review_form'}), '(application=fund, 
form=\n application_review_form)\n', (15747, 15800), False, 'from opentech.apply.funds.models.forms import ApplicationBaseForm, ApplicationBaseReviewForm\n'), ((15828, 15918), 'opentech.apply.funds.models.forms.ApplicationBaseReviewForm.objects.create', 'ApplicationBaseReviewForm.objects.create', ([], {'application': 'fund', 'form': 'proposal_review_form'}), '(application=fund, form=\n proposal_review_form)\n', (15868, 15918), False, 'from opentech.apply.funds.models.forms import ApplicationBaseForm, ApplicationBaseReviewForm\n'), ((16477, 16493), 'datetime.date', 'date', (['(2013)', '(1)', '(1)'], {}), '(2013, 1, 1)\n', (16481, 16493), False, 'from datetime import date\n'), ((16516, 16533), 'datetime.date', 'date', (['(2018)', '(8)', '(29)'], {}), '(2018, 8, 29)\n', (16520, 16533), False, 'from datetime import date\n'), ((9617, 9633), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (9627, 9633), False, 'import json\n'), ((12512, 12529), 'json.dumps', 'json.dumps', (['data2'], {}), '(data2)\n', (12522, 12529), False, 'import json\n'), ((13587, 13604), 'json.dumps', 'json.dumps', (['data3'], {}), '(data3)\n', (13597, 13604), False, 'import json\n'), ((14958, 14975), 'json.dumps', 'json.dumps', (['data4'], {}), '(data4)\n', (14968, 14975), False, 'import json\n')]
|
import os
def clean_up_files(path):
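    # delete the file at the given path; os.remove raises FileNotFoundError if it does not exist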
os.remove(path)
|
[
"os.remove"
] |
[((42, 57), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (51, 57), False, 'import os\n')]
|
from persimmon.view.pins.circularbutton import CircularButton # MYPY HACK
from persimmon.view.util import Type, AbstractWidget, Connection
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.graphics import Color, Ellipse, Line
from kivy.input import MotionEvent
from abc import abstractmethod
Builder.load_file('persimmon/view/pins/pin.kv')
class Pin(CircularButton, metaclass=AbstractWidget):
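    """Abstract connection point on a block; concrete pins supply the touch
    handling and connection hooks declared below."""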
val = ObjectProperty(None, force_dispatch=True)
block = ObjectProperty()
type_ = ObjectProperty(Type.ANY)
@abstractmethod
def on_touch_down(self, touch: MotionEvent) -> bool:
raise NotImplementedError
@abstractmethod
def on_touch_up(self, touch: MotionEvent) -> bool:
raise NotImplementedError
@abstractmethod
def on_connection_delete(self, connection: Connection):
raise NotImplementedError
@abstractmethod
def connect_pin(self, connection: Connection):
raise NotImplementedError
def typesafe(self, other: 'Pin') -> bool:
""" Tells if a relation between two pins is typesafe. """
if self.block == other.block or self.__class__ == other.__class__:
return False
elif self.type_ == Type.ANY or other.type_ == Type.ANY:
return True # Anything is possible with ANY
else:
return self.type_ == other.type_
# Hack
def on_type_(self, instance: 'Pin', value: Type):
""" If the kv lang was a bit smarted this would not be needed
"""
self.color = value.value
|
[
"kivy.lang.Builder.load_file",
"kivy.properties.ObjectProperty"
] |
[((327, 374), 'kivy.lang.Builder.load_file', 'Builder.load_file', (['"""persimmon/view/pins/pin.kv"""'], {}), "('persimmon/view/pins/pin.kv')\n", (344, 374), False, 'from kivy.lang import Builder\n'), ((439, 480), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['None'], {'force_dispatch': '(True)'}), '(None, force_dispatch=True)\n', (453, 480), False, 'from kivy.properties import ObjectProperty\n'), ((493, 509), 'kivy.properties.ObjectProperty', 'ObjectProperty', ([], {}), '()\n', (507, 509), False, 'from kivy.properties import ObjectProperty\n'), ((522, 546), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['Type.ANY'], {}), '(Type.ANY)\n', (536, 546), False, 'from kivy.properties import ObjectProperty\n')]
|
from django.contrib import admin
from .models import Profile
# Register your models here.
admin.site.register(Profile)
|
[
"django.contrib.admin.site.register"
] |
[((91, 119), 'django.contrib.admin.site.register', 'admin.site.register', (['Profile'], {}), '(Profile)\n', (110, 119), False, 'from django.contrib import admin\n')]
|
import traceback
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from marketplace import logger
class DebugModeLoggingMiddleware(object):
"""
    Use this middleware to force logging of errors even when DEBUG = True. You'll
    find this useful if you run QA in DEBUG mode and you'd still like to log
    exceptions that show up there (not just to the screen, which is the default
    behavior in DEBUG mode).
If you don't want to fiddle with your middlewares list in different
environments, you can just add this permanently, and then easily turn it off by
using this setting:
DEBUG_MODE_LOGGING = False
You might find it useful to do that for local development, since it may get
annoying to wade through exception logs on your console when you're already
seeing every error on the screen.
"""
def __init__(self):
        super(DebugModeLoggingMiddleware, self).__init__()
self.log = logger.get_log(__name__)
if not getattr(settings, 'DEBUG', False):
            self.log.info('DebugModeLoggingMiddleware has been turned off for all requests because we\'re not in debug mode')
raise MiddlewareNotUsed
if not getattr(settings, 'DEBUG_MODE_LOGGING', True):
self.log.info('DebugModeLoggingMiddleware has been explicitly turned off for all requests')
raise MiddlewareNotUsed
self.log.info('DebugModeLoggingMiddleware has been activated')
def process_exception(self, request, exception):
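        # Django calls process_exception while the exception is being handled, so format_exc() can read the active traceback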
if settings.DEBUG:
            self.log.error(traceback.format_exc())
|
[
"traceback.format_exc",
"marketplace.logger.get_log"
] |
[((948, 972), 'marketplace.logger.get_log', 'logger.get_log', (['__name__'], {}), '(__name__)\n', (962, 972), False, 'from marketplace import logger\n'), ((1562, 1593), 'traceback.format_exc', 'traceback.format_exc', (['exception'], {}), '(exception)\n', (1582, 1593), False, 'import traceback\n')]
|
# %% [markdown]
# # THE MIND OF A MAGGOT
# %% [markdown]
# ## Imports
import os
import time
import warnings
from itertools import chain
import colorcet as cc
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
from anytree import LevelOrderGroupIter, NodeMixin
from mpl_toolkits.mplot3d import Axes3D
from scipy.linalg import orthogonal_procrustes
from scipy.optimize import linear_sum_assignment
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics import adjusted_rand_score
from sklearn.utils.testing import ignore_warnings
from tqdm import tqdm
import pymaid
from graspy.cluster import GaussianCluster
from graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed, selectSVD
from graspy.models import DCSBMEstimator, RDPGEstimator, SBMEstimator
from graspy.plot import heatmap, pairplot
from graspy.simulations import rdpg
from graspy.utils import augment_diagonal, binarize, pass_to_ranks
from src.data import load_metagraph
from src.graph import preprocess
from src.hierarchy import signal_flow
from src.io import savecsv, savefig
from src.pymaid import start_instance
from src.traverse import Cascade, TraverseDispatcher, to_transmission_matrix
from src.visualization import (
CLASS_COLOR_DICT,
adjplot,
barplot_text,
gridmap,
matrixplot,
set_axes_equal,
stacked_barplot,
)
warnings.filterwarnings(action="ignore", category=ConvergenceWarning)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
rc_dict = {
"axes.spines.right": False,
"axes.spines.top": False,
"axes.formatter.limits": (-3, 3),
"figure.figsize": (6, 3),
"figure.dpi": 100,
}
for key, val in rc_dict.items():
mpl.rcParams[key] = val
context = sns.plotting_context(context="talk", font_scale=1, rc=rc_dict)
sns.set_context(context)
np.random.seed(8888)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
def stashcsv(df, name, **kws):
savecsv(df, name)
mg = load_metagraph("G", version="2020-04-01")
mg = preprocess(
mg,
threshold=0,
sym_threshold=False,
remove_pdiff=True,
binarize=False,
weight="weight",
)
meta = mg.meta
# plot where we are cutting out nodes based on degree
degrees = mg.calculate_degrees()
fig, ax = plt.subplots(1, 1, figsize=(5, 2.5))
sns.distplot(np.log10(degrees["Total edgesum"]), ax=ax)
q = np.quantile(degrees["Total edgesum"], 0.05)
ax.axvline(np.log10(q), linestyle="--", color="r")
ax.set_xlabel("log10(total synapses)")
# remove low degree neurons
idx = meta[degrees["Total edgesum"] > q].index
mg = mg.reindex(idx, use_ids=True)
# remove center neurons # FIXME
idx = mg.meta[mg.meta["hemisphere"].isin(["L", "R"])].index
mg = mg.reindex(idx, use_ids=True)
mg = mg.make_lcc()
mg.calculate_degrees(inplace=True)
meta = mg.meta
meta["inds"] = range(len(meta))
adj = mg.adj
# %% [markdown]
# ## Setup for paths
out_groups = [
("dVNC", "dVNC;CN", "dVNC;RG", "dSEZ;dVNC"),
("dSEZ", "dSEZ;CN", "dSEZ;LHN", "dSEZ;dVNC"),
("motor-PaN", "motor-MN", "motor-VAN", "motor-AN"),
("RG", "RG-IPC", "RG-ITP", "RG-CA-LP", "dVNC;RG"),
("dUnk",),
]
out_group_names = ["VNC", "SEZ", "motor", "RG", "dUnk"]
source_groups = [
("sens-ORN",),
("sens-MN",),
("sens-photoRh5", "sens-photoRh6"),
("sens-thermo",),
("sens-vtd",),
("sens-AN",),
]
source_group_names = ["Odor", "MN", "Photo", "Temp", "VTD", "AN"]
class_key = "merge_class"
sg = list(chain.from_iterable(source_groups))
og = list(chain.from_iterable(out_groups))
sg_name = "All"
og_name = "All"
from src.traverse import to_markov_matrix
np.random.seed(888)
max_hops = 10
n_init = 100
p = 0.05
traverse = Cascade
simultaneous = True
transition_probs = to_transmission_matrix(adj, p)
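# NOTE: the plain random-walk (Markov) matrix below overwrites the cascade transmission matrix above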
transition_probs = to_markov_matrix(adj)
source_inds = meta[meta[class_key].isin(sg)]["inds"].values
out_inds = meta[meta[class_key].isin(og)]["inds"].values
# %% [markdown]
# ## Run paths
from src.traverse import RandomWalk
n_init = 1000
paths = []
path_lens = []
for s in source_inds:
rw = RandomWalk(
transition_probs, stop_nodes=out_inds, max_hops=10, allow_loops=False
)
for n in range(n_init):
rw.start(s)
paths.append(rw.traversal_)
path_lens.append(len(rw.traversal_))
# %% [markdown]
# ## Look at distribution of path lengths
path_lens = [len(p) for p in paths]
sns.distplot(path_lens)
paths_by_len = {i: [] for i in range(1, max_hops + 1)}
for p in paths:
paths_by_len[len(p)].append(p)
# %% [markdown]
# ## Embed for a dissimilarity measure
from src.cluster import get_paired_inds
embedder = AdjacencySpectralEmbed(n_components=None, n_elbows=2)
embed = embedder.fit_transform(pass_to_ranks(adj))
embed = np.concatenate(embed, axis=-1)
lp_inds, rp_inds = get_paired_inds(meta)
R, _ = orthogonal_procrustes(embed[lp_inds], embed[rp_inds])
left_inds = meta[meta["left"]]["inds"]
right_inds = meta[meta["right"]]["inds"]
embed[left_inds] = embed[left_inds] @ R
from sklearn.metrics import pairwise_distances
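# cosine distances between node embeddings, used below as the per-node cost when comparing paths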
pdist = pairwise_distances(embed, metric="cosine")
# %% [markdown]
# ##
subsample = 2 ** 11
paths = paths_by_len[6]
new_paths = []
for p in paths:
if p[-1] in out_inds:
new_paths.append(p)
paths = new_paths
print(len(paths))
if subsample != -1:
inds = np.random.choice(len(paths), size=subsample, replace=False)
new_paths = []
for i, p in enumerate(paths):
if i in inds:
new_paths.append(p)
paths = new_paths
print(len(paths))
# %% [markdown]
# ##
path_len = len(paths[0])
path_dist_mat = np.zeros((len(paths), len(paths)))
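# path dissimilarity: sum over hops of the embedding distance between the nodes the two paths visit at that hop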
for i in range(len(paths)):
for j in range(len(paths)):
p1 = paths[i]
p2 = paths[j]
dist_sum = 0
for t in range(path_len):
dist = pdist[p1[t], p2[t]]
dist_sum += dist
path_dist_mat[i, j] = dist_sum
path_indicator_mat = np.zeros((len(paths), len(adj)), dtype=int)
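# rows are paths, columns are neurons; entries record the 1-based hop at which a path visits a neuron (0 = not visited)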
for i, p in enumerate(paths):
for j, visit in enumerate(p):
path_indicator_mat[i, visit] = j + 1
# %% [markdown]
# ## Cluster and look at distance mat
from scipy.cluster.hierarchy import linkage
from scipy.spatial.distance import squareform
Z = linkage(squareform(path_dist_mat), method="average")
sns.clustermap(
path_dist_mat,
figsize=(20, 20),
row_linkage=Z,
col_linkage=Z,
xticklabels=False,
yticklabels=False,
)
stashfig("clustermap")
# %% [markdown]
# ##
from graspy.embed import ClassicalMDS
from src.visualization import screeplot
cmds = ClassicalMDS(dissimilarity="precomputed", n_components=10)
path_embed = cmds.fit_transform(path_dist_mat)
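# scree plot of the CMDS singular values, to judge how many components to keep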
plt.plot(cmds.singular_values_, "o")
# %% [markdown]
# ##
from graspy.plot import pairplot
n_components = 5
pairplot(path_embed[:, :n_components], alpha=0.1)
# %% [markdown]
# ##
from graspy.cluster import AutoGMMCluster
n_components = 4
agmm = AutoGMMCluster(max_components=20, n_jobs=-2)
pred = agmm.fit_predict(path_embed[:, :n_components])
print(agmm.n_components_)
pairplot(path_embed[:, :n_components], alpha=0.1, labels=pred, palette=cc.glasbey_light)
# %% [markdown]
# ##
color_dict = dict(zip(np.unique(pred), cc.glasbey_light))
fig, ax = plt.subplots(1, 1, figsize=(20, 20))
adjplot(
path_dist_mat,
sort_class=pred,
cmap=None,
center=None,
ax=ax,
gridline_kws=dict(linewidth=0.5, color="grey", linestyle="--"),
ticks=False,
colors=pred,
palette=color_dict,
cbar=False,
)
stashfig("adjplot-GMMoCMDSoPathDist")
# %% [markdown]
# ##
from sklearn.cluster import AgglomerativeClustering
ag = AgglomerativeClustering(n_clusters=60, affinity="precomputed", linkage="average")
pred = ag.fit_predict(path_dist_mat)
fig, ax = plt.subplots(1, 1, figsize=(20, 20))
color_dict = dict(zip(np.unique(pred), cc.glasbey_light))
adjplot(
path_dist_mat,
sort_class=pred,
cmap=None,
center=None,
ax=ax,
gridline_kws=dict(linewidth=0.5, color="grey", linestyle="--"),
ticks=False,
colors=pred,
palette=color_dict,
)
# %% [markdown]
# ##
meta["signal_flow"] = -signal_flow(adj)
fig, ax = plt.subplots(1, 1, figsize=(20, 20))
meta["class2"].fillna(" ", inplace=True)
matrixplot(
path_indicator_mat,
ax=ax,
plot_type="scattermap",
col_sort_class=["class1", "class2"],
col_class_order="signal_flow",
col_ticks=False,
col_meta=meta,
col_colors="merge_class",
col_palette=CLASS_COLOR_DICT,
# col_ticks=False,
row_sort_class=pred,
row_ticks=False,
sizes=(1, 1),
hue="weight",
palette="tab10",
gridline_kws=dict(linewidth=0.3, color="grey", linestyle="--"),
)
# %% [markdown]
# ##
from sklearn.manifold import MDS
n_components = 8
metric = True
mds = MDS(
n_components=n_components,
metric=True,
n_init=16,
n_jobs=-1,
dissimilarity="precomputed",
)
embed = mds.fit_transform(pass_to_ranks(path_dist_mat))
pairplot(embed, alpha=0.1)
# %%
name = "122.1-BDP-silly-model-testing"
load = True
loc = f"maggot_models/notebooks/outs/{name}/csvs/stash-label-meta.csv"
if load:
meta = pd.read_csv(loc, index_col=0)
for col in ["0_pred", "1_pred", "2_pred", "hemisphere"]:
        # coerce the cluster-label columns to clean strings: drop "nan" and the trailing ".0"
        meta[col] = meta[col].astype(str)
        meta[col] = meta[col].replace("nan", "")
        meta[col] = meta[col].str.replace(r"\.0$", "")
meta["lvl0_labels"] = meta["0_pred"]
meta["lvl1_labels"] = meta["0_pred"] + "-" + meta["1_pred"]
meta["lvl2_labels"] = meta["0_pred"] + "-" + meta["1_pred"] + "-" + meta["2_pred"]
meta["lvl0_labels_side"] = meta["lvl0_labels"] + meta["hemisphere"]
meta["lvl1_labels_side"] = meta["lvl1_labels"] + meta["hemisphere"]
meta["lvl2_labels_side"] = meta["lvl2_labels"] + meta["hemisphere"]
# %%
# %% [markdown]
# ##
# %% [markdown]
# ##
# inds = np.random.choice(len(path_dist_mat), replace=False, size=16000)
# sub_path_indicator_mat = path_indicator_mat[inds]
# %% [markdown]
# ##
fig, ax = plt.subplots(1, 1, figsize=(30, 20))
matrixplot(
path_indicator_mat,
ax=ax,
plot_type="scattermap",
col_sort_class=["lvl2_labels"],
col_class_order="signal_flow",
col_meta=meta,
col_colors="merge_class",
col_item_order=["merge_class", "signal_flow"],
col_palette=CLASS_COLOR_DICT,
col_ticks=False,
row_sort_class=pred,
# row_class_order="size",
row_ticks=False,
sizes=(1, 1),
hue="weight",
palette="Set1",
gridline_kws=dict(linewidth=0.3, color="grey", linestyle="--"),
)
stashfig("path-indicator-map")
# %% [markdown]
# ## compute orders
mean_orders = []
for n in range(path_indicator_mat.shape[1]):
nz = np.nonzero(path_indicator_mat[:, n])
mean_order = np.mean(nz)
mean_orders.append(mean_order)
meta["mean_order"] = mean_orders
# %% [markdown]
# ##
from src.visualization import palplot
fig, axs = plt.subplots(
1, 2, figsize=(30, 20), gridspec_kw=dict(width_ratios=[0.95, 0.02], wspace=0.02)
)
pal = sns.color_palette("Set1", n_colors=7)
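# drop the sixth Set1 color (yellow), presumably because it reads poorly on a light background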
pal = pal[:5] + pal[6:]
ax = axs[0]
matrixplot(
path_indicator_mat,
ax=ax,
plot_type="scattermap",
col_sort_class=["lvl2_labels"],
col_class_order="signal_flow",
col_meta=meta,
col_colors="merge_class",
col_item_order=["merge_class", "mean_order"],
col_palette=CLASS_COLOR_DICT,
col_ticks=True,
tick_rot=90,
row_sort_class=pred,
# row_class_order="size",
row_ticks=True,
sizes=(1, 1),
hue="weight",
palette=pal,
gridline_kws=dict(linewidth=0.3, color="grey", linestyle="--"),
)
ax = axs[1]
palplot(pal, cmap="Set1", ax=ax)
ax.set_title("Visit order")
stashfig("path-indicator-map")
|
[
"numpy.random.seed",
"scipy.linalg.orthogonal_procrustes",
"src.io.savefig",
"pandas.read_csv",
"src.traverse.to_transmission_matrix",
"src.cluster.get_paired_inds",
"src.traverse.RandomWalk",
"numpy.mean",
"graspy.cluster.AutoGMMCluster",
"sklearn.manifold.MDS",
"numpy.unique",
"src.graph.preprocess",
"seaborn.clustermap",
"graspy.utils.pass_to_ranks",
"sklearn.cluster.AgglomerativeClustering",
"numpy.log10",
"matplotlib.pyplot.subplots",
"seaborn.set_context",
"seaborn.plotting_context",
"graspy.embed.AdjacencySpectralEmbed",
"os.path.basename",
"sklearn.metrics.pairwise_distances",
"scipy.spatial.distance.squareform",
"graspy.plot.pairplot",
"numpy.concatenate",
"graspy.embed.ClassicalMDS",
"numpy.quantile",
"matplotlib.pyplot.plot",
"warnings.filterwarnings",
"src.visualization.palplot",
"src.hierarchy.signal_flow",
"src.data.load_metagraph",
"numpy.nonzero",
"src.traverse.to_markov_matrix",
"seaborn.distplot",
"seaborn.color_palette",
"src.io.savecsv",
"itertools.chain.from_iterable"
] |
[((1450, 1519), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'category': 'ConvergenceWarning'}), "(action='ignore', category=ConvergenceWarning)\n", (1473, 1519), False, 'import warnings\n'), ((1813, 1875), 'seaborn.plotting_context', 'sns.plotting_context', ([], {'context': '"""talk"""', 'font_scale': '(1)', 'rc': 'rc_dict'}), "(context='talk', font_scale=1, rc=rc_dict)\n", (1833, 1875), True, 'import seaborn as sns\n'), ((1876, 1900), 'seaborn.set_context', 'sns.set_context', (['context'], {}), '(context)\n', (1891, 1900), True, 'import seaborn as sns\n'), ((1902, 1922), 'numpy.random.seed', 'np.random.seed', (['(8888)'], {}), '(8888)\n', (1916, 1922), True, 'import numpy as np\n'), ((2071, 2112), 'src.data.load_metagraph', 'load_metagraph', (['"""G"""'], {'version': '"""2020-04-01"""'}), "('G', version='2020-04-01')\n", (2085, 2112), False, 'from src.data import load_metagraph\n'), ((2118, 2222), 'src.graph.preprocess', 'preprocess', (['mg'], {'threshold': '(0)', 'sym_threshold': '(False)', 'remove_pdiff': '(True)', 'binarize': '(False)', 'weight': '"""weight"""'}), "(mg, threshold=0, sym_threshold=False, remove_pdiff=True,\n binarize=False, weight='weight')\n", (2128, 2222), False, 'from src.graph import preprocess\n'), ((2359, 2395), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(5, 2.5)'}), '(1, 1, figsize=(5, 2.5))\n', (2371, 2395), True, 'import matplotlib.pyplot as plt\n'), ((2456, 2499), 'numpy.quantile', 'np.quantile', (["degrees['Total edgesum']", '(0.05)'], {}), "(degrees['Total edgesum'], 0.05)\n", (2467, 2499), True, 'import numpy as np\n'), ((3694, 3713), 'numpy.random.seed', 'np.random.seed', (['(888)'], {}), '(888)\n', (3708, 3713), True, 'import numpy as np\n'), ((3808, 3838), 'src.traverse.to_transmission_matrix', 'to_transmission_matrix', (['adj', 'p'], {}), '(adj, p)\n', (3830, 3838), False, 'from src.traverse import Cascade, TraverseDispatcher, to_transmission_matrix\n'), ((3858, 3879), 'src.traverse.to_markov_matrix', 'to_markov_matrix', (['adj'], {}), '(adj)\n', (3874, 3879), False, 'from src.traverse import to_markov_matrix\n'), ((4469, 4492), 'seaborn.distplot', 'sns.distplot', (['path_lens'], {}), '(path_lens)\n', (4481, 4492), True, 'import seaborn as sns\n'), ((4710, 4763), 'graspy.embed.AdjacencySpectralEmbed', 'AdjacencySpectralEmbed', ([], {'n_components': 'None', 'n_elbows': '(2)'}), '(n_components=None, n_elbows=2)\n', (4732, 4763), False, 'from graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed, selectSVD\n'), ((4823, 4853), 'numpy.concatenate', 'np.concatenate', (['embed'], {'axis': '(-1)'}), '(embed, axis=-1)\n', (4837, 4853), True, 'import numpy as np\n'), ((4874, 4895), 'src.cluster.get_paired_inds', 'get_paired_inds', (['meta'], {}), '(meta)\n', (4889, 4895), False, 'from src.cluster import get_paired_inds\n'), ((4904, 4957), 'scipy.linalg.orthogonal_procrustes', 'orthogonal_procrustes', (['embed[lp_inds]', 'embed[rp_inds]'], {}), '(embed[lp_inds], embed[rp_inds])\n', (4925, 4957), False, 'from scipy.linalg import orthogonal_procrustes\n'), ((5136, 5178), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['embed'], {'metric': '"""cosine"""'}), "(embed, metric='cosine')\n", (5154, 5178), False, 'from sklearn.metrics import pairwise_distances\n'), ((6351, 6471), 'seaborn.clustermap', 'sns.clustermap', (['path_dist_mat'], {'figsize': '(20, 20)', 'row_linkage': 'Z', 'col_linkage': 'Z', 'xticklabels': '(False)', 'yticklabels': '(False)'}), '(path_dist_mat, 
figsize=(20, 20), row_linkage=Z, col_linkage=\n Z, xticklabels=False, yticklabels=False)\n', (6365, 6471), True, 'import seaborn as sns\n'), ((6624, 6682), 'graspy.embed.ClassicalMDS', 'ClassicalMDS', ([], {'dissimilarity': '"""precomputed"""', 'n_components': '(10)'}), "(dissimilarity='precomputed', n_components=10)\n", (6636, 6682), False, 'from graspy.embed import ClassicalMDS\n'), ((6732, 6768), 'matplotlib.pyplot.plot', 'plt.plot', (['cmds.singular_values_', '"""o"""'], {}), "(cmds.singular_values_, 'o')\n", (6740, 6768), True, 'import matplotlib.pyplot as plt\n'), ((6842, 6891), 'graspy.plot.pairplot', 'pairplot', (['path_embed[:, :n_components]'], {'alpha': '(0.1)'}), '(path_embed[:, :n_components], alpha=0.1)\n', (6850, 6891), False, 'from graspy.plot import pairplot\n'), ((6982, 7026), 'graspy.cluster.AutoGMMCluster', 'AutoGMMCluster', ([], {'max_components': '(20)', 'n_jobs': '(-2)'}), '(max_components=20, n_jobs=-2)\n', (6996, 7026), False, 'from graspy.cluster import AutoGMMCluster\n'), ((7108, 7201), 'graspy.plot.pairplot', 'pairplot', (['path_embed[:, :n_components]'], {'alpha': '(0.1)', 'labels': 'pred', 'palette': 'cc.glasbey_light'}), '(path_embed[:, :n_components], alpha=0.1, labels=pred, palette=cc.\n glasbey_light)\n', (7116, 7201), False, 'from graspy.plot import pairplot\n'), ((7286, 7322), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(20, 20)'}), '(1, 1, figsize=(20, 20))\n', (7298, 7322), True, 'import matplotlib.pyplot as plt\n'), ((7677, 7763), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': '(60)', 'affinity': '"""precomputed"""', 'linkage': '"""average"""'}), "(n_clusters=60, affinity='precomputed', linkage=\n 'average')\n", (7700, 7763), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((7806, 7842), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(20, 20)'}), '(1, 1, figsize=(20, 20))\n', (7818, 7842), True, 'import matplotlib.pyplot as plt\n'), ((8195, 8231), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(20, 20)'}), '(1, 1, figsize=(20, 20))\n', (8207, 8231), True, 'import matplotlib.pyplot as plt\n'), ((8817, 8915), 'sklearn.manifold.MDS', 'MDS', ([], {'n_components': 'n_components', 'metric': '(True)', 'n_init': '(16)', 'n_jobs': '(-1)', 'dissimilarity': '"""precomputed"""'}), "(n_components=n_components, metric=True, n_init=16, n_jobs=-1,\n dissimilarity='precomputed')\n", (8820, 8915), False, 'from sklearn.manifold import MDS\n'), ((8992, 9018), 'graspy.plot.pairplot', 'pairplot', (['embed'], {'alpha': '(0.1)'}), '(embed, alpha=0.1)\n', (9000, 9018), False, 'from graspy.plot import pairplot\n'), ((10204, 10240), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(30, 20)'}), '(1, 1, figsize=(30, 20))\n', (10216, 10240), True, 'import matplotlib.pyplot as plt\n'), ((11195, 11232), 'seaborn.color_palette', 'sns.color_palette', (['"""Set1"""'], {'n_colors': '(7)'}), "('Set1', n_colors=7)\n", (11212, 11232), True, 'import seaborn as sns\n'), ((11795, 11827), 'src.visualization.palplot', 'palplot', (['pal'], {'cmap': '"""Set1"""', 'ax': 'ax'}), "(pal, cmap='Set1', ax=ax)\n", (11802, 11827), False, 'from src.visualization import palplot\n'), ((1529, 1555), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1545, 1555), False, 'import os\n'), ((1956, 2008), 'src.io.savefig', 'savefig', (['name'], {'foldername': 'FNAME', 'save_on': '(True)'}), '(name, 
foldername=FNAME, save_on=True, **kws)\n', (1963, 2008), False, 'from src.io import savecsv, savefig\n'), ((2046, 2063), 'src.io.savecsv', 'savecsv', (['df', 'name'], {}), '(df, name)\n', (2053, 2063), False, 'from src.io import savecsv, savefig\n'), ((2409, 2443), 'numpy.log10', 'np.log10', (["degrees['Total edgesum']"], {}), "(degrees['Total edgesum'])\n", (2417, 2443), True, 'import numpy as np\n'), ((2511, 2522), 'numpy.log10', 'np.log10', (['q'], {}), '(q)\n', (2519, 2522), True, 'import numpy as np\n'), ((3539, 3573), 'itertools.chain.from_iterable', 'chain.from_iterable', (['source_groups'], {}), '(source_groups)\n', (3558, 3573), False, 'from itertools import chain\n'), ((3585, 3616), 'itertools.chain.from_iterable', 'chain.from_iterable', (['out_groups'], {}), '(out_groups)\n', (3604, 3616), False, 'from itertools import chain\n'), ((4140, 4226), 'src.traverse.RandomWalk', 'RandomWalk', (['transition_probs'], {'stop_nodes': 'out_inds', 'max_hops': '(10)', 'allow_loops': '(False)'}), '(transition_probs, stop_nodes=out_inds, max_hops=10, allow_loops=\n False)\n', (4150, 4226), False, 'from src.traverse import RandomWalk\n'), ((4795, 4813), 'graspy.utils.pass_to_ranks', 'pass_to_ranks', (['adj'], {}), '(adj)\n', (4808, 4813), False, 'from graspy.utils import augment_diagonal, binarize, pass_to_ranks\n'), ((6305, 6330), 'scipy.spatial.distance.squareform', 'squareform', (['path_dist_mat'], {}), '(path_dist_mat)\n', (6315, 6330), False, 'from scipy.spatial.distance import squareform\n'), ((8167, 8183), 'src.hierarchy.signal_flow', 'signal_flow', (['adj'], {}), '(adj)\n', (8178, 8183), False, 'from src.hierarchy import signal_flow\n'), ((8961, 8989), 'graspy.utils.pass_to_ranks', 'pass_to_ranks', (['path_dist_mat'], {}), '(path_dist_mat)\n', (8974, 8989), False, 'from graspy.utils import augment_diagonal, binarize, pass_to_ranks\n'), ((9168, 9197), 'pandas.read_csv', 'pd.read_csv', (['loc'], {'index_col': '(0)'}), '(loc, index_col=0)\n', (9179, 9197), True, 'import pandas as pd\n'), ((10882, 10918), 'numpy.nonzero', 'np.nonzero', (['path_indicator_mat[:, n]'], {}), '(path_indicator_mat[:, n])\n', (10892, 10918), True, 'import numpy as np\n'), ((10936, 10947), 'numpy.mean', 'np.mean', (['nz'], {}), '(nz)\n', (10943, 10947), True, 'import numpy as np\n'), ((7240, 7255), 'numpy.unique', 'np.unique', (['pred'], {}), '(pred)\n', (7249, 7255), True, 'import numpy as np\n'), ((7865, 7880), 'numpy.unique', 'np.unique', (['pred'], {}), '(pred)\n', (7874, 7880), True, 'import numpy as np\n')]
|
def Prox(tests, num_test_bytes, write_tests_to_nvm, reset):
try:
import board
from pimoroni_circuitpython_adapter import not_SMBus
from pimoroni_ltr559 import LTR559
i2c = board.I2C()
i2c_dev = not_SMBus(I2C=i2c)
ltr559 = LTR559(i2c_dev=i2c_dev)
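        # the LTR-559 reports an 11-bit proximity value, so any reading in 0-2047 counts as a pass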
if 0 <= ltr559.get_proximity() <= 2047:
tests["Prox"]["Passed"] = True
print("Passed with", ltr559.get_proximity())
else:
tests["Prox"]["Passed"] = False
print("Failed")
except Exception as e:
tests["Prox"]["Passed"] = False
print("Failed with ", e)
finally:
tests["Prox"]["Test Run"] = True
write_tests_to_nvm(tests, num_test_bytes)
reset()
|
[
"board.I2C",
"pimoroni_circuitpython_adapter.not_SMBus",
"pimoroni_ltr559.LTR559"
] |
[((208, 219), 'board.I2C', 'board.I2C', ([], {}), '()\n', (217, 219), False, 'import board\n'), ((238, 256), 'pimoroni_circuitpython_adapter.not_SMBus', 'not_SMBus', ([], {'I2C': 'i2c'}), '(I2C=i2c)\n', (247, 256), False, 'from pimoroni_circuitpython_adapter import not_SMBus\n'), ((274, 297), 'pimoroni_ltr559.LTR559', 'LTR559', ([], {'i2c_dev': 'i2c_dev'}), '(i2c_dev=i2c_dev)\n', (280, 297), False, 'from pimoroni_ltr559 import LTR559\n')]
|
import pickle
from ray.rllib.agents.bco.inverse_dynamics_model import InverseDynamicsModel
from osim.env import ProstheticsEnv
env = ProstheticsEnv(visualize=True)
# env.change_model(model='3D', prosthetic=False)
print(env.action_space) # Returns `Box(19,)`
print(env.action_space.low) # Returns list of 19 zeroes
print(env.action_space.high) # Returns list of 19 ones
env_model = InverseDynamicsModel(env_creator, config, True)  # assumes env_creator and config are defined elsewhere
env_model_data = pickle.load(open("/data/nips/ckpt/checkpoint-1128.env_mdoel_data", "rb"))
env_model.set_weights(env_model_data)
actions = env_model.test_model()
observation = env.reset()
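# replay the recovered actions open-loop and print the per-step reward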
for i in range(300):
# action = df.loc[i][1:].tolist()
action = actions[i]
observation, reward, done, info = env.step(action)
print(reward)
|
[
"ray.rllib.agents.bco.inverse_dynamics_model.InverseDynamicsModel",
"osim.env.ProstheticsEnv"
] |
[((121, 151), 'osim.env.ProstheticsEnv', 'ProstheticsEnv', ([], {'visualize': '(True)'}), '(visualize=True)\n', (135, 151), False, 'from osim.env import ProstheticsEnv\n'), ((390, 437), 'ray.rllib.agents.bco.inverse_dynamics_model.InverseDynamicsModel', 'InverseDynamicsModel', (['env_creator', 'config', '(True)'], {}), '(env_creator, config, True)\n', (410, 437), False, 'from ray.rllib.agents.bco.inverse_dynamics_model import InverseDynamicsModel\n')]
|
#!/usr/bin/env python
# this script classifies TE position as genic or intergenic
# it also outputs the sequence name if a TE was found in a gene and whether or not that TE was in the "border" region of a gene (within 10bp from the end)
# or if it is in an "internal" region
# USE: separate_gene_assignments.py
import re
internal_TEs={}
full_ins_TEs={}
all_TEs={}
'''
chr X
start_TE X
end_TE x
start_gene/NA X
TE X
orient X
RS X
part X
gene_name/NA X
gene class protein_coding/pseduogene/NA X
'''
# put TEs overlapped with genes into dict
with open("all_window.txt", 'r') as IN:
for line in IN:
line=line.rstrip('\n')
items=re.split("[\t]", line)
TE=items[12]
#gene info:
gene_start=items[3]
gene_end=items[4]
gene_info=items[8]
match=re.search("sequence_name=([A-za-z\d\.]+);", gene_info)
gene_name=match.group(1)
match=re.search("biotype=([A-za-z]+);", gene_info)
gene_class=match.group(1)
#TE info:
match = re.search("([A-Z]+)_(\d+)_(\d+)_(([A-Za-z\d+_-]+))_((\w+-)?reference)_+([a-z]+)_([a-z]+)_(\d+)_([\d\.]+)_([\+-])_([A-Za-z]+)_(\w+)", TE) #([A-Za-z\d+_])_((\w+-)?reference)\w+_\d+_\d+
chromosome = match.group(1)
start = match.group(2)
start2 = match.group(3)
te = match.group(4)
RS = match.group(11)
orient = match.group(12)
method=match.group(13)
sample = match.group(14)
new_info="{chromosome}\t{start}\t{start2}\t{method}\t{gene_start}\t{gene_end}\t{te}\t{orient}\t{RS}\t{gene_name}\t{gene_class}".format(**locals())
internal_TEs[TE]=new_info
# put insertion TEs overlapped with full genes giff into dict
with open("insertions_full_window.txt", 'r') as IN:
for line in IN:
line=line.rstrip('\n')
items=re.split("[\t]", line)
TE=items[12]
#gene info:
gene_start=items[3]
gene_end=items[4]
gene_info=items[8]
match=re.search("sequence_name=([A-za-z\d\.]+);", gene_info)
gene_name=match.group(1)
match=re.search("biotype=([A-za-z]+);", gene_info)
gene_class=match.group(1)
#TE info:
match = re.search("([A-Z]+)_(\d+)_(\d+)_(([A-Za-z\d+_-]+))_((\w+-)?reference)_+([a-z]+)_([a-z]+)_(\d+)_([\d\.]+)_([\+-])_([A-Za-z]+)_(\w+)", TE) #([A-Za-z\d+_])_((\w+-)?reference)\w+_\d+_\d+
chromosome = match.group(1)
start = match.group(2)
start2 = match.group(3)
te = match.group(4)
RS = match.group(11)
orient = match.group(12)
method=match.group(13)
sample = match.group(14)
new_info="{chromosome}\t{start}\t{start2}\t{method}\t{gene_start}\t{gene_end}\t{te}\t{orient}\t{RS}\t{gene_name}\t{gene_class}".format(**locals())
full_ins_TEs[TE]=new_info
# put all TEs into dict
with open("all.bed", 'r') as IN:
for line in IN:
line=line.rstrip('\n')
items=re.split("[\t]", line)
TE=items[3]
#TE info:
match = re.search("([A-Z]+)_(\d+)_(\d+)_(([A-Za-z\d+_-]+))_((\w+-)?reference)_+([a-z]+)_([a-z]+)_(\d+)_([\d\.]+)_([\+-])_([A-Za-z]+)_(\w+)", TE) #([A-Za-z\d+_])_((\w+-)?reference)\w+_\d+_\d+
chromosome = match.group(1)
start = match.group(2)
start2 = match.group(3)
te = match.group(4)
RS = match.group(11)
orient = match.group(12)
method=match.group(13)
sample = match.group(14)
gene_start="NA"
gene_end="NA"
gene_name="NA"
gene_class="NA"
new_info="{chromosome}\t{start}\t{start2}\t{method}\t{gene_start}\t{gene_end}\t{TE}\t{orient}\t{RS}\t{gene_name}\t{gene_class}".format(**locals())
all_TEs[TE]=new_info
OUT=open("TE_gene_interrupt_output.txt", 'w')
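# a TE present only in the full-gene window is "border"; one also in the internal window is "internal"; anything else is intergenic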
for key, value in all_TEs.items():
if key in full_ins_TEs.keys() and key not in internal_TEs.keys():
part="border"
overall="Genic"
value=full_ins_TEs[key]
elif key in internal_TEs.keys():
part="internal"
overall="Genic"
value=internal_TEs[key]
else:
part="intergenic"
overall="Intergenic"
value="{value}\t{part}\t{overall}".format(**locals())
OUT.write(value)
OUT.write('\n')
OUT.close()
|
[
"re.split",
"re.search"
] |
[((641, 663), 're.split', 're.split', (['"""[\t]"""', 'line'], {}), "('[\\t]', line)\n", (649, 663), False, 'import re\n'), ((767, 823), 're.search', 're.search', (['"""sequence_name=([A-za-z\\\\d\\\\.]+);"""', 'gene_info'], {}), "('sequence_name=([A-za-z\\\\d\\\\.]+);', gene_info)\n", (776, 823), False, 'import re\n'), ((857, 901), 're.search', 're.search', (['"""biotype=([A-za-z]+);"""', 'gene_info'], {}), "('biotype=([A-za-z]+);', gene_info)\n", (866, 901), False, 'import re\n'), ((952, 1107), 're.search', 're.search', (['"""([A-Z]+)_(\\\\d+)_(\\\\d+)_(([A-Za-z\\\\d+_-]+))_((\\\\w+-)?reference)_+([a-z]+)_([a-z]+)_(\\\\d+)_([\\\\d\\\\.]+)_([\\\\+-])_([A-Za-z]+)_(\\\\w+)"""', 'TE'], {}), "(\n '([A-Z]+)_(\\\\d+)_(\\\\d+)_(([A-Za-z\\\\d+_-]+))_((\\\\w+-)?reference)_+([a-z]+)_([a-z]+)_(\\\\d+)_([\\\\d\\\\.]+)_([\\\\+-])_([A-Za-z]+)_(\\\\w+)'\n , TE)\n", (961, 1107), False, 'import re\n'), ((1688, 1710), 're.split', 're.split', (['"""[\t]"""', 'line'], {}), "('[\\t]', line)\n", (1696, 1710), False, 'import re\n'), ((1814, 1870), 're.search', 're.search', (['"""sequence_name=([A-za-z\\\\d\\\\.]+);"""', 'gene_info'], {}), "('sequence_name=([A-za-z\\\\d\\\\.]+);', gene_info)\n", (1823, 1870), False, 'import re\n'), ((1904, 1948), 're.search', 're.search', (['"""biotype=([A-za-z]+);"""', 'gene_info'], {}), "('biotype=([A-za-z]+);', gene_info)\n", (1913, 1948), False, 'import re\n'), ((1999, 2154), 're.search', 're.search', (['"""([A-Z]+)_(\\\\d+)_(\\\\d+)_(([A-Za-z\\\\d+_-]+))_((\\\\w+-)?reference)_+([a-z]+)_([a-z]+)_(\\\\d+)_([\\\\d\\\\.]+)_([\\\\+-])_([A-Za-z]+)_(\\\\w+)"""', 'TE'], {}), "(\n '([A-Z]+)_(\\\\d+)_(\\\\d+)_(([A-Za-z\\\\d+_-]+))_((\\\\w+-)?reference)_+([a-z]+)_([a-z]+)_(\\\\d+)_([\\\\d\\\\.]+)_([\\\\+-])_([A-Za-z]+)_(\\\\w+)'\n , TE)\n", (2008, 2154), False, 'import re\n'), ((2674, 2696), 're.split', 're.split', (['"""[\t]"""', 'line'], {}), "('[\\t]', line)\n", (2682, 2696), False, 'import re\n'), ((2734, 2889), 're.search', 're.search', (['"""([A-Z]+)_(\\\\d+)_(\\\\d+)_(([A-Za-z\\\\d+_-]+))_((\\\\w+-)?reference)_+([a-z]+)_([a-z]+)_(\\\\d+)_([\\\\d\\\\.]+)_([\\\\+-])_([A-Za-z]+)_(\\\\w+)"""', 'TE'], {}), "(\n '([A-Z]+)_(\\\\d+)_(\\\\d+)_(([A-Za-z\\\\d+_-]+))_((\\\\w+-)?reference)_+([a-z]+)_([a-z]+)_(\\\\d+)_([\\\\d\\\\.]+)_([\\\\+-])_([A-Za-z]+)_(\\\\w+)'\n , TE)\n", (2743, 2889), False, 'import re\n')]
|
import re
f = open("adam-results-128-gpus-all-algos", "r")
ourAdam = f.read()
f.close()
f = open("/philly/rr3/msrhyperprojvc2_scratch/saemal/abhinav/nccl-manual/samples/optim-bench-results-128GPUs", "r")
otherAdams = f.read()
f.close()
adamResults = {"FusedAdam":{}, "PyTorchAdam":{}, "OurAdam":{}} #dictionary of [FusedAdam, PyTorchAdam, OurAdam]x[Sizes]x[Times]
allSizes = []
for size, time in re.findall(r'\(null\) (\d+) ([\d\.]+)', ourAdam):
adamResults["OurAdam"][int(size)] = float(time)
allSizes += [int(size)]
for size, time in re.findall(r'fusedadam (\d+) \d+ ([\d\.]+)', otherAdams):
adamResults["FusedAdam"][int(size)] = float(time)
for size, time in re.findall(r'adam (\d+) \d+ ([\d\.]+)', otherAdams):
adamResults["PyTorchAdam"][int(size)] = float(time)
print ("{:<15} {:<15} {:<15} {:<15} {:<15} {:<15}".format("Size", "FusedAdam", "PyTorchAdam", "OurAdam", "Speedup Over FusedAdam", "Speedup Over PytorchAdam"))
for sz in allSizes:
print("{:<15} {:<15.2f} {:<15.2f} {:<15.2f} {:<20.2f} {:<20.2f}".format(sz, adamResults["FusedAdam"][sz], adamResults["PyTorchAdam"][sz], adamResults["OurAdam"][sz],
adamResults["FusedAdam"][sz]/adamResults["OurAdam"][sz], adamResults["PyTorchAdam"][sz]/adamResults["OurAdam"][sz]))
|
[
"re.findall"
] |
[((399, 451), 're.findall', 're.findall', (['"""\\\\(null\\\\) (\\\\d+) ([\\\\d\\\\.]+)"""', 'ourAdam'], {}), "('\\\\(null\\\\) (\\\\d+) ([\\\\d\\\\.]+)', ourAdam)\n", (409, 451), False, 'import re\n'), ((548, 607), 're.findall', 're.findall', (['"""fusedadam (\\\\d+) \\\\d+ ([\\\\d\\\\.]+)"""', 'otherAdams'], {}), "('fusedadam (\\\\d+) \\\\d+ ([\\\\d\\\\.]+)', otherAdams)\n", (558, 607), False, 'import re\n'), ((679, 733), 're.findall', 're.findall', (['"""adam (\\\\d+) \\\\d+ ([\\\\d\\\\.]+)"""', 'otherAdams'], {}), "('adam (\\\\d+) \\\\d+ ([\\\\d\\\\.]+)', otherAdams)\n", (689, 733), False, 'import re\n')]
|
import cv2
import rest
import numpy as np
class ChromaKeyServiceImpl(rest.ChromaKeyingService):
def replace(self, src_image_str, bg_image_str) -> bytes:
bg = cv2.imdecode(np.frombuffer(bg_image_str, np.uint8), cv2.IMREAD_COLOR)
img = cv2.imdecode(np.frombuffer(src_image_str, np.uint8), cv2.IMREAD_COLOR)
RED, GREEN, BLUE = (2, 1, 0)
reds = img[:, :, RED]
greens = img[:, :, GREEN]
blues = img[:, :, BLUE]
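        # foreground mask: keep pixels that are not predominantly green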
mask = (greens < 70) | (reds > greens) | (blues > greens)
mask = mask.astype("uint8") * 255
# print(mask)
mask_inv = cv2.bitwise_not(mask)
# cv2.imshow("Mask", mask)
# cv2.imshow("Mask inv", mask_inv)
        # keep only the foreground (non-green) pixels of the source image
        result = cv2.bitwise_and(img, img, mask=mask)
        bg = cv2.resize(bg, (img.shape[1], img.shape[0]))  # match the source frame size so the mask applies cleanly
bg = cv2.bitwise_and(bg, bg, mask=mask_inv)
res = cv2.add(result, bg)
is_success, im_buf_arr = cv2.imencode(".jpg", res)
return im_buf_arr.tobytes()
# cv2.imshow("Result", res)
# # cv2.imshow("Bg", bg)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
|
[
"cv2.bitwise_not",
"cv2.bitwise_and",
"numpy.frombuffer",
"cv2.imencode",
"cv2.add",
"cv2.resize"
] |
[((665, 686), 'cv2.bitwise_not', 'cv2.bitwise_not', (['mask'], {}), '(mask)\n', (680, 686), False, 'import cv2\n'), ((819, 855), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (834, 855), False, 'import cv2\n'), ((870, 897), 'cv2.resize', 'cv2.resize', (['bg', '(1280, 720)'], {}), '(bg, (1280, 720))\n', (880, 897), False, 'import cv2\n'), ((911, 949), 'cv2.bitwise_and', 'cv2.bitwise_and', (['bg', 'bg'], {'mask': 'mask_inv'}), '(bg, bg, mask=mask_inv)\n', (926, 949), False, 'import cv2\n'), ((965, 984), 'cv2.add', 'cv2.add', (['result', 'bg'], {}), '(result, bg)\n', (972, 984), False, 'import cv2\n'), ((1019, 1044), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'res'], {}), "('.jpg', res)\n", (1031, 1044), False, 'import cv2\n'), ((186, 223), 'numpy.frombuffer', 'np.frombuffer', (['bg_image_str', 'np.uint8'], {}), '(bg_image_str, np.uint8)\n', (199, 223), True, 'import numpy as np\n'), ((270, 308), 'numpy.frombuffer', 'np.frombuffer', (['src_image_str', 'np.uint8'], {}), '(src_image_str, np.uint8)\n', (283, 308), True, 'import numpy as np\n')]
|
import pylab
import numpy as np
from qiskit import Aer
from qiskit.utils import QuantumInstance
from qiskit.tools.visualization import plot_histogram
from qiskit.algorithms import Grover, AmplificationProblem
from qiskit.circuit.library.phase_oracle import PhaseOracle
### Finding Solutions to 3-SAT Problems
input_3sat_instance = '''
c example DIMACS-CNF 3-SAT
p cnf 3 5
-1 -2 -3 0
1 -2 3 0
1 2 -3 0
1 -2 -3 0
-1 2 3 0
''' # example problem has 3 solutions: (1 -2 3), (-1 -2 -3), (1 2 -3)
# Create corresponding oracle for Grover search using PhaseOracle (supports DIMACS-CNF format strings)
import os
import tempfile
from qiskit.exceptions import MissingOptionalLibraryError
fp = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
fp.write(input_3sat_instance)
file_name = fp.name
fp.close()
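# Note: delete=False plus the explicit os.remove() below is deliberate --
# PhaseOracle.from_dimacs_file needs a real filesystem path, and on Windows a
# NamedTemporaryFile cannot be reopened while its handle is still open.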
oracle = None
try:
oracle = PhaseOracle.from_dimacs_file(file_name)
except MissingOptionalLibraryError as ex:
print(ex)
finally:
os.remove(file_name)
# create Grover instance using oracle
problem = None
if oracle is not None:
problem = AmplificationProblem(oracle, is_good_state=oracle.evaluate_bitstring)
# configure backend & run Grover instance to obtain result
backend = Aer.get_backend('aer_simulator')
quantum_instance = QuantumInstance(backend, shots=1024)
grover = Grover(quantum_instance=quantum_instance)
result = None
if problem is not None:
result = grover.amplify(problem)
print(result.assignment)
plot_histogram(result.circuit_results[0]).show()
### Boolean Logical Expressions
# Construct oracle using arbitrary Boolean logic expression
expression = '(w ^ x) & ~(y ^ z) & (x & y & z)'
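# Added note: with the classical-expression syntax used here (^ = XOR,
# & = AND, ~ = NOT), this expression has exactly one satisfying assignment:
# x = y = z = 1 and w = 0.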
try:
oracle = PhaseOracle(expression)
problem = AmplificationProblem(oracle, is_good_state=oracle.evaluate_bitstring)
grover = Grover(quantum_instance=QuantumInstance(Aer.get_backend('aer_simulator'),
shots=1024))
result = grover.amplify(problem)
plot_histogram(result.circuit_results[0]).show()
except MissingOptionalLibraryError as ex:
print(ex)
|
[
"qiskit.algorithms.Grover",
"tempfile.NamedTemporaryFile",
"os.remove",
"qiskit.algorithms.AmplificationProblem",
"qiskit.tools.visualization.plot_histogram",
"qiskit.circuit.library.phase_oracle.PhaseOracle",
"qiskit.circuit.library.phase_oracle.PhaseOracle.from_dimacs_file",
"qiskit.Aer.get_backend",
"qiskit.utils.QuantumInstance"
] |
[((686, 739), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w+t"""', 'delete': '(False)'}), "(mode='w+t', delete=False)\n", (713, 739), False, 'import tempfile\n'), ((1194, 1226), 'qiskit.Aer.get_backend', 'Aer.get_backend', (['"""aer_simulator"""'], {}), "('aer_simulator')\n", (1209, 1226), False, 'from qiskit import Aer\n'), ((1246, 1282), 'qiskit.utils.QuantumInstance', 'QuantumInstance', (['backend'], {'shots': '(1024)'}), '(backend, shots=1024)\n', (1261, 1282), False, 'from qiskit.utils import QuantumInstance\n'), ((1292, 1333), 'qiskit.algorithms.Grover', 'Grover', ([], {'quantum_instance': 'quantum_instance'}), '(quantum_instance=quantum_instance)\n', (1298, 1333), False, 'from qiskit.algorithms import Grover, AmplificationProblem\n'), ((833, 872), 'qiskit.circuit.library.phase_oracle.PhaseOracle.from_dimacs_file', 'PhaseOracle.from_dimacs_file', (['file_name'], {}), '(file_name)\n', (861, 872), False, 'from qiskit.circuit.library.phase_oracle import PhaseOracle\n'), ((942, 962), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (951, 962), False, 'import os\n'), ((1054, 1123), 'qiskit.algorithms.AmplificationProblem', 'AmplificationProblem', (['oracle'], {'is_good_state': 'oracle.evaluate_bitstring'}), '(oracle, is_good_state=oracle.evaluate_bitstring)\n', (1074, 1123), False, 'from qiskit.algorithms import Grover, AmplificationProblem\n'), ((1654, 1677), 'qiskit.circuit.library.phase_oracle.PhaseOracle', 'PhaseOracle', (['expression'], {}), '(expression)\n', (1665, 1677), False, 'from qiskit.circuit.library.phase_oracle import PhaseOracle\n'), ((1692, 1761), 'qiskit.algorithms.AmplificationProblem', 'AmplificationProblem', (['oracle'], {'is_good_state': 'oracle.evaluate_bitstring'}), '(oracle, is_good_state=oracle.evaluate_bitstring)\n', (1712, 1761), False, 'from qiskit.algorithms import Grover, AmplificationProblem\n'), ((1442, 1483), 'qiskit.tools.visualization.plot_histogram', 'plot_histogram', (['result.circuit_results[0]'], {}), '(result.circuit_results[0])\n', (1456, 1483), False, 'from qiskit.tools.visualization import plot_histogram\n'), ((1923, 1964), 'qiskit.tools.visualization.plot_histogram', 'plot_histogram', (['result.circuit_results[0]'], {}), '(result.circuit_results[0])\n', (1937, 1964), False, 'from qiskit.tools.visualization import plot_histogram\n'), ((1815, 1847), 'qiskit.Aer.get_backend', 'Aer.get_backend', (['"""aer_simulator"""'], {}), "('aer_simulator')\n", (1830, 1847), False, 'from qiskit import Aer\n')]
|
# ******************************************************************************
#
# test_allauth_2f2a.py: allauth_2f2a tests
#
# SPDX-License-Identifier: Apache-2.0
#
# django-allauth-2f2a, a 2fa adapter for django-allauth.
#
# ******************************************************************************
#
# django-allauth-2f2a, a 2fa adapter for django-allauth.
#
# Copyright 2021 <NAME> <<EMAIL>>.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# ******************************************************************************
#
"""allauth_2f2a tests."""
import base64
import re
from urllib.parse import parse_qsl
from urllib.parse import urlencode
from urllib.parse import urlparse
from urllib.parse import urlunparse
from allauth.account.signals import user_logged_in
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.core.exceptions import ImproperlyConfigured
from django.forms import ValidationError
from django.test import TestCase
from django.test import override_settings
from django.urls import reverse
from django_otp.oath import TOTP
from pyfakefs.fake_filesystem_unittest import patchfs
from allauth_2f2a import app_settings
from allauth_2f2a.middleware import BaseRequire2FAMiddleware
def normalize_url(url):
"""Sort the URL query string parameters."""
url = str(url) # Coerce reverse_lazy() URLs.
scheme, netloc, path, params, query, fragment = urlparse(url)
query_parts = sorted(parse_qsl(query))
return urlunparse(
(
scheme,
netloc,
path,
params,
urlencode(query_parts),
fragment,
)
)
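# For example (illustrative): normalize_url("/a?b=2&a=1") returns "/a?a=1&b=2",
# making the query-string comparisons in the tests below order-independent.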
class Test2Factor(TestCase):
"""2fa tests."""
def setUp(self):
"""Set up Test2Factor()."""
self.user_logged_in_count = 0
user_logged_in.connect(self._login_callback)
def tearDown(self):
"""Reset after each test."""
# Set TWOFA_FORMS to default.
setattr(
app_settings,
"TWOFA_FORMS",
{
"authenticate": "allauth_2f2a.forms.TOTPAuthenticateForm",
"device": "allauth_2f2a.forms.TOTPDeviceForm",
"remove": "allauth_2f2a.forms.TOTPDeviceRemoveForm",
},
)
def _login_callback(self, sender, **kwargs):
"""Increment the login count."""
self.user_logged_in_count += 1
def test_standard_login(self):
"""Should login if 2fa is not configured."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
self.assertRedirects(
resp,
settings.LOGIN_REDIRECT_URL,
fetch_redirect_response=False,
)
# Ensure the signal is received as expected.
self.assertEqual(self.user_logged_in_count, 1)
def test_2fa_login(self):
"""Should login when 2fa is configured."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
totp_model = user.totpdevice_set.create()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
self.assertRedirects(
resp,
reverse("two-factor-authenticate"),
fetch_redirect_response=False,
)
# Now ensure that logging in actually works.
totp = TOTP(
totp_model.bin_key,
totp_model.step,
totp_model.t0,
totp_model.digits,
)
resp = self.client.post(
reverse("two-factor-authenticate"),
{"otp_token": totp.token()},
)
self.assertRedirects(
resp,
settings.LOGIN_REDIRECT_URL,
fetch_redirect_response=False,
)
# Ensure the signal is received as expected.
self.assertEqual(self.user_logged_in_count, 1)
def test_2fa_setup(self):
"""Should setup device and redirect to backup tokens."""
# Create a user.
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Login.
response = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
# GET the setup page.
response = self.client.get(
reverse("two-factor-setup"),
)
# Find the device created by GET.
device = user.totpdevice_set.filter(confirmed=False).first()
# Calculate the token.
totp = TOTP(
device.bin_key,
device.step,
device.t0,
device.digits,
)
# POST the token to the setup page.
response = self.client.post(
reverse("two-factor-setup"),
{
"token": totp.token(),
},
)
self.assertRedirects(
response,
reverse("two-factor-backup"),
)
def test_2fa_already_setup(self):
"""Should redirect to backup tokens."""
# Create a user.
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Login.
self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
# GET the setup page.
self.client.get(
reverse("two-factor-setup"),
)
# Find the device created by GET.
device = user.totpdevice_set.filter(confirmed=False).first()
# Calculate the token.
totp = TOTP(
device.bin_key,
device.step,
device.t0,
device.digits,
)
# POST the token to the setup page.
response = self.client.post(
reverse("two-factor-setup"),
{
"token": totp.token(),
},
)
self.assertRedirects(
response,
reverse("two-factor-backup"),
)
# GET the setup page again.
        response = self.client.get(
reverse("two-factor-setup"),
)
# Since 2FA is configured, should redirect to backup token
# generator.
self.assertRedirects(
response,
reverse("two-factor-backup"),
)
def test_2fa_generate_backup_tokens(self):
"""Should generate backup tokens."""
# Create a user.
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Login.
self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
# GET the setup page.
self.client.get(
reverse("two-factor-setup"),
)
# Find the device created by GET.
device = user.totpdevice_set.filter(confirmed=False).first()
# Calculate the token.
totp = TOTP(
device.bin_key,
device.step,
device.t0,
device.digits,
)
# POST the token to the setup page.
response = self.client.post(
reverse("two-factor-setup"),
{
"token": totp.token(),
},
)
self.assertRedirects(
response,
reverse("two-factor-backup"),
)
# POST to make new tokens.
response = self.client.post(
reverse("two-factor-backup"),
)
self.assertContains(
response,
"Two-Factor Authentication Backup Tokens",
)
def test_2fa_setup_bad_token(self):
"""Should raise ``django.forms.ValidationError``."""
# Create a user.
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Login.
self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
# GET the setup page.
self.client.get(
reverse("two-factor-setup"),
)
# POST the token to the setup page.
response = self.client.post(
reverse("two-factor-setup"),
{
"token": "123456",
},
)
# Should rerender the form successfully.
self.assertEqual(
response.status_code,
200,
)
# Should contain the error message.
self.assertContains(
response,
"Setup Two-Factor Authentication",
)
def test_2fa_remove(self):
"""Should remove device and redirect to backup tokens."""
# Create a user.
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Login.
response = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
# GET the setup page.
response = self.client.get(
reverse("two-factor-setup"),
)
# Find the device created by GET.
device = user.totpdevice_set.filter(confirmed=False).first()
# Calculate the token.
totp = TOTP(
device.bin_key,
device.step,
device.t0,
device.digits,
)
# POST the token to the setup page.
response = self.client.post(
reverse("two-factor-setup"),
{
"token": totp.token(),
},
)
# Should redirect to 2FA backup token generator.
self.assertRedirects(
response,
reverse("two-factor-backup"),
)
# POST a device removal request.
response = self.client.post(
reverse("two-factor-remove"),
)
# Should redirect to 2FA setup.
self.assertRedirects(
response,
reverse("two-factor-setup"),
)
def test_2fa_login_custom_form(self):
"""Should login when 2fa is configured."""
setattr(
app_settings,
"TWOFA_FORMS",
{
"authentication": "tests.forms.CustomTOTPAuthenticateForm",
"device": "allauth_2f2a.forms.TOTPDeviceForm",
"remove": "allauth_2f2a.forms.TOTPDeviceRemoveForm",
},
)
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
totp_model = user.totpdevice_set.create()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
self.assertRedirects(
resp,
reverse("two-factor-authenticate"),
fetch_redirect_response=False,
)
# Now ensure that logging in actually works.
totp = TOTP(
totp_model.bin_key,
totp_model.step,
totp_model.t0,
totp_model.digits,
)
resp = self.client.post(
reverse("two-factor-authenticate"),
{"otp_token": totp.token()},
)
self.assertRedirects(
resp,
settings.LOGIN_REDIRECT_URL,
fetch_redirect_response=False,
)
# Ensure the signal is received as expected.
self.assertEqual(self.user_logged_in_count, 1)
def test_invalid_2fa_login(self):
"""Should not login when wrong 2fa code is provided."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
user.totpdevice_set.create()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
self.assertRedirects(
resp,
reverse("two-factor-authenticate"),
fetch_redirect_response=False,
)
# Ensure that logging in does not work with invalid token
resp = self.client.post(
reverse("two-factor-authenticate"),
{"otp_token": "invalid"},
)
self.assertEqual(resp.status_code, 200)
def test_2fa_redirect(self):
"""Should redirect if 2fa is not necessry."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Not logged in.
resp = self.client.get(reverse("two-factor-authenticate"))
self.assertRedirects(
resp,
reverse("account_login"),
fetch_redirect_response=False,
)
# Logged in.
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
resp = self.client.get(reverse("two-factor-authenticate"))
self.assertRedirects(
resp,
reverse("account_login"),
fetch_redirect_response=False,
)
def test_2fa_reset_flow(self):
"""Should redirect to login on 2fa interruption."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
user.totpdevice_set.create()
resp = self.client.post(
reverse("account_login"), {"login": "john", "password": "<PASSWORD>"}
)
self.assertRedirects(
resp, reverse("two-factor-authenticate"), fetch_redirect_response=False
)
# The user ID should be in the session.
self.assertIn("allauth_2f2a_user_id", self.client.session)
# Navigate to a different page.
self.client.get(reverse("account_login"))
# The middleware should reset the login flow.
self.assertNotIn("allauth_2f2a_user_id", self.client.session)
# Trying to continue with two-factor without logging in again will
# redirect to login.
resp = self.client.get(reverse("two-factor-authenticate"))
self.assertRedirects(
resp, reverse("account_login"), fetch_redirect_response=False
)
def test_2fa_login_forwarding_get_parameters(self):
"""Should pass route parameters through 2fa views."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
user.totpdevice_set.create()
# Add a next to unnamed-view.
resp = self.client.post(
reverse("account_login") + "?existing=param&next=unnamed-view",
{"login": "john", "password": "<PASSWORD>"},
follow=True,
)
# Ensure that the unnamed-view is still being forwarded to.
resp.redirect_chain[-1] = (
normalize_url(resp.redirect_chain[-1][0]),
resp.redirect_chain[-1][1],
)
self.assertRedirects(
resp,
normalize_url(
reverse("two-factor-authenticate")
+ "?existing=param&next=unnamed-view",
),
fetch_redirect_response=False,
)
def test_2fa_login_forwarding_next_via_post(self):
"""Should respect ``next`` parameter on POST."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
user.totpdevice_set.create()
# Add a next to unnamed-view.
resp = self.client.post(
reverse("account_login") + "?existing=param",
{"login": "john", "password": "<PASSWORD>", "next": "unnamed-view"},
follow=True,
)
# Ensure that the unnamed-view is still being forwarded to,
# preserving existing query params.
resp.redirect_chain[-1] = (
normalize_url(resp.redirect_chain[-1][0]),
resp.redirect_chain[-1][1],
)
self.assertRedirects(
resp,
normalize_url(
reverse("two-factor-authenticate") + "?existing=param&next=unnamed-view"
),
fetch_redirect_response=False,
)
def test_anonymous(self):
"""Anonymous users should not access 2fa views."""
# The authentication page redirects to the login page.
url = reverse("two-factor-authenticate")
resp = self.client.get(url)
self.assertRedirects(
resp, reverse("account_login"), fetch_redirect_response=False
)
# Some pages redirect to the login page and then will redirect back.
for url in [
"two-factor-setup",
"two-factor-backup",
"two-factor-remove",
]:
url = reverse(url)
resp = self.client.get(url)
self.assertRedirects(
resp,
reverse("account_login") + "?next=" + url,
fetch_redirect_response=False,
)
def test_unnamed_view(self):
"""Should reset login if 2fa is interrupted."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
user.totpdevice_set.create()
resp = self.client.post(
reverse("account_login"), {"login": "john", "password": "<PASSWORD>"}
)
self.assertRedirects(
resp, reverse("two-factor-authenticate"), fetch_redirect_response=False
)
# The user ID should be in the session.
self.assertIn("allauth_2f2a_user_id", self.client.session)
# Navigate to a different (unnamed) page.
resp = self.client.get("/unnamed-view")
# The middleware should reset the login flow.
self.assertNotIn("allauth_2f2a_user_id", self.client.session)
# Trying to continue with two-factor without logging in again
# will redirect to login.
resp = self.client.get(reverse("two-factor-authenticate"))
self.assertRedirects(
resp, reverse("account_login"), fetch_redirect_response=False
)
def test_backwards_compatible_url(self):
"""Should still work."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
totp_model = user.totpdevice_set.create()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
self.assertRedirects(
resp,
reverse("two-factor-authenticate"),
fetch_redirect_response=False,
)
# Now ensure that logging in actually works.
totp = TOTP(
totp_model.bin_key,
totp_model.step,
totp_model.t0,
totp_model.digits,
)
# The old URL doesn't have a trailing slash.
url = reverse("two-factor-authenticate").rstrip("/")
resp = self.client.post(url, {"otp_token": totp.token()})
self.assertRedirects(
resp,
settings.LOGIN_REDIRECT_URL,
fetch_redirect_response=False,
)
# Ensure the signal is received as expected.
self.assertEqual(self.user_logged_in_count, 1)
def test_not_configured_redirect(self):
"""Should redirect if 2fa is not configured."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Login.
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
# The 2FA pages should redirect.
for url_name in ["two-factor-backup", "two-factor-remove"]:
resp = self.client.get(reverse(url_name))
self.assertRedirects(
resp,
reverse("two-factor-setup"),
fetch_redirect_response=False,
)
class Require2FA(BaseRequire2FAMiddleware):
"""Require 2fa if configured."""
def require_2fa(self, request):
"""Determine if 2fa is required if configured."""
return True
class NoRequire2FA(BaseRequire2FAMiddleware):
"""Require 2fa if configured."""
def require_2fa(self, request):
"""Determine if 2fa is required if configured."""
return False
class Require2FANonexistentAllowed(BaseRequire2FAMiddleware):
"""Require 2fa if configured."""
allowed_pages = [
"bob-is-your-uncle",
"account_logout",
"account_change_password",
"account_reset_password",
"two-factor-setup",
]
def require_2fa(self, request):
"""Determine if 2fa is required if configured."""
return True
class Require2FAWithMessage(BaseRequire2FAMiddleware):
"""Require 2fa if configured and add a message."""
def require_2fa(self, request):
"""Determine if 2fa is required and add a message."""
messages.info(
request,
"2fa required",
extra_tags="2fa_required",
)
return True
@override_settings(
# Don't redirect to an "allowed" URL.
LOGIN_REDIRECT_URL="/unnamed-view",
# Add the middleware that requires 2FA.
MIDDLEWARE=settings.MIDDLEWARE
+ ("allauth_2f2a.middleware.BaseRequire2FAMiddleware",),
)
class TestRequire2FAMiddlewareNotConfigured(TestCase):
"""Unconfigured 2fa middleware tests."""
def test_require2fa_not_implemented(self):
"""Should raise ``NotImplementedError``."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
self.assertRaises(
NotImplementedError,
self.client.post,
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
follow=True,
)
@override_settings(
# Don't redirect to an "allowed" URL.
LOGIN_REDIRECT_URL="/unnamed-view",
# Add the middleware that requires 2FA.
MIDDLEWARE=settings.MIDDLEWARE + ("tests.test_allauth_2f2a.Require2FA",),
)
class TestRequire2FAMiddleware(TestCase):
"""2fa middleware tests."""
def test_no_2fa(self):
"""Should redirect to setup if 2fa is not configured."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
follow=True,
)
# The user is redirected to the 2FA setup page.
self.assertRedirects(
resp,
reverse("two-factor-setup"),
fetch_redirect_response=False,
)
@override_settings(
# Don't redirect to an "allowed" URL.
LOGIN_REDIRECT_URL="/unnamed-view",
# Add the middleware that requires 2FA.
MIDDLEWARE=settings.MIDDLEWARE + ("tests.test_allauth_2f2a.NoRequire2FA",),
)
def test_no_2fa_not_required(self):
"""Should redirect to ``LOGIN_REDIRECT_URL``."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
follow=True,
)
# The user is redirected to ``LOGIN_REDIRECT_URL``.
self.assertRedirects(
resp,
settings.LOGIN_REDIRECT_URL,
fetch_redirect_response=False,
)
@override_settings(
# Don't redirect to an "allowed" URL.
LOGIN_REDIRECT_URL="/unnamed-view",
# Add the middleware that requires 2FA.
MIDDLEWARE=settings.MIDDLEWARE
+ ("tests.test_allauth_2f2a.Require2FANonexistentAllowed",),
)
def test_no_2fa_nonexistent_allowed(self):
"""Should warn on nonexistent URL."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
with self.assertWarns(UserWarning):
self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
follow=True,
)
def test_2fa(self):
"""Should login when 2fa is configured."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
totp_model = user.totpdevice_set.create()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
self.assertRedirects(
resp,
reverse("two-factor-authenticate"),
fetch_redirect_response=False,
)
# Now ensure that logging in actually works.
totp = TOTP(
totp_model.bin_key,
totp_model.step,
totp_model.t0,
totp_model.digits,
)
resp = self.client.post(
reverse("two-factor-authenticate"),
{"otp_token": totp.token()},
)
# The user ends up on the normal redirect login page.
self.assertRedirects(
resp,
settings.LOGIN_REDIRECT_URL,
fetch_redirect_response=False,
)
def test_2fa_already_configured(self):
"""Should access all URLs.."""
# Create a user.
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Login.
response = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
# GET the setup page.
response = self.client.get(
reverse("two-factor-setup"),
)
# Find the device created by GET.
device = user.totpdevice_set.filter(confirmed=False).first()
# Calculate the token.
totp = TOTP(
device.bin_key,
device.step,
device.t0,
device.digits,
)
# POST the token to the setup page.
response = self.client.post(
reverse("two-factor-setup"),
{
"token": totp.token(),
},
)
# Should redirect to 2FA backup token generator.
self.assertRedirects(
response,
reverse("two-factor-backup"),
)
# Load a 2fa protected URL.
response = self.client.get(
settings.LOGIN_REDIRECT_URL,
)
# Should load successfully.
self.assertEqual(
response.status_code,
200,
)
@override_settings(
INSTALLED_APPS=settings.INSTALLED_APPS + ("django.contrib.messages",),
# This doesn't seem to stack nicely with the class-based one,
# so add the middleware here.
MIDDLEWARE=settings.MIDDLEWARE
+ (
"tests.test_allauth_2f2a.Require2FA",
"django.contrib.messages.middleware.MessageMiddleware",
),
)
def test_no_2fa_messages(self):
"""Should redirect to 2fa setup."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
follow=True,
)
# The user is redirected to the 2FA setup page.
self.assertRedirects(
resp, reverse("two-factor-setup"), fetch_redirect_response=False
)
@override_settings(
INSTALLED_APPS=settings.INSTALLED_APPS + ("django.contrib.messages",),
MIDDLEWARE=settings.MIDDLEWARE
+ (
"tests.test_allauth_2f2a.Require2FAWithMessage",
"django.contrib.messages.middleware.MessageMiddleware",
),
)
def test_with_2fa_messages(self):
"""Should redirect to 2fa setup."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
response = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
follow=True,
)
# The user is redirected to the 2FA setup page.
self.assertRedirects(
response,
reverse("two-factor-setup"),
fetch_redirect_response=False,
)
class TestQRCodeGeneration(TestCase):
"""Tests for QR code generation via file or data: protocol."""
def tearDown(self):
"""Reset settings to default."""
setattr(app_settings, "QRCODE_TYPE", "data")
def test_2fa_setup_data(self):
"""Test 2FA setup using 'data:' protocol."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
self.client.post(reverse("account_login"), {"login": "john", "password": "<PASSWORD>"})
response = self.client.get(reverse("two-factor-setup"))
# Should have the data: URI.
self.assertRegex(
response.content.decode(),
r"data:image\/svg\+xml;base64,",
)
# Should have a valid SVG image in the base64 string.
# Get the base64 string.
svg_match = re.search(
r"\"data:image\/svg\+xml;base64,(.*?)\"",
response.content.decode(),
)
# Assert the string is base64 encoded.
self.assertEqual(
svg_match.group(1),
base64.b64encode(base64.b64decode(svg_match.group(1))).decode(),
)
# Assert the string is a valid SVG image. Well, SVGish at least.
self.assertRegex(
base64.b64decode(svg_match.group(1)).decode(),
r"<svg.*?>",
)
@patchfs
def test_2fa_setup_file(self, fs):
"""Test 2FA setup using an SVG file."""
# Create the fake qrcodes directory.
fs.create_dir("qrcodes")
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
setattr(app_settings, "QRCODE_TYPE", "file")
self.client.post(reverse("account_login"), {"login": "john", "password": "<PASSWORD>"})
response = self.client.get(reverse("two-factor-setup"))
self.assertRegex(
response.content.decode(),
r"qrcodes\/[a-f0-9]{32}\.svg",
)
def test_2fa_setup_file_no_dir(self):
"""Test 2FA setup using an SVG file without the qr code directory."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
setattr(app_settings, "QRCODE_TYPE", "file")
self.client.post(
reverse("account_login"),
{
"login": "john",
"password": "<PASSWORD>",
},
)
self.assertRaises(
ImproperlyConfigured,
self.client.get,
reverse("two-factor-setup"),
)
|
[
"django_otp.oath.TOTP",
"allauth.account.signals.user_logged_in.connect",
"urllib.parse.urlencode",
"django.contrib.auth.get_user_model",
"django.urls.reverse",
"django.contrib.messages.info",
"urllib.parse.parse_qsl",
"django.test.override_settings",
"urllib.parse.urlparse"
] |
[((22082, 22228), 'django.test.override_settings', 'override_settings', ([], {'LOGIN_REDIRECT_URL': '"""/unnamed-view"""', 'MIDDLEWARE': "(settings.MIDDLEWARE + ('allauth_2f2a.middleware.BaseRequire2FAMiddleware',))"}), "(LOGIN_REDIRECT_URL='/unnamed-view', MIDDLEWARE=settings.\n MIDDLEWARE + ('allauth_2f2a.middleware.BaseRequire2FAMiddleware',))\n", (22099, 22228), False, 'from django.test import override_settings\n'), ((22873, 23005), 'django.test.override_settings', 'override_settings', ([], {'LOGIN_REDIRECT_URL': '"""/unnamed-view"""', 'MIDDLEWARE': "(settings.MIDDLEWARE + ('tests.test_allauth_2f2a.Require2FA',))"}), "(LOGIN_REDIRECT_URL='/unnamed-view', MIDDLEWARE=settings.\n MIDDLEWARE + ('tests.test_allauth_2f2a.Require2FA',))\n", (22890, 23005), False, 'from django.test import override_settings\n'), ((1960, 1973), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (1968, 1973), False, 'from urllib.parse import urlparse\n'), ((23758, 23892), 'django.test.override_settings', 'override_settings', ([], {'LOGIN_REDIRECT_URL': '"""/unnamed-view"""', 'MIDDLEWARE': "(settings.MIDDLEWARE + ('tests.test_allauth_2f2a.NoRequire2FA',))"}), "(LOGIN_REDIRECT_URL='/unnamed-view', MIDDLEWARE=settings.\n MIDDLEWARE + ('tests.test_allauth_2f2a.NoRequire2FA',))\n", (23775, 23892), False, 'from django.test import override_settings\n'), ((24599, 24749), 'django.test.override_settings', 'override_settings', ([], {'LOGIN_REDIRECT_URL': '"""/unnamed-view"""', 'MIDDLEWARE': "(settings.MIDDLEWARE + (\n 'tests.test_allauth_2f2a.Require2FANonexistentAllowed',))"}), "(LOGIN_REDIRECT_URL='/unnamed-view', MIDDLEWARE=settings.\n MIDDLEWARE + ('tests.test_allauth_2f2a.Require2FANonexistentAllowed',))\n", (24616, 24749), False, 'from django.test import override_settings\n'), ((27791, 28022), 'django.test.override_settings', 'override_settings', ([], {'INSTALLED_APPS': "(settings.INSTALLED_APPS + ('django.contrib.messages',))", 'MIDDLEWARE': "(settings.MIDDLEWARE + ('tests.test_allauth_2f2a.Require2FA',\n 'django.contrib.messages.middleware.MessageMiddleware'))"}), "(INSTALLED_APPS=settings.INSTALLED_APPS + (\n 'django.contrib.messages',), MIDDLEWARE=settings.MIDDLEWARE + (\n 'tests.test_allauth_2f2a.Require2FA',\n 'django.contrib.messages.middleware.MessageMiddleware'))\n", (27808, 28022), False, 'from django.test import override_settings\n'), ((28731, 28973), 'django.test.override_settings', 'override_settings', ([], {'INSTALLED_APPS': "(settings.INSTALLED_APPS + ('django.contrib.messages',))", 'MIDDLEWARE': "(settings.MIDDLEWARE + ('tests.test_allauth_2f2a.Require2FAWithMessage',\n 'django.contrib.messages.middleware.MessageMiddleware'))"}), "(INSTALLED_APPS=settings.INSTALLED_APPS + (\n 'django.contrib.messages',), MIDDLEWARE=settings.MIDDLEWARE + (\n 'tests.test_allauth_2f2a.Require2FAWithMessage',\n 'django.contrib.messages.middleware.MessageMiddleware'))\n", (28748, 28973), False, 'from django.test import override_settings\n'), ((1999, 2015), 'urllib.parse.parse_qsl', 'parse_qsl', (['query'], {}), '(query)\n', (2008, 2015), False, 'from urllib.parse import parse_qsl\n'), ((2359, 2403), 'allauth.account.signals.user_logged_in.connect', 'user_logged_in.connect', (['self._login_callback'], {}), '(self._login_callback)\n', (2381, 2403), False, 'from allauth.account.signals import user_logged_in\n'), ((4166, 4241), 'django_otp.oath.TOTP', 'TOTP', (['totp_model.bin_key', 'totp_model.step', 'totp_model.t0', 'totp_model.digits'], {}), '(totp_model.bin_key, totp_model.step, totp_model.t0, totp_model.digits)\n', (4170, 4241), False, 'from django_otp.oath import TOTP\n'), ((5366, 5425), 'django_otp.oath.TOTP', 'TOTP', (['device.bin_key', 'device.step', 'device.t0', 'device.digits'], {}), '(device.bin_key, device.step, device.t0, device.digits)\n', (5370, 5425), False, 'from django_otp.oath import TOTP\n'), ((6442, 6501), 'django_otp.oath.TOTP', 'TOTP', (['device.bin_key', 'device.step', 'device.t0', 'device.digits'], {}), '(device.bin_key, device.step, device.t0, device.digits)\n', (6446, 6501), False, 'from django_otp.oath import TOTP\n'), ((7830, 7889), 'django_otp.oath.TOTP', 'TOTP', (['device.bin_key', 'device.step', 'device.t0', 'device.digits'], {}), '(device.bin_key, device.step, device.t0, device.digits)\n', (7834, 7889), False, 'from django_otp.oath import TOTP\n'), ((10175, 10234), 'django_otp.oath.TOTP', 'TOTP', (['device.bin_key', 'device.step', 'device.t0', 'device.digits'], {}), '(device.bin_key, device.step, device.t0, device.digits)\n', (10179, 10234), False, 'from django_otp.oath import TOTP\n'), ((11875, 11950), 'django_otp.oath.TOTP', 'TOTP', (['totp_model.bin_key', 'totp_model.step', 'totp_model.t0', 'totp_model.digits'], {}), '(totp_model.bin_key, totp_model.step, totp_model.t0, totp_model.digits)\n', (11879, 11950), False, 'from django_otp.oath import TOTP\n'), ((17278, 17312), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (17285, 17312), False, 'from django.urls import reverse\n'), ((19656, 19731), 'django_otp.oath.TOTP', 'TOTP', (['totp_model.bin_key', 'totp_model.step', 'totp_model.t0', 'totp_model.digits'], {}), '(totp_model.bin_key, totp_model.step, totp_model.t0, totp_model.digits)\n', (19660, 19731), False, 'from django_otp.oath import TOTP\n'), ((21946, 22011), 'django.contrib.messages.info', 'messages.info', (['request', '"""2fa required"""'], {'extra_tags': '"""2fa_required"""'}), "(request, '2fa required', extra_tags='2fa_required')\n", (21959, 22011), False, 'from django.contrib import messages\n'), ((25915, 25990), 'django_otp.oath.TOTP', 'TOTP', (['totp_model.bin_key', 'totp_model.step', 'totp_model.t0', 'totp_model.digits'], {}), '(totp_model.bin_key, totp_model.step, totp_model.t0, totp_model.digits)\n', (25919, 25990), False, 'from django_otp.oath import TOTP\n'), ((27055, 27114), 'django_otp.oath.TOTP', 'TOTP', (['device.bin_key', 'device.step', 'device.t0', 'device.digits'], {}), '(device.bin_key, device.step, device.t0, device.digits)\n', (27059, 27114), False, 'from django_otp.oath import TOTP\n'), ((2141, 2163), 'urllib.parse.urlencode', 'urlencode', (['query_parts'], {}), '(query_parts)\n', (2150, 2163), False, 'from urllib.parse import urlencode\n'), ((3209, 3233), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (3216, 3233), False, 'from django.urls import reverse\n'), ((3855, 3879), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (3862, 3879), False, 'from django.urls import reverse\n'), ((4008, 4042), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (4015, 4042), False, 'from django.urls import reverse\n'), ((4346, 4380), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (4353, 4380), False, 'from django.urls import reverse\n'), ((4996, 5020), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (5003, 5020), False, 'from django.urls import reverse\n'), ((5168, 5195), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (5175, 5195), False, 'from django.urls import reverse\n'), ((5579, 5606), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (5586, 5606), False, 'from django.urls import reverse\n'), ((5751, 5779), 'django.urls.reverse', 'reverse', (['"""two-factor-backup"""'], {}), "('two-factor-backup')\n", (5758, 5779), False, 'from django.urls import reverse\n'), ((6083, 6107), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (6090, 6107), False, 'from django.urls import reverse\n'), ((6244, 6271), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (6251, 6271), False, 'from django.urls import reverse\n'), ((6655, 6682), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (6662, 6682), False, 'from django.urls import reverse\n'), ((6827, 6855), 'django.urls.reverse', 'reverse', (['"""two-factor-backup"""'], {}), "('two-factor-backup')\n", (6834, 6855), False, 'from django.urls import reverse\n'), ((6941, 6968), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (6948, 6968), False, 'from django.urls import reverse\n'), ((7133, 7161), 'django.urls.reverse', 'reverse', (['"""two-factor-backup"""'], {}), "('two-factor-backup')\n", (7140, 7161), False, 'from django.urls import reverse\n'), ((7471, 7495), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (7478, 7495), False, 'from django.urls import reverse\n'), ((7632, 7659), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (7639, 7659), False, 'from django.urls import reverse\n'), ((8043, 8070), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (8050, 8070), False, 'from django.urls import reverse\n'), ((8215, 8243), 'django.urls.reverse', 'reverse', (['"""two-factor-backup"""'], {}), "('two-factor-backup')\n", (8222, 8243), False, 'from django.urls import reverse\n'), ((8340, 8368), 'django.urls.reverse', 'reverse', (['"""two-factor-backup"""'], {}), "('two-factor-backup')\n", (8347, 8368), False, 'from django.urls import reverse\n'), ((8804, 8828), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (8811, 8828), False, 'from django.urls import reverse\n'), ((8965, 8992), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (8972, 8992), False, 'from django.urls import reverse\n'), ((9098, 9125), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (9105, 9125), False, 'from django.urls import reverse\n'), ((9805, 9829), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (9812, 9829), False, 'from django.urls import reverse\n'), ((9977, 10004), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (9984, 10004), False, 'from django.urls import reverse\n'), ((10388, 10415), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (10395, 10415), False, 'from django.urls import reverse\n'), ((10617, 10645), 'django.urls.reverse', 'reverse', (['"""two-factor-backup"""'], {}), "('two-factor-backup')\n", (10624, 10645), False, 'from django.urls import reverse\n'), ((10748, 10776), 'django.urls.reverse', 'reverse', (['"""two-factor-remove"""'], {}), "('two-factor-remove')\n", (10755, 10776), False, 'from django.urls import reverse\n'), ((10893, 10920), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (10900, 10920), False, 'from django.urls import reverse\n'), ((11564, 11588), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (11571, 11588), False, 'from django.urls import reverse\n'), ((11717, 11751), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (11724, 11751), False, 'from django.urls import reverse\n'), ((12055, 12089), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (12062, 12089), False, 'from django.urls import reverse\n'), ((12703, 12727), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (12710, 12727), False, 'from django.urls import reverse\n'), ((12856, 12890), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (12863, 12890), False, 'from django.urls import reverse\n'), ((13057, 13091), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (13064, 13091), False, 'from django.urls import reverse\n'), ((13458, 13492), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (13465, 13492), False, 'from django.urls import reverse\n'), ((13554, 13578), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (13561, 13578), False, 'from django.urls import reverse\n'), ((13700, 13724), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (13707, 13724), False, 'from django.urls import reverse\n'), ((13825, 13859), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (13832, 13859), False, 'from django.urls import reverse\n'), ((13921, 13945), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (13928, 13945), False, 'from django.urls import reverse\n'), ((14303, 14327), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (14310, 14327), False, 'from django.urls import reverse\n'), ((14431, 14465), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (14438, 14465), False, 'from django.urls import reverse\n'), ((14688, 14712), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (14695, 14712), False, 'from django.urls import reverse\n'), ((14975, 15009), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (14982, 15009), False, 'from django.urls import reverse\n'), ((15060, 15084), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (15067, 15084), False, 'from django.urls import reverse\n'), ((17397, 17421), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (17404, 17421), False, 'from django.urls import reverse\n'), ((17689, 17701), 'django.urls.reverse', 'reverse', (['url'], {}), '(url)\n', (17696, 17701), False, 'from django.urls import reverse\n'), ((18215, 18239), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (18222, 18239), False, 'from django.urls import reverse\n'), ((18343, 18377), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (18350, 18377), False, 'from django.urls import reverse\n'), ((18895, 18929), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (18902, 18929), False, 'from django.urls import reverse\n'), ((18980, 19004), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (18987, 19004), False, 'from django.urls import reverse\n'), ((19345, 19369), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (19352, 19369), False, 'from django.urls import reverse\n'), ((19498, 19532), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (19505, 19532), False, 'from django.urls import reverse\n'), ((20512, 20536), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (20519, 20536), False, 'from django.urls import reverse\n'), ((22752, 22776), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (22759, 22776), False, 'from django.urls import reverse\n'), ((23435, 23459), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (23442, 23459), False, 'from django.urls import reverse\n'), ((23670, 23697), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (23677, 23697), False, 'from django.urls import reverse\n'), ((24272, 24296), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (24279, 24296), False, 'from django.urls import reverse\n'), ((25604, 25628), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (25611, 25628), False, 'from django.urls import reverse\n'), ((25757, 25791), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (25764, 25791), False, 'from django.urls import reverse\n'), ((26095, 26129), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (26102, 26129), False, 'from django.urls import reverse\n'), ((26685, 26709), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (26692, 26709), False, 'from django.urls import reverse\n'), ((26857, 26884), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (26864, 26884), False, 'from django.urls import reverse\n'), ((27268, 27295), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (27275, 27295), False, 'from django.urls import reverse\n'), ((27497, 27525), 'django.urls.reverse', 'reverse', (['"""two-factor-backup"""'], {}), "('two-factor-backup')\n", (27504, 27525), False, 'from django.urls import reverse\n'), ((28433, 28457), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (28440, 28457), False, 'from django.urls import reverse\n'), ((28656, 28683), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (28663, 28683), False, 'from django.urls import reverse\n'), ((29282, 29306), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (29289, 29306), False, 'from django.urls import reverse\n'), ((29521, 29548), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (29528, 29548), False, 'from django.urls import reverse\n'), ((30067, 30091), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (30074, 30091), False, 'from django.urls import reverse\n'), ((30173, 30200), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (30180, 30200), False, 'from django.urls import reverse\n'), ((31358, 31382), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (31365, 31382), False, 'from django.urls import reverse\n'), ((31464, 31491), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (31471, 31491), False, 'from django.urls import reverse\n'), ((31948, 31972), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (31955, 31972), False, 'from django.urls import reverse\n'), ((32191, 32218), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (32198, 32218), False, 'from django.urls import reverse\n'), ((15490, 15514), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (15497, 15514), False, 'from django.urls import reverse\n'), ((16463, 16487), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (16470, 16487), False, 'from django.urls import reverse\n'), ((19859, 19893), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (19866, 19893), False, 'from django.urls import reverse\n'), ((20750, 20767), 'django.urls.reverse', 'reverse', (['url_name'], {}), '(url_name)\n', (20757, 20767), False, 'from django.urls import reverse\n'), ((20841, 20868), 'django.urls.reverse', 'reverse', (['"""two-factor-setup"""'], {}), "('two-factor-setup')\n", (20848, 20868), False, 'from django.urls import reverse\n'), ((25178, 25202), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (25185, 25202), False, 'from django.urls import reverse\n'), ((3054, 3070), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (3068, 3070), False, 'from django.contrib.auth import get_user_model\n'), ((3650, 3666), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (3664, 3666), False, 'from django.contrib.auth import get_user_model\n'), ((4820, 4836), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (4834, 4836), False, 'from django.contrib.auth import get_user_model\n'), ((5918, 5934), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (5932, 5934), False, 'from django.contrib.auth import get_user_model\n'), ((7306, 7322), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (7320, 7322), False, 'from django.contrib.auth import get_user_model\n'), ((8639, 8655), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (8653, 8655), False, 'from django.contrib.auth import get_user_model\n'), ((9629, 9645), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (9643, 9645), False, 'from django.contrib.auth import get_user_model\n'), ((11359, 11375), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (11373, 11375), False, 'from django.contrib.auth import get_user_model\n'), ((12511, 12527), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (12525, 12527), False, 'from django.contrib.auth import get_user_model\n'), ((13292, 13308), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (13306, 13308), False, 'from django.contrib.auth import get_user_model\n'), ((14111, 14127), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (14125, 14127), False, 'from django.contrib.auth import get_user_model\n'), ((15260, 15276), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (15274, 15276), False, 'from django.contrib.auth import get_user_model\n'), ((15947, 15981), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (15954, 15981), False, 'from django.urls import reverse\n'), ((16233, 16249), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (16247, 16249), False, 'from django.contrib.auth import get_user_model\n'), ((16970, 17004), 'django.urls.reverse', 'reverse', (['"""two-factor-authenticate"""'], {}), "('two-factor-authenticate')\n", (16977, 17004), False, 'from django.urls import reverse\n'), ((18023, 18039), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (18037, 18039), False, 'from django.contrib.auth import get_user_model\n'), ((19140, 19156), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (19154, 19156), False, 'from django.contrib.auth import get_user_model\n'), ((20340, 20356), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (20354, 20356), False, 'from django.contrib.auth import get_user_model\n'), ((22540, 22556), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (22554, 22556), False, 'from django.contrib.auth import get_user_model\n'), ((23280, 23296), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (23294, 23296), False, 'from django.contrib.auth import get_user_model\n'), ((24117, 24133), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (24131, 24133), False, 'from django.contrib.auth import get_user_model\n'), ((24978, 24994), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (24992, 24994), False, 'from django.contrib.auth import get_user_model\n'), ((25399, 25415), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (25413, 25415), False, 'from django.contrib.auth import get_user_model\n'), ((26509, 26525), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (26523, 26525), False, 'from django.contrib.auth import get_user_model\n'), ((28278, 28294), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (28292, 28294), False, 'from django.contrib.auth import get_user_model\n'), ((29123, 29139), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (29137, 29139), False, 'from django.contrib.auth import get_user_model\n'), ((29933, 29949), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (29947, 29949), False, 'from django.contrib.auth import get_user_model\n'), ((31171, 31187), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (31185, 31187), False, 'from django.contrib.auth import get_user_model\n'), ((31748, 31764), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (31762, 31764), False, 'from django.contrib.auth import get_user_model\n'), ((17814, 17838), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (17821, 17838), False, 'from django.urls import reverse\n')]
|
import cPickle as pickle
from ram.classes.module import UnitService
from ram.classes.module import UseFilename
from ram.classes import DumbResults
import ram.process
from ram.osutils import setenv
class __api__(UnitService):
"""runs dialogs to interact with user
To run dialogs for the unit:
$ ram input <namepath> [<param>] ...
"""
_wrapper = UseFilename('input', required=True)
_results = DumbResults
def __call__(self, ctx, *args, **kwargs):
setenv('RAMARGS', pickle.dumps(args))
if ctx.filename:
ram.process.invoke(ctx.filename, *args, environ=ctx._environ())
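# Illustrative invocation, per the docstring above (namepath and parameters
# are placeholders, not real units): ``ram input mymodule/setup lang=en``
# pickles the positional parameters into the RAMARGS environment variable and
# then runs the unit's "input" script.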
|
[
"cPickle.dumps",
"ram.classes.module.UseFilename"
] |
[((371, 406), 'ram.classes.module.UseFilename', 'UseFilename', (['"""input"""'], {'required': '(True)'}), "('input', required=True)\n", (382, 406), False, 'from ram.classes.module import UseFilename\n'), ((508, 526), 'cPickle.dumps', 'pickle.dumps', (['args'], {}), '(args)\n', (520, 526), True, 'import cPickle as pickle\n')]
|
"""
.. module:: Multi
:platform: Unix, Windows
    :synopsis: Provides container classes for spline geometries
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import abc
import warnings
from functools import partial
from multiprocessing import Value, Lock
from . import abstract
from . import vis
from . import voxelize
from . import utilities
from . import tessellate
from . import _utilities as utl
from .exceptions import GeomdlException
@utl.add_metaclass(abc.ABCMeta)
class AbstractContainer(abstract.GeomdlBase):
""" Abstract class for geometry containers.
    This class implements the Python iterator protocol, so any instance of this class can be directly used in
    a for loop.
This class provides the following properties:
* :py:attr:`type` = container
* :py:attr:`id`
* :py:attr:`name`
* :py:attr:`dimension`
* :py:attr:`opt`
* :py:attr:`pdimension`
* :py:attr:`evalpts`
* :py:attr:`bbox`
* :py:attr:`vis`
* :py:attr:`delta`
* :py:attr:`sample_size`
"""
def __init__(self, *args, **kwargs):
self._pdim = 0 if not hasattr(self, '_pdim') else self._pdim # number of parametric dimensions
self._dinit = 0.01 if not hasattr(self, '_dinit') else self._dinit # delta initialization value
super(AbstractContainer, self).__init__(**kwargs)
self._geometry_type = "container"
self._name = self._geometry_type
self._delta = [float(self._dinit) for _ in range(self._pdim)] # evaluation delta
self._elements = [] # list of elements contained
self._vis_component = None # visualization component
self._cache['evalpts'] = []
def __iter__(self):
self._iter_index = 0
return self
def next(self):
return self.__next__()
def __next__(self):
try:
result = self._elements[self._iter_index]
except IndexError:
raise StopIteration
self._iter_index += 1
return result
def __reversed__(self):
return reversed(self._elements)
def __getitem__(self, index):
return self._elements[index]
def __len__(self):
return len(self._elements)
def __add__(self, other):
if not isinstance(other, self.__class__):
raise GeomdlException("Cannot add non-matching container types")
self.add(other)
return self
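    # Note: unlike sequence concatenation, ``a + b`` mutates ``a`` in place via
    # ``add()`` and returns it, so the left operand accumulates the elements.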
@property
def pdimension(self):
""" Parametric dimension.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the parametric dimension
:type: int
"""
return self._pdim
@property
def evalpts(self):
""" Evaluated points.
        Since a multi object contains multiple geometry objects, the evaluated points are returned as a list of
        per-geometry point lists, where each point is itself a list of Cartesian coordinates.
The following code example illustrates these details:
.. code-block:: python
:linenos:
multi_obj = multi.SurfaceContainer() # it can also be multi.CurveContainer()
# Add geometries to multi_obj via multi_obj.add() method
# Then, the following loop will print all the evaluated points of the Multi object
for idx, mpt in enumerate(multi_obj.evalpts):
print("Shape", idx+1, "contains", len(mpt), "points. These points are:")
for pt in mpt:
line = ", ".join([str(p) for p in pt])
print(line)
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the evaluated points of all contained geometries
"""
if not self._cache['evalpts']:
for elem in self._elements:
elem.delta = self._delta[0] if self._pdim == 1 else self._delta
evalpts = elem.evalpts
self._cache['evalpts'] += evalpts
return self._cache['evalpts']
@property
def bbox(self):
""" Bounding box.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the bounding box of all contained geometries
"""
all_box = []
for elem in self._elements:
all_box += list(elem.bbox)
return utilities.evaluate_bounding_box(all_box)
@property
def vis(self):
""" Visualization component.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the visualization component
:setter: Sets the visualization component
"""
return self._vis_component
@vis.setter
def vis(self, value):
if not isinstance(value, vis.VisAbstract):
warnings.warn("Visualization component is NOT an instance of the vis.VisAbstract class")
return
self._vis_component = value
@property
def delta(self):
""" Evaluation delta (for all parametric directions).
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
        Therefore, the smaller the delta value, the smoother the shape.
The following figure illustrates the working principles of the delta property:
.. math::
\\left[{{u_{start}},{u_{start}} + \\delta ,({u_{start}} + \\delta ) + \\delta , \\ldots ,{u_{end}}} \\right]
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value
:setter: Sets the delta value
"""
return self._delta[0] if self._pdim == 1 else self._delta
@delta.setter
def delta(self, value):
if self._pdim == 1 and isinstance(value, (int, float)):
delta_vals = [value]
else:
if isinstance(value, (list, tuple)):
if len(value) != self._pdim:
                    raise ValueError("The input must be a list or a tuple with a length of " + str(self._pdim))
delta_vals = value
elif isinstance(value, (int, float)):
delta_vals = [value for _ in range(self._pdim)]
else:
raise TypeError("Unsupported input type for evaluation delta. Use float, list or tuple")
# Set delta values
for idx, dval in enumerate(delta_vals):
self._delta_setter_common(idx, dval)
# Reset the cache
self.reset()
def _delta_setter_common(self, idx, value):
# Check and set the delta value corresponding to the idx-th parametric dimension
if float(value) <= 0 or float(value) >= 1:
            raise ValueError("Evaluation delta should be between 0.0 and 1.0. You are trying to set it to " + str(value)
                             + " for parametric dimension " + str(idx + 1) + ".")
self._delta[idx] = float(value)
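    # For example, for a surface container (pdim == 2): container.delta = 0.1
    # applies to both parametric directions, container.delta = [0.1, 0.05]
    # sets u and v separately, and any value outside the open interval (0, 1)
    # raises ValueError via _delta_setter_common() above.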
@property
def sample_size(self):
""" Sample size (for all parametric directions).
Sample size defines the number of points to evaluate. It also sets the ``delta`` property.
The following figure illustrates the working principles of sample size property:
.. math::
\\underbrace {\\left[ {{u_{start}}, \\ldots ,{u_{end}}} \\right]}_{{n_{sample}}}
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size
:setter: Sets sample size
"""
ssz = [self._sample_size_getter_common(idx) for idx in range(self._pdim)]
return ssz[0] if self._pdim == 1 else ssz
@sample_size.setter
def sample_size(self, value):
if self._pdim == 1 and isinstance(value, (int, float)):
ssz = [value]
else:
if isinstance(value, (list, tuple)):
if len(value) != self._pdim:
                    raise ValueError("The input must be a list or a tuple with a length of " + str(self._pdim))
ssz = value
elif isinstance(value, (int, float)):
ssz = [value for _ in range(self._pdim)]
else:
raise TypeError("Unsupported input type for sample size. Use float, list or tuple")
# Set sample size
for idx, sval in enumerate(ssz):
self._sample_size_setter_common(idx, sval)
# Reset the cache
self.reset()
def _sample_size_getter_common(self, idx):
return int(1 / self._delta[idx]) + 1
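    # Sample size and delta are two views of the same setting: delta = 0.01
    # yields sample_size = int(1 / 0.01) + 1 = 101, and setting sample_size
    # back to 101 stores delta = 1.0 / (101 - 1) = 0.01.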
def _sample_size_setter_common(self, idx, value):
# Check and set the delta value corresponding to the idx-th parametric dimension
if not isinstance(value, int):
raise GeomdlException("Sample size must be an integer value bigger than 2")
if value < 2:
raise GeomdlException("Sample size must be an integer value bigger than 2")
self._delta[idx] = 1.0 / float(value - 1)
@property
def data(self):
""" Returns a dict which contains the geometry data.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
"""
return tuple([e.data for e in self._elements])
def add(self, element):
""" Adds geometry objects to the container.
The input can be a single geometry, a list of geometry objects or a geometry container object.
:param element: geometry object
"""
if isinstance(element, (self.__class__, list, tuple)):
for elem in element:
self.add(elem)
elif hasattr(self, '_pdim'):
if element.pdimension == self.pdimension:
if self.dimension == 0:
self._dimension = element.dimension
else:
if self.dimension != element.dimension:
raise GeomdlException("The spatial dimensions of the container and the input must be the same")
self._elements.append(element)
else:
raise GeomdlException("Cannot add the element to the container")
# Reset the cache
self.reset()
# Make container look like a list
append = add
def reset(self):
""" Resets the cache. """
self._cache['evalpts'][:] = []
# Runs visualization component to render the surface
@abc.abstractmethod
def render(self, **kwargs):
""" Renders plots using the visualization component.
.. note::
This is an abstract method and it must be implemented in the subclass.
"""
pass
@utl.export
class CurveContainer(AbstractContainer):
""" Container class for storing multiple curves.
This class implements Python Iterator Protocol and therefore any instance of this class can be directly used in
a for loop.
This class provides the following properties:
* :py:attr:`type` = container
* :py:attr:`id`
* :py:attr:`name`
* :py:attr:`dimension`
* :py:attr:`opt`
* :py:attr:`pdimension`
* :py:attr:`evalpts`
* :py:attr:`bbox`
* :py:attr:`vis`
* :py:attr:`delta`
* :py:attr:`sample_size`
The following code example illustrates the usage of the Python properties:
.. code-block:: python
# Create a multi-curve container instance
mcrv = multi.CurveContainer()
# Add single or multi curves to the multi container using mcrv.add() command
# Addition operator, e.g. mcrv1 + mcrv2, also works
# Set the evaluation delta of the multi-curve
mcrv.delta = 0.05
# Get the evaluated points
curve_points = mcrv.evalpts
"""
def __init__(self, *args, **kwargs):
self._pdim = 1 # number of parametric dimensions
self._dinit = 0.01 # evaluation delta
super(CurveContainer, self).__init__(*args, **kwargs)
for arg in args:
self.add(arg)
def render(self, **kwargs):
""" Renders the curves.
The visualization component must be set using :py:attr:`~vis` property before calling this method.
Keyword Arguments:
* ``cpcolor``: sets the color of the control points grid
* ``evalcolor``: sets the color of the surface
* ``filename``: saves the plot with the input name
* ``plot``: controls plot window visibility. *Default: True*
* ``animate``: activates animation (if supported). *Default: False*
* ``delta``: if True, the evaluation delta of the container object will be used. *Default: True*
* ``reset_names``: resets the name of the curves inside the container. *Default: False*
The ``cpcolor`` and ``evalcolor`` arguments can be a string or a list of strings corresponding to the color
values. Both arguments are processed separately, e.g. ``cpcolor`` can be a string whereas ``evalcolor`` can be
a list or a tuple, or vice versa. A single string value sets the color to the same value. List input allows
customization over the color values. If none provided, a random color will be selected.
The ``plot`` argument is useful when you would like to work on the command line without any window context.
If ``plot`` flag is False, this method saves the plot as an image file (.png file where possible) and disables
plot window popping out. If you don't provide a file name, the name of the image file will be pulled from the
configuration class.
"""
if not self._vis_component:
            warnings.warn("No visualization component has been set")
return
# Get the color values from keyword arguments
cpcolor = kwargs.get('cpcolor')
evalcolor = kwargs.get('evalcolor')
filename = kwargs.get('filename', None)
plot_visible = kwargs.get('plot', True)
animate_plot = kwargs.get('animate', False)
# Flag to control evaluation delta updates
update_delta = kwargs.get('delta', True)
reset_names = kwargs.get('reset_names', False)
# Check if the input list sizes are equal
if isinstance(cpcolor, (list, tuple)):
if len(cpcolor) < len(self._elements):
raise ValueError("The number of color values in 'cpcolor' (" + str(len(cpcolor)) +
") cannot be less than the number of geometries contained ("
+ str(len(self._elements)) + ")")
if isinstance(evalcolor, (list, tuple)):
if len(evalcolor) < len(self._elements):
raise ValueError("The number of color values in 'evalcolor' (" + str(len(evalcolor)) +
") cannot be less than the number of geometries contained ("
+ str(len(self._elements)) + ")")
# Run the visualization component
self._vis_component.clear()
for idx, elem in enumerate(self._elements):
if update_delta:
elem.delta = self.delta
elem.evaluate()
# Reset element name
if reset_names:
elem.name = "curve"
# Fix element name
if elem.name == "curve":
elem.name = elem.name + " " + str(idx)
# Color selection
color = select_color(cpcolor, evalcolor, idx=idx)
self._vis_component.add(ptsarr=elem.ctrlpts, name=(elem.name, "(CP)"),
color=color[0], plot_type='ctrlpts', idx=idx)
self._vis_component.add(ptsarr=elem.evalpts, name=elem.name,
color=color[1], plot_type='evalpts', idx=idx)
# Display the figures
if animate_plot:
self._vis_component.animate(fig_save_as=filename, display_plot=plot_visible)
else:
self._vis_component.render(fig_save_as=filename, display_plot=plot_visible)
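# A minimal usage sketch for the render pipeline above (assumes geomdl's
# Matplotlib visualization module, e.g. VisMPL.VisCurve2D, is available):
#   mcrv = CurveContainer(crv1, crv2)
#   mcrv.vis = VisMPL.VisCurve2D()
#   mcrv.render(evalcolor=['red', 'blue'], plot=False, filename='curves.png')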
@utl.export
class SurfaceContainer(AbstractContainer):
""" Container class for storing multiple surfaces.
This class implements Python Iterator Protocol and therefore any instance of this class can be directly used in
a for loop.
This class provides the following properties:
* :py:attr:`type` = container
* :py:attr:`id`
* :py:attr:`name`
* :py:attr:`dimension`
* :py:attr:`opt`
* :py:attr:`pdimension`
* :py:attr:`evalpts`
* :py:attr:`bbox`
* :py:attr:`vis`
* :py:attr:`delta`
* :py:attr:`delta_u`
* :py:attr:`delta_v`
* :py:attr:`sample_size`
* :py:attr:`sample_size_u`
* :py:attr:`sample_size_v`
* :py:attr:`tessellator`
* :py:attr:`vertices`
* :py:attr:`faces`
The following code example illustrates the usage of these Python properties:
.. code-block:: python
# Create a multi-surface container instance
msurf = multi.SurfaceContainer()
# Add single or multi surfaces to the multi container using msurf.add() command
# Addition operator, e.g. msurf1 + msurf2, also works
# Set the evaluation delta of the multi-surface
msurf.delta = 0.05
# Get the evaluated points
surface_points = msurf.evalpts
"""
def __init__(self, *args, **kwargs):
self._pdim = 2 # number of parametric dimensions
self._dinit = 0.05 # evaluation delta
super(SurfaceContainer, self).__init__(*args, **kwargs)
self._cache['vertices'] = []
self._cache['faces'] = []
for arg in args:
self.add(arg)
@property
def delta_u(self):
""" Evaluation delta for the u-direction.
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
        Therefore, the smaller the delta, the smoother the shape.
Please note that ``delta_u`` and ``sample_size_u`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_u`` will also set ``sample_size_u``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value for the u-direction
:setter: Sets the delta value for the u-direction
:type: float
"""
return self._delta[0]
@delta_u.setter
def delta_u(self, value):
self._delta_setter_common(0, value)
@property
def delta_v(self):
""" Evaluation delta for the v-direction.
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
        Therefore, the smaller the delta, the smoother the shape.
Please note that ``delta_v`` and ``sample_size_v`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_v`` will also set ``sample_size_v``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value for the v-direction
:setter: Sets the delta value for the v-direction
:type: float
"""
return self._delta[1]
@delta_v.setter
def delta_v(self, value):
self._delta_setter_common(1, value)
@property
def sample_size_u(self):
""" Sample size for the u-direction.
Sample size defines the number of points to evaluate. It also sets the ``delta_u`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the u-direction
:setter: Sets sample size for the u-direction
:type: int
"""
return self._sample_size_getter_common(0)
@sample_size_u.setter
def sample_size_u(self, value):
self._sample_size_setter_common(0, value)
@property
def sample_size_v(self):
""" Sample size for the v-direction.
Sample size defines the number of points to evaluate. It also sets the ``delta_v`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the v-direction
:setter: Sets sample size for the v-direction
:type: int
"""
return self._sample_size_getter_common(1)
@sample_size_v.setter
def sample_size_v(self, value):
self._sample_size_setter_common(1, value)
@property
def tessellator(self):
""" Tessellation component of the surfaces inside the container.
Please refer to :doc:`Tessellation <module_tessellate>` documentation for details.
.. code-block:: python
:linenos:
from geomdl import multi
from geomdl import tessellate
# Create the surface container
surf_container = multi.SurfaceContainer(surf_list)
# Set tessellator component
surf_container.tessellator = tessellate.TrimTessellate()
:getter: gets the tessellation component
:setter: sets the tessellation component
"""
tsl_comps = []
for idx in range(len(self._elements)):
tsl_comps.append(self._elements[idx].tessellator)
return tsl_comps
@tessellator.setter
def tessellator(self, value):
# Set tessellation component
for idx in range(len(self._elements)):
self._elements[idx].tessellator = value.__class__()
@property
def vertices(self):
""" Vertices generated by the tessellation operation.
If the tessellation component is set to None, the result will be an empty list.
:getter: Gets the vertices
"""
if not self._cache['vertices']:
self.tessellate()
return self._cache['vertices']
@property
def faces(self):
""" Faces (triangles, quads, etc.) generated by the tessellation operation.
If the tessellation component is set to None, the result will be an empty list.
:getter: Gets the faces
"""
if not self._cache['faces']:
self.tessellate()
return self._cache['faces']
def tessellate(self, **kwargs):
""" Tessellates the surfaces inside the container.
Keyword arguments are directly passed to the tessellation component.
The following code snippet illustrates getting the vertices and faces of the surfaces inside the container:
.. code-block:: python
:linenos:
# Tessellate the surfaces inside the container
surf_container.tessellate()
# Vertices and faces are stored inside the tessellator component
tsl = surf_container.tessellator
# Loop through all tessellator components
for t in tsl:
# Get the vertices
vertices = t.tessellator.vertices
# Get the faces (triangles, quads, etc.)
faces = t.tessellator.faces
Keyword Arguments:
* ``num_procs``: number of concurrent processes for tessellating the surfaces. *Default: 1*
* ``delta``: if True, the evaluation delta of the container object will be used. *Default: True*
* ``force``: flag to force tessellation. *Default: False*
"""
# Keyword arguments
force_tsl = kwargs.get('force', False)
update_delta = kwargs.pop('delta', True)
# Don't re-tessellate if everything is in place
if all((self._cache['vertices'], self._cache['faces'])) and not force_tsl:
return
# Tessellate the surfaces in the container
num_procs = kwargs.pop('num_procs', 1)
new_elems = []
if num_procs > 1:
with utl.pool_context(processes=num_procs) as pool:
tmp_elem = pool.map(partial(process_tessellate, delta=self.delta, update_delta=update_delta, **kwargs),
self._elements)
new_elems += tmp_elem
else:
for idx in range(len(self._elements)):
tmp_elem = process_tessellate(self._elements[idx], delta=self.delta, update_delta=update_delta, **kwargs)
new_elems.append(tmp_elem)
self._elements = new_elems
# Update caches
verts = []
faces = []
v_offset = 0
f_offset = 0
for elem in self._elements:
v = elem.vertices
for i in range(len(v)):
v[i].id += v_offset
verts += v
f = elem.faces
for i in range(len(f)):
f[i].id += f_offset
# for j in range(len(f[i]._data)):
# f[i]._data[j].id += v_offset
faces += f
v_offset += len(v)
f_offset += len(f)
self._cache['vertices'] = verts
self._cache['faces'] = faces
def reset(self):
""" Resets the cache. """
super(SurfaceContainer, self).reset()
self._cache['vertices'][:] = []
self._cache['faces'][:] = []
def render(self, **kwargs):
""" Renders the surfaces.
The visualization component must be set using :py:attr:`~vis` property before calling this method.
Keyword Arguments:
* ``cpcolor``: sets the color of the control points grids
* ``evalcolor``: sets the color of the surface
* ``filename``: saves the plot with the input name
* ``plot``: controls plot window visibility. *Default: True*
* ``animate``: activates animation (if supported). *Default: False*
* ``colormap``: sets the colormap of the surfaces
* ``delta``: if True, the evaluation delta of the container object will be used. *Default: True*
* ``reset_names``: resets the name of the surfaces inside the container. *Default: False*
* ``num_procs``: number of concurrent processes for rendering the surfaces. *Default: 1*
The ``cpcolor`` and ``evalcolor`` arguments can be a string or a list of strings corresponding to the color
values. Both arguments are processed separately, e.g. ``cpcolor`` can be a string whereas ``evalcolor`` can be
a list or a tuple, or vice versa. A single string value sets the color to the same value. List input allows
customization over the color values. If none provided, a random color will be selected.
The ``plot`` argument is useful when you would like to work on the command line without any window context.
If ``plot`` flag is False, this method saves the plot as an image file (.png file where possible) and disables
plot window popping out. If you don't provide a file name, the name of the image file will be pulled from the
configuration class.
Please note that ``colormap`` argument can only work with visualization classes that support colormaps. As an
example, please see :py:class:`.VisMPL.VisSurfTriangle()` class documentation. This method expects multiple
colormap inputs as a list or tuple, preferable the input list size is the same as the number of surfaces
contained in the class. In the case of number of surfaces is bigger than number of input colormaps, this method
will automatically assign a random color for the remaining surfaces.
"""
# Validation
if not self._vis_component:
warnings.warn("No visualization component has been set")
return
# Get the color values from keyword arguments
cpcolor = kwargs.get('cpcolor')
evalcolor = kwargs.get('evalcolor')
trimcolor = kwargs.get('trimcolor', 'black')
filename = kwargs.get('filename', None)
plot_visible = kwargs.get('plot', True)
animate_plot = kwargs.get('animate', False)
# Flag to control evaluation delta updates
update_delta = kwargs.get('delta', True)
reset_names = kwargs.get('reset_names', False)
# Number of parallel processes
num_procs = kwargs.get('num_procs', 1)
force_tsl = bool(kwargs.pop('force', False)) # flag to force re-tessellation
# Check if the input list sizes are equal
if isinstance(cpcolor, (list, tuple)):
if len(cpcolor) != len(self._elements):
                raise ValueError("The number of colors in 'cpcolor' (" + str(len(cpcolor)) +
                                 ") cannot be less than the number of geometries contained (" +
                                 str(len(self._elements)) + ")")
if isinstance(evalcolor, (list, tuple)):
if len(evalcolor) != len(self._elements):
raise ValueError("The number of colors in 'evalcolor' (" + str(len(evalcolor)) +
") cannot be less than the number of geometries contained ("
+ str(len(self._elements)) + ")")
# Get colormaps as a list
surf_cmaps = kwargs.get('colormap', [])
if not isinstance(surf_cmaps, (list, tuple)):
warnings.warn("Expecting a list of colormap values, not " + str(type(surf_cmaps)))
surf_cmaps = []
# Run the visualization component
self._vis_component.clear()
vis_list = []
if num_procs > 1:
mp_lock = Lock()
mp_val = Value('i', 0)
with utl.pool_context(initializer=mp_init, initargs=(mp_lock, mp_val), processes=num_procs) as pool:
tmp = pool.map(partial(process_elements_surface, mconf=self._vis_component.mconf,
colorval=(cpcolor, evalcolor, trimcolor), idx=-1, force_tsl=force_tsl,
update_delta=update_delta, delta=self.delta, reset_names=reset_names),
self._elements)
vis_list += tmp
else:
for idx, elem in enumerate(self._elements):
tmp = process_elements_surface(elem, self._vis_component.mconf, (cpcolor, evalcolor, trimcolor),
idx, force_tsl, update_delta, self.delta, reset_names)
vis_list += tmp
for vl in vis_list:
if isinstance(vl, dict):
self._vis_component.add(**vl)
else:
for v in vl:
self._vis_component.add(**v)
# Display the figures
if animate_plot:
self._vis_component.animate(fig_save_as=filename, display_plot=plot_visible, colormap=surf_cmaps)
else:
self._vis_component.render(fig_save_as=filename, display_plot=plot_visible, colormap=surf_cmaps)
@utl.export
class VolumeContainer(AbstractContainer):
""" Container class for storing multiple volumes.
This class implements Python Iterator Protocol and therefore any instance of this class can be directly used in
a for loop.
This class provides the following properties:
* :py:attr:`type`
* :py:attr:`id`
* :py:attr:`name`
* :py:attr:`dimension`
* :py:attr:`opt`
* :py:attr:`pdimension`
* :py:attr:`evalpts`
* :py:attr:`bbox`
* :py:attr:`vis`
* :py:attr:`delta`
* :py:attr:`delta_u`
* :py:attr:`delta_v`
* :py:attr:`delta_w`
* :py:attr:`sample_size`
* :py:attr:`sample_size_u`
* :py:attr:`sample_size_v`
* :py:attr:`sample_size_w`
The following code example illustrates the usage of these Python properties:
.. code-block:: python
# Create a multi-volume container instance
mvol = multi.VolumeContainer()
# Add single or multi volumes to the multi container using mvol.add() command
# Addition operator, e.g. mvol1 + mvol2, also works
# Set the evaluation delta of the multi-volume
mvol.delta = 0.05
# Get the evaluated points
volume_points = mvol.evalpts
"""
def __init__(self, *args, **kwargs):
self._pdim = 3 # number of parametric dimensions
self._dinit = 0.1 # evaluation delta
super(VolumeContainer, self).__init__()
for arg in args:
self.add(arg)
@property
def delta_u(self):
""" Evaluation delta for the u-direction.
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
        Therefore, the smaller the delta, the smoother the shape.
Please note that ``delta_u`` and ``sample_size_u`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_u`` will also set ``sample_size_u``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value for the u-direction
:setter: Sets the delta value for the u-direction
:type: float
"""
return self._delta[0]
@delta_u.setter
def delta_u(self, value):
self._delta_setter_common(0, value)
@property
def delta_v(self):
""" Evaluation delta for the v-direction.
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
        Therefore, the smaller the delta, the smoother the shape.
Please note that ``delta_v`` and ``sample_size_v`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_v`` will also set ``sample_size_v``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value for the v-direction
:setter: Sets the delta value for the v-direction
:type: float
"""
return self._delta[1]
@delta_v.setter
def delta_v(self, value):
self._delta_setter_common(1, value)
@property
def delta_w(self):
""" Evaluation delta for the w-direction.
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
        Therefore, the smaller the delta, the smoother the shape.
Please note that ``delta_w`` and ``sample_size_w`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_w`` will also set ``sample_size_w``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value for the w-direction
:setter: Sets the delta value for the w-direction
:type: float
"""
return self._delta[2]
@delta_w.setter
def delta_w(self, value):
self._delta_setter_common(2, value)
@property
def sample_size_u(self):
""" Sample size for the u-direction.
Sample size defines the number of points to evaluate. It also sets the ``delta_u`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the u-direction
:setter: Sets sample size for the u-direction
:type: int
"""
return self._sample_size_getter_common(0)
@sample_size_u.setter
def sample_size_u(self, value):
self._sample_size_setter_common(0, value)
@property
def sample_size_v(self):
""" Sample size for the v-direction.
Sample size defines the number of points to evaluate. It also sets the ``delta_v`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the v-direction
:setter: Sets sample size for the v-direction
:type: int
"""
return self._sample_size_getter_common(1)
@sample_size_v.setter
def sample_size_v(self, value):
self._sample_size_setter_common(1, value)
@property
def sample_size_w(self):
""" Sample size for the w-direction.
Sample size defines the number of points to evaluate. It also sets the ``delta_w`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the w-direction
:setter: Sets sample size for the w-direction
:type: int
"""
return self._sample_size_getter_common(2)
@sample_size_w.setter
def sample_size_w(self, value):
self._sample_size_setter_common(2, value)
def render(self, **kwargs):
""" Renders the volumes.
The visualization component must be set using :py:attr:`~vis` property before calling this method.
Keyword Arguments:
* ``cpcolor``: sets the color of the control points plot
* ``evalcolor``: sets the color of the volume
* ``filename``: saves the plot with the input name
* ``plot``: controls plot window visibility. *Default: True*
* ``animate``: activates animation (if supported). *Default: False*
* ``delta``: if True, the evaluation delta of the container object will be used. *Default: True*
* ``reset_names``: resets the name of the volumes inside the container. *Default: False*
* ``grid_size``: grid size for voxelization. *Default: (16, 16, 16)*
* ``num_procs``: number of concurrent processes for voxelization. *Default: 1*
The ``cpcolor`` and ``evalcolor`` arguments can be a string or a list of strings corresponding to the color
values. Both arguments are processed separately, e.g. ``cpcolor`` can be a string whereas ``evalcolor`` can be
a list or a tuple, or vice versa. A single string value sets the color to the same value. List input allows
customization over the color values. If none provided, a random color will be selected.
The ``plot`` argument is useful when you would like to work on the command line without any window context.
If ``plot`` flag is False, this method saves the plot as an image file (.png file where possible) and disables
plot window popping out. If you don't provide a file name, the name of the image file will be pulled from the
configuration class.
"""
if not self._vis_component:
warnings.warn("No visualization component has been set")
return
cpcolor = kwargs.pop('cpcolor', None)
evalcolor = kwargs.pop('evalcolor', None)
filename = kwargs.pop('filename', None)
plot_visible = kwargs.pop('plot', True)
animate_plot = kwargs.pop('animate', False)
# Flag to control evaluation delta updates
update_delta = kwargs.pop('delta', True)
reset_names = kwargs.get('reset_names', False)
# Check if the input list sizes are equal
if isinstance(cpcolor, (list, tuple)):
if len(cpcolor) != len(self._elements):
                raise ValueError("The number of colors in 'cpcolor' (" + str(len(cpcolor)) +
                                 ") cannot be less than the number of geometries contained (" +
                                 str(len(self._elements)) + ")")
if isinstance(evalcolor, (list, tuple)):
if len(evalcolor) != len(self._elements):
raise ValueError("The number of colors in 'evalcolor' (" + str(len(evalcolor)) +
") cannot be less than the number of geometries contained ("
+ str(len(self._elements)) + ")")
# Run the visualization component
self._vis_component.clear()
for idx, elem in enumerate(self._elements):
if update_delta:
elem.delta = self.delta
elem.evaluate()
# Reset element name
if reset_names:
elem.name = "volume"
# Fix element name
if elem.name == "volume":
elem.name = elem.name + " " + str(idx)
# Color selection
color = select_color(cpcolor, evalcolor, idx=idx)
# Add control points
if self._vis_component.mconf['ctrlpts'] == 'points':
self._vis_component.add(ptsarr=elem.ctrlpts, name=(elem.name, "(CP)"),
color=color[0], plot_type='ctrlpts', idx=idx)
# Add evaluated points
if self._vis_component.mconf['evalpts'] == 'points':
self._vis_component.add(ptsarr=elem.evalpts, name=elem.name,
color=color[1], plot_type='evalpts', idx=idx)
# Add evaluated points as voxels
if self._vis_component.mconf['evalpts'] == 'voxels':
grid, filled = voxelize.voxelize(elem, **kwargs)
polygrid = voxelize.convert_bb_to_faces(grid)
self._vis_component.add(ptsarr=[polygrid, filled], name=elem.name,
color=color[1], plot_type='evalpts', idx=idx)
# Display the figures
if animate_plot:
self._vis_component.animate(fig_save_as=filename, display_plot=plot_visible)
else:
self._vis_component.render(fig_save_as=filename, display_plot=plot_visible)
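# A minimal voxel-rendering sketch (assumes a vis component whose mconf maps
# 'evalpts' to 'voxels'):
#   mvol = VolumeContainer(vol)
#   mvol.vis = some_voxel_vis_component   # hypothetical component instance
#   mvol.render(grid_size=(32, 32, 32))   # finer than the (16, 16, 16) default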
def select_color(cpcolor, evalcolor, idx=0):
""" Selects item color for plotting.
:param cpcolor: color for control points grid item
:type cpcolor: str, list, tuple
:param evalcolor: color for evaluated points grid item
:type evalcolor: str, list, tuple
:param idx: index of the current geometry object
:type idx: int
:return: a list of color values
:rtype: list
"""
# Random colors by default
color = utilities.color_generator()
# Constant color for control points grid
if isinstance(cpcolor, str):
color[0] = cpcolor
# User-defined color for control points grid
if isinstance(cpcolor, (list, tuple)):
color[0] = cpcolor[idx]
# Constant color for evaluated points grid
if isinstance(evalcolor, str):
color[1] = evalcolor
# User-defined color for evaluated points grid
if isinstance(evalcolor, (list, tuple)):
color[1] = evalcolor[idx]
return color
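# Example with hypothetical colors: select_color('black', ['red', 'green'], idx=1)
# returns ['black', 'green'] -- a constant control-point color plus the idx-th
# user-defined evaluated-point color; unspecified slots keep the random values
# produced by utilities.color_generator().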
def process_tessellate(elem, update_delta, delta, **kwargs):
""" Tessellates surfaces.
.. note:: Helper function required for ``multiprocessing``
:param elem: surface
:type elem: abstract.Surface
:param update_delta: flag to control evaluation delta updates
:type update_delta: bool
:param delta: evaluation delta
:type delta: list, tuple
:return: updated surface
:rtype: abstract.Surface
"""
if update_delta:
elem.delta = delta
elem.evaluate()
elem.tessellate(**kwargs)
return elem
def process_elements_surface(elem, mconf, colorval, idx, force_tsl, update_delta, delta, reset_names):
""" Processes visualization elements for surfaces.
.. note:: Helper function required for ``multiprocessing``
:param elem: surface
:type elem: abstract.Surface
:param mconf: visualization module configuration
:type mconf: dict
:param colorval: color values
:type colorval: tuple
:param idx: index of the surface
:type idx: int
:param force_tsl: flag to force re-tessellation
:type force_tsl: bool
:param update_delta: flag to update surface delta
:type update_delta: bool
:param delta: new surface evaluation delta
:type delta: list, tuple
:param reset_names: flag to reset names
:type reset_names: bool
:return: visualization element (as a dict)
:rtype: list
"""
if idx < 0:
lock.acquire()
idx = counter.value
counter.value += 1
lock.release()
if update_delta:
elem.delta = delta
elem.evaluate()
# Reset element name
if reset_names:
elem.name = "surface"
# Fix element name
if elem.name == "surface" and idx >= 0:
elem.name = elem.name + " " + str(idx)
# Color selection
color = select_color(colorval[0], colorval[1], idx=idx)
# Initialize the return list
rl = []
# Add control points
if mconf['ctrlpts'] == 'points':
ret = dict(ptsarr=elem.ctrlpts, name=(elem.name, "(CP)"),
color=color[0], plot_type='ctrlpts', idx=idx)
rl.append(ret)
# Add control points as quads
if mconf['ctrlpts'] == 'quads':
qtsl = tessellate.QuadTessellate()
qtsl.tessellate(elem.ctrlpts, size_u=elem.ctrlpts_size_u, size_v=elem.ctrlpts_size_v)
ret = dict(ptsarr=[qtsl.vertices, qtsl.faces], name=(elem.name, "(CP)"),
color=color[0], plot_type='ctrlpts', idx=idx)
rl.append(ret)
# Add surface points
if mconf['evalpts'] == 'points':
ret = dict(ptsarr=elem.evalpts, name=(elem.name, idx), color=color[1], plot_type='evalpts', idx=idx)
rl.append(ret)
# Add surface points as quads
if mconf['evalpts'] == 'quads':
qtsl = tessellate.QuadTessellate()
qtsl.tessellate(elem.evalpts, size_u=elem.sample_size_u, size_v=elem.sample_size_v)
ret = dict(ptsarr=[qtsl.vertices, qtsl.faces],
name=elem.name, color=color[1], plot_type='evalpts', idx=idx)
rl.append(ret)
# Add surface points as vertices and triangles
if mconf['evalpts'] == 'triangles':
elem.tessellate(force=force_tsl)
ret = dict(ptsarr=[elem.tessellator.vertices, elem.tessellator.faces],
name=elem.name, color=color[1], plot_type='evalpts', idx=idx)
rl.append(ret)
# Add the trim curves
for itc, trim in enumerate(elem.trims):
ret = dict(ptsarr=elem.evaluate_list(trim.evalpts), name=("trim", itc),
color=colorval[2], plot_type='trimcurve', idx=idx)
rl.append(ret)
# Return the list
return rl
def mp_init(l, c):
""" Initialization function for multi-threaded operations.
:param l: lock
:param c: value for common counter
"""
global lock
global counter
lock = l
counter = c
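# How mp_init is wired up (mirroring SurfaceContainer.render above): the lock
# and shared counter are created in the parent process and handed to every
# pool worker, e.g.
#   mp_lock, mp_val = Lock(), Value('i', 0)
#   with utl.pool_context(initializer=mp_init, initargs=(mp_lock, mp_val),
#                         processes=num_procs) as pool:
#       pool.map(..., elements)  # workers bump the shared counter under the lock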
|
[
"warnings.warn",
"functools.partial",
"multiprocessing.Value",
"multiprocessing.Lock"
] |
[((5135, 5228), 'warnings.warn', 'warnings.warn', (['"""Visualization component is NOT an instance of the vis.VisAbstract class"""'], {}), "(\n 'Visualization component is NOT an instance of the vis.VisAbstract class')\n", (5148, 5228), False, 'import warnings\n'), ((14100, 14151), 'warnings.warn', 'warnings.warn', (['"""No visualization component has set"""'], {}), "('No visualization component has set')\n", (14113, 14151), False, 'import warnings\n'), ((28275, 28331), 'warnings.warn', 'warnings.warn', (['"""No visualization component has been set"""'], {}), "('No visualization component has been set')\n", (28288, 28331), False, 'import warnings\n'), ((30192, 30198), 'multiprocessing.Lock', 'Lock', ([], {}), '()\n', (30196, 30198), False, 'from multiprocessing import Value, Lock\n'), ((30220, 30233), 'multiprocessing.Value', 'Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (30225, 30233), False, 'from multiprocessing import Value, Lock\n'), ((39518, 39574), 'warnings.warn', 'warnings.warn', (['"""No visualization component has been set"""'], {}), "('No visualization component has been set')\n", (39531, 39574), False, 'import warnings\n'), ((24648, 24735), 'functools.partial', 'partial', (['process_tessellate'], {'delta': 'self.delta', 'update_delta': 'update_delta'}), '(process_tessellate, delta=self.delta, update_delta=update_delta, **\n kwargs)\n', (24655, 24735), False, 'from functools import partial\n'), ((30378, 30594), 'functools.partial', 'partial', (['process_elements_surface'], {'mconf': 'self._vis_component.mconf', 'colorval': '(cpcolor, evalcolor, trimcolor)', 'idx': '(-1)', 'force_tsl': 'force_tsl', 'update_delta': 'update_delta', 'delta': 'self.delta', 'reset_names': 'reset_names'}), '(process_elements_surface, mconf=self._vis_component.mconf, colorval\n =(cpcolor, evalcolor, trimcolor), idx=-1, force_tsl=force_tsl,\n update_delta=update_delta, delta=self.delta, reset_names=reset_names)\n', (30385, 30594), False, 'from functools import partial\n')]
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from espnet(https://github.com/espnet/espnet)
# Modified from wenet(https://github.com/wenet-e2e/wenet)
import os
from typing import Optional
from paddle.io import Dataset
from yacs.config import CfgNode
from paddlespeech.s2t.frontend.utility import read_manifest
from paddlespeech.s2t.utils.log import Log
__all__ = ["ManifestDataset", "TransformDataset"]
logger = Log(__name__).getlog()
class ManifestDataset(Dataset):
@classmethod
def params(cls, config: Optional[CfgNode]=None) -> CfgNode:
default = CfgNode(
dict(
manifest="",
max_input_len=27.0,
min_input_len=0.0,
max_output_len=float('inf'),
min_output_len=0.0,
max_output_input_ratio=float('inf'),
min_output_input_ratio=0.0, ))
if config is not None:
config.merge_from_other_cfg(default)
return default
@classmethod
def from_config(cls, config):
"""Build a ManifestDataset object from a config.
Args:
config (yacs.config.CfgNode): configs object.
Returns:
            ManifestDataset: dataset object.
"""
assert 'manifest' in config.data
assert config.data.manifest
dataset = cls(
manifest_path=config.data.manifest,
max_input_len=config.data.max_input_len,
min_input_len=config.data.min_input_len,
max_output_len=config.data.max_output_len,
min_output_len=config.data.min_output_len,
max_output_input_ratio=config.data.max_output_input_ratio,
min_output_input_ratio=config.data.min_output_input_ratio, )
return dataset
def __init__(self,
manifest_path,
max_input_len=float('inf'),
min_input_len=0.0,
max_output_len=float('inf'),
min_output_len=0.0,
max_output_input_ratio=float('inf'),
min_output_input_ratio=0.0):
"""Manifest Dataset
Args:
            manifest_path (str): manifest json file path
            max_input_len (float, optional): maximum input seq length,
                in seconds for raw wav, in frame numbers for feature data. Defaults to float('inf').
            min_input_len (float, optional): minimum input seq length,
                in seconds for raw wav, in frame numbers for feature data. Defaults to 0.0.
            max_output_len (float, optional): maximum output seq length,
                in modeling units. Defaults to float('inf').
            min_output_len (float, optional): minimum output seq length,
                in modeling units. Defaults to 0.0.
            max_output_input_ratio (float, optional): maximum output seq length / input seq length ratio.
                Defaults to float('inf').
            min_output_input_ratio (float, optional): minimum output seq length / input seq length ratio.
                Defaults to 0.0.
"""
super().__init__()
# read manifest
self._manifest = read_manifest(
manifest_path=manifest_path,
max_input_len=max_input_len,
min_input_len=min_input_len,
max_output_len=max_output_len,
min_output_len=min_output_len,
max_output_input_ratio=max_output_input_ratio,
min_output_input_ratio=min_output_input_ratio)
self._manifest.sort(key=lambda x: x["feat_shape"][0])
def __len__(self):
return len(self._manifest)
def __getitem__(self, idx):
return self._manifest[idx]
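# A minimal usage sketch (hypothetical manifest path; limit semantics as
# documented above):
#   dataset = ManifestDataset(
#       manifest_path='data/manifest.train',
#       max_input_len=27.0,   # seconds for raw wav
#       min_input_len=0.5)
#   first = dataset[0]        # one manifest entry (a dict), sorted by feat length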
class TransformDataset(Dataset):
"""Transform Dataset.
Args:
data: list object from make_batchset
converter: batch function
reader: read data
"""
def __init__(self, data, converter, reader):
"""Init function."""
super().__init__()
self.data = data
self.converter = converter
self.reader = reader
def __len__(self):
"""Len function."""
return len(self.data)
def __getitem__(self, idx):
"""[] operator."""
return self.converter([self.reader(self.data[idx], return_uttid=True)])
class AudioDataset(Dataset):
def __init__(self,
data_file,
max_length=10240,
min_length=0,
token_max_length=200,
token_min_length=1,
batch_type='static',
batch_size=1,
max_frames_in_batch=0,
sort=True,
raw_wav=True,
stride_ms=10):
"""Dataset for loading audio data.
Attributes::
data_file: input data file
                Plain text data file, where each line contains the following 7
                fields, separated by '\t':
utt:utt1
feat:tmp/data/file1.wav or feat:tmp/data/fbank.ark:30
feat_shape: 4.95(in seconds) or feat_shape:495,80(495 is in frames)
text:i love you
token: i <space> l o v e <space> y o u
tokenid: int id of this token
                token_shape: M,N # M is the number of tokens, N is vocab size
            max_length: drop utterances longer than max_length, in units of 10ms frames.
            min_length: drop utterances shorter than min_length, in units of 10ms frames.
            token_max_length: drop utterances with more tokens than token_max_length,
                especially when using char units for English modeling.
            token_min_length: drop utterances with fewer tokens than token_min_length.
            batch_type: static or dynamic, see max_frames_in_batch (dynamic)
batch_size: number of utterances in a batch,
it's for static batch size.
max_frames_in_batch: max feature frames in a batch,
when batch_type is dynamic, it's for dynamic batch size.
Then batch_size is ignored, we will keep filling the
batch until the total frames in batch up to max_frames_in_batch.
            sort: whether to sort all data, so that utterances with similar
                lengths can be grouped into the same batch.
            raw_wav: use raw waveforms or extracted features.
                If raw waveforms are used, dynamic waveform-level augmentation
                can be applied and the features are extracted by torchaudio.
                If extracted features (e.g. by kaldi) are used, only feature-level
                augmentation such as specaug can be applied.
"""
assert batch_type in ['static', 'dynamic']
# read manifest
data = read_manifest(data_file)
if sort:
data = sorted(data, key=lambda x: x["feat_shape"][0])
if raw_wav:
            # raw-wav manifests must reference audio files, not kaldi ark/scp features
            assert os.path.splitext(data[0]['feat'].split(':')[0])[-1] not in ('.ark', '.scp')
            # convert feat_shape[0] from seconds to 10ms frame counts for the filters below
            for x in data:
                x['feat_shape'][0] = float(x['feat_shape'][0]) * 1000 / stride_ms
self.input_dim = data[0]['feat_shape'][1]
self.output_dim = data[0]['token_shape'][1]
# with open(data_file, 'r') as f:
# for line in f:
# arr = line.strip().split('\t')
# if len(arr) != 7:
# continue
# key = arr[0].split(':')[1]
# tokenid = arr[5].split(':')[1]
# output_dim = int(arr[6].split(':')[1].split(',')[1])
# if raw_wav:
# wav_path = ':'.join(arr[1].split(':')[1:])
# duration = int(float(arr[2].split(':')[1]) * 1000 / 10)
# data.append((key, wav_path, duration, tokenid))
# else:
# feat_ark = ':'.join(arr[1].split(':')[1:])
# feat_info = arr[2].split(':')[1].split(',')
# feat_dim = int(feat_info[1].strip())
# num_frames = int(feat_info[0].strip())
# data.append((key, feat_ark, num_frames, tokenid))
# self.input_dim = feat_dim
# self.output_dim = output_dim
valid_data = []
for i in range(len(data)):
length = data[i]['feat_shape'][0]
token_length = data[i]['token_shape'][0]
# remove too lang or too short utt for both input and output
# to prevent from out of memory
if length > max_length or length < min_length:
# logging.warn('ignore utterance {} feature {}'.format(
# data[i][0], length))
pass
elif token_length > token_max_length or token_length < token_min_length:
pass
else:
valid_data.append(data[i])
data = valid_data
self.minibatch = []
num_data = len(data)
# Dynamic batch size
if batch_type == 'dynamic':
assert (max_frames_in_batch > 0)
self.minibatch.append([])
num_frames_in_batch = 0
for i in range(num_data):
length = data[i]['feat_shape'][0]
num_frames_in_batch += length
if num_frames_in_batch > max_frames_in_batch:
self.minibatch.append([])
num_frames_in_batch = length
self.minibatch[-1].append(data[i])
# Static batch size
else:
cur = 0
while cur < num_data:
end = min(cur + batch_size, num_data)
item = []
for i in range(cur, end):
item.append(data[i])
self.minibatch.append(item)
cur = end
def __len__(self):
return len(self.minibatch)
def __getitem__(self, idx):
return self.minibatch[idx]
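# Batching sketch with illustrative numbers: given batch_type='dynamic' and
# max_frames_in_batch=1000, utterances of 400, 300 and 500 frames are grouped
# as [400, 300] (700 frames; adding 500 would exceed the cap) and then [500].
# With batch_type='static' and batch_size=2, the sorted data is simply chunked
# into consecutive pairs.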
|
[
"paddlespeech.s2t.utils.log.Log",
"paddlespeech.s2t.frontend.utility.read_manifest"
] |
[((981, 994), 'paddlespeech.s2t.utils.log.Log', 'Log', (['__name__'], {}), '(__name__)\n', (984, 994), False, 'from paddlespeech.s2t.utils.log import Log\n'), ((3712, 3981), 'paddlespeech.s2t.frontend.utility.read_manifest', 'read_manifest', ([], {'manifest_path': 'manifest_path', 'max_input_len': 'max_input_len', 'min_input_len': 'min_input_len', 'max_output_len': 'max_output_len', 'min_output_len': 'min_output_len', 'max_output_input_ratio': 'max_output_input_ratio', 'min_output_input_ratio': 'min_output_input_ratio'}), '(manifest_path=manifest_path, max_input_len=max_input_len,\n min_input_len=min_input_len, max_output_len=max_output_len,\n min_output_len=min_output_len, max_output_input_ratio=\n max_output_input_ratio, min_output_input_ratio=min_output_input_ratio)\n', (3725, 3981), False, 'from paddlespeech.s2t.frontend.utility import read_manifest\n'), ((7354, 7378), 'paddlespeech.s2t.frontend.utility.read_manifest', 'read_manifest', (['data_file'], {}), '(data_file)\n', (7367, 7378), False, 'from paddlespeech.s2t.frontend.utility import read_manifest\n')]
|
import datetime
import factory
import uuid
from apps.fund.models import Donation, Order
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from onepercentclub.tests.factory_models.project_factories import OnePercentProjectFactory
def random_order_number(length=30):
return unicode(uuid.uuid4().hex)[0:length]
class OrderFactory(factory.DjangoModelFactory):
FACTORY_FOR = Order
user = factory.SubFactory(BlueBottleUserFactory)
order_number = factory.LazyAttribute(lambda t: random_order_number())
class DonationFactory(factory.DjangoModelFactory):
FACTORY_FOR = Donation
user = factory.SubFactory(BlueBottleUserFactory)
amount = 20
project = factory.SubFactory(OnePercentProjectFactory)
order = factory.SubFactory(OrderFactory)
status = 'pending'
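# Usage sketch (assumes a configured Django test database):
#   donation = DonationFactory(amount=50)  # SubFactories create user, project, order
#   assert len(donation.order.order_number) == 30  # random uuid4-hex order number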
|
[
"factory.SubFactory",
"uuid.uuid4"
] |
[((425, 466), 'factory.SubFactory', 'factory.SubFactory', (['BlueBottleUserFactory'], {}), '(BlueBottleUserFactory)\n', (443, 466), False, 'import factory\n'), ((633, 674), 'factory.SubFactory', 'factory.SubFactory', (['BlueBottleUserFactory'], {}), '(BlueBottleUserFactory)\n', (651, 674), False, 'import factory\n'), ((705, 749), 'factory.SubFactory', 'factory.SubFactory', (['OnePercentProjectFactory'], {}), '(OnePercentProjectFactory)\n', (723, 749), False, 'import factory\n'), ((762, 794), 'factory.SubFactory', 'factory.SubFactory', (['OrderFactory'], {}), '(OrderFactory)\n', (780, 794), False, 'import factory\n'), ((311, 323), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (321, 323), False, 'import uuid\n')]
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Photo
class PhotoForm(forms.ModelForm):
name = forms.TextInput()
image = forms.ImageField()
class Meta:
model = Photo
fields = ["name", "image"]
class RegisterForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ["username", "email", "<PASSWORD>", "<PASSWORD>"]
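# Usage sketch: RegisterForm extends Django's UserCreationForm, so a view would
# typically run form = RegisterForm(request.POST) and call form.save() once
# form.is_valid() passes.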
|
[
"django.forms.TextInput",
"django.forms.EmailField",
"django.forms.ImageField"
] |
[((197, 214), 'django.forms.TextInput', 'forms.TextInput', ([], {}), '()\n', (212, 214), False, 'from django import forms\n'), ((227, 245), 'django.forms.ImageField', 'forms.ImageField', ([], {}), '()\n', (243, 245), False, 'from django import forms\n'), ((372, 390), 'django.forms.EmailField', 'forms.EmailField', ([], {}), '()\n', (388, 390), False, 'from django import forms\n')]
|
import codecs
import os
import tempfile
import pytest
from pji.control.model import Identification, ResourceLimit
from .base import TASK_TEMPLATE_SUCCESS_1, TASK_TEMPLATE_SUCCESS_2
from ..section.section.base import COMPLEX_TEXT
# noinspection DuplicatedCode
@pytest.mark.unittest
class TestServiceTaskTask:
def test_task_simple(self):
tt = TASK_TEMPLATE_SUCCESS_1
with tempfile.TemporaryDirectory() as scriptdir:
with codecs.open(os.path.join(scriptdir, 'README.md'), 'w') as of:
of.write(COMPLEX_TEXT)
t = tt(
scriptdir=scriptdir,
resources=dict(max_real_time='1.0s'),
environ=dict(K='233'),
)
assert t.name == 'task1_x233x'
assert t.identification == Identification.loads('nobody')
assert t.resources == ResourceLimit.loads(dict(max_real_time='1.0s'))
assert t.environ == {'K': '233', 'NAME': 'x233x', 'PJI_TASK_NAME': 'task1_x233x'}
assert len(t.sections.getters) == 1
assert repr(t) == "<Task name: 'task1_x233x', identification: <Identification user: nobody, " \
"group: nogroup>, resources: <ResourceLimit real time: 1.000s>, " \
"sections: <SectionCollection sections: ('name_233',)>>"
def test_task_invalid(self):
tt = TASK_TEMPLATE_SUCCESS_1
with tempfile.TemporaryDirectory() as scriptdir:
with codecs.open(os.path.join(scriptdir, 'README.md'), 'w') as of:
of.write(COMPLEX_TEXT)
with pytest.raises(ValueError):
tt(
scriptdir=scriptdir,
resources=dict(max_real_time='1.0s'),
environ=dict(K='???'),
)
def test_task_call(self):
tt = TASK_TEMPLATE_SUCCESS_2
with tempfile.TemporaryDirectory() as scriptdir:
with codecs.open(os.path.join(scriptdir, 'README.md'), 'w') as of:
of.write(COMPLEX_TEXT)
t = tt(
scriptdir=scriptdir,
resources=dict(max_real_time='1.0s'),
environ=dict(K='233', ENV='xxx', VF='123'),
)
_success, _results = t()
assert _success
_name_1, (_section_1_success, _section_1_results, _section_1_info) = _results[0]
assert _name_1 == 'name_233'
assert _section_1_success
assert len(_section_1_results) == 4
assert _section_1_results[0].ok
assert _section_1_results[1].ok
assert _section_1_results[2].ok
assert _section_1_results[3].ok
assert _section_1_info == {'static': 'this is v : 233', 'value': 233,
'local': 'I have a dream that one day, down in Alabama, with its '
'vicious racists, \nwith its governor having his lips '
'dripping with the words of "interposition" and "nullification"\n'
' -- one day right there in Alabama little black boys and black '
'girls will be able to join \n hands with little white boys and '
'white girls as sisters and brothers.',
'tag': 'I have a dream that one day, down in Alabama, with its vicious '
'racists, \nwith its governor having his lips dripping with the '
'words of "interposition" and "nullification"\n -- one day right '
'there in Alabama little black boys and black girls will be able to '
'join \n hands with little white boys and white girls as sisters '
'and brothers.',
'base64': 'SSBoYXZlIGEgZHJlYW0gdGhhdCBvbmUgZGF5LCBkb3duIGluIEFsYWJhbWEsIHd'
'pdGggaXRzIHZp\nY2lvdXMgcmFjaXN0cywgCndpdGggaXRzIGdvdmVybm9yIGhh'
'dmluZyBoaXMgbGlwcyBkcmlwcGlu\nZyB3aXRoIHRoZSB3b3JkcyBvZiAiaW50Z'
'XJwb3NpdGlvbiIgYW5kICJudWxsaWZpY2F0aW9uIgog\nLS0gb25lIGRheSByaW'
'dodCB0aGVyZSBpbiBBbGFiYW1hIGxpdHRsZSBibGFjayBib3lzIGFuZCBi\nbGF'
'jayBnaXJscyB3aWxsIGJlIGFibGUgdG8gam9pbiAKIGhhbmRzIHdpdGggbGl0dG'
'xlIHdoaXRl\nIGJveXMgYW5kIHdoaXRlIGdpcmxzIGFzIHNpc3RlcnMgYW5kIGJ'
'yb3RoZXJzLg==\n'}
_name_2, (_section_2_success, _section_2_results, _section_2_info) = _results[1]
assert _name_2 == 'name_2_123233'
assert _section_2_success
assert len(_section_2_results) == 3
assert _section_2_results[0].ok
assert _section_2_results[1].ok
assert _section_2_results[2].ok
assert _section_2_info == {'static': 'this is vt : 123233',
'tag_1': 'I have a dream that one day, down in Alabama, with its vicious '
'racists, \nwith its governor having his lips dripping with the '
'words of "interposition" and "nullification"\n -- one day right '
'there in Alabama little black boys and black girls will be able '
'to join \n hands with little white boys and white girls as sisters '
'and brothers.',
'tag_2': '<KEY>IGEgZ<KEY>Z<KEY>IG<KEY>sIHdpdGgg'
'aXRzIHZp\nY2lvdXMgcmFjaXN0cywgCndpdGggaXRzIGdvdmVybm9yIGhhdmluZyBoaX'
'MgbGlwcyBkcmlwcGlu\nZyB3aXRoIHRoZSB3b3JkcyBvZiAiaW50ZXJwb3NpdGlvbiIg'
'YW5kICJudWxsaWZpY2F0aW9uIgog\nLS0gb25lIGRheSByaWdodCB0aGVyZSBpbiBBbG'
'<KEY>ZSBibGFjayBib3lzIG<KEY>IGFi'
'bGUgdG8gam9pbiAKIGhhbmRzIHdpdGggbGl0dGxlIHdoaXRl\nIGJveXMgYW5kIHdoaX'
'RlIGdpcmxzIGFzIHNpc3RlcnMgYW5kIG<KEY>g==\n',
'tag_3t': 'sys\n',
'tag_4t': 'SSBoYXZlIGEgZHJlYW0gdGhhdCBvbmUgZGF5LCBkb3duIGluIEFsYWJhbWEsIHdpdGg'
'gaXRzIHZp\nY2lvdXMgcmFjaXN0cywgCndpdGggaXRzIGdvdmVybm9yIGhhdmluZyBo'
'aXMgbGlwcyBkcmlwcGlu\nZyB3aXRoIHRoZSB3b3JkcyBvZiAiaW50ZXJwb3NpdGlvb'
'iIgYW5kICJudWxsaWZpY2F0aW9uIgog\nLS0gb25lIGRheSByaW<KEY>i'
'BBbGFiYW1hIG<KEY>IGFuZCBi\nbGFjayBnaXJscyB3aWxsIGJ'
'lIGFibGUgdG8gam9pbiAKIGhhbmRzIHdpdGggbGl0dGxlIHdoaXRl\nIGJveXMgYW5k'
'IHdoaXRlIGdpcmxzIGFzIHNpc3RlcnMgYW5kIGJyb3RoZXJzLg==\n',
'tag_5t': 'U1NCb1lYWmxJR0VnWkhKbFlXMGdkR2hoZENCdmJtVWdaR0Y1TENCa2IzZHVJR2x1SUV'
'Gc1lXSmhi\nV0VzSUhkcGRHZ2dhWFJ6SUhacApZMmx2ZFhNZ2NtRmphWE4wY3l3Z0Nu'
'ZHBkR2dnYVhSeklHZHZk\nbVZ5Ym05eUlHaGhkbWx1WnlCb2FYTWdiR2x3Y3lCa2Ntb'
'HdjR2x1Clp5QjNhWFJvSUhSb1pTQjNi\nM0prY3lCdlppQWlhVzUwWlhKd2IzTnBkR2'
'x2YmlJZ1lXNWtJQ0p1ZFd4c2FXWnBZMkYwYVc5dUln\nb2cKTFMwZ2IyNWxJR1JoZVN'
'CeWFXZG9kQ0IwYUdWeVpTQnBiaUJCYkdGaVlXMWhJR3hwZEhSc1pT\nQmliR0ZqYXlC'
'aWIzbHpJR0Z1WkNCaQpiR0ZqYXlCbmFYSnNjeUIzYVd4c0lHSmxJR0ZpYkdVZ2RH\nO'
'GdhbTlwYmlBS0lHaGhibVJ6SUhkcGRHZ2diR2wwZEd4bElIZG9hWFJsCklHSnZlWE1n'
'WVc1a0lI\nZG9hWFJsSUdkcGNteHpJR0Z6SUhOcGMzUmxjbk1nWVc1a0lHSnliM1JvW'
'lhKekxnPT0K\n'}
|
[
"os.path.join",
"pytest.raises",
"tempfile.TemporaryDirectory",
"pji.control.model.Identification.loads"
] |
[((395, 424), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (422, 424), False, 'import tempfile\n'), ((1438, 1467), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1465, 1467), False, 'import tempfile\n'), ((1906, 1935), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1933, 1935), False, 'import tempfile\n'), ((805, 835), 'pji.control.model.Identification.loads', 'Identification.loads', (['"""nobody"""'], {}), "('nobody')\n", (825, 835), False, 'from pji.control.model import Identification, ResourceLimit\n'), ((1617, 1642), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1630, 1642), False, 'import pytest\n'), ((468, 504), 'os.path.join', 'os.path.join', (['scriptdir', '"""README.md"""'], {}), "(scriptdir, 'README.md')\n", (480, 504), False, 'import os\n'), ((1511, 1547), 'os.path.join', 'os.path.join', (['scriptdir', '"""README.md"""'], {}), "(scriptdir, 'README.md')\n", (1523, 1547), False, 'import os\n'), ((1979, 2015), 'os.path.join', 'os.path.join', (['scriptdir', '"""README.md"""'], {}), "(scriptdir, 'README.md')\n", (1991, 2015), False, 'import os\n')]
|
import pandas as pd
import json
from pprint import pprint
def JSONLineToDict(JSONRoute):
    '''
    Helper function that, given a file with one JSON object per line,
    opens it and converts it into a list of dictionaries
    '''
with open(JSONRoute) as f:
jsonList = list(f)
    return [json.loads(jsonLine) for jsonLine in jsonList]
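# Expected input for JSONLineToDict: a JSON-lines file, one object per line,
# e.g. {"question": "..."} on each line ('question' is the key this script looks up below).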
def findValueIndex(dictList, key, value):
    '''
    Helper function that, given a list of dictionaries and a target value,
    finds which dictionary in the list holds that value under the given key
    '''
    for i, d in enumerate(dictList):
        if d[key] == value:
return i
return -1
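# Example: findValueIndex([{'question': 'a'}, {'question': 'b'}], 'question', 'b') -> 1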
def extractLatestQuestionCSV(csvRoute):
df = pd.read_csv(csvRoute, sep=";")
    return df.iloc[-1, 0]  # last row, first column: the most recently logged question
dictList = JSONLineToDict("Vanilla_Dataset_Test.json")
#pprint(dictList)
#print(len(dictList))
#dictList[:] = [value for counter, value in enumerate(dictList) if counter > 10635]
#print(len(dictList))
question = extractLatestQuestionCSV("VANiLLA.csv")
#print(question)
print("Index:", findValueIndex(dictList, 'question', question))
|
[
"pandas.read_csv",
"json.loads"
] |
[((727, 757), 'pandas.read_csv', 'pd.read_csv', (['csvRoute'], {'sep': '""";"""'}), "(csvRoute, sep=';')\n", (738, 757), True, 'import pandas as pd\n'), ((334, 354), 'json.loads', 'json.loads', (['jsonLine'], {}), '(jsonLine)\n', (344, 354), False, 'import json\n')]
|
import logging
import telegram
import datetime
from time import sleep
from toolbox import ToolBox
from threading import Thread
from telegram.ext import Updater
from telegram.ext import Filters
from telegram.ext import MessageHandler
from telegram.ext import CommandHandler
class Bot(object):
def __init__(self):
self.updater = Updater('TOKEN')
self.update_id = None
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
self.dp = self.updater.dispatcher
        # Register the command handlers
        self.dp.add_handler(CommandHandler("start", self._init_config))
        # Handle only messages that carry a document attachment
        self.dp.add_handler(MessageHandler(Filters.document, self._receive_file))
        # Start the bot
        self.updater.start_polling()
self.updater.idle()
def _init_config(self, bot, update):
try:
self.update_id = bot.get_updates()[0].update_id
except BaseException as e:
self.update_id = None
        update.message.reply_text('Hello! I will walk you through the setup \
            so that I can access your invoices and remind you about them =D')
        update.message.reply_text('First, send the .json file with your Google API credentials')
def _receive_file(self, bot, update):
table = -1
try:
            # Receive the .json key file and connect to the user's spreadsheet
file_id = update.message.document.file_id
_json_key = bot.get_file(file_id)
_json_key.download('bot/keys/' + str(file_id) + '.json')
table = ToolBox.connect_api(str(file_id) + '.json', 'faturas')
            if table == -1:  # ToolBox.connect_api signals failure with -1
                update.message.reply_text('Error while connecting to the expenses spreadsheet, please send the file again')
            else:
                update.message.reply_text('Done! I now have access to your invoices; I will check them regularly and send you notifications about them')
                # Spawn a thread so the bot stays usable while the checks run
                thread = Thread(target=self.send_notify, args=(bot, update, table))
thread.start()
except BaseException as e:
print(e)
def send_notify(self, bot, update, table):
        # Check the invoices and send a notification when one is overdue
        while True:
            update.message.reply_text('I am checking your invoices now')
            itens_table = table.get_all_values()[1:]
            # Today's date
            hoje = datetime.date.today()
            for fatura in itens_table:
                if fatura[-1] != 'Fechado':  # 'Fechado' marks the invoice as closed in the sheet
                    # The due date is stored as dd-mm-yyyy; reverse it into [yyyy, mm, dd]
                    vencimento = fatura[2].split('-')[::-1]
                    data_vencimento = datetime.date(int(vencimento[0]),
                                                  int(vencimento[1]),
                                                  int(vencimento[2]))
                    atraso = (hoje - data_vencimento).days
                    if atraso > int(fatura[-2]):
                        update.message.reply_text('The following invoice needs to be paid: \n ' +
                                                  '- Issue date: ' + fatura[1] +
                                                  '\n - Due date: ' + fatura[2] +
                                                  '\n - Company name: ' + fatura[3] +
                                                  '\n - Invoice amount: ' + fatura[4] +
                                                  '\n - Invoice status: ' + fatura[6])
            # Wait one day before the next check
            sleep(86400)
if __name__ == '__main__':
Bot()
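# Note: 'TOKEN' above is a placeholder; supply a real bot token (issued by
# Telegram's @BotFather) before running.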
|
[
"threading.Thread",
"logging.basicConfig",
"datetime.date.today",
"time.sleep",
"telegram.ext.Updater",
"telegram.ext.MessageHandler",
"telegram.ext.CommandHandler"
] |
[((342, 358), 'telegram.ext.Updater', 'Updater', (['"""TOKEN"""'], {}), "('TOKEN')\n", (349, 358), False, 'from telegram.ext import Updater\n'), ((398, 485), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (417, 485), False, 'import logging\n'), ((587, 629), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""start"""', 'self._init_config'], {}), "('start', self._init_config)\n", (601, 629), False, 'from telegram.ext import CommandHandler\n'), ((699, 751), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.document', 'self._receive_file'], {}), '(Filters.document, self._receive_file)\n', (713, 751), False, 'from telegram.ext import MessageHandler\n'), ((2670, 2691), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2689, 2691), False, 'import datetime\n'), ((3752, 3764), 'time.sleep', 'sleep', (['(86400)'], {}), '(86400)\n', (3757, 3764), False, 'from time import sleep\n'), ((2196, 2254), 'threading.Thread', 'Thread', ([], {'target': 'self.send_notify', 'args': '(bot, update, table)'}), '(target=self.send_notify, args=(bot, update, table))\n', (2202, 2254), False, 'from threading import Thread\n')]
|
"""
Message editor with a wheel zoom functionality
"""
# pylint: disable=bad-continuation
from PyQt4 import QtCore, QtGui
class MessageCompose(QtGui.QTextEdit):
"""Editor class with wheel zoom functionality"""
def __init__(self, parent=0):
super(MessageCompose, self).__init__(parent)
self.setAcceptRichText(False)
self.defaultFontPointSize = self.currentFont().pointSize()
def wheelEvent(self, event):
"""Mouse wheel scroll event handler"""
if (
QtGui.QApplication.queryKeyboardModifiers() & QtCore.Qt.ControlModifier
) == QtCore.Qt.ControlModifier and event.orientation() == QtCore.Qt.Vertical:
if event.delta() > 0:
self.zoomIn(1)
else:
self.zoomOut(1)
zoom = self.currentFont().pointSize() * 100 / self.defaultFontPointSize
QtGui.QApplication.activeWindow().statusBar().showMessage(
QtGui.QApplication.translate("MainWindow", "Zoom level %1%").arg(
str(zoom)
)
)
else:
# in QTextEdit, super does not zoom, only scroll
super(MessageCompose, self).wheelEvent(event)
def reset(self):
"""Clear the edit content"""
self.setText('')
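# Minimal usage sketch (assumes a PyQt4 environment; the names below are
# illustrative and not part of this module):
#   app = QtGui.QApplication([])
#   editor = MessageCompose()
#   editor.show()
#   app.exec_()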
|
[
"PyQt4.QtGui.QApplication.translate",
"PyQt4.QtGui.QApplication.queryKeyboardModifiers",
"PyQt4.QtGui.QApplication.activeWindow"
] |
[((515, 558), 'PyQt4.QtGui.QApplication.queryKeyboardModifiers', 'QtGui.QApplication.queryKeyboardModifiers', ([], {}), '()\n', (556, 558), False, 'from PyQt4 import QtCore, QtGui\n'), ((959, 1019), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""MainWindow"""', '"""Zoom level %1%"""'], {}), "('MainWindow', 'Zoom level %1%')\n", (987, 1019), False, 'from PyQt4 import QtCore, QtGui\n'), ((884, 917), 'PyQt4.QtGui.QApplication.activeWindow', 'QtGui.QApplication.activeWindow', ([], {}), '()\n', (915, 917), False, 'from PyQt4 import QtCore, QtGui\n')]
|
"""
This tests when a Datastore experiences some typical changes to the underlying definition.
Test Cases:
- Table is renamed.
- Table is created.
- Table is dropped.
- Column is dropped.
- [PENDING] Column attributes are updated.
"""
from app.revisioner.tests.e2e import inspected
from app.revisioner.tests.test_e2e import mutate_inspected
preload_fixtures = ['datastore']
inspected_tables = mutate_inspected(inspected.tables_and_views, [
# (1) We renamed the table `employee.departments` to `employee.depts`.
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16392
),
"metadata": {
"field": "table_name",
"new_value": "depts",
},
},
# (2) We dropped the `app`.`productlines` table at some point.
{
"type": "dropped",
"filters": (
lambda row: row['table_object_id'] == 16456
),
},
    # (3) We dropped a column (`app`.`products`.`msrp`, object id "16488/9") at some point.
{
"type": "dropped",
"filters": (
lambda row: row['table_object_id'] == 16488
),
"column_filters": (
lambda col: col['column_object_id'] == "16488/9"
),
},
# (4) The column `app`.`customers`.`postalcode` has a `default_value` change.
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16442
),
"column_filters": (
lambda col: col['column_name'] == "postalcode"
),
"metadata": {
"field": "columns.default_value",
"new_value": "default_sequence()",
},
},
# (5) The column `app`.`orders`.`status` has a data type change.
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16465
),
"column_filters": (
lambda col: col['column_name'] == "status"
),
"metadata": {
"field": "columns.data_type",
"new_value": "integer",
},
},
# (6) Comment was added to a resource.
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16442
),
"column_filters": (
lambda col: col['column_name'] == "postalcode"
),
"metadata": {
"field": "columns.column_description",
"new_value": "5-digit mailing code",
},
},
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16465
),
"column_filters": (
lambda col: col['column_name'] == "status"
),
"metadata": {
"field": "columns.max_length",
"new_value": 50,
},
},
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16465
),
"column_filters": (
lambda col: col['column_name'] == "status"
),
"metadata": {
"field": "columns.numeric_scale",
"new_value": 0,
},
},
    # (7) The column `app`.`orders`.`amount` is changed.
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16478
),
"column_filters": (
lambda col: col['column_name'] == "amount"
),
"metadata": {
"field": "columns.is_nullable",
"new_value": True,
},
},
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16478
),
"column_filters": (
lambda col: col['column_name'] == "amount"
),
"metadata": {
"field": "columns.column_name",
"new_value": "dollar_amount",
},
},
])
# (8) We create a brand new table called `app.categories`.
inspected_tables += [
{
"schema_object_id": 16441,
"table_schema": "app",
"table_object_id": 99999,
"table_name": "categories",
"table_type": "base table",
"properties": {},
"columns": [
{
"column_object_id": "99999/1",
"column_name": "category_id",
"column_description": None,
"ordinal_position": 1,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "99999/1",
"column_name": "name",
"column_description": None,
"ordinal_position": 2,
"data_type": "varchar",
"max_length": 256,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
]
}
]
test_cases = [
{
"model": "Table",
"description": "Expect `employees.departments` table to be renamed.",
"filters": {
"schema__name": "employees",
"object_ref": "16392",
},
"assertions": [
{
"evaluation": lambda datastore, table: table.name,
"pass_value": "depts",
},
# It has a new object identifier due to the name change.
{
"evaluation": lambda datastore, table: table.object_id,
"pass_value": "2<PASSWORD>",
},
# It retains associated metadata.
{
"evaluation": lambda datastore, table: table.tags,
"pass_value": ["one", "two"],
},
]
},
{
"model": "Table",
"description": "Expect `app.departments` table NOT be be renamed.",
"filters": {
"schema__name": "app",
"object_ref": "16522",
},
"assertions": [
{
"evaluation": lambda datastore, table: table.name,
"pass_value": "departments",
},
{
"evaluation": lambda datastore, table: table.object_id,
"pass_value": "<PASSWORD>",
},
]
},
{
"model": "Table",
"description": "Expect `app.productlines` table to be deleted.",
"filters": {
"schema__name": "app",
"name": "productlines",
},
"assertions": [
{
"evaluation": lambda datastore, table: table,
"pass_value": None,
}
],
},
{
"model": "Column",
"description": "Expect `app.productlines` columns to be deleted.",
"filters": {
"table__schema__name": "app",
"table__name": "productlines",
},
"assertions": [
{
"evaluation": lambda datastore, column: column,
"pass_value": None,
}
],
},
{
"model": "Column",
"description": "Expect `app.products.msrp` column to be deleted.",
"filters": {
"pk": 44,
},
"assertions": [
{
"evaluation": lambda datastore, column: column,
"pass_value": None,
}
],
},
{
"model": "Table",
"description": "Expect `app.categories` table to be created.",
"filters": {
"schema__name": "app",
"name": "categories",
"object_ref": "99999",
},
"assertions": [
{
"evaluation": lambda datastore, table: table.name,
"pass_value": "categories",
},
{
"evaluation": lambda datastore, table: table.columns.count(),
"pass_value": 2,
}
]
},
{
"model": "Column",
"description": "The column `app`.`customers`.`postalcode` has a default_value change.",
"filters": {
"table__schema__name": "app",
"table__object_ref": "16442",
"name": "postalcode",
},
"assertions": [
{
"evaluation": lambda datastore, column: column.ordinal_position,
"pass_value": 10,
},
{
"evaluation": lambda datastore, column: column.default_value,
"pass_value": "default_sequence()",
},
{
"evaluation": lambda datastore, column: column.db_comment,
"pass_value": "5-digit mailing code",
}
]
},
{
"model": "Column",
"description": "The column `app`.`orders`.`status` has a data type change.",
"filters": {
"table__schema__name": "app",
"table__object_ref": "16465",
"name": "status",
},
"assertions": [
{
"evaluation": lambda datastore, column: column.data_type,
"pass_value": "integer",
},
{
"evaluation": lambda datastore, column: column.max_length,
"pass_value": 50,
},
{
"evaluation": lambda datastore, column: column.numeric_scale,
"pass_value": 0,
},
]
},
{
"model": "Column",
"description": "The column `app`.`orders`.`amount` has changed.",
"filters": {
"table__schema__name": "app",
"table__object_ref": "16478",
"ordinal_position": 4,
},
"assertions": [
{
"evaluation": lambda datastore, column: column.full_data_type,
"pass_value": "numeric(10, 2)",
},
{
"evaluation": lambda datastore, column: column.name,
"pass_value": "dollar_amount",
},
{
"evaluation": lambda datastore, column: column.is_nullable,
"pass_value": True,
},
]
},
]
|
[
"app.revisioner.tests.test_e2e.mutate_inspected"
] |
[((409, 2435), 'app.revisioner.tests.test_e2e.mutate_inspected', 'mutate_inspected', (['inspected.tables_and_views', "[{'type': 'modified', 'filters': lambda row: row['table_object_id'] == \n 16392, 'metadata': {'field': 'table_name', 'new_value': 'depts'}}, {\n 'type': 'dropped', 'filters': lambda row: row['table_object_id'] == \n 16456}, {'type': 'dropped', 'filters': lambda row: row[\n 'table_object_id'] == 16488, 'column_filters': lambda col: col[\n 'column_object_id'] == '16488/9'}, {'type': 'modified', 'filters': lambda\n row: row['table_object_id'] == 16442, 'column_filters': lambda col: col\n ['column_name'] == 'postalcode', 'metadata': {'field':\n 'columns.default_value', 'new_value': 'default_sequence()'}}, {'type':\n 'modified', 'filters': lambda row: row['table_object_id'] == 16465,\n 'column_filters': lambda col: col['column_name'] == 'status',\n 'metadata': {'field': 'columns.data_type', 'new_value': 'integer'}}, {\n 'type': 'modified', 'filters': lambda row: row['table_object_id'] == \n 16442, 'column_filters': lambda col: col['column_name'] == 'postalcode',\n 'metadata': {'field': 'columns.column_description', 'new_value':\n '5-digit mailing code'}}, {'type': 'modified', 'filters': lambda row: \n row['table_object_id'] == 16465, 'column_filters': lambda col: col[\n 'column_name'] == 'status', 'metadata': {'field': 'columns.max_length',\n 'new_value': 50}}, {'type': 'modified', 'filters': lambda row: row[\n 'table_object_id'] == 16465, 'column_filters': lambda col: col[\n 'column_name'] == 'status', 'metadata': {'field':\n 'columns.numeric_scale', 'new_value': 0}}, {'type': 'modified',\n 'filters': lambda row: row['table_object_id'] == 16478,\n 'column_filters': lambda col: col['column_name'] == 'amount',\n 'metadata': {'field': 'columns.is_nullable', 'new_value': True}}, {\n 'type': 'modified', 'filters': lambda row: row['table_object_id'] == \n 16478, 'column_filters': lambda col: col['column_name'] == 'amount',\n 'metadata': {'field': 'columns.column_name', 'new_value': 'dollar_amount'}}\n ]"], {}), "(inspected.tables_and_views, [{'type': 'modified',\n 'filters': lambda row: row['table_object_id'] == 16392, 'metadata': {\n 'field': 'table_name', 'new_value': 'depts'}}, {'type': 'dropped',\n 'filters': lambda row: row['table_object_id'] == 16456}, {'type':\n 'dropped', 'filters': lambda row: row['table_object_id'] == 16488,\n 'column_filters': lambda col: col['column_object_id'] == '16488/9'}, {\n 'type': 'modified', 'filters': lambda row: row['table_object_id'] == \n 16442, 'column_filters': lambda col: col['column_name'] == 'postalcode',\n 'metadata': {'field': 'columns.default_value', 'new_value':\n 'default_sequence()'}}, {'type': 'modified', 'filters': lambda row: row\n ['table_object_id'] == 16465, 'column_filters': lambda col: col[\n 'column_name'] == 'status', 'metadata': {'field': 'columns.data_type',\n 'new_value': 'integer'}}, {'type': 'modified', 'filters': lambda row: \n row['table_object_id'] == 16442, 'column_filters': lambda col: col[\n 'column_name'] == 'postalcode', 'metadata': {'field':\n 'columns.column_description', 'new_value': '5-digit mailing code'}}, {\n 'type': 'modified', 'filters': lambda row: row['table_object_id'] == \n 16465, 'column_filters': lambda col: col['column_name'] == 'status',\n 'metadata': {'field': 'columns.max_length', 'new_value': 50}}, {'type':\n 'modified', 'filters': lambda row: row['table_object_id'] == 16465,\n 'column_filters': lambda col: col['column_name'] == 'status',\n 'metadata': {'field': 'columns.numeric_scale', 
'new_value': 0}}, {\n 'type': 'modified', 'filters': lambda row: row['table_object_id'] == \n 16478, 'column_filters': lambda col: col['column_name'] == 'amount',\n 'metadata': {'field': 'columns.is_nullable', 'new_value': True}}, {\n 'type': 'modified', 'filters': lambda row: row['table_object_id'] == \n 16478, 'column_filters': lambda col: col['column_name'] == 'amount',\n 'metadata': {'field': 'columns.column_name', 'new_value':\n 'dollar_amount'}}])\n", (425, 2435), False, 'from app.revisioner.tests.test_e2e import mutate_inspected\n')]
|
#!/usr/bin/env python3
# vi:nu:et:sts=4 ts=4 sw=4
""" Generate SQL Applications for all the Test01 Input Data
Test01 Input Data has test data for each SQL Server type supported
by genapp so that it can be properly tested. This program scans
./misc/ for all the test01 application definitions and generates
them. Included in the generation process is the generation of
Jenkins support for building, testing and deploying to Docker Hub
each of the applications.
To simplify things, this script must be self-contained using only
the standard Python Library.
TODO:
- Finish jenkins/build generation
- Finish jenkins/deploy generation
- Finish jenkins/push generation
- Finish jenkins/test generation
- Finish jenkinsfile generation
- Finish application generation
"""
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import subprocess
import sys
sys.path.insert(0, './scripts')
import util # pylint: disable=wrong-import-position
################################################################################
# Object Classes and Functions
################################################################################
class Main(util.MainBase):
""" Main Command Execution Class
"""
def __init__(self):
super().__init__()
self.test_suffixes = ['01ma', '01ms', '01my', '01pg', '01sq']
self.genapp_name = 'genapp'
def arg_parse_add(self):
""" Add additional arguments.
"""
self.arg_prs.add_argument('-b', '--build', action='store_false', dest='flg_build',
default=True, help='Do not build genapp before using it'
)
self.arg_prs.add_argument('--appdir', action='store', dest='app_dir',
default='/tmp', help='Set Application Base Directory'
)
self.arg_prs.add_argument('--appname', action='store', dest='app_name',
default='app01', help='Set Application Base Name'
)
self.arg_prs.add_argument('--bindir', action='store', dest='bin_dir',
default='/tmp/bin', help='Set Binary Directory'
)
self.arg_prs.add_argument('--srcdir', action='store', dest='src_dir',
default='./cmd', help='Set genapp Source Directory'
)
self.arg_prs.add_argument('--mdldir', action='store', dest='mdl_dir',
default='./models', help='Set genapp Model Directory'
)
self.arg_prs.add_argument('--mscdir', action='store', dest='msc_dir',
default='./misc', help='Set genapp Misc Directory'
)
self.arg_prs.add_argument('--tstdir', action='store', dest='tst_dir',
default='./misc', help='Set genapp Test Directory'
)
def arg_parse_exec(self):
""" Execute the argument parsing.
Warning - Main should override this method if additional cli
arguments are needed or argparse needs some form of modification
before execution.
"""
self.arg_parse_setup()
self.arg_parse_add()
self.arg_parse_parse()
self.args.app_path = os.path.join(self.args.bin_dir, self.genapp_name)
def build(self):
""" Build the Golang program, genapp.
"""
try:
src_path = os.path.join(self.args.src_dir, self.genapp_name, '*.go')
if self.args.flg_debug:
print("\tapp_path:", self.args.app_path)
print("\tsrc_path:", src_path)
cmd = 'go build -o "{0}" -v -race {1}'.format(self.args.app_path, src_path)
if not self.args.flg_exec:
print("\tWould have executed:", cmd)
self.result_code = 0
else:
if not os.path.exists(self.args.bin_dir):
if self.args.flg_debug:
print("\tCreating dir:", self.args.bin_dir)
os.makedirs(self.args.bin_dir, 0o777)
print("\tExecuting:", cmd)
self.result_code = subprocess.call(cmd, shell=True)
except: # pylint: disable=bare-except
self.result_code = 8
def exec_pgm(self): # pylint: disable=no-self-use
""" Program Execution
Warning - Main should override this method and make certain that
it returns an exit code in self.result_code.
"""
        if len(self.args.args) > 0:
            print("ERROR - too many command arguments!")
            self.arg_prs.print_help()
            self.result_code = 4          # signal the usage error to the caller
            return
if self.args.flg_debug:
print('\tsrc_dir:', self.args.src_dir)
# Set up base objects, files and directories.
if not os.path.exists(self.args.app_dir):
print("\tCreating Directory:", self.args.app_dir)
if self.args.flg_exec:
os.makedirs(self.args.app_dir)
else:
print("\tWould have executed: mkdir -p", self.args.app_dir)
# Perform the specified actions.
try:
# Build genapp if needed.
if self.args.flg_build:
print("\tBuilding genapp...")
self.build()
# Generate the application subdirectories.
for suffix in self.test_suffixes:
print("\tCreating app for app{0}...".format(suffix))
self.genapp("test{0}.exec.json.txt".format(suffix))
if self.result_code != 0:
break
finally:
pass
print()
def genapp(self, file_name):
""" Generate a test application.
:arg szExecFileName: Exec JSON file name which is expected
to be in the szMiscDir.
:arg szOutPath: path to write the output to.
"""
exec_path = os.path.join(self.args.msc_dir, file_name)
app_path = os.path.join(self.args.bin_dir, self.genapp_name)
cmd = '"{0}" --mdldir {1} -x {2}'.format(app_path, self.args.mdl_dir, exec_path)
try:
self.result_code = 0
if self.args.flg_exec:
print("\tExecuting:", cmd)
os.system(cmd)
else:
print("\tWould have executed:", cmd)
except: # pylint: disable=bare-except
self.result_code = 4
################################################################################
# Command-line interface
################################################################################
if __name__ == '__main__':
Main().run()
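# Example invocation (flags are defined in arg_parse_add above; the script
# name is hypothetical):
#   python3 gen_test01_apps.py --bindir /tmp/bin --appdir /tmp --mscdir ./misc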
|
[
"os.makedirs",
"os.path.exists",
"sys.path.insert",
"os.system",
"subprocess.call",
"os.path.join"
] |
[((2217, 2248), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./scripts"""'], {}), "(0, './scripts')\n", (2232, 2248), False, 'import sys\n'), ((4850, 4899), 'os.path.join', 'os.path.join', (['self.args.bin_dir', 'self.genapp_name'], {}), '(self.args.bin_dir, self.genapp_name)\n', (4862, 4899), False, 'import os\n'), ((7645, 7687), 'os.path.join', 'os.path.join', (['self.args.msc_dir', 'file_name'], {}), '(self.args.msc_dir, file_name)\n', (7657, 7687), False, 'import os\n'), ((7707, 7756), 'os.path.join', 'os.path.join', (['self.args.bin_dir', 'self.genapp_name'], {}), '(self.args.bin_dir, self.genapp_name)\n', (7719, 7756), False, 'import os\n'), ((5016, 5073), 'os.path.join', 'os.path.join', (['self.args.src_dir', 'self.genapp_name', '"""*.go"""'], {}), "(self.args.src_dir, self.genapp_name, '*.go')\n", (5028, 5073), False, 'import os\n'), ((6506, 6539), 'os.path.exists', 'os.path.exists', (['self.args.app_dir'], {}), '(self.args.app_dir)\n', (6520, 6539), False, 'import os\n'), ((5755, 5787), 'subprocess.call', 'subprocess.call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (5770, 5787), False, 'import subprocess\n'), ((6654, 6684), 'os.makedirs', 'os.makedirs', (['self.args.app_dir'], {}), '(self.args.app_dir)\n', (6665, 6684), False, 'import os\n'), ((7986, 8000), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (7995, 8000), False, 'import os\n'), ((5472, 5505), 'os.path.exists', 'os.path.exists', (['self.args.bin_dir'], {}), '(self.args.bin_dir)\n', (5486, 5505), False, 'import os\n'), ((5639, 5674), 'os.makedirs', 'os.makedirs', (['self.args.bin_dir', '(511)'], {}), '(self.args.bin_dir, 511)\n', (5650, 5674), False, 'import os\n')]
|
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models import UniqueConstraint
from django.db.models.deletion import CASCADE
User = get_user_model()
class Bank(models.Model):
long_name = models.CharField(
verbose_name="Официальное польное наименование",
max_length=1000,
)
short_name = models.CharField(
verbose_name="Официальное сокращенное наименование",
max_length=500,
)
bic = models.CharField(
max_length=9,
verbose_name="Код BIC кредитной организации",
)
reg_date = models.DateField(
verbose_name="Дата регистрации кредитной организации в ЦБ",
)
name = models.CharField(
verbose_name="Наименование кредитной организации",
max_length=256,
)
ogrn = models.CharField(
verbose_name="Основной государственный регистрационный номер",
max_length=256,
)
reg_number = models.CharField(
verbose_name="Регистрационный номер кредитной организации в ЦБ",
max_length=256,
)
internal_number = models.CharField(
verbose_name="Внутренний номер кредитной организации в ЦБ",
max_length=256,
)
cregnr = models.CharField(
verbose_name="Дополнительный регистрационный номер в ЦБ",
max_length=256,
blank=True
)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
class Region(models.Model):
name = models.CharField(
verbose_name='Название региона',
max_length=1000,
)
code = models.PositiveSmallIntegerField(
verbose_name='Код региона',
unique=True
)
def __str__(self):
return f'{self.code} - {self.name}'
class Meta:
ordering = ['name']
class BalanceAccount(models.Model):
    # For example, 202 or 20202
indCode = models.CharField(
verbose_name='Номер счета',
max_length=30,
unique=True
)
name = models.CharField(
verbose_name='Название счетов баланса',
max_length=1000,
)
    # Order of the balance account (1 or 2)
indType = models.CharField(
verbose_name='Код счета',
max_length=30,
)
    # For example, chapter "A"
indChapter = models.CharField(
verbose_name='Код раздела',
max_length=30,
)
def __str__(self):
return f'{self.name}'
class Meta:
ordering = ['indCode']
class BanksBalance(models.Model):
date = models.DateField(
verbose_name="Балансовые данные на дату",
)
bank = models.ForeignKey(Bank,
verbose_name="Банк",
on_delete=CASCADE,
blank=False,
)
    # For example, 202 or 20202
indCode = models.ForeignKey(BalanceAccount,
verbose_name="Номер счета",
on_delete=CASCADE,
blank=False,
)
rub_balance = models.IntegerField(
verbose_name='Остаток в рублях на дату',
)
cur_balance = models.IntegerField(
verbose_name='Остаток в валюте на дату',
)
itog_balance = models.IntegerField(
verbose_name='Итоговый остаток в рублях и в валюте на дату',
)
ora = models.IntegerField(
verbose_name='Оборот в рублях по дебиту',
)
ova = models.IntegerField(
verbose_name='Оборот в валюте по дебиту',
)
oitga = models.IntegerField(
verbose_name='Итоговый оборот в рублях и в валюте по дебету',
)
orp = models.IntegerField(
verbose_name='Оборот в рублях по кредиту',
)
ovp = models.IntegerField(
verbose_name='Оборот в валюте по кредиту',
)
oitgp = models.IntegerField(
verbose_name='Итоговый оборот в рублях и в валюте по кредиту',
)
def __str__(self):
        return f'{self.date} : {self.indCode} {self.itog_balance}'
class Meta:
        constraints = [
            UniqueConstraint(fields=['date', 'indCode', 'bank'],
                             name='unique_date_indCode_bank'),
        ]
verbose_name = 'Остатки на дату и обороты за предыдущий период'
verbose_name_plural = 'Остатки на дату и обороты за предыдущий период'
ordering = ['-date']
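# Example query sketch (the BIC value is hypothetical; only fields defined
# above are used):
#   BanksBalance.objects.filter(bank__bic='044525225', date='2020-01-01')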
|
[
"django.db.models.UniqueConstraint",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.contrib.auth.get_user_model",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.IntegerField",
"django.db.models.DateField"
] |
[((205, 221), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (219, 221), False, 'from django.contrib.auth import get_user_model\n'), ((266, 352), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Официальное польное наименование"""', 'max_length': '(1000)'}), "(verbose_name='Официальное польное наименование',\n max_length=1000)\n", (282, 352), False, 'from django.db import models\n'), ((389, 478), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Официальное сокращенное наименование"""', 'max_length': '(500)'}), "(verbose_name='Официальное сокращенное наименование',\n max_length=500)\n", (405, 478), False, 'from django.db import models\n'), ((508, 584), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(9)', 'verbose_name': '"""Код BIC кредитной организации"""'}), "(max_length=9, verbose_name='Код BIC кредитной организации')\n", (524, 584), False, 'from django.db import models\n'), ((623, 699), 'django.db.models.DateField', 'models.DateField', ([], {'verbose_name': '"""Дата регистрации кредитной организации в ЦБ"""'}), "(verbose_name='Дата регистрации кредитной организации в ЦБ')\n", (639, 699), False, 'from django.db import models\n'), ((726, 813), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Наименование кредитной организации"""', 'max_length': '(256)'}), "(verbose_name='Наименование кредитной организации',\n max_length=256)\n", (742, 813), False, 'from django.db import models\n'), ((844, 944), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Основной государственный регистрационный номер"""', 'max_length': '(256)'}), "(verbose_name=\n 'Основной государственный регистрационный номер', max_length=256)\n", (860, 944), False, 'from django.db import models\n'), ((980, 1082), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Регистрационный номер кредитной организации в ЦБ"""', 'max_length': '(256)'}), "(verbose_name=\n 'Регистрационный номер кредитной организации в ЦБ', max_length=256)\n", (996, 1082), False, 'from django.db import models\n'), ((1123, 1219), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Внутренний номер кредитной организации в ЦБ"""', 'max_length': '(256)'}), "(verbose_name='Внутренний номер кредитной организации в ЦБ',\n max_length=256)\n", (1139, 1219), False, 'from django.db import models\n'), ((1252, 1358), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Дополнительный регистрационный номер в ЦБ"""', 'max_length': '(256)', 'blank': '(True)'}), "(verbose_name='Дополнительный регистрационный номер в ЦБ',\n max_length=256, blank=True)\n", (1268, 1358), False, 'from django.db import models\n'), ((1520, 1586), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Название региона"""', 'max_length': '(1000)'}), "(verbose_name='Название региона', max_length=1000)\n", (1536, 1586), False, 'from django.db import models\n'), ((1621, 1694), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'verbose_name': '"""Код региона"""', 'unique': '(True)'}), "(verbose_name='Код региона', unique=True)\n", (1653, 1694), False, 'from django.db import models\n'), ((1912, 1984), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Номер счета"""', 'max_length': '(30)', 'unique': '(True)'}), "(verbose_name='Номер счета', max_length=30, unique=True)\n", (1928, 1984), False, 'from 
django.db import models\n'), ((2026, 2099), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Название счетов баланса"""', 'max_length': '(1000)'}), "(verbose_name='Название счетов баланса', max_length=1000)\n", (2042, 2099), False, 'from django.db import models\n'), ((2179, 2236), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Код счета"""', 'max_length': '(30)'}), "(verbose_name='Код счета', max_length=30)\n", (2195, 2236), False, 'from django.db import models\n'), ((2307, 2366), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Код раздела"""', 'max_length': '(30)'}), "(verbose_name='Код раздела', max_length=30)\n", (2323, 2366), False, 'from django.db import models\n'), ((2539, 2597), 'django.db.models.DateField', 'models.DateField', ([], {'verbose_name': '"""Балансовые данные на дату"""'}), "(verbose_name='Балансовые данные на дату')\n", (2555, 2597), False, 'from django.db import models\n'), ((2624, 2700), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Bank'], {'verbose_name': '"""Банк"""', 'on_delete': 'CASCADE', 'blank': '(False)'}), "(Bank, verbose_name='Банк', on_delete=CASCADE, blank=False)\n", (2641, 2700), False, 'from django.db import models\n'), ((2775, 2873), 'django.db.models.ForeignKey', 'models.ForeignKey', (['BalanceAccount'], {'verbose_name': '"""Номер счета"""', 'on_delete': 'CASCADE', 'blank': '(False)'}), "(BalanceAccount, verbose_name='Номер счета', on_delete=\n CASCADE, blank=False)\n", (2792, 2873), False, 'from django.db import models\n'), ((2917, 2977), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""Остаток в рублях на дату"""'}), "(verbose_name='Остаток в рублях на дату')\n", (2936, 2977), False, 'from django.db import models\n'), ((3011, 3071), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""Остаток в валюте на дату"""'}), "(verbose_name='Остаток в валюте на дату')\n", (3030, 3071), False, 'from django.db import models\n'), ((3106, 3191), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""Итоговый остаток в рублях и в валюте на дату"""'}), "(verbose_name='Итоговый остаток в рублях и в валюте на дату'\n )\n", (3125, 3191), False, 'from django.db import models\n'), ((3212, 3273), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""Оборот в рублях по дебиту"""'}), "(verbose_name='Оборот в рублях по дебиту')\n", (3231, 3273), False, 'from django.db import models\n'), ((3299, 3360), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""Оборот в валюте по дебиту"""'}), "(verbose_name='Оборот в валюте по дебиту')\n", (3318, 3360), False, 'from django.db import models\n'), ((3388, 3474), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""Итоговый оборот в рублях и в валюте по дебету"""'}), "(verbose_name=\n 'Итоговый оборот в рублях и в валюте по дебету')\n", (3407, 3474), False, 'from django.db import models\n'), ((3495, 3557), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""Оборот в рублях по кредиту"""'}), "(verbose_name='Оборот в рублях по кредиту')\n", (3514, 3557), False, 'from django.db import models\n'), ((3583, 3645), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""Оборот в валюте по кредиту"""'}), "(verbose_name='Оборот в валюте по кредиту')\n", (3602, 3645), False, 'from django.db import models\n'), ((3673, 3760), 
'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""Итоговый оборот в рублях и в валюте по кредиту"""'}), "(verbose_name=\n 'Итоговый оборот в рублях и в валюте по кредиту')\n", (3692, 3760), False, 'from django.db import models\n'), ((3893, 3983), 'django.db.models.UniqueConstraint', 'UniqueConstraint', ([], {'fields': "['date', 'indCode', 'bank']", 'name': '"""unique_date_indCode_bank"""'}), "(fields=['date', 'indCode', 'bank'], name=\n 'unique_date_indCode_bank')\n", (3909, 3983), False, 'from django.db.models import UniqueConstraint\n')]
|
# coding=utf-8
# Copyright 2022 HyperBO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for linalg.py."""
import copy
from absl.testing import absltest
from hyperbo.basics import linalg
import jax
from jax import random
import jax.numpy as jnp
import jax.scipy.linalg as jspla
import numpy as np
grad = jax.grad
def test_grad(fun, params, index, eps=1e-4, cached_cholesky=False):
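  """Compare a numerical directional derivative of `fun` with the exact one.

  Uses a central difference along a random unit direction u:
  (f(p + eps/2 * u) - f(p - eps/2 * u)) / eps ~= <grad f(p), u>.
  For index 0 the random direction is symmetrized (0.5 * v.T @ v) so the
  perturbed first parameter keeps its symmetric (SPD-like) structure.
  """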
key = random.PRNGKey(0)
key, subkey = random.split(key)
vec = random.normal(subkey, params[index].shape)
if index == 0:
vec = 0.5 * jnp.dot(vec.T, vec)
unitvec = vec / jnp.sqrt(jnp.vdot(vec, vec))
else:
unitvec = vec / jnp.sqrt(jnp.vdot(vec, vec))
params_copy = copy.deepcopy(params)
params_copy[index] += eps / 2. * unitvec
if cached_cholesky:
params_copy[2] = jspla.cholesky(params_copy[0], lower=True)
f1 = fun(*params_copy)
params_copy = copy.deepcopy(params)
params_copy[index] -= eps / 2. * unitvec
if cached_cholesky:
params_copy[2] = jspla.cholesky(params_copy[0], lower=True)
f2 = fun(*params_copy)
exact_grad_prod = jnp.vdot(grad(fun, index)(*params), unitvec)
return {'Numerical': (f1 - f2) / eps, 'Exact': exact_grad_prod}
class LinalgTest(absltest.TestCase):
def test_inverse_spdmatrix_vector_product(self):
np.random.seed(1)
dim = 10
noise = 1e-3
num_replicas = 10
def fun(spd_matrix, x):
return jnp.dot(x, linalg.inverse_spdmatrix_vector_product(spd_matrix, x))
def test_grad_at_index(index):
for _ in range(num_replicas):
matrix = np.random.randn(dim, dim)
spd_matrix = matrix.T.dot(matrix) + noise * np.eye(matrix.shape[0])
x = np.random.randn(dim)
params = [spd_matrix, x]
grads = test_grad(fun, params, index)
numerical_grad = grads['Numerical']
exact_grad = grads['Exact']
self.assertTrue(jnp.allclose(numerical_grad, exact_grad, rtol=1))
test_grad_at_index(0)
test_grad_at_index(1)
def test_inverse_spdmatrix_vector_product_cached_cholesky(self):
"""Tests if the gradient works when the Cholesky factor is given."""
np.random.seed(1)
dim = 10
noise = 1e-3
num_replicas = 10
def fun(spd_matrix, x, cached_cholesky):
return jnp.dot(
x,
linalg.inverse_spdmatrix_vector_product(
spd_matrix, x, cached_cholesky=cached_cholesky))
def test_grad_at_index(index):
for _ in range(num_replicas):
matrix = np.random.randn(dim, dim)
spd_matrix = matrix.T.dot(matrix) + noise * np.eye(matrix.shape[0])
chol_factor = jspla.cholesky(spd_matrix, lower=True)
x = np.random.randn(dim)
params = [spd_matrix, x, chol_factor]
grads = test_grad(fun, params, index, cached_cholesky=True)
numerical_grad = grads['Numerical']
exact_grad = grads['Exact']
print(numerical_grad, exact_grad)
self.assertTrue(jnp.allclose(numerical_grad, exact_grad, rtol=1))
test_grad_at_index(0)
test_grad_at_index(1)
if __name__ == '__main__':
absltest.main()
|
[
"absl.testing.absltest.main",
"copy.deepcopy",
"numpy.random.seed",
"jax.random.normal",
"hyperbo.basics.linalg.inverse_spdmatrix_vector_product",
"jax.numpy.dot",
"jax.scipy.linalg.cholesky",
"numpy.random.randn",
"jax.numpy.vdot",
"jax.random.PRNGKey",
"jax.numpy.allclose",
"numpy.eye",
"jax.random.split"
] |
[((909, 926), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (923, 926), False, 'from jax import random\n'), ((943, 960), 'jax.random.split', 'random.split', (['key'], {}), '(key)\n', (955, 960), False, 'from jax import random\n'), ((969, 1011), 'jax.random.normal', 'random.normal', (['subkey', 'params[index].shape'], {}), '(subkey, params[index].shape)\n', (982, 1011), False, 'from jax import random\n'), ((1188, 1209), 'copy.deepcopy', 'copy.deepcopy', (['params'], {}), '(params)\n', (1201, 1209), False, 'import copy\n'), ((1380, 1401), 'copy.deepcopy', 'copy.deepcopy', (['params'], {}), '(params)\n', (1393, 1401), False, 'import copy\n'), ((3564, 3579), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (3577, 3579), False, 'from absl.testing import absltest\n'), ((1296, 1338), 'jax.scipy.linalg.cholesky', 'jspla.cholesky', (['params_copy[0]'], {'lower': '(True)'}), '(params_copy[0], lower=True)\n', (1310, 1338), True, 'import jax.scipy.linalg as jspla\n'), ((1488, 1530), 'jax.scipy.linalg.cholesky', 'jspla.cholesky', (['params_copy[0]'], {'lower': '(True)'}), '(params_copy[0], lower=True)\n', (1502, 1530), True, 'import jax.scipy.linalg as jspla\n'), ((1783, 1800), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1797, 1800), True, 'import numpy as np\n'), ((2619, 2636), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2633, 2636), True, 'import numpy as np\n'), ((1045, 1064), 'jax.numpy.dot', 'jnp.dot', (['vec.T', 'vec'], {}), '(vec.T, vec)\n', (1052, 1064), True, 'import jax.numpy as jnp\n'), ((1094, 1112), 'jax.numpy.vdot', 'jnp.vdot', (['vec', 'vec'], {}), '(vec, vec)\n', (1102, 1112), True, 'import jax.numpy as jnp\n'), ((1151, 1169), 'jax.numpy.vdot', 'jnp.vdot', (['vec', 'vec'], {}), '(vec, vec)\n', (1159, 1169), True, 'import jax.numpy as jnp\n'), ((1906, 1960), 'hyperbo.basics.linalg.inverse_spdmatrix_vector_product', 'linalg.inverse_spdmatrix_vector_product', (['spd_matrix', 'x'], {}), '(spd_matrix, x)\n', (1945, 1960), False, 'from hyperbo.basics import linalg\n'), ((2051, 2076), 'numpy.random.randn', 'np.random.randn', (['dim', 'dim'], {}), '(dim, dim)\n', (2066, 2076), True, 'import numpy as np\n'), ((2165, 2185), 'numpy.random.randn', 'np.random.randn', (['dim'], {}), '(dim)\n', (2180, 2185), True, 'import numpy as np\n'), ((2780, 2872), 'hyperbo.basics.linalg.inverse_spdmatrix_vector_product', 'linalg.inverse_spdmatrix_vector_product', (['spd_matrix', 'x'], {'cached_cholesky': 'cached_cholesky'}), '(spd_matrix, x, cached_cholesky=\n cached_cholesky)\n', (2819, 2872), False, 'from hyperbo.basics import linalg\n'), ((2973, 2998), 'numpy.random.randn', 'np.random.randn', (['dim', 'dim'], {}), '(dim, dim)\n', (2988, 2998), True, 'import numpy as np\n'), ((3097, 3135), 'jax.scipy.linalg.cholesky', 'jspla.cholesky', (['spd_matrix'], {'lower': '(True)'}), '(spd_matrix, lower=True)\n', (3111, 3135), True, 'import jax.scipy.linalg as jspla\n'), ((3148, 3168), 'numpy.random.randn', 'np.random.randn', (['dim'], {}), '(dim)\n', (3163, 3168), True, 'import numpy as np\n'), ((2370, 2418), 'jax.numpy.allclose', 'jnp.allclose', (['numerical_grad', 'exact_grad'], {'rtol': '(1)'}), '(numerical_grad, exact_grad, rtol=1)\n', (2382, 2418), True, 'import jax.numpy as jnp\n'), ((3430, 3478), 'jax.numpy.allclose', 'jnp.allclose', (['numerical_grad', 'exact_grad'], {'rtol': '(1)'}), '(numerical_grad, exact_grad, rtol=1)\n', (3442, 3478), True, 'import jax.numpy as jnp\n'), ((2129, 2152), 'numpy.eye', 'np.eye', (['matrix.shape[0]'], {}), 
'(matrix.shape[0])\n', (2135, 2152), True, 'import numpy as np\n'), ((3051, 3074), 'numpy.eye', 'np.eye', (['matrix.shape[0]'], {}), '(matrix.shape[0])\n', (3057, 3074), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.analytics.admin.v1alpha",
manifest={
"IndustryCategory",
"ServiceLevel",
"ActorType",
"ActionType",
"ChangeHistoryResourceType",
"GoogleSignalsState",
"GoogleSignalsConsent",
"LinkProposalInitiatingProduct",
"LinkProposalState",
"Account",
"Property",
"AndroidAppDataStream",
"IosAppDataStream",
"WebDataStream",
"DataStream",
"UserLink",
"AuditUserLink",
"FirebaseLink",
"GlobalSiteTag",
"GoogleAdsLink",
"DataSharingSettings",
"AccountSummary",
"PropertySummary",
"MeasurementProtocolSecret",
"ChangeHistoryEvent",
"ChangeHistoryChange",
"DisplayVideo360AdvertiserLink",
"DisplayVideo360AdvertiserLinkProposal",
"LinkProposalStatusDetails",
"ConversionEvent",
"GoogleSignalsSettings",
"CustomDimension",
"CustomMetric",
"DataRetentionSettings",
},
)
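# proto.module registers the proto package for this file along with the
# manifest of message/enum names that proto-plus exposes from it.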
class IndustryCategory(proto.Enum):
r"""The category selected for this property, used for industry
benchmarking.
"""
INDUSTRY_CATEGORY_UNSPECIFIED = 0
AUTOMOTIVE = 1
BUSINESS_AND_INDUSTRIAL_MARKETS = 2
FINANCE = 3
HEALTHCARE = 4
TECHNOLOGY = 5
TRAVEL = 6
OTHER = 7
ARTS_AND_ENTERTAINMENT = 8
BEAUTY_AND_FITNESS = 9
BOOKS_AND_LITERATURE = 10
FOOD_AND_DRINK = 11
GAMES = 12
HOBBIES_AND_LEISURE = 13
HOME_AND_GARDEN = 14
INTERNET_AND_TELECOM = 15
LAW_AND_GOVERNMENT = 16
NEWS = 17
ONLINE_COMMUNITIES = 18
PEOPLE_AND_SOCIETY = 19
PETS_AND_ANIMALS = 20
REAL_ESTATE = 21
REFERENCE = 22
SCIENCE = 23
SPORTS = 24
JOBS_AND_EDUCATION = 25
SHOPPING = 26
class ServiceLevel(proto.Enum):
r"""Various levels of service for Google Analytics."""
SERVICE_LEVEL_UNSPECIFIED = 0
GOOGLE_ANALYTICS_STANDARD = 1
GOOGLE_ANALYTICS_360 = 2
class ActorType(proto.Enum):
r"""Different kinds of actors that can make changes to Google
Analytics resources.
"""
ACTOR_TYPE_UNSPECIFIED = 0
USER = 1
SYSTEM = 2
SUPPORT = 3
class ActionType(proto.Enum):
r"""Types of actions that may change a resource."""
ACTION_TYPE_UNSPECIFIED = 0
CREATED = 1
UPDATED = 2
DELETED = 3
class ChangeHistoryResourceType(proto.Enum):
r"""Types of resources whose changes may be returned from change
history.
"""
CHANGE_HISTORY_RESOURCE_TYPE_UNSPECIFIED = 0
ACCOUNT = 1
PROPERTY = 2
WEB_DATA_STREAM = 3
ANDROID_APP_DATA_STREAM = 4
IOS_APP_DATA_STREAM = 5
FIREBASE_LINK = 6
GOOGLE_ADS_LINK = 7
GOOGLE_SIGNALS_SETTINGS = 8
CONVERSION_EVENT = 9
MEASUREMENT_PROTOCOL_SECRET = 10
CUSTOM_DIMENSION = 11
CUSTOM_METRIC = 12
DATA_RETENTION_SETTINGS = 13
DISPLAY_VIDEO_360_ADVERTISER_LINK = 14
DISPLAY_VIDEO_360_ADVERTISER_LINK_PROPOSAL = 15
class GoogleSignalsState(proto.Enum):
r"""Status of the Google Signals settings (i.e., whether this
feature has been enabled for the property).
"""
GOOGLE_SIGNALS_STATE_UNSPECIFIED = 0
GOOGLE_SIGNALS_ENABLED = 1
GOOGLE_SIGNALS_DISABLED = 2
class GoogleSignalsConsent(proto.Enum):
r"""Consent field of the Google Signals settings (i.e., whether
the user has consented to the Google Signals terms of service.)
"""
GOOGLE_SIGNALS_CONSENT_UNSPECIFIED = 0
GOOGLE_SIGNALS_CONSENT_CONSENTED = 2
GOOGLE_SIGNALS_CONSENT_NOT_CONSENTED = 1
class LinkProposalInitiatingProduct(proto.Enum):
r"""An indication of which product the user initiated a link
proposal from.
"""
LINK_PROPOSAL_INITIATING_PRODUCT_UNSPECIFIED = 0
GOOGLE_ANALYTICS = 1
LINKED_PRODUCT = 2
class LinkProposalState(proto.Enum):
r"""The state of a link proposal resource."""
LINK_PROPOSAL_STATE_UNSPECIFIED = 0
AWAITING_REVIEW_FROM_GOOGLE_ANALYTICS = 1
AWAITING_REVIEW_FROM_LINKED_PRODUCT = 2
WITHDRAWN = 3
DECLINED = 4
EXPIRED = 5
OBSOLETE = 6
class Account(proto.Message):
r"""A resource message representing a Google Analytics account.
Attributes:
name (str):
Output only. Resource name of this account.
Format: accounts/{account}
Example: "accounts/100".
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this account was
originally created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when account payload fields
were last updated.
display_name (str):
Required. Human-readable display name for
this account.
region_code (str):
Country of business. Must be a Unicode CLDR
region code.
deleted (bool):
Output only. Indicates whether this Account
is soft-deleted or not. Deleted accounts are
excluded from List results unless specifically
requested.
"""
name = proto.Field(proto.STRING, number=1,)
create_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
display_name = proto.Field(proto.STRING, number=4,)
region_code = proto.Field(proto.STRING, number=5,)
deleted = proto.Field(proto.BOOL, number=6,)
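# Construction sketch (proto-plus messages take their fields as keyword
# arguments; the values below are illustrative):
#   account = Account(name="accounts/100", display_name="Demo", region_code="US")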
class Property(proto.Message):
r"""A resource message representing a Google Analytics GA4
property.
Attributes:
name (str):
Output only. Resource name of this property. Format:
properties/{property_id} Example: "properties/1000".
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when the entity was
originally created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when entity payload fields
were last updated.
parent (str):
Immutable. Resource name of this property's
logical parent.
Note: The Property-Moving UI can be used to
change the parent. Format: accounts/{account}
Example: "accounts/100".
display_name (str):
Required. Human-readable display name for
this property.
The max allowed display name length is 100
UTF-16 code units.
industry_category (google.analytics.admin_v1alpha.types.IndustryCategory):
Industry associated with this property Example: AUTOMOTIVE,
FOOD_AND_DRINK
time_zone (str):
Required. Reporting Time Zone, used as the day boundary for
reports, regardless of where the data originates. If the
time zone honors DST, Analytics will automatically adjust
for the changes.
NOTE: Changing the time zone only affects data going
forward, and is not applied retroactively.
Format: https://www.iana.org/time-zones Example:
"America/Los_Angeles".
currency_code (str):
The currency type used in reports involving monetary values.
Format: https://en.wikipedia.org/wiki/ISO_4217 Examples:
"USD", "EUR", "JPY".
service_level (google.analytics.admin_v1alpha.types.ServiceLevel):
Output only. The Google Analytics service
level that applies to this property.
delete_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. If set, the time at which this
property was trashed. If not set, then this
property is not currently in the trash can.
expire_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. If set, the time at which this
trashed property will be permanently deleted. If
not set, then this property is not currently in
the trash can and is not slated to be deleted.
account (str):
Immutable. The resource name of the parent account Format:
accounts/{account_id} Example: "accounts/123".
"""
name = proto.Field(proto.STRING, number=1,)
create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
parent = proto.Field(proto.STRING, number=2,)
display_name = proto.Field(proto.STRING, number=5,)
industry_category = proto.Field(proto.ENUM, number=6, enum="IndustryCategory",)
time_zone = proto.Field(proto.STRING, number=7,)
currency_code = proto.Field(proto.STRING, number=8,)
service_level = proto.Field(proto.ENUM, number=10, enum="ServiceLevel",)
delete_time = proto.Field(
proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp,
)
expire_time = proto.Field(
proto.MESSAGE, number=12, message=timestamp_pb2.Timestamp,
)
account = proto.Field(proto.STRING, number=13,)
class AndroidAppDataStream(proto.Message):
r"""A resource message representing a Google Analytics Android
app stream.
Attributes:
name (str):
Output only. Resource name of this Data Stream. Format:
properties/{property_id}/androidAppDataStreams/{stream_id}
Example: "properties/1000/androidAppDataStreams/2000".
firebase_app_id (str):
Output only. ID of the corresponding Android
app in Firebase, if any. This ID can change if
the Android app is deleted and recreated.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this stream was
originally created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when stream payload fields
were last updated.
package_name (str):
Immutable. The package name for the app being
measured. Example: "com.example.myandroidapp".
display_name (str):
Human-readable display name for the Data
Stream.
The max allowed display name length is 255
UTF-16 code units.
"""
name = proto.Field(proto.STRING, number=1,)
firebase_app_id = proto.Field(proto.STRING, number=2,)
create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
package_name = proto.Field(proto.STRING, number=5,)
display_name = proto.Field(proto.STRING, number=6,)
class IosAppDataStream(proto.Message):
r"""A resource message representing a Google Analytics IOS app
stream.
Attributes:
name (str):
Output only. Resource name of this Data Stream. Format:
properties/{property_id}/iosAppDataStreams/{stream_id}
Example: "properties/1000/iosAppDataStreams/2000".
firebase_app_id (str):
Output only. ID of the corresponding iOS app
in Firebase, if any. This ID can change if the
iOS app is deleted and recreated.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this stream was
originally created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when stream payload fields
were last updated.
bundle_id (str):
Required. Immutable. The Apple App Store
Bundle ID for the app Example:
"com.example.myiosapp".
display_name (str):
Human-readable display name for the Data
Stream.
The max allowed display name length is 255
UTF-16 code units.
"""
name = proto.Field(proto.STRING, number=1,)
firebase_app_id = proto.Field(proto.STRING, number=2,)
create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
bundle_id = proto.Field(proto.STRING, number=5,)
display_name = proto.Field(proto.STRING, number=6,)
class WebDataStream(proto.Message):
r"""A resource message representing a Google Analytics web
stream.
Attributes:
name (str):
Output only. Resource name of this Data Stream. Format:
properties/{property_id}/webDataStreams/{stream_id} Example:
"properties/1000/webDataStreams/2000".
measurement_id (str):
Output only. Analytics "Measurement ID",
without the "G-" prefix. Example: "G-1A2BCD345E"
would just be "1A2BCD345E".
firebase_app_id (str):
Output only. ID of the corresponding web app
in Firebase, if any. This ID can change if the
web app is deleted and recreated.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this stream was
originally created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when stream payload fields
were last updated.
default_uri (str):
Immutable. Domain name of the web app being
measured, or empty. Example:
"http://www.google.com",
"https://www.google.com".
display_name (str):
Required. Human-readable display name for the
Data Stream.
The max allowed display name length is 255
UTF-16 code units.
"""
name = proto.Field(proto.STRING, number=1,)
measurement_id = proto.Field(proto.STRING, number=2,)
firebase_app_id = proto.Field(proto.STRING, number=3,)
create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
default_uri = proto.Field(proto.STRING, number=6,)
display_name = proto.Field(proto.STRING, number=7,)
class DataStream(proto.Message):
r"""A resource message representing a data stream.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
web_stream_data (google.analytics.admin_v1alpha.types.DataStream.WebStreamData):
Data specific to web streams. Must be populated if type is
WEB_DATA_STREAM.
This field is a member of `oneof`_ ``stream_data``.
android_app_stream_data (google.analytics.admin_v1alpha.types.DataStream.AndroidAppStreamData):
Data specific to Android app streams. Must be populated if
type is ANDROID_APP_DATA_STREAM.
This field is a member of `oneof`_ ``stream_data``.
ios_app_stream_data (google.analytics.admin_v1alpha.types.DataStream.IosAppStreamData):
Data specific to iOS app streams. Must be populated if type
is IOS_APP_DATA_STREAM.
This field is a member of `oneof`_ ``stream_data``.
name (str):
Output only. Resource name of this Data Stream. Format:
properties/{property_id}/dataStreams/{stream_id} Example:
"properties/1000/dataStreams/2000".
type_ (google.analytics.admin_v1alpha.types.DataStream.DataStreamType):
Required. Immutable. The type of this
DataStream resource.
display_name (str):
Human-readable display name for the Data
Stream.
Required for web data streams.
The max allowed display name length is 255
UTF-16 code units.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this stream was
originally created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when stream payload fields
were last updated.
"""
class DataStreamType(proto.Enum):
r"""The type of the data stream."""
DATA_STREAM_TYPE_UNSPECIFIED = 0
WEB_DATA_STREAM = 1
ANDROID_APP_DATA_STREAM = 2
IOS_APP_DATA_STREAM = 3
class WebStreamData(proto.Message):
r"""Data specific to web streams.
Attributes:
measurement_id (str):
Output only. Analytics "Measurement ID",
without the "G-" prefix. Example: "G-1A2BCD345E"
would just be "1A2BCD345E".
firebase_app_id (str):
Output only. ID of the corresponding web app
in Firebase, if any. This ID can change if the
web app is deleted and recreated.
default_uri (str):
Immutable. Domain name of the web app being
measured, or empty. Example:
"http://www.google.com",
"https://www.google.com".
"""
measurement_id = proto.Field(proto.STRING, number=1,)
firebase_app_id = proto.Field(proto.STRING, number=2,)
default_uri = proto.Field(proto.STRING, number=3,)
class AndroidAppStreamData(proto.Message):
r"""Data specific to Android app streams.
Attributes:
firebase_app_id (str):
Output only. ID of the corresponding Android
app in Firebase, if any. This ID can change if
the Android app is deleted and recreated.
package_name (str):
Immutable. The package name for the app being
measured. Example: "com.example.myandroidapp".
"""
firebase_app_id = proto.Field(proto.STRING, number=1,)
package_name = proto.Field(proto.STRING, number=2,)
class IosAppStreamData(proto.Message):
r"""Data specific to iOS app streams.
Attributes:
firebase_app_id (str):
Output only. ID of the corresponding iOS app
in Firebase, if any. This ID can change if the
iOS app is deleted and recreated.
bundle_id (str):
Required. Immutable. The Apple App Store
                Bundle ID for the app. Example:
"com.example.myiosapp".
"""
firebase_app_id = proto.Field(proto.STRING, number=1,)
bundle_id = proto.Field(proto.STRING, number=2,)
web_stream_data = proto.Field(
proto.MESSAGE, number=6, oneof="stream_data", message=WebStreamData,
)
android_app_stream_data = proto.Field(
proto.MESSAGE, number=7, oneof="stream_data", message=AndroidAppStreamData,
)
ios_app_stream_data = proto.Field(
proto.MESSAGE, number=8, oneof="stream_data", message=IosAppStreamData,
)
name = proto.Field(proto.STRING, number=1,)
type_ = proto.Field(proto.ENUM, number=2, enum=DataStreamType,)
display_name = proto.Field(proto.STRING, number=3,)
create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
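# Editor's sketch (hypothetical, not part of the generated surface): the
# DataStream docstring above describes ``stream_data`` as a oneof, where
# setting one member clears the others. proto-plus enforces this on
# assignment, and field presence can be checked with ``in``.
def _example_data_stream_oneof():
    stream = DataStream(
        type_=DataStream.DataStreamType.WEB_DATA_STREAM,
        display_name="Example web stream",
        web_stream_data=DataStream.WebStreamData(
            default_uri="https://www.example.com",
        ),
    )
    # Assigning a different member of the ``stream_data`` oneof replaces
    # the previously set one.
    stream.android_app_stream_data = DataStream.AndroidAppStreamData(
        package_name="com.example.myandroidapp",
    )
    assert "web_stream_data" not in stream  # cleared by the assignment above
    return stream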
class UserLink(proto.Message):
r"""A resource message representing a user's permissions on an
Account or Property resource.
Attributes:
name (str):
Output only. Example format:
properties/1234/userLinks/5678
email_address (str):
            Immutable. Email address of the user to link.
direct_roles (Sequence[str]):
Roles directly assigned to this user for this account or
property.
Valid values: predefinedRoles/viewer predefinedRoles/analyst
predefinedRoles/editor predefinedRoles/admin
predefinedRoles/no-cost-data predefinedRoles/no-revenue-data
Excludes roles that are inherited from a higher-level
entity, group, or organization admin role.
A UserLink that is updated to have an empty list of
direct_roles will be deleted.
"""
name = proto.Field(proto.STRING, number=1,)
email_address = proto.Field(proto.STRING, number=2,)
direct_roles = proto.RepeatedField(proto.STRING, number=3,)
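# Editor's sketch (hypothetical): ``direct_roles`` is a RepeatedField,
# which proto-plus exposes as a mutable, list-like container.
def _example_user_link_roles():
    link = UserLink(email_address="user@example.com")
    link.direct_roles.append("predefinedRoles/viewer")
    link.direct_roles.extend(["predefinedRoles/analyst"])
    return link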
class AuditUserLink(proto.Message):
r"""Read-only resource used to summarize a principal's effective
roles.
Attributes:
name (str):
Example format:
properties/1234/userLinks/5678
email_address (str):
            Email address of the linked user.
direct_roles (Sequence[str]):
Roles directly assigned to this user for this
entity.
Format: predefinedRoles/viewer
Excludes roles that are inherited from an
account (if this is for a property), group, or
organization admin role.
effective_roles (Sequence[str]):
Union of all permissions a user has at this
account or property (includes direct
permissions, group-inherited permissions, etc.).
Format: predefinedRoles/viewer
"""
name = proto.Field(proto.STRING, number=1,)
email_address = proto.Field(proto.STRING, number=2,)
direct_roles = proto.RepeatedField(proto.STRING, number=3,)
effective_roles = proto.RepeatedField(proto.STRING, number=4,)
class FirebaseLink(proto.Message):
r"""A link between a GA4 property and a Firebase project.
Attributes:
name (str):
Output only. Example format:
properties/1234/firebaseLinks/5678
project (str):
Immutable. Firebase project resource name. When creating a
FirebaseLink, you may provide this resource name using
either a project number or project ID. Once this resource
has been created, returned FirebaseLinks will always have a
project_name that contains a project number.
Format: 'projects/{project number}' Example: 'projects/1234'
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this FirebaseLink was
originally created.
"""
name = proto.Field(proto.STRING, number=1,)
project = proto.Field(proto.STRING, number=2,)
create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
class GlobalSiteTag(proto.Message):
r"""Read-only resource with the tag for sending data from a
website to a WebDataStream.
Attributes:
name (str):
Output only. Resource name for this
GlobalSiteTag resource. Format:
properties/{propertyId}/globalSiteTag
snippet (str):
Immutable. JavaScript code snippet to be
pasted as the first item into the head tag of
every webpage to measure.
"""
name = proto.Field(proto.STRING, number=1,)
snippet = proto.Field(proto.STRING, number=2,)
class GoogleAdsLink(proto.Message):
r"""A link between a GA4 property and a Google Ads account.
Attributes:
name (str):
Output only. Format:
properties/{propertyId}/googleAdsLinks/{googleAdsLinkId}
Note: googleAdsLinkId is not the Google Ads
customer ID.
customer_id (str):
Immutable. Google Ads customer ID.
can_manage_clients (bool):
Output only. If true, this link is for a
Google Ads manager account.
ads_personalization_enabled (google.protobuf.wrappers_pb2.BoolValue):
Enable personalized advertising features with
this integration. Automatically publish my
Google Analytics audience lists and Google
Analytics remarketing events/parameters to the
linked Google Ads account. If this field is not
set on create/update, it will be defaulted to
true.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this link was
originally created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this link was last
updated.
creator_email_address (str):
Output only. Email address of the user that
created the link. An empty string will be
returned if the email address can't be
retrieved.
"""
name = proto.Field(proto.STRING, number=1,)
customer_id = proto.Field(proto.STRING, number=3,)
can_manage_clients = proto.Field(proto.BOOL, number=4,)
ads_personalization_enabled = proto.Field(
proto.MESSAGE, number=5, message=wrappers_pb2.BoolValue,
)
create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,)
creator_email_address = proto.Field(proto.STRING, number=9,)
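# Editor's sketch (hypothetical): ``ads_personalization_enabled`` is a
# google.protobuf.BoolValue wrapper field. proto-plus marshals wrapper
# types to plain Python primitives, reading as None while unset.
def _example_google_ads_link():
    link = GoogleAdsLink(customer_id="1234567890")
    assert link.ads_personalization_enabled is None  # wrapper not yet set
    link.ads_personalization_enabled = True  # stored as BoolValue(value=True)
    return link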
class DataSharingSettings(proto.Message):
r"""A resource message representing data sharing settings of a
Google Analytics account.
Attributes:
name (str):
Output only. Resource name.
Format: accounts/{account}/dataSharingSettings
Example: "accounts/1000/dataSharingSettings".
sharing_with_google_support_enabled (bool):
Allows Google support to access the data in
order to help troubleshoot issues.
sharing_with_google_assigned_sales_enabled (bool):
Allows Google sales teams that are assigned
to the customer to access the data in order to
suggest configuration changes to improve
results. Sales team restrictions still apply
when enabled.
sharing_with_google_any_sales_enabled (bool):
Allows any of Google sales to access the data
in order to suggest configuration changes to
improve results.
sharing_with_google_products_enabled (bool):
Allows Google to use the data to improve
other Google products or services.
sharing_with_others_enabled (bool):
Allows Google to share the data anonymously
in aggregate form with others.
"""
name = proto.Field(proto.STRING, number=1,)
sharing_with_google_support_enabled = proto.Field(proto.BOOL, number=2,)
sharing_with_google_assigned_sales_enabled = proto.Field(proto.BOOL, number=3,)
sharing_with_google_any_sales_enabled = proto.Field(proto.BOOL, number=4,)
sharing_with_google_products_enabled = proto.Field(proto.BOOL, number=5,)
sharing_with_others_enabled = proto.Field(proto.BOOL, number=6,)
class AccountSummary(proto.Message):
r"""A virtual resource representing an overview of an account and
all its child GA4 properties.
Attributes:
name (str):
Resource name for this account summary. Format:
accountSummaries/{account_id} Example:
"accountSummaries/1000".
account (str):
            Resource name of account referred to by this account summary.
Format: accounts/{account_id} Example: "accounts/1000".
display_name (str):
Display name for the account referred to in
this account summary.
property_summaries (Sequence[google.analytics.admin_v1alpha.types.PropertySummary]):
List of summaries for child accounts of this
account.
"""
name = proto.Field(proto.STRING, number=1,)
account = proto.Field(proto.STRING, number=2,)
display_name = proto.Field(proto.STRING, number=3,)
property_summaries = proto.RepeatedField(
proto.MESSAGE, number=4, message="PropertySummary",
)
class PropertySummary(proto.Message):
r"""A virtual resource representing metadata for a GA4 property.
Attributes:
property (str):
Resource name of property referred to by this property
            summary. Format: properties/{property_id} Example:
"properties/1000".
display_name (str):
Display name for the property referred to in
this property summary.
"""
property = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
class MeasurementProtocolSecret(proto.Message):
r"""A secret value used for sending hits to Measurement Protocol.
Attributes:
name (str):
Output only. Resource name of this secret.
This secret may be a child of any type of
stream. Format:
properties/{property}/webDataStreams/{webDataStream}/measurementProtocolSecrets/{measurementProtocolSecret}
display_name (str):
Required. Human-readable display name for
this secret.
secret_value (str):
Output only. The measurement protocol secret value. Pass
this value to the api_secret field of the Measurement
Protocol API when sending hits to this secret's parent
property.
"""
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
secret_value = proto.Field(proto.STRING, number=3,)
class ChangeHistoryEvent(proto.Message):
r"""A set of changes within a Google Analytics account or its
child properties that resulted from the same cause. Common
causes would be updates made in the Google Analytics UI, changes
from customer support, or automatic Google Analytics system
changes.
Attributes:
id (str):
ID of this change history event. This ID is
unique across Google Analytics.
change_time (google.protobuf.timestamp_pb2.Timestamp):
Time when change was made.
actor_type (google.analytics.admin_v1alpha.types.ActorType):
The type of actor that made this change.
user_actor_email (str):
Email address of the Google account that made
the change. This will be a valid email address
if the actor field is set to USER, and empty
otherwise. Google accounts that have been
deleted will cause an error.
changes_filtered (bool):
If true, then the list of changes returned
was filtered, and does not represent all changes
that occurred in this event.
changes (Sequence[google.analytics.admin_v1alpha.types.ChangeHistoryChange]):
A list of changes made in this change history
event that fit the filters specified in
SearchChangeHistoryEventsRequest.
"""
id = proto.Field(proto.STRING, number=1,)
change_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
actor_type = proto.Field(proto.ENUM, number=3, enum="ActorType",)
user_actor_email = proto.Field(proto.STRING, number=4,)
changes_filtered = proto.Field(proto.BOOL, number=5,)
changes = proto.RepeatedField(
proto.MESSAGE, number=6, message="ChangeHistoryChange",
)
class ChangeHistoryChange(proto.Message):
r"""A description of a change to a single Google Analytics
resource.
Attributes:
resource (str):
Resource name of the resource whose changes
are described by this entry.
action (google.analytics.admin_v1alpha.types.ActionType):
The type of action that changed this
resource.
resource_before_change (google.analytics.admin_v1alpha.types.ChangeHistoryChange.ChangeHistoryResource):
Resource contents from before the change was
made. If this resource was created in this
change, this field will be missing.
resource_after_change (google.analytics.admin_v1alpha.types.ChangeHistoryChange.ChangeHistoryResource):
Resource contents from after the change was
made. If this resource was deleted in this
change, this field will be missing.
"""
class ChangeHistoryResource(proto.Message):
r"""A snapshot of a resource as before or after the result of a
change in change history.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
account (google.analytics.admin_v1alpha.types.Account):
A snapshot of an Account resource in change
history.
This field is a member of `oneof`_ ``resource``.
property (google.analytics.admin_v1alpha.types.Property):
A snapshot of a Property resource in change
history.
This field is a member of `oneof`_ ``resource``.
web_data_stream (google.analytics.admin_v1alpha.types.WebDataStream):
A snapshot of a WebDataStream resource in
change history.
This field is a member of `oneof`_ ``resource``.
android_app_data_stream (google.analytics.admin_v1alpha.types.AndroidAppDataStream):
A snapshot of an AndroidAppDataStream
resource in change history.
This field is a member of `oneof`_ ``resource``.
ios_app_data_stream (google.analytics.admin_v1alpha.types.IosAppDataStream):
A snapshot of an IosAppDataStream resource in
change history.
This field is a member of `oneof`_ ``resource``.
firebase_link (google.analytics.admin_v1alpha.types.FirebaseLink):
A snapshot of a FirebaseLink resource in
change history.
This field is a member of `oneof`_ ``resource``.
google_ads_link (google.analytics.admin_v1alpha.types.GoogleAdsLink):
A snapshot of a GoogleAdsLink resource in
change history.
This field is a member of `oneof`_ ``resource``.
google_signals_settings (google.analytics.admin_v1alpha.types.GoogleSignalsSettings):
A snapshot of a GoogleSignalsSettings
resource in change history.
This field is a member of `oneof`_ ``resource``.
display_video_360_advertiser_link (google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLink):
A snapshot of a DisplayVideo360AdvertiserLink
resource in change history.
This field is a member of `oneof`_ ``resource``.
display_video_360_advertiser_link_proposal (google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLinkProposal):
A snapshot of a
DisplayVideo360AdvertiserLinkProposal resource
in change history.
This field is a member of `oneof`_ ``resource``.
conversion_event (google.analytics.admin_v1alpha.types.ConversionEvent):
A snapshot of a ConversionEvent resource in
change history.
This field is a member of `oneof`_ ``resource``.
measurement_protocol_secret (google.analytics.admin_v1alpha.types.MeasurementProtocolSecret):
A snapshot of a MeasurementProtocolSecret
resource in change history.
This field is a member of `oneof`_ ``resource``.
custom_dimension (google.analytics.admin_v1alpha.types.CustomDimension):
A snapshot of a CustomDimension resource in
change history.
This field is a member of `oneof`_ ``resource``.
custom_metric (google.analytics.admin_v1alpha.types.CustomMetric):
A snapshot of a CustomMetric resource in
change history.
This field is a member of `oneof`_ ``resource``.
data_retention_settings (google.analytics.admin_v1alpha.types.DataRetentionSettings):
A snapshot of a data retention settings
resource in change history.
This field is a member of `oneof`_ ``resource``.
"""
account = proto.Field(
proto.MESSAGE, number=1, oneof="resource", message="Account",
)
property = proto.Field(
proto.MESSAGE, number=2, oneof="resource", message="Property",
)
web_data_stream = proto.Field(
proto.MESSAGE, number=3, oneof="resource", message="WebDataStream",
)
android_app_data_stream = proto.Field(
proto.MESSAGE, number=4, oneof="resource", message="AndroidAppDataStream",
)
ios_app_data_stream = proto.Field(
proto.MESSAGE, number=5, oneof="resource", message="IosAppDataStream",
)
firebase_link = proto.Field(
proto.MESSAGE, number=6, oneof="resource", message="FirebaseLink",
)
google_ads_link = proto.Field(
proto.MESSAGE, number=7, oneof="resource", message="GoogleAdsLink",
)
google_signals_settings = proto.Field(
proto.MESSAGE, number=8, oneof="resource", message="GoogleSignalsSettings",
)
display_video_360_advertiser_link = proto.Field(
proto.MESSAGE,
number=9,
oneof="resource",
message="DisplayVideo360AdvertiserLink",
)
display_video_360_advertiser_link_proposal = proto.Field(
proto.MESSAGE,
number=10,
oneof="resource",
message="DisplayVideo360AdvertiserLinkProposal",
)
conversion_event = proto.Field(
proto.MESSAGE, number=11, oneof="resource", message="ConversionEvent",
)
measurement_protocol_secret = proto.Field(
proto.MESSAGE,
number=12,
oneof="resource",
message="MeasurementProtocolSecret",
)
custom_dimension = proto.Field(
proto.MESSAGE, number=13, oneof="resource", message="CustomDimension",
)
custom_metric = proto.Field(
proto.MESSAGE, number=14, oneof="resource", message="CustomMetric",
)
data_retention_settings = proto.Field(
proto.MESSAGE, number=15, oneof="resource", message="DataRetentionSettings",
)
resource = proto.Field(proto.STRING, number=1,)
action = proto.Field(proto.ENUM, number=2, enum="ActionType",)
resource_before_change = proto.Field(
proto.MESSAGE, number=3, message=ChangeHistoryResource,
)
resource_after_change = proto.Field(
proto.MESSAGE, number=4, message=ChangeHistoryResource,
)
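# Editor's sketch (hypothetical): ``ChangeHistoryResource`` is a oneof over
# resource snapshots. Dropping to the raw protobuf via the proto-plus
# ``pb()`` classmethod exposes ``WhichOneof`` to learn which snapshot is
# populated (returns the field name, or None if none is set).
def _example_which_resource(change):
    snapshot = change.resource_after_change
    return ChangeHistoryChange.ChangeHistoryResource.pb(snapshot).WhichOneof(
        "resource"
    )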
class DisplayVideo360AdvertiserLink(proto.Message):
r"""A link between a GA4 property and a Display & Video 360
advertiser.
Attributes:
name (str):
Output only. The resource name for this
DisplayVideo360AdvertiserLink resource. Format:
properties/{propertyId}/displayVideo360AdvertiserLinks/{linkId}
Note: linkId is not the Display & Video 360
            Advertiser ID.
advertiser_id (str):
Immutable. The Display & Video 360
Advertiser's advertiser ID.
advertiser_display_name (str):
Output only. The display name of the Display
& Video 360 Advertiser.
ads_personalization_enabled (google.protobuf.wrappers_pb2.BoolValue):
Enables personalized advertising features
with this integration. If this field is not set
on create/update, it will be defaulted to true.
campaign_data_sharing_enabled (google.protobuf.wrappers_pb2.BoolValue):
Immutable. Enables the import of campaign
data from Display & Video 360 into the GA4
property. After link creation, this can only be
updated from the Display & Video 360 product.
If this field is not set on create, it will be
defaulted to true.
cost_data_sharing_enabled (google.protobuf.wrappers_pb2.BoolValue):
Immutable. Enables the import of cost data from Display &
Video 360 into the GA4 property. This can only be enabled if
campaign_data_sharing_enabled is enabled. After link
creation, this can only be updated from the Display & Video
360 product. If this field is not set on create, it will be
defaulted to true.
"""
name = proto.Field(proto.STRING, number=1,)
advertiser_id = proto.Field(proto.STRING, number=2,)
advertiser_display_name = proto.Field(proto.STRING, number=3,)
ads_personalization_enabled = proto.Field(
proto.MESSAGE, number=4, message=wrappers_pb2.BoolValue,
)
campaign_data_sharing_enabled = proto.Field(
proto.MESSAGE, number=5, message=wrappers_pb2.BoolValue,
)
cost_data_sharing_enabled = proto.Field(
proto.MESSAGE, number=6, message=wrappers_pb2.BoolValue,
)
class DisplayVideo360AdvertiserLinkProposal(proto.Message):
r"""A proposal for a link between a GA4 property and a Display &
Video 360 advertiser.
A proposal is converted to a DisplayVideo360AdvertiserLink once
approved. Google Analytics admins approve inbound proposals
while Display & Video 360 admins approve outbound proposals.
Attributes:
name (str):
Output only. The resource name for this
DisplayVideo360AdvertiserLinkProposal resource.
Format:
properties/{propertyId}/displayVideo360AdvertiserLinkProposals/{proposalId}
Note: proposalId is not the Display & Video 360
            Advertiser ID.
advertiser_id (str):
Immutable. The Display & Video 360
Advertiser's advertiser ID.
link_proposal_status_details (google.analytics.admin_v1alpha.types.LinkProposalStatusDetails):
Output only. The status information for this
link proposal.
advertiser_display_name (str):
            Output only. The display name of the Display
            & Video 360 Advertiser. Only populated for proposals
that originated from Display & Video 360.
validation_email (str):
Input only. On a proposal being sent to
Display & Video 360, this field must be set to
the email address of an admin on the target
advertiser. This is used to verify that the
Google Analytics admin is aware of at least one
admin on the Display & Video 360 Advertiser.
This does not restrict approval of the proposal
to a single user. Any admin on the Display &
Video 360 Advertiser may approve the proposal.
ads_personalization_enabled (google.protobuf.wrappers_pb2.BoolValue):
Immutable. Enables personalized advertising
features with this integration. If this field is
not set on create, it will be defaulted to true.
campaign_data_sharing_enabled (google.protobuf.wrappers_pb2.BoolValue):
Immutable. Enables the import of campaign
data from Display & Video 360. If this field is
not set on create, it will be defaulted to true.
cost_data_sharing_enabled (google.protobuf.wrappers_pb2.BoolValue):
Immutable. Enables the import of cost data from Display &
Video 360. This can only be enabled if
campaign_data_sharing_enabled is enabled. If this field is
not set on create, it will be defaulted to true.
"""
name = proto.Field(proto.STRING, number=1,)
advertiser_id = proto.Field(proto.STRING, number=2,)
link_proposal_status_details = proto.Field(
proto.MESSAGE, number=3, message="LinkProposalStatusDetails",
)
advertiser_display_name = proto.Field(proto.STRING, number=4,)
validation_email = proto.Field(proto.STRING, number=5,)
ads_personalization_enabled = proto.Field(
proto.MESSAGE, number=6, message=wrappers_pb2.BoolValue,
)
campaign_data_sharing_enabled = proto.Field(
proto.MESSAGE, number=7, message=wrappers_pb2.BoolValue,
)
cost_data_sharing_enabled = proto.Field(
proto.MESSAGE, number=8, message=wrappers_pb2.BoolValue,
)
class LinkProposalStatusDetails(proto.Message):
r"""Status information for a link proposal.
Attributes:
link_proposal_initiating_product (google.analytics.admin_v1alpha.types.LinkProposalInitiatingProduct):
Output only. The source of this proposal.
requestor_email (str):
Output only. The email address of the user
that proposed this linkage.
link_proposal_state (google.analytics.admin_v1alpha.types.LinkProposalState):
Output only. The state of this proposal.
"""
link_proposal_initiating_product = proto.Field(
proto.ENUM, number=1, enum="LinkProposalInitiatingProduct",
)
requestor_email = proto.Field(proto.STRING, number=2,)
link_proposal_state = proto.Field(proto.ENUM, number=3, enum="LinkProposalState",)
class ConversionEvent(proto.Message):
r"""A conversion event in a Google Analytics property.
Attributes:
name (str):
Output only. Resource name of this conversion event. Format:
properties/{property}/conversionEvents/{conversion_event}
event_name (str):
Immutable. The event name for this conversion
event. Examples: 'click', 'purchase'
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this conversion event
was created in the property.
deletable (bool):
Output only. If set, this event can currently
be deleted via DeleteConversionEvent.
custom (bool):
Output only. If set to true, this conversion
event refers to a custom event. If set to
false, this conversion event refers to a default
event in GA. Default events typically have
special meaning in GA. Default events are
usually created for you by the GA system, but in
some cases can be created by property admins.
Custom events count towards the maximum number
of custom conversion events that may be created
per property.
"""
name = proto.Field(proto.STRING, number=1,)
event_name = proto.Field(proto.STRING, number=2,)
create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
deletable = proto.Field(proto.BOOL, number=4,)
custom = proto.Field(proto.BOOL, number=5,)
class GoogleSignalsSettings(proto.Message):
r"""Settings values for Google Signals. This is a singleton
resource.
Attributes:
name (str):
Output only. Resource name of this setting. Format:
properties/{property_id}/googleSignalsSettings Example:
"properties/1000/googleSignalsSettings".
state (google.analytics.admin_v1alpha.types.GoogleSignalsState):
Status of this setting.
consent (google.analytics.admin_v1alpha.types.GoogleSignalsConsent):
Output only. Terms of Service acceptance.
"""
name = proto.Field(proto.STRING, number=1,)
state = proto.Field(proto.ENUM, number=3, enum="GoogleSignalsState",)
consent = proto.Field(proto.ENUM, number=4, enum="GoogleSignalsConsent",)
class CustomDimension(proto.Message):
r"""A definition for a CustomDimension.
Attributes:
name (str):
Output only. Resource name for this
CustomDimension resource. Format:
properties/{property}/customDimensions/{customDimension}
parameter_name (str):
Required. Immutable. Tagging parameter name
for this custom dimension.
            If this is a user-scoped dimension, then this is
            the user property name. If this is an
            event-scoped dimension, then this is the event
            parameter name.
May only contain alphanumeric and underscore
characters, starting with a letter. Max length
of 24 characters for user-scoped dimensions, 40
characters for event-scoped dimensions.
display_name (str):
Required. Display name for this custom
dimension as shown in the Analytics UI. Max
length of 82 characters, alphanumeric plus space
and underscore starting with a letter. Legacy
system-generated display names may contain
square brackets, but updates to this field will
never permit square brackets.
description (str):
Optional. Description for this custom
dimension. Max length of 150 characters.
scope (google.analytics.admin_v1alpha.types.CustomDimension.DimensionScope):
Required. Immutable. The scope of this
dimension.
disallow_ads_personalization (bool):
Optional. If set to true, sets this dimension
as NPA and excludes it from ads personalization.
This is currently only supported by user-scoped
custom dimensions.
"""
class DimensionScope(proto.Enum):
r"""Valid values for the scope of this dimension."""
DIMENSION_SCOPE_UNSPECIFIED = 0
EVENT = 1
USER = 2
name = proto.Field(proto.STRING, number=1,)
parameter_name = proto.Field(proto.STRING, number=2,)
display_name = proto.Field(proto.STRING, number=3,)
description = proto.Field(proto.STRING, number=4,)
scope = proto.Field(proto.ENUM, number=5, enum=DimensionScope,)
disallow_ads_personalization = proto.Field(proto.BOOL, number=6,)
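# Editor's sketch (hypothetical): a user-scoped CustomDimension honoring
# the constraints documented above (alphanumeric/underscore parameter name
# starting with a letter, at most 24 characters for user scope).
def _example_custom_dimension():
    return CustomDimension(
        parameter_name="favorite_color",
        display_name="Favorite color",
        scope=CustomDimension.DimensionScope.USER,
        disallow_ads_personalization=True,
    )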
class CustomMetric(proto.Message):
r"""A definition for a custom metric.
Attributes:
name (str):
Output only. Resource name for this
CustomMetric resource. Format:
properties/{property}/customMetrics/{customMetric}
parameter_name (str):
Required. Immutable. Tagging name for this
custom metric.
If this is an event-scoped metric, then this is
the event parameter name.
May only contain alphanumeric and underscore
            characters, starting with a letter. Max length of
40 characters for event-scoped metrics.
display_name (str):
Required. Display name for this custom metric
as shown in the Analytics UI. Max length of 82
characters, alphanumeric plus space and
underscore starting with a letter. Legacy
system-generated display names may contain
square brackets, but updates to this field will
never permit square brackets.
description (str):
            Optional. Description for this custom
            metric. Max length of 150 characters.
measurement_unit (google.analytics.admin_v1alpha.types.CustomMetric.MeasurementUnit):
Required. The type for the custom metric's
value.
scope (google.analytics.admin_v1alpha.types.CustomMetric.MetricScope):
Required. Immutable. The scope of this custom
metric.
"""
class MeasurementUnit(proto.Enum):
r"""Possible types of representing the custom metric's value.
Currency representation may change in the future, requiring a
breaking API change.
"""
MEASUREMENT_UNIT_UNSPECIFIED = 0
STANDARD = 1
CURRENCY = 2
FEET = 3
METERS = 4
KILOMETERS = 5
MILES = 6
MILLISECONDS = 7
SECONDS = 8
MINUTES = 9
HOURS = 10
class MetricScope(proto.Enum):
r"""The scope of this metric."""
METRIC_SCOPE_UNSPECIFIED = 0
EVENT = 1
name = proto.Field(proto.STRING, number=1,)
parameter_name = proto.Field(proto.STRING, number=2,)
display_name = proto.Field(proto.STRING, number=3,)
description = proto.Field(proto.STRING, number=4,)
measurement_unit = proto.Field(proto.ENUM, number=5, enum=MeasurementUnit,)
scope = proto.Field(proto.ENUM, number=6, enum=MetricScope,)
class DataRetentionSettings(proto.Message):
r"""Settings values for data retention. This is a singleton
resource.
Attributes:
name (str):
Output only. Resource name for this
DataRetentionSetting resource. Format:
properties/{property}/dataRetentionSettings
event_data_retention (google.analytics.admin_v1alpha.types.DataRetentionSettings.RetentionDuration):
The length of time that event-level data is
retained.
reset_user_data_on_new_activity (bool):
If true, reset the retention period for the
user identifier with every event from that user.
"""
class RetentionDuration(proto.Enum):
r"""Valid values for the data retention duration."""
RETENTION_DURATION_UNSPECIFIED = 0
TWO_MONTHS = 1
FOURTEEN_MONTHS = 3
TWENTY_SIX_MONTHS = 4
THIRTY_EIGHT_MONTHS = 5
FIFTY_MONTHS = 6
name = proto.Field(proto.STRING, number=1,)
event_data_retention = proto.Field(proto.ENUM, number=2, enum=RetentionDuration,)
reset_user_data_on_new_activity = proto.Field(proto.BOOL, number=3,)
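# Editor's sketch (hypothetical): proto-plus messages round-trip through
# the ``serialize``/``deserialize`` classmethods (binary wire format); a
# ``to_json``/``from_json`` pair exists as well.
def _example_retention_roundtrip():
    settings = DataRetentionSettings(
        event_data_retention=DataRetentionSettings.RetentionDuration.FOURTEEN_MONTHS,
        reset_user_data_on_new_activity=True,
    )
    payload = DataRetentionSettings.serialize(settings)
    return DataRetentionSettings.deserialize(payload)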
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"proto.RepeatedField",
"proto.module",
"proto.Field"
] |
[((762, 1586), 'proto.module', 'proto.module', ([], {'package': '"""google.analytics.admin.v1alpha"""', 'manifest': "{'IndustryCategory', 'ServiceLevel', 'ActorType', 'ActionType',\n 'ChangeHistoryResourceType', 'GoogleSignalsState',\n 'GoogleSignalsConsent', 'LinkProposalInitiatingProduct',\n 'LinkProposalState', 'Account', 'Property', 'AndroidAppDataStream',\n 'IosAppDataStream', 'WebDataStream', 'DataStream', 'UserLink',\n 'AuditUserLink', 'FirebaseLink', 'GlobalSiteTag', 'GoogleAdsLink',\n 'DataSharingSettings', 'AccountSummary', 'PropertySummary',\n 'MeasurementProtocolSecret', 'ChangeHistoryEvent',\n 'ChangeHistoryChange', 'DisplayVideo360AdvertiserLink',\n 'DisplayVideo360AdvertiserLinkProposal', 'LinkProposalStatusDetails',\n 'ConversionEvent', 'GoogleSignalsSettings', 'CustomDimension',\n 'CustomMetric', 'DataRetentionSettings'}"}), "(package='google.analytics.admin.v1alpha', manifest={\n 'IndustryCategory', 'ServiceLevel', 'ActorType', 'ActionType',\n 'ChangeHistoryResourceType', 'GoogleSignalsState',\n 'GoogleSignalsConsent', 'LinkProposalInitiatingProduct',\n 'LinkProposalState', 'Account', 'Property', 'AndroidAppDataStream',\n 'IosAppDataStream', 'WebDataStream', 'DataStream', 'UserLink',\n 'AuditUserLink', 'FirebaseLink', 'GlobalSiteTag', 'GoogleAdsLink',\n 'DataSharingSettings', 'AccountSummary', 'PropertySummary',\n 'MeasurementProtocolSecret', 'ChangeHistoryEvent',\n 'ChangeHistoryChange', 'DisplayVideo360AdvertiserLink',\n 'DisplayVideo360AdvertiserLinkProposal', 'LinkProposalStatusDetails',\n 'ConversionEvent', 'GoogleSignalsSettings', 'CustomDimension',\n 'CustomMetric', 'DataRetentionSettings'})\n", (774, 1586), False, 'import proto\n'), ((5911, 5946), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (5922, 5946), False, 'import proto\n'), ((5966, 6035), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(2)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp)\n', (5977, 6035), False, 'import proto\n'), ((6055, 6124), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(3)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp)\n', (6066, 6124), False, 'import proto\n'), ((6145, 6180), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(4)'}), '(proto.STRING, number=4)\n', (6156, 6180), False, 'import proto\n'), ((6200, 6235), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(5)'}), '(proto.STRING, number=5)\n', (6211, 6235), False, 'import proto\n'), ((6251, 6284), 'proto.Field', 'proto.Field', (['proto.BOOL'], {'number': '(6)'}), '(proto.BOOL, number=6)\n', (6262, 6284), False, 'import proto\n'), ((9062, 9097), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (9073, 9097), False, 'import proto\n'), ((9117, 9186), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(3)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp)\n', (9128, 9186), False, 'import proto\n'), ((9206, 9275), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(4)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp)\n', (9217, 9275), False, 'import proto\n'), ((9290, 9325), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (9301, 9325), False, 'import proto\n'), ((9346, 9381), 'proto.Field', 
'proto.Field', (['proto.STRING'], {'number': '(5)'}), '(proto.STRING, number=5)\n', (9357, 9381), False, 'import proto\n'), ((9407, 9465), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(6)', 'enum': '"""IndustryCategory"""'}), "(proto.ENUM, number=6, enum='IndustryCategory')\n", (9418, 9465), False, 'import proto\n'), ((9483, 9518), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(7)'}), '(proto.STRING, number=7)\n', (9494, 9518), False, 'import proto\n'), ((9540, 9575), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(8)'}), '(proto.STRING, number=8)\n', (9551, 9575), False, 'import proto\n'), ((9597, 9652), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(10)', 'enum': '"""ServiceLevel"""'}), "(proto.ENUM, number=10, enum='ServiceLevel')\n", (9608, 9652), False, 'import proto\n'), ((9672, 9742), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(11)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp)\n', (9683, 9742), False, 'import proto\n'), ((9776, 9846), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(12)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=12, message=timestamp_pb2.Timestamp)\n', (9787, 9846), False, 'import proto\n'), ((9876, 9912), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(13)'}), '(proto.STRING, number=13)\n', (9887, 9912), False, 'import proto\n'), ((11135, 11170), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (11146, 11170), False, 'import proto\n'), ((11194, 11229), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (11205, 11229), False, 'import proto\n'), ((11249, 11318), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(3)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp)\n', (11260, 11318), False, 'import proto\n'), ((11338, 11407), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(4)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp)\n', (11349, 11407), False, 'import proto\n'), ((11428, 11463), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(5)'}), '(proto.STRING, number=5)\n', (11439, 11463), False, 'import proto\n'), ((11484, 11519), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(6)'}), '(proto.STRING, number=6)\n', (11495, 11519), False, 'import proto\n'), ((12730, 12765), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (12741, 12765), False, 'import proto\n'), ((12789, 12824), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (12800, 12824), False, 'import proto\n'), ((12844, 12913), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(3)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp)\n', (12855, 12913), False, 'import proto\n'), ((12933, 13002), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(4)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp)\n', (12944, 13002), False, 'import proto\n'), ((13020, 13055), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(5)'}), '(proto.STRING, number=5)\n', (13031, 13055), False, 'import proto\n'), ((13076, 13111), 'proto.Field', 'proto.Field', (['proto.STRING'], 
{'number': '(6)'}), '(proto.STRING, number=6)\n', (13087, 13111), False, 'import proto\n'), ((14545, 14580), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (14556, 14580), False, 'import proto\n'), ((14603, 14638), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (14614, 14638), False, 'import proto\n'), ((14662, 14697), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(3)'}), '(proto.STRING, number=3)\n', (14673, 14697), False, 'import proto\n'), ((14717, 14786), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(4)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp)\n', (14728, 14786), False, 'import proto\n'), ((14806, 14875), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(5)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp)\n', (14817, 14875), False, 'import proto\n'), ((14895, 14930), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(6)'}), '(proto.STRING, number=6)\n', (14906, 14930), False, 'import proto\n'), ((14951, 14986), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(7)'}), '(proto.STRING, number=7)\n', (14962, 14986), False, 'import proto\n'), ((19568, 19653), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(6)', 'oneof': '"""stream_data"""', 'message': 'WebStreamData'}), "(proto.MESSAGE, number=6, oneof='stream_data', message=WebStreamData\n )\n", (19579, 19653), False, 'import proto\n'), ((19694, 19786), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(7)', 'oneof': '"""stream_data"""', 'message': 'AndroidAppStreamData'}), "(proto.MESSAGE, number=7, oneof='stream_data', message=\n AndroidAppStreamData)\n", (19705, 19786), False, 'import proto\n'), ((19823, 19911), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(8)', 'oneof': '"""stream_data"""', 'message': 'IosAppStreamData'}), "(proto.MESSAGE, number=8, oneof='stream_data', message=\n IosAppStreamData)\n", (19834, 19911), False, 'import proto\n'), ((19933, 19968), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (19944, 19968), False, 'import proto\n'), ((19982, 20036), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(2)', 'enum': 'DataStreamType'}), '(proto.ENUM, number=2, enum=DataStreamType)\n', (19993, 20036), False, 'import proto\n'), ((20057, 20092), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(3)'}), '(proto.STRING, number=3)\n', (20068, 20092), False, 'import proto\n'), ((20112, 20181), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(4)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp)\n', (20123, 20181), False, 'import proto\n'), ((20201, 20270), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(5)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp)\n', (20212, 20270), False, 'import proto\n'), ((21195, 21230), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (21206, 21230), False, 'import proto\n'), ((21252, 21287), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (21263, 21287), False, 'import proto\n'), ((21308, 21351), 'proto.RepeatedField', 'proto.RepeatedField', (['proto.STRING'], {'number': 
'(3)'}), '(proto.STRING, number=3)\n', (21327, 21351), False, 'import proto\n'), ((22233, 22268), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (22244, 22268), False, 'import proto\n'), ((22290, 22325), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (22301, 22325), False, 'import proto\n'), ((22346, 22389), 'proto.RepeatedField', 'proto.RepeatedField', (['proto.STRING'], {'number': '(3)'}), '(proto.STRING, number=3)\n', (22365, 22389), False, 'import proto\n'), ((22413, 22456), 'proto.RepeatedField', 'proto.RepeatedField', (['proto.STRING'], {'number': '(4)'}), '(proto.STRING, number=4)\n', (22432, 22456), False, 'import proto\n'), ((23288, 23323), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (23299, 23323), False, 'import proto\n'), ((23339, 23374), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (23350, 23374), False, 'import proto\n'), ((23394, 23463), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(3)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp)\n', (23405, 23463), False, 'import proto\n'), ((23970, 24005), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (23981, 24005), False, 'import proto\n'), ((24021, 24056), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (24032, 24056), False, 'import proto\n'), ((25546, 25581), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (25557, 25581), False, 'import proto\n'), ((25601, 25636), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(3)'}), '(proto.STRING, number=3)\n', (25612, 25636), False, 'import proto\n'), ((25663, 25696), 'proto.Field', 'proto.Field', (['proto.BOOL'], {'number': '(4)'}), '(proto.BOOL, number=4)\n', (25674, 25696), False, 'import proto\n'), ((25732, 25800), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(5)', 'message': 'wrappers_pb2.BoolValue'}), '(proto.MESSAGE, number=5, message=wrappers_pb2.BoolValue)\n', (25743, 25800), False, 'import proto\n'), ((25834, 25903), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(7)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp)\n', (25845, 25903), False, 'import proto\n'), ((25923, 25992), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(8)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp)\n', (25934, 25992), False, 'import proto\n'), ((26022, 26057), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(9)'}), '(proto.STRING, number=9)\n', (26033, 26057), False, 'import proto\n'), ((27373, 27408), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (27384, 27408), False, 'import proto\n'), ((27452, 27485), 'proto.Field', 'proto.Field', (['proto.BOOL'], {'number': '(2)'}), '(proto.BOOL, number=2)\n', (27463, 27485), False, 'import proto\n'), ((27536, 27569), 'proto.Field', 'proto.Field', (['proto.BOOL'], {'number': '(3)'}), '(proto.BOOL, number=3)\n', (27547, 27569), False, 'import proto\n'), ((27615, 27648), 'proto.Field', 'proto.Field', (['proto.BOOL'], {'number': '(4)'}), '(proto.BOOL, number=4)\n', (27626, 27648), False, 'import proto\n'), 
((27693, 27726), 'proto.Field', 'proto.Field', (['proto.BOOL'], {'number': '(5)'}), '(proto.BOOL, number=5)\n', (27704, 27726), False, 'import proto\n'), ((27762, 27795), 'proto.Field', 'proto.Field', (['proto.BOOL'], {'number': '(6)'}), '(proto.BOOL, number=6)\n', (27773, 27795), False, 'import proto\n'), ((28598, 28633), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (28609, 28633), False, 'import proto\n'), ((28649, 28684), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (28660, 28684), False, 'import proto\n'), ((28705, 28740), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(3)'}), '(proto.STRING, number=3)\n', (28716, 28740), False, 'import proto\n'), ((28767, 28838), 'proto.RepeatedField', 'proto.RepeatedField', (['proto.MESSAGE'], {'number': '(4)', 'message': '"""PropertySummary"""'}), "(proto.MESSAGE, number=4, message='PropertySummary')\n", (28786, 28838), False, 'import proto\n'), ((29308, 29343), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (29319, 29343), False, 'import proto\n'), ((29364, 29399), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (29375, 29399), False, 'import proto\n'), ((30194, 30229), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (30205, 30229), False, 'import proto\n'), ((30250, 30285), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (30261, 30285), False, 'import proto\n'), ((30306, 30341), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(3)'}), '(proto.STRING, number=3)\n', (30317, 30341), False, 'import proto\n'), ((31771, 31806), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (31782, 31806), False, 'import proto\n'), ((31826, 31895), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(2)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp)\n', (31837, 31895), False, 'import proto\n'), ((31914, 31965), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(3)', 'enum': '"""ActorType"""'}), "(proto.ENUM, number=3, enum='ActorType')\n", (31925, 31965), False, 'import proto\n'), ((31990, 32025), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(4)'}), '(proto.STRING, number=4)\n', (32001, 32025), False, 'import proto\n'), ((32050, 32083), 'proto.Field', 'proto.Field', (['proto.BOOL'], {'number': '(5)'}), '(proto.BOOL, number=5)\n', (32061, 32083), False, 'import proto\n'), ((32099, 32174), 'proto.RepeatedField', 'proto.RepeatedField', (['proto.MESSAGE'], {'number': '(6)', 'message': '"""ChangeHistoryChange"""'}), "(proto.MESSAGE, number=6, message='ChangeHistoryChange')\n", (32118, 32174), False, 'import proto\n'), ((39679, 39714), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (39690, 39714), False, 'import proto\n'), ((39729, 39781), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(2)', 'enum': '"""ActionType"""'}), "(proto.ENUM, number=2, enum='ActionType')\n", (39740, 39781), False, 'import proto\n'), ((39812, 39879), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(3)', 'message': 'ChangeHistoryResource'}), '(proto.MESSAGE, number=3, message=ChangeHistoryResource)\n', (39823, 39879), False, 'import proto\n'), ((39923, 
39990), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(4)', 'message': 'ChangeHistoryResource'}), '(proto.MESSAGE, number=4, message=ChangeHistoryResource)\n', (39934, 39990), False, 'import proto\n'), ((41823, 41858), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (41834, 41858), False, 'import proto\n'), ((41880, 41915), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (41891, 41915), False, 'import proto\n'), ((41947, 41982), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(3)'}), '(proto.STRING, number=3)\n', (41958, 41982), False, 'import proto\n'), ((42018, 42086), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(4)', 'message': 'wrappers_pb2.BoolValue'}), '(proto.MESSAGE, number=4, message=wrappers_pb2.BoolValue)\n', (42029, 42086), False, 'import proto\n'), ((42138, 42206), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(5)', 'message': 'wrappers_pb2.BoolValue'}), '(proto.MESSAGE, number=5, message=wrappers_pb2.BoolValue)\n', (42149, 42206), False, 'import proto\n'), ((42254, 42322), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(6)', 'message': 'wrappers_pb2.BoolValue'}), '(proto.MESSAGE, number=6, message=wrappers_pb2.BoolValue)\n', (42265, 42322), False, 'import proto\n'), ((44958, 44993), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (44969, 44993), False, 'import proto\n'), ((45015, 45050), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (45026, 45050), False, 'import proto\n'), ((45087, 45160), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(3)', 'message': '"""LinkProposalStatusDetails"""'}), "(proto.MESSAGE, number=3, message='LinkProposalStatusDetails')\n", (45098, 45160), False, 'import proto\n'), ((45206, 45241), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(4)'}), '(proto.STRING, number=4)\n', (45217, 45241), False, 'import proto\n'), ((45266, 45301), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(5)'}), '(proto.STRING, number=5)\n', (45277, 45301), False, 'import proto\n'), ((45337, 45405), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(6)', 'message': 'wrappers_pb2.BoolValue'}), '(proto.MESSAGE, number=6, message=wrappers_pb2.BoolValue)\n', (45348, 45405), False, 'import proto\n'), ((45457, 45525), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(7)', 'message': 'wrappers_pb2.BoolValue'}), '(proto.MESSAGE, number=7, message=wrappers_pb2.BoolValue)\n', (45468, 45525), False, 'import proto\n'), ((45573, 45641), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(8)', 'message': 'wrappers_pb2.BoolValue'}), '(proto.MESSAGE, number=8, message=wrappers_pb2.BoolValue)\n', (45584, 45641), False, 'import proto\n'), ((46250, 46321), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(1)', 'enum': '"""LinkProposalInitiatingProduct"""'}), "(proto.ENUM, number=1, enum='LinkProposalInitiatingProduct')\n", (46261, 46321), False, 'import proto\n'), ((46359, 46394), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (46370, 46394), False, 'import proto\n'), ((46422, 46481), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(3)', 'enum': '"""LinkProposalState"""'}), "(proto.ENUM, number=3, enum='LinkProposalState')\n", (46433, 46481), False, 'import proto\n'), 
((47779, 47814), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (47790, 47814), False, 'import proto\n'), ((47833, 47868), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (47844, 47868), False, 'import proto\n'), ((47888, 47957), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(3)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp)\n', (47899, 47957), False, 'import proto\n'), ((47975, 48008), 'proto.Field', 'proto.Field', (['proto.BOOL'], {'number': '(4)'}), '(proto.BOOL, number=4)\n', (47986, 48008), False, 'import proto\n'), ((48023, 48056), 'proto.Field', 'proto.Field', (['proto.BOOL'], {'number': '(5)'}), '(proto.BOOL, number=5)\n', (48034, 48056), False, 'import proto\n'), ((48665, 48700), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (48676, 48700), False, 'import proto\n'), ((48714, 48774), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(3)', 'enum': '"""GoogleSignalsState"""'}), "(proto.ENUM, number=3, enum='GoogleSignalsState')\n", (48725, 48774), False, 'import proto\n'), ((48790, 48852), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(4)', 'enum': '"""GoogleSignalsConsent"""'}), "(proto.ENUM, number=4, enum='GoogleSignalsConsent')\n", (48801, 48852), False, 'import proto\n'), ((50841, 50876), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (50852, 50876), False, 'import proto\n'), ((50899, 50934), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (50910, 50934), False, 'import proto\n'), ((50955, 50990), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(3)'}), '(proto.STRING, number=3)\n', (50966, 50990), False, 'import proto\n'), ((51010, 51045), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(4)'}), '(proto.STRING, number=4)\n', (51021, 51045), False, 'import proto\n'), ((51059, 51113), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(5)', 'enum': 'DimensionScope'}), '(proto.ENUM, number=5, enum=DimensionScope)\n', (51070, 51113), False, 'import proto\n'), ((51150, 51183), 'proto.Field', 'proto.Field', (['proto.BOOL'], {'number': '(6)'}), '(proto.BOOL, number=6)\n', (51161, 51183), False, 'import proto\n'), ((53316, 53351), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (53327, 53351), False, 'import proto\n'), ((53374, 53409), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (53385, 53409), False, 'import proto\n'), ((53430, 53465), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(3)'}), '(proto.STRING, number=3)\n', (53441, 53465), False, 'import proto\n'), ((53485, 53520), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(4)'}), '(proto.STRING, number=4)\n', (53496, 53520), False, 'import proto\n'), ((53545, 53600), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(5)', 'enum': 'MeasurementUnit'}), '(proto.ENUM, number=5, enum=MeasurementUnit)\n', (53556, 53600), False, 'import proto\n'), ((53614, 53665), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(6)', 'enum': 'MetricScope'}), '(proto.ENUM, number=6, enum=MetricScope)\n', (53625, 53665), False, 'import proto\n'), ((54639, 54674), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), 
'(proto.STRING, number=1)\n', (54650, 54674), False, 'import proto\n'), ((54703, 54760), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(2)', 'enum': 'RetentionDuration'}), '(proto.ENUM, number=2, enum=RetentionDuration)\n', (54714, 54760), False, 'import proto\n'), ((54800, 54833), 'proto.Field', 'proto.Field', (['proto.BOOL'], {'number': '(3)'}), '(proto.BOOL, number=3)\n', (54811, 54833), False, 'import proto\n'), ((18131, 18166), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (18142, 18166), False, 'import proto\n'), ((18194, 18229), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (18205, 18229), False, 'import proto\n'), ((18253, 18288), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(3)'}), '(proto.STRING, number=3)\n', (18264, 18288), False, 'import proto\n'), ((18822, 18857), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (18833, 18857), False, 'import proto\n'), ((18882, 18917), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (18893, 18917), False, 'import proto\n'), ((19451, 19486), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (19462, 19486), False, 'import proto\n'), ((19508, 19543), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (19519, 19543), False, 'import proto\n'), ((37495, 37568), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(1)', 'oneof': '"""resource"""', 'message': '"""Account"""'}), "(proto.MESSAGE, number=1, oneof='resource', message='Account')\n", (37506, 37568), False, 'import proto\n'), ((37611, 37685), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(2)', 'oneof': '"""resource"""', 'message': '"""Property"""'}), "(proto.MESSAGE, number=2, oneof='resource', message='Property')\n", (37622, 37685), False, 'import proto\n'), ((37735, 37814), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(3)', 'oneof': '"""resource"""', 'message': '"""WebDataStream"""'}), "(proto.MESSAGE, number=3, oneof='resource', message='WebDataStream')\n", (37746, 37814), False, 'import proto\n'), ((37872, 37963), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(4)', 'oneof': '"""resource"""', 'message': '"""AndroidAppDataStream"""'}), "(proto.MESSAGE, number=4, oneof='resource', message=\n 'AndroidAppDataStream')\n", (37883, 37963), False, 'import proto\n'), ((38012, 38099), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(5)', 'oneof': '"""resource"""', 'message': '"""IosAppDataStream"""'}), "(proto.MESSAGE, number=5, oneof='resource', message=\n 'IosAppDataStream')\n", (38023, 38099), False, 'import proto\n'), ((38142, 38220), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(6)', 'oneof': '"""resource"""', 'message': '"""FirebaseLink"""'}), "(proto.MESSAGE, number=6, oneof='resource', message='FirebaseLink')\n", (38153, 38220), False, 'import proto\n'), ((38270, 38349), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(7)', 'oneof': '"""resource"""', 'message': '"""GoogleAdsLink"""'}), "(proto.MESSAGE, number=7, oneof='resource', message='GoogleAdsLink')\n", (38281, 38349), False, 'import proto\n'), ((38407, 38499), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(8)', 'oneof': '"""resource"""', 'message': '"""GoogleSignalsSettings"""'}), 
"(proto.MESSAGE, number=8, oneof='resource', message=\n 'GoogleSignalsSettings')\n", (38418, 38499), False, 'import proto\n'), ((38562, 38662), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(9)', 'oneof': '"""resource"""', 'message': '"""DisplayVideo360AdvertiserLink"""'}), "(proto.MESSAGE, number=9, oneof='resource', message=\n 'DisplayVideo360AdvertiserLink')\n", (38573, 38662), False, 'import proto\n'), ((38770, 38879), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(10)', 'oneof': '"""resource"""', 'message': '"""DisplayVideo360AdvertiserLinkProposal"""'}), "(proto.MESSAGE, number=10, oneof='resource', message=\n 'DisplayVideo360AdvertiserLinkProposal')\n", (38781, 38879), False, 'import proto\n'), ((38961, 39048), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(11)', 'oneof': '"""resource"""', 'message': '"""ConversionEvent"""'}), "(proto.MESSAGE, number=11, oneof='resource', message=\n 'ConversionEvent')\n", (38972, 39048), False, 'import proto\n'), ((39105, 39202), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(12)', 'oneof': '"""resource"""', 'message': '"""MeasurementProtocolSecret"""'}), "(proto.MESSAGE, number=12, oneof='resource', message=\n 'MeasurementProtocolSecret')\n", (39116, 39202), False, 'import proto\n'), ((39284, 39371), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(13)', 'oneof': '"""resource"""', 'message': '"""CustomDimension"""'}), "(proto.MESSAGE, number=13, oneof='resource', message=\n 'CustomDimension')\n", (39295, 39371), False, 'import proto\n'), ((39414, 39493), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(14)', 'oneof': '"""resource"""', 'message': '"""CustomMetric"""'}), "(proto.MESSAGE, number=14, oneof='resource', message='CustomMetric')\n", (39425, 39493), False, 'import proto\n'), ((39551, 39644), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(15)', 'oneof': '"""resource"""', 'message': '"""DataRetentionSettings"""'}), "(proto.MESSAGE, number=15, oneof='resource', message=\n 'DataRetentionSettings')\n", (39562, 39644), False, 'import proto\n')]
|
from prometheus_client.core import GaugeMetricFamily
import prometheus_client as prom
import time
from vault_integration import Vault
class CustomVaultExporter:
def __init__(self):
pass
def collect(self):
        # called by the Prometheus client on every scrape of the registry
        vault = Vault()
tokens_info = vault.get_key_data_from_vault()
for token_info in tokens_info:
gauge = GaugeMetricFamily(
name="vault_token_expire_time",
documentation="Collect time remaining to expire Vault service token",
labels=['display_name', 'time_format']
)
gauge.add_metric(
labels=[token_info.name, 'minute'],
value=token_info.expiration_time
)
yield gauge
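def _render_exposition_text():
    # hypothetical helper, not part of the original exporter: renders the
    # current exposition text with the standard prometheus_client
    # generate_latest helper, useful for inspecting the gauge output
    # without starting the HTTP server
    return prom.generate_latest(prom.REGISTRY).decode()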
if __name__ == "__main__":
custom_exporter = CustomVaultExporter()
prom.REGISTRY.register(custom_exporter)
prom.start_http_server(9121)
while True:
time.sleep(30)
|
[
"vault_integration.Vault",
"prometheus_client.start_http_server",
"prometheus_client.REGISTRY.register",
"time.sleep",
"prometheus_client.core.GaugeMetricFamily"
] |
[((830, 869), 'prometheus_client.REGISTRY.register', 'prom.REGISTRY.register', (['custom_exporter'], {}), '(custom_exporter)\n', (852, 869), True, 'import prometheus_client as prom\n'), ((874, 902), 'prometheus_client.start_http_server', 'prom.start_http_server', (['(9121)'], {}), '(9121)\n', (896, 902), True, 'import prometheus_client as prom\n'), ((240, 247), 'vault_integration.Vault', 'Vault', ([], {}), '()\n', (245, 247), False, 'from vault_integration import Vault\n'), ((928, 942), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (938, 942), False, 'import time\n'), ((362, 531), 'prometheus_client.core.GaugeMetricFamily', 'GaugeMetricFamily', ([], {'name': '"""vault_token_expire_time"""', 'documentation': '"""Collect time remaining to expire Vault service token"""', 'labels': "['display_name', 'time_format']"}), "(name='vault_token_expire_time', documentation=\n 'Collect time remaining to expire Vault service token', labels=[\n 'display_name', 'time_format'])\n", (379, 531), False, 'from prometheus_client.core import GaugeMetricFamily\n')]
|
"""
For more information on the contents of this module:
- help(plastic.GenotypeMatrix)
- help(clustering.cluster_mutations)
--------
Module that exposes the clustering algorithm presented at
https://github.com/AlgoLab/celluloid
Simple example workflow:
from plastic import clustering
to_cluster = clustering.GenotypeMatrix.from_files('to_cluster.txt', mutations_file = 'mutations.txt')
# Reduce the size of the input down to 50 to speed up some complex computation
# (for instance SASC tree inference)
clustered = clustering.cluster_mutations(to_cluster, k = 50)
# Get the clustered mutations as comma separated lists of simple mutations
muts = clustered.mutations()
# Save the matrix and use it for some intensive computation
clustered.to_files('clustered.txt', mutations_file = 'clustered_mutations.txt')
"""
from ._core.genotypematrix import GenotypeMatrix
import numpy as np
from kmodes.kmodes import KModes
from collections import defaultdict
def cluster_mutations(
genotype_matrix,
k,
n_inits=10,
max_iter=100,
verbose=False,
**kwargs):
"""
Clusters the mutations in a genotype matrix by applying kmodes
Parameters:
genotype_matrix(GenotypeMatrix):
A matrix representing the results of single-cell sequencing.
k(int):
The number of clustered mutations in the output matrix.
Note that empty clusters will be discarded after clustering.
n_inits(int):
            The number of initializations in the clustering process.
max_iter(int):
The maximum number of iterations in the clustering process.
        verbose(bool):
            Whether to print progress information while clustering.
**kwargs:
Additional arguments passed to KModes process.
Returns:
GenotypeMatrix:
The result of the clustering process. Each column in the matrix
will be the centroid of a non-empty cluster, and will be labeled with
a comma-separated list of the labels of the mutations within the cluster.
Cell labels are left unaltered.
"""
if type(k) != int or k < 1:
raise ValueError(f'the number of clusters must be a positive integer, but {k} is not.')
if type(max_iter) != int or max_iter < 1:
raise ValueError(f'the number of iterations must be a positive integer, but {max_iter} is not.')
if type(n_inits) != int or n_inits < 1:
raise ValueError(f'the number of initializations must be a positive integer, but {n_inits} is not.')
    return _celluloid(genotype_matrix, k, n_inits, max_iter, verbose, **kwargs)
def _conflict_dissim(a, b, **_):
v = np.vectorize(lambda ai, bi: ai != 2 and bi != 2 and ai != bi)
return np.sum(v(a, b), axis=1)
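# a minimal sanity check of the dissimilarity (hypothetical toy matrices):
# the "missing" value 2 never counts as a conflict, so only the first
# column below disagrees and the per-row distance is 1
assert list(_conflict_dissim(np.array([[0, 1, 2]]), np.array([[1, 1, 0]]))) == [1]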
def _celluloid(
genotype_matrix,
k,
n_inits,
max_iter,
verbose,
**kwargs
):
"""
Clusters the mutations in a genotype matrix by applying kmodes
Parameters:
genotype_matrix(GenotypeMatrix):
A matrix representing the results of single-cell sequencing.
k(int):
The number of clustered mutations in the output matrix.
Note that empty clusters will be discarded after clustering.
n_inits(int):
            The number of initializations in the clustering process.
max_iter(int):
The maximum number of iterations in the clustering process.
        verbose(bool):
            Whether to print progress information while clustering.
**kwargs:
Additional arguments passed to KModes process.
Returns:
GenotypeMatrix:
The result of the clustering process. Each column in the matrix
will be the centroid of a non-empty cluster, and will be labeled with
a comma-separated list of the labels of the mutations within the cluster.
Cell labels are left unaltered.
"""
mutations_as_points = np.array(genotype_matrix.matrix(), dtype='int').transpose()
mutation_labels = genotype_matrix.mutation_labels
km = KModes(
n_clusters=k,
cat_dissim=_conflict_dissim,
init='huang',
n_init=n_inits,
max_iter=max_iter,
verbose=(1 if verbose else 0),
**kwargs
)
clusters = km.fit_predict(mutations_as_points)
# Each cluster will be labeled with the labels of its components.
clusters_of_mutations = km.labels_
clustered_mutation_labels = defaultdict(list)
for mutation_label, mutation_cluster in zip(mutation_labels, clusters_of_mutations):
clustered_mutation_labels[mutation_cluster].append(mutation_label)
nonempty_clusters = clustered_mutation_labels.keys()
# build the output matrix and the mutation labels as strings
cluster_centroids = km.cluster_centroids_
clustered_mutation_labels_strings = [','.join(clustered_mutation_labels[cluster_id]) for cluster_id in
sorted(nonempty_clusters)]
out_matrix = [cluster_centroids[cluster_id] for cluster_id in sorted(nonempty_clusters)]
# the matrix needs to be transposed back to its original orientation
out_matrix = np.array(out_matrix).transpose()
return GenotypeMatrix(out_matrix, cell_labels=genotype_matrix.cell_labels,
mutation_labels=clustered_mutation_labels_strings)
|
[
"kmodes.kmodes.KModes",
"collections.defaultdict",
"numpy.vectorize",
"numpy.array"
] |
[((2650, 2711), 'numpy.vectorize', 'np.vectorize', (['(lambda ai, bi: ai != 2 and bi != 2 and ai != bi)'], {}), '(lambda ai, bi: ai != 2 and bi != 2 and ai != bi)\n', (2662, 2711), True, 'import numpy as np\n'), ((4007, 4149), 'kmodes.kmodes.KModes', 'KModes', ([], {'n_clusters': 'k', 'cat_dissim': '_conflict_dissim', 'init': '"""huang"""', 'n_init': 'n_inits', 'max_iter': 'max_iter', 'verbose': '(1 if verbose else 0)'}), "(n_clusters=k, cat_dissim=_conflict_dissim, init='huang', n_init=\n n_inits, max_iter=max_iter, verbose=1 if verbose else 0, **kwargs)\n", (4013, 4149), False, 'from kmodes.kmodes import KModes\n'), ((4403, 4420), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4414, 4420), False, 'from collections import defaultdict\n'), ((5114, 5134), 'numpy.array', 'np.array', (['out_matrix'], {}), '(out_matrix)\n', (5122, 5134), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import wx
import PexpectRunnerConsolGUI
###########################################################################
## Class PexpectRunnerImpl
###########################################################################
class PexpectRunnerImpl ( PexpectRunnerConsolGUI.PexpectRunnerGUI ):
def __init__( self, parent ):
PexpectRunnerConsolGUI.PexpectRunnerGUI.__init__ ( self, parent)
def __del__( self ):
pass
# TODO Doesn't really work right now. Just going to leave it autoscrolling which is default. Someone can fix this in the future.
def AppendMessage(self, msg_text):
print(self.TextCtrlConsol.GetScrollPos(wx.VERTICAL))
print(self.TextCtrlConsol.GetScrollRange(wx.VERTICAL))
print(self.TextCtrlConsol.GetScrollThumb(wx.VERTICAL))
        # If we are at the bottom of the console, shift the box to show new text
if self.TextCtrlConsol.GetScrollPos(wx.VERTICAL) + self.TextCtrlConsol.GetScrollThumb(wx.VERTICAL) == self.TextCtrlConsol.GetScrollRange(wx.VERTICAL):
print('At bottom...')
            self.TextCtrlConsol.write(msg_text + '\n') # Write text with new line to console
self.TextCtrlConsol.SetScrollPos(wx.VERTICAL, self.TextCtrlConsol.GetScrollRange(wx.VERTICAL)) # Set the scroll to the end
        # If we are not at the bottom of the console, just append the text and do nothing else
else:
print('Not at bottom...')
self.TextCtrlConsol.Freeze()
self.TextCtrlConsol.write(msg_text + '\n')
self.TextCtrlConsol.Thaw()
# Override these
def onWindowClose( self, event ):
event.Skip()
def onMouseWheel( self, event ):
event.Skip()
|
[
"PexpectRunnerConsolGUI.PexpectRunnerGUI.__init__"
] |
[((349, 411), 'PexpectRunnerConsolGUI.PexpectRunnerGUI.__init__', 'PexpectRunnerConsolGUI.PexpectRunnerGUI.__init__', (['self', 'parent'], {}), '(self, parent)\n', (397, 411), False, 'import PexpectRunnerConsolGUI\n')]
|
"""
Inserts metadata and figures into the report template.
"""
import base64
import json
import logging
from pathlib import Path
import re
import subprocess
import tempfile
from bokeh import __version__ as bokeh_version
from jinja2 import Environment, PackageLoader, select_autoescape, ChoiceLoader
from jinja2.runtime import Undefined
from plotly import __version__ as plotly_version
from solarforecastarbiter import datamodel
from solarforecastarbiter.reports.figures import plotly_figures
logger = logging.getLogger(__name__)
def build_metrics_json(report):
"""Creates a dict from the metrics results in the report.
Parameters
----------
report: :py:class:`solarforecastarbiter.datamodel.Report`
Returns
-------
str
The json representing the report metrics. The string will be a string
representing an empty json array if the report does not have a
computed raw_report.
"""
if getattr(report, 'raw_report') is not None:
df = plotly_figures.construct_metrics_dataframe(
list(filter(lambda x: not getattr(x, 'is_summary', False),
report.raw_report.metrics)),
rename=plotly_figures.abbreviate)
return df.to_json(orient="records")
else:
return "[]"
def build_summary_stats_json(report):
"""Creates a dict from the summary statistics in the report.
Parameters
----------
report: :py:class:`solarforecastarbiter.datamodel.Report`
Returns
-------
str
The json representing the summary statistics. Will be a string
representing an empty json array if the report does not have a
computed raw_report.
Raises
------
ValueError
If report.raw_report is populated but no
report.raw_report.metrics have `is_summary == True`
indicating that the report was made without
summary statistics.
"""
if getattr(report, 'raw_report') is not None:
df = plotly_figures.construct_metrics_dataframe(
list(filter(lambda x: getattr(x, 'is_summary', False),
report.raw_report.metrics)),
rename=plotly_figures.abbreviate)
if df.empty:
raise ValueError('No summary statistics in report.')
return df.to_json(orient="records")
else:
return "[]"
def build_metadata_json(report):
"""Creates a JSON array of ProcessedForecastObservations parameters
in the report.
Parameters
----------
report: :py:class:`solarforecastarbiter.datamodel.Report`
Returns
-------
str
The JSON representing the report forecast-observation metadata.
"""
if getattr(report, 'raw_report') is None:
return "[]"
drop_keys = {
'__blurb__', 'site', 'aggregate',
}
def _process_forecast(fx):
if fx is None:
return None
out = {k: v for k, v in fx.to_dict().items()
if k not in drop_keys}
if isinstance(fx, datamodel.ProbabilisticForecast):
out['constant_values'] = [
cdf.constant_value for cdf in fx.constant_values]
return out
out = []
for pfxobs in report.raw_report.processed_forecasts_observations:
minp = pfxobs.replace(original=None)
thisout = {k: v for k, v in minp.to_dict().items()
if k in (
'name', 'interval_value_type', 'interval_length',
'interval_label', 'normalization_factor',
'uncertainty', 'cost')}
thisout['forecast'] = _process_forecast(pfxobs.original.forecast)
thisout['reference_forecast'] = _process_forecast(
pfxobs.original.reference_forecast)
thisout['observation'] = None
thisout['aggregate'] = None
if hasattr(pfxobs.original, 'observation'):
thisout['observation'] = {
k: v for k, v in pfxobs.original.observation.to_dict().items()
if k not in drop_keys
}
elif hasattr(pfxobs.original, 'aggregate'):
thisout['aggregate'] = {
k: v for k, v in pfxobs.original.aggregate.to_dict().items()
if k not in drop_keys or k == 'observations'
}
obs = []
for aggobs in pfxobs.original.aggregate.observations:
obsd = aggobs.to_dict()
obsd['observation_id'] = obsd.pop('observation')[
'observation_id']
obs.append(obsd)
thisout['aggregate']['observations'] = obs
out.append(thisout)
return json.dumps(out).replace('NaN', 'null')
def _get_render_kwargs(report, dash_url, with_timeseries):
"""Creates a dictionary of key word template arguments for a jinja2
report template.
Parameters
----------
report: :py:class:`solarforecastarbiter.datamodel.Report`
dash_url: str
        URL of the Solar Forecast Arbiter dashboard to use when building links.
with_timeseries: bool
Whether or not to include timeseries plots. If an error occurs when
trying to generate timeseries plots, the `timeseries_spec`,
`scatter_spec`, and `timeseries_prob_spec` arguments will not be
defined.
Returns
-------
kwargs: dict
Dictionary of template variables to unpack as key word arguments when
rendering.
"""
# macros render columns for every allowed summary statistic, so be
# specific about which columns to include to avoid unnecessary blanks.
# Check that the report is complete, and if the processed forecasts are
# all event forecasts. Checking processed forecast pairs instead of
# report_parameters.object pairs allows us to skip the step of loading
# or shuffling around forecasts when working with a raw api response on
# the dashboard without the aid of solarforecastarbiter.io.api's
# process_report_dict. See issue 694 for context.
if report.status == "complete" and all(
type(x.original.forecast) is datamodel.EventForecast for x in
report.raw_report.processed_forecasts_observations
):
human_statistics = datamodel.ALLOWED_EVENT_SUMMARY_STATISTICS
else:
human_statistics = datamodel.ALLOWED_DETERMINISTIC_SUMMARY_STATISTICS
# macros only render columns/plots for metrics that actually exist,
# so no need to be specific to avoid unnecessary blanks
kwargs = dict(
human_categories=datamodel.ALLOWED_CATEGORIES,
human_metrics=datamodel.ALLOWED_METRICS,
human_statistics=human_statistics,
report=report,
category_blurbs=datamodel.CATEGORY_BLURBS,
dash_url=dash_url,
metrics_json=build_metrics_json(report),
metadata_json=build_metadata_json(report),
templating_messages=[]
)
report_plots = getattr(report.raw_report, 'plots', None)
# get plotting library versions used when plots were generated.
    # if plot generation failed, fall back to the current version
plot_bokeh = getattr(report_plots, 'bokeh_version', None)
kwargs['bokeh_version'] = plot_bokeh if plot_bokeh else bokeh_version
plot_plotly = getattr(report_plots, 'plotly_version', None)
kwargs['plotly_version'] = plot_plotly if plot_plotly else plotly_version
try:
kwargs['summary_stats'] = build_summary_stats_json(report)
except ValueError:
kwargs['templating_messages'].append(
'No data summary statistics were calculated with this report.')
kwargs['summary_stats'] = '[]'
if with_timeseries:
try:
timeseries_specs = plotly_figures.timeseries_plots(report)
except Exception:
logger.exception(
'Failed to make Plotly items for timeseries and scatterplot')
else:
if timeseries_specs[0] is not None:
kwargs['timeseries_spec'] = timeseries_specs[0]
if timeseries_specs[1] is not None:
kwargs['scatter_spec'] = timeseries_specs[1]
if timeseries_specs[2] is not None:
kwargs['timeseries_prob_spec'] = timeseries_specs[2]
kwargs['includes_distribution'] = timeseries_specs[3]
return kwargs
def _pretty_json(value):
if isinstance(value, Undefined): # pragma: no cover
return value
return json.dumps(value, indent=4, separators=(',', ':'))
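# hypothetical example of the filter: 4-space indentation combined with the
# compact ':' key separator requested above
assert _pretty_json({'a': 1}) == '{\n    "a":1\n}'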
def _figure_name_filter(value):
"""replace characters that may cause problems for html/javascript ids"""
if isinstance(value, Undefined):
return value
out = (value
.replace('^', '-')
.replace(' ', '-')
.replace('.', 'dot')
.replace('%', 'percent')
.replace('<', 'lt')
.replace('>', 'gt')
.replace('=', 'eq')
.replace('(', 'lp')
.replace(')', 'rp')
.replace('/', 'fsl')
.replace('\\', 'bsl')
)
out = re.sub('[^\\w-]', 'special', out)
return out
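# hypothetical example: the sanitized result is safe to use as an
# html/javascript element id
assert _figure_name_filter('x < 50%') == 'x-lt-50percent'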
def _unique_flags_filter(proc_fxobs_list, before_resample):
# use a dict to preserve order and guarantee uniqueness of keys
names = {}
for proc_fxobs in proc_fxobs_list:
for val_result in proc_fxobs.validation_results:
if val_result.before_resample == before_resample:
names[val_result.flag] = None
unique_names = list(names.keys())
return unique_names
def get_template_and_kwargs(report, dash_url, with_timeseries, body_only):
"""Returns the jinja2 Template object and a dict of template variables for
the report. If the report failed to compute, the template and kwargs will
be for an error page.
Parameters
----------
report: :py:class:`solarforecastarbiter.datamodel.Report`
dash_url: str
        URL of the Solar Forecast Arbiter dashboard to use when building links.
with_timeseries: bool
Whether or not to include timeseries plots.
body_only: bool
When True, returns a div for injecting into another template,
otherwise returns a full html document with the required
<html> and <head> tags.
Returns
-------
template: jinja2.environment.Template
kwargs: dict
Dictionary of template variables to use as keyword arguments to
template.render().
"""
env = Environment(
loader=ChoiceLoader([
PackageLoader('solarforecastarbiter.reports', 'templates/html'),
PackageLoader('solarforecastarbiter.reports', 'templates'),
]),
autoescape=select_autoescape(['html', 'xml']),
lstrip_blocks=True,
trim_blocks=True
)
env.filters['pretty_json'] = _pretty_json
env.filters['figure_name_filter'] = _figure_name_filter
env.filters['unique_flags_filter'] = _unique_flags_filter
kwargs = _get_render_kwargs(report, dash_url, with_timeseries)
if report.status == 'complete':
template = env.get_template('body.html')
elif report.status == 'failed':
template = env.get_template('failure.html')
elif report.status == 'pending':
template = env.get_template('pending.html')
else:
raise ValueError(f'Unknown status for report {report.status}')
if body_only:
kwargs['base_template'] = env.get_template('empty_base.html')
else:
kwargs['base_template'] = env.get_template('base.html')
return template, kwargs
def render_html(report, dash_url=datamodel.DASH_URL,
with_timeseries=True, body_only=False):
"""Create full html file.
The Solar Forecast Arbiter dashboard will likely use its own
templates for rendering the full html.
Parameters
----------
report: :py:class:`solarforecastarbiter.datamodel.Report`
dash_url: str
        URL of the Solar Forecast Arbiter dashboard to use when building links.
with_timeseries: bool
Whether or not to include timeseries plots.
body_only: bool
When True, returns a div for injecting into another template,
otherwise returns a full html document with the required
<html> and <head> tags.
Returns
-------
str
The rendered html report
"""
template, kwargs = get_template_and_kwargs(
report, dash_url, with_timeseries, body_only)
out = template.render(**kwargs)
return out
def _link_filter(value):
"""convert html href markup to tex href markup"""
if isinstance(value, Undefined): # pragma: no cover
return value
match = re.search(
"""<a\\s+(?:[^>]*?\\s+)?href=(["'])(.*?)(["'])>(.*?)<\\/a>""",
value, re.DOTALL)
if match:
new = "\\href{" + match.group(2) + "}{" + match.group(4) + "}"
out = value[:match.start()] + new + value[match.end():]
return out
else:
return value
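# hypothetical example: a single anchor tag becomes a tex \href command
assert _link_filter('see <a href="https://example.com">docs</a>') == \
    'see \\href{https://example.com}{docs}'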
def _html_to_tex(value):
if isinstance(value, Undefined):
return value
value = (value
.replace('<p>', '')
.replace('</p>', '\n')
.replace('<em>', '\\emph{')
.replace('</em>', '}')
.replace('<code>', '\\verb|')
.replace('</code>', '|')
.replace('<b>', '\\textbf{')
.replace('</b>', '}')
.replace('<ol>', '\\begin{enumerate}')
.replace('</ol>', '\\end{enumerate}')
.replace('<li>', '\\item ')
.replace('</li>', '\n')
.replace('</a>', '')
.replace('<=', '$\\leq$')
.replace("%", "\\%")
.replace('W/m^2', '$W/m^2$')
)
value = re.sub('\\<a.*\\>', '', value)
return value
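# hypothetical example: emphasis and bold markup are translated to their
# tex equivalents
assert _html_to_tex('<em>observed</em> vs <b>forecast</b>') == \
    '\\emph{observed} vs \\textbf{forecast}'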
def render_pdf(report, dash_url, max_runs=5):
"""
Create a PDF report using LaTeX.
Parameters
----------
report: :py:class:`solarforecastarbiter.datamodel.Report`
dash_url: str
URL of the Solar Forecast Arbiter dashboard to use when building links.
max_runs: int, default 5
Maximum number of times to run pdflatex
Returns
-------
bytes
The rendered PDF report
Notes
-----
This code was inspired by the latex package available at
https://github.com/mbr/latex/ under the following license:
Copyright (c) 2015, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of latex nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" # NOQA
env = Environment(
loader=ChoiceLoader([
PackageLoader('solarforecastarbiter.reports', 'templates/pdf'),
PackageLoader('solarforecastarbiter.reports', 'templates'),
]),
autoescape=False,
lstrip_blocks=True,
trim_blocks=True,
block_start_string='\\BLOCK{',
block_end_string='}',
variable_start_string='\\VAR{',
variable_end_string='}',
comment_start_string='\\#{',
comment_end_string='}',
line_statement_prefix='%-',
line_comment_prefix='%#'
)
env.filters['html_to_tex'] = _html_to_tex
env.filters['link_filter'] = _link_filter
env.filters['pretty_json'] = _pretty_json
env.filters['unique_flags_filter'] = _unique_flags_filter
kwargs = _get_render_kwargs(report, dash_url, False)
with tempfile.TemporaryDirectory() as _tmpdir:
tmpdir = Path(_tmpdir)
logfile, auxfile = _prepare_latex_support_files(tmpdir, env, kwargs)
_save_figures_to_pdf(tmpdir, report)
_compile_files_into_pdf(tmpdir, logfile, auxfile, max_runs)
return (tmpdir / 'out.pdf').read_bytes()
def _prepare_latex_support_files(tmpdir, env, kwargs):
template = env.get_template('base.tex')
tex = template.render(**kwargs)
texfile = tmpdir / 'out.tex'
texfile.write_text(tex)
auxfile = tmpdir / 'out.aux'
logfile = tmpdir / 'out.log'
return logfile, auxfile
def _save_figures_to_pdf(tmpdir, report):
figdir = tmpdir / 'figs'
figdir.mkdir()
for fig in report.raw_report.plots.figures:
name = (
fig.category + '+' + fig.metric + '+' +
fig.name
).replace('^', '-').replace(' ', '+').replace('_', '+').replace(
'<=', 'lte').replace('%', 'pct').replace('.', '').replace('/', '-')
name += '.pdf'
# handle characters that will cause problems for tex
figpath = figdir / name
figpath.write_bytes(base64.a85decode(fig.pdf))
def _compile_files_into_pdf(tmpdir, logfile, auxfile, max_runs):
args = (
'pdflatex',
'-interaction=batchmode',
'-halt-on-error',
'-no-shell-escape',
'-file-line-error',
'out.tex'
)
runs_left = max_runs
prev_aux = 'nothing to see here'
# run pdflatex until it settles
while runs_left > 0:
try:
subprocess.run(args, check=True, cwd=str(tmpdir.absolute()))
except subprocess.CalledProcessError:
try:
logger.exception(logfile.read_text())
except Exception:
logger.exception('Pdflatex failed and so did reading log')
raise
aux = auxfile.read_text()
if aux == prev_aux:
break
else:
prev_aux = aux
runs_left -= 1
else:
raise RuntimeError(
f'PDF generation unstable after {max_runs} runs')
|
[
"tempfile.TemporaryDirectory",
"solarforecastarbiter.reports.figures.plotly_figures.timeseries_plots",
"json.dumps",
"jinja2.select_autoescape",
"jinja2.PackageLoader",
"pathlib.Path",
"re.search",
"re.sub",
"base64.a85decode",
"logging.getLogger"
] |
[((507, 534), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (524, 534), False, 'import logging\n'), ((8427, 8477), 'json.dumps', 'json.dumps', (['value'], {'indent': '(4)', 'separators': "(',', ':')"}), "(value, indent=4, separators=(',', ':'))\n", (8437, 8477), False, 'import json\n'), ((9035, 9068), 're.sub', 're.sub', (['"""[^\\\\w-]"""', '"""special"""', 'out'], {}), "('[^\\\\w-]', 'special', out)\n", (9041, 9068), False, 'import re\n'), ((12603, 12695), 're.search', 're.search', (['"""<a\\\\s+(?:[^>]*?\\\\s+)?href=(["\'])(.*?)(["\'])>(.*?)<\\\\/a>"""', 'value', 're.DOTALL'], {}), '(\'<a\\\\s+(?:[^>]*?\\\\s+)?href=(["\\\'])(.*?)(["\\\'])>(.*?)<\\\\/a>\',\n value, re.DOTALL)\n', (12612, 12695), False, 'import re\n'), ((13675, 13705), 're.sub', 're.sub', (['"""\\\\<a.*\\\\>"""', '""""""', 'value'], {}), "('\\\\<a.*\\\\>', '', value)\n", (13681, 13705), False, 'import re\n'), ((16715, 16744), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (16742, 16744), False, 'import tempfile\n'), ((16774, 16787), 'pathlib.Path', 'Path', (['_tmpdir'], {}), '(_tmpdir)\n', (16778, 16787), False, 'from pathlib import Path\n'), ((4660, 4675), 'json.dumps', 'json.dumps', (['out'], {}), '(out)\n', (4670, 4675), False, 'import json\n'), ((7697, 7736), 'solarforecastarbiter.reports.figures.plotly_figures.timeseries_plots', 'plotly_figures.timeseries_plots', (['report'], {}), '(report)\n', (7728, 7736), False, 'from solarforecastarbiter.reports.figures import plotly_figures\n'), ((10635, 10669), 'jinja2.select_autoescape', 'select_autoescape', (["['html', 'xml']"], {}), "(['html', 'xml'])\n", (10652, 10669), False, 'from jinja2 import Environment, PackageLoader, select_autoescape, ChoiceLoader\n'), ((17846, 17871), 'base64.a85decode', 'base64.a85decode', (['fig.pdf'], {}), '(fig.pdf)\n', (17862, 17871), False, 'import base64\n'), ((10467, 10530), 'jinja2.PackageLoader', 'PackageLoader', (['"""solarforecastarbiter.reports"""', '"""templates/html"""'], {}), "('solarforecastarbiter.reports', 'templates/html')\n", (10480, 10530), False, 'from jinja2 import Environment, PackageLoader, select_autoescape, ChoiceLoader\n'), ((10544, 10602), 'jinja2.PackageLoader', 'PackageLoader', (['"""solarforecastarbiter.reports"""', '"""templates"""'], {}), "('solarforecastarbiter.reports', 'templates')\n", (10557, 10602), False, 'from jinja2 import Environment, PackageLoader, select_autoescape, ChoiceLoader\n'), ((15935, 15997), 'jinja2.PackageLoader', 'PackageLoader', (['"""solarforecastarbiter.reports"""', '"""templates/pdf"""'], {}), "('solarforecastarbiter.reports', 'templates/pdf')\n", (15948, 15997), False, 'from jinja2 import Environment, PackageLoader, select_autoescape, ChoiceLoader\n'), ((16011, 16069), 'jinja2.PackageLoader', 'PackageLoader', (['"""solarforecastarbiter.reports"""', '"""templates"""'], {}), "('solarforecastarbiter.reports', 'templates')\n", (16024, 16069), False, 'from jinja2 import Environment, PackageLoader, select_autoescape, ChoiceLoader\n')]
|
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import skimage
from sklearn import svm, metrics, datasets
from sklearn.utils import Bunch
from sklearn.model_selection import GridSearchCV, train_test_split
#import opencv
from skimage.io import imread
from skimage.transform import resize
import time
import sys
start = time.time()
def load_image_files(container_path, dimension=(256, 256, 3)):
"""
Load image files with categories as subfolder names
    which behaves like a scikit-learn sample dataset
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
dimension : tuple
        size to which images are adjusted
Returns
-------
Bunch
"""
image_dir = Path(container_path)
folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]
categories = [fo.name for fo in folders]
descr = "A image classification dataset"
images = []
flat_data = []
target = []
for i, direc in enumerate(folders):
for file in direc.iterdir():
img = skimage.io.imread(file)
img_resized = resize(img, dimension, anti_aliasing=True, mode='reflect')
flat_data.append(img_resized.flatten())
images.append(img_resized)
target.append(i)
flat_data = np.array(flat_data)
target = np.array(target)
images = np.array(images)
#print(images)
return Bunch(data=flat_data,
target=target,
target_names=categories,
images=images,
DESCR=descr),folders
image_dataset_train,folders_train = load_image_files("train/")
image_dataset_test,folders_test = load_image_files("test/")
#image_dataset = load_image_files("images/")
X_train = image_dataset_train.data
y_train = image_dataset_train.target
X_test = image_dataset_test.data
y_test = image_dataset_test.target
# image_dataset.data, image_dataset.target, test_size=0.3,random_state=109)
# param_grid = [
# {'C': [1, 10, 100, 1000], 'kernel': ['linear']},
# {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},
# ]
#svc = svm.SVC()
clf = svm.SVC()
#clf = GridSearchCV(svc, param_grid)
clf.fit(X_train, y_train)
print(folders_train)
y_pred = clf.predict(X_test)
print(y_pred)
print(y_test)
len_of_y = len(y_pred)
predict_correct_covid = 0
predict_wrong_covid = 0
predict_correct_noncovid = 0
predict_wrong_noncovid = 0
for i in range(len_of_y):
if y_pred[i] == y_test[i] and y_pred[i] == 0:
predict_correct_covid += 1
elif y_pred[i] == y_test[i] and y_pred[i] == 1:
predict_correct_noncovid += 1
elif y_pred[i] != y_test[i] and y_pred[i] == 0:
predict_wrong_covid += 1
elif y_pred[i] != y_test[i] and y_pred[i] == 1:
predict_wrong_noncovid += 1
print("predict_correct_covid", predict_correct_covid)
print("predict_wrong_covid", predict_wrong_covid)
print("predict_correct_noncovid", predict_correct_noncovid)
print("predict_wrong_noncovid", predict_wrong_noncovid)
print("percen of correct covid", predict_correct_covid/(predict_correct_covid + predict_wrong_covid))
print("percen of correct noncovid", predict_correct_noncovid/(predict_correct_noncovid + predict_wrong_noncovid))
print("precent over all", (predict_correct_covid + predict_correct_noncovid)/len_of_y)
end = time.time()
print("time", end - start)
|
[
"sklearn.utils.Bunch",
"time.time",
"pathlib.Path",
"numpy.array",
"skimage.transform.resize",
"sklearn.svm.SVC",
"skimage.io.imread"
] |
[((360, 371), 'time.time', 'time.time', ([], {}), '()\n', (369, 371), False, 'import time\n'), ((2311, 2320), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (2318, 2320), False, 'from sklearn import svm, metrics, datasets\n'), ((3544, 3555), 'time.time', 'time.time', ([], {}), '()\n', (3553, 3555), False, 'import time\n'), ((834, 854), 'pathlib.Path', 'Path', (['container_path'], {}), '(container_path)\n', (838, 854), False, 'from pathlib import Path\n'), ((1441, 1460), 'numpy.array', 'np.array', (['flat_data'], {}), '(flat_data)\n', (1449, 1460), True, 'import numpy as np\n'), ((1475, 1491), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (1483, 1491), True, 'import numpy as np\n'), ((1506, 1522), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1514, 1522), True, 'import numpy as np\n'), ((1555, 1648), 'sklearn.utils.Bunch', 'Bunch', ([], {'data': 'flat_data', 'target': 'target', 'target_names': 'categories', 'images': 'images', 'DESCR': 'descr'}), '(data=flat_data, target=target, target_names=categories, images=images,\n DESCR=descr)\n', (1560, 1648), False, 'from sklearn.utils import Bunch\n'), ((1187, 1210), 'skimage.io.imread', 'skimage.io.imread', (['file'], {}), '(file)\n', (1204, 1210), False, 'import skimage\n'), ((1240, 1298), 'skimage.transform.resize', 'resize', (['img', 'dimension'], {'anti_aliasing': '(True)', 'mode': '"""reflect"""'}), "(img, dimension, anti_aliasing=True, mode='reflect')\n", (1246, 1298), False, 'from skimage.transform import resize\n')]
|
import io
import math
from textwrap import wrap
from time import strftime, gmtime
import bezier
import matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
from PIL import Image
from matplotlib import pyplot as plt
from ..utils import Log
def graph_bpm(map_obj):
"""
    graphs the BPM changes of a map
:param map_obj: a MapStats object
:return: image in io stream
"""
Log.log(f"Graphing BPM for {map_obj.title}")
data = [(i.starttime / map_obj.speed_multiplier,
1000 / i.ms_per_beat * 60 / map_obj.speed_multiplier)
for i in map_obj.beatmap.timingpoints if i.change]
chart_points = list()
for i, j in enumerate(data):
if i != 0:
last = data[i - 1]
chart_points.append((j[0] - .01, last[1]))
chart_points.append(j)
if len(data) - 1 == i:
chart_points.append((map_obj.beatmap.hitobjects[-1].starttime
/ map_obj.speed_multiplier, j[1]))
points = pd.DataFrame(chart_points)
points.columns = ["Time", "BPM"]
col = (38 / 255, 50 / 255, 59 / 255, .9)
sns.set(rc={'axes.facecolor': col,
'text.color': (236 / 255, 239 / 255, 241 / 255),
'figure.facecolor': col,
'savefig.facecolor': col,
'xtick.color': (176 / 255, 190 / 255, 197 / 255),
'ytick.color': (176 / 255, 190 / 255, 197 / 255),
'grid.color': (69 / 255, 90 / 255, 100 / 255),
'axes.labelcolor': (240 / 255, 98 / 255, 150 / 255),
'xtick.bottom': True,
'xtick.direction': 'in',
'figure.figsize': (6, 4),
'savefig.dpi': 100
})
ax = sns.lineplot(x="Time", y="BPM", data=points, color=(240 / 255, 98 / 255, 150 / 255))
length = int(map_obj.total_length) * 1000
m = length / 50
plt.xlim(-m, length + m)
formatter = matplotlib.ticker.FuncFormatter(lambda ms, x: strftime('%M:%S', gmtime(ms // 1000)))
ax.xaxis.set_major_formatter(formatter)
comp = round(max(1, (map_obj.bpm_max - map_obj.bpm_min) / 20), 2)
top = round(map_obj.bpm_max, 2) + comp
bot = max(round(map_obj.bpm_min, 2) - comp, 0)
dist = top - bot
plt.yticks(np.arange(bot, top, dist / 6 - .0001))
plt.ylim(bot, top)
round_num = 0 if dist > 10 else 2
formatter = matplotlib.ticker.FuncFormatter(lambda dig, y:
f"{max(dig - .004, 0.0):.{round_num}f}")
ax.yaxis.set_major_formatter(formatter)
ax.xaxis.grid(False)
width = 85
map_text = "\n".join(wrap(f"{map_obj.title} by {map_obj.artist}", width=width)) + "\n" + \
"\n".join(wrap(f"Mapset by {map_obj.creator}, "
f"Difficulty: {map_obj.version}", width=width))
plt.title(map_text)
plt.box(False)
image = io.BytesIO()
plt.savefig(image, bbox_inches='tight')
image.seek(0)
plt.clf()
plt.close()
return image
def map_strain_graph(map_strains, progress=1., width=399., height=40., max_chunks=100, low_cut=30.):
"""
    generates a strain graph based on the map
    :param map_strains: get_strains object
    :param progress: how much of the map the player finished
    :param width: width of image
    :param height: height of image
    :param max_chunks: resolution to get out of the map
    :param low_cut: adds some padding to the bottom
    :return: an image in a BytesIO object
"""
strains, max_strain = map_strains["strains"], map_strains["max_strain"]
strains_chunks = list()
chunk_size = math.ceil(len(strains) / max_chunks)
for i in range(0, len(strains), chunk_size):
strain_part = strains[i:i + chunk_size]
strains_chunks.append(max(strain_part))
x = np.linspace(0, width, num=len(strains_chunks))
y = np.minimum(low_cut,
height * 0.125 + height * .875 - np.array([i / max_strain for i in
strains_chunks]) * height * .875)
x = np.insert(x, 0, 0)
x = np.insert(x, 0, 0)
x = np.append(x, width)
x = np.append(x, width)
y = np.insert(y, 0, low_cut)
y = np.insert(y, 0, low_cut)
y = np.append(y, low_cut)
y = np.append(y, low_cut)
curves = list()
curves.append(bezier.Curve(np.asfortranarray([[0.0, 0.0], [height, low_cut]]), degree=1))
for i in range(1, len(y) - 1):
node = np.asfortranarray([
[avgpt(x, i - 1), x[i], avgpt(x, i)],
[avgpt(y, i - 1), y[i], avgpt(y, i)]])
curves.append(
bezier.Curve(node, degree=2)
)
curves.append(bezier.Curve(np.asfortranarray([[width, width], [low_cut, height]]), degree=1))
curves.append(bezier.Curve(np.asfortranarray([[width, 0.0], [height, height]]), degree=1))
polygon = bezier.CurvedPolygon(*curves)
_, ax = plt.subplots(figsize=(round(width * 1.30), round(height * 1.30)), dpi=1)
polygon.plot(pts_per_edge=200, color=(240 / 255, 98 / 255, 146 / 255, 1), ax=ax)
plt.xlim(0, width)
plt.ylim(height, 0)
plt.axis('off')
plt.box(False)
image = io.BytesIO()
fig1 = plt.gcf()
fig1.savefig(image, bbox_inches='tight', transparent=True, pad_inches=0, dpi=1)
image.seek(0)
plt.clf()
plt.close()
img = Image.open(image)
data = np.array(img)
for j in data:
for pos, i in enumerate(j):
if pos > len(j) * progress:
j[pos] = i / 1.5
if i[3] != 0:
j[pos][3] = i[3] / 159 * 255
img = Image.fromarray(data)
image.close()
image = io.BytesIO()
img.save(image, "png")
image.seek(0)
return image
def avgpt(points, index):
"""
    get the average of the current point and the next one
:param points: list of points
:param index: index
:return: average
"""
return (points[index] + points[index + 1]) / 2.0
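# tiny sanity check (hypothetical points list): the midpoint of 0 and 10 is 5
assert avgpt([0.0, 10.0, 20.0], 0) == 5.0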
|
[
"matplotlib.pyplot.title",
"seaborn.lineplot",
"matplotlib.pyplot.clf",
"textwrap.wrap",
"matplotlib.pyplot.box",
"numpy.arange",
"bezier.Curve",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"bezier.CurvedPolygon",
"numpy.insert",
"numpy.append",
"seaborn.set",
"io.BytesIO",
"matplotlib.pyplot.ylim",
"numpy.asfortranarray",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlim",
"time.gmtime",
"matplotlib.pyplot.axis",
"PIL.Image.open",
"numpy.array",
"PIL.Image.fromarray",
"matplotlib.pyplot.savefig"
] |
[((1024, 1050), 'pandas.DataFrame', 'pd.DataFrame', (['chart_points'], {}), '(chart_points)\n', (1036, 1050), True, 'import pandas as pd\n'), ((1138, 1591), 'seaborn.set', 'sns.set', ([], {'rc': "{'axes.facecolor': col, 'text.color': (236 / 255, 239 / 255, 241 / 255),\n 'figure.facecolor': col, 'savefig.facecolor': col, 'xtick.color': (176 /\n 255, 190 / 255, 197 / 255), 'ytick.color': (176 / 255, 190 / 255, 197 /\n 255), 'grid.color': (69 / 255, 90 / 255, 100 / 255), 'axes.labelcolor':\n (240 / 255, 98 / 255, 150 / 255), 'xtick.bottom': True,\n 'xtick.direction': 'in', 'figure.figsize': (6, 4), 'savefig.dpi': 100}"}), "(rc={'axes.facecolor': col, 'text.color': (236 / 255, 239 / 255, 241 /\n 255), 'figure.facecolor': col, 'savefig.facecolor': col, 'xtick.color':\n (176 / 255, 190 / 255, 197 / 255), 'ytick.color': (176 / 255, 190 / 255,\n 197 / 255), 'grid.color': (69 / 255, 90 / 255, 100 / 255),\n 'axes.labelcolor': (240 / 255, 98 / 255, 150 / 255), 'xtick.bottom': \n True, 'xtick.direction': 'in', 'figure.figsize': (6, 4), 'savefig.dpi':\n 100})\n", (1145, 1591), True, 'import seaborn as sns\n'), ((1770, 1859), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""Time"""', 'y': '"""BPM"""', 'data': 'points', 'color': '(240 / 255, 98 / 255, 150 / 255)'}), "(x='Time', y='BPM', data=points, color=(240 / 255, 98 / 255, \n 150 / 255))\n", (1782, 1859), True, 'import seaborn as sns\n'), ((1926, 1950), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-m)', '(length + m)'], {}), '(-m, length + m)\n', (1934, 1950), True, 'from matplotlib import pyplot as plt\n'), ((2343, 2361), 'matplotlib.pyplot.ylim', 'plt.ylim', (['bot', 'top'], {}), '(bot, top)\n', (2351, 2361), True, 'from matplotlib import pyplot as plt\n'), ((2879, 2898), 'matplotlib.pyplot.title', 'plt.title', (['map_text'], {}), '(map_text)\n', (2888, 2898), True, 'from matplotlib import pyplot as plt\n'), ((2904, 2918), 'matplotlib.pyplot.box', 'plt.box', (['(False)'], {}), '(False)\n', (2911, 2918), True, 'from matplotlib import pyplot as plt\n'), ((2932, 2944), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2942, 2944), False, 'import io\n'), ((2949, 2988), 'matplotlib.pyplot.savefig', 'plt.savefig', (['image'], {'bbox_inches': '"""tight"""'}), "(image, bbox_inches='tight')\n", (2960, 2988), True, 'from matplotlib import pyplot as plt\n'), ((3012, 3021), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3019, 3021), True, 'from matplotlib import pyplot as plt\n'), ((3026, 3037), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3035, 3037), True, 'from matplotlib import pyplot as plt\n'), ((4112, 4130), 'numpy.insert', 'np.insert', (['x', '(0)', '(0)'], {}), '(x, 0, 0)\n', (4121, 4130), True, 'import numpy as np\n'), ((4139, 4157), 'numpy.insert', 'np.insert', (['x', '(0)', '(0)'], {}), '(x, 0, 0)\n', (4148, 4157), True, 'import numpy as np\n'), ((4166, 4185), 'numpy.append', 'np.append', (['x', 'width'], {}), '(x, width)\n', (4175, 4185), True, 'import numpy as np\n'), ((4194, 4213), 'numpy.append', 'np.append', (['x', 'width'], {}), '(x, width)\n', (4203, 4213), True, 'import numpy as np\n'), ((4222, 4246), 'numpy.insert', 'np.insert', (['y', '(0)', 'low_cut'], {}), '(y, 0, low_cut)\n', (4231, 4246), True, 'import numpy as np\n'), ((4255, 4279), 'numpy.insert', 'np.insert', (['y', '(0)', 'low_cut'], {}), '(y, 0, low_cut)\n', (4264, 4279), True, 'import numpy as np\n'), ((4288, 4309), 'numpy.append', 'np.append', (['y', 'low_cut'], {}), '(y, low_cut)\n', (4297, 4309), True, 'import numpy as np\n'), ((4318, 4339), 'numpy.append', 
'np.append', (['y', 'low_cut'], {}), '(y, low_cut)\n', (4327, 4339), True, 'import numpy as np\n'), ((4906, 4935), 'bezier.CurvedPolygon', 'bezier.CurvedPolygon', (['*curves'], {}), '(*curves)\n', (4926, 4935), False, 'import bezier\n'), ((5111, 5129), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'width'], {}), '(0, width)\n', (5119, 5129), True, 'from matplotlib import pyplot as plt\n'), ((5134, 5153), 'matplotlib.pyplot.ylim', 'plt.ylim', (['height', '(0)'], {}), '(height, 0)\n', (5142, 5153), True, 'from matplotlib import pyplot as plt\n'), ((5158, 5173), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5166, 5173), True, 'from matplotlib import pyplot as plt\n'), ((5178, 5192), 'matplotlib.pyplot.box', 'plt.box', (['(False)'], {}), '(False)\n', (5185, 5192), True, 'from matplotlib import pyplot as plt\n'), ((5206, 5218), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5216, 5218), False, 'import io\n'), ((5230, 5239), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5237, 5239), True, 'from matplotlib import pyplot as plt\n'), ((5346, 5355), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5353, 5355), True, 'from matplotlib import pyplot as plt\n'), ((5360, 5371), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5369, 5371), True, 'from matplotlib import pyplot as plt\n'), ((5383, 5400), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (5393, 5400), False, 'from PIL import Image\n'), ((5412, 5425), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (5420, 5425), True, 'import numpy as np\n'), ((5637, 5658), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (5652, 5658), False, 'from PIL import Image\n'), ((5689, 5701), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5699, 5701), False, 'import io\n'), ((2299, 2337), 'numpy.arange', 'np.arange', (['bot', 'top', '(dist / 6 - 0.0001)'], {}), '(bot, top, dist / 6 - 0.0001)\n', (2308, 2337), True, 'import numpy as np\n'), ((2759, 2844), 'textwrap.wrap', 'wrap', (['f"""Mapset by {map_obj.creator}, Difficulty: {map_obj.version}"""'], {'width': 'width'}), "(f'Mapset by {map_obj.creator}, Difficulty: {map_obj.version}', width=width\n )\n", (2763, 2844), False, 'from textwrap import wrap\n'), ((4391, 4441), 'numpy.asfortranarray', 'np.asfortranarray', (['[[0.0, 0.0], [height, low_cut]]'], {}), '([[0.0, 0.0], [height, low_cut]])\n', (4408, 4441), True, 'import numpy as np\n'), ((4660, 4688), 'bezier.Curve', 'bezier.Curve', (['node'], {'degree': '(2)'}), '(node, degree=2)\n', (4672, 4688), False, 'import bezier\n'), ((4730, 4784), 'numpy.asfortranarray', 'np.asfortranarray', (['[[width, width], [low_cut, height]]'], {}), '([[width, width], [low_cut, height]])\n', (4747, 4784), True, 'import numpy as np\n'), ((4828, 4879), 'numpy.asfortranarray', 'np.asfortranarray', (['[[width, 0.0], [height, height]]'], {}), '([[width, 0.0], [height, height]])\n', (4845, 4879), True, 'import numpy as np\n'), ((2032, 2050), 'time.gmtime', 'gmtime', (['(ms // 1000)'], {}), '(ms // 1000)\n', (2038, 2050), False, 'from time import strftime, gmtime\n'), ((2664, 2721), 'textwrap.wrap', 'wrap', (['f"""{map_obj.title} by {map_obj.artist}"""'], {'width': 'width'}), "(f'{map_obj.title} by {map_obj.artist}', width=width)\n", (2668, 2721), False, 'from textwrap import wrap\n'), ((3973, 4025), 'numpy.array', 'np.array', (['[(i / max_strain) for i in strains_chunks]'], {}), '([(i / max_strain) for i in strains_chunks])\n', (3981, 4025), True, 'import numpy as np\n')]
|