| id (string, lengths 3–8) | content (string, lengths 100–981k) |
|---|---|
11560336
|
from pointers import fopen, fprintf, fclose
file = fopen("/dev/null", "w")
fprintf(file, "hello world")
fclose(file)
|
11560358
|
import os
import numpy as np
import pickle
import torch
from yacs.config import CfgNode
from .dataset import Dataset
from .utils import get_example
class BatchedImageDataset(Dataset):
def __init__(self,
cfg: CfgNode,
dataset_file: str,
img_dir: str,
train: bool = False,
**kwargs):
"""
Batched version of ImageDataset, where instead of a single example a list of examples is loaded (e.g. multiple views).
Args:
cfg (CfgNode): Model config file.
            dataset_file (str): Path to pickle file containing dataset info.
img_dir (str): Path to image folder.
train (bool): Whether it is for training or not (enables data augmentation).
"""
super(BatchedImageDataset, self).__init__()
        with open(dataset_file, 'rb') as f:
            self.data = pickle.load(f)
self.train = train
self.cfg = cfg
self.img_size = cfg.MODEL.IMAGE_SIZE
self.mean = 255. * np.array(self.cfg.MODEL.IMAGE_MEAN)
self.std = 255. * np.array(self.cfg.MODEL.IMAGE_STD)
self.img_dir = img_dir
body_permutation = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]
extra_permutation = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 15, 16, 17, 18]
flip_keypoint_permutation = body_permutation + [25 + i for i in extra_permutation]
self.flip_keypoint_permutation = flip_keypoint_permutation
def __len__(self):
return len(self.data)
def total_length(self):
"""
Return the total number of images in the dataset.
"""
return sum([len(datum['imgname']) for datum in self.data])
def __getitem__(self, idx: int):
data = self.data[idx]
num_images = len(data['imgname'])
augm_config = self.cfg.DATASETS.CONFIG
img_patch = []
keypoints_2d = []
keypoints_3d = []
smpl_params = []
has_smpl_params = []
smpl_params_is_axis_angle = []
img_size = []
center = []
scale = []
if 'body_keypoints_3d' in data:
body_keypoints_3d = data['body_keypoints_3d']
extra_keypoints_3d = data['extra_keypoints_3d']
keypoints_3d_all = np.concatenate((body_keypoints_3d, extra_keypoints_3d), axis=1)
else:
keypoints_3d_all = np.zeros((num_images, 44, 4))
for n in range(num_images):
imgname = data['imgname'][n]
image_file = os.path.join(self.img_dir, imgname)
keypoints_2d_n = np.zeros((44, 3))
keypoints_3d_n = keypoints_3d_all[n]
center_n = data['center'][n].copy()
center_x = center_n[0]
center_y = center_n[1]
bbox_size_n = 1.2*data['scale'][n]
if 'body_pose' in data:
body_pose_n = data['body_pose'][n]
else:
body_pose_n = np.zeros(72, dtype=np.float32)
if 'betas' in data:
betas_n = data['betas'][n]
else:
betas_n = np.zeros(10, dtype=np.float32)
if 'has_body_pose' in data:
has_body_pose_n = data['has_body_pose'][n]
else:
has_body_pose_n = 0.0
if 'has_betas' in data:
has_betas_n = data['has_betas'][n]
else:
has_betas_n = 0.0
smpl_params_n = {'global_orient': body_pose_n[:3],
'body_pose': body_pose_n[3:],
'betas': betas_n
}
has_smpl_params_n = {'global_orient': has_body_pose_n,
'body_pose': has_body_pose_n,
'betas': has_betas_n
}
smpl_params_is_axis_angle_n = {'global_orient': True,
'body_pose': True,
'betas': False
}
img_patch_n, keypoints_2d_n, keypoints_3d_n, smpl_params_n, has_smpl_params_n, img_size_n = get_example(image_file,
center_x, center_y,
bbox_size_n, bbox_size_n,
keypoints_2d_n, keypoints_3d_n,
smpl_params_n, has_smpl_params_n,
self.flip_keypoint_permutation,
self.img_size, self.img_size,
self.mean, self.std, self.train, augm_config)
img_patch.append(img_patch_n)
keypoints_2d.append(keypoints_2d_n)
keypoints_3d.append(keypoints_3d_n)
smpl_params.append(smpl_params_n)
has_smpl_params.append(has_smpl_params_n)
smpl_params_is_axis_angle.append(smpl_params_is_axis_angle_n)
img_size.append(img_size_n)
img_patch = np.stack(img_patch, axis=0)
keypoints_2d = np.stack(keypoints_2d, axis=0)
keypoints_3d = np.stack(keypoints_3d, axis=0)
smpl_params = {k: np.stack([sp[k] for sp in smpl_params], axis=0) for k in smpl_params[0].keys()}
has_smpl_params = {k: np.stack([sp[k] for sp in has_smpl_params], axis=0) for k in has_smpl_params[0].keys()}
smpl_params_is_axis_angle = {k: np.stack([sp[k] for sp in smpl_params_is_axis_angle], axis=0) for k in smpl_params_is_axis_angle[0].keys()}
img_size = np.stack(img_size, axis=0)
item = {}
item['img'] = torch.from_numpy(img_patch)
item['keypoints_2d'] = torch.from_numpy(keypoints_2d.astype(np.float32))
item['keypoints_3d'] = torch.from_numpy(keypoints_3d.astype(np.float32))
item['smpl_params'] = {k: torch.from_numpy(v).float() for k,v in smpl_params.items()}
item['has_smpl_params'] = {k: torch.from_numpy(v).bool() for k,v in has_smpl_params.items()}
item['smpl_params_is_axis_angle'] = {k: torch.from_numpy(v).bool() for k,v in smpl_params_is_axis_angle.items()}
return item
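# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how this dataset might be wrapped in a torch DataLoader. The
# config is assumed to define MODEL.IMAGE_SIZE / IMAGE_MEAN / IMAGE_STD and
# DATASETS.CONFIG; the file paths below are placeholders.
def _example_dataloader(cfg: CfgNode):
    from torch.utils.data import DataLoader
    dataset = BatchedImageDataset(cfg, 'dataset.pkl', 'images/', train=True)
    loader = DataLoader(dataset, batch_size=1, shuffle=True)
    item = next(iter(loader))
    # item['img'] is expected to have shape (batch, num_views, C, H, W)
    return item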
|
11560395
|
import numpy as np
import matplotlib.pyplot as plt
from pybasicbayes.util.text import progprint_xrange
from pyhsmm.basic.distributions import PoissonDuration
import pyhsmm_spiketrains.models
from importlib import reload
reload(pyhsmm_spiketrains.models)
# Set the seed
seed = 0
print "setting seed to ", seed
np.random.seed(seed)
# Generate a synthetic dataset
N = 25 # Number of neurons
K = 20 # Number of states
T = 1000 # Number of time bins to simulate
T_test = 200 # Number of time bins to hold out for testing
N_iter = 500 # Number of iterations of Gibbs sampling
# Simulate from an HMM with a known transition matrix
true_A = np.eye(K) + 0.25*np.random.rand(K,K)
true_A /= true_A.sum(axis=1)[:,None]
true_hmm = pyhsmm_spiketrains.models.PoissonHMM(N=N, K=K, trans_matrix=true_A)
# Generate test spike trains (S) and their underlying state sequences (Z)
S_train, _ = true_hmm.generate(T)
S_test, _ = true_hmm.generate(T_test)
true_hmm.relabel_by_usage()
Z_train, Z_test = true_hmm.stateseqs
N_used = len(true_hmm.used_states)
print "Number of used states: ", N_used
# Create a test model with the same parameters, and add the data
test_hmm = pyhsmm_spiketrains.models.PoissonHDPHMM(N=N, K_max=100)
test_hmm.add_data(S_train)
# Fit the test model with Gibbs sampling
lls = []
pred_lls = []
for itr in progprint_xrange(N_iter):
test_hmm.resample_model()
# Collect the log likelihood and predictive log likelihood
lls.append(test_hmm.log_likelihood(S_train))
pred_lls.append(test_hmm.log_likelihood(S_test))
# Get the inferred state sequence
test_hmm.relabel_by_usage()
Z_train_inf = test_hmm.stateseqs[0]
N_used_inf = len(test_hmm.used_states)
# Plot the log likelihood over time
plt.figure()
plt.plot(lls, 'b')
plt.plot([0,N_iter], true_hmm.log_likelihood(S_train) * np.ones(2), ':k')
plt.xlabel("Iteration")
plt.ylabel("Log Likelihood")
# Visualize the data and the true and inferred state sequences
plt.figure()
plt.subplot(311)
plt.imshow(S_train.T[:,:100], interpolation="none", cmap="Greys", vmin=0, vmax=S_train.max())
plt.title("Spike train")
plt.subplot(312)
plt.imshow(Z_train.reshape((1,-1))[:,:100], aspect=10.0, cmap="YlOrRd", interpolation="none", vmin=0, vmax=N_used)
plt.title("True states")
plt.subplot(313)
plt.title("Inferred states")
plt.imshow(Z_train_inf.reshape((1,-1))[:,:100], aspect=10.0, cmap="YlOrRd", interpolation="none", vmin=0, vmax=N_used)
# Visualize the true and inferred transition matrices
plt.figure()
plt.subplot(121)
plt.imshow(true_hmm.A[:N_used, :N_used], interpolation="none", cmap="Greys", vmin=0, vmax=1)
plt.title("True Transitions")
plt.subplot(122)
plt.imshow(test_hmm.A[:N_used_inf, :N_used_inf], interpolation="none", cmap="Greys", vmin=0, vmax=1)
plt.title("Inf. Transitions")
# Visualize the true and inferred firing rates
plt.figure()
plt.subplot(121)
plt.imshow(true_hmm.rates[:N_used, :], interpolation="none", cmap="Greys", vmin=0, vmax=1)
plt.title("True Firing Rates")
plt.subplot(122)
plt.imshow(test_hmm.rates[:N_used_inf, :], interpolation="none", cmap="Greys", vmin=0, vmax=1)
plt.title("Inf. Firing Rates")
plt.show()
|
11560406
|
import pandas as pd
import os
from tqdm.notebook import tqdm
# read csv
df = pd.read_csv('thaisum.csv', encoding='utf-8')
'''
Conditions
________________________
if type is not null
________________________
1. 'ทั่วไทย' = {'ภูมิภาค'}
2. 'การเมือง' = {'ความมั่นคง', 'เลือกตั้ง', }
3. 'สังคม'
4. 'กีฬา' = {'ฟุตบอลยุโรป', 'ไทยรัฐเชียร์ไทยแลนด์', 'กีฬาอื่นๆ', 'ฟุตบอลไทย', 'มวย/MMA', 'ฟุตบอลโลก', 'วอลเลย์บอล', 'เอเชียนเกมส์', 'ไทยลีก', 'ฟุตซอล'}
5. 'ต่างประเทศ'
6. 'เศรษฐกิจ' = {ทองคำ}
7. 'ไลฟ์สไตล์' = {'ผู้หญิง', 'ท่องเที่ยว', 'อาหาร', 'ไลฟ์', 'บ้าน', 'หนัง'}
8. 'บันเทิง' = {'ศิลปะ-บันเทิง', 'วัฒนธรรม', 'ข่าวบันเทิง', ''}
9. 'คุณภาพชีวิต' = {'สิทธิมนุษยชน'}
10. 'วิทยาศาสตร์เทคโนโลยี' = {'E-Sport', 'ไอซีที', 'วิทยาศาสตร์', 'การศึกษา'}
11. 'สิ่งแวดล้อม' = {'ภัยพิบัติ', ''}
12. 'unspecified' = { if row('tags') isnull and row('type') == 'unspecified'}
_________________________________
if row('type') == 'unspecified'
_________________________________
1. 'ทั่วไทย' = ['ข่าวทั่วไทย', ''ข่าวภูมิภาค', 'ทั่วไทย']
2. 'การเมือง' = ['ความมั่นคง', 'เลือกตั้ง', 'ข่าวการเมือง', 'คสช.', 'กกต', 'รัฐบาล', 'ยิ่งลักษณ์ ชินวัตร', 'การเลือกตั้ง', 'ร่างรัฐธรรมนูญ', 'ประชามติ', 'ประชาธิปัตย์', 'พรรคเพื่อไทย']
3. 'สังคม' = ['ข่าวสังคม', 'ข่าวโซเชียล']
4. 'กีฬา' = ['ข่าวกีฬา', 'พรีเมียร์ลีก', 'แมนเชสเตอร์ ยูไนเต็ด', 'ลิเวอร์พูล', 'ผลบอล', 'ทีมชาติไทย', 'เชลซี']
5. 'ต่างประเทศ' = ['ข่าวต่างประเทศ', 'จีน', 'สหรัฐ', 'อังกฤษ', 'ญี่ปุ่น']
6. 'เศรษฐกิจ' = ['ข่าวเศรษฐกิจ','ทองคำ', 'หวย', 'เศรษฐกิจ']
7. 'ไลฟ์สไตล์' = ['ผู้หญิง', 'ท่องเที่ยว', 'อาหาร', 'ไลฟ์', 'บ้าน', 'หนัง', 'ข่าวไลฟ์สไตล์']
8. 'บันเทิง' = ['ศิลปะ-บันเทิง', 'วัฒนธรรม', 'ข่าวบันเทิง', 'ดารา', 'ละคร', 'กอสซิป', 'นักร้อง',]
9. 'คุณภาพชีวิต' = ['สิทธิมนุษยชน', 'สุขภาพ', 'เกษตรกร', 'COVID19', 'ฆ่าตัวตาย', 'COVID-19', 'โควิด-19', 'ไวรัสโคโรนา', 'สาธารณสุข', ]
10. 'วิทยาศาสตร์เทคโนโลยี' = {'E-Sport', 'ไอซีที', 'วิทยาศาสตร์', 'การศึกษา', 'เกษตร', 'ข่าวการศึกษา'}
11. 'สิ่งแวดล้อม' = ['ภัยพิบัติ', 'น้ำท่วม', 'ภัยแล้ง', 'กรมอุตุนิยมวิทยา', 'ไฟไหม้', 'พยากรณ์อากาศ', 'อากาศวันนี้', ]
'''
for index, row in tqdm(df.iterrows(), total=df.shape[0]):
output_df = pd.DataFrame()
    if str(row['type']) == 'unspecified':  # if 'type' is NOT available ('unspecified'), assign 'label' from 'tags' according to the following conditions:
if (pd.isnull(row['tags'])) and str(row['type']) == 'unspecified':
label = 'unspecified'
elif 'ความมั่นคง' in row["tags"] or 'เลือกตั้ง' in row["tags"] or 'ข่าวการเมือง' in row["tags"] or 'คสช.' in \
row["tags"] or 'กกต' in row["tags"] or 'รัฐบาล' in row["tags"] or 'ยิ่งลักษณ์ ชินวัตร' in row[
"tags"] or 'การเลือกตั้ง' in row["tags"] or 'ร่างรัฐธรรมนูญ' in row["tags"] or 'ประชามติ' in row[
"tags"] or 'ประชาธิปัตย์' in row["tags"] or 'พรรคเพื่อไทย' in row["tags"] or 'การเมือง' in row["tags"]:
label = 'politic' # การเมือง
elif 'ข่าวสังคม' in row["tags"] or 'ข่าวโซเชียล' in row["tags"]:
label = 'society' # สังคม
elif 'ข่าวกีฬา' in row["tags"] or 'พรีเมียร์ลีก' in row["tags"] or 'แมนเชสเตอร์ ยูไนเต็ด' in row[
"tags"] or 'ลิเวอร์พูล' in row["tags"] or 'ผลบอล' in row["tags"] or 'ทีมชาติไทย' in row[
"tags"] or 'เชลซี' in row["tags"]:
label = 'sport' # กีฬา
elif 'ข่าวต่างประเทศ' in row["tags"] or 'จีน' in row["tags"] or 'สหรัฐ' in row["tags"] or 'อังกฤษ' in row[
"tags"] or 'ญี่ปุ่น' in row["tags"]:
label = 'foreign' # ต่างประเทศ
elif 'ผู้หญิง' in row["tags"] or 'ท่องเที่ยว' in row["tags"] or 'อาหาร' in row["tags"] or 'ไลฟ์' in row[
"tags"] or 'บ้าน' in row["tags"] or 'หนัง' in row["tags"] or 'ข่าวไลฟ์สไตล์' in row["tags"]:
label = 'lifestyle' # ไลฟ์สไตล์
elif 'ข่าวเศรษฐกิจ' in row["tags"] or 'ทองคำ' in row["tags"] or 'หวย' in row["tags"] or 'เศรษฐกิจ' in row[
"tags"]:
label = 'economy' # เศรษฐกิจ
elif 'ศิลปะ-บันเทิง' in row["tags"] or 'วัฒนธรรม' in row["tags"] or 'ข่าวบันเทิง' in row["tags"] or 'ดารา' in \
row["tags"] or 'ละคร' in row["tags"] or 'กอสซิป' in row["tags"] or 'นักร้อง' in row["tags"]:
label = 'entertainment' # บันเทิง & วัฒนธรรม
elif 'สิทธิมนุษยชน' in row["tags"] or 'สุขภาพ' in row["tags"] or 'เกษตรกร' in row["tags"] or 'COVID19' in row[
"tags"] or 'ฆ่าตัวตาย' in row["tags"] or 'COVID-19' in row["tags"] or 'โควิด-19' in row[
"tags"] or 'ไวรัสโคโรนา' in row["tags"] or 'สาธารณสุข' in row["tags"]:
label = 'quality-of-life' # คุณภาพชีวิต
elif 'E-Sport' in row["tags"] or 'ไอซีที' in row["tags"] or 'วิทยาศาสตร์' in row["tags"] or 'การศึกษา' in row[
"tags"] or 'เกษตร' in row["tags"] or 'ข่าวการศึกษา' in row["tags"]:
label = 'science' # วิทยาศาสตร์ & การศึกษา
elif 'ภัยพิบัติ' in row["tags"] or 'น้ำท่วม' in row["tags"] or 'ภัยแล้ง' in row["tags"] or 'กรมอุตุนิยมวิทยา' in \
row["tags"] or 'ไฟไหม้' in row["tags"] or 'พยากรณ์อากาศ' in row["tags"] or 'อากาศวันนี้' in row["tags"]:
label = 'environment' # สิ่งแวดล้อม
elif 'ข่าวทั่วไทย' in row["tags"] or 'ข่าวภูมิภาค' in row["tags"] or 'ทั่วไทย' in row["tags"]:
label = 'local' # ทั่วไทย
else:
            label = 'others'  # อื่นๆ (these articles have no type but do have tags; however, those tags occur only rarely)
    else:  # if 'type' is already available, assign 'label' to the article according to the following conditions:
if 'ภัยพิบัติ' in row["type"] or 'สิ่งแวดล้อม' in row["type"]:
label = 'environment' # สิ่งแวดล้อม
elif 'ความมั่นคง' in row["type"] or 'เลือกตั้ง' in row["type"] or 'การเมือง' in row["type"]:
label = 'politic' # การเมือง
elif 'สังคม' in row["type"]:
label = 'society' # สังคม
elif 'กีฬา' in row["type"] or 'ฟุตบอลยุโรป' in row["type"] or 'ไทยรัฐเชียร์ไทยแลนด์' in row[
"type"] or 'กีฬาอื่นๆ' in row["type"] or 'ฟุตบอลไทย' in row["type"] or 'มวย/MMA' in row[
"type"] or 'ฟุตบอลโลก' in row["type"] or 'วอลเลย์บอล' in row["type"] or 'เอเชียนเกมส์' in row[
"type"] or 'ไทยลีก' in row["type"] or 'ฟุตซอล' in row["type"]:
label = 'sport' # กีฬา
elif 'ต่างประเทศ' in row["type"]:
label = 'foreign' # ต่างประเทศ
elif 'ผู้หญิง' in row["type"] or 'ท่องเที่ยว' in row["type"] or 'อาหาร' in row["type"] or 'ไลฟ์' in row[
"type"] or 'บ้าน' in row["type"] or 'หนัง' in row["type"] or 'ไลฟ์สไตล์' in row["type"]:
label = 'lifestyle' # ไลฟ์สไตล์
elif 'เศรษฐกิจ' in row["type"] or 'หวย' in row["type"] or 'ทองคำ' in row["type"]:
label = 'economy' # เศรษฐกิจ
elif 'บันเทิง' in row["type"] or 'ศิลปะ-บันเทิง' in row["type"] or 'วัฒนธรรม' in row["type"] or 'ข่าวบันเทิง' in \
row["type"]:
label = 'entertainment' # บันเทิง & วัฒนธรรม
elif 'คุณภาพชีวิต' in row["type"] or 'สิทธิมนุษยชน' in row["type"]:
label = 'quality-of-life' # คุณภาพชีวิต
elif 'วิทยาศาสตร์เทคโนโลยี' in row["type"] or 'E-Sport' in row["type"] or 'ไอซีที' in row[
"type"] or 'วิทยาศาสตร์' in row["type"] or 'การศึกษา' in row["type"]:
label = 'science'
elif 'สิ่งแวดล้อม' in row["type"] or 'ภัยพิบัติ' in row["type"]:
label = 'environment' # สิ่งแวดล้อม
elif 'ทั่วไทย' in row["type"] or 'ภูมิภาค' in row["type"]:
label = 'local' # ทั่วไทย
else:
            label = 'others'  # อื่นๆ (these articles have a type, but it occurs only rarely)
output_df.loc[index, 'tags'] = str(row['tags'])
output_df.loc[index, 'type'] = row['type']
output_df.loc[index, 'label'] = label
output_df.loc[index, 'url'] = row['url']
    output_name = "tag-type-label-2.csv"
if not os.path.isfile(output_name):
output_df.to_csv(output_name, index=False, encoding='utf-8-sig', header=["tags", "type", "label", "url"])
else: # else it exists so append without writing the header
output_df.to_csv(output_name, index=False, encoding='utf-8-sig', mode='a', header=False)
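# --- Hedged refactor sketch (not part of the original script) ---
# The long elif chains above can be expressed as an ordered mapping from label
# to tag keywords, checked with any(). The label order below mirrors the
# original precedence; the keyword lists are abbreviated for illustration.
TAG_RULES = [
    ('politic', ['ความมั่นคง', 'เลือกตั้ง', 'ข่าวการเมือง', 'การเมือง']),
    ('society', ['ข่าวสังคม', 'ข่าวโซเชียล']),
    ('sport', ['ข่าวกีฬา', 'พรีเมียร์ลีก', 'ผลบอล']),
]

def label_from_tags(tags):
    # Return the first label whose keyword list matches, else 'others'.
    if pd.isnull(tags):
        return 'unspecified'
    for label, keywords in TAG_RULES:
        if any(keyword in tags for keyword in keywords):
            return label
    return 'others'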
|
11560411
|
import requests
import json
import pandas as pd
def QA_fetch_get_future_domain():
"""
    Fetch the dominant continuous ("主连") contract codes from Kuaiqi (Shinny).
    Returns a list of underlying symbol codes.
"""
res = pd.DataFrame(json.loads(requests.get("https://openmd.shinnytech.com/t/md/symbols/latest.json").text)).T
return res.loc[res.ins_name.str.contains('主连')].underlying_symbol.apply(lambda x: x.split('.')[1]).tolist()
if __name__ == "__main__":
print(QA_fetch_get_future_domain())
|
11560414
|
import os
import sys
import subprocess as sp
from shutil import rmtree
from subprocess import TimeoutExpired
import renv
import logging
logger = logging.getLogger(__name__)
def get_system_venv():
    if os.name == "posix":
        if sys.platform == "darwin":
            return renv.MacRenvBuilder
        elif "linux" in str(sys.platform):
            return renv.LinuxRenvBuilder
        else:
            logger.error("renv does not support %s operating system at this time." % sys.platform)
    elif os.name == "nt":
        if sys.platform == "win32":
            return renv.WindowsRenvBuilder
        else:
            logger.error("renv does not support %s operating system at this time." % sys.platform)
def get_r_path():
"""
Get current R installed path in Linux
# TODO make this function cross-platform
:return: path to R
"""
sp_out = sp.run(["which R"], shell=True, stdout=sp.PIPE, encoding="utf8")
if sp_out.returncode:
logger.exception("Could not get the default R path. Is R installed?")
return sp_out.stdout.strip()
def get_r_installed_root():
"""
    Get the installed root of R (without /bin/R).
:return: path to root where R is installed
"""
r_path = get_r_path()
return os.path.dirname(os.path.dirname(r_path)) # remove /bin/R
def get_user_home_dir():
"""
Get home directory in Linux where users can create directory.
# TODO make this function cross-platform
:return: None
"""
sp_out = os.environ['HOME']
return sp_out.strip()
def get_renv_path(has_root_access=False):
"""
Get the default R environment path
# TODO make this function cross-platform
:param has_root_access: whether user has root access in Linux.
:return: path to .renv, inclusive.
"""
if has_root_access:
return os.path.join(get_r_installed_root(), ".renv")
else:
return os.path.join(get_user_home_dir(), ".renv")
def create_directory(directory, clear=False):
"""
Create directory if it does not exist yet.
:param clear: Clear the directory if it already exists.
:param directory: path of the directory
:return: None
"""
if os.path.exists(directory):
if clear:
            rmtree(directory)
            logger.debug("%s has been deleted." % directory)
            os.makedirs(directory)
            logger.debug("%s has been created." % directory)
else:
logger.error("Environment directory %s exists. Set clear to True to delete the original directory." % directory)
elif os.path.islink(directory) or os.path.isfile(directory):
        logger.error("Unable to create directory %r for the new environment." % directory)
else:
os.makedirs(directory)
logger.debug("%s has been created." % directory)
def create_symlink(src, dst, subfolders=[]):
"""
Create symlink in the dst folder from the src folder.
:param src: source folder
    :param dst: destination folder
:param subfolders: symlink to be created for these subfolders in src specifically
:return: None
"""
if len(subfolders) == 0:
os.symlink(src, dst, target_is_directory=True)
else:
for subfolder in subfolders:
src_folder = os.path.join(src, subfolder)
dst_folder = os.path.join(dst, subfolder)
            if not os.path.exists(src_folder):
                logger.warning("Cannot create symlink from " + src_folder)
            elif os.path.exists(dst_folder):
                logger.warning("Cannot create symlink at " + dst_folder + " (already exists)")
            else:
                os.symlink(src_folder, dst_folder, target_is_directory=True)
def system_r_call(rcmd_type, rscript):
"""
Call the current R with system calls in order to obtain specific types
of information.
:param rcmd_type: A string that designates the R command to use in the system call
:param rscript: The absolute path to the desired Rscript exe.
:return: Returns the stdout and stderr from the system call.
"""
if rcmd_type == "major":
rcmd = "%s -e \'R.version$major\'" % rscript
elif rcmd_type == "minor":
rcmd = "%s -e \'R.version$minor\'" % rscript
elif rcmd_type == "base":
rcmd = "%s -e \'base::cat(rownames(installed.packages(priority=\"base\")))\'" % rscript
elif rcmd_type == "recommended":
rcmd = "%s -e \'base::cat(rownames(installed.packages(priority=\"recommended\")))\'" % rscript
recommended_pkgs = sp.Popen([rcmd], stderr=sp.PIPE, stdout=sp.PIPE, shell=True, encoding='utf-8')
try:
stdout, stderr = recommended_pkgs.communicate(timeout=15)
except TimeoutExpired:
recommended_pkgs.kill()
stdout, stderr = recommended_pkgs.communicate()
return stdout, stderr
def format_pkg_list(config_dict):
"""
Takes the YAML configuration information and parses/formats the R
package list for use with an "Rscript -e **" call.
:param config_dict: The configuration dictionary created with the YAML file.
"""
config_dict = {k: v for k, v in config_dict.items() if "PKG_LIST" in k}
fmtd_list = dict()
for list_name in config_dict:
pkg_dict = config_dict[list_name]
pkg_list_count = len(pkg_dict) - 1
pkg_list_string = ""
for k, v in enumerate(pkg_dict):
if k == pkg_list_count:
pkg_list_string = "%s%s=\"%s\"" % (pkg_list_string, v, pkg_dict[v])
else:
sep = ", "
pkg_list_string = "%s%s=\"%s\"%s" % (pkg_list_string, v, pkg_dict[v], sep)
pkg_list_string = "list(%s)" % pkg_list_string
fmtd_list[list_name] = pkg_list_string
return fmtd_list
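# --- Hedged example (not part of the original module) ---
# Illustrates the string produced by format_pkg_list() for a hypothetical
# YAML-derived configuration; the key names below are assumptions.
def _example_format_pkg_list():
    example_config = {
        "R_VERSION": "4.1.0",  # ignored: key does not contain "PKG_LIST"
        "CRAN_PKG_LIST": {"dplyr": "1.0.7", "ggplot2": "3.3.5"},
    }
    # expected: {'CRAN_PKG_LIST': 'list(dplyr="1.0.7", ggplot2="3.3.5")'}
    return format_pkg_list(example_config)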
|
11560423
|
import gridfs
import pymongo
from . import message
class YahooBackupDB:
"""Interface to store Yahoo! Group messages to a MongoDB. Group data is stored in a database
whose name is the same as the group name. File data is stored in that database name plus `_gridfs`, where
    the gridfs _id is the same as the file document _id, which is also the file path.
The `messages` collection contains all the message data returned by the Yahoo! Groups API.
Notable fields:
* '_id' - the message id ('msgId' from the API)
* 'authorName' - often empty, but may have a name
* 'from' - sender's email address
* 'profile' - profile name of the poster
* 'subject' - the subject
* 'postDate' - a timestamp of when the post was made
* 'messageBody' - the message body, as formatted HTML
* 'rawEmail' - the full raw email, with headers and everything
* 'nextInTime' - next message id, in time order
* 'nextInTopic' - next message id, in topic order
* 'prevInTime' - prev message id, in time order
* 'prevInTopic' - prev message id, in topic order
If a message is missing, then the document will contain an `_id` field and nothing else.
The `files` collection contains all the data about files:
* `_id` - the full file path and name (unique)
* `url` - the url the file was downloaded from
* `mime` - file mimetype
* `size` - file size as reported by yahoo - float, in kilobytes
* `profile` - profile of user that posted the file
* `date` - date listed on the Yahoo! Group for the file
"""
def __init__(self, mongo_cli, group_name):
self.group_name = group_name
self.cli = mongo_cli
self.db = getattr(self.cli, group_name)
self.fs = gridfs.GridFS(getattr(self.cli, "%s_gridfs" % group_name))
self._ensure_indices()
def _ensure_indices(self):
self.db.messages.create_index([("postDate", pymongo.ASCENDING)])
self.db.messages.create_index([("authorName", pymongo.ASCENDING)])
self.db.messages.create_index([("from", pymongo.ASCENDING)])
self.db.messages.create_index([("profile", pymongo.ASCENDING)])
def has_updated_message(self, message_number):
"""Return whether we already have the given message number loaded and fully updated."""
query = self.db.messages.find({'_id': message_number})
if not query.count():
return False
msg = query[0]
if msg.get('nextInTime', None) == 0:
# maybe need to update the 'next' link
return False
return True
def upsert_message(self, message_number, message_obj):
"""Insert the message document, for the given message number. If the message is already stored, will
update it. For a missing message, pass `None` for `message_obj`."""
if not message_obj:
self.db.messages.insert_one({'_id': message_number})
else:
assert message_number == message_obj['msgId']
doc = {**message_obj, '_id': message_number}
del doc['msgId']
self.db.messages.update_one({'_id': message_number}, {'$set': doc}, upsert=True)
def yield_all_messages(self, start=None, end=None):
"""Yield all existing messages (skipping missing ones), in reverse message_id order."""
query = {'_id': {'$gte': start or 0, '$lt': end or 9999999999}}
for msg in self.db.messages.find(query).sort('_id', -1):
if not msg.get('messageBody'):
continue
yield msg
def num_messages(self):
"""Return the number of non-empty messages in the database."""
return self.db.messages.find({'messageBody': {'$exists': True}}).count()
def get_latest_message(self):
"""Return the latest message."""
return next(self.yield_all_messages())
def missing_message_ids(self):
"""Return the set of the ids of all missing messages.."""
latest = self.get_latest_message()
ids = set(range(1, latest['_id']+1))
present_ids = set(doc['_id'] for doc in self.db.messages.find({}, {'_id': 1}))
return ids - present_ids
# -- File operations
def has_file_entry(self, filePath):
return self.db.files.find({'_id': filePath}).count() > 0
def has_file_data(self, filePath):
return self.fs.exists({'_id': filePath})
def upsert_file_entry(self, file_entry):
doc = {**file_entry, '_id': file_entry['filePath']}
del doc['filePath']
self.db.files.update_one({'_id': doc['_id']}, {'$set': doc}, upsert=True)
def update_file_data(self, file_path, data):
if self.fs.exists({'_id': file_path}):
self.fs.delete(file_path)
self.fs.put(data, _id=file_path, filename=file_path)
def yield_all_files(self):
"""Yield all (file_entry, grid_out_file) for all files in the database."""
for entry in self.db.files.find():
data = None
if self.fs.exists({'_id': entry['_id']}):
data = self.fs.get(entry['_id'])
yield entry, data
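# --- Hedged usage sketch (not part of the original module) ---
# Shows how the backup database might be opened and queried; the MongoDB URI
# and group name below are placeholders.
def _example_usage():
    client = pymongo.MongoClient('mongodb://localhost:27017')
    db = YahooBackupDB(client, 'example_group')
    print("messages stored:", db.num_messages())
    for msg in db.yield_all_messages():
        print(msg['_id'], msg.get('subject'))
        break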
|
11560447
|
import unittest
from unittest.mock import patch
from datetime import datetime
from calendar import is_weekday
class CalendarTests(unittest.TestCase):
@patch('calendar.my_datetime', autospec=True)
def test_sunday_is_not_weekday(self, datetime_mock):
sunday = datetime(year=2020, month=4, day=26)
datetime_mock.today.return_value = sunday
self.assertFalse(is_weekday())
if __name__ == '__main__':
unittest.main()
|
11560465
|
from django.urls import path
from . import views
app_name = "management"
urlpatterns = [
path('submissions', views.SubmissionsView.as_view(), name="submissions"),
path('user_management', views.UserManagementView.as_view(), name="user_management")
]
|
11560499
|
import re
from . import Tripcode
__all__ = ['Public']
class Public (Tripcode):
"""
Represents a regular tripcode.
"""
pattern = re.compile(r'^!([\w\.\/]+)')
def __str__ (self):
"""
Returns a string representation.
"""
return '!' + super(Public, self).__str__()
|
11560520
|
import sys
import re
from setuptools.command.test import test as TestCommand
from setuptools import setup
from setuptools import find_packages
metadata = dict(
re.findall("__([a-z]+)__ = '([^']+)'", open('vanilla/meta.py').read()))
requirements = [
x.strip() for x
in open('requirements.txt').readlines() if not x.startswith('#')]
description = "Straightforward concurrency for Python " \
"http://vanillapy.readthedocs.org/"
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name='vanilla',
version=metadata['version'],
author='<NAME>',
author_email='<EMAIL>',
install_requires=requirements,
packages=find_packages(),
url='https://github.com/cablehead/vanilla',
license='MIT',
description=description,
long_description=open('README.rst').read(),
tests_require=['pytest'],
cmdclass={'test': PyTest},
)
|
11560602
|
import brownie
from brownie import ZERO_ADDRESS
import pytest
TEST_HASH = "TEST"
PRICE = 100
@pytest.mark.parametrize("price", [0, 100, 1e18])
def test_purchase(alice, bob, market, token, token_id, price):
market.makeSellOffer(token_id, price, {"from": alice})
bob_initial_balance = bob.balance()
alice_initial_balance = alice.balance()
expected_royalties = (price * token.royaltiesPercentage()) // 100
net_sale_amount = price - expected_royalties
tx = market.purchase(token_id, {"from": bob, "value": price})
assert token.ownerOf(token_id) == bob
assert alice.balance() == alice_initial_balance + net_sale_amount
assert bob.balance() == bob_initial_balance - price
assert market.activeBuyOffers(token_id) == (ZERO_ADDRESS, 0, 0)
assert market.activeSellOffers(token_id) == (ZERO_ADDRESS, 0)
assert len(tx.events) == 4
assert "Transfer" in tx.events
assert "Approval" in tx.events
assert tx.events["RoyaltiesPaid"]["tokenId"] == token_id
assert tx.events["RoyaltiesPaid"]["value"] == expected_royalties
assert tx.events["Sale"]["tokenId"] == token_id
assert tx.events["Sale"]["seller"] == alice
assert tx.events["Sale"]["buyer"] == bob
assert tx.events["Sale"]["value"] == price
def test_purchase_no_sell_offer(bob, market, token_id):
with brownie.reverts("No active sell offer"):
market.purchase(token_id, {"from": bob, "value": PRICE})
def test_purchase_by_token_owner(alice, market, token_id):
with brownie.reverts("Token owner not allowed"):
market.purchase(token_id, {"from": alice, "value": PRICE})
def test_purchase_withdrawn_sell_offer(alice, bob, market, token_id):
market.makeSellOffer(token_id, PRICE, {"from": alice})
market.withdrawSellOffer(token_id, {"from": alice})
with brownie.reverts("No active sell offer"):
market.purchase(token_id, {"from": bob, "value": PRICE})
def test_purchase_low_offer(alice, bob, market, token_id):
market.makeSellOffer(token_id, PRICE, {"from": alice})
with brownie.reverts("Amount sent too low"):
market.purchase(token_id, {"from": bob, "value": PRICE - 1})
def test_purchase_unapproved_token(alice, bob, market, token, token_id):
market.makeSellOffer(token_id, PRICE, {"from": alice})
token.approve(ZERO_ADDRESS, token_id, {"from": alice})
with brownie.reverts("Invalid sell offer"):
tx = market.purchase(token_id, {"from": bob, "value": PRICE})
assert len(tx.events) == 1
assert tx.events["SellOfferWithdrawn"]["tokenId"] == token_id
assert tx.events["SellOfferWithdrawn"]["seller"] == alice
assert market.activeSellOffers(token_id) == (ZERO_ADDRESS, 0)
|
11560610
|
class FoundValue:
"""
A class containing a record of a prop key existing in both an original props file and a translated props file
"""
common_path: str
original_file: str
translated_file: str
key: str
orig_val: str
translated_val: str
def __init__(self, common_path, original_file, translated_file, key, orig_val, translated_val):
"""
Constructor.
Args:
common_path: The folder common to both files.
original_file: The original file path.
translated_file: The translated file path.
key: The common prop key.
orig_val: The original (English) value.
translated_val: The translated value.
"""
self.common_path = common_path
self.original_file = original_file
self.translated_file = translated_file
self.key = key
self.orig_val = orig_val
self.translated_val = translated_val
|
11560617
|
import numpy as np
from PIL import Image
def intensity(arr):
    # calculates an intensity bucket (0 to 9) for every pixel
mini = 999
maxi = 0
for i in range(len(arr)):
for j in range(len(arr[0])):
maxi = max(arr[i][j],maxi)
mini = min(arr[i][j],mini)
    level = (maxi - mini) / 10.0
brr = [[0]*len(arr[0]) for i in range(len(arr))]
for i in range(10):
l1 = mini+level*i
l2 = l1+level
for j in range(len(arr)):
for k in range(len(arr[0])):
if(arr[j][k] >= l1 and arr[j][k] <= l2):
brr[j][k]=i
return brr
def order_dither(image):
arr = np.asarray(image)
brr = intensity(arr)
crr = [[8, 3, 4], [6, 1, 2], [7, 5, 9]]
drr = np.zeros((len(arr),len(arr[0])))
for i in range(len(arr)):
for j in range(len(arr[0])):
if(brr[i][j] > crr[i%3][j%3]):
drr[i][j] = 255
else:
drr[i][j] = 0
return drr
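# --- Hedged alternative sketch (not part of the original script) ---
# A vectorized NumPy version of the same 3x3 ordered dithering, assuming the
# same intensity bucketing (0-9) and threshold matrix as above.
def order_dither_fast(image):
    arr = np.asarray(image, dtype=float)
    mini, maxi = arr.min(), arr.max()
    level = (maxi - mini) / 10.0
    # map each pixel to an intensity bucket 0..9
    if level > 0:
        brr = np.clip(((arr - mini) / level).astype(int), 0, 9)
    else:
        brr = np.zeros_like(arr, dtype=int)
    crr = np.array([[8, 3, 4], [6, 1, 2], [7, 5, 9]])
    h, w = arr.shape
    # tile the 3x3 threshold matrix over the whole image
    thresholds = np.tile(crr, (h // 3 + 1, w // 3 + 1))[:h, :w]
    return np.where(brr > thresholds, 255, 0).astype(np.uint8)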
def main():
fname = 'test.jpg'
image = Image.open(fname).convert('L')
output = order_dither(image)
im = Image.fromarray(output)
im.convert('RGB').save("output.jpg")
im.show()
if __name__=="__main__":
main()
|
11560632
|
import torch
import torch.nn.utils.prune
from ..nn import XLogic, Conv2Concepts
def prune_logic_layers(model: torch.nn.Module, current_epoch: int, prune_epoch: int,
fan_in: int = None, device: torch.device = torch.device('cpu')) -> torch.nn.Module:
"""
Prune the inputs of the model.
    :param model: pytorch model
    :param current_epoch: current training epoch
    :param prune_epoch: epoch at which pruning is applied
:param fan_in: number of features to retain
:param device: cpu or cuda device
:return: pruned model
"""
if current_epoch != prune_epoch:
return model
model.eval()
for i, module in enumerate(model.children()):
# prune only Linear layers
if isinstance(module, XLogic):
if not module.top:
if hasattr(module, 'weight_orig'):
break
_prune(module, fan_in, device=device)
if isinstance(module, Conv2Concepts):
_prune(module, fan_in, device=device)
# break
model.train()
return model
def _prune(module: torch.nn.Module, fan_in: int, device: torch.device = torch.device('cpu')):
# pruning
w_size = (module.weight.shape[0], module.weight.shape[1])
# identify weights with the lowest absolute values
w_abs = torch.norm(module.weight, dim=0)
# if w is not None:
# w_abs *= w
w_sorted = torch.argsort(w_abs, descending=True)
if fan_in:
w_to_prune = w_sorted[fan_in:]
else:
w_max = torch.max(w_abs)
w_to_prune = (w_abs / w_max) < 0.5
mask = torch.ones(w_size)
# if linear:
# mask[:, w_to_prune] = 0
# mask[w_to_prune, :] = 0
# else:
# # mask[w_to_prune, :] = 0
mask[:, w_to_prune] = 0
# prune
torch.nn.utils.prune.custom_from_mask(module, name="weight", mask=mask.to(device))
# torch.nn.utils.prune.custom_from_mask(module, name="bias", mask=mask.mean(dim=0).to(device))
return
def l1_loss(model: torch.nn.Module):
loss = 0
for module in model.children():
if isinstance(module, XLogic):
loss += torch.norm(module.weight, 1) + torch.norm(module.bias, 1)
break
return loss
def whitening_loss(model: torch.nn.Module, device: torch.device = torch.device('cpu')):
loss = 0
cov = None
for module in model.children():
if isinstance(module, XLogic):
# the target covariance matrix is diagonal
n_concepts = module.conceptizator.concepts.shape[1]
cov_objective = torch.eye(n_concepts).to(device)
# compute covariance matrix of activations
cov = 1 / (n_concepts - 1) * torch.matmul(module.conceptizator.concepts.T, module.conceptizator.concepts)
loss += torch.norm(cov - cov_objective, p=2)
break
return loss, cov
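# --- Hedged standalone sketch (not part of the original module) ---
# Demonstrates the masking mechanism used in _prune() on a plain nn.Linear:
# keep only the `fan_in` input features with the largest column norms.
def _prune_linear_example(fan_in: int = 2):
    import torch.nn.utils.prune as prune
    layer = torch.nn.Linear(5, 3)
    col_norms = torch.norm(layer.weight, dim=0)
    keep = torch.argsort(col_norms, descending=True)[:fan_in]
    mask = torch.zeros_like(layer.weight)
    mask[:, keep] = 1.0
    prune.custom_from_mask(layer, name="weight", mask=mask)
    return layer.weight  # columns outside `keep` are now zeroed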
|
11560641
|
import os
import json
import argparse
import pytest
from maggot import Experiment
from maggot import Config
@pytest.fixture
def simple_dict_config():
config = dict(
a=10,
b=[1, 2, 3],
c="a"
)
return config
@pytest.fixture
def nested_dict_config(simple_dict_config):
config = dict(
a=10,
_b="a",
c=simple_dict_config
)
return config
def test_experiment_initialization(nested_dict_config, tmpdir):
experiments_dir = tmpdir.join("experiments").strpath
experiment = Experiment(nested_dict_config, experiments_dir=experiments_dir)
assert experiment.config.to_dict() == nested_dict_config
assert os.path.exists(
os.path.join(experiments_dir, experiment.config.identifier, ".maggot")
)
def test_experiment_initialization_with_custom_name(nested_dict_config, tmpdir):
experiments_dir = tmpdir.join("experiments").strpath
experiment = Experiment(
nested_dict_config,
experiments_dir=experiments_dir,
experiment_name="custom"
)
assert os.path.isdir(os.path.join(experiments_dir, "custom"))
assert os.path.isdir(os.path.join(experiments_dir, "custom", ".maggot"))
def test_experiment_restoration(nested_dict_config, tmpdir):
experiments_dir = tmpdir.join("experiments").strpath
# create an experiment
experiment = Experiment(nested_dict_config, experiments_dir=experiments_dir)
experiment.register_directory("temp")
# test restoration from identifier
restored_experiment = Experiment(
resume_from=experiment.experiment_dir, experiments_dir=experiments_dir
)
assert restored_experiment.config.to_dict() == experiment.config.to_dict()
# test that `temp` is registered after restoration
assert os.path.isdir(experiment.directories.temp)
def test_experiment_logging(nested_dict_config, tmpdir):
experiments_dir = tmpdir.join("experiments").strpath
with Experiment(
nested_dict_config,
experiments_dir=experiments_dir
) as experiment:
print("test")
with open(experiment.logfile, "r") as fp:
assert fp.readlines()[-1].strip() == "test"
print("test2")
# check that nothing is logged when print is called
    # outside the context manager
with open(experiment.logfile, "r") as fp:
assert fp.readlines()[-1].strip() == "test"
def test_experiment_commit_hash_saving(nested_dict_config, tmpdir):
experiments_dir = tmpdir.join("experiments").strpath
experiment = Experiment(
nested_dict_config, experiments_dir=experiments_dir
)
assert os.path.isfile(
os.path.join(
experiment.experiment_dir, ".maggot", "commit_hash"
)
)
def test_experiment_register_directory(nested_dict_config, tmpdir):
experiments_dir = tmpdir.join("experiments").strpath
experiment = Experiment(nested_dict_config, experiments_dir=experiments_dir)
experiment.register_directory("temp")
target = os.path.join(experiment.experiment_dir, "temp")
assert os.path.isdir(experiment.directories.temp)
assert experiment.directories.temp == target
def test_experiment_register_result(simple_dict_config, tmpdir):
experiments_dir = tmpdir.join("experiments").strpath
experiment = Experiment(simple_dict_config, experiments_dir=experiments_dir)
experiment.register_result("fold1.accuracy", 0.97)
experiment.register_result("fold2.accuracy", 0.99)
experiment.register_result("fold1.loss", 0.03)
experiment.register_result("fold2.loss", 0.01)
experiment.register_result("overall_accuracy", 0.98)
results = experiment.results.to_dict()
assert results["fold1"]["accuracy"] == 0.97
assert results["fold2"]["accuracy"] == 0.99
assert results["fold1"]["loss"] == 0.03
assert results["fold2"]["loss"] == 0.01
assert results["overall_accuracy"] == 0.98
|
11560656
|
from datetime import datetime, timedelta
import numpy as np
import numexpr as ne
from netCDF4 import Dataset
from scipy.interpolate import CubicSpline
from typhon.utils import Timer
import xarray as xr
from .common import NetCDF4, expects_file_info
from .testers import check_lat_lon
__all__ = [
'AVHRR_GAC_HDF',
'MHS_HDF',
]
class AAPP_HDF(NetCDF4):
"""Base class for handling TOVS satellite data converted with AAPP tools
"""
# This file handler always wants to return at least time, lat and lon
# fields. These fields are required for this:
standard_fields = {
"Data/scnlintime", # milliseconds since midnight
"Data/scnlinyr",
"Data/scnlindy",
"Data/scnlin",
"Geolocation/Latitude",
"Geolocation/Longitude"
}
def __init__(self, **kwargs):
"""
Args:
**kwargs: Additional key word arguments for base class.
"""
# Call the base class initializer
super().__init__(**kwargs)
@expects_file_info()
def get_info(self, file_info, **kwargs):
with Dataset(file_info.path, "r") as file:
file_info.times[0] = \
datetime(int(file.startdatayr[0]), 1, 1) \
+ timedelta(days=int(file.startdatady[0]) - 1) \
+ timedelta(milliseconds=int(file.startdatatime_ms[0]))
file_info.times[1] = \
datetime(int(file.enddatayr), 1, 1) \
+ timedelta(days=int(file.enddatady) - 1) \
+ timedelta(milliseconds=int(file.enddatatime_ms))
return file_info
@staticmethod
def _get_time_field(dataset, user_fields):
time = \
(dataset["Data/scnlinyr"].values - 1970).astype('datetime64[Y]') \
+ (dataset["Data/scnlindy"].values - 1).astype('timedelta64[D]') \
+ dataset["Data/scnlintime"].values.astype("timedelta64[ms]")
dataset["time"] = "scnline", time
        # Remove the time fields that we no longer need (except when the
        # user asked for them explicitly)
dataset = dataset.drop_vars(
{"Data/scnlinyr", "Data/scnlindy", "Data/scnlintime"}
- set(user_fields),
)
return dataset
@staticmethod
def _mask_and_scale(dataset):
# xarray.open_dataset can mask and scale automatically, but it does not
# know the attribute *Scale* (which is specific for AAPP files):
for var in dataset.variables:
# We want to remove some attributes after applying them but
# OrderedDict does not allow to pop the values:
attrs = dict(dataset[var].attrs)
mask = attrs.pop('FillValue', None)
if mask is not None:
dataset[var] = dataset[var].where(
# Also cover overflow errors as they are in
# NSS.MHSX.NN.D07045.S2234.E0021.B0896162.GC.h5
(dataset[var] != mask) & (dataset[var] != -2147483648.0)
)
scaling = attrs.pop('Scale', None)
if scaling is not None:
dataset[var] = dataset[var].astype(float) * scaling
dataset[var].attrs = attrs
def _test_coords(self, dataset, wanted=None):
# Maximal these dimensions (or less) should be in the dataset:
if wanted is None:
wanted = {'channel', 'scnline', 'scnpos'}
reality = set(dataset.dims.keys())
if reality - wanted:
raise ValueError(
f"Unexpected dimension in AAPP file! {reality - wanted}"
)
class MHS_HDF(AAPP_HDF):
"""File handler for MHS level 1C HDF files
"""
def __init__(self, **kwargs):
super(MHS_HDF, self).__init__(**kwargs)
# Map the standard fields to standard names (make also the names of all
# dimensions more meaningful):
self.mapping = {
"Geolocation/Latitude": "lat",
"Geolocation/Longitude": "lon",
"Data/scnlin": "scnline",
"Data/phony_dim_0": "scnline",
"Data/phony_dim_1": "scnpos",
"Data/phony_dim_2": "channel",
"Geolocation/phony_dim_3": "scnline",
"Geolocation/phony_dim_4": "scnpos",
}
@expects_file_info()
def read(self, file_info, mask_and_scale=True, **kwargs):
"""Read and parse MHS AAPP HDF5 files and load them to xarray
Args:
file_info: Path and name of the file as string or FileInfo object.
This can also be a tuple/list of file names or a path with
asterisk.
mask_and_scale: Where the data contains missing values, it will be
masked with NaNs. Furthermore, data with scaling attributes
will be scaled with them.
**kwargs: Additional keyword arguments that are valid for
:class:`~typhon.files.handlers.common.NetCDF4`.
Returns:
            An xarray.Dataset object.
"""
# Make sure that the standard fields are always gonna be imported:
user_fields = kwargs.pop("fields", {})
if user_fields:
fields = self.standard_fields | set(user_fields)
else:
fields = None
# We catch the user mapping here, since we do not want to deal with
# user-defined names in the further processing. Instead, we use our own
# mapping
user_mapping = kwargs.pop("mapping", None)
# Load the dataset from the file:
dataset = super().read(
file_info, fields=fields, mapping=self.mapping,
mask_and_scale=mask_and_scale, **kwargs
)
scnlines = dataset["scnline"].values
dataset = dataset.assign_coords(
scnline=dataset["scnline"]
)
dataset["scnline"] = np.arange(1, dataset.scnline.size+1)
dataset["scnpos"] = np.arange(1, 91)
dataset["channel"] = "channel", np.arange(1, 6)
# Create the time variable (is built from several other variables):
dataset = self._get_time_field(dataset, user_fields)
if mask_and_scale:
self._mask_and_scale(dataset)
# Make a fast check whether everything is alright
self._test_coords(dataset)
# Check the latitudes and longitudes:
check_lat_lon(dataset)
if user_mapping is not None:
dataset = dataset.rename(user_mapping)
return dataset
class AVHRR_GAC_HDF(AAPP_HDF):
"""File handler for AVHRR GAC level 1C HDF files
"""
def __init__(self, **kwargs):
super(AVHRR_GAC_HDF, self).__init__(**kwargs)
# Map the standard fields to standard names (make also the names of all
# dimensions more meaningful):
self.mapping = {
"Geolocation/Latitude": "lat",
"Geolocation/Longitude": "lon",
"Data/scnlin": "scnline",
"Data/phony_dim_0": "scnline",
"Data/phony_dim_1": "scnpos",
"Data/phony_dim_2": "channel",
"Data/phony_dim_3": "calib",
"Geolocation/phony_dim_4": "scnline",
"Geolocation/phony_dim_5": "packed_pixels",
}
@expects_file_info()
def read(self, file_info, mask_and_scale=True, interpolate_packed_pixels=True,
max_nans_interpolation=10, **kwargs):
"""Read and parse MHS AAPP HDF5 files and load them to xarray
Args:
file_info: Path and name of the file as string or FileInfo object.
This can also be a tuple/list of file names or a path with
asterisk.
mask_and_scale: Where the data contains missing values, it will be
masked with NaNs. Furthermore, data with scaling attributes
will be scaled with them.
interpolate_packed_pixels: Geo-location data is packed and must be
interpolated to use them as reference for each pixel.
            max_nans_interpolation: Maximum number of NaN values allowed in
                latitude and longitude before an error is raised.
**kwargs: Additional keyword arguments that are valid for
:class:`~typhon.files.handlers.common.NetCDF4`.
Returns:
            An xarray.Dataset object.
"""
# Make sure that the standard fields are always gonna be imported:
user_fields = kwargs.pop("fields", {})
if user_fields:
fields = self.standard_fields | set(user_fields)
else:
fields = None
# We catch the user mapping here, since we do not want to deal with
# user-defined names in the further processing. Instead, we use our own
# mapping
user_mapping = kwargs.pop("mapping", None)
# Load the dataset from the file:
dataset = super().read(
file_info, fields=fields, mapping=self.mapping,
mask_and_scale=mask_and_scale, **kwargs
)
# Keep the original scnlines
scnlines = dataset["scnline"].values
dataset = dataset.assign_coords(
scnline=dataset["scnline"]
)
dataset["scnline"] = np.arange(1, dataset.scnline.size+1)
dataset["scnpos"] = np.arange(1, 2049)
dataset["channel"] = "channel", np.arange(1, 6)
# Currently, the AAPP converting tool seems to have a bug. Instead of
# retrieving 409 pixels per scanline, one gets 2048 pixels. The
# additional values are simply duplicates (or rather quintuplicates):
dataset = dataset.sel(scnpos=slice(4, None, 5))
dataset["scnpos"] = np.arange(1, 410)
# Create the time variable (is built from several other variables):
dataset = self._get_time_field(dataset, user_fields)
if mask_and_scale:
self._mask_and_scale(dataset)
# All geolocation fields are packed in the AVHRR GAC files:
if interpolate_packed_pixels:
self._interpolate_packed_pixels(dataset, max_nans_interpolation)
allowed_coords = {'channel', 'calib', 'scnline', 'scnpos'}
else:
allowed_coords = {'channel', 'calib', 'scnline', 'scnpos',
'packed_pixels'}
# Make a fast check whether everything is alright
self._test_coords(dataset, allowed_coords)
# Check the latitudes and longitudes:
check_lat_lon(dataset)
if user_mapping is not None:
dataset = dataset.rename(user_mapping)
return dataset
@staticmethod
def _interpolate_packed_pixels(dataset, max_nans_interpolation):
given_pos = np.arange(5, 409, 8)
new_pos = np.arange(1, 410)
lat_in = np.deg2rad(dataset["lat"].values)
lon_in = np.deg2rad(dataset["lon"].values)
# We cannot define given positions for each scanline, but we have to
# set them for all equally. Hence, we skip every scan position of all
# scan lines even if only one contains a NaN value:
nan_scnpos = \
np.isnan(lat_in).sum(axis=0) + np.isnan(lon_in).sum(axis=0)
valid_pos = nan_scnpos == 0
if valid_pos.sum() < 52 - max_nans_interpolation:
raise ValueError(
"Too many NaNs in latitude and longitude of this AVHRR file. "
"Cannot guarantee a good interpolation!"
)
# Filter NaNs because CubicSpline cannot handle it:
lat_in = lat_in[:, valid_pos]
lon_in = lon_in[:, valid_pos]
given_pos = given_pos[valid_pos]
x_in = np.cos(lon_in) * np.cos(lat_in)
y_in = np.sin(lon_in) * np.cos(lat_in)
z_in = np.sin(lat_in)
xf = CubicSpline(given_pos, x_in, axis=1, extrapolate=True)(new_pos)
yf = CubicSpline(given_pos, y_in, axis=1, extrapolate=True)(new_pos)
zf = CubicSpline(given_pos, z_in, axis=1, extrapolate=True)(new_pos)
lon = np.rad2deg(np.arctan2(yf, xf))
lat = np.rad2deg(np.arctan2(zf, np.sqrt(xf ** 2 + yf ** 2)))
dataset["lat"] = ("scnline", "scnpos"), lat
dataset["lon"] = ("scnline", "scnpos"), lon
# The other packed variables will be simply padded:
for var_name, var in dataset.data_vars.items():
if "packed_pixels" not in var.dims:
continue
nan_scnpos = np.isnan(var).sum(axis=0)
valid_pos = nan_scnpos == 0
given_pos = np.arange(5, 409, 8)[valid_pos]
dataset[var_name] = xr.DataArray(
CubicSpline(
given_pos, var.values[:, valid_pos], axis=1,
extrapolate=True)(new_pos),
dims=("scnline", "scnpos")
)
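# --- Hedged usage sketch (not part of the original module) ---
# Illustrates reading a single level 1C file with one of the handlers above.
# The filename is taken from a comment in this module and is only a placeholder;
# whether a plain string path is accepted depends on expects_file_info().
def _example_read_mhs():
    handler = MHS_HDF()
    data = handler.read("NSS.MHSX.NN.D07045.S2234.E0021.B0896162.GC.h5")
    return data["time"].values[:5], data["lat"].shape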
|
11560675
|
import logging
def basicConfig():
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s',
datefmt='%Y%m%dT%H:%M:%S')
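# --- Hedged usage sketch (not part of the original module) ---
# Typical use: call basicConfig() once at program start, then log as usual.
def _example():
    basicConfig()
    logging.getLogger(__name__).info("logging configured")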
|
11560684
|
import math
import numpy as np
import pytest
import torch
from probflow.distributions import Poisson
tod = torch.distributions
def is_close(a, b, tol=1e-3):
return np.abs(a - b) < tol
def test_Poisson():
"""Tests Poisson distribution"""
# Create the distribution
dist = Poisson(3)
# Check default params
assert dist.rate == 3
# Call should return backend obj
assert isinstance(dist(), tod.poisson.Poisson)
# Test methods
zero = torch.tensor([0.0])
one = torch.tensor([1.0])
two = torch.tensor([2.0])
three = torch.tensor([3.0])
    ppdf = lambda x, r: np.power(r, x) * np.exp(-r) / math.factorial(x)
assert is_close(dist.prob(zero).numpy(), ppdf(0, 3))
assert is_close(dist.prob(one).numpy(), ppdf(1, 3))
assert is_close(dist.prob(two).numpy(), ppdf(2, 3))
assert is_close(dist.prob(three).numpy(), ppdf(3, 3))
assert is_close(dist.log_prob(zero).numpy(), np.log(ppdf(0, 3)))
assert is_close(dist.log_prob(one).numpy(), np.log(ppdf(1, 3)))
assert is_close(dist.log_prob(two).numpy(), np.log(ppdf(2, 3)))
assert is_close(dist.log_prob(three).numpy(), np.log(ppdf(3, 3)))
assert dist.mean().numpy() == 3
# Only takes Tensor-like objs
with pytest.raises(TypeError):
dist = Poisson("lalala")
# Test sampling
samples = dist.sample()
assert isinstance(samples, torch.Tensor)
assert samples.ndim == 0
samples = dist.sample(10)
assert isinstance(samples, torch.Tensor)
assert samples.ndim == 1
assert samples.shape[0] == 10
|
11560754
|
import numpy as np
import matplotlib.pyplot as plt
def main():
# plot results for each experiment type and capacity
save_path = './results'
dataset = 'cub200'
for experiment_type in ['iid', 'class_iid']:
for c in [2, 4, 8, 16]:
acc_name = 'acc_' + experiment_type + '_' + dataset + '_exstream_capacity_' + str(c)
mpca_name = 'mpca_' + acc_name
res = np.load(save_path + '/' + mpca_name + '.npy')
plt.figure()
plt.plot(res, label='ExStream')
plt.xlabel('Sample Number', fontsize=14)
plt.ylabel('Mean-Class Accuracy [%]', fontsize=14)
plt.title('Type: %s -- Capacity: %d' % (experiment_type, c), fontsize=14)
plt.legend()
plt.show()
if __name__ == '__main__':
main()
|
11560806
|
import json
from ast import literal_eval
import pandas as pd
import re
import sys
import os
from flask import Blueprint
from flask import jsonify
from flask import request
from flask import current_app as app
import numpy as np
from utillities.exceptions import ExceptionHelpers
mod = Blueprint('combine_dataframe', __name__)
null = None
@mod.route('merge/', methods=["POST"])
def merge():
try:
request_dict = request.get_json()
left_df = request_dict['leftDf']
right_df = request_dict['rightDf']
how = request_dict['how']
left_on = request_dict['left_on']
right_on = request_dict['right_on']
left_index = request_dict['left_index']
right_index = request_dict['right_index']
left_df = pd.read_json(json.dumps(eval(left_df)), orient='split')
right_df = pd.read_json(json.dumps(eval(right_df)), orient='split')
df = left_df.merge(right_df, how=how, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index)
df_json = df.to_json(orient='split')
response = app.response_class(
response=df_json,
status=200,
mimetype='application/json'
)
    except Exception:
exception = ExceptionHelpers.format_exception(sys.exc_info())
response = app.response_class(
response=exception,
status=400,
mimetype='application/json'
)
return response
@mod.route('concatenate/', methods=["POST"])
def concatenate():
try:
request_dict = request.get_json()
df_json_list = request_dict['df_json_list']
axis = request_dict['axis']
join = request_dict['join']
ignore_index = request_dict['ignore_index']
df_list = []
for i in df_json_list:
df_list.append(pd.read_json(json.dumps(eval(i)), orient='split'))
df = pd.concat(df_list, axis=axis, ignore_index=ignore_index)
df_json = df.to_json(orient='split')
response = app.response_class(
response=df_json,
status=200,
mimetype='application/json'
)
    except Exception:
exception = ExceptionHelpers.format_exception(sys.exc_info())
response = app.response_class(
response=exception,
status=400,
mimetype='application/json'
)
return response
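# --- Hedged client-side sketch (not part of the original blueprint) ---
# Shows the payload shape the merge/ endpoint expects: each dataframe is sent
# as its split-oriented JSON string. Column names and values are placeholders.
def _example_merge_payload():
    left = pd.DataFrame({"key": [1, 2], "a": ["x", "y"]})
    right = pd.DataFrame({"key": [1, 2], "b": [10, 20]})
    return {
        "leftDf": left.to_json(orient="split"),
        "rightDf": right.to_json(orient="split"),
        "how": "inner",
        "left_on": "key",
        "right_on": "key",
        "left_index": False,
        "right_index": False,
    }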
|
11560868
|
import torch
import torch.nn as nn
from utils.custom_modules import CausalConv1d, DTLNSeparationCore
from utils.training_process import TrainingProcess
class DTLN(nn.Module):
""" Dual-Signal Transformation Long Short-Term Memory Network.
    Based on the work presented by <NAME> et al. for the
DNS INTERSPEECH 2020:
https://www.isca-speech.org/archive/Interspeech_2020/pdfs/2631.pdf
The original implementation was coded in tensorflow 2.x. The current
implementation is based on it with some changes to attempt to improve
the existing model for noise suppression tasks on speech signals.
Args:
sample_rate (int): Sample rate of the input files.
window_size (int): Window (and FFT) size used to perform the STFT over
the input files.
hop_size (int): Hop size used to perform the STFT over the input files.
sample_duration (int): Duration in seconds of each input file.
hidden_size (int): Hidden size of the RNN stack.
encoder_size (int): Channel output size of Conv1D layers used in the
network (see architecture for the details).
rnn_type (nn.Module): Type of RNN layer used in the stack.
rnn_stack_size (int): Size of the RNN stack.
rnn_bidirectional (bool): If True, makes the RNN layer bidirectional.
dropout_rate (float): Dropout rate used between RNN stacks.
batch_size (int): Size of a single batch.
eps (float): Machine epsilon.
"""
def __init__(self,
sample_rate: int = 16000,
window_size: int = 512,
hop_size: int = 128,
sample_duration: float = 10.0,
hidden_size: int = 128,
encoder_size: int = 256,
rnn_type: nn.Module = nn.LSTM,
rnn_stack_size: int = 2,
rnn_bidirectional: bool = False,
dropout_rate: float = 0.25,
batch_size: int = 32,
eps: float = 1e-10):
super().__init__()
        # audio parameters
self.sample_rate = sample_rate
self.window_size = window_size
self._window = torch.hann_window(self.window_size)
self.hop_size = hop_size
self.sample_duration = sample_duration
self.eps = eps
# network params
self.hidden_size = hidden_size
self.encoder_size = encoder_size
self.dropout_rate = dropout_rate
self.rnn_type = rnn_type
self.rnn_stack_size = rnn_stack_size
self.rnn_bidirectional = rnn_bidirectional
self.batch_size = batch_size
# network modules
self.layer_norm_0 = nn.LayerNorm(self.window_size // 2 + 1,
eps=self.eps)
# first separation core
self.separation_core_0 = DTLNSeparationCore(
self.window_size // 2 + 1,
self.hidden_size,
self.window_size // 2 + 1,
rnn_type=self.rnn_type,
rnn_stack_size=self.rnn_stack_size,
rnn_bidirectional=self.rnn_bidirectional,
activation=nn.Sigmoid
)
# modules between separation cores
self.conv1d_1 = nn.Conv1d(self.window_size, self.encoder_size,
kernel_size=1, stride=1, bias=False)
self.layer_norm_1 = nn.LayerNorm(self.encoder_size, eps=self.eps)
# second separation core
self.separation_core_1 = DTLNSeparationCore(
self.encoder_size,
self.hidden_size,
self.encoder_size,
rnn_type=self.rnn_type, rnn_stack_size=self.rnn_stack_size,
rnn_bidirectional=self.rnn_bidirectional,
activation=nn.Sigmoid
)
# additional tail block
self.causal_conv1d_1 = CausalConv1d(self.encoder_size,
self.window_size,
kernel_size=1, bias=False)
self.overlap_and_add = nn.Fold((1, self.sample_len),
kernel_size=(1, self.window_size),
stride=(1, self.hop_size))
# init weights and biases
self.init_weights_and_biases()
def init_weights_and_biases(self):
""" Initialize weights and biases based on each module type. """
for module in self.modules():
if isinstance(module, nn.Linear):
self.init_linear_(module)
if isinstance(module, nn.LayerNorm):
self.init_layer_norm_(module)
if isinstance(module, nn.GRU):
self.init_gru_(module)
if isinstance(module, nn.LSTM):
self.init_lstm_(module)
def init_lstm_(self, lstm_l):
torch.nn.init.xavier_uniform_(lstm_l.weight_ih_l0)
torch.nn.init.xavier_uniform_(lstm_l.weight_hh_l0)
if lstm_l.bias is not None:
torch.nn.init.zeros_(lstm_l.bias_ih_l0)
torch.nn.init.zeros_(lstm_l.bias_hh_l0)
def init_gru_(self, gru_l):
torch.nn.init.xavier_uniform_(gru_l.weight_ih_l0)
torch.nn.init.xavier_uniform_(gru_l.weight_hh_l0)
if gru_l.bias is not None:
torch.nn.init.zeros_(gru_l.bias_ih_l0)
torch.nn.init.zeros_(gru_l.bias_hh_l0)
def init_linear_(self, linear_l):
torch.nn.init.xavier_uniform_(linear_l.weight)
if linear_l.bias is not None:
torch.nn.init.zeros_(linear_l.bias)
def init_layer_norm_(self, layer_norm):
torch.nn.init.ones_(layer_norm.weight)
if layer_norm.bias is not None:
torch.nn.init.zeros_(layer_norm.bias)
def stft(self, x: torch.tensor, normalize: bool = False,
complex_output: bool = False):
""" Return the short-time fourier transform of a tensor.
Args:
x (torch.tensor): Input tensor.
normalize (bool): If True, the stft values are normalized.
complex_output (bool): If True, the output is return in complex
form.
Returns:
stft (torch.tensor): STFT of the input tensor.
"""
window = self._window.to(x.device)
stft = torch.stft(x, onesided=True, center=False,
n_fft=self.window_size, hop_length=self.hop_size,
normalized=normalize, window=window,
return_complex=True)
if complex_output:
return stft
else:
return torch.abs(stft), torch.angle(stft)
def ifft(self, x_mag: torch.tensor, x_phase: torch.tensor):
""" Return the inverse fourier transform of a pair of magnitude
and phase tensors.
IMPORTANT: This function assumes the reconstruction is done using only
        the first half of the input features.
Args:
x_mag (torch.tensor): Magnitude input tensor.
x_phase (torch.tensor): Phase input tensor.
Returns:
ifft (torch.tensor): Inverse fourier transform of the input tensor.
"""
x_real = x_mag * torch.cos(x_phase)
x_imag = x_mag * torch.sin(x_phase)
x_complex = torch.complex(x_real, x_imag)
ifft = torch.fft.irfft(x_complex, dim=-1)
return ifft
@property
def sample_len(self):
""" Returns the amount of samples on each input audio chunk.
Returns:
sample_len (int): Audio chunk length in samples
"""
return int(self.sample_duration * self.sample_rate)
@torch.no_grad()
def predict(self, x: torch.tensor):
""" Takes a input frame of noisy speech in the time domain and
produces a clean speech output frame.
Args:
x (torch.tensor): Input noisy speech frame.
Returns:
(torch.tensor): Output predicted clean speech frame.
"""
return self(x).cpu().numpy().reshape(-1)
def forward(self, x):
# get magnitude and phase in a (batch_size, bins, frames) tensor
x_mag, x_phase = self.stft(x)
# obtain log spectrum
x_mag_log = torch.log10(x_mag + self.eps)
# (batch_size, bins, frames) -> (batch_size, frames, bins)
x_mag_log = x_mag_log.permute(0, 2, 1)
# norm with learnable weights (batch_size, frames, bins)
x_mag_log = self.layer_norm_0(x_mag_log)
# (batch_size, frames, bins) -> (frames, batch_size, bins)
x_mag_log = x_mag_log.permute(1, 0, 2)
# first separation core -> (batch_size, frames, bins)
x_mask_0 = self.separation_core_0(x_mag_log)
        # apply first mask -> (batch_size, frames, bins)
x_mag_embedding = x_mag.permute(0, 2, 1) * x_mask_0
# back to "time" domain
x = self.ifft(x_mag_embedding, x_phase.permute(0, 2, 1))
# (batch_size, frames, samples) -> (batch_size, samples, frames)
x = x.permute(0, 2, 1)
        # encoded features, later multiplied by the mask from the second separation core
x_skip_1 = self.conv1d_1(x)
# (batch_size, features, frames) -> (batch_size, frames, features)
x = x_skip_1.permute(0, 2, 1)
# instant layer norm
x = self.layer_norm_1(x)
# (batch_size, frames, features) -> (frames, batch_size, features)
x = x.permute(1, 0, 2)
# second separation core -> (batch_size, frames, features)
x_mask_1 = self.separation_core_1(x)
# (batch_size, features, frames)
x = x_mask_1.permute(0, 2, 1) * x_skip_1
# causal conv before overlap and add
x = self.causal_conv1d_1(x)
# reconstruction to time domain
x = self.overlap_and_add(x)
# reshape output vector to (chl, samples)
x = torch.reshape(x, (self.batch_size, -1))
return x
class DTLNTrainingProcess(TrainingProcess):
""" Callbacks executed in different phases of the training process of a
DTLN instance. For further information check the TrainingProcess
parent class. """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def on_train_step(self, batch_idx, batch):
x, y = batch
x = x.to(self.device)
y = y.to(self.device)
y_pred = self.model(x)
loss = self.criterion(y_pred, y)
self.optimizer.zero_grad()
loss.backward()
if self.grad_norm_clipping is not None:
torch.nn.utils.clip_grad_norm_(
self.model.parameters(),
max_norm=self.grad_norm_clipping)
self.optimizer.step()
self.running_dict.set_value("train_loss", loss.item())
def on_val_step(self, batch_idx, batch):
x, y = batch
x = x.to(self.device)
y = y.to(self.device)
y_pred = self.model(x)
loss = self.criterion(y_pred, y)
self.running_dict.set_value("val_loss", loss.item())
def on_overfit_train_step(self, batch_idx, batch):
self.on_train_step(batch_idx, batch)
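if __name__ == "__main__":
    # Standalone sketch of the overlap-and-add reconstruction performed above with
    # nn.Fold. The window size, hop size and chunk length below are illustrative
    # assumptions only and do not come from the model configuration.
    import torch
    import torch.nn as nn
    window_size, hop_size, sample_len = 512, 128, 16000
    n_frames = (sample_len - window_size) // hop_size + 1
    # frames laid out as (batch, window_size, n_frames), the layout nn.Fold expects
    frames = torch.randn(1, window_size, n_frames)
    overlap_and_add = nn.Fold((1, sample_len),
                              kernel_size=(1, window_size),
                              stride=(1, hop_size))
    # Fold sums the overlapping windows back into a (batch, 1, 1, sample_len) signal
    reconstructed = overlap_and_add(frames)
    print(reconstructed.shape)  # torch.Size([1, 1, 1, 16000])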
|
11560908
|
import sys
from tqdm import tqdm
import mxnet as mx
import gluoncv as gcv
gcv.utils.check_version('0.6.0')
from gluoncv.data import mscoco
from gluoncv.data.transforms.pose import (flip_heatmap,
heatmap_to_coord_alpha_pose)
from gluoncv.data.transforms.presets.alpha_pose import \
AlphaPoseDefaultValTransform
from gluoncv.utils.metrics.coco_keypoints import COCOKeyPointsMetric
from mxnet import gluon, nd
class NullWriter(object):
def write(self, arg):
pass
def get_dataset(dataset):
if dataset == 'coco':
        val_dataset = mscoco.keypoints.COCOKeyPoints(splits=('person_keypoints_val2017',), skip_empty=False)
else:
raise NotImplementedError("Dataset: {} not supported.".format(dataset))
return val_dataset
def val_batch_fn(batch, ctx):
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx,
batch_axis=0, even_split=False)
return tuple([data] + batch[1:])
def get_val_data_loader(dataset, batch_size, num_workers, input_size, opt):
val_dataset = get_dataset(dataset)
transform_val = AlphaPoseDefaultValTransform(num_joints=val_dataset.num_joints,
joint_pairs=val_dataset.joint_pairs,
image_size=input_size)
val_data = gluon.data.DataLoader(
val_dataset.transform(transform_val),
batch_size=batch_size, shuffle=False, last_batch='keep',
num_workers=num_workers)
return val_dataset, val_data
def validate(val_data, val_dataset, net, ctx, opt):
if isinstance(ctx, mx.Context):
ctx = [ctx]
val_metric = COCOKeyPointsMetric(val_dataset, 'coco_keypoints',
in_vis_thresh=0)
for batch in tqdm(val_data, dynamic_ncols=True):
# data, scale, center, score, imgid = val_batch_fn(batch, ctx)
data, scale_box, score, imgid = val_batch_fn(batch, ctx)
outputs = [net(X) for X in data]
if opt.flip_test:
data_flip = [nd.flip(X, axis=3) for X in data]
outputs_flip = [net(X) for X in data_flip]
outputs_flipback = [flip_heatmap(o, val_dataset.joint_pairs, shift=True) for o in outputs_flip]
outputs = [(o + o_flip) / 2 for o, o_flip in zip(outputs, outputs_flipback)]
if len(outputs) > 1:
outputs_stack = nd.concat(*[o.as_in_context(mx.cpu()) for o in outputs], dim=0)
else:
outputs_stack = outputs[0].as_in_context(mx.cpu())
# preds, maxvals = get_final_preds(outputs_stack, center.asnumpy(), scale.asnumpy())
preds, maxvals = heatmap_to_coord_alpha_pose(outputs_stack, scale_box)
val_metric.update(preds, maxvals, score, imgid)
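    # silence the verbose printout of the COCO keypoint evaluation while the metric is computed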
nullwriter = NullWriter()
oldstdout = sys.stdout
sys.stdout = nullwriter
try:
res = val_metric.get()
finally:
sys.stdout = oldstdout
return res
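if __name__ == '__main__':
    # Minimal evaluation sketch (not part of the original script): the model name,
    # input size, batch size and context below are illustrative assumptions only.
    from types import SimpleNamespace
    opt = SimpleNamespace(flip_test=True)
    ctx = [mx.cpu()]
    net = gcv.model_zoo.get_model('alpha_pose_resnet101_v1b_coco',
                                  pretrained=True, ctx=ctx)
    val_dataset, val_data = get_val_data_loader('coco', batch_size=32, num_workers=4,
                                                input_size=(320, 256), opt=opt)
    print(validate(val_data, val_dataset, net, ctx, opt))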
|
11560911
|
from office365.runtime.client_value import ClientValue
class PageLinks(ClientValue):
"""Links for opening a OneNote page."""
def __init__(self, onenote_client_url=None, onenote_web_url=None):
"""
:param str onenote_client_url: Opens the page in the OneNote native client if it's installed.
:param str onenote_web_url: Opens the page in OneNote on the web.
"""
super(PageLinks, self).__init__()
self.oneNoteClientUrl = onenote_client_url
self.oneNoteWebUrl = onenote_web_url
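if __name__ == "__main__":
    # Construction sketch only; both URLs are placeholders, not real OneNote links.
    links = PageLinks(
        onenote_client_url="onenote:https://contoso.sharepoint.com/notebooks/page-id",
        onenote_web_url="https://contoso.sharepoint.com/_layouts/OneNote.aspx?page-id",
    )
    print(links.oneNoteClientUrl)
    print(links.oneNoteWebUrl)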
|
11560954
|
import os
import sys
import shutil
from subprocess import call, DEVNULL
from annogesiclib.multiparser import Multiparser
from annogesiclib.converter import Converter
from annogesiclib.format_fixer import FormatFixer
from annogesiclib.helper import Helper
class RATT(object):
'''annotation transfer'''
def __init__(self, args_ratt):
self.multiparser = Multiparser()
self.converter = Converter()
self.format_fixer = FormatFixer()
self.helper = Helper()
if args_ratt.ref_gbk:
self.gbk = os.path.join(args_ratt.ref_gbk, "gbk_tmp")
self.gbk_tmp = os.path.join(self.gbk, "tmp")
self.embl = os.path.join(args_ratt.ref_gbk, "embls")
if args_ratt.ref_embls:
self.embl = args_ratt.ref_embls
self.ratt_log = os.path.join(args_ratt.output_path, "ratt_log.txt")
self.tmp_files = {"tar": os.path.join(args_ratt.tar_fastas, "tmp"),
"ref": os.path.join(args_ratt.ref_fastas, "tmp"),
"out_gff": os.path.join(args_ratt.gff_outfolder,
"tmp"),
"gff": os.path.join(args_ratt.gff_outfolder,
"tmp.gff"),
"ptt": os.path.join(args_ratt.gff_outfolder,
"tmp.ptt"),
"rnt": os.path.join(args_ratt.gff_outfolder,
"tmp.rnt")}
def _convert_to_pttrnt(self, gffs, files, log):
for gff in files:
if gff.endswith(".gff"):
gff = os.path.join(gffs, gff)
filename = gff.split("/")
prefix = filename[-1][:-4]
rnt = gff[:-3] + "rnt"
ptt = gff[:-3] + "ptt"
fasta = self.helper.get_correct_file(self.tmp_files["tar"],
".fa", prefix, None, None)
if fasta:
self.converter.convert_gff2rntptt(gff, fasta, ptt, rnt,
None, None)
log.write("\t" + ptt + " is generated.\n")
log.write("\t" + rnt + " is generated.\n")
def _remove_files(self, args_ratt, out_gbk, log):
self.helper.remove_all_content(args_ratt.gff_outfolder, ".gff", "file")
self.helper.remove_all_content(args_ratt.gff_outfolder, ".ptt", "file")
self.helper.remove_all_content(args_ratt.gff_outfolder, ".rnt", "file")
log.write("Moving the final output files to {0}.\n".format(args_ratt.gff_outfolder))
self.helper.move_all_content(self.tmp_files["out_gff"],
args_ratt.gff_outfolder, None)
log.write("Remove the temperary files.\n")
shutil.rmtree(self.tmp_files["out_gff"])
shutil.rmtree(self.tmp_files["tar"])
shutil.rmtree(self.tmp_files["ref"])
self.helper.remove_tmp_dir(args_ratt.tar_fastas)
self.helper.remove_tmp_dir(args_ratt.ref_fastas)
self.helper.remove_tmp_dir(args_ratt.ref_embls)
self.helper.remove_tmp_dir(args_ratt.ref_gbk)
def _convert_to_gff(self, ratt_result, args_ratt, files, log):
name = ratt_result.split(".")
filename = ".".join(name[1:-2]) + ".gff"
output_file = os.path.join(args_ratt.output_path, filename)
self.converter.convert_embl2gff(
os.path.join(args_ratt.output_path, ratt_result), output_file)
self.format_fixer.fix_ratt(output_file, ".".join(name[1:-2]),
"tmp_gff")
shutil.move("tmp_gff", output_file)
shutil.copy(output_file, os.path.join(args_ratt.gff_outfolder,
filename))
log.write("\t" + os.path.join(args_ratt.gff_outfolder, filename) +
" is generated.\n")
files.append(filename)
def _parser_embl_gbk(self, files):
self.helper.check_make_folder(self.gbk)
for file_ in files:
close = False
with open(file_, "r") as f_h:
for line in f_h:
if (line.startswith("LOCUS")):
out = open(self.gbk_tmp, "w")
datas = line.split(" ")
for data in datas:
if (len(data) != 0) and (data != "LOCUS"):
filename = ".".join([data.strip(), "gbk"])
break
elif (line.startswith("VERSION")):
datas = line.split(" ")
for data in datas:
if (len(data) != 0) and (data != "VERSION"):
new_filename = ".".join([data.strip(), "gbk"])
break
if new_filename.find(filename):
filename = new_filename
if out:
out.write(line)
if line.startswith("//"):
out.close()
close = True
shutil.move(self.gbk_tmp,
os.path.join(self.gbk, filename))
if not close:
out.close()
return self.gbk
def _convert_embl(self, ref_embls, log):
'''convert gbk to embl'''
detect_gbk = False
gbks = []
out_gbk = None
for embl in os.listdir(ref_embls):
if (embl.endswith(".gbk")) or (
embl.endswith(".gbff")) or (
embl.endswith(".gb")):
detect_gbk = True
gbks.append(os.path.join(ref_embls, embl))
if not detect_gbk:
log.write("--related_gbk_files is assigned, but not gbk files are detected.\n"
"The gbk file names need to be ended at .gbk, .gb, or .gbff. \n")
print("Error: Please assign proper Genebank files!")
sys.exit()
elif detect_gbk:
out_gbk = self._parser_embl_gbk(gbks)
log.write("Running converter.py to convert gbk file to embl format.\n")
self.converter.convert_gbk2embl(out_gbk)
self.helper.check_make_folder(self.embl)
self.helper.move_all_content(out_gbk, self.embl, [".embl"])
log.write("\t" + self.embl + " is generated and the embl files are stored in it.\n")
return out_gbk
def _run_ratt(self, args_ratt, tar, ref, out, log):
if (not os.path.exists(self.embl)) or (
not os.path.exists(os.path.join(
self.tmp_files["tar"], tar + ".fa"))) or (
not os.path.exists(os.path.join(
self.tmp_files["ref"], ref + ".fa"))):
print("Error: Please check --compare_pair, the strain names "
"should be the same as the strain names in fasta, "
"genbank or embl files!")
log.write("The strain names in --compare_pair should be the same "
"as the strain names in fasta, genbank, or embl files.\n")
sys.exit()
log.write("Make sure your RATT version is at least 1.64.\n")
log.write("If the RATT can not run properly, please check the "
"RATT_HOME and PAGIT_HOME is assigned correctly.\n")
temp_embl_folder = os.path.join(self.embl, ref)
os.mkdir(temp_embl_folder)
shutil.copy(os.path.join(self.embl, ref + ".embl"), os.path.join(self.embl, ref))
log.write(" ".join([args_ratt.ratt_path, self.embl,
os.path.join(self.tmp_files["tar"], tar + ".fa"),
args_ratt.element, args_ratt.transfer_type,
os.path.join(self.tmp_files["ref"], ref + ".fa")]) + "\n")
call([args_ratt.ratt_path, temp_embl_folder,
os.path.join(self.tmp_files["tar"], tar + ".fa"),
args_ratt.element, args_ratt.transfer_type,
os.path.join(self.tmp_files["ref"], ref + ".fa")],
stdout=out, stderr=DEVNULL)
shutil.rmtree(temp_embl_folder)
# call([args_ratt.ratt_path, self.embl,
# os.path.join(self.tmp_files["tar"], tar + ".fa"),
# args_ratt.element, args_ratt.transfer_type,
# os.path.join(self.tmp_files["ref"], ref + ".fa")],
# stdout=out, stderr=DEVNULL)
log.write("Done!\n")
def _format_and_run(self, args_ratt, log):
print("Running RATT")
for pair in args_ratt.pairs:
ref = pair.split(":")[0]
tar = pair.split(":")[1]
out = open(self.ratt_log, "w+")
self._run_ratt(args_ratt, tar, ref, out, log)
log.write("The following files are generatd:\n")
for filename in os.listdir():
if ("final" in filename):
log.write("\t" + filename + "\n")
shutil.move(filename, os.path.join(args_ratt.output_path,
filename))
elif (args_ratt.element in filename) or (
"query" in filename) or (
"Reference" in filename) or (
"Query" in filename) or (
"Sequences" in filename):
log.write("\t" + filename + "\n")
if os.path.isfile(filename):
os.remove(filename)
if os.path.isdir(filename):
shutil.rmtree(filename)
out.close()
def annotation_transfer(self, args_ratt, log):
self.multiparser.parser_fasta(args_ratt.tar_fastas)
self.multiparser.parser_fasta(args_ratt.ref_fastas)
out_gbk = None
if args_ratt.ref_embls is None:
out_gbk = self._convert_embl(args_ratt.ref_gbk, log)
self._format_and_run(args_ratt, log)
files = []
for data in os.listdir(args_ratt.output_path):
if "final.embl" in data:
log.write("Running converter.py to convert embl "
"files in {0} to gff, ptt, and rnt format.\n".format(data))
self._convert_to_gff(data, args_ratt, files, log)
self._convert_to_pttrnt(args_ratt.gff_outfolder, files, log)
self.helper.check_make_folder(self.tmp_files["out_gff"])
log.write("Merging the output of {0}.\n".format(data))
for folder in os.listdir(args_ratt.tar_fastas):
files = []
if "_folder" in folder:
datas = folder.split("_folder")
prefix = ".".join(datas[0].split(".")[:-1])
for file_ in os.listdir(os.path.join(args_ratt.tar_fastas,
folder)):
files.append(file_[:-3])
for gff in os.listdir(args_ratt.gff_outfolder):
for file_ in files:
if (".gff" in gff) and (file_ == gff[:-4]):
self.helper.merge_file(os.path.join(
args_ratt.gff_outfolder, gff),
self.tmp_files["gff"])
if (".ptt" in gff) and (file_ == gff[:-4]):
self.helper.merge_file(os.path.join(
args_ratt.gff_outfolder, gff),
self.tmp_files["ptt"])
if (".rnt" in gff) and (file_ == gff[:-4]):
self.helper.merge_file(os.path.join(
args_ratt.gff_outfolder, gff),
self.tmp_files["rnt"])
if os.path.exists(self.tmp_files["gff"]):
shutil.move(self.tmp_files["gff"], os.path.join(
self.tmp_files["out_gff"], prefix + ".gff"))
shutil.move(self.tmp_files["ptt"], os.path.join(
self.tmp_files["out_gff"], prefix + ".ptt"))
shutil.move(self.tmp_files["rnt"], os.path.join(
self.tmp_files["out_gff"], prefix + ".rnt"))
else:
print("Error: Please check your fasta or "
"annotation files, they should only contain "
"the query genome. And make sure your RATT can "
"work properly (check $ANNOgesic/output/"
"annotation_transfer/ratt_log.txt).")
log.write("Please check your fasta or "
"annotation files, they should only contain "
"the query genome. And make sure your RATT can "
"work properly (check $ANNOgesic/output/"
"annotation_transfer/ratt_log.txt).\n")
self._remove_files(args_ratt, out_gbk, log)
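if __name__ == "__main__":
    # Hypothetical invocation sketch: every path below is a placeholder and the
    # attribute names simply mirror the ones read from args_ratt in the class above.
    from types import SimpleNamespace
    args_ratt = SimpleNamespace(
        ref_gbk="input/references/annotations",
        ref_embls=None,
        ref_fastas="input/references/fasta_files",
        tar_fastas="input/queries/fasta_files",
        gff_outfolder="output/annotation_transfer/gffs",
        output_path="output/annotation_transfer",
        ratt_path="start.ratt.sh",
        element="chromosome",
        transfer_type="Strain",
        pairs=["reference_strain:query_strain"])
    # any object with a write() method works as the log here
    RATT(args_ratt).annotation_transfer(args_ratt, sys.stdout)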
|
11560964
|
from __future__ import annotations
import numpy as np
import tensorflow as tf
from .base_environment import BaseEnvironment
from tfne.helper_functions import read_option_from_config
class XOREnvironment(BaseEnvironment):
"""
TFNE compatible environment for the XOR problem
"""
def __init__(self, weight_training, config=None, verbosity=0, **kwargs):
"""
Initializes XOR environment by setting up the dataset and processing the supplied config or supplied config
        parameters. The configuration of the environment can either be supplied via a config file or via separate config
parameters in the initialization.
        @param weight_training: bool flag, indicating whether evaluation should be weight training or not
@param config: ConfigParser instance holding an 'Environment' section specifying the required environment
parameters for the chosen evaluation method.
@param verbosity: integer specifying the verbosity of the evaluation
        @param kwargs: Optionally supplied dict of each configuration parameter separately in order to allow the
creation of the evaluation environment without the requirement of a config file.
"""
# Initialize corresponding input and output mappings
print("Setting up XOR environment...")
self.x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
self.y = np.array([[0], [1], [1], [0]])
        # Initialize loss function to evaluate performance on either evaluation method and save verbosity parameter
self.loss_function = tf.keras.losses.BinaryCrossentropy()
self.verbosity = verbosity
# Determine and setup explicit evaluation method in accordance to supplied parameters
if not weight_training:
# Set up XOR environment as non-weight training, requiring no parameters
self.eval_genome_fitness = self._eval_genome_fitness_non_weight_training
elif config is None and len(kwargs) == 0:
raise RuntimeError("XOR environment is being set up as weight training, though neither config file nor "
"explicit config parameters for the weight training were supplied")
elif len(kwargs) == 0:
# Set up XOR environment as weight training and with a supplied config file
self.eval_genome_fitness = self._eval_genome_fitness_weight_training
self.epochs = read_option_from_config(config, 'EVALUATION', 'epochs')
self.batch_size = read_option_from_config(config, 'EVALUATION', 'batch_size')
elif config is None:
            # Set up XOR environment as weight training with explicitly supplied parameters
self.eval_genome_fitness = self._eval_genome_fitness_weight_training
self.epochs = kwargs['epochs']
self.batch_size = kwargs['batch_size']
def eval_genome_fitness(self, genome) -> float:
        # TO BE OVERRIDDEN
raise RuntimeError()
def _eval_genome_fitness_weight_training(self, genome) -> float:
"""
Evaluates the genome's fitness by obtaining the associated Tensorflow model and optimizer, compiling them and
        then training them for the config specified duration. The genome's fitness is then calculated and returned as
        the binary cross entropy, in percent, between the predicted and the actual results.
@param genome: TFNE compatible genome that is to be evaluated
@return: genome calculated fitness
"""
# Get model and optimizer required for compilation
model = genome.get_model()
optimizer = genome.get_optimizer()
# Compile and train model
model.compile(optimizer=optimizer, loss=self.loss_function)
model.fit(x=self.x, y=self.y, epochs=self.epochs, batch_size=self.batch_size, verbose=self.verbosity)
# Evaluate and return its fitness
evaluated_fitness = float(100 * (1 - self.loss_function(self.y, model(self.x))))
# FIXME Tensorflow arbitrary NaN loss when using float16 datatype. Confirmed by TF.
# Github TF issue: https://github.com/tensorflow/tensorflow/issues/38457
if tf.math.is_nan(evaluated_fitness):
evaluated_fitness = 0
return round(evaluated_fitness, 4)
def _eval_genome_fitness_non_weight_training(self, genome) -> float:
"""
        Evaluates the genome's fitness by calculating and returning the binary cross entropy, in percent, between the
        predicted and the actual results.
@param genome: TFNE compatible genome that is to be evaluated
@return: genome calculated fitness
"""
# Evaluate and return its fitness by calling genome directly with input
evaluated_fitness = float(100 * (1 - self.loss_function(self.y, genome(self.x))))
return round(evaluated_fitness, 4)
def replay_genome(self, genome):
"""
Replay genome on environment by calculating its fitness and printing it.
@param genome: TFNE compatible genome that is to be evaluated
"""
print("Replaying Genome #{}:".format(genome.get_id()))
evaluated_fitness = round(float(100 * (1 - self.loss_function(self.y, genome(self.x)))), 4)
print("Solution Values: \t{}\n".format(self.y))
print("Predicted Values:\t{}\n".format(genome(self.x)))
print("Achieved Fitness:\t{}\n".format(evaluated_fitness))
def duplicate(self) -> XOREnvironment:
"""
@return: New instance of the XOR environment with identical parameters
"""
if hasattr(self, 'epochs'):
return XOREnvironment(True, verbosity=self.verbosity, epochs=self.epochs, batch_size=self.batch_size)
else:
return XOREnvironment(False, verbosity=self.verbosity)
    def get_input_shape(self) -> (int,):
        """@return: Shape of a single input sample expected by the environment"""
        return (2,)
    def get_output_shape(self) -> (int,):
        """@return: Shape of a single output sample expected by the environment"""
        return (1,)
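if __name__ == "__main__":
    # Quick self-contained sketch: evaluate a hypothetical constant-output "genome"
    # in the non-weight-training environment. Real TFNE genomes expose more than
    # this stand-in, which only implements what the call below needs.
    class _ConstantGenome:
        def __call__(self, x):
            # always predict 0.5 for every XOR input row
            return tf.fill((x.shape[0], 1), 0.5)
        def get_id(self):
            return 0
    env = XOREnvironment(weight_training=False)
    print("Fitness of the constant genome:", env.eval_genome_fitness(_ConstantGenome()))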
|
11560965
|
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import functools
import numpy as np
from selfsup.util import DummyDict
from selfsup import ops, caffe
from selfsup.moving_averages import ExponentialMovingAverageExtended
import sys
def _pretrained_resnet_conv_weights_initializer(name, data, info=None, full_info=None, pre_adjust_batch_norm=False, bn_name=None, scale_name=None):
shape = None
#callback = lambda x: x
if name in data and '0' in data[name]:
W = data[name]['0'].copy()
if W.ndim == 2 and name == 'fc1000':
W = W.reshape((W.shape[0], -1, 1, 1))
W = W.transpose(2, 3, 1, 0)
init_type = 'file'
if name == 'conv1' and W.shape[2] == 3:
W = W[:, :, ::-1]
init_type += ':bgr-flipped'
init = tf.constant_initializer(W)
#if full_info['config']['return_weights']:
#full_info['weights'][name+':weights'] = W
shape = W.shape
else:
init_type = 'init'
init = tf.contrib.layers.variance_scaling_initializer()
if info is not None:
info[name + '/weights'] = init_type
return init, shape
def _pretrained_resnet_inner_weights_initializer(name, data, info=DummyDict(), full_info=DummyDict(), pre_adjust_batch_norm=False, bn_name=None, scale_name=None):
shape = None
mu = 0.0
sg = 1.0
if name in data and '0' in data[name]:
W = data[name]['0']
W = W.T
init_type = 'file'
#if pre_adjust_batch_norm and bn_name is not None and bn_name in data:
# bn_data = data[bn_name]
# sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
# W /= sigma
# init_type += ':batch-adjusted'
if pre_adjust_batch_norm and bn_name is not None and bn_name in data:
bn_data = data[bn_name]
bn_sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
sc_sigma = data[scale_name]['0']
#W /= bn_sigma / sc_sigma
#callback = lambda x: x * sc_sigma / bn_sigma
#mu = -bn_data['0'] / bn_data['2'] * sc_sigma / bn_sigma
mu = data[scale_name]['1'] - bn_data['0'] / bn_data['2'] * sc_sigma / bn_sigma
#mu = data[scale_name]['1']
sg = sc_sigma / bn_sigma
init_type += ':batch-adjusted'#(W*={})'.format(sc_sigma / bn_sigma)
init = tf.constant_initializer(W.copy())
#if full_info['config']['return_weights']:
#full_info['weights'][name+':weights'] = W
shape = W.shape
else:
init_type = 'init'
init = tf.contrib.layers.variance_scaling_initializer()
info[name + '/weights'] = init_type
return init, shape, mu, sg
def _pretrained_resnet_biases_initializer(name, data, info=DummyDict(), full_info=DummyDict(), pre_adjust_batch_norm=False, bn_name=None, scale_name=None):
shape = None
#callback = lambda x: x
if name in data and '1' in data[name]:
init_type = 'file'
sc_sigma = data[name]['0'].copy()
sc_bias = data[name]['1'].copy()
#if pre_adjust_batch_norm and scale_name is not None and bn_name is not None and bn_name in data:
if pre_adjust_batch_norm and bn_name is not None and bn_name in data:
bn_data = data[bn_name]
bn_sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
mu = bn_data['0'] / bn_data['2']
#sc_bias = sc_bias - mu * sc_sigma / bn_sigma
#callback = lambda x: x - mu * sc_sigma / bn_sigma
#sc_bias = -mu / bn_sigma
#sc_bias = -mu
sc_bias = np.zeros_like(mu)
init_type += ':batch-adjusted'#(b-={})'.format(mu*sc_sigma/bn_sigma)
init = tf.constant_initializer(sc_bias)
#if full_info['config']['return_weights']:
#full_info['weights'][name+':biases'] = sc_bias
shape = sc_bias.shape
else:
init_type = 'init'
init = tf.constant_initializer(0.0)
info[name + '/biases'] = init_type
return init, shape#, callback
def resnet_conv(x, channels, size=3, padding='SAME', stride=1, batch_norm=False,
phase_test=None, activation=tf.nn.relu, name=None,
parameter_name=None, bn_name=None, scale_name=None, summarize_scale=False, info=DummyDict(), parameters={},
pre_adjust_batch_norm=False, iteration=None):
if parameter_name is None:
parameter_name = name
if scale_name is None:
scale_name = parameter_name
with tf.name_scope(name):
features = int(x.get_shape()[3])
f = channels
shape = [size, size, features, f]
W_init, W_shape = _pretrained_resnet_conv_weights_initializer(parameter_name, parameters,
info=info.get('init'),
full_info=info)
#b_init, b_shape = _pretrained_resnet_biases_initializer(scale_name, parameters,
#info=info.get('init'),
#full_info=info,
#pre_adjust_batch_norm=pre_adjust_batch_norm,
#bn_name=bn_name)
assert W_shape is None or tuple(W_shape) == tuple(shape), "Incorrect weights shape for {} (file: {}, spec: {})".format(name, W_shape, shape)
with tf.variable_scope(name):
W = tf.get_variable('weights', shape, dtype=tf.float32,
initializer=W_init)
raw_conv0 = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding)
#conv0 = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
if stride > 1:
                conv0 = tf.strided_slice(raw_conv0, [0, 0, 0, 0], raw_conv0.get_shape(), [1, stride, stride, 1])
else:
conv0 = raw_conv0
z = conv0
if True:
assert phase_test is not None, "phase_test required for batch norm"
if bn_name in parameters:
bn_data = parameters[bn_name]
bn_mean = bn_data['0'] / bn_data['2']
bn_var = bn_data['1'] / bn_data['2']
else:
bn_mean = np.zeros(f, dtype=np.float32)
bn_var = np.full(f, 0.5, dtype=np.float32) # a bit strange, but we don't know
if scale_name in parameters:
mu = parameters[scale_name]['1']
sg = parameters[scale_name]['0']
else:
mu = np.zeros(f, dtype=np.float32)
sg = np.ones(f, dtype=np.float32)
mm, vv = tf.nn.moments(z, [0, 1, 2], name='mommy')
assert mu.size == f
assert sg.size == f
beta = tf.Variable(tf.constant(mu, shape=[f]), name='beta', trainable=True)
gamma = tf.Variable(tf.constant(sg, shape=[f]), name='gamma', trainable=True)
ema = ExponentialMovingAverageExtended(decay=0.999, value=[bn_mean, bn_var],
num_updates=iteration)
def mean_var_train():
ema_apply_op = ema.apply([mm, vv])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(mm), tf.identity(vv)
def mean_var_test():
return ema.average(mm), ema.average(vv)
mean, var = tf.cond(~phase_test,
mean_var_train,
mean_var_test)
info['activations']['last_mean'] = mean
info['activations']['last_var'] = var
z = tf.nn.batch_normalization(z, mean, var, beta, gamma, 1e-5)
info['activations']['preact_' + name] = z
if activation is not None:
z = activation(z)
if info.get('scale_summary'):
with tf.name_scope('activation'):
tf.summary.scalar('activation/' + name, tf.sqrt(tf.reduce_mean(z**2)))
info['activations'][name] = z
if 'weights' in info:
info['weights'][name + ':weights'] = W
#info['weights'][name + ':biases'] = b
return z
def resnet_atrous_conv(x, channels, size=3, padding='SAME', stride=1, hole=1, batch_norm=False,
phase_test=None, activation=tf.nn.relu, name=None,
parameter_name=None, bn_name=None, scale_name=None, summarize_scale=False, info=DummyDict(), parameters={},
pre_adjust_batch_norm=False):
if parameter_name is None:
parameter_name = name
if scale_name is None:
scale_name = parameter_name
with tf.name_scope(name):
features = int(x.get_shape()[3])
f = channels
shape = [size, size, features, f]
W_init, W_shape = _pretrained_resnet_conv_weights_initializer(parameter_name, parameters,
info=info.get('init'),
pre_adjust_batch_norm=pre_adjust_batch_norm,
bn_name=bn_name, scale_name=scale_name)
b_init, b_shape = _pretrained_resnet_biases_initializer(scale_name, parameters,
info=info.get('init'),
pre_adjust_batch_norm=pre_adjust_batch_norm,
bn_name=bn_name)
assert W_shape is None or tuple(W_shape) == tuple(shape), "Incorrect weights shape for {} (file: {}, spec: {})".format(name, W_shape, shape)
assert b_shape is None or tuple(b_shape) == (f,), "Incorrect bias shape for {} (file: {}, spec; {})".format(name, b_shape, (f,))
with tf.variable_scope(name):
W = tf.get_variable('weights', shape, dtype=tf.float32,
initializer=W_init)
b = tf.get_variable('biases', [f], dtype=tf.float32,
initializer=b_init)
if hole == 1:
raw_conv0 = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding)
else:
assert stride == 1
raw_conv0 = tf.nn.atrous_conv2d(x, W, rate=hole, padding=padding)
#conv0 = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
if stride > 1:
conv0 = tf.strided_slice(raw_conv0, [0, 0, 0, 0], raw_conv0.get_shape(), [1, stride, stride, 1])
else:
conv0 = raw_conv0
h1 = tf.reshape(tf.nn.bias_add(conv0, b), conv0.get_shape())
z = h1
if activation is not None:
z = activation(z)
if info.get('scale_summary'):
with tf.name_scope('activation'):
tf.summary.scalar('activation/' + name, tf.sqrt(tf.reduce_mean(z**2)))
info['activations'][name] = z
return z
def resnet_inner(x, channels, info=DummyDict(), stddev=None,
activation=tf.nn.relu, name=None, parameters={},
parameter_name=None):
if parameter_name is None:
parameter_name = name
with tf.name_scope(name):
f = channels
features = np.prod(x.get_shape().as_list()[1:])
xflat = tf.reshape(x, [-1, features])
shape = [features, channels]
W_init, W_shape, mu, sg = _pretrained_resnet_inner_weights_initializer(parameter_name, parameters, info=info.get('init'))
b_init, b_shape = _pretrained_resnet_biases_initializer(parameter_name, parameters, info=info.get('init'))
assert W_shape is None or tuple(W_shape) == tuple(shape), "Incorrect weights shape for {} (file: {}, spec: {})".format(name, W_shape, shape)
assert b_shape is None or tuple(b_shape) == (f,), "Incorrect bias shape for {} (file: {}, spec; {})".format(name, b_shape, (f,))
with tf.variable_scope(name):
W = tf.get_variable('weights', shape, dtype=tf.float32,
initializer=W_init)
#b = tf.get_variable('biases', [f], dtype=tf.float32,
#initializer=b_init)
z = tf.matmul(xflat, W)
z = z * sg + mu
#z = tf.nn.bias_add(z, b)
if activation is not None:
z = activation(z)
info['activations'][name] = z
if info.get('scale_summary'):
with tf.name_scope('activation'):
tf.summary.scalar('activation/' + name, tf.sqrt(tf.reduce_mean(z**2)))
return z
def build_network(x, info=DummyDict(), parameters={},
phase_test=None, convolutional=False, final_layer=True,
pre_adjust_batch_norm=False,
num_features_mult=1.0, iteration=None):
    # Set up ResNet-152
conv = functools.partial(resnet_conv, size=3, parameters=parameters,
info=info, pre_adjust_batch_norm=pre_adjust_batch_norm,
phase_test=phase_test, iteration=iteration)
pool = functools.partial(ops.max_pool, info=info)
avg_pool = functools.partial(ops.avg_pool, info=info)
dropout = functools.partial(ops.dropout, phase_test=phase_test, info=info)
def num(f):
return int(f * num_features_mult)
z = x
conv1 = conv(z, num(64), size=7, stride=2, name='conv1', bn_name='bn_conv1',
scale_name='scale_conv1')
pool1 = pool(conv1, 3, stride=2, name='pool1')
res2a_branch1 = conv(pool1, num(256), size=1, name='res2a_branch1', bn_name='bn2a_branch1',
scale_name='scale2a_branch1', activation=None)
res2a_branch2a = conv(pool1, num(64), size=1, name='res2a_branch2a', bn_name='bn2a_branch2a',
scale_name='scale2a_branch2a')
res2a_branch2b = conv(res2a_branch2a, num(64), size=3, name='res2a_branch2b', bn_name='bn2a_branch2b',
scale_name='scale2a_branch2b')
res2a_branch2c = conv(res2a_branch2b, num(256), size=1, name='res2a_branch2c', bn_name='bn2a_branch2c',
scale_name='scale2a_branch2c', activation=None)
res2a_preact = tf.add(res2a_branch1, res2a_branch2c)
info['activations']['preact_res2a'] = res2a_preact
res2a = tf.nn.relu(res2a_preact, name='res2a')
info['activations']['res2a'] = res2a
# ---
"""
:call nobias-conv 1 0 1 64 res2a res2b_branch2a
:call batch-norm res2b_branch2a bn2b_branch2a
:call bias res2b_branch2a scale2b_branch2a
:call relu res2b_branch2a
:#
:call nobias-conv 3 1 1 64 res2b_branch2a res2b_branch2b
:call batch-norm res2b_branch2b bn2b_branch2b
:call bias res2b_branch2b scale2b_branch2b
:call relu res2b_branch2b
:#
:call nobias-conv 1 0 1 256 res2b_branch2b res2b_branch2c
:call batch-norm res2b_branch2c bn2b_branch2c
:call bias res2b_branch2c scale2b_branch2c
:call add res2a res2b_branch2c res2b
:call relu res2b
"""
def block(x, ch1, ch2, b):
output = 'res{}'.format(b)
branch2a = conv(x, num(ch1), size=1, name='res{}_branch2a'.format(b),
bn_name='bn{}_branch2a'.format(b),
scale_name='scale{}_branch2a'.format(b))
branch2b = conv(branch2a, num(ch1), size=3, name='res{}_branch2b'.format(b),
bn_name='bn{}_branch2b'.format(b),
scale_name='scale{}_branch2b'.format(b))
branch2c = conv(branch2b, num(ch2), size=1, name='res{}_branch2c'.format(b),
bn_name='bn{}_branch2c'.format(b),
scale_name='scale{}_branch2c'.format(b), activation=None)
z0 = tf.add(x, branch2c)
info['activations']['preact_' + output] = z0
z = tf.nn.relu(z0, name=output)
info['activations'][output] = z
return z
"""
:call nobias-conv 1 0 2 ${ch2} res${a} res${b}_branch1
:call batch-norm res${b}_branch1 bn${b}_branch1
:call bias res${b}_branch1 scale${b}_branch1
:#
:call nobias-conv 1 0 2 ${ch1} res${a} res${b}_branch2a
:call batch-norm res${b}_branch2a bn${b}_branch2a
:call bias res${b}_branch2a scale${b}_branch2a
:call relu res${b}_branch2a
:#
:call nobias-conv 3 1 1 ${ch1} res${b}_branch2a res${b}_branch2b
:call batch-norm res${b}_branch2b bn${b}_branch2b
:call bias res${b}_branch2b scale${b}_branch2b
:call relu res${b}_branch2b
:#
:call nobias-conv 1 0 1 ${ch2} res${b}_branch2b res${b}_branch2c
:call batch-norm res${b}_branch2c bn${b}_branch2c
:call bias res${b}_branch2c scale${b}_branch2c
:call add res${b}_branch1 res${b}_branch2c res${b}
:call relu res${b}
"""
def block_reduce(x, ch1, ch2, b, stride=2):
output = 'res{}'.format(b)
branch1 = conv(x, num(ch2), size=1, stride=stride,
name='res{}_branch1'.format(b),
bn_name='bn{}_branch1'.format(b),
scale_name='scale{}_branch1'.format(b),
activation=None)
branch2a = conv(x, num(ch1), size=1, stride=stride,
name='res{}_branch2a'.format(b),
bn_name='bn{}_branch2a'.format(b),
scale_name='scale{}_branch2a'.format(b))
branch2b = conv(branch2a, num(ch1), size=3,
name='res{}_branch2b'.format(b),
bn_name='bn{}_branch2b'.format(b),
scale_name='scale{}_branch2b'.format(b))
branch2c = conv(branch2b, num(ch2), size=1,
name='res{}_branch2c'.format(b),
bn_name='bn{}_branch2c'.format(b),
scale_name='scale{}_branch2c'.format(b), activation=None)
z0 = tf.add(branch1, branch2c)
info['activations']['preact_' + output] = z0
z = tf.nn.relu(z0, name=output)
#z = tf.nn.relu(tf.add(branch1, branch2c), name=output)
info['activations'][output] = z
return z
res2b = block(res2a, 64, 256, '2b')
res2c = block(res2b, 64, 256, '2c')
res3a = block_reduce(res2c, 128, 512, '3a')
"""
:call resnet 128 512 3a 3b1
:call resnet 128 512 3b1 3b2
:call resnet 128 512 3b2 3b3
:call resnet 128 512 3b3 3b4
:call resnet 128 512 3b4 3b5
:call resnet 128 512 3b5 3b6
:call resnet 128 512 3b6 3b7
"""
res3b1 = block(res3a, 128, 512, '3b1')
res3b2 = block(res3b1, 128, 512, '3b2')
res3b3 = block(res3b2, 128, 512, '3b3')
res3b4 = block(res3b3, 128, 512, '3b4')
res3b5 = block(res3b4, 128, 512, '3b5')
res3b6 = block(res3b5, 128, 512, '3b6')
res3b7 = block(res3b6, 128, 512, '3b7')
"""
:call resnet-reduce 256 1024 3b7 4a
"""
res4a = block_reduce(res3b7, 256, 1024, '4a')
"""
:call resnet 256 1024 4a 4b1
:call resnet 256 1024 4b1 4b2
:call resnet 256 1024 4b2 4b3
:call resnet 256 1024 4b3 4b4
:call resnet 256 1024 4b4 4b5
:call resnet 256 1024 4b5 4b6
:call resnet 256 1024 4b6 4b7
:call resnet 256 1024 4b7 4b8
:call resnet 256 1024 4b8 4b9
:call resnet 256 1024 4b9 4b10
:call resnet 256 1024 4b10 4b11
:call resnet 256 1024 4b11 4b12
:call resnet 256 1024 4b12 4b13
:call resnet 256 1024 4b13 4b14
:call resnet 256 1024 4b14 4b15
:call resnet 256 1024 4b15 4b16
:call resnet 256 1024 4b16 4b17
:call resnet 256 1024 4b17 4b18
:call resnet 256 1024 4b18 4b19
:call resnet 256 1024 4b19 4b20
:call resnet 256 1024 4b20 4b21
:call resnet 256 1024 4b21 4b22
:call resnet 256 1024 4b22 4b23
:call resnet 256 1024 4b23 4b24
:call resnet 256 1024 4b24 4b25
:call resnet 256 1024 4b25 4b26
:call resnet 256 1024 4b26 4b27
:call resnet 256 1024 4b27 4b28
:call resnet 256 1024 4b28 4b29
:call resnet 256 1024 4b29 4b30
:call resnet 256 1024 4b30 4b31
:call resnet 256 1024 4b31 4b32
:call resnet 256 1024 4b32 4b33
:call resnet 256 1024 4b33 4b34
:call resnet 256 1024 4b34 4b35
"""
res4b1 = block(res4a, 256, 1024, '4b1')
res4b2 = block(res4b1, 256, 1024, '4b2')
res4b3 = block(res4b2, 256, 1024, '4b3')
res4b4 = block(res4b3, 256, 1024, '4b4')
res4b5 = block(res4b4, 256, 1024, '4b5')
res4b6 = block(res4b5, 256, 1024, '4b6')
res4b7 = block(res4b6, 256, 1024, '4b7')
res4b8 = block(res4b7, 256, 1024, '4b8')
res4b9 = block(res4b8, 256, 1024, '4b9')
res4b10 = block(res4b9, 256, 1024, '4b10')
res4b11 = block(res4b10, 256, 1024, '4b11')
res4b12 = block(res4b11, 256, 1024, '4b12')
res4b13 = block(res4b12, 256, 1024, '4b13')
res4b14 = block(res4b13, 256, 1024, '4b14')
res4b15 = block(res4b14, 256, 1024, '4b15')
res4b16 = block(res4b15, 256, 1024, '4b16')
res4b17 = block(res4b16, 256, 1024, '4b17')
res4b18 = block(res4b17, 256, 1024, '4b18')
res4b19 = block(res4b18, 256, 1024, '4b19')
res4b20 = block(res4b19, 256, 1024, '4b20')
res4b21 = block(res4b20, 256, 1024, '4b21')
res4b22 = block(res4b21, 256, 1024, '4b22')
res4b23 = block(res4b22, 256, 1024, '4b23')
res4b24 = block(res4b23, 256, 1024, '4b24')
res4b25 = block(res4b24, 256, 1024, '4b25')
res4b26 = block(res4b25, 256, 1024, '4b26')
res4b27 = block(res4b26, 256, 1024, '4b27')
res4b28 = block(res4b27, 256, 1024, '4b28')
res4b29 = block(res4b28, 256, 1024, '4b29')
res4b30 = block(res4b29, 256, 1024, '4b30')
res4b31 = block(res4b30, 256, 1024, '4b31')
res4b32 = block(res4b31, 256, 1024, '4b32')
res4b33 = block(res4b32, 256, 1024, '4b33')
res4b34 = block(res4b33, 256, 1024, '4b34')
res4b35 = block(res4b34, 256, 1024, '4b35')
"""
:call resnet-reduce 512 2048 4b35 5a
"""
res5a = block_reduce(res4b35, 512, 2048, '5a')
"""
:call resnet 512 2048 5a 5b
:call resnet 512 2048 5b 5c
"""
res5b = block(res5a, 512, 2048, '5b')
res5c = block(res5b, 512, 2048, '5c')
"""
layer {
bottom: "res5c"
top: "pool5"
name: "pool5"
type: "Pooling"
pooling_param {
kernel_size: 7
stride: 1
pool: AVE
}
}
"""
if final_layer:
pool5 = avg_pool(res5c, 7, stride=1, name='pool5', padding='VALID')
if convolutional:
z = conv(pool5, 1000, size=1, name='fc1000', activation=None)
else:
z = resnet_inner(pool5, 1000, info=info, parameters=parameters, activation=None, name='fc1000')
else:
z = res5c
return z
def build_network_atrous2(x, info=DummyDict(), parameters={},
phase_test=None, convolutional=False, final_layer=True,
pre_adjust_batch_norm=False):
    # Set up ResNet-152 (atrous variant)
conv = functools.partial(resnet_conv, size=3, parameters=parameters,
info=info, pre_adjust_batch_norm=pre_adjust_batch_norm)
aconv = functools.partial(resnet_atrous_conv, size=3, parameters=parameters,
info=info, pre_adjust_batch_norm=pre_adjust_batch_norm)
pool = functools.partial(ops.max_pool, info=info)
avg_pool = functools.partial(ops.avg_pool, info=info)
dropout = functools.partial(ops.dropout, phase_test=phase_test, info=info)
z = x
conv1 = conv(z, 64, size=7, stride=2, name='conv1', bn_name='bn_conv1',
scale_name='scale_conv1')
pool1 = pool(conv1, 3, stride=2, name='pool1')
res2a_branch1 = conv(pool1, 256, size=1, name='res2a_branch1', bn_name='bn2a_branch1',
scale_name='scale2a_branch1', activation=None)
res2a_branch2a = conv(pool1, 64, size=1, name='res2a_branch2a', bn_name='bn2a_branch2a',
scale_name='scale2a_branch2a')
res2a_branch2b = conv(res2a_branch2a, 64, size=3, name='res2a_branch2b', bn_name='bn2a_branch2b',
scale_name='scale2a_branch2b')
res2a_branch2c = conv(res2a_branch2b, 256, size=1, name='res2a_branch2c', bn_name='bn2a_branch2c',
scale_name='scale2a_branch2c', activation=None)
res2a = tf.nn.relu(tf.add(res2a_branch1, res2a_branch2c), name='res2a')
info['activations']['res2a'] = res2a
# ---
"""
:call nobias-conv 1 0 1 64 res2a res2b_branch2a
:call batch-norm res2b_branch2a bn2b_branch2a
:call bias res2b_branch2a scale2b_branch2a
:call relu res2b_branch2a
:#
:call nobias-conv 3 1 1 64 res2b_branch2a res2b_branch2b
:call batch-norm res2b_branch2b bn2b_branch2b
:call bias res2b_branch2b scale2b_branch2b
:call relu res2b_branch2b
:#
:call nobias-conv 1 0 1 256 res2b_branch2b res2b_branch2c
:call batch-norm res2b_branch2c bn2b_branch2c
:call bias res2b_branch2c scale2b_branch2c
:call add res2a res2b_branch2c res2b
:call relu res2b
"""
def block(x, ch1, ch2, b, hole=1):
output = 'res{}'.format(b)
branch2a = aconv(x, ch1, size=1, hole=hole, name='res{}_branch2a'.format(b),
bn_name='bn{}_branch2a'.format(b),
scale_name='scale{}_branch2a'.format(b))
branch2b = aconv(branch2a, ch1, size=3, hole=hole, name='res{}_branch2b'.format(b),
bn_name='bn{}_branch2b'.format(b),
scale_name='scale{}_branch2b'.format(b))
branch2c = aconv(branch2b, ch2, size=1, hole=hole, name='res{}_branch2c'.format(b),
bn_name='bn{}_branch2c'.format(b),
scale_name='scale{}_branch2c'.format(b), activation=None)
z = tf.nn.relu(tf.add(x, branch2c), name=output)
info['activations'][output] = z
return z
"""
:call nobias-conv 1 0 2 ${ch2} res${a} res${b}_branch1
:call batch-norm res${b}_branch1 bn${b}_branch1
:call bias res${b}_branch1 scale${b}_branch1
:#
:call nobias-conv 1 0 2 ${ch1} res${a} res${b}_branch2a
:call batch-norm res${b}_branch2a bn${b}_branch2a
:call bias res${b}_branch2a scale${b}_branch2a
:call relu res${b}_branch2a
:#
:call nobias-conv 3 1 1 ${ch1} res${b}_branch2a res${b}_branch2b
:call batch-norm res${b}_branch2b bn${b}_branch2b
:call bias res${b}_branch2b scale${b}_branch2b
:call relu res${b}_branch2b
:#
:call nobias-conv 1 0 1 ${ch2} res${b}_branch2b res${b}_branch2c
:call batch-norm res${b}_branch2c bn${b}_branch2c
:call bias res${b}_branch2c scale${b}_branch2c
:call add res${b}_branch1 res${b}_branch2c res${b}
:call relu res${b}
"""
def block_reduce(x, ch1, ch2, b, stride=2):
output = 'res{}'.format(b)
branch1 = conv(x, ch2, size=1, stride=stride,
name='res{}_branch1'.format(b),
bn_name='bn{}_branch1'.format(b),
scale_name='scale{}_branch1'.format(b),
activation=None)
branch2a = conv(x, ch1, size=1, stride=stride,
name='res{}_branch2a'.format(b),
bn_name='bn{}_branch2a'.format(b),
scale_name='scale{}_branch2a'.format(b))
branch2b = conv(branch2a, ch1, size=3,
name='res{}_branch2b'.format(b),
bn_name='bn{}_branch2b'.format(b),
scale_name='scale{}_branch2b'.format(b))
branch2c = conv(branch2b, ch2, size=1,
name='res{}_branch2c'.format(b),
bn_name='bn{}_branch2c'.format(b),
scale_name='scale{}_branch2c'.format(b), activation=None)
z = tf.nn.relu(tf.add(branch1, branch2c), name=output)
info['activations'][output] = z
return z
res2b = block(res2a, 64, 256, '2b')
res2c = block(res2b, 64, 256, '2c')
res3a = block_reduce(res2c, 128, 512, '3a')
"""
:call resnet 128 512 3a 3b1
:call resnet 128 512 3b1 3b2
:call resnet 128 512 3b2 3b3
:call resnet 128 512 3b3 3b4
:call resnet 128 512 3b4 3b5
:call resnet 128 512 3b5 3b6
:call resnet 128 512 3b6 3b7
"""
res3b1 = block(res3a, 128, 512, '3b1')
res3b2 = block(res3b1, 128, 512, '3b2')
res3b3 = block(res3b2, 128, 512, '3b3')
res3b4 = block(res3b3, 128, 512, '3b4')
res3b5 = block(res3b4, 128, 512, '3b5')
res3b6 = block(res3b5, 128, 512, '3b6')
res3b7 = block(res3b6, 128, 512, '3b7')
"""
:call resnet-reduce 256 1024 3b7 4a
"""
res4a = block_reduce(res3b7, 256, 1024, '4a')
"""
:call resnet 256 1024 4a 4b1
:call resnet 256 1024 4b1 4b2
:call resnet 256 1024 4b2 4b3
:call resnet 256 1024 4b3 4b4
:call resnet 256 1024 4b4 4b5
:call resnet 256 1024 4b5 4b6
:call resnet 256 1024 4b6 4b7
:call resnet 256 1024 4b7 4b8
:call resnet 256 1024 4b8 4b9
:call resnet 256 1024 4b9 4b10
:call resnet 256 1024 4b10 4b11
:call resnet 256 1024 4b11 4b12
:call resnet 256 1024 4b12 4b13
:call resnet 256 1024 4b13 4b14
:call resnet 256 1024 4b14 4b15
:call resnet 256 1024 4b15 4b16
:call resnet 256 1024 4b16 4b17
:call resnet 256 1024 4b17 4b18
:call resnet 256 1024 4b18 4b19
:call resnet 256 1024 4b19 4b20
:call resnet 256 1024 4b20 4b21
:call resnet 256 1024 4b21 4b22
:call resnet 256 1024 4b22 4b23
:call resnet 256 1024 4b23 4b24
:call resnet 256 1024 4b24 4b25
:call resnet 256 1024 4b25 4b26
:call resnet 256 1024 4b26 4b27
:call resnet 256 1024 4b27 4b28
:call resnet 256 1024 4b28 4b29
:call resnet 256 1024 4b29 4b30
:call resnet 256 1024 4b30 4b31
:call resnet 256 1024 4b31 4b32
:call resnet 256 1024 4b32 4b33
:call resnet 256 1024 4b33 4b34
:call resnet 256 1024 4b34 4b35
"""
res4b1 = block(res4a, 256, 1024, '4b1')
res4b2 = block(res4b1, 256, 1024, '4b2')
res4b3 = block(res4b2, 256, 1024, '4b3')
res4b4 = block(res4b3, 256, 1024, '4b4')
res4b5 = block(res4b4, 256, 1024, '4b5')
res4b6 = block(res4b5, 256, 1024, '4b6')
res4b7 = block(res4b6, 256, 1024, '4b7')
res4b8 = block(res4b7, 256, 1024, '4b8')
res4b9 = block(res4b8, 256, 1024, '4b9')
res4b10 = block(res4b9, 256, 1024, '4b10')
res4b11 = block(res4b10, 256, 1024, '4b11')
res4b12 = block(res4b11, 256, 1024, '4b12')
res4b13 = block(res4b12, 256, 1024, '4b13')
res4b14 = block(res4b13, 256, 1024, '4b14')
res4b15 = block(res4b14, 256, 1024, '4b15')
res4b16 = block(res4b15, 256, 1024, '4b16')
res4b17 = block(res4b16, 256, 1024, '4b17')
res4b18 = block(res4b17, 256, 1024, '4b18')
res4b19 = block(res4b18, 256, 1024, '4b19')
res4b20 = block(res4b19, 256, 1024, '4b20')
res4b21 = block(res4b20, 256, 1024, '4b21')
res4b22 = block(res4b21, 256, 1024, '4b22')
res4b23 = block(res4b22, 256, 1024, '4b23')
res4b24 = block(res4b23, 256, 1024, '4b24')
res4b25 = block(res4b24, 256, 1024, '4b25')
res4b26 = block(res4b25, 256, 1024, '4b26')
res4b27 = block(res4b26, 256, 1024, '4b27')
res4b28 = block(res4b27, 256, 1024, '4b28')
res4b29 = block(res4b28, 256, 1024, '4b29')
res4b30 = block(res4b29, 256, 1024, '4b30')
res4b31 = block(res4b30, 256, 1024, '4b31')
res4b32 = block(res4b31, 256, 1024, '4b32')
res4b33 = block(res4b32, 256, 1024, '4b33')
res4b34 = block(res4b33, 256, 1024, '4b34')
res4b35 = block(res4b34, 256, 1024, '4b35')
"""
:call resnet-reduce 512 2048 4b35 5a
"""
res5a = block_reduce(res4b35, 512, 2048, '5a', stride=1)
"""
:call resnet 512 2048 5a 5b
:call resnet 512 2048 5b 5c
"""
res5b = block(res5a, 512, 2048, '5b', hole=2)
res5c = block(res5b, 512, 2048, '5c', hole=2)
"""
layer {
bottom: "res5c"
top: "pool5"
name: "pool5"
type: "Pooling"
pooling_param {
kernel_size: 7
stride: 1
pool: AVE
}
}
"""
if final_layer:
pool5 = avg_pool(res5c, 7, stride=1, name='pool5', padding='VALID')
if convolutional:
z = conv(pool5, 1000, size=1, name='fc1000', activation=None)
else:
z = resnet_inner(pool5, 1000, info=info, parameters=parameters, activation=None, name='fc1000')
else:
z = res5c
return z
def build_network_atrous4(x, info=DummyDict(), parameters={},
phase_test=None, convolutional=False, final_layer=True,
pre_adjust_batch_norm=False):
    # Set up ResNet-152 (atrous variant)
conv = functools.partial(resnet_conv, size=3, parameters=parameters,
info=info, pre_adjust_batch_norm=pre_adjust_batch_norm)
aconv = functools.partial(resnet_atrous_conv, size=3, parameters=parameters,
info=info, pre_adjust_batch_norm=pre_adjust_batch_norm)
pool = functools.partial(ops.max_pool, info=info)
avg_pool = functools.partial(ops.avg_pool, info=info)
dropout = functools.partial(ops.dropout, phase_test=phase_test, info=info)
z = x
conv1 = conv(z, 64, size=7, stride=2, name='conv1', bn_name='bn_conv1',
scale_name='scale_conv1')
pool1 = pool(conv1, 3, stride=2, name='pool1')
res2a_branch1 = conv(pool1, 256, size=1, name='res2a_branch1', bn_name='bn2a_branch1',
scale_name='scale2a_branch1', activation=None)
res2a_branch2a = conv(pool1, 64, size=1, name='res2a_branch2a', bn_name='bn2a_branch2a',
scale_name='scale2a_branch2a')
res2a_branch2b = conv(res2a_branch2a, 64, size=3, name='res2a_branch2b', bn_name='bn2a_branch2b',
scale_name='scale2a_branch2b')
res2a_branch2c = conv(res2a_branch2b, 256, size=1, name='res2a_branch2c', bn_name='bn2a_branch2c',
scale_name='scale2a_branch2c', activation=None)
res2a = tf.nn.relu(tf.add(res2a_branch1, res2a_branch2c), name='res2a')
info['activations']['res2a'] = res2a
# ---
"""
:call nobias-conv 1 0 1 64 res2a res2b_branch2a
:call batch-norm res2b_branch2a bn2b_branch2a
:call bias res2b_branch2a scale2b_branch2a
:call relu res2b_branch2a
:#
:call nobias-conv 3 1 1 64 res2b_branch2a res2b_branch2b
:call batch-norm res2b_branch2b bn2b_branch2b
:call bias res2b_branch2b scale2b_branch2b
:call relu res2b_branch2b
:#
:call nobias-conv 1 0 1 256 res2b_branch2b res2b_branch2c
:call batch-norm res2b_branch2c bn2b_branch2c
:call bias res2b_branch2c scale2b_branch2c
:call add res2a res2b_branch2c res2b
:call relu res2b
"""
def block(x, ch1, ch2, b, hole=1):
output = 'res{}'.format(b)
branch2a = conv(x, ch1, size=1, name='res{}_branch2a'.format(b),
bn_name='bn{}_branch2a'.format(b),
scale_name='scale{}_branch2a'.format(b))
branch2b = aconv(branch2a, ch1, size=3, hole=hole, name='res{}_branch2b'.format(b),
bn_name='bn{}_branch2b'.format(b),
scale_name='scale{}_branch2b'.format(b))
branch2c = conv(branch2b, ch2, size=1, name='res{}_branch2c'.format(b),
bn_name='bn{}_branch2c'.format(b),
scale_name='scale{}_branch2c'.format(b), activation=None)
z = tf.nn.relu(tf.add(x, branch2c), name=output)
info['activations'][output] = z
return z
"""
:call nobias-conv 1 0 2 ${ch2} res${a} res${b}_branch1
:call batch-norm res${b}_branch1 bn${b}_branch1
:call bias res${b}_branch1 scale${b}_branch1
:#
:call nobias-conv 1 0 2 ${ch1} res${a} res${b}_branch2a
:call batch-norm res${b}_branch2a bn${b}_branch2a
:call bias res${b}_branch2a scale${b}_branch2a
:call relu res${b}_branch2a
:#
:call nobias-conv 3 1 1 ${ch1} res${b}_branch2a res${b}_branch2b
:call batch-norm res${b}_branch2b bn${b}_branch2b
:call bias res${b}_branch2b scale${b}_branch2b
:call relu res${b}_branch2b
:#
:call nobias-conv 1 0 1 ${ch2} res${b}_branch2b res${b}_branch2c
:call batch-norm res${b}_branch2c bn${b}_branch2c
:call bias res${b}_branch2c scale${b}_branch2c
:call add res${b}_branch1 res${b}_branch2c res${b}
:call relu res${b}
"""
def block_reduce(x, ch1, ch2, b, stride=2, hole=1):
output = 'res{}'.format(b)
branch1 = conv(x, ch2, size=1, stride=stride,
name='res{}_branch1'.format(b),
bn_name='bn{}_branch1'.format(b),
scale_name='scale{}_branch1'.format(b),
activation=None)
branch2a = conv(x, ch1, size=1, stride=stride,
name='res{}_branch2a'.format(b),
bn_name='bn{}_branch2a'.format(b),
scale_name='scale{}_branch2a'.format(b))
branch2b = aconv(branch2a, ch1, size=3, hole=hole,
name='res{}_branch2b'.format(b),
bn_name='bn{}_branch2b'.format(b),
scale_name='scale{}_branch2b'.format(b))
branch2c = conv(branch2b, ch2, size=1,
name='res{}_branch2c'.format(b),
bn_name='bn{}_branch2c'.format(b),
scale_name='scale{}_branch2c'.format(b), activation=None)
z = tf.nn.relu(tf.add(branch1, branch2c), name=output)
info['activations'][output] = z
return z
res2b = block(res2a, 64, 256, '2b')
res2c = block(res2b, 64, 256, '2c')
res3a = block_reduce(res2c, 128, 512, '3a')
"""
:call resnet 128 512 3a 3b1
:call resnet 128 512 3b1 3b2
:call resnet 128 512 3b2 3b3
:call resnet 128 512 3b3 3b4
:call resnet 128 512 3b4 3b5
:call resnet 128 512 3b5 3b6
:call resnet 128 512 3b6 3b7
"""
res3b1 = block(res3a, 128, 512, '3b1')
res3b2 = block(res3b1, 128, 512, '3b2')
res3b3 = block(res3b2, 128, 512, '3b3')
res3b4 = block(res3b3, 128, 512, '3b4')
res3b5 = block(res3b4, 128, 512, '3b5')
res3b6 = block(res3b5, 128, 512, '3b6')
res3b7 = block(res3b6, 128, 512, '3b7')
"""
:call resnet-reduce 256 1024 3b7 4a
"""
res4a = block_reduce(res3b7, 256, 1024, '4a', stride=1, hole=2)
"""
:call resnet 256 1024 4a 4b1
:call resnet 256 1024 4b1 4b2
:call resnet 256 1024 4b2 4b3
:call resnet 256 1024 4b3 4b4
:call resnet 256 1024 4b4 4b5
:call resnet 256 1024 4b5 4b6
:call resnet 256 1024 4b6 4b7
:call resnet 256 1024 4b7 4b8
:call resnet 256 1024 4b8 4b9
:call resnet 256 1024 4b9 4b10
:call resnet 256 1024 4b10 4b11
:call resnet 256 1024 4b11 4b12
:call resnet 256 1024 4b12 4b13
:call resnet 256 1024 4b13 4b14
:call resnet 256 1024 4b14 4b15
:call resnet 256 1024 4b15 4b16
:call resnet 256 1024 4b16 4b17
:call resnet 256 1024 4b17 4b18
:call resnet 256 1024 4b18 4b19
:call resnet 256 1024 4b19 4b20
:call resnet 256 1024 4b20 4b21
:call resnet 256 1024 4b21 4b22
:call resnet 256 1024 4b22 4b23
:call resnet 256 1024 4b23 4b24
:call resnet 256 1024 4b24 4b25
:call resnet 256 1024 4b25 4b26
:call resnet 256 1024 4b26 4b27
:call resnet 256 1024 4b27 4b28
:call resnet 256 1024 4b28 4b29
:call resnet 256 1024 4b29 4b30
:call resnet 256 1024 4b30 4b31
:call resnet 256 1024 4b31 4b32
:call resnet 256 1024 4b32 4b33
:call resnet 256 1024 4b33 4b34
:call resnet 256 1024 4b34 4b35
"""
res4b1 = block(res4a, 256, 1024, '4b1', hole=2)
res4b2 = block(res4b1, 256, 1024, '4b2', hole=2)
res4b3 = block(res4b2, 256, 1024, '4b3', hole=2)
res4b4 = block(res4b3, 256, 1024, '4b4', hole=2)
res4b5 = block(res4b4, 256, 1024, '4b5', hole=2)
res4b6 = block(res4b5, 256, 1024, '4b6', hole=2)
res4b7 = block(res4b6, 256, 1024, '4b7', hole=2)
res4b8 = block(res4b7, 256, 1024, '4b8', hole=2)
res4b9 = block(res4b8, 256, 1024, '4b9', hole=2)
res4b10 = block(res4b9, 256, 1024, '4b10', hole=2)
res4b11 = block(res4b10, 256, 1024, '4b11', hole=2)
res4b12 = block(res4b11, 256, 1024, '4b12', hole=2)
res4b13 = block(res4b12, 256, 1024, '4b13', hole=2)
res4b14 = block(res4b13, 256, 1024, '4b14', hole=2)
res4b15 = block(res4b14, 256, 1024, '4b15', hole=2)
res4b16 = block(res4b15, 256, 1024, '4b16', hole=2)
res4b17 = block(res4b16, 256, 1024, '4b17', hole=2)
res4b18 = block(res4b17, 256, 1024, '4b18', hole=2)
res4b19 = block(res4b18, 256, 1024, '4b19', hole=2)
res4b20 = block(res4b19, 256, 1024, '4b20', hole=2)
res4b21 = block(res4b20, 256, 1024, '4b21', hole=2)
res4b22 = block(res4b21, 256, 1024, '4b22', hole=2)
res4b23 = block(res4b22, 256, 1024, '4b23', hole=2)
res4b24 = block(res4b23, 256, 1024, '4b24', hole=2)
res4b25 = block(res4b24, 256, 1024, '4b25', hole=2)
res4b26 = block(res4b25, 256, 1024, '4b26', hole=2)
res4b27 = block(res4b26, 256, 1024, '4b27', hole=2)
res4b28 = block(res4b27, 256, 1024, '4b28', hole=2)
res4b29 = block(res4b28, 256, 1024, '4b29', hole=2)
res4b30 = block(res4b29, 256, 1024, '4b30', hole=2)
res4b31 = block(res4b30, 256, 1024, '4b31', hole=2)
res4b32 = block(res4b31, 256, 1024, '4b32', hole=2)
res4b33 = block(res4b32, 256, 1024, '4b33', hole=2)
res4b34 = block(res4b33, 256, 1024, '4b34', hole=2)
res4b35 = block(res4b34, 256, 1024, '4b35', hole=2)
"""
:call resnet-reduce 512 2048 4b35 5a
"""
res5a = block_reduce(res4b35, 512, 2048, '5a', stride=1, hole=4)
"""
:call resnet 512 2048 5a 5b
:call resnet 512 2048 5b 5c
"""
res5b = block(res5a, 512, 2048, '5b', hole=4)
res5c = block(res5b, 512, 2048, '5c', hole=4)
"""
layer {
bottom: "res5c"
top: "pool5"
name: "pool5"
type: "Pooling"
pooling_param {
kernel_size: 7
stride: 1
pool: AVE
}
}
"""
#res5c =
#res5c = tf.strided_slice(res5c, [0, 0, 0, 0], res5c.get_shape(), [1, 4, 4, 1])
if final_layer:
pool5 = ops.atrous_avg_pool(res5c, 7, rate=4, name='pool5', padding='SAME' if convolutional else 'VALID')
info['activations']['pool5'] = pool5
##pool5 = avg_pool(res5c, 7 * 4, stride=1, name='pool5', padding='SAME' if convolutional else 'VALID')
#pool5 = res5c
if convolutional:
z = conv(pool5, 1000, size=1, name='fc1000', activation=None)
else:
z = resnet_inner(pool5, 1000, info=info, parameters=parameters, activation=None, name='fc1000')
else:
z = res5c
return z
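if __name__ == '__main__':
    # Graph-construction sketch (TensorFlow 1.x with tf.contrib assumed, matching the
    # code above). With an empty `parameters` dict all weights are randomly initialised
    # instead of being loaded from a converted Caffe snapshot.
    x = tf.placeholder(tf.float32, [None, 224, 224, 3], name='input')
    phase_test = tf.placeholder(tf.bool, shape=[], name='phase_test')
    info = {'activations': {}, 'init': {}}
    logits = build_network(x, info=info, parameters={}, phase_test=phase_test,
                           convolutional=False, final_layer=True)
    print(logits)  # (?, 1000) logits from the fc1000 layer
    print(len(info['activations']), 'activations recorded')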
|
11560968
|
import ctypes
# dll = windll.LoadLibrary('lib/dllcore.dll')
dll=ctypes.CDLL('lib/server2.dll')
# print(dll)
# dll.HelloWorld('WDNMD')
# print(dll.printX())
# a=dll.Double(123)
# print(dll.printX())
#
# print(type(a))
# print(a)
dll.init()
dll.wait()
# buff=dll.returnBuff(buffer)
# print(buff)
|
11560974
|
import numpy as np
from src.layers.pooling import MaxPoolLayer
class TestMaxPoolLayer:
def test_forward_pass_single_channel_single_item(self):
# given
pool_size = (2, 2)
stride = 2
activation = np.array([[
[[1], [2], [2], [1]],
[[3], [4], [0], [0]],
[[5], [2], [1], [1]],
[[3], [4], [0], [3]]
]])
expected_result = np.array([[
[[4], [2]],
[[5], [3]],
]])
# when
layer = MaxPoolLayer(pool_size=pool_size, stride=stride)
result = layer.forward_pass(activation, training=True)
# then
assert result.shape == (1, 2, 2, 1)
assert np.alltrue(expected_result == result)
def test_forward_pass_two_channels_single_item(self):
# given
pool_size = (2, 2)
stride = 2
activation = np.array([[
[
[1, 5],
[2, 2],
[2, 2],
[1, 1]
],
[
[3, 3],
[4, 4],
[0, 3],
[0, 0]
],
[
[5, 2],
[2, 2],
[1, 1],
[1, 1]
],
[
[3, 3],
[4, 4],
[0, 2],
[3, 0]
]
]])
expected_result = np.array([[
[
[4, 5],
[2, 3]
],
[
[5, 4],
[3, 2]
]
]])
# when
layer = MaxPoolLayer(pool_size=pool_size, stride=stride)
result = layer.forward_pass(activation, training=True)
# then
assert result.shape == (1, 2, 2, 2)
assert np.alltrue(expected_result == result)
def test_forward_pass_single_channel_two_items(self):
# given
pool_size = (2, 2)
stride = 2
activation = np.array([
[
[[1], [2], [2], [1]],
[[3], [4], [0], [0]],
[[5], [2], [1], [1]],
[[3], [4], [0], [3]]
],
[
[[5], [2], [2], [1]],
[[3], [4], [3], [0]],
[[2], [2], [1], [1]],
[[3], [4], [2], [0]]
]
])
expected_result = np.array([
[
[[4], [2]],
[[5], [3]]
],
[
[[5], [3]],
[[4], [2]]
]
])
# when
layer = MaxPoolLayer(pool_size=pool_size, stride=stride)
result = layer.forward_pass(activation, training=True)
# then
assert result.shape == (2, 2, 2, 1)
assert np.alltrue(expected_result == result)
def test_backward_pass_single_channel_single_item(self):
# given
pool_size = (2, 2)
stride = 2
forward_activation = np.array([[
[[1], [2], [2], [1]],
[[3], [4], [0], [0]],
[[5], [2], [1], [1]],
[[3], [4], [0], [3]]
]])
backward_activation = np.array([[
[[3], [1]],
[[8], [2]],
]])
expected_backward_result = np.array([[
[[0], [0], [1], [0]],
[[0], [3], [0], [0]],
[[8], [0], [0], [0]],
[[0], [0], [0], [2]]
]])
# when
layer = MaxPoolLayer(pool_size=pool_size, stride=stride)
_ = layer.forward_pass(forward_activation, training=True)
backward_result = layer.backward_pass(backward_activation)
# then
assert np.alltrue(expected_backward_result == backward_result)
def test_backward_pass_two_channels_single_item(self):
# given
pool_size = (2, 2)
stride = 2
forward_activation = np.array([[
[
[1, 5],
[2, 2],
[2, 2],
[1, 1]
],
[
[3, 3],
[4, 4],
[0, 3],
[0, 0]
],
[
[5, 2],
[2, 2],
[1, 1],
[1, 1]
],
[
[3, 3],
[4, 4],
[0, 2],
[3, 0]
]
]])
backward_activation = np.array([[
[
[7, 2],
[4, 3]
],
[
[1, 5],
[2, 2]
]
]])
expected_backward_result = np.array([[
[
[0, 2],
[0, 0],
[4, 0],
[0, 0]
],
[
[0, 0],
[7, 0],
[0, 3],
[0, 0]
],
[
[1, 0],
[0, 0],
[0, 0],
[0, 0]
],
[
[0, 0],
[0, 5],
[0, 2],
[2, 0]
]
]])
# when
layer = MaxPoolLayer(pool_size=pool_size, stride=stride)
_ = layer.forward_pass(forward_activation, training=True)
backward_result = layer.backward_pass(backward_activation)
# then
assert np.alltrue(expected_backward_result == backward_result)
def test_backward_pass_single_channel_two_items(self):
# given
pool_size = (2, 2)
stride = 2
forward_activation = np.array([
[
[[1], [2], [2], [1]],
[[3], [4], [0], [0]],
[[5], [2], [1], [1]],
[[3], [4], [0], [3]]
],
[
[[5], [2], [2], [1]],
[[3], [4], [3], [0]],
[[2], [2], [1], [1]],
[[3], [4], [2], [0]]
]
])
backward_activation = np.array([
[
[[7], [2]],
[[4], [3]]
],
[
[[1], [5]],
[[2], [2]]
]
])
expected_backward_result = np.array([
[
[[0], [0], [2], [0]],
[[0], [7], [0], [0]],
[[4], [0], [0], [0]],
[[0], [0], [0], [3]]
],
[
[[1], [0], [0], [0]],
[[0], [0], [5], [0]],
[[0], [0], [0], [0]],
[[0], [2], [2], [0]]
]
])
# when
layer = MaxPoolLayer(pool_size=pool_size, stride=stride)
_ = layer.forward_pass(forward_activation, training=True)
backward_result = layer.backward_pass(backward_activation)
# then
assert np.alltrue(expected_backward_result == backward_result)
|
11560986
|
from typing import List
from ._types import Options, Key, KeyResponse
from ._utils import _request
class Keys:
_root = "/projects"
def __init__(self, options: Options) -> None:
self.options = options
async def list(self, project_id: str) -> KeyResponse:
"""Retrieves all keys associated with the provided projectId."""
return await _request(
f'{self._root}/{project_id}/keys', self.options
)
async def get(self, project_id: str, key: str) -> Key:
"""Retrieves a specific key associated with the provided projectId."""
return await _request(
f'{self._root}/{project_id}/keys/{key}', self.options
)
async def create(
self, project_id: str, comment: str, scopes: List[str]
) -> Key:
"""Creates an API key with the provided scopes."""
return await _request(
f'{self._root}/{project_id}/keys', self.options,
method='POST', payload={'comment': comment, 'scopes': scopes},
headers={'Content-Type': 'application/json'}
)
async def delete(self, project_id: str, key: str) -> None:
"""Deletes an API key."""
await _request(
f'{self._root}/{project_id}/keys/{key}', self.options,
method='DELETE'
)
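# Hedged usage sketch (added; not part of the original module). The Options
# construction below is hypothetical, since its fields live in ._types and are
# not shown here, so the example is left commented out.
#
# import asyncio
#
# async def _demo():
#     options = Options(api_key="...")  # hypothetical constructor arguments
#     keys = Keys(options)
#     new_key = await keys.create("my-project-id", "ci key", ["keys:read"])
#     print(await keys.list("my-project-id"))
#
# asyncio.run(_demo())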
|
11561002
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import math
import scipy.stats as stats
pixel_mean = Variable(torch.FloatTensor(
[115.9839754, 126.63120922, 137.73309306]).view(1, 3, 1, 1))
eps_div = 1e-20
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels,
kernel_size, bias=False, **kwargs)
def forward(self, x):
x = self.conv(x)
return F.relu(x, inplace=True)
class Decoder(nn.Module):
def __init__(self, input_dim, channel, out1, out2):
super(Decoder, self).__init__()
self.nInput = input_dim
self.conv1 = BasicConv2d(self.nInput, channel, 3, padding=1)
self.conv1_1 = nn.Conv2d(channel, out1[0], 1, padding=0)
self.conv1_2 = nn.Conv2d(channel, out1[1], 1, padding=0)
self.conv2 = BasicConv2d(self.nInput, channel, 3, padding=1)
self.conv2_1 = nn.Conv2d(channel, out2[0], 1, padding=0)
self.conv2_2 = nn.Conv2d(channel, out2[1], 1, padding=0)
def forward(self, input):
x0 = self.conv1(input)
junction_logits = self.conv1_1(x0)
junction_loc = self.conv1_2(x0)
x1 = self.conv2(input)
bin_logits = self.conv2_1(x1)
bin_residual = self.conv2_2(x1)
return junction_logits, junction_loc, bin_logits, bin_residual
class DecodeNet(nn.Module):
def __init__(self, opt, phase):
super(DecodeNet, self).__init__()
H = opt.hype
self.batch_size = opt.batch_size
self.num_bin = H['num_bin']
self.grid_h = self.grid_w = H['grid_size']
self.num_grids = self.grid_h * self.grid_w
self.out_size = self.grid_h * self.grid_w * self.batch_size
if opt.balance:
out1 = (3 * H['max_len'], 2 * H['max_len'])
out2 = (2 * H['num_bin'] * H['max_len'], H['num_bin'] * H['max_len'])
else:
out1 = (2 * H['max_len'], 2 * H['max_len'])
out2 = (2 * H['num_bin'] * H['max_len'], H['num_bin'] * H['max_len'])
decodeFeats = H.get('decodeFeats', 256) # 256 is the reported structure in paper.
self.decoder = Decoder(decodeFeats, 256, out1, out2)
def forward(self, input):
(junction_logits,
junction_loc,
bin_logits,
bin_residual
) = self.decoder(input)
return (
junction_logits,
junction_loc,
bin_logits,
bin_residual
)
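if __name__ == '__main__':
    # Hedged smoke test (added for illustration; not part of the original file).
    # The channel sizes below are arbitrary assumptions chosen only to show the
    # four output heads of Decoder and their channel counts.
    dummy = torch.randn(2, 256, 16, 16)
    decoder = Decoder(input_dim=256, channel=256, out1=(6, 4), out2=(8, 4))
    j_logits, j_loc, b_logits, b_res = decoder(dummy)
    print(j_logits.shape, j_loc.shape, b_logits.shape, b_res.shape)
    # expected: (2, 6, 16, 16) (2, 4, 16, 16) (2, 8, 16, 16) (2, 4, 16, 16)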
|
11561011
|
import pytest
from policyglass import Action, Condition, EffectiveCondition
def test_bad_intersection():
with pytest.raises(ValueError) as ex:
EffectiveCondition(
frozenset({Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"])}), frozenset()
).intersection(Action("S3:*"))
assert "Cannot intersect EffectiveCondition with Action" in str(ex.value)
INTERSECTION_SCENARIOS = {
"proper_subset": {
"first": EffectiveCondition(
frozenset(
{
Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"]),
Condition(key="s3:x-amz-server-side-encryption", operator="StringNotEquals", values=["AES256"]),
}
),
frozenset(),
),
"second": EffectiveCondition(
frozenset({Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"])}), frozenset()
),
"result": EffectiveCondition(
frozenset({Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"])}), frozenset()
),
},
"proper_subset_with_exclusions": {
"first": EffectiveCondition(
frozenset(
{
Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"]),
Condition(key="s3:x-amz-server-side-encryption", operator="StringNotEquals", values=["AES256"]),
}
),
frozenset(),
),
"second": EffectiveCondition(
frozenset(
{
Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"]),
}
),
frozenset({Condition(key="key", operator="BinaryEquals", values=["QmluYXJ5VmFsdWVJbkJhc2U2NA=="])}),
),
"result": EffectiveCondition(
frozenset(
{
Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"]),
}
),
frozenset(),
),
},
# This is commented out until we deal with the fact that some conditions can negate each other, as the exclusions
# of first set won't negate second, but a condition in first that negates a condition in second will.
# "excluded_proper_subset": {
# "first": EffectiveCondition(
# frozenset(
# {
# Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"]),
# Condition(key="s3:x-amz-server-side-encryption", operator="StringNotEquals", values=["AES256"]),
# }
# ),
# frozenset({Condition(key="key", operator="BinaryEquals", values=["QmluYXJ5VmFsdWVJbkJhc2U2NA=="])}),
# ),
# "second": EffectiveCondition(
# frozenset({Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"])}), frozenset()
# ),
# "result": None,
# },
"subset": {
"first": EffectiveCondition(
frozenset({Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"])}), frozenset()
),
"second": EffectiveCondition(
frozenset({Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"])}), frozenset()
),
"result": EffectiveCondition(
frozenset({Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"])}), frozenset()
),
},
"disjoint": {
"first": EffectiveCondition(
frozenset({Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"])}), frozenset()
),
"second": EffectiveCondition(
frozenset(
{Condition(key="s3:x-amz-server-side-encryption", operator="StringNotEquals", values=["AES256"])}
),
frozenset(),
),
"result": EffectiveCondition(frozenset(), frozenset()),
},
"larger": {
"first": EffectiveCondition(
frozenset({Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"])}),
frozenset(),
),
"second": EffectiveCondition(
frozenset(
{
Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"]),
Condition(key="s3:x-amz-server-side-encryption", operator="StringNotEquals", values=["AES256"]),
}
),
frozenset(),
),
"result": EffectiveCondition(
frozenset({Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"])}),
frozenset(),
),
},
# "larger_with_exclusion": {
# "first": EffectiveCondition(Action("S3:Get*")),
# "second": EffectiveCondition(
# frozenset(
# {
# Condition("aws:PrincipalOrgId", "StringNotEquals", ["o-123456"]),
# Condition(key="s3:x-amz-server-side-encryption", operator="StringNotEquals", values=["AES256"]),
# }
# ),
# frozenset(),
# ),
# "result": EffectiveCondition(Action("S3:Get*"), frozenset({Action("S3:GetObject")})),
# },
}
@pytest.mark.parametrize("_, scenario", INTERSECTION_SCENARIOS.items())
def test_intersection(_, scenario):
first, second, result = scenario.values()
assert first.intersection(second) == result
|
11561029
|
from logic_analyzer.screen import Screen
from logic_analyzer.timing_collector import TimingCollector
from logic_analyzer.text_loader import TextLoader
class ScreenRenderer:
def __init__(self):
self.signals = []
self.visible_signals = []
self.failed_signals = []
self.requested_signals = []
self.temp_visible_signals = []
def render_log(self, log, channels=""):
self._load_log_and_build_channel_objects(log)
self._pick_channel_objects_to_render(channels)
return Screen().render(self.visible_signals)
def report_rendered_signals(self):
return self.visible_signals
def report_failed_signals(self):
return self.failed_signals
def _load_log_and_build_channel_objects(self, log):
log_address = log.replace('"','')
text_loader = TextLoader(log_address)
timing_collector = TimingCollector(text_loader)
self.signals = timing_collector.get_raw_info()
def _pick_channel_objects_to_render(self, channels):
self.requested_signals = [channel.strip() for channel in channels.split('|')]
if self.requested_signals == ['']:
self.visible_signals = self.signals
else:
self._sort_and_hide_channel_objects()
def _sort_and_hide_channel_objects(self):
self.temp_visible_signals = [None,] * len(self.requested_signals)
self._move_selected_signals_to_temp_and_remove_request()
        self.visible_signals = [signal for signal in self.temp_visible_signals if signal is not None]
        self.failed_signals = [signal for signal in self.requested_signals if signal is not None]
def _move_selected_signals_to_temp_and_remove_request(self):
for signal in self.signals:
if signal.name in self.requested_signals:
signal_index = self.requested_signals.index(signal.name)
self.requested_signals[signal_index] = None
self.temp_visible_signals[signal_index] = signal
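# Hedged usage sketch (added; not part of the original module). The log path and
# channel names below are hypothetical; rendering a capture and listing the
# requested channels that could not be matched might look like this:
#
# renderer = ScreenRenderer()
# print(renderer.render_log('"capture.log"', channels="CLK | MOSI"))
# print("not found:", renderer.report_failed_signals())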
|
11561058
|
from src.utils import pdump, pload, bmtv, bmtm
from src.lie_algebra import SO3
from termcolor import cprint
from torch.utils.data.dataset import Dataset
from scipy.interpolate import interp1d
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
import torch
import sys
class BaseDataset(Dataset):
def __init__(self, predata_dir, train_seqs, val_seqs, test_seqs, mode, N,
min_train_freq=128, max_train_freq=512, dt=0.005):
super().__init__()
        # directory where the pre-loaded data is stored
self.predata_dir = predata_dir
self.path_normalize_factors = os.path.join(predata_dir, 'nf.p')
self.mode = mode
# choose between training, validation or test sequences
train_seqs, self.sequences = self.get_sequences(train_seqs, val_seqs,
test_seqs)
# get and compute value for normalizing inputs
self.mean_u, self.std_u = self.init_normalize_factors(train_seqs)
self.mode = mode # train, val or test
self._train = False
self._val = False
# noise density
self.imu_std = torch.Tensor([8e-5, 1e-3]).float()
# bias repeatability (without in-run bias stability)
self.imu_b0 = torch.Tensor([1e-3, 1e-3]).float()
# IMU sampling time
self.dt = dt # (s)
# sequence size during training
self.N = N # power of 2
self.min_train_freq = min_train_freq
self.max_train_freq = max_train_freq
self.uni = torch.distributions.uniform.Uniform(-torch.ones(1),
torch.ones(1))
def get_sequences(self, train_seqs, val_seqs, test_seqs):
"""Choose sequence list depending on dataset mode"""
sequences_dict = {
'train': train_seqs,
'val': val_seqs,
'test': test_seqs,
}
return sequences_dict['train'], sequences_dict[self.mode]
def __getitem__(self, i):
mondict = self.load_seq(i)
N_max = mondict['xs'].shape[0]
if self._train: # random start
n0 = torch.randint(0, self.max_train_freq, (1, ))
nend = n0 + self.N
elif self._val: # end sequence
n0 = self.max_train_freq + self.N
nend = N_max - ((N_max - n0) % self.max_train_freq)
else: # full sequence
n0 = 0
nend = N_max - (N_max % self.max_train_freq)
u = mondict['us'][n0: nend]
x = mondict['xs'][n0: nend]
return u, x
def __len__(self):
return len(self.sequences)
def add_noise(self, u):
"""Add Gaussian noise and bias to input"""
noise = torch.randn_like(u)
noise[:, :, :3] = noise[:, :, :3] * self.imu_std[0]
noise[:, :, 3:6] = noise[:, :, 3:6] * self.imu_std[1]
# bias repeatability (without in run bias stability)
b0 = self.uni.sample(u[:, 0].shape).cuda()
b0[:, :, :3] = b0[:, :, :3] * self.imu_b0[0]
b0[:, :, 3:6] = b0[:, :, 3:6] * self.imu_b0[1]
u = u + noise + b0.transpose(1, 2)
return u
def init_train(self):
self._train = True
self._val = False
def init_val(self):
self._train = False
self._val = True
def length(self):
return self._length
def load_seq(self, i):
return pload(self.predata_dir, self.sequences[i] + '.p')
def load_gt(self, i):
return pload(self.predata_dir, self.sequences[i] + '_gt.p')
def init_normalize_factors(self, train_seqs):
if os.path.exists(self.path_normalize_factors):
mondict = pload(self.path_normalize_factors)
return mondict['mean_u'], mondict['std_u']
path = os.path.join(self.predata_dir, train_seqs[0] + '.p')
if not os.path.exists(path):
print("init_normalize_factors not computed")
return 0, 0
print('Start computing normalizing factors ...')
cprint("Do it only on training sequences, it is vital!", 'yellow')
# first compute mean
num_data = 0
for i, sequence in enumerate(train_seqs):
pickle_dict = pload(self.predata_dir, sequence + '.p')
us = pickle_dict['us']
sms = pickle_dict['xs']
if i == 0:
mean_u = us.sum(dim=0)
num_positive = sms.sum(dim=0)
num_negative = sms.shape[0] - sms.sum(dim=0)
else:
mean_u += us.sum(dim=0)
num_positive += sms.sum(dim=0)
num_negative += sms.shape[0] - sms.sum(dim=0)
num_data += us.shape[0]
mean_u = mean_u / num_data
pos_weight = num_negative / num_positive
# second compute standard deviation
for i, sequence in enumerate(train_seqs):
pickle_dict = pload(self.predata_dir, sequence + '.p')
us = pickle_dict['us']
if i == 0:
std_u = ((us - mean_u) ** 2).sum(dim=0)
else:
std_u += ((us - mean_u) ** 2).sum(dim=0)
std_u = (std_u / num_data).sqrt()
normalize_factors = {
'mean_u': mean_u,
'std_u': std_u,
}
print('... ended computing normalizing factors')
print('pos_weight:', pos_weight)
        print('These values must be training parameters!')
print('mean_u :', mean_u)
print('std_u :', std_u)
print('num_data :', num_data)
pdump(normalize_factors, self.path_normalize_factors)
return mean_u, std_u
def read_data(self, data_dir):
raise NotImplementedError
@staticmethod
def interpolate(x, t, t_int):
"""
Interpolate ground truth at the sensor timestamps
"""
# vector interpolation
x_int = np.zeros((t_int.shape[0], x.shape[1]))
for i in range(x.shape[1]):
if i in [4, 5, 6, 7]:
continue
x_int[:, i] = np.interp(t_int, t, x[:, i])
# quaternion interpolation
t_int = torch.Tensor(t_int - t[0])
t = torch.Tensor(t - t[0])
qs = SO3.qnorm(torch.Tensor(x[:, 4:8]))
x_int[:, 4:8] = SO3.qinterp(qs, t, t_int).numpy()
return x_int
class EUROCDataset(BaseDataset):
"""
Dataloader for the EUROC Data Set.
"""
def __init__(self, data_dir, predata_dir, train_seqs, val_seqs,
test_seqs, mode, N, min_train_freq, max_train_freq, dt=0.005):
super().__init__(predata_dir, train_seqs, val_seqs, test_seqs, mode, N, min_train_freq, max_train_freq, dt)
# convert raw data to pre loaded data
self.read_data(data_dir)
def read_data(self, data_dir):
r"""Read the data from the dataset"""
f = os.path.join(self.predata_dir, 'MH_01_easy.p')
if True and os.path.exists(f):
return
print("Start read_data, be patient please")
def set_path(seq):
path_imu = os.path.join(data_dir, seq, "mav0", "imu0", "data.csv")
path_gt = os.path.join(data_dir, seq, "mav0", "state_groundtruth_estimate0", "data.csv")
return path_imu, path_gt
sequences = os.listdir(data_dir)
# read each sequence
for sequence in sequences:
print("\nSequence name: " + sequence)
path_imu, path_gt = set_path(sequence)
imu = np.genfromtxt(path_imu, delimiter=",", skip_header=1)
gt = np.genfromtxt(path_gt, delimiter=",", skip_header=1)
# time synchronization between IMU and ground truth
t0 = np.max([gt[0, 0], imu[0, 0]])
t_end = np.min([gt[-1, 0], imu[-1, 0]])
# start index
idx0_imu = np.searchsorted(imu[:, 0], t0)
idx0_gt = np.searchsorted(gt[:, 0], t0)
# end index
idx_end_imu = np.searchsorted(imu[:, 0], t_end, 'right')
idx_end_gt = np.searchsorted(gt[:, 0], t_end, 'right')
# subsample
imu = imu[idx0_imu: idx_end_imu]
gt = gt[idx0_gt: idx_end_gt]
ts = imu[:, 0]/1e9
# interpolate
gt = self.interpolate(gt, gt[:, 0]/1e9, ts)
# take ground truth position
p_gt = gt[:, 1:4]
p_gt = p_gt - p_gt[0]
            # take ground-truth quaternion pose
q_gt = torch.Tensor(gt[:, 4:8]).double()
q_gt = q_gt / q_gt.norm(dim=1, keepdim=True)
Rot_gt = SO3.from_quaternion(q_gt.cuda(), ordering='wxyz').cpu()
# convert from numpy
p_gt = torch.Tensor(p_gt).double()
v_gt = torch.tensor(gt[:, 8:11]).double()
imu = torch.Tensor(imu[:, 1:]).double()
# compute pre-integration factors for all training
mtf = self.min_train_freq
dRot_ij = bmtm(Rot_gt[:-mtf], Rot_gt[mtf:])
dRot_ij = SO3.dnormalize(dRot_ij.cuda())
dxi_ij = SO3.log(dRot_ij).cpu()
# save for all training
mondict = {
'xs': dxi_ij.float(),
'us': imu.float(),
}
pdump(mondict, self.predata_dir, sequence + ".p")
# save ground truth
mondict = {
'ts': ts,
'qs': q_gt.float(),
'vs': v_gt.float(),
'ps': p_gt.float(),
}
pdump(mondict, self.predata_dir, sequence + "_gt.p")
class TUMVIDataset(BaseDataset):
"""
Dataloader for the TUM-VI Data Set.
"""
def __init__(self, data_dir, predata_dir, train_seqs, val_seqs,
test_seqs, mode, N, min_train_freq, max_train_freq, dt=0.005):
super().__init__(predata_dir, train_seqs, val_seqs, test_seqs, mode, N,
min_train_freq, max_train_freq, dt)
# convert raw data to pre loaded data
self.read_data(data_dir)
# noise density
self.imu_std = torch.Tensor([8e-5, 1e-3]).float()
# bias repeatability (without in-run bias stability)
self.imu_b0 = torch.Tensor([1e-3, 1e-3]).float()
def read_data(self, data_dir):
r"""Read the data from the dataset"""
f = os.path.join(self.predata_dir, 'dataset-room1_512_16_gt.p')
if True and os.path.exists(f):
return
print("Start read_data, be patient please")
def set_path(seq):
path_imu = os.path.join(data_dir, seq, "mav0", "imu0", "data.csv")
path_gt = os.path.join(data_dir, seq, "mav0", "mocap0", "data.csv")
return path_imu, path_gt
sequences = os.listdir(data_dir)
# read each sequence
for sequence in sequences:
print("\nSequence name: " + sequence)
if 'room' not in sequence:
continue
path_imu, path_gt = set_path(sequence)
imu = np.genfromtxt(path_imu, delimiter=",", skip_header=1)
gt = np.genfromtxt(path_gt, delimiter=",", skip_header=1)
# time synchronization between IMU and ground truth
t0 = np.max([gt[0, 0], imu[0, 0]])
t_end = np.min([gt[-1, 0], imu[-1, 0]])
# start index
idx0_imu = np.searchsorted(imu[:, 0], t0)
idx0_gt = np.searchsorted(gt[:, 0], t0)
# end index
idx_end_imu = np.searchsorted(imu[:, 0], t_end, 'right')
idx_end_gt = np.searchsorted(gt[:, 0], t_end, 'right')
# subsample
imu = imu[idx0_imu: idx_end_imu]
gt = gt[idx0_gt: idx_end_gt]
ts = imu[:, 0]/1e9
# interpolate
t_gt = gt[:, 0]/1e9
gt = self.interpolate(gt, t_gt, ts)
# take ground truth position
p_gt = gt[:, 1:4]
p_gt = p_gt - p_gt[0]
            # take ground-truth quaternion pose
q_gt = SO3.qnorm(torch.Tensor(gt[:, 4:8]).double())
Rot_gt = SO3.from_quaternion(q_gt.cuda(), ordering='wxyz').cpu()
# convert from numpy
p_gt = torch.Tensor(p_gt).double()
v_gt = torch.zeros_like(p_gt).double()
v_gt[1:] = (p_gt[1:]-p_gt[:-1])/self.dt
imu = torch.Tensor(imu[:, 1:]).double()
# compute pre-integration factors for all training
mtf = self.min_train_freq
dRot_ij = bmtm(Rot_gt[:-mtf], Rot_gt[mtf:])
dRot_ij = SO3.dnormalize(dRot_ij.cuda())
dxi_ij = SO3.log(dRot_ij).cpu()
# masks with 1 when ground truth is available, 0 otherwise
masks = dxi_ij.new_ones(dxi_ij.shape[0])
tmp = np.searchsorted(t_gt, ts[:-mtf])
diff_t = ts[:-mtf] - t_gt[tmp]
masks[np.abs(diff_t) > 0.01] = 0
# save all the sequence
mondict = {
'xs': torch.cat((dxi_ij, masks.unsqueeze(1)), 1).float(),
'us': imu.float(),
}
pdump(mondict, self.predata_dir, sequence + ".p")
# save ground truth
mondict = {
'ts': ts,
'qs': q_gt.float(),
'vs': v_gt.float(),
'ps': p_gt.float(),
}
pdump(mondict, self.predata_dir, sequence + "_gt.p")
|
11561059
|
from pylearning.model.tensorflow_base import tensorflow_base
from pyspark.sql import SparkSession
from pyspark import SparkContext
import tensorflow as tf
from pyspark.sql.functions import col
class train_boston(tensorflow_base):
@staticmethod
def pre_train():
spark_context = SparkContext.getOrCreate()
        spark = SparkSession.builder.getOrCreate()
df = spark.read.format('csv').option("header","True").load('/train.csv')
cast_df = df.select(*(col(c).cast("double").alias(c) for c in df.columns))
return cast_df
@staticmethod
def train(dataframe, env):
crim = tf.feature_column.numeric_column('crim', dtype=tf.float64, shape=())
zn = tf.feature_column.numeric_column('zn', dtype=tf.float64, shape=())
indus = tf.feature_column.numeric_column('indus', dtype=tf.float64, shape=())
chas = tf.feature_column.numeric_column('chas', dtype=tf.int64, shape=())
nox = tf.feature_column.numeric_column('nox', dtype=tf.float64, shape=())
rm = tf.feature_column.numeric_column('rm', dtype=tf.float64, shape=())
age = tf.feature_column.numeric_column('age', dtype=tf.float64, shape=())
dis = tf.feature_column.numeric_column('dis', dtype=tf.float64, shape=())
rad = tf.feature_column.numeric_column('rad', dtype=tf.int64, shape=())
tax = tf.feature_column.numeric_column('tax', dtype=tf.int64, shape=())
ptratio = tf.feature_column.numeric_column('ptratio', dtype=tf.float64, shape=())
black = tf.feature_column.numeric_column('black', dtype=tf.float64, shape=())
lstat = tf.feature_column.numeric_column('lstat', dtype=tf.float64, shape=())
feature_cols = [crim, zn, indus, chas, nox, rm, age, dis, rad, tax, ptratio, black, lstat]
feature_names = ['ID','crim', 'zn', 'indus', 'chas', 'nox', 'rm', 'age', 'dis', 'rad', 'tax', 'ptratio', 'black',
'lstat']
label_name = 'medv'
        # map each column name to its positional index (avoids shadowing the built-in dict)
        col_index = {name: idx for idx, name in enumerate(feature_names)}
def train_input():
feature_dict = {}
for i in feature_names[1:]:
feature_dict[i] = dataframe.get(i)
_dataset = tf.data.Dataset.from_tensor_slices((feature_dict, dataframe.get(label_name)))
dataset = _dataset.batch(32)
return dataset
ps = tf.contrib.distribute.ParameterServerStrategy()
config = tf.estimator.RunConfig(train_distribute=ps, eval_distribute=ps)
estimator = tf.estimator.LinearRegressor(feature_columns=feature_cols, model_dir='hdfs://emr-header-1:9000/boston', config=config)
train_spec = tf.estimator.TrainSpec(input_fn=train_input, max_steps=100)
eval_spec = tf.estimator.EvalSpec(input_fn=train_input, start_delay_secs=0, throttle_secs=10,steps=10)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
|
11561093
|
import os
import shutil
import tempfile
import unittest
import yaml
from krsh.cmd.group_create.cmd_pipeline import cmd_create_pipeline
class TestCommandCreatePipeline(unittest.TestCase):
def setUp(self):
self.sample_path = os.path.join(os.path.dirname(__file__), "../../samples")
def test_create_pipeline(self):
with tempfile.TemporaryDirectory() as path:
project_path = os.path.join(path, "project")
shutil.copytree(
os.path.join(self.sample_path, "configured-project"), project_path
)
cmd_create_pipeline(project_path, "test-pipeline", "test-ns1,test-ns2")
self.assertTrue(
os.path.exists(
os.path.join(project_path, "pipelines", "test-pipeline"),
)
)
self.assertTrue(
os.path.exists(
os.path.join(
project_path, "pipelines", "test-pipeline", "pipeline.yaml"
)
)
)
self.assertTrue(
os.path.exists(
os.path.join(
project_path, "pipelines", "test-pipeline", "pipeline.py"
)
)
)
with open(
os.path.join(
project_path, "pipelines", "test-pipeline", "pipeline.yaml"
)
) as file:
conf = yaml.safe_load(file)
expected = {
"name": "test-pipeline",
"entry_point": "pipeline.py",
"namespaces": ["test-ns1", "test-ns2"],
}
self.assertEqual(expected, conf)
|
11561107
|
import torch
import torch.nn.functional as F
import numpy as np
import logging
from sklearn.metrics import recall_score
def test(step, dataset_test, filename, unk_class, G, C1, threshold):
G.eval()
C1.eval()
all_pred = []
all_gt = []
entropy_scores = []
    prob_scores = np.zeros([len(dataset_test.dataset), unk_class])
for batch_idx, data in enumerate(dataset_test):
with torch.no_grad():
img_t, label_t, index_t = data[0], data[1], data[2]
img_t, label_t = img_t.cuda(), label_t.cuda()
feat = G(img_t)
out_t = C1(feat)
out_t = F.softmax(out_t, dim=1)
prob_scores[index_t, :] = out_t.data.cpu().numpy()
entr = -torch.sum(out_t * torch.log(out_t), 1).data.cpu().numpy()
_, pred = out_t.data.max(1)
pred = pred.cpu().numpy()
all_gt += list(label_t.data.cpu().numpy())
all_pred += list(pred)
entropy_scores += list(entr)
# list to numpy
all_gt_np = np.array(all_gt)
all_pred_np = np.array(all_pred)
entropy_scores_np = np.array(entropy_scores)
y_true = np.array(all_gt)
y_pred = np.array(all_pred)
pred_unk = np.where(entropy_scores_np > threshold)
y_pred[pred_unk[0]] = unk_class
recall_avg_auc = recall_score(y_true, y_pred, labels=np.unique(y_true), average=None)
overall_acc = np.mean(y_true == y_pred)
unk_idx = np.where(y_true == unk_class)
if len(unk_idx[0]) != 0:
correct_unk = np.where(y_pred[unk_idx[0]] == unk_class)
acc_unk = len(correct_unk[0]) / len(unk_idx[0])
# v1 share in overall
shared_idx = np.where(y_true != unk_class)
shared_gt = y_true[shared_idx[0]]
pre_shared = y_pred[shared_idx[0]]
acc_shared = np.mean(shared_gt == pre_shared)
# v2 share in average
acc_shared = recall_avg_auc[:-1].mean()
h_score = 2 * acc_unk * acc_shared / (acc_unk + acc_shared)
output = [step, list(recall_avg_auc),
'AA %s' % float(recall_avg_auc.mean()),
'H-score %s' % float(h_score)]
# output = [step, list(recall_avg_auc), 'my_acc %s' % float(my_acc),
# 'allclass per class mean acc %s' % float(recall_avg_auc.mean()),
# 'inclass per class mean acc %s' % float(recall_avg_auc[:-1].mean()),
# 'overall acc %s' % float(overall_acc), 'unknow acc %s' % float(acc_unk),
# 'shared acc %s' % float(acc_shared), 'H-score %s' % float(h_score), 'my_acc unkown %s' % float(my_acc_unk)]
save_mat = {'gt': all_gt_np,
'pred': all_pred_np,
'entropy_score': entropy_scores_np,
'prob_scores': prob_scores,
'AA': recall_avg_auc.mean(),
'H-score': h_score,
'epoch': step}
else:
output = [step, list(recall_avg_auc), 'AA %s' % float(recall_avg_auc.mean()),
'OA %s' % float(overall_acc)]
save_mat = {'gt': all_gt_np,
'pred': all_pred_np,
'entropy_score': entropy_scores_np,
'prob_scores': prob_scores,
'AA': recall_avg_auc.mean(),
'OA': overall_acc,
'epoch': step}
logger = logging.getLogger(__name__)
logging.basicConfig(filename=filename, format="%(message)s")
logger.setLevel(logging.INFO)
print('\n', output, '\n')
logger.info(output)
return save_mat
|
11561152
|
import json
import pytest
import responses
from satosa.backends.orcid import OrcidBackend
from satosa.context import Context
from satosa.internal import InternalData
from satosa.response import Response
from unittest.mock import Mock
from urllib.parse import urljoin, urlparse, parse_qsl
ORCID_PERSON_ID = "0000-0000-0000-0000"
ORCID_PERSON_GIVEN_NAME = "orcid_given_name"
ORCID_PERSON_FAMILY_NAME = "orcid_family_name"
ORCID_PERSON_NAME = "{} {}".format(
ORCID_PERSON_GIVEN_NAME, ORCID_PERSON_FAMILY_NAME)
ORCID_PERSON_EMAIL = "orcid_email"
ORCID_PERSON_COUNTRY = "XX"
mock_get_state = Mock(return_value="abcdef")
class TestOrcidBackend(object):
@pytest.fixture(autouse=True)
def create_backend(self, internal_attributes, backend_config):
self.orcid_backend = OrcidBackend(
Mock(),
internal_attributes,
backend_config,
backend_config["base_url"],
"orcid"
)
@pytest.fixture
def backend_config(self):
return {
"authz_page": 'orcid/auth/callback',
"base_url": "https://client.example.com",
"client_config": {"client_id": "orcid_client_id"},
"client_secret": "orcid_secret",
"scope": ["/authenticate"],
"response_type": "code",
"server_info": {
"authorization_endpoint": "https://orcid.org/oauth/authorize",
"token_endpoint": "https://pub.orcid.org/oauth/token",
"user_info": "https://pub.orcid.org/v2.0/"
}
}
@pytest.fixture
def internal_attributes(self):
return {
"attributes": {
"address": {"orcid": ["address"]},
"displayname": {"orcid": ["name"]},
"edupersontargetedid": {"orcid": ["orcid"]},
"givenname": {"orcid": ["givenname"]},
"mail": {"orcid": ["mail"]},
"name": {"orcid": ["name"]},
"surname": {"orcid": ["surname"]},
}
}
@pytest.fixture
def userinfo(self):
return {
"name": {
"given-names": {"value": ORCID_PERSON_GIVEN_NAME},
"family-name": {"value": ORCID_PERSON_FAMILY_NAME},
},
"emails": {
"email": [
{
"email": ORCID_PERSON_EMAIL,
"verified": True,
"primary": True
}
]
},
"addresses": {
"address": [
{"country": {"value": ORCID_PERSON_COUNTRY}}
]
}
}
@pytest.fixture
def userinfo_private(self):
return {
"name": {
"given-names": {"value": ORCID_PERSON_GIVEN_NAME},
"family-name": {"value": ORCID_PERSON_FAMILY_NAME},
},
"emails": {
"email": [
]
},
"addresses": {
"address": [
]
}
}
def assert_expected_attributes(self, user_claims, actual_attributes):
print(user_claims)
print(actual_attributes)
expected_attributes = {
"address": [ORCID_PERSON_COUNTRY],
"displayname": [ORCID_PERSON_NAME],
"edupersontargetedid": [ORCID_PERSON_ID],
"givenname": [ORCID_PERSON_GIVEN_NAME],
"mail": [ORCID_PERSON_EMAIL],
"name": [ORCID_PERSON_NAME],
"surname": [ORCID_PERSON_FAMILY_NAME],
}
assert actual_attributes == expected_attributes
def setup_token_endpoint(self, token_endpoint_url):
token_response = {
"access_token": "orcid_access_token",
"token_type": "bearer",
"expires_in": 9999999999999,
"name": ORCID_PERSON_NAME,
"orcid": ORCID_PERSON_ID
}
responses.add(
responses.POST,
token_endpoint_url,
body=json.dumps(token_response),
status=200,
content_type="application/json"
)
def setup_userinfo_endpoint(self, userinfo_endpoint_url, userinfo):
responses.add(
responses.GET,
urljoin(userinfo_endpoint_url,
'{}/person'.format(ORCID_PERSON_ID)),
body=json.dumps(userinfo),
status=200,
content_type="application/json"
)
@pytest.fixture
def incoming_authn_response(self, context, backend_config):
context.path = backend_config["authz_page"]
state_data = dict(state=mock_get_state.return_value)
context.state[self.orcid_backend.name] = state_data
context.request = {
"code": "the_orcid_code",
"state": mock_get_state.return_value
}
return context
def test_start_auth(self, context, backend_config):
auth_response = self.orcid_backend.start_auth(
context, None, mock_get_state)
assert isinstance(auth_response, Response)
login_url = auth_response.message
parsed = urlparse(login_url)
assert login_url.startswith(
backend_config["server_info"]["authorization_endpoint"])
auth_params = dict(parse_qsl(parsed.query))
assert auth_params["scope"] == " ".join(backend_config["scope"])
assert auth_params["response_type"] == backend_config["response_type"]
assert auth_params["client_id"] == backend_config["client_config"]["client_id"]
assert auth_params["redirect_uri"] == "{}/{}".format(
backend_config["base_url"],
backend_config["authz_page"]
)
assert auth_params["state"] == mock_get_state.return_value
@responses.activate
def test_authn_response(self, backend_config, userinfo, incoming_authn_response):
self.setup_token_endpoint(
backend_config["server_info"]["token_endpoint"])
self.setup_userinfo_endpoint(
backend_config["server_info"]["user_info"], userinfo)
self.orcid_backend._authn_response(incoming_authn_response)
args = self.orcid_backend.auth_callback_func.call_args[0]
assert isinstance(args[0], Context)
assert isinstance(args[1], InternalData)
self.assert_expected_attributes(userinfo, args[1].attributes)
@responses.activate
def test_user_information(self, context, backend_config, userinfo):
self.setup_userinfo_endpoint(
backend_config["server_info"]["user_info"],
userinfo
)
user_attributes = self.orcid_backend.user_information(
"orcid_access_token",
ORCID_PERSON_ID,
ORCID_PERSON_NAME
)
assert user_attributes["address"] == ORCID_PERSON_COUNTRY
assert user_attributes["displayname"] == ORCID_PERSON_NAME
assert user_attributes["edupersontargetedid"] == ORCID_PERSON_ID
assert user_attributes["orcid"] == ORCID_PERSON_ID
assert user_attributes["mail"] == ORCID_PERSON_EMAIL
assert user_attributes["givenname"] == ORCID_PERSON_GIVEN_NAME
assert user_attributes["surname"] == ORCID_PERSON_FAMILY_NAME
@responses.activate
def test_user_information_private(self, context, backend_config, userinfo_private):
self.setup_userinfo_endpoint(
backend_config["server_info"]["user_info"],
userinfo_private
)
user_attributes = self.orcid_backend.user_information(
"orcid_access_token",
ORCID_PERSON_ID,
ORCID_PERSON_NAME
)
assert user_attributes["address"] == ""
assert user_attributes["mail"] == ""
|
11561166
|
import argparse
import os.path
import sys
import torch
def get_custom_op_library_path():
if sys.platform.startswith("win32"):
library_filename = "custom_ops.dll"
elif sys.platform.startswith("darwin"):
library_filename = "libcustom_ops.dylib"
else:
library_filename = "libcustom_ops.so"
path = os.path.abspath("build/{}".format(library_filename))
assert os.path.exists(path), path
return path
class Model(torch.jit.ScriptModule):
def __init__(self):
super(Model, self).__init__()
self.p = torch.nn.Parameter(torch.eye(5))
@torch.jit.script_method
def forward(self, input):
return torch.ops.custom.op_with_defaults(input)[0] + 1
def main():
parser = argparse.ArgumentParser(
description="Serialize a script module with custom ops"
)
parser.add_argument("--export-script-module-to", required=True)
options = parser.parse_args()
torch.ops.load_library(get_custom_op_library_path())
model = Model()
model.save(options.export_script_module_to)
if __name__ == "__main__":
main()
|
11561222
|
import os
import argparse
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import experiment_descriptor as ed
import misc
import util.io
root = misc.get_root()
def get_dist(exp_desc, average):
"""
Get the average distance from observed data in every round.
"""
if average == 'mean':
fname = 'dist_obs'
avg_f = np.mean
elif average == 'median':
fname = 'dist_obs_median'
avg_f = np.median
else:
raise ValueError('unknown average: {0}'.format(average))
res_file = os.path.join(root, 'results', exp_desc.get_dir(), fname)
if os.path.exists(res_file + '.pkl'):
avg_dist = util.io.load(res_file)
else:
exp_dir = os.path.join(root, 'experiments', exp_desc.get_dir(), '0')
_, obs_xs = util.io.load(os.path.join(exp_dir, 'gt'))
results = util.io.load(os.path.join(exp_dir, 'results'))
if isinstance(exp_desc.inf, ed.PostProp_Descriptor):
_, _, _, all_xs = results
elif isinstance(exp_desc.inf, ed.SNPE_MDN_Descriptor):
_, _, all_xs, _ = results
elif isinstance(exp_desc.inf, ed.SNL_Descriptor):
_, all_xs, _ = results
else:
raise TypeError('unsupported experiment descriptor')
avg_dist = []
for xs in all_xs:
dist = np.sqrt(np.sum((xs - obs_xs) ** 2, axis=1))
            dist = dist[~np.isnan(dist)]  # drop NaNs (filter() returns an iterator on Python 3)
avg_dist.append(avg_f(dist))
util.io.save(avg_dist, res_file)
return avg_dist
def plot_results(sim_name, average):
"""
Plots all results for a given simulator.
"""
all_dist_ppr = None
all_dist_snp = None
all_dist_snl = None
for exp_desc in ed.parse(util.io.load_txt('exps/{0}_seq.txt'.format(sim_name))):
# Post Prop
if isinstance(exp_desc.inf, ed.PostProp_Descriptor):
all_dist_ppr = get_dist(exp_desc, average)
# SNPE
if isinstance(exp_desc.inf, ed.SNPE_MDN_Descriptor):
all_dist_snp = get_dist(exp_desc, average)
# SNL
if isinstance(exp_desc.inf, ed.SNL_Descriptor):
all_dist_snl = get_dist(exp_desc, average)
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=16)
fig, ax = plt.subplots(1, 1)
ax.plot(np.arange(len(all_dist_ppr)) + 1, all_dist_ppr, '>:', color='c', label='SNPE-A')
ax.plot(np.arange(len(all_dist_snp)) + 1, all_dist_snp, 'p:', color='g', label='SNPE-B')
ax.plot(np.arange(len(all_dist_snl)) + 1, all_dist_snl, 'o:', color='r', label='SNL')
ax.set_xlabel('Round')
ax.set_ylabel('{0} distance'.format(average[0].upper() + average[1:]))
ax.set_ylim([0.0, ax.get_ylim()[1]])
ax.legend(fontsize=14)
plt.show()
def main():
parser = argparse.ArgumentParser(description='Plotting distance vs time for the attention-focusing experiments.')
parser.add_argument('sim', type=str, choices=['gauss', 'mg1', 'lv', 'hh'], help='simulator')
parser.add_argument('-a', '--average', type=str, choices=['mean', 'median'], default='median', help='average type')
args = parser.parse_args()
plot_results(args.sim, args.average)
if __name__ == '__main__':
main()
|
11561232
|
import datetime
import io
import json
import logging
import os
import pathlib
import re
import sys
import urllib.parse as urlparse
import uuid
import sarif_om as om
from jschema_to_python.to_json import to_json
from reporter.sarif import render_html
import lib.config as config
import lib.csv_parser as csv_parser
import lib.xml_parser as xml_parser
from lib.context import find_repo_details
from lib.issue import issue_from_dict
LOG = logging.getLogger(__name__)
TS_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
def tweak_severity(tool_name, issue_severity):
"""
Tweak severity for certain tools
:param tool_name:
:param issue_severity:
:return:
"""
if tool_name == "staticcheck":
issue_severity = "MEDIUM"
return issue_severity
def extract_from_file(tool_name, report_file, file_path_list=None):
"""Extract properties from reports
:param tool_name: tool name
:param report_file: Report file
:param file_path_list: Full file path for any manipulation
:return issues, metrics, skips information
"""
issues = []
metrics = None
skips = []
extn = pathlib.PurePosixPath(report_file).suffix
with io.open(report_file, "r") as rfile:
        # staticcheck uses the jsonlines format
if tool_name == "staticcheck":
contents = rfile.read()
issues = [json.loads(str(item)) for item in contents.strip().split("\n")]
return issues, metrics, skips
if extn == ".json":
try:
report_data = json.loads(rfile.read())
except json.decoder.JSONDecodeError:
return issues, metrics, skips
if isinstance(report_data, list):
issues = report_data
else:
# NodeJsScan uses sec_issues
if "sec_issues" in report_data:
sec_data = report_data["sec_issues"]
for key, value in sec_data.items():
if isinstance(value, list):
issues = issues + value
else:
issues.append(value)
if "Issues" in report_data or "results" in report_data:
for issue in report_data.get(
"Issues", report_data.get("results", [])
):
issues.append(issue)
if extn == ".csv":
headers, issues = csv_parser.get_report_data(rfile)
if extn == ".xml":
issues, metrics = xml_parser.get_report_data(rfile, file_path_list)
return issues, metrics, skips
def convert_file(
tool_name, tool_args, working_dir, report_file, converted_file, file_path_list=None,
):
"""Convert report file
:param tool_name: tool name
:param tool_args: tool args
:param working_dir: Working directory
:param report_file: Report file
:param converted_file: Converted file
:param file_path_list: Full file path for any manipulation
:return serialized_log: SARIF output data
"""
issues, metrics, skips = extract_from_file(tool_name, report_file, file_path_list)
return report(
tool_name,
tool_args,
working_dir,
metrics,
skips,
issues,
converted_file,
file_path_list,
)
def report(
tool_name,
tool_args,
working_dir,
metrics,
skips,
issues,
crep_fname,
file_path_list=None,
):
"""Prints issues in SARIF format
:param tool_name: tool name
:param tool_args: Args used for the tool
:param working_dir: Working directory
:param metrics: metrics data
:param skips: skips data
:param issues: issues data
:param crep_fname: The output file name
:param file_path_list: Full file path for any manipulation
:return serialized_log: SARIF output data
"""
if not tool_args:
tool_args = []
tool_args_str = tool_args
if isinstance(tool_args, list):
tool_args_str = " ".join(tool_args)
repo_details = find_repo_details(working_dir)
log_uuid = str(uuid.uuid4())
run_uuid = config.get("run_uuid")
# Populate metrics
metrics = {
"total": 0,
"critical": 0,
"high": 0,
"medium": 0,
"low": 0,
}
metrics["total"] = len(issues)
for issue in issues:
issue_dict = issue_from_dict(issue).as_dict()
issue_severity = issue_dict["issue_severity"]
# Fix up severity for certain tools
issue_severity = tweak_severity(tool_name, issue_severity)
key = issue_severity.lower()
if not metrics.get(key):
metrics[key] = 0
metrics[key] += 1
# working directory to use in the log
WORKSPACE_PREFIX = config.get("WORKSPACE", None)
wd_dir_log = WORKSPACE_PREFIX if WORKSPACE_PREFIX else working_dir
# Construct SARIF log
log = om.SarifLog(
schema_uri="https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json",
version="2.1.0",
inline_external_properties=[
om.ExternalProperties(guid=log_uuid, run_guid=run_uuid)
],
runs=[
om.Run(
automation_details=om.RunAutomationDetails(
guid=log_uuid,
description=om.Message(
text="Static Analysis Security Test results using @AppThreat/sast-scan"
),
),
tool=om.Tool(
driver=om.ToolComponent(
name=config.tool_purpose_message.get(tool_name, tool_name)
)
),
invocations=[
om.Invocation(
end_time_utc=datetime.datetime.utcnow().strftime(TS_FORMAT),
execution_successful=True,
working_directory=om.ArtifactLocation(uri=to_uri(wd_dir_log)),
)
],
conversion={
"tool": om.Tool(
driver=om.ToolComponent(name="@AppThreat/sast-scan")
),
"invocation": om.Invocation(
execution_successful=True,
command_line=tool_args_str,
arguments=tool_args,
working_directory=om.ArtifactLocation(uri=to_uri(wd_dir_log)),
end_time_utc=datetime.datetime.utcnow().strftime(TS_FORMAT),
),
},
properties={"metrics": metrics},
version_control_provenance=[
om.VersionControlDetails(
repository_uri=repo_details["repositoryUri"],
branch=repo_details["branch"],
revision_id=repo_details["revisionId"],
)
],
)
],
)
run = log.runs[0]
invocation = run.invocations[0]
add_skipped_file_notifications(skips, invocation)
add_results(tool_name, issues, run, file_path_list, working_dir)
serialized_log = to_json(log)
if crep_fname:
html_file = crep_fname.replace(".sarif", ".html")
with io.open(crep_fname, "w") as fileobj:
fileobj.write(serialized_log)
render_html(json.loads(serialized_log), html_file)
if fileobj.name != sys.stdout.name:
LOG.debug(
"SARIF and HTML report written to file: %s, %s 👍",
fileobj.name,
html_file,
)
return serialized_log
def add_skipped_file_notifications(skips, invocation):
"""Method to add skipped files details to the output
:param skips: List of files skipped by the tool
:param invocation: Invocation object for the given run
"""
if skips is None or len(skips) == 0:
return
if invocation.tool_configuration_notifications is None:
invocation.tool_configuration_notifications = []
for skip in skips:
(file_name, reason) = skip
notification = om.Notification(
level="error",
message=om.Message(text=reason),
locations=[
om.Location(
physical_location=om.PhysicalLocation(
artifact_location=om.ArtifactLocation(uri=to_uri(file_name))
)
)
],
)
invocation.tool_configuration_notifications.append(notification)
def add_results(tool_name, issues, run, file_path_list=None, working_dir=None):
"""Method to convert issues into results schema
:param tool_name: tool name
:param issues: Issues found
:param run: Run object
:param file_path_list: Full file path for any manipulation
:param working_dir: Working directory
"""
if run.results is None:
run.results = []
rules = {}
rule_indices = {}
for issue in issues:
result = create_result(
tool_name, issue, rules, rule_indices, file_path_list, working_dir
)
run.results.append(result)
if len(rules) > 0:
run.tool.driver.rules = list(rules.values())
def create_result(
tool_name, issue, rules, rule_indices, file_path_list=None, working_dir=None
):
"""Method to convert a single issue into result schema with rules
:param tool_name: tool name
:param issue: Issues object
:param rules: List of rules
:param rule_indices: Indices of referred rules
:param file_path_list: Full file path for any manipulation
:param working_dir: Working directory
"""
WORKSPACE_PREFIX = config.get("WORKSPACE", None)
if isinstance(issue, dict):
issue = issue_from_dict(issue)
issue_dict = issue.as_dict()
rule, rule_index = create_or_find_rule(tool_name, issue_dict, rules, rule_indices)
# Substitute workspace prefix
# Override file path prefix with workspace
filename = issue_dict["filename"]
if working_dir:
# Issue 5 fix. Convert relative to full path automatically
if not filename.startswith(working_dir):
filename = os.path.join(working_dir, filename)
if WORKSPACE_PREFIX:
filename = re.sub(r"^" + working_dir, WORKSPACE_PREFIX, filename)
physical_location = om.PhysicalLocation(
artifact_location=om.ArtifactLocation(uri=to_uri(filename))
)
add_region_and_context_region(
physical_location, issue_dict["line_number"], issue_dict["code"]
)
issue_severity = issue_dict["issue_severity"]
issue_severity = tweak_severity(tool_name, issue_severity)
return om.Result(
rule_id=rule.id,
rule_index=rule_index,
message=om.Message(text=issue_dict["issue_text"]),
level=level_from_severity(issue_severity),
locations=[om.Location(physical_location=physical_location)],
properties={
"issue_confidence": issue_dict["issue_confidence"],
"issue_severity": issue_severity,
},
hosted_viewer_uri=config.get("hosted_viewer_uri", ""),
)
def level_from_severity(severity):
"""Converts tool's severity to the 4 level
suggested by SARIF
"""
if severity == "CRITICAL":
return "error"
elif severity == "HIGH":
return "error"
elif severity == "MEDIUM":
return "warning"
elif severity == "LOW":
return "note"
else:
return "warning"
def add_region_and_context_region(physical_location, line_number, code):
"""This adds the region information for displaying the code snippet
:param physical_location: Points to file
:param line_number: Line number suggested by the tool
:param code: Source code snippet
"""
first_line_number, snippet_lines = parse_code(code)
end_line_number = first_line_number + len(snippet_lines) - 1
if end_line_number < first_line_number:
end_line_number = first_line_number + 3
index = line_number - first_line_number
snippet_line = ""
    if 0 <= index < len(snippet_lines):  # index 0 is valid: the flagged line can be the first snippet line
snippet_line = snippet_lines[index]
physical_location.region = om.Region(
start_line=line_number, snippet=om.ArtifactContent(text=snippet_line)
)
physical_location.context_region = om.Region(
start_line=first_line_number,
end_line=end_line_number,
snippet=om.ArtifactContent(text="".join(snippet_lines)),
)
def parse_code(code):
"""Method to parse the code to extract line number and snippets
"""
code_lines = code.split("\n")
# The last line from the split has nothing in it; it's an artifact of the
# last "real" line ending in a newline. Unless, of course, it doesn't:
last_line = code_lines[len(code_lines) - 1]
last_real_line_ends_in_newline = False
if len(last_line) == 0:
code_lines.pop()
last_real_line_ends_in_newline = True
snippet_lines = []
first = True
first_line_number = 1
for code_line in code_lines:
number_and_snippet_line = code_line.split(" ", 1)
if first:
first_line_number = int(number_and_snippet_line[0])
first = False
snippet_line = number_and_snippet_line[1] + "\n"
snippet_lines.append(snippet_line)
if not last_real_line_ends_in_newline:
last_line = snippet_lines[len(snippet_lines) - 1]
snippet_lines[len(snippet_lines) - 1] = last_line[: len(last_line) - 1]
return first_line_number, snippet_lines
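# Illustrative note (added): parse_code assumes each line of `code` starts with its
# source line number followed by a single space, e.g. "12 if password == 'x':\n".
# For that input first_line_number is 12 and snippet_lines holds the text after the
# number, which is what add_region_and_context_region above uses to build the SARIF
# region and context_region. This is a reading of the code, not documented behaviour.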
def get_url(tool_name, rule_id, test_name, issue_dict):
# Return stackoverflow url for now
# FIXME: The world needs an opensource SAST issue database!
if issue_dict.get("test_ref_url"):
return issue_dict.get("test_ref_url")
if config.tool_ref_url.get(tool_name):
return config.tool_ref_url.get(tool_name) % dict(
rule_id=rule_id, tool_name=tool_name, test_name=test_name
)
if rule_id and rule_id.startswith("CWE"):
return "https://cwe.mitre.org/data/definitions/%s.html" % rule_id.replace(
"CWE-", ""
)
return "https://stackoverflow.com/search?q=appthreat/sast-scan+{}+{}+{}".format(
tool_name, rule_id, test_name
)
def create_or_find_rule(tool_name, issue_dict, rules, rule_indices):
"""Creates rules object for the rules section. Different tools make up
their own id and names so this is identified on the fly
:param tool_name: tool name
:param issue_dict: Issue object that is normalized and converted
:param rules: List of rules identified so far
:param rule_indices: Rule indices cache
:return rule and index
"""
rule_id = issue_dict["test_id"]
if rule_id in rules:
return rules[rule_id], rule_indices[rule_id]
rule = om.ReportingDescriptor(
id=rule_id,
name=issue_dict["test_name"],
help_uri=get_url(tool_name, rule_id, issue_dict["test_name"], issue_dict),
)
index = len(rules)
rules[rule_id] = rule
rule_indices[rule_id] = index
return rule, index
def to_uri(file_path):
"""Converts to file path to uri prefixed with file://
:param file_path: File path to convert
"""
if file_path.startswith("http"):
return file_path
pure_path = pathlib.PurePath(file_path)
if pure_path.is_absolute():
return pure_path.as_uri()
else:
posix_path = pure_path.as_posix() # Replace backslashes with slashes.
return urlparse.quote(posix_path) # %-encode special characters.
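# Hedged examples (added for illustration; the file:// form assumes a POSIX path):
#   to_uri("https://example.com/x")   -> returned unchanged
#   to_uri("/workspace/app/main.py")  -> "file:///workspace/app/main.py"
#   to_uri("src/app/main.py")         -> "src/app/main.py" (relative paths are
#                                        %-quoted, not given a file:// scheme)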
|
11561240
|
import logging
import os
import sys
# pylint: disable=wrong-import-position
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from deep_qa import run_model_from_file, evaluate_model
from deep_qa.common.checks import ensure_pythonhashseed_set
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def main():
usage = 'USAGE: run_model.py [param_file] [train|test]'
if len(sys.argv) == 2:
run_model_from_file(sys.argv[1])
elif len(sys.argv) == 3:
mode = sys.argv[2]
if mode == 'train':
run_model_from_file(sys.argv[1])
elif mode == 'test':
evaluate_model(sys.argv[1])
else:
print(usage)
sys.exit(-1)
else:
print(usage)
sys.exit(-1)
if __name__ == "__main__":
ensure_pythonhashseed_set()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.INFO)
main()
|
11561275
|
from flask import Flask
app = Flask(__name__)
# root or index
@app.route('/')
def index():
return "You have made it to the Index of my machine!"
# sub directories
@app.route("/hello")
def hello():
return "Hello there!"
@app.route("/idemo/<int:ivar>")
@app.route("/demo/<ivar>")
def showtype(ivar):
print(type(ivar))
t = str(type(ivar)).split("'")[1]
svar = str(ivar)
print(svar + " is a " + t)
return svar + " is a " + t
@app.route("/members")
def members():
return "We have many members please supply a name"
# Listing variables to pass to function
@app.route("/members/<string:name>")
def getMember(name):
return "I think " + name + " is a member, can you give me his last name and id number?"
@app.route("/members/<string:firstname>/<string:lastname>/<int:id>")
def getMemberID(firstname,lastname,id):
return f'Member {id}: {lastname}, {firstname}....... Ahh yes they are a member'
if __name__ == "__main__":
app.run(debug = True)
|
11561284
|
import os
from pathlib import Path
DEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv("MOGUA_ROOT", "~/.mogua/mainnet"))).resolve()
|
11561287
|
from functools import wraps
from flask import session
from .errors import AuthError
def require_session(f):
@wraps(f)
def inner(*args, **kargs):
user_id = session.get('user_id')
if user_id is None:
raise AuthError()
return f(*args, user_id=user_id, **kargs)
return inner
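# Hedged usage sketch (added; not part of the original module): a hypothetical
# Flask view protected by the decorator. The `app` object is assumed to exist
# wherever the view is registered, so the example stays commented out.
#
# @app.route('/me')
# @require_session
# def me(user_id):
#     return {'user_id': user_id}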
|
11561334
|
try:
import ujson
def dump_json(x):
return ujson.dumps(x, ensure_ascii=False, escape_forward_slashes=False)
except ImportError:
import json
def dump_json(x):
return json.dumps(x, ensure_ascii=False)
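# Hedged usage example (added): both branches keep non-ASCII characters and forward
# slashes literal, e.g.
#   dump_json({"msg": "héllo", "path": "/tmp/x"})
# returns '{"msg":"héllo","path":"/tmp/x"}' (whitespace around separators differs
# slightly between ujson and the stdlib json fallback).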
|
11561366
|
import wx
from wx import glcanvas
import sys
import math
import cad
import Mouse
import Key
from RefreshObserver import RefreshObserver
import copy
graphics_canvases = []
repaint_registered = False
def OnRepaint():
global graphics_canvases
for g in graphics_canvases:
g.Refresh()
class GraphicsCanvas(glcanvas.GLCanvas):
def __init__(self, parent):
glcanvas.GLCanvas.__init__(self, parent,-1, attribList=[glcanvas.WX_GL_RGBA, glcanvas.WX_GL_DOUBLEBUFFER, glcanvas.WX_GL_DEPTH_SIZE, 24])
self.context = glcanvas.GLContext(self)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_MOUSE_EVENTS, self.OnMouse)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
self.Bind(wx.EVT_MENU, self.OnMenu, None, 10000, 12000)
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.viewport= cad.Viewport()
self.Resize()
self.paint_callbacks = []
self.context_menu_enabled = True
self.middle_down = False
self.observer = RefreshObserver(self)
cad.RegisterObserver(self.observer)
global repaint_registered
if not repaint_registered:
cad.RegisterOnRepaint(OnRepaint)
repaint_registered = True
graphics_canvases.append(self)
self.right_down_and_no_left_clicked = False
def OnSize(self, event):
self.Resize()
event.Skip()
def OnMenu(self, event):
index = event.GetId() - 10000
tool = self.tools[index]
tool.Run()
def OnKeyDown(self, event):
k = event.GetKeyCode()
if k == wx.WXK_ESCAPE and wx.GetApp().frame.IsFullScreen():
wx.GetApp().ShowFullScreen(False)
else:
key_code = Key.KeyCodeFromWx(event)
if not cad.GetInputMode().OnKeyDown(key_code):
wx.GetApp().OnKeyDown(event)
def OnKeyUp(self, event):
key_code = Key.KeyCodeFromWx(event)
cad.GetInputMode().OnKeyUp(key_code)
def AppendToolsToMenu(self, menu, tools):
for tool in tools:
if tool.IsSeparator():
menu.AppendSeparator()
elif tool.IsAToolList():
sub_menu = wx.Menu()
self.AppendToolsToMenu(sub_menu, tool.GetChildTools())
menu.AppendMenu(wx.ID_ANY, tool.GetTitle(), sub_menu)
else:
item = wx.MenuItem(menu, 10000 + self.next_tool_id, text = tool.GetTitle(), help = tool.GetToolTip())
                # res_folder is expected to be defined elsewhere in the application;
                # a missing bitmap is silently swallowed by the except below
                bitmap_path = tool.BitmapPath()
                if len(bitmap_path) > 0:
                    try:
                        image = wx.Image(res_folder + '/bitmaps/' + bitmap_path + '.png')
                        image.Rescale(24, 24)
                        item.SetBitmap(wx.BitmapFromImage(image))
except:
pass
menu.AppendItem(item)
self.next_tool_id = self.next_tool_id + 1
self.tools.append(tool)
def OnMouse(self, event):
self.SetCurrent(self.context)
e = Mouse.MouseEventFromWx(event)
if event.RightDown():
self.right_down_and_no_left_clicked = True
if event.LeftIsDown():
self.right_down_and_no_left_clicked = False
if event.RightUp() and self.right_down_and_no_left_clicked:
wx.GetApp().DoDropDownMenu(self, event.GetX(), event.GetY(), event.ControlDown())
self.right_down_and_no_left_clicked = False
else:
self.viewport.OnMouseEvent(e)
if self.viewport.need_update: self.Update()
if self.viewport.need_refresh: self.Refresh()
if event.LeftUp():
if cad.GetInputMode().GetType() == cad.GetDigitizing().GetType():
if cad.GetDigitizing().wants_to_exit_main_loop:
wx.GetApp().ExitMainLoop()
event.Skip()
def OnEraseBackground(self, event):
pass # Do nothing, to avoid flashing on MSW
def Resize(self):
s = self.GetClientSize()
self.viewport.WidthAndHeightChanged(s.GetWidth(), s.GetHeight())
self.Refresh()
def OnPaint(self, event):
dc = wx.PaintDC(self)
self.SetCurrent(self.context)
self.viewport.glCommands()
for callback in self.paint_callbacks:
callback()
self.SwapBuffers()
self.viewport.render_on_front_done = False
self.viewport.DrawFront()
return
|
11561373
|
from IPython.core.magic import (Magics, magics_class, line_magic,
cell_magic, line_cell_magic)
from sqlalchemy import create_engine
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session, sessionmaker
from sqlalchemy import desc, asc
from sqlalchemy.engine.base import Engine
from sqlalchemy import sql
import pandas as pd
import pickle
import threading
################# SQLCell modules #################
from sqlcell.db import EngineHandler, DBSessionHandler
from sqlcell.args import ArgHandler
from sqlcell.hooks import HookHandler
from sqlcell._initdb import run
@magics_class
class SQLCell(Magics, EngineHandler):
current_engine = False
current_hook_engine = False
modes = ['query', 'hook', 'refresh']
# consider yaml file for these types of params:
hook_indicator = '~'
def __init__(self, shell, data):
# You must call the parent constructor
super(SQLCell, self).__init__(shell)
self.shell = shell
self.data = data
self.ipy = get_ipython()
self.refresh_options = ['hooks', 'engines']
self.line_args = None
def register_line_vars(self, line):
"""options: engine, var, bg"""
mode = self.get_mode(line)
if line.strip() and mode == 'query':
line = line.split(' ')
line_vars = {}
for var in line:
key,value = var.split('=')
line_vars[key] = value
self.line_vars = line_vars
return line_vars
return {}
def push_var(self, obj):
if self.line_args.var:
self.ipy.push({self.line_args.var: obj})
def async_handler(self, obj):
self.push_var(obj)
return obj
def run_query(self, engine, query_params, var=None, callback=None, **kwargs):
results = pd.DataFrame([dict(row) for row in engine.execute(*query_params)])
return callback(results)
def query_router(self, *args):
if self.line_args.background:
processThread = threading.Thread(target=self.run_query, args=args)
processThread.start()
return None
return self.run_query(*args)
    def get_mode(self, line):
        # A line consisting of a single bare keyword (no key=value pairs) selects a mode.
        tokens = [token for token in line.strip().split(' ') if token]
        if len(tokens) == 1 and '=' not in tokens[0]:
            if tokens[0] in SQLCell.modes:
                return tokens[0]
            raise Exception('Invalid mode, please review docs')
        return 'query'
def get_bind_params(self, params, ipython):
return {key:getattr(ipython.user_module, key) for key in params.keys()}
def get_sql_statement(self, cell):
text = sql.text(cell)
params = text.compile().params
bind_params = self.get_bind_params(params, self.ipy)
return (text, bind_params)
@line_cell_magic
    def sql(self, line: str="", cell: str=""):
line = line.strip()
cell = cell.strip()
line_args = ArgHandler(line).args
container_var = line_args.var
engine_var = line_args.engine
background = line_args.background
hook = line_args.hook
refresh = line_args.refresh
add_engines = line_args.engines
# refer to all args as self.line_args.<arg> to get rid of entire block ^?
self.line_args = line_args
############################ Refresh logic ##########################
if refresh and cell in self.refresh_options:
if cell in self.tables:
self.session.query(getattr(self.classes, cell)).delete()
self.session.commit()
return ('Removed all records from ' + cell)
############################ End Refresh logic ######################
############################ Engine Aliases Logic ###################
if self.line_args.engines:
if cell == 'list':
return self.list()
else:
self.add_alias(cell)
# need to reinit db_info to update new engines added
self.db_info = SQLCell(self.shell, self.data).db_info
return ('Engines successfully registered')
############################ End Engine Aliases #####################
# need engine below but not in refresh or alias logic
engine = self.get_engine(engine_var, session_engine=SQLCell.current_engine)
########################## HookHandler logic ########################
hook_handler = HookHandler(engine)
if hook:
if cell == 'list':
return hook_handler.list()
hook_handler.add(line, cell)
return ('Hook successfully registered')
if cell.startswith(self.hook_indicator):
# run returns `engine, cmd`, consider renaming
engine, cell = hook_handler.run(cell, engine_var)
SQLCell.current_hook_engine = hook_handler.hook_engine
########################## End HookHandler logic ####################
        sql_statement_params = self.get_sql_statement(cell)
        results = self.query_router(engine, sql_statement_params, self.line_args.var, self.async_handler)
# self.push_var(results)
engine.pool.dispose()
# reinitialize to update db_info, find better way
self.db_info = SQLCell(self.shell, self.data).db_info
SQLCell.current_engine = engine
return results
def load_ipython_extension(ipython):
run()
magics = SQLCell(ipython, [])
ipython.register_magics(magics)
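# Illustrative sketch (not part of SQLCell itself): how get_sql_statement() extracts bind
# parameters from a cell with SQLAlchemy before execution. The query text and the stand-in
# namespace below are made up for demonstration only.
def _example_bind_params():
    cell = "SELECT * FROM users WHERE id = :user_id"
    text = sql.text(cell)
    params = text.compile().params              # {'user_id': None}
    user_namespace = {"user_id": 42}            # stands in for ipython.user_module attributes
    bind_params = {key: user_namespace[key] for key in params.keys()}
    return text, bind_params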
|
11561385
|
from enum import Enum, unique
__all__ = [
'ImportStatus', 'STATUS_MESSAGE', 'get_row_error_message', 'Error', 'IMPORT_RENAME_ZH',
'IMPORT_ERROR_RENAME', 'EXPORT_RENAME_ZH'
]
@unique
class ImportStatus(Enum):
UPLOADED = 4001
READING = 4002
VALIDATING = 4003
IMPORTING = 4004
COMPLETED = 4005
TEMPLATE_ERROR = 4006
ABNORMAL = 4007
LIMITED = 4008
FAILED = 4009
STATUS_MESSAGE = {
ImportStatus.UPLOADED: 'File upload completed',
ImportStatus.READING: 'Reading file content',
ImportStatus.VALIDATING: 'Validating data',
ImportStatus.IMPORTING: 'Importing data',
ImportStatus.COMPLETED: 'Import completed',
ImportStatus.TEMPLATE_ERROR: 'Please follow the template to fill in the import data',
    ImportStatus.ABNORMAL: 'Abnormal data detected, import stopped',
    ImportStatus.LIMITED: 'The number of imported devices exceeds the device limit',
ImportStatus.FAILED: 'Data import failed, please check the data or try again later',
}
class Error(Enum):
FORMAT_ERROR = 5001
DEVICE_NAME_DUPLICATE = 5002
DEVICE_ID_DUPLICATE = 5003
DEVICE_ID_EXIST = 5004
DEVICE_USERNAME_DUPLICATE = 5005
PRODUCT_NOT_EXIST = 5006
INDEX_REQUIRED = 5007
INDEX_NOT_REQUIRED = 5008
INDEX_INVALID = 5009
GATEWAY_NOT_REQUIRED = 5010
GATEWAY_REQUIRED = 5011
GATEWAY_NOT_EXIST = 5012
IMEI_REQUIRED = 5013
IMEI_DUPLICATE = 5014
IMEI_EXIST = 5015
ROW_ERROR_MESSAGE_ZH = {
Error.FORMAT_ERROR: '格式错误, 请按填写说明填写',
Error.DEVICE_NAME_DUPLICATE: '设备名重复(%s)',
Error.DEVICE_ID_DUPLICATE: '设备编号重复(%s)',
Error.DEVICE_ID_EXIST: '设备编号已存在(%s)',
Error.DEVICE_USERNAME_DUPLICATE: '设备用户名重复(%s)',
Error.PRODUCT_NOT_EXIST: '产品不存在(%s)',
Error.INDEX_REQUIRED: 'Modbus协议产品必须填写索引',
Error.INDEX_NOT_REQUIRED: '非Modbus协议产品不填写索引(%d)',
Error.INDEX_INVALID: '索引必须是0~255之间的数字(%d)',
Error.GATEWAY_NOT_REQUIRED: '所属网关不填写(上联系统为云时,不填)',
Error.GATEWAY_REQUIRED: '所属网关未填写(上联系统为网关时,必填)',
Error.GATEWAY_NOT_EXIST: '所属网关不存在(%s)',
Error.IMEI_REQUIRED: 'LwM2M协议产品必须填写IMEI',
    Error.IMEI_DUPLICATE: 'IMEI重复(%s)',
    Error.IMEI_EXIST: 'IMEI已存在(%s)',
}
ROW_ERROR_MESSAGE_EN = {
Error.FORMAT_ERROR: 'Format error',
Error.DEVICE_NAME_DUPLICATE: 'Device name duplicate(%s)',
Error.DEVICE_ID_DUPLICATE: 'Device id duplicate(%s)',
Error.DEVICE_ID_EXIST: 'Device id already exists(%s)',
Error.DEVICE_USERNAME_DUPLICATE: 'Device username duplicate(%s)',
Error.PRODUCT_NOT_EXIST: 'Product does not exist(%s)',
Error.INDEX_REQUIRED: 'Modbus index is required for Modbus protocol',
Error.INDEX_NOT_REQUIRED: 'Modbus index is not required for non-Modbus protocol(%d)',
Error.INDEX_INVALID: 'The index must be a number between 0 and 255(%d)',
Error.GATEWAY_NOT_REQUIRED: 'Gateway is not required',
Error.GATEWAY_REQUIRED: 'Gateway is required',
Error.GATEWAY_NOT_EXIST: 'Gateway does not exist(%s)',
Error.IMEI_REQUIRED: 'IMEI is required for LwM2M protocol',
    Error.IMEI_DUPLICATE: 'IMEI duplicate(%s)',
    Error.IMEI_EXIST: 'IMEI already exists(%s)',
}
def get_row_error_message(error: Error, language: str):
message = ''
if language == 'en':
message = ROW_ERROR_MESSAGE_EN.get(error)
elif language == 'zh':
message = ROW_ERROR_MESSAGE_ZH.get(error)
return message
IMPORT_RENAME_ZH = {
'设备名称': 'deviceName',
'认证类型': 'authType',
'所属产品': 'product',
'上联系统': 'upLinkSystem',
'所属网关': 'gateway',
'设备编号': 'deviceID',
'设备用户名': 'deviceUsername',
'设备秘钥': 'token',
'mac': 'mac',
'经度': 'longitude',
'纬度': 'latitude',
'安装位置': 'location',
'软件版本': 'softVersion',
'硬件版本': 'hardwareVersion',
'制造商': 'manufacturer',
'序列号': 'serialNumber',
'描述': 'description'
}
IMPORT_ERROR_RENAME = {
'deviceName': '设备名称',
'authType': '认证类型',
'product': '所属产品',
'upLinkSystem': '上联系统',
'gateway': '所属网关',
'deviceID': '设备编号',
'deviceUsername': '设备用户名',
'token': '设备秘钥',
'mac': 'mac',
'longitude': '经度',
'latitude': '纬度',
'location': '安装位置',
'softVersion': '软件版本',
'hardwareVersion': '硬件版本',
'manufacturer': '制造商',
'serialNumber': '序列号',
'description': '描述'
}
EXPORT_RENAME_ZH = {
'createAt': '创建时间',
'deviceName': '设备名称',
'deviceType': '设备类型',
'authType': '认证类型',
'product': '所属产品',
'cloudProtocol': '云端协议',
'upLinkNetwork': '上联网络',
'upLinkSystem': '上联系统',
'deviceID': '设备编号',
'deviceUsername': '设备用户名',
'token': '设备秘钥',
'mac': 'mac',
'longitude': '经度',
'latitude': '纬度',
'location': '安装位置',
'softVersion': '软件版本',
'hardwareVersion': '硬件版本',
'manufacturer': '制造商',
'serialNumber': '序列号',
'description': '描述',
'createUser': '创建人'
}
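# Illustrative usage (the device name is made up): row error messages carry printf-style
# placeholders that the caller fills in after selecting the language.
#   get_row_error_message(Error.DEVICE_NAME_DUPLICATE, 'en') % 'sensor-01'
#   -> 'Device name duplicate(sensor-01)'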
|
11561412
|
from .common import *
import os
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
SECURE_SSL_REDIRECT = config('SSL', default=True, cast=bool)
DEBUG_PROPAGATE_EXCEPTIONS = True
|
11561455
|
from django.contrib import admin
from .models import DepTour
class ToursAdmin(admin.ModelAdmin):
fields = [
"name",
"confirmed",
"datetime",
"email",
"phone",
"comments",
"deprel_comments",
]
readonly_fields = ("name", "email", "phone", "comments")
list_display = (
"name",
"confirmed",
"email",
"datetime",
"date_submitted",
"phone",
"comments",
"deprel_comments",
)
admin.site.register(DepTour, ToursAdmin)
|
11561459
|
import functools
import os
import time
import numpy as np
from tqdm import tqdm
from .custom import CustomDataset
from .registry import DATASETS
from .utils import (
OpenImagesDetectionChallengeEvaluator,
get_categories,
read_dets,
read_gts,
)
@DATASETS.register_module
class OpenImagesDataset(CustomDataset):
def load_annotations(self, ann_file):
print("load annotation begin", flush=True)
img_infos = []
with open(ann_file) as f:
lines = f.readlines()
i = 0
while i < len(lines):
img_gt = []
labels = []
img_name = lines[i].rstrip()
i += 2
img_gt_size = int(lines[i])
i += 1
for j in range(img_gt_size):
sp = lines[i + j].split()
img_gt.append([float(sp[1]), float(sp[2]), float(sp[3]), float(sp[4])])
labels.append(int(sp[0]))
i += img_gt_size
img_infos.append([img_name, np.array(img_gt), np.array(labels)])
print("load annotation end", flush=True)
return img_infos
def prepare_train_img(self, idx):
info = self.img_infos[idx]
results = dict(
img_info=dict(filename=info[0]),
ann_info=dict(bboxes=info[1], labels=info[2]),
)
if self.proposals is not None:
results["proposals"] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, idx):
info = self.img_infos[idx]
results = dict(img_info=dict(filename=info[0]))
if self.proposals is not None:
results["proposals"] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
def format_results(self, outputs, metas, file):
assert len(outputs) == len(metas)
bbox_list = []
for bboxes, meta in tqdm(zip(outputs, metas)):
mt = meta[0].data[0][0]
h, w = mt["ori_shape"][:2]
filename = mt["filename"][:-4].split("/")[-1]
valid_classes = np.where(
np.array([[bbox.shape[0]] for bbox in bboxes]) != 0
)[0]
for valid_class in valid_classes:
class_bboxes = bboxes[valid_class]
class_bboxes[:, 0] /= w
class_bboxes[:, 1] /= h
class_bboxes[:, 2] /= w
class_bboxes[:, 3] /= h
bbox_num = class_bboxes.shape[0]
for i in range(bbox_num):
box = [filename] + list(class_bboxes[i]) + [valid_class + 1]
bbox_list.append(box)
def cmp(x, y):
if (x[0] < y[0]) or (x[0] == y[0] and x[5] > y[5]):
return -1
else:
return 1
bbox_list = sorted(bbox_list, key=functools.cmp_to_key(cmp))
        with open(file, "w+") as f:
            for bbox in bbox_list:
                f.write("{}\n".format(" ".join(map(str, list(bbox)))))
            f.flush()
def evaluate(self, label_dir, det_file):
cat_file = os.path.join(label_dir, "cls-label-description.csv")
categories = get_categories(cat_file)
evaluator = OpenImagesDetectionChallengeEvaluator(
categories, group_of_weight=1.0
)
gts = read_gts(label_dir)
st = time.time()
count = 0
for im, gt in gts.items():
evaluator.add_single_ground_truth_image_info(
image_id=im, groundtruth_dict=gt
)
ed = time.time()
print("\tGts added, using: {:.2f} s, flush=True".format(ed - st))
dets = read_dets(det_file, label_dir)
st = time.time()
count = 0
for im, det in dets.items():
evaluator.add_single_detected_image_info(image_id=im, detections_dict=det)
count += 1
if (count + 1) % 1000 == 0:
print(
"\t{}/{} done using {:.2f} s".format(
count + 1, len(dets), (time.time() - st)
),
flush=True,
)
ed = time.time()
print("\tDets evaluated, using: {:.2f} mins".format((ed - st) / 60), flush=True)
print("\n\taccumulating...", flush=True)
st = time.time()
metrics = evaluator.evaluate()
ed = time.time()
print("\ttime used: {:.2f} s".format((ed - st)), flush=True)
print(metrics)
return metrics
|
11561476
|
def split_the_bill(x):
    people = total = 0
    for k, v in x.items():
        people += 1
        total += v
    average = total / float(people)
    return {k: round(v - average, 2) for k, v in x.items()}
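# Illustrative usage (amounts are made up): values are what each person paid and the
# result is each person's difference from the even split.
if __name__ == "__main__":
    print(split_the_bill({"A": 20.0, "B": 30.0, "C": 10.0}))
    # {'A': 0.0, 'B': 10.0, 'C': -10.0}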
|
11561484
|
import torch
import numpy as np
class MCEM:
def __init__(self, Y, model, device, niter_MCEM=100, niter_MH=40,
burnin=30, var_MH=0.01, NMF_rank=8):
self.device = device
self.model = model
self.Y = Y
self.F, self.T = self.Y.shape
        self.X = torch.from_numpy((np.abs(Y)**2).astype(np.float32)).to(device)
# initialize NMF parameters
np.random.seed(0)
eps = torch.tensor(np.finfo(float).eps, device=self.device)
self.K = NMF_rank
self.W = torch.max(torch.rand(self.F,self.K, device=self.device), eps)
self.H = torch.max(torch.rand(self.K,self.T, device=self.device), eps)
self.g = torch.ones((1, self.T)).to(self.device)
self.Z = model.encode(self.X)
self.D = self.Z.shape[0]
self.V_s = model.decode(self.Z) * self.g
self.V_n = self.W @ self.H
self.niter_MH = niter_MH
self.niter_MCEM = niter_MCEM
self.burnin = burnin
self.var_MH = torch.tensor(var_MH)
def metropolis_hastings(self, niter_MH, burnin):
Z_sampled = torch.zeros((self.D, self.T, niter_MH - burnin),
device=self.device)
for i in range(-burnin, niter_MH - burnin):
Z_new = self.Z + \
self.var_MH*torch.randn(self.D, self.T, device=self.device)
V_s_new = self.model.decode(Z_new)*self.g
acc_prob = (torch.sum(torch.log(self.V_n + self.V_s)
- torch.log(self.V_n + V_s_new) + (1/(self.V_n + self.V_s)
- 1/(self.V_n + V_s_new)) * self.X, axis=0)
+ .5*torch.sum(self.Z.pow(2) - Z_new.pow(2), axis=0))
idx = torch.log(torch.rand(self.T, device=self.device)) < acc_prob
self.Z[:,idx] = Z_new[:,idx]
self.V_s = self.model.decode(self.Z)*self.g
if i >= 0: Z_sampled[:,:,i] = self.Z
return Z_sampled
def run(self, tol=1e-4):
cost_after_M_step = np.zeros(self.niter_MCEM)
for n in range(self.niter_MCEM):
Z_sampled = self.metropolis_hastings(self.niter_MH, self.burnin)
V_s_sampled = torch.zeros((self.F, self.T,
self.niter_MH - self.burnin), device=self.device)
for i in range(self.niter_MH - self.burnin):
V_s_sampled[:,:,i] = self.model.decode(Z_sampled[:,:,i])
V_x = self.V_n[:,:,None] + V_s_sampled*self.g[:,:,None]
            # Update W
self.W = self.W*(((self.X*torch.sum(V_x.pow(-2), axis=-1))
@ self.H.T)/(torch.sum(V_x.pow(-1), axis=-1)
@ self.H.T)).pow(0.5)
self.V_n = self.W @ self.H
V_x = self.V_n[:,:,None] + V_s_sampled*self.g[:,:,None]
# Update H
self.H = self.H*((self.W.T @ (self.X*torch.sum(V_x**-2, axis=-1)))
/ (self.W.T @ torch.sum(V_x.pow(-1), axis=-1))).pow(0.5)
self.V_n = self.W @ self.H
V_x = self.V_n[:,:,None] + V_s_sampled*self.g[:,:,None]
# Update g
self.g = self.g*((torch.sum(self.X*torch.sum(V_s_sampled
*(V_x.pow(-2)),axis=-1), axis=0))/(torch.sum(torch.sum(
V_s_sampled*(V_x.pow(-1)), axis=-1), axis=0))).pow(0.5)
V_x = self.V_n[:,:,None] + V_s_sampled*self.g[:,:,None]
cost_after_M_step[n] = torch.mean(torch.log(V_x)
+ self.X[:,:,None]/V_x )
if n>0 and cost_after_M_step[n-1] - cost_after_M_step[n] < tol:
break
def separate(self, niter_MH, burnin):
Z_sampled = self.metropolis_hastings(self.niter_MH, self.burnin)
V_s_sampled = torch.zeros((self.F, self.T,
self.niter_MH - self.burnin), device=self.device)
for i in range(self.niter_MH - self.burnin):
V_s_sampled[:,:,i] = self.model.decode(Z_sampled[:,:,i])
V_s_sampled = V_s_sampled*self.g[:,:,None]
self.S_hat = self.Y * torch.mean(V_s_sampled /
(V_s_sampled + self.V_n[:,:,None]), axis=-1).cpu().numpy()
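# Minimal usage sketch under stated assumptions (not from the original project): MCEM expects
# a model exposing encode()/decode() (typically a trained VAE) and a complex STFT matrix Y of
# shape (F, T). The dummy model below only demonstrates the call sequence and tensor shapes.
class _DummyModel:
    def encode(self, X):
        return X[:8, :].clone()                    # pretend latent dimension D = 8
    def decode(self, Z):
        return torch.ones(513, Z.shape[1])         # pretend speech variances of shape (F, T)

if __name__ == "__main__":
    device = torch.device("cpu")
    Y = (np.random.randn(513, 50) + 1j * np.random.randn(513, 50)).astype(np.complex64)
    mcem = MCEM(Y, _DummyModel(), device, niter_MCEM=2, niter_MH=5, burnin=2)
    mcem.run()
    mcem.separate(niter_MH=5, burnin=2)
    print(mcem.S_hat.shape)                        # (513, 50)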
|
11561488
|
import logging
from asynctnt import Iterator
from asynctnt import Response
from asynctnt.exceptions import TarantoolSchemaError
from tests import BaseTarantoolTestCase
from tests.util import get_complex_param
class SelectTestCase(BaseTarantoolTestCase):
LOGGING_LEVEL = logging.INFO
async def _fill_data(self, count=3, space=None):
space = space or self.TESTER_SPACE_ID
data = []
for i in range(count):
t = [i, str(i), 1, 2, 'something']
data.append(t)
await self.conn.insert(space, t)
return data
async def _fill_data_dict(self, count=3):
data = []
for i in range(count):
t = {
'f1': i,
'f2': str(i),
'f3': 1,
'f4': 2,
'f5': 'something',
}
t = await self.conn.insert(self.TESTER_SPACE_ID, t)
data.append(dict(t[0]))
return data
async def test__select_by_id_empty_space(self):
res = await self.conn.select(self.TESTER_SPACE_ID)
self.assertIsInstance(res, Response, 'Got response')
self.assertEqual(res.code, 0, 'success')
self.assertGreater(res.sync, 0, 'sync > 0')
self.assertResponseEqual(res, [], 'Body ok')
async def test__select_by_id_non_empty_space(self):
data = await self._fill_data()
res = await self.conn.select(self.TESTER_SPACE_ID)
self.assertIsInstance(res, Response, 'Got response')
self.assertEqual(res.code, 0, 'success')
self.assertGreater(res.sync, 0, 'sync > 0')
self.assertResponseEqual(res, data, 'Body ok')
async def test__select_by_name_space_empty(self):
res = await self.conn.select(self.TESTER_SPACE_NAME)
self.assertIsInstance(res, Response, 'Got response')
self.assertEqual(res.code, 0, 'success')
self.assertGreater(res.sync, 0, 'sync > 0')
self.assertResponseEqual(res, [], 'Body ok')
async def test__select_by_name_non_empty_space(self):
data = await self._fill_data()
res = await self.conn.select(self.TESTER_SPACE_NAME)
self.assertIsInstance(res, Response, 'Got response')
self.assertEqual(res.code, 0, 'success')
self.assertGreater(res.sync, 0, 'sync > 0')
self.assertResponseEqual(res, data, 'Body ok')
async def test__select_by_index_id(self):
data = await self._fill_data()
res = await self.conn.select(self.TESTER_SPACE_ID, index=1)
self.assertIsInstance(res, Response, 'Got response')
self.assertEqual(res.code, 0, 'success')
self.assertGreater(res.sync, 0, 'sync > 0')
self.assertResponseEqual(res, data, 'Body ok')
async def test__select_by_index_name(self):
data = await self._fill_data()
res = await self.conn.select(self.TESTER_SPACE_ID, index='txt')
self.assertIsInstance(res, Response, 'Got response')
self.assertEqual(res.code, 0, 'success')
self.assertGreater(res.sync, 0, 'sync > 0')
self.assertResponseEqual(res, data, 'Body ok')
async def test__select_by_id_no_schema(self):
await self.tnt_reconnect(fetch_schema=False)
try:
await self.conn.select(self.TESTER_SPACE_ID)
except Exception as e:
self.fail(e)
async def test__select_by_name_no_schema(self):
await self.tnt_reconnect(fetch_schema=False)
with self.assertRaises(TarantoolSchemaError):
await self.conn.select(self.TESTER_SPACE_NAME)
async def test__select_by_index_id_no_schema(self):
await self.tnt_reconnect(fetch_schema=False)
try:
await self.conn.select(self.TESTER_SPACE_ID, index=1)
except Exception as e:
self.fail(e)
async def test__select_by_index_name_no_schema(self):
await self.tnt_reconnect(fetch_schema=False)
with self.assertRaises(TarantoolSchemaError):
await self.conn.select(self.TESTER_SPACE_NAME, index='txt')
async def test__select_by_key_one_item(self):
data = await self._fill_data()
res = await self.conn.select(self.TESTER_SPACE_NAME, [1])
self.assertResponseEqual(res, [data[1]], 'Body ok')
async def test__select_by_key_multiple_items_index(self):
data = await self._fill_data()
next_id = data[-1][0] + 1
next_txt = data[-1][1]
await self.conn.insert(self.TESTER_SPACE_ID,
[next_id, next_txt, 1, 2, 'text'])
data.append([next_id, next_txt, 1, 2, 'text'])
res = await self.conn.select(self.TESTER_SPACE_NAME, [next_txt],
index='txt')
self.assertResponseEqual(res, data[len(data)-2:], 'Body ok')
async def test__select_limit(self):
data = await self._fill_data()
res = await self.conn.select(self.TESTER_SPACE_NAME, limit=1)
self.assertResponseEqual(res, [data[0]], 'Body ok')
async def test__select_limit_offset(self):
data = await self._fill_data(4)
res = await self.conn.select(self.TESTER_SPACE_NAME,
limit=1, offset=2)
self.assertResponseEqual(res, [data[2]], 'Body ok')
async def test__select_iterator_class(self):
data = await self._fill_data(4)
res = await self.conn.select(self.TESTER_SPACE_NAME,
iterator=Iterator.GE)
self.assertResponseEqual(res, data, 'Body ok')
res = await self.conn.select(self.TESTER_SPACE_NAME,
iterator=Iterator.LE)
self.assertResponseEqual(res, list(reversed(data)), 'Body ok')
async def test__select_iterator_int(self):
data = await self._fill_data(4)
res = await self.conn.select(self.TESTER_SPACE_NAME,
iterator=4)
self.assertResponseEqual(res, list(reversed(data)), 'Body ok')
async def test__select_iterator_str(self):
data = await self._fill_data(4)
res = await self.conn.select(self.TESTER_SPACE_NAME,
iterator='LE')
self.assertResponseEqual(res, list(reversed(data)), 'Body ok')
async def test__select_complex(self):
p, p_cmp = get_complex_param(replace_bin=False)
data = [1, 'hello2', 1, 4, p_cmp]
await self.conn.insert(self.TESTER_SPACE_ID, data)
res = await self.conn.select(self.TESTER_SPACE_ID)
self.assertResponseEqual(res, [data], 'Body ok')
async def test__select_all_params(self):
data = await self._fill_data(10)
res = await self.conn.select(self.TESTER_SPACE_NAME,
index='primary',
limit=2, offset=1,
iterator=Iterator.LE)
self.assertResponseEqual(res, list(reversed(data))[1:3], 'Body ok')
async def test__select_all_by_hash_index(self):
data = await self._fill_data(4, space='no_schema_space')
res = await self.conn.select('no_schema_space',
index='primary_hash')
self.assertResponseEqual(res, data, 'Body ok')
async def test__select_key_tuple(self):
try:
await self.conn.select(self.TESTER_SPACE_ID, (1,))
except Exception as e:
self.fail(e)
async def test__select_invalid_types(self):
with self.assertRaisesRegex(
TypeError,
r'missing 1 required positional argument: \'space\''):
await self.conn.select()
with self.assertRaisesRegex(
TypeError,
r'sequence must be either list, tuple or dict'):
await self.conn.select(self.TESTER_SPACE_ID, 1)
with self.assertRaisesRegex(
TypeError, r'Index must be either str or int, got'):
await self.conn.select(self.TESTER_SPACE_ID, [1],
index=[1, 2])
with self.assertRaisesRegex(
TypeError, r'an integer is required'):
await self.conn.select(self.TESTER_SPACE_ID, [1],
index=1, limit='hello')
with self.assertRaisesRegex(
TypeError, r'an integer is required'):
await self.conn.select(self.TESTER_SPACE_ID, [1],
index=1, limit=1, offset='hello')
with self.assertRaisesRegex(
TypeError, r'Iterator is of unsupported type'):
await self.conn.select(self.TESTER_SPACE_ID, [1],
index=1, limit=1, offset=1,
iterator=[1, 2])
async def test__select_dict_key(self):
data = await self._fill_data()
res = await self.conn.select(self.TESTER_SPACE_ID, {
'f1': data[0][0]
})
self.assertResponseEqual(res, [data[0]], 'Body ok')
async def test__select_dict_key_wrong_field(self):
data = await self._fill_data()
res = await self.conn.select(self.TESTER_SPACE_ID, {
'f2': data[0][0]
})
self.assertResponseEqual(res, data, 'Body ok')
async def test__select_dict_key_other_index(self):
data = await self._fill_data()
res = await self.conn.select(self.TESTER_SPACE_ID, {
'f2': data[0][1]
}, index='txt')
self.assertResponseEqual(res, [data[0]], 'Body ok')
async def test__select_dict_resp(self):
data = await self._fill_data_dict()
res = await self.conn.select(self.TESTER_SPACE_ID, [])
self.assertResponseEqualKV(res, data)
|
11561567
|
from ..error import IdentifierError
from ..objects.customer import Customer
from .base import ResourceBase
class Customers(ResourceBase):
RESOURCE_ID_PREFIX = "cst_"
def get_resource_object(self, result):
return Customer(result, self.client)
def get(self, customer_id, **params):
if not customer_id or not customer_id.startswith(self.RESOURCE_ID_PREFIX):
raise IdentifierError(
f"Invalid customer ID: '{customer_id}'. A customer ID should start with '{self.RESOURCE_ID_PREFIX}'."
)
return super().get(customer_id, **params)
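# Illustrative behaviour sketch (client construction and IDs are hypothetical, kept as
# comments): IDs without the 'cst_' prefix are rejected before any API call is made.
#
#   customers = Customers(client)
#   customers.get('cst_8wmqcHMN4U')   # forwarded to the API
#   customers.get('12345')            # raises IdentifierError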
|
11561586
|
import asyncio
import pytest
import logging
from privex.helpers.exceptions import CacheNotFound
log = logging.getLogger(__name__)
try:
from privex.helpers.cache.asyncx import AsyncMemcachedCache
# r = AsyncRedisCache()
except ImportError:
pytest.skip(msg="Failed to import AsyncMemcachedCache (???)", allow_module_level=True)
AsyncMemcachedCache = object
# r = None
cleanup_keys = []
def _cleanup(key: str):
global cleanup_keys
cleanup_keys += [key]
@pytest.fixture()
async def rcache():
global cleanup_keys
async with AsyncMemcachedCache() as r:
yield r
# _rd = await r.redis
# _rd.close()
# await _rd.wait_close()
async with AsyncMemcachedCache() as ac:
log.info("Removing keys listed in cleanup_keys: %s", cleanup_keys)
await ac.remove(*cleanup_keys)
cleanup_keys = []
@pytest.mark.asyncio
async def test_cache_set(rcache: AsyncMemcachedCache):
_cleanup('test_cache_set')
await rcache.set('test_cache_set', 'hello world')
val = await rcache.get('test_cache_set')
assert val == 'hello world'
@pytest.mark.asyncio
async def test_cache_expire(rcache: AsyncMemcachedCache):
k, v = 'test_cache_expire', 'testing expire'
_cleanup(k)
await rcache.set(k, v, timeout=2)
assert await rcache.get(k) == v
await asyncio.sleep(3)
assert await rcache.get(k) is None
@pytest.mark.asyncio
async def test_cache_update_timeout(rcache: AsyncMemcachedCache):
"""Test that cache.update_timeout extends timeouts correctly"""
k, v = 'test_cache_update_timeout', 'update expiry test'
_cleanup(k)
await rcache.set(k, v, timeout=4)
assert await rcache.get(k) == v
await asyncio.sleep(1.5)
await rcache.update_timeout(k, timeout=10)
await asyncio.sleep(3.5)
assert await rcache.get(k) == v
@pytest.mark.asyncio
async def test_cache_update_timeout_raise(rcache: AsyncMemcachedCache):
try:
await rcache.update_timeout('test_cache_update_timeout_raise', timeout=10)
assert False
except CacheNotFound:
assert True
@pytest.mark.asyncio
async def test_cache_remove(rcache: AsyncMemcachedCache):
k, v = 'test_cache_remove', 'test cache removal'
_cleanup(k)
await rcache.set(k, v, timeout=30)
assert await rcache.get(k) == v
await rcache.remove(k)
assert await rcache.get(k) is None
|
11561663
|
from sllist import *
def test_push():
colors = SingleLinkedList()
colors.push("Pthalo Blue")
assert colors.count() == 1
colors.push("Ultramarine Blue")
assert colors.count() == 2
colors.push("Ultramarine Violet")
assert colors.count() == 3
def test_pop():
colors = SingleLinkedList()
colors.push("Magenta")
colors.push("Alizarin")
assert colors.pop() == "Alizarin"
assert colors.pop() == "Magenta"
assert colors.pop() == None
|
11561695
|
import cv2
import numpy as np
from pytorch_toolbelt.utils.torch_utils import rgb_image_from_tensor, to_numpy
from retinopathy.dataset import UNLABELED_CLASS
from retinopathy.models.regression import regression_to_class
def draw_classification_predictions(input: dict,
output: dict,
class_names,
image_key='image',
image_id_key='image_id',
targets_key='targets',
outputs_key='logits',
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)):
images = []
for image, target, image_id, logits in zip(input[image_key],
input[targets_key],
input[image_id_key],
output[outputs_key]):
image = rgb_image_from_tensor(image, mean, std)
num_classes = logits.size(0)
target = int(to_numpy(target).squeeze(0))
if num_classes == 1:
logits = int(to_numpy(logits).squeeze(0) > 0)
else:
logits = np.argmax(to_numpy(logits))
overlay = image.copy()
if target != UNLABELED_CLASS:
target_name = class_names[target]
else:
target_name = 'Unlabeled'
cv2.putText(overlay, str(image_id), (10, 15), cv2.FONT_HERSHEY_PLAIN, 1, (250, 250, 250))
cv2.putText(overlay, target_name, (10, 30), cv2.FONT_HERSHEY_PLAIN, 1, (0, 250, 0))
if target == logits:
cv2.putText(overlay, class_names[logits], (10, 45), cv2.FONT_HERSHEY_PLAIN, 1, (0, 250, 0))
else:
cv2.putText(overlay, class_names[logits], (10, 45), cv2.FONT_HERSHEY_PLAIN, 1, (250, 0, 0))
images.append(overlay)
return images
def draw_regression_predictions(input: dict,
output: dict,
class_names,
image_key='image',
image_id_key='image_id',
targets_key='targets',
outputs_key='regression',
unsupervised_label=None,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)):
images = []
for i, (image, target, image_id) in enumerate(zip(input[image_key],
input[targets_key],
input[image_id_key])):
diagnosis = output[outputs_key][i]
image = rgb_image_from_tensor(image, mean, std)
target = int(to_numpy(target).squeeze(0))
predicted_target = int(regression_to_class(diagnosis))
overlay = image.copy()
if 'stn' in output:
stn = rgb_image_from_tensor(output['stn'][i], mean, std)
overlay = np.hstack((overlay, stn))
cv2.putText(overlay, str(image_id), (10, 15), cv2.FONT_HERSHEY_PLAIN, 1, (250, 250, 250))
if target != unsupervised_label:
cv2.putText(overlay, f'{class_names[target]} ({target})', (10, 30),
cv2.FONT_HERSHEY_PLAIN, 1, (0, 250, 0))
else:
cv2.putText(overlay, f'Unlabeled ({target})', (10, 30),
cv2.FONT_HERSHEY_PLAIN, 1, (0, 250, 0))
cv2.putText(overlay, f'{class_names[predicted_target]} ({predicted_target}/{float(diagnosis)})', (10, 45),
cv2.FONT_HERSHEY_PLAIN, 1, (0, 250, 250))
images.append(overlay)
return images
|
11561720
|
import os
from smartsim.settings import SbatchSettings, SrunSettings
# ------ Srun ------------------------------------------------
def test_srun_settings():
settings = SrunSettings("python")
settings.set_nodes(5)
settings.set_cpus_per_task(2)
settings.set_tasks(100)
settings.set_tasks_per_node(20)
formatted = settings.format_run_args()
result = ["--nodes=5", "--cpus-per-task=2", "--ntasks=100", "--ntasks-per-node=20"]
assert formatted == result
def test_srun_args():
"""Test the possible user overrides through run_args"""
run_args = {
"account": "A3123",
"exclusive": None,
"C": "P100", # test single letter variables
"nodes": 10,
"ntasks": 100,
}
settings = SrunSettings("python", run_args=run_args)
formatted = settings.format_run_args()
result = [
"--account=A3123",
"--exclusive",
"-C",
"P100",
"--nodes=10",
"--ntasks=100",
]
assert formatted == result
def test_update_env():
env_vars = {"OMP_NUM_THREADS": 20, "LOGGING": "verbose"}
settings = SrunSettings("python", env_vars=env_vars)
settings.update_env({"OMP_NUM_THREADS": 10})
assert settings.env_vars["OMP_NUM_THREADS"] == 10
def test_format_env():
env_vars = {"OMP_NUM_THREADS": 20, "LOGGING": "verbose", "SSKEYIN": "name_0,name_1"}
settings = SrunSettings("python", env_vars=env_vars)
formatted, comma_separated_formatted = settings.format_env_vars()
assert "OMP_NUM_THREADS" in formatted
assert "LOGGING" in formatted
assert "SSKEYIN" in formatted
assert "SSKEYIN=name_0,name_1" in comma_separated_formatted
# ---- Sbatch ---------------------------------------------------
def test_sbatch_settings():
sbatch = SbatchSettings(nodes=1, time="10:00:00", account="A3123")
formatted = sbatch.format_batch_args()
result = ["--nodes=1", "--time=10:00:00", "--account=A3123"]
assert formatted == result
def test_sbatch_manual():
sbatch = SbatchSettings()
sbatch.set_nodes(5)
sbatch.set_account("A3531")
sbatch.set_walltime("10:00:00")
formatted = sbatch.format_batch_args()
result = ["--nodes=5", "--account=A3531", "--time=10:00:00"]
assert formatted == result
def test_change_batch_cmd():
sbatch = SbatchSettings()
sbatch.set_batch_command("qsub")
assert sbatch._batch_cmd == "qsub"
|
11561740
|
from binascii import hexlify, unhexlify
from secp256k1 import Secp256k1 as Secp256k1_base, Message, SECRET_KEY_SIZE
from secp256k1.key import SecretKey, PublicKey
from ._libsecp256k1 import ffi, lib
PEDERSEN_COMMITMENT_SIZE = 33
MAX_PROOF_SIZE = 675
PROOF_MSG_SIZE = 64
MAX_WIDTH = 1 << 20
# Pedersen Commitment xG+vH
class Commitment:
def __init__(self, secp):
assert isinstance(secp, Secp256k1)
self.commitment = ffi.new("secp256k1_pedersen_commitment *")
self.secp = secp
def __eq__(self, other):
return isinstance(other, Commitment) and self.to_bytearray(self.secp) == other.to_bytearray(other.secp)
def __str__(self):
return "Commitment<{}>".format(self.to_hex(self.secp).decode())
def __repr__(self):
return self.__str__()
def to_bytearray(self, secp) -> bytearray:
assert isinstance(secp, Secp256k1)
out = ffi.new("char [%d]" % PEDERSEN_COMMITMENT_SIZE)
res = lib.secp256k1_pedersen_commitment_serialize(secp.ctx, out, self.commitment)
assert res, "Unable to serialize"
return bytearray(ffi.buffer(out, PEDERSEN_COMMITMENT_SIZE))
def to_hex(self, secp) -> bytes:
return hexlify(self.to_bytearray(secp))
def to_public_key(self, secp) -> PublicKey:
assert isinstance(secp, Secp256k1)
obj = PublicKey(secp)
res = lib.secp256k1_pedersen_commitment_to_pubkey(secp.ctx, obj.key, self.commitment)
assert res, "Unable to convert to public key"
return obj
@staticmethod
def from_bytearray(secp, data: bytearray):
assert isinstance(secp, Secp256k1)
input = bytearray([0] * PEDERSEN_COMMITMENT_SIZE)
for i in range(min(len(data), PEDERSEN_COMMITMENT_SIZE)):
input[i] = data[i]
obj = Commitment(secp)
res = lib.secp256k1_pedersen_commitment_parse(secp.ctx, obj.commitment, bytes(input))
assert res, "Invalid commitment"
return obj
@staticmethod
def from_hex(secp, data: bytes):
return Commitment.from_bytearray(secp, bytearray(unhexlify(data)))
class RangeProof:
def __init__(self, proof: bytearray):
self.proof = proof
self.proof_len = len(proof)
def __eq__(self, other):
return isinstance(other, RangeProof) and self.proof == other.proof
def __str__(self):
return "RangeProof<len={}, {}>".format(self.proof_len, hexlify(self.proof[0:8]).decode())
def __repr__(self):
return self.__str__()
def to_bytearray(self) -> bytearray:
return self.proof[:]
def to_hex(self) -> bytes:
return hexlify(bytes(self.proof))
@staticmethod
def from_bytearray(data: bytearray):
assert len(data) <= MAX_PROOF_SIZE, "Invalid proof size"
return RangeProof(data)
@staticmethod
def from_hex(data: bytes):
return RangeProof.from_bytearray(bytearray(unhexlify(data)))
class Secp256k1(Secp256k1_base):
def __init__(self, ctx, flags):
super().__init__(ctx, flags)
self.GENERATOR_G = ffi.new("secp256k1_generator *", [bytes([
0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb, 0xac,
0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, 0x0b, 0x07,
0x02, 0x9b, 0xfc, 0xdb, 0x2d, 0xce, 0x28, 0xd9,
0x59, 0xf2, 0x81, 0x5b, 0x16, 0xf8, 0x17, 0x98,
0x48, 0x3a, 0xda, 0x77, 0x26, 0xa3, 0xc4, 0x65,
0x5d, 0xa4, 0xfb, 0xfc, 0x0e, 0x11, 0x08, 0xa8,
0xfd, 0x17, 0xb4, 0x48, 0xa6, 0x85, 0x54, 0x19,
0x9c, 0x47, 0xd0, 0x8f, 0xfb, 0x10, 0xd4, 0xb8
])])
self.GENERATOR_H = ffi.new("secp256k1_generator *", [bytes([
0x50, 0x92, 0x9b, 0x74, 0xc1, 0xa0, 0x49, 0x54,
0xb7, 0x8b, 0x4b, 0x60, 0x35, 0xe9, 0x7a, 0x5e,
0x07, 0x8a, 0x5a, 0x0f, 0x28, 0xec, 0x96, 0xd5,
0x47, 0xbf, 0xee, 0x9a, 0xce, 0x80, 0x3a, 0xc0,
0x31, 0xd3, 0xc6, 0x86, 0x39, 0x73, 0x92, 0x6e,
0x04, 0x9e, 0x63, 0x7c, 0xb1, 0xb5, 0xf4, 0x0a,
0x36, 0xda, 0xc2, 0x8a, 0xf1, 0x76, 0x69, 0x68,
0xc3, 0x0c, 0x23, 0x13, 0xf3, 0xa3, 0x89, 0x04
])])
self.gens = lib.secp256k1_bulletproof_generators_create(self.ctx, self.GENERATOR_G, 256)
def commit(self, value: int, blind) -> Commitment:
obj = Commitment(self)
res = lib.secp256k1_pedersen_commit(self.ctx, obj.commitment, bytes(blind.key), value,
self.GENERATOR_H, self.GENERATOR_G)
assert res, "Unable to commit"
return obj
def commit_value(self, value: int) -> Commitment:
blind = SecretKey()
return self.commit(value, blind)
def commit_sum(self, positives, negatives) -> Commitment:
pos = []
for positive in positives:
assert isinstance(positive, Commitment)
pos.append(positive.commitment)
neg = []
for negative in negatives:
assert isinstance(negative, Commitment)
neg.append(negative.commitment)
commit_sum = Commitment(self)
res = lib.secp256k1_pedersen_commit_sum(self.ctx, commit_sum.commitment, pos, len(pos), neg, len(neg))
assert res, "Unable to sum commitments"
return commit_sum
def blind_sum(self, positives, negatives) -> SecretKey:
keys = []
for positive in positives:
assert isinstance(positive, SecretKey)
keys.append(ffi.new("char []", bytes(positive.key)))
for negative in negatives:
assert isinstance(negative, SecretKey)
keys.append(ffi.new("char []", bytes(negative.key)))
sum_key = ffi.new("char []", SECRET_KEY_SIZE)
ret = lib.secp256k1_pedersen_blind_sum(self.ctx, sum_key, keys, len(keys), len(positives))
assert ret, "Unable to sum blinding factors"
return SecretKey.from_bytearray(self, bytearray(ffi.buffer(sum_key, SECRET_KEY_SIZE)))
def sign(self, secret_key: SecretKey, message: bytearray):
assert len(message) == 32, "Invalid message length"
signature_obj = ffi.new("secp256k1_ecdsa_signature *")
res = lib.secp256k1_ecdsa_sign(
self.ctx, signature_obj, bytes(message), bytes(secret_key.key), ffi.NULL, ffi.NULL
)
assert res, "Unable to generate signature"
signature_ptr = ffi.new("char []", 80)
signature_len_ptr = ffi.new("size_t *", 80)
res = lib.secp256k1_ecdsa_signature_serialize_der(
self.ctx, signature_ptr, signature_len_ptr, signature_obj
)
assert res, "Unable to DER serialize signature"
return bytearray(ffi.buffer(signature_ptr, signature_len_ptr[0]))
def sign_recoverable(self, secret_key: SecretKey, message: bytearray) -> bytearray:
assert len(message) == 32, "Invalid message length"
signature_obj = ffi.new("secp256k1_ecdsa_recoverable_signature *")
res = lib.secp256k1_ecdsa_sign_recoverable(
self.ctx, signature_obj, bytes(message), bytes(secret_key.key), ffi.NULL, ffi.NULL
)
assert res, "Unable to generate recoverable signature"
signature_ptr = ffi.new("char []", 64)
rec_id_ptr = ffi.new("int *")
res = lib.secp256k1_ecdsa_recoverable_signature_serialize_compact(
self.ctx, signature_ptr, rec_id_ptr, signature_obj
)
assert res, "Unable to serialize recoverable signature"
signature = bytearray(ffi.buffer(signature_ptr, 64))
signature.append(rec_id_ptr[0])
return signature
def bullet_proof(self, value: int, blind: SecretKey, nonce: SecretKey, extra_data: bytearray) -> RangeProof:
proof_ptr = ffi.new("char []", MAX_PROOF_SIZE)
proof_len_ptr = ffi.new("size_t *", MAX_PROOF_SIZE)
blind_key = ffi.new("char []", bytes(blind.key))
scratch = lib.secp256k1_scratch_space_create(self.ctx, 256 * MAX_WIDTH)
res = lib.secp256k1_bulletproof_rangeproof_prove(
self.ctx, scratch, self.gens, proof_ptr, proof_len_ptr, ffi.NULL, ffi.NULL, ffi.NULL,
[value], ffi.NULL, [blind_key], ffi.NULL, 1, self.GENERATOR_H, 64, bytes(nonce.key), ffi.NULL,
bytes(extra_data), len(extra_data), ffi.NULL
)
obj = RangeProof.from_bytearray(bytearray(ffi.buffer(proof_ptr, proof_len_ptr[0])))
lib.secp256k1_scratch_space_destroy(scratch)
assert res, "Unable to generate bulletproof"
return obj
def bullet_proof_multisig_1(self, value: int, blind: SecretKey, commit: Commitment, common_nonce: SecretKey,
nonce: SecretKey, extra_data: bytearray) -> (PublicKey, PublicKey):
scratch = lib.secp256k1_scratch_space_create(self.ctx, 256 * MAX_WIDTH)
t_1 = PublicKey(self)
t_2 = PublicKey(self)
blind_key = ffi.new("char []", bytes(blind.key))
res = lib.secp256k1_bulletproof_rangeproof_prove(
self.ctx, scratch, self.gens, ffi.NULL, ffi.NULL, ffi.NULL, t_1.key, t_2.key, [value],
ffi.NULL, [blind_key], [commit.commitment], 1, self.GENERATOR_H, 64, bytes(common_nonce.key),
bytes(nonce.key), bytes(extra_data), len(extra_data), ffi.NULL
)
lib.secp256k1_scratch_space_destroy(scratch)
assert res, "Unable to generate multisig bulletproof"
return t_1, t_2
def bullet_proof_multisig_2(self, value: int, blind: SecretKey, commit: Commitment, common_nonce: SecretKey,
nonce: SecretKey, t_1: PublicKey, t_2: PublicKey, extra_data: bytearray) -> SecretKey:
scratch = lib.secp256k1_scratch_space_create(self.ctx, 256 * MAX_WIDTH)
tau_x_ptr = ffi.new("char []", 32)
blind_key = ffi.new("char []", bytes(blind.key))
res = lib.secp256k1_bulletproof_rangeproof_prove(
self.ctx, scratch, self.gens, ffi.NULL, ffi.NULL, tau_x_ptr, t_1.key, t_2.key, [value],
ffi.NULL, [blind_key], [commit.commitment], 1, self.GENERATOR_H, 64, bytes(common_nonce.key),
bytes(nonce.key), bytes(extra_data), len(extra_data), ffi.NULL
)
lib.secp256k1_scratch_space_destroy(scratch)
assert res, "Unable to generate multisig bulletproof"
return SecretKey.from_bytearray(self, bytearray(ffi.buffer(tau_x_ptr, 32)))
def bullet_proof_multisig_3(self, value: int, blind: SecretKey, commit: Commitment, common_nonce: SecretKey,
nonce: SecretKey, t_1: PublicKey, t_2: PublicKey, tau_x: SecretKey,
extra_data: bytearray) -> RangeProof:
scratch = lib.secp256k1_scratch_space_create(self.ctx, 256 * MAX_WIDTH)
proof_ptr = ffi.new("char []", MAX_PROOF_SIZE)
proof_len_ptr = ffi.new("size_t *", MAX_PROOF_SIZE)
tau_x_ptr = ffi.new("char []", bytes(tau_x.to_bytearray()))
blind_key = ffi.new("char []", bytes(blind.key))
res = lib.secp256k1_bulletproof_rangeproof_prove(
self.ctx, scratch, self.gens, proof_ptr, proof_len_ptr, tau_x_ptr, t_1.key, t_2.key,
[value], ffi.NULL, [blind_key], [commit.commitment], 1, self.GENERATOR_H, 64, bytes(common_nonce.key),
bytes(nonce.key), bytes(extra_data), len(extra_data), ffi.NULL
)
obj = RangeProof.from_bytearray(bytearray(ffi.buffer(proof_ptr, proof_len_ptr[0])))
lib.secp256k1_scratch_space_destroy(scratch)
assert res, "Unable to generate multisig bulletproof"
return obj
def verify_bullet_proof(self, commit: Commitment, proof: RangeProof, extra_data: bytearray) -> bool:
scratch = lib.secp256k1_scratch_space_create(self.ctx, 256 * MAX_WIDTH)
res = lib.secp256k1_bulletproof_rangeproof_verify(
self.ctx, scratch, self.gens, bytes(proof.proof), proof.proof_len, ffi.NULL, commit.commitment,
1, 64, self.GENERATOR_H, bytes(extra_data), len(extra_data)
)
lib.secp256k1_scratch_space_destroy(scratch)
return res == 1
def ethereum_signature(data: bytearray) -> (bytes, bytes, int):
assert len(data) == 65
r = b"0x"+hexlify(bytes(data[:32]))
s = b"0x"+hexlify(bytes(data[32:64]))
v = int.from_bytes(bytes(data[64:]), "big") + 27
return r, s, v
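# Illustrative usage (the 65-byte value below is synthetic, not a real signature): a compact
# recoverable signature (64 bytes) followed by the recovery id splits into r, s, v, where
# v = recovery_id + 27.
#   fake_sig = bytearray(range(64)) + bytearray([1])
#   r, s, v = ethereum_signature(fake_sig)   # v == 28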
|
11561758
|
import logging
import os
import hashlib
from galaxy_importer import exceptions as exc
log = logging.getLogger(__name__)
def sha256sum_from_fo(fo):
block_size = 65536
sha256 = hashlib.sha256()
for block in iter(lambda: fo.read(block_size), b""):
sha256.update(block)
return sha256.hexdigest()
def sha256sum_from_path(filename):
with open(filename, "rb") as fo:
return sha256sum_from_fo(fo)
def check_artifact_file(path_prefix, artifact_file):
"""Check existences of artifact_file on fs and check the chksum matches
Args:
path_prefix (str): Any file path prefix we need to add to file paths in the
CollectionArtifactFile artifact_file
artifact_file (CollectionArtifactFile): object with the expected info about
the file on the fs that will be checked.
This info includes name, type, path, and checksum.
Raises:
CollectionArtifactFileNotFound: If artifact_file is not found on the file system.
CollectionArtifactFileChecksumError: If the sha256sum of the on disk
artifact_file contents does not match artifact_file.chksum_sha256.
Returns:
bool: True if artifact_file check is ok, otherwise should raise exception
"""
log.debug("artifact_file: %s", artifact_file)
artifact_file_path = os.path.join(path_prefix, artifact_file.name)
if not os.path.exists(artifact_file_path):
msg = f"The file ({artifact_file.name}) was not found"
raise exc.CollectionArtifactFileNotFound(missing_file=artifact_file.name, msg=msg)
actual_chksum = sha256sum_from_path(artifact_file_path)
if actual_chksum != artifact_file.chksum_sha256:
err_msg = (
f"File {artifact_file.name} sha256sum should be "
f"{artifact_file.chksum_sha256} but the actual sha256sum was {actual_chksum}"
)
log.error(err_msg)
raise exc.CollectionArtifactFileChecksumError(err_msg)
return True
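# Illustrative usage sketch (paths and the artifact object are made up): any object exposing
# `name` and `chksum_sha256` attributes is enough to drive check_artifact_file().
if __name__ == "__main__":
    import collections
    import tempfile
    FakeArtifactFile = collections.namedtuple("FakeArtifactFile", "name chksum_sha256")
    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, "README.md")
        with open(path, "wb") as fo:
            fo.write(b"hello collection\n")
        artifact = FakeArtifactFile("README.md", sha256sum_from_path(path))
        print(check_artifact_file(tmpdir, artifact))  # True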
|
11561762
|
from .canvas import CanvasPDK
class tfr_prim(CanvasPDK):
def __init__(self, *args, **kwargs):
super().__init__()
self.metadata = {'instances': []}
def generate(self, ports, netlist_parameters=None, layout_parameters=None, *args, **kwargs):
assert len(ports) == 2
b_idx = (4, -1)
e_idx = (7, -1)
self.addWire(self.m2, ports[0], 12, b_idx, e_idx, netType = "pin")
self.addWire(self.m2, ports[1], 2, b_idx, e_idx, netType = "pin")
x1 = self.pdk['Poly']['Pitch']*(10)
y1 = self.pdk['M2']['Pitch']*(14)
bbox = [0, 0, x1, y1]
t = {'layer': 'Boundary', 'netName': None, 'rect': bbox, 'netType': 'drawing'}
self.terminals.append(t)
# Additional metadata for layout post-processing
self.metadata['instances'].append({'sample_key': 'sample_value'})
return {"bbox": bbox, "instance": {}, "terminals": self.terminals}
|
11561797
|
import os
import pytest
import shutil
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql import DataFrame
from gensim.models import KeyedVectors
from gensim.models import Word2Vec as GensimW2V
from pyspark.ml.feature import Word2VecModel as SparkW2VModel
from node2vec.embedding import Node2VecBase
from node2vec.embedding import Node2VecGensim
from node2vec.embedding import Node2VecSpark
#
def test_class_node2vecbase():
"""
test class Node2VecBase
"""
n2v = Node2VecBase()
with pytest.raises(NotImplementedError):
n2v.fit()
with pytest.raises(NotImplementedError):
n2v.embedding()
with pytest.raises(NotImplementedError):
n2v.get_vector(0)
with pytest.raises(NotImplementedError):
n2v.save_model("file:///a", "b")
with pytest.raises(NotImplementedError):
n2v.load_model("file:///a", "b")
def test_class_node2vecgensim():
"""
test class Node2VecGensim
"""
df = pd.DataFrame.from_dict({
"walk": [[0, 1, 1, 0, 3, 4], [1, 2, 3, 2, 0, 4], [2, 3, 1, 0, 4, 4]]
})
n2v = Node2VecGensim(df, {})
assert isinstance(n2v, Node2VecGensim)
n2v = Node2VecGensim(
df, w2v_params={"iter": 3}, window_size=6, vector_size=64, random_seed=1000,
)
assert isinstance(n2v, Node2VecGensim)
pytest.raises(ValueError, Node2VecGensim, df, {}, window_size=3)
pytest.raises(ValueError, Node2VecGensim, df, {}, vector_size=16)
w2v_params = {"min_count": 0, "iter": 1, "seed": 1000, "batch_words": 1,
"size": 4, "workers": 4}
n2v = Node2VecGensim(df, w2v_params=w2v_params)
model = n2v.fit()
assert isinstance(model, GensimW2V)
df_res = n2v.embedding()
assert isinstance(df_res, pd.DataFrame)
assert len(df_res) > 0
assert list(df_res.columns) == ["id", "vector"]
assert len(n2v.get_vector(vertex_id='0')) > 0
assert len(n2v.get_vector(vertex_id=1)) > 0
n2v.save_model("./", "tmp")
assert os.path.exists("./tmp.model")
assert isinstance(n2v.load_model("./", "tmp"), GensimW2V)
os.remove("./tmp.model")
n2v.save_vectors("./", "tmp_vec")
assert os.path.exists("./tmp_vec")
assert isinstance(n2v.load_vectors("./", "tmp_vec"), KeyedVectors)
os.remove("./tmp_vec")
name_id = pd.DataFrame.from_dict({
"name": ["a", "b", "c", "d", "e"], "id": [0, 1, 2, 3, 4]
})
n2v = Node2VecGensim(df, w2v_params, name_id=name_id)
with pytest.raises(ValueError):
n2v.embedding()
n2v.fit()
df_res = n2v.embedding()
assert isinstance(df_res, pd.DataFrame)
assert len(df_res) > 0
assert list(df_res.columns) == ["name", "vector"]
def test_class_node2vecspark():
"""
test class Node2VecSpark
"""
spark = SparkSession.builder.config("spark.executor.cores", 4).getOrCreate()
df = spark.createDataFrame(pd.DataFrame.from_dict({
"walk": [[0, 1, 1, 0, 3, 4], [1, 2, 3, 2, 0, 4], [2, 3, 1, 0, 4, 4]]
}))
n2v = Node2VecSpark(df, w2v_params={})
assert isinstance(n2v, Node2VecSpark)
n2v = Node2VecSpark(
df, w2v_params={"maxIter": 3}, window_size=6, vector_size=64, random_seed=1000,
)
assert isinstance(n2v, Node2VecSpark)
pytest.raises(ValueError, Node2VecSpark, df, {}, window_size=3)
pytest.raises(ValueError, Node2VecSpark, df, {}, vector_size=16)
w2v_params = {"minCount": 0, "maxIter": 1, "seed": 1000, "maxSentenceLength": 1,
"windowSize": 4}
n2v = Node2VecSpark(df, w2v_params=w2v_params)
model = n2v.fit()
assert model is not None
df_res = n2v.embedding()
assert isinstance(df_res, DataFrame)
assert df_res.count() > 0
assert sorted(df_res.columns) == ["id", "vector"]
assert len(list(n2v.get_vector(vertex_id=1))) > 0
assert len(list(n2v.get_vector(vertex_id="1"))) > 0
n2v.save_model("./", "tmp")
assert os.path.exists("./tmp.sparkml")
assert isinstance(n2v.load_model("./", "tmp"), SparkW2VModel)
shutil.rmtree("./tmp.sparkml")
name_id = spark.createDataFrame(pd.DataFrame.from_dict({
"name": ["a", "b", "c", "d", "e"], "id": [0, 1, 2, 3, 4]
}))
n2v = Node2VecSpark(df, w2v_params, name_id=name_id)
with pytest.raises(ValueError):
n2v.embedding()
n2v.fit()
df_res = n2v.embedding()
assert isinstance(df_res, DataFrame)
assert df_res.count() > 0
assert sorted(df_res.columns) == ["name", "vector"]
|
11561800
|
import gzip
import os
import logging
from genomic_tools_lib import Utilities, Logging
from genomic_tools_lib.data_management import KeyedDataSource, TextFileTools
def _to_al(comps):
return "{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(*comps).encode()
def _to_gl(comps, format):
return format.format(*comps).encode()
def run(args):
Utilities.ensure_requisite_folders(args.output_prefix)
logging.info("Loading snp reference")
key = KeyedDataSource.load_data(args.snp_reference_file, "variant_id", "rs_id_dbSNP150_GRCh38p7", value_conversion=KeyedDataSource.dot_to_na)
logging.info("Loading samples")
samples = TextFileTools.load_list(args.samples)
genotype_format_string = "\t".join(["{}"]*(len(samples)+1))+ "\n"
og = args.output_prefix + "_genotype.txt.gz"
oa = args.output_prefix + "_annotation.txt.gz"
if os.path.exists(og) or os.path.exists(oa):
logging.info("Output exists. Nope.")
return
logging.info("Processing")
with gzip.open(args.genotype) as geno:
with gzip.open(og, "w") as _og:
_og.write(_to_gl(["varID"]+samples, genotype_format_string))
with gzip.open(oa, "w") as _oa:
_oa.write(_to_al(["chromosome", "position", "id", "allele_0", "allele_1", "allele_1_frequency" , "rsid"]))
for i, line in enumerate(geno):
comps = line.decode().strip().split()
chr = "chr"+ comps[0]
pos = comps[2]
ref = comps[3]
alt = comps[4]
af = comps[5]
dosage = comps[6:]
var_id = "{}_{}_{}_{}_b38".format(chr, pos, ref, alt)
if var_id in key:
id = key[var_id]
comps[1] = var_id
_og.write(_to_gl([var_id]+dosage, genotype_format_string))
_oa.write(_to_al([chr, pos, var_id, ref, alt, af, id]))
                        continue
var_id = "{}_{}_{}_{}_b38".format(chr, pos, alt, ref)
if var_id in key and len(ref) == 1 and len(alt) == 1:
id = key[var_id]
af = str(1-float(af))
dosage = list(map(lambda x: str(2-int(x)), comps[6:]))
_og.write(_to_gl([var_id]+dosage, genotype_format_string))
_oa.write(_to_al([chr, pos, var_id, alt, ref, af, id]))
                        continue
logging.info("Finished conversion")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Convert genotypes in -PrediXcan Format- to -Model training format-")
parser.add_argument("-genotype")
parser.add_argument("-samples")
parser.add_argument("-snp_reference_file")
parser.add_argument("-output_prefix")
parser.add_argument("-parsimony", help="Log parsimony level. 1 is everything being logged. 10 is only high level messages, above 10 will hardly log anything", default=10, type =int)
args = parser.parse_args()
Logging.configure_logging(args.parsimony)
run(args)
|
11561827
|
import terrascript
import terrascript.provider
class TestVariable:
def __init__(self):
self.cfg = terrascript.Terrascript()
def test_one_variable(self):
v = terrascript.Variable("name", type="string", default="Hello World")
assert isinstance(v, terrascript.NamedBlock)
assert isinstance(v, terrascript.Variable)
assert v._name == "name"
assert v["type"] == "string"
assert v["default"] == "Hello World"
self.cfg += v
assert len(self.cfg["variable"].keys()) == 1
assert self.cfg["variable"]["name"]["type"] == "string"
assert self.cfg["variable"]["name"]["default"] == "Hello World"
def test_two_variables(self):
v1 = terrascript.Variable("name1", type="string", default="Me")
v2 = terrascript.Variable("name2", type="string", default="You")
self.cfg += v1
self.cfg += v2
assert len(self.cfg["variable"].keys()) == 2
assert self.cfg["variable"]["name1"]["type"] == "string"
assert self.cfg["variable"]["name1"]["default"] == "Me"
assert self.cfg["variable"]["name2"]["type"] == "string"
assert self.cfg["variable"]["name2"]["default"] == "You"
def test_duplicate_names(self):
v1 = terrascript.Variable("name", type="string", default="Me")
v2 = terrascript.Variable("name", type="string", default="You")
self.cfg += v1
self.cfg += v2 # Overwrites v1
assert len(self.cfg["variable"].keys()) == 1
assert self.cfg["variable"]["name"]["type"] == "string"
assert self.cfg["variable"]["name"]["default"] == "You"
def test_string_interpolation(self):
var = terrascript.Variable("myvar", type="string", default="myval")
expected_value = "${var.myvar}"
assert expected_value == str(
var
), "String interpolation of variable did not return its reference"
assert expected_value == repr(
var
), "String representation of variable did not return its reference"
expected_value = "embeded-${var.myvar}"
assert (
expected_value == f"embeded-{var}"
), "Formatting a string with variable did not insert reference"
def test_adding_variable_to_block(self):
""" Variables added to Block should be added as reference """
var = terrascript.Variable("myvar", type="string", default="value")
resource = terrascript.Block(variable=var)
assert (
"${var.myvar}" == resource["variable"]
), "Adding Variable to Block did not insert reference"
def test_adding_variable_to_resource(self):
""" Variables added to Resource should be added as reference """
var = terrascript.Variable("myvar", type="string", default="value")
resource = terrascript.Resource("Test", var=var)
assert (
"${var.myvar}" == resource.var
), "Adding Variable to Resource did not insert reference"
|
11561859
|
from __future__ import print_function
import sys
import time
import json
import boto3
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from datetime import datetime
#from cufacesearch.common.conf_reader import ConfReader
from ..common.conf_reader import ConfReader
# Cannot be imported?
#from botocore.errorfactory import ExpiredIteratorException
def get_random_sha1():
from hashlib import sha1
import random
return sha1(str(random.getrandbits(256)).encode('utf-8')).hexdigest().upper()
# TODO: Should we have a GenericPusher class that exposes the `send` message method?
class KinesisPusher(ConfReader):
"""KinesisPusher
"""
def __init__(self, global_conf, prefix="", pid=None):
"""KinesisPusher constructor
:param global_conf: configuration file or dictionary
:type global_conf: str, dict
:param prefix: prefix in configuration file
:type prefix: str
:type prefix: str
:param pid: process id
:type pid: int
"""
        # When running as a daemon, save process id
self.pid = pid
self.verbose = 1
super(KinesisPusher, self).__init__(global_conf, prefix)
# Set print prefix
self.set_pp(pp=self.get_param("pp"))
print('[{}: log] verbose level is: {}'.format(self.pp, self.verbose))
# Initialize attributes
self.client = None
self.shard_iters = dict()
self.shard_infos = dict()
self.stream_name = self.get_required_param('stream_name')
# Initialize stats attributes
self.push_count = 0
self.last_display = 0
self.display_count = 1000
self.start_time = time.time()
# Initialize everything
self.init_pusher()
def set_pp(self, pp=None):
"""Set pretty print name
:param pp: pretty print name, default will be `KinesisPusher`
:type pp: str
"""
if pp is not None:
self.pp = pp
else:
self.pp = "KinesisPusher"
def init_client(self):
"""Initialize Kinesis client.
"""
region_name = self.get_required_param('region_name')
aws_profile = self.get_param('aws_profile', None)
# This is mostly to be able to test locally
endpoint_url = self.get_param('endpoint_url', None)
# This trigger some error
verify = self.get_param('verify_certificates', True)
use_ssl = self.get_param('use_ssl', True)
# Use session and profile
self.session = boto3.Session(profile_name=aws_profile, region_name=region_name)
self.client = self.session.client('kinesis', endpoint_url=endpoint_url, verify=verify,
use_ssl=use_ssl)
def init_pusher(self):
"""Initialize stream shards infos
"""
self.init_client()
# Get stream initialization related parameters
nb_trials = self.get_param('nb_trials', 3)
# Check stream is active
tries = 0
while tries < nb_trials:
tries += 1
try:
response = self.client.describe_stream(StreamName=self.stream_name)
if response['StreamDescription']['StreamStatus'] == 'ACTIVE':
break
# Can we catch ResourceNotFound and create stream here?
except Exception as inst:
# Create stream
if self.get_param('create_stream', False):
try:
nb_shards = self.get_param('nb_shards', 2)
self.client.create_stream(StreamName=self.stream_name, ShardCount=nb_shards)
except Exception as create_inst:
msg = "[{}: Warning] Trial #{}: could not create kinesis stream: {}. {}"
print(msg.format(self.pp, tries, self.stream_name, create_inst))
msg = "[{}: Warning] Trial #{}: could not describe kinesis stream : {}. {}"
print(msg.format(self.pp, tries, self.stream_name, inst))
time.sleep(1)
else:
msg = "[{}: ERROR] Stream {} not active after {} trials. Aborting..."
raise RuntimeError(msg.format(self.pp, self.stream_name, nb_trials))
def send(self, msg):
"""Push `msg` to `self.stream_name`
:param msg: message to be pushed
:type msg: str, dict
"""
# Check if msg was already JSON dumped
if isinstance(msg, dict):
msg = json.dumps(msg).encode('utf-8')
# Use a random sha1 as partition key
single_rec = [{'Data': msg, 'PartitionKey': get_random_sha1()}]
self.client.put_records(Records=single_rec, StreamName=self.stream_name)
self.push_count += 1
if self.verbose > 1:
self.print_stats()
def print_stats(self):
"""Print statistics of producer
"""
if self.push_count - self.last_display > self.display_count:
display_time = datetime.today().strftime('%Y/%m/%d-%H:%M.%S')
print_msg = "[{} at {}] Push count: {}"
print(print_msg.format(self.pp, display_time, self.push_count))
sys.stdout.flush()
self.last_display = self.push_count
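# --- Usage sketch (added for illustration, hedged) ---
# A minimal sketch of driving this pusher, assuming ConfReader accepts a
# plain dict whose keys match the get_param/get_required_param names used
# above; the real configuration schema (and any prefix handling) may differ.
# conf = {"stream_name": "test-stream", "region_name": "us-east-1",
#         "endpoint_url": "https://localhost:4567", "verify_certificates": False,
#         "create_stream": True, "nb_shards": 1}
# pusher = KinesisPusher(conf)
# pusher.send({"sha1": get_random_sha1(), "received": str(datetime.now())})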
|
11561861
|
from opytimizer.optimizers.swarm import FSO
# One should declare a hyperparameters object based
# on the desired algorithm that will be used
params = {
'beta': 0.5
}
# Creates a FSO optimizer
o = FSO(params=params)
|
11561872
|
class SourceCoordinate(object):
_immutable_fields_ = ['_start_line', '_start_column', '_char_idx']
def __init__(self, start_line, start_column, char_idx):
self._start_line = start_line
self._start_column = start_column
self._char_idx = char_idx
def get_start_line(self):
return self._start_line
def get_start_column(self):
return self._start_column
class SourceSection(object):
_immutable_fields_ = ['_source', '_identifier', '_coord', '_char_length']
def __init__(self, source = None, identifier = None, coord = None,
char_length = 0, file_name = None, source_section = None):
if source_section:
self._source = source_section._source
self._coord = source_section._coord
self._char_length = source_section._char_length
self._file = source_section._file
else:
self._source = source
self._coord = coord
self._char_length = char_length
self._file = file_name
self._identifier = identifier
def __str__(self):
return "%s:%d:%d" % (self._file, self._coord.get_start_line(),
self._coord.get_start_column())
|
11561876
|
class Solution:
def countOrders(self, n: int) -> int:
MOD = 10 ** 9 + 7
result = 1
for i in range(2, n + 1):
result = result * (i * 2 - 1) * i % MOD
return result
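if __name__ == "__main__":
    # Hedged check (not part of the original snippet): for n = 2 the
    # recurrence gives 1 * (2*2 - 1) * 2 = 6, which matches the number of
    # valid pickup/delivery orderings for two orders.
    print(Solution().countOrders(2))  # 6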
|
11561878
|
import os
import unittest
from programytest.client import TestClient
class ArrowUDCTestClient(TestClient):
def __init__(self):
TestClient.__init__(self)
def load_storage(self):
super(ArrowUDCTestClient, self).load_storage()
self.add_default_stores()
self.add_categories_store([os.path.dirname(__file__)])
def load_configuration(self, arguments):
super(ArrowUDCTestClient, self).load_configuration(arguments)
self.configuration.client_configuration.configurations[0]._empty_string = "YEMPTY"
class UDCAIMLTests(unittest.TestCase):
def setUp(self):
client = ArrowUDCTestClient()
self._client_context = client.create_client_context("testid")
def test_udc_has_this_question(self):
response = self._client_context.bot.ask_question(self._client_context, "arrow this")
self.assertIsNotNone(response)
self.assertEqual(response, "UDC This Response.")
def test_udc_that_arrow_question(self):
response = self._client_context.bot.ask_question(self._client_context, "that")
self.assertIsNotNone(response)
self.assertEqual(response, "UDC That Response.")
def test_udc_the_arrow_other_question(self):
response = self._client_context.bot.ask_question(self._client_context, "THE arrow OTHER")
self.assertIsNotNone(response)
self.assertEqual(response, "UDC THE OTHER Response.")
def test_udc_yempty_question(self):
response = self._client_context.bot.ask_question(self._client_context, "YEMPTY")
self.assertIsNotNone(response)
self.assertEqual(response, "UDC YEMPTY Response.")
def test_udc_other_question(self):
response = self._client_context.bot.ask_question(self._client_context, "OTHER")
self.assertIsNotNone(response)
self.assertEqual(response, "Arrow Empty Response.")
def test_udc_empty_question(self):
response = self._client_context.bot.ask_question(self._client_context, "")
self.assertIsNotNone(response)
self.assertEqual(response, "UDC YEMPTY Response.")
def test_udc_space_question(self):
response = self._client_context.bot.ask_question(self._client_context, " ")
self.assertIsNotNone(response)
self.assertEqual(response, "UDC YEMPTY Response.")
|
11561962
|
import argparse
def get():
parser = argparse.ArgumentParser()
parser.add_argument("-M",
"--model-file",
type=str,
default=None)
parser.add_argument("-d",
"--train-dataset",
type=str,
default="data/train/centered_crop/",
help="This is needed to calculate the centering and standardization images to subtract and divide the test examples with.")
parser.add_argument("-V",
"--train-labels-csv-path",
type=str,
default="data/train/trainLabels.csv",
help="This is needed b/c it acts as a list of what images are in the training set.")
parser.add_argument("-c",
"--center",
type=int,
default=1,
help="Sumtract mean example from examples.")
parser.add_argument("-z",
"--normalize",
type=int,
default=1,
help="Divide examples by std dev of examples.")
parser.add_argument("-F",
"--train-flip",
type=str,
default='no_flip',
help="Method name or csv file that contains complete information on whether to flip a given training image.")
parser.add_argument("-F3",
"--test-flip",
type=str,
default='no_flip')
parser.add_argument("-D",
"--test-dataset",
type=str,
default=None,
help="A directory with test images.")
parser.add_argument("-r",
"--random-seed",
type=int,
default=1991,
help="Make validation set selection reproducible")
parser.add_argument("-v",
"--valid-dataset-size",
type=int,
default=1664,
help="Validation set size (4864=14%, 3456=10%, 1664=5%)")
parser.add_argument("-fs",
"--filter-shape",
type=str,
default='c01b',
choices=['c01b', 'bc01'],
help="The shape of the filters in the CONV layer. Use 'bc01' to use slower shape (this option exists to run legacy models trained in the suboptimal shape). You must use 'bc01' if you are not using cuda_convnet.")
parser.add_argument("-cc",
"--cuda-convnet",
type=int,
default=1,
choices=[0,1],
help="If you do not have a GPU, you must pass '-cc 0' (and don't forget to set THEANO_FLAGS='device=cpu'). If 1: use cuda_convnet library for convolutions which requires a GPU. Else use theano defaults which work on CPU and GPU.")
parser.add_argument("-H",
"--cache-size-factor",
type=int,
default=8,
help="The number of multiples of minimatches to store in GPU device memory at once.")
return parser.parse_args()
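# --- Usage sketch (added for illustration, hedged) ---
# Programmatic parse of a CPU-only configuration, as the --cuda-convnet and
# --filter-shape help texts above suggest. Overriding sys.argv is only for
# illustration; normally the flags come from the command line.
# import sys
# sys.argv = [sys.argv[0], "-cc", "0", "-fs", "bc01"]
# args = get()
# print(args.cuda_convnet, args.filter_shape)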
|
11561975
|
from django.apps import AppConfig
class DwitterConfig(AppConfig):
name = 'dwitter'
def ready(self):
# Register signals
import dwitter.signals # noqa: F401
|
11561988
|
import numpy as np
from pyrr import Matrix44
import moderngl
from ported._example import Example
class InstancedCrates(Example):
'''
This example renders 32x32 crates.
For each crate the location is [x, y, sin(a * time + b)]
There are 1024 crates aligned in a grid.
'''
title = "Instanced Crates"
gl_version = (3, 3)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.prog = self.ctx.program(
vertex_shader='''
#version 330
uniform mat4 Mvp;
in vec3 in_move;
in vec3 in_position;
in vec3 in_normal;
in vec2 in_texcoord_0;
out vec3 v_vert;
out vec3 v_norm;
out vec2 v_text;
void main() {
gl_Position = Mvp * vec4(in_position + in_move, 1.0);
v_vert = in_position + in_move;
v_norm = in_normal;
v_text = in_texcoord_0;
}
''',
fragment_shader='''
#version 330
uniform vec3 Light;
uniform sampler2D Texture;
in vec3 v_vert;
in vec3 v_norm;
in vec2 v_text;
out vec4 f_color;
void main() {
float lum = clamp(dot(normalize(Light - v_vert), normalize(v_norm)), 0.0, 1.0) * 0.8 + 0.2;
f_color = vec4(texture(Texture, v_text).rgb * lum, 1.0);
}
''',
)
self.mvp = self.prog['Mvp']
self.light = self.prog['Light']
self.scene = self.load_scene('crate.obj')
self.texture = self.load_texture_2d('crate.png')
# Add a new buffer into the VAO wrapper in the scene.
# This is simply a collection of named buffers that is auto mapped
# to attributes in the vertex shader with the same name.
self.instance_data = self.ctx.buffer(reserve=12 * 1024)
vao_wrapper = self.scene.root_nodes[0].mesh.vao
vao_wrapper.buffer(self.instance_data, '3f/i', 'in_move')
# Create the actual vao instance (auto mapping in action)
self.vao = vao_wrapper.instance(self.prog)
self.crate_a = np.random.uniform(0.7, 0.8, 32 * 32)
self.crate_b = np.random.uniform(0.0, 6.3, 32 * 32)
self.crate_x = (np.tile(np.arange(32), 32) - 16) * 1.5
self.crate_y = (np.repeat(np.arange(32), 32) - 16) * 1.5
self.crate_x += np.random.uniform(-0.2, 0.2, 32 * 32)
self.crate_y += np.random.uniform(-0.2, 0.2, 32 * 32)
def render(self, time, frame_time):
angle = time * 0.2
self.ctx.clear(1.0, 1.0, 1.0)
self.ctx.enable(moderngl.DEPTH_TEST)
camera_pos = (np.cos(angle) * 5.0, np.sin(angle) * 5.0, 2.0)
proj = Matrix44.perspective_projection(45.0, self.aspect_ratio, 0.1, 1000.0)
lookat = Matrix44.look_at(
camera_pos,
(0.0, 0.0, 0.5),
(0.0, 0.0, 1.0),
)
self.mvp.write((proj * lookat).astype('f4').tobytes())
self.light.value = camera_pos
crate_z = np.sin(self.crate_a * time + self.crate_b) * 0.2
coordinates = np.dstack([self.crate_x, self.crate_y, crate_z])
self.instance_data.write(coordinates.astype('f4').tobytes())
self.texture.use()
self.vao.render(instances=1024)
if __name__ == '__main__':
InstancedCrates.run()
|
11562007
|
import os
import bs4
from bs4 import BeautifulSoup
from datauri import DataURI
class EmbeddableTag:
CSS = "css"
JS = "js"
IMAGE_PNG = "image_png"
IMAGE_SVG = "image_svg"
MAP = {
CSS: {"attr": "href", "type": "text/css"},
JS: {"attr": "src", "type": "text/javascript"},
IMAGE_PNG: {"attr": "src", "type": "image/png"},
IMAGE_SVG: {"attr": "data", "type": "image/svg+xml"},
}
def __init__(self, asset_type, path):
assert asset_type
assert path
self.asset_type = asset_type
self.path = path
@staticmethod
def recognize_from_soup_tag(tag: bs4.element.Tag):
if tag.name == "link":
if "rel" in tag.attrs:
rel_value = tag.attrs["rel"][0]
if rel_value == "stylesheet":
return EmbeddableTag(EmbeddableTag.CSS, tag.attrs["href"])
elif tag.name == "script":
if "type" in tag.attrs:
type_value = tag.attrs["type"]
if type_value == "text/javascript":
return EmbeddableTag(EmbeddableTag.JS, tag.attrs["src"])
elif tag.name == "img":
if "src" in tag.attrs:
rel_value: str = tag.attrs["src"]
if rel_value.lower().endswith(".png"):
return EmbeddableTag(
EmbeddableTag.IMAGE_PNG, tag.attrs["src"]
)
elif tag.name == "object":
if "type" in tag.attrs:
type_value = tag.attrs["type"]
if type_value == "image/svg+xml":
return EmbeddableTag(
EmbeddableTag.IMAGE_SVG, tag.attrs["data"]
)
return None
def get_path(self):
return self.path
def get_attr(self):
return EmbeddableTag.MAP[self.asset_type]["attr"]
def get_type(self):
return EmbeddableTag.MAP[self.asset_type]["type"]
class HTMLEmbedder:
@staticmethod
def embed_assets(html_string, path):
soup = BeautifulSoup(html_string, "html5lib")
tag: bs4.element.Tag
for tag in soup.findAll(recursive=True):
embeddable_tag = EmbeddableTag.recognize_from_soup_tag(tag)
if not embeddable_tag:
continue
output_dir = os.path.dirname(path)
asset_path = os.path.join(output_dir, embeddable_tag.get_path())
tag.attrs[
embeddable_tag.get_attr()
] = HTMLEmbedder._read_file_as_base64(asset_path)
output = str(soup)
return output
@staticmethod
def _read_file_as_base64(asset_path):
# DataURI.from_file seems to work well without knowing content type.
# There is also a lower-level DataURI.make for building data URIs manually.
base64 = DataURI.from_file(asset_path)
return base64
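# --- Usage sketch (added for illustration, hedged) ---
# The file names below are hypothetical. embed_assets resolves relative
# asset references (stylesheets, scripts, PNG/SVG images) against the
# directory of `path` and inlines them as data URIs.
# with open("report.html") as f:
#     html = f.read()
# embedded = HTMLEmbedder.embed_assets(html, "report.html")
# with open("report.embedded.html", "w") as f:
#     f.write(embedded)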
|
11562010
|
import unittest
import logging
import sys
from pprint import pprint
from checks import dns_resolution
from checks.config import Config
class TestDNSResolution(unittest.TestCase):
def runTest(self):
"""Resolves www.google.com"""
url = 'https://www.google.com/'
config = Config(urls=[url])
checker = dns_resolution.Checker(config=config, previous_results={})
result = checker.run()
self.assertIn(url, result)
self.assertEqual(result[url]['hostname'], 'www.google.com')
self.assertTrue(result[url]['resolvable_ipv4'])
self.assertTrue(result[url]['resolvable_ipv6'])
self.assertIsInstance(result[url]['ipv4_addresses'], list)
self.assertNotEqual(result[url]['ipv4_addresses'], [])
if __name__ == '__main__':
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
unittest.TextTestRunner().run(TestDNSResolution())
#unittest.main()
|
11562034
|
from django.test import TestCase
from django.urls import reverse
from core.models import RecordGroup, Job
from tests.utils import TestConfiguration, most_recent_global_message
class RecordGroupViewTestCase(TestCase):
def setUp(self) -> None:
self.config = TestConfiguration()
self.client.force_login(self.config.user)
def test_new_record_group(self):
response = self.client.post(reverse('record_group_new', args=[self.config.org.id]), {
'organization': self.config.org.id,
'name': 'New Test Record Group'
})
record_group = RecordGroup.objects.get(name='New Test Record Group')
self.assertRedirects(response, reverse('record_group', args=[self.config.org.id, record_group.id]))
redirect = self.client.get(response.url)
self.assertIn('Test Record Group', str(redirect.content, 'utf-8'))
def test_record_group_run_jobs(self):
other_rg = RecordGroup.objects.create(organization=self.config.org,
name="Other Record Group")
Job.objects.create(record_group=other_rg,
user=self.config.user,
job_type='MergeJob',
job_details='{"test_key": "test value"}',
name="Other Job")
response = self.client.get(reverse('record_group_run_jobs', args=[self.config.org.id,
self.config.record_group.id]))
self.assertRedirects(response, reverse('organization', args=[self.config.org.id]))
gm = most_recent_global_message()
self.assertEqual(gm['html'], '<strong>Preparing to Rerun Job(s):</strong><br>Test Job<br>Test Transform Job')
self.assertEqual(gm['class'], 'success')
def test_record_group_stop_jobs(self):
other_rg = RecordGroup.objects.create(organization=self.config.org,
name="Other Record Group")
Job.objects.create(record_group=other_rg,
user=self.config.user,
job_type='MergeJob',
job_details='{"test_key": "test value"}',
name="Other Job")
response = self.client.get(reverse('record_group_stop_jobs', args=[self.config.org.id,
self.config.record_group.id]))
self.assertRedirects(response, reverse('organization', args=[self.config.org.id]))
gm = most_recent_global_message()
self.assertEqual(gm['html'], '<p><strong>Stopped Job(s):</strong><br>Test Job<br>Test Transform Job</p>')
self.assertEqual(gm['class'], 'danger')
|
11562036
|
class HashTable:
def __init__(self):
self.max = 10
self.arr = [[] for _ in range(self.max)]
def get_hash(self, key):
hash_value = 0
for char in key:
hash_value += ord(char)
return hash_value % self.max
def __getitem__(self, key):
hash_value = self.get_hash(key)
for element in self.arr[hash_value]:
if element[0] == key:
return element[1]
def __setitem__(self, key, value):
hash_value = self.get_hash(key)
for index, element in enumerate(self.arr[hash_value]):
if element and element[0] == key:
self.arr[hash_value][index] = (key, value)
return
self.arr[hash_value].append((key, value))
def __delitem__(self, key):
hash_value = self.get_hash(key)
for index, element in enumerate(self.arr[hash_value]):
if element[0] == key:
del self.arr[hash_value][index]
def main():
""" operational function """
table = HashTable()
table["march 6"] = 120
table["march 6"] = 78
table["march 8"] = 67
table["march 9"] = 4
table["march 17"] = 459
print(table["march 6"]) # 78
print(table["march 17"]) # 459
del table["march 17"]
print(table["march 17"]) # None
if __name__ == "__main__":
main()
|
11562091
|
from unittest import TestCase
from signalflowgrapher.common.geometry import rotate, move, distance, collinear
import math
class TestGeometry(TestCase):
def test_full_rotate(self):
point = (10, 10)
origin = (0, 0)
(x, y) = rotate(origin, point, 2 * math.pi)
self.assertAlmostEqual(x, 10, delta=0.00000001)
self.assertAlmostEqual(y, 10, delta=0.00000001)
def test_no_rotate(self):
point = (10, 10)
origin = (0, 0)
(x, y) = rotate(origin, point, 0)
self.assertAlmostEqual(x, 10, delta=0.00000001)
self.assertAlmostEqual(y, 10, delta=0.00000001)
def test_rotate_180(self):
point = (10, 10)
origin = (0, 0)
(x, y) = rotate(origin, point, math.pi)
self.assertAlmostEqual(x, -10, delta=0.00000001)
self.assertAlmostEqual(y, -10, delta=0.00000001)
def test_rotate_90(self):
point = (10, 10)
origin = (0, 0)
(x, y) = rotate(origin, point, 0.5 * math.pi)
self.assertAlmostEqual(x, -10, delta=0.00000001)
self.assertAlmostEqual(y, 10, delta=0.00000001)
def test_rotate_63(self):
point = (-4, 4)
origin = (-4, 1.98)
(x, y) = rotate(origin, point, 0.35 * math.pi)
self.assertAlmostEqual(x, -5.8, delta=0.01)
self.assertAlmostEqual(y, 2.9, delta=0.01)
def test_move_diagonal(self):
point_1 = (0, 0)
point_2 = (4, 4)
distance = math.sqrt(32)
(x, y) = move(point_1, point_2, distance / 2)
self.assertAlmostEqual(x, 2, delta=0.00000001)
self.assertAlmostEqual(y, 2, delta=0.00000001)
(x, y) = move(point_1, point_2, distance)
self.assertAlmostEqual(x, 4, delta=0.00000001)
self.assertAlmostEqual(y, 4, delta=0.00000001)
def test_move_vertical(self):
point_1 = (0, 0)
point_2 = (0, 10)
(x, y) = move(point_1, point_2, 5)
self.assertAlmostEqual(x, 0, delta=0.00000001)
self.assertAlmostEqual(y, 5, delta=0.00000001)
def test_move_horizontal(self):
point_1 = (0, 0)
point_2 = (8, 0)
(x, y) = move(point_1, point_2, 4)
self.assertAlmostEqual(x, 4, delta=0.00000001)
self.assertAlmostEqual(y, 0, delta=0.00000001)
def test_distance_0(self):
point_1 = (25, 25)
point_2 = (25, 25)
self.assertAlmostEqual(distance(point_1, point_2), 0, delta=0.00000001)
def test_distance(self):
point_1 = (25, 25)
point_2 = (36, 19)
self.assertAlmostEqual(distance(point_1, point_2),
12.52996409,
delta=0.00000001)
def test_distance_negative(self):
point_1 = (58, 27)
point_2 = (-9, 0)
self.assertAlmostEqual(distance(point_1, point_2),
72.23572523,
delta=0.00000001)
def test_collinear_one_point(self):
point_1 = (30, 30)
self.assertTrue(collinear([point_1]))
def test_collinear_two_points(self):
point_1 = (30, 30)
point_2 = (8, 22)
self.assertTrue(collinear([point_1, point_2]))
def test_collinear_three_points(self):
point_1 = (30, 30)
point_2 = (60, 60)
point_3 = (120, 120)
self.assertTrue(collinear([point_1, point_2, point_3]))
def test_not_collinear_three_points(self):
point_1 = (30, 30)
point_2 = (60, 60)
point_3 = (120, 119)
self.assertFalse(collinear([point_1, point_2, point_3]))
def test_collinear_four_points(self):
point_1 = (30, 30)
point_2 = (60, 60)
point_3 = (120, 120)
point_4 = (240, 240)
self.assertTrue(collinear([point_1, point_2, point_3, point_4]))
def test_not_collinear_four_points(self):
point_1 = (30, 30)
point_2 = (60, 60)
point_3 = (120, 120)
point_4 = (240, 239)
self.assertFalse(collinear([point_1, point_2, point_3, point_4]))
def test_collinear_five_points(self):
point_1 = (30, 30)
point_2 = (60, 60)
point_3 = (120, 120)
point_4 = (240, 240)
point_5 = (480, 480)
self.assertTrue(collinear([point_1,
point_2,
point_3,
point_4,
point_5]))
def test_not_collinear_five_points(self):
point_1 = (30, 30)
point_2 = (60, 60)
point_3 = (120, 120)
point_4 = (240, 240)
point_5 = (481, 480)
self.assertFalse(collinear([point_1,
point_2,
point_3,
point_4,
point_5]))
|
11562106
|
import torch.nn as nn
import torch.nn.functional as F
class SeparableConv2d(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=False, args=None):
super(SeparableConv2d, self).__init__()
self.args = args
self.conv1 = nn.Conv2d(inplanes, inplanes, (1, kernel_size), stride=(1, stride),
padding=(0, padding), dilation=(1,dilation), groups=inplanes, bias=True)
self.conv2 = nn.Conv2d(inplanes, inplanes, (kernel_size, 1), stride=(stride, 1),
padding=(padding, 0), dilation=(dilation,1), groups=inplanes,bias=False)
self.bn = nn.BatchNorm2d(inplanes, track_running_stats=False)
self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.bn(x)
x = self.pointwise(x)
return x
class _Upsample(nn.Upsample):
def __init__(self,
mode, align_corners):
super(_Upsample, self).__init__( mode=mode, align_corners=align_corners)
def forward(self, input, size):
self.size = size
return F.interpolate(input, size, self.scale_factor, self.mode, self.align_corners)
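if __name__ == "__main__":
    # Hedged sketch (not part of the original module): a quick shape check of
    # the separable convolution block; with kernel_size=3, stride=1, padding=1
    # the spatial size is preserved and only the channel count changes.
    import torch
    x = torch.randn(1, 16, 32, 32)
    block = SeparableConv2d(16, 32)
    print(block(x).shape)  # expected: torch.Size([1, 32, 32, 32])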
|
11562125
|
import os, sys, subprocess, glob
import os.path as path
from shutil import copyfile
def copy_code(source_dir, dest_dir, exclude_dirs=[], exclude_files=[]):
"""
Copies code from source_dir to dest_dir. Excludes specified folders and files by substring-matching.
Parameters:
source_dir (string): location of the code to copy
dest_dir (string): location where the code should be copied to
exclude_dirs (list of strings): folders containing strings specified in this list will be ignored
exclude_files (list of strings): files containing strings specified in this list will be ignored
"""
source_basename = path.basename(source_dir)
for root, dirs, files in os.walk(source_dir, topdown=True):
# skip ignored dirs
if any(ex_subdir in root for ex_subdir in exclude_dirs):
continue
# construct destination dir
cropped_root = root[2:] if (root[:2] == './') else root
subdir_basename = path.basename(cropped_root)
# do not treat the root as a subdir
if subdir_basename == source_basename:
subdir_basename = ""
dest_subdir = os.path.join(dest_dir, subdir_basename)
# create destination folder
if not os.path.exists(dest_subdir):
os.makedirs(dest_subdir)
# copy files
for filename in filter(lambda x: not any(substr in x for substr in exclude_files), files):
source_file_path = os.path.join(root, filename)
dest_file_path = os.path.join(dest_subdir, filename)
copyfile(source_file_path, dest_file_path)
def retrieve_git_hash():
"""
Retrieves and returns the current git hash if the execution location is a git repo.
"""
try:
git_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()
return str(git_hash)[2:-1]
except subprocess.CalledProcessError as e:
print(e.output)
return False
def save_run_params_in_file(folder_path, filename, run_config):
"""
Receives a config class, fetches all member variables and saves them
in a config file for logging purposes.
Parameters:
folder_path - output folder
filename - output filename
run_config - shallow class with parameter members
"""
with open(path.join(folder_path, "run_params.conf"), 'w') as run_param_file:
for attr, value in sorted(run_config.__dict__.items()):
run_param_file.write(attr + ': ' + str(value) + '\n')
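# --- Usage sketch (added for illustration, hedged) ---
# Example paths are hypothetical: snapshot the current project into an
# experiment folder while skipping caches and compiled files, and record
# the git hash of the code that produced the run.
# copy_code(".", "/tmp/experiment/code",
#           exclude_dirs=["__pycache__", ".git"],
#           exclude_files=[".pyc"])
# print(retrieve_git_hash())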
|
11562128
|
import os
import sys
import time
import smbus
from imusensor.MPU9250 import MPU9250
address = 0x68
bus = smbus.SMBus(1)
imu = MPU9250.MPU9250(bus, address)
imu.begin()
# imu.caliberateGyro()
# imu.caliberateAccelerometer()
# or load your own caliberation file
#imu.loadCalibDataFromFile("/home/pi/calib_real_bolder.json")
while True:
imu.readSensor()
imu.computeOrientation()
#print ("Accel x: {0} ; Accel y : {1} ; Accel z : {2}".format(imu.AccelVals[0], imu.AccelVals[1], imu.AccelVals[2]))
#print ("Gyro x: {0} ; Gyro y : {1} ; Gyro z : {2}".format(imu.GyroVals[0], imu.GyroVals[1], imu.GyroVals[2]))
#print ("Mag x: {0} ; Mag y : {1} ; Mag z : {2}".format(imu.MagVals[0], imu.MagVals[1], imu.MagVals[2]))
print ("roll: {0} ; pitch : {1} ; yaw : {2}".format(imu.roll, imu.pitch, imu.yaw))
time.sleep(0.1)
|
11562158
|
import os
import sys
import json
import pandas as pd
from pandas.io.json import json_normalize
from celery_connectors.utils import ev
from spylunking.log.setup_logging import console_logger
from network_pipeline.utils import ppj
from network_pipeline.utils import rnow
from network_pipeline.build_packet_key import build_packet_key
from antinex_client.consts import SUCCESS
from antinex_client.consts import FAILED
from antinex_client.consts import ERROR
from antinex_client.consts import LOGIN_FAILED
from antinex_client.consts import ANTINEX_PUBLISH_ENABLED
from antinex_client.consts import ANTINEX_PUBLISH_REQUEST_FILE
from antinex_client.consts import ANTINEX_USE_MODEL_NAME
from antinex_client.consts import ANTINEX_URL
from antinex_client.consts import ANTINEX_USER
from antinex_client.consts import ANTINEX_MISSING_VALUE
from antinex_client.build_ai_client_from_env import build_ai_client_from_env
from antinex_client.generate_ai_request import generate_ai_request
log = console_logger(
name='csv')
class RecordPacketsToCSV:
"""RecordPacketsToCSV"""
def __init__(self):
"""__init__"""
self.recv_msgs = []
# save every nth number of messages
self.save_after_num = int(
ev("SAVE_AFTER_NUM",
"100"))
# shutdown after this number of messages
self.stop_after_num = int(
ev("STOP_AFTER_NUM",
"-1"))
if self.save_after_num < 0:
self.save_after_num = 1
if self.stop_after_num < 0:
self.stop_after_num = None
# shutdown if this file is found
self.stop_for_file = ev(
"STOP_FILE",
"/tmp/stop-recording-csv")
self.dataset_name = ev(
"DS_NAME",
"netdata")
self.save_dir = ev(
"DS_DIR",
"/tmp")
self.save_to_file = ev(
"OUTPUT_CSV",
"{}/{}-{}.csv".format(
self.save_dir,
self.dataset_name,
rnow("%Y-%m-%d-%H-%M-%S")))
self.archive_file = ev(
"ARCHIVE_JSON",
"{}/packets-{}-{}.json".format(
self.save_dir,
self.dataset_name,
rnow("%Y-%m-%d-%H-%M-%S")))
self.debug = bool(ev(
"DEBUG_PACKETS",
"0") == "1")
self.df = None
self.last_df = None
self.eth_keys = {"eth_id": "id"}
self.ip_keys = {"ip_id": "id"}
self.ipvsix_keys = {"<KEY>": "id"}
self.icmp_keys = {"icmp_id": "id"}
self.arp_keys = {"arp_id": "id"}
self.tcp_keys = {"tcp_id": "id"}
self.udp_keys = {"udp_id": "id"}
self.dns_keys = {"dns_id": "id"}
self.raw_keys = {"raw_id": "id"}
self.pad_keys = {"pad_id": "id"}
self.all_keys = {}
self.all_keys_list = []
self.all_eth = []
self.all_ip = []
self.all_ipvsix = []
self.all_icmp = []
self.all_arp = []
self.all_tcp = []
self.all_udp = []
self.all_dns = []
self.all_raw = []
self.all_pad = []
self.all_flat = []
self.all_rows = []
# noqa https://github.com/jay-johnson/antinex-client/blob/5fbcefaaed3d979b3c0829447b61592d5910ef22/antinex_client/build_ai_client_from_env.py#L19
self.client = build_ai_client_from_env()
# the client uses environment variables:
# noqa https://github.com/jay-johnson/antinex-client/blob/5fbcefaaed3d979b3c0829447b61592d5910ef22/antinex_client/consts.py#L23
# here is an example of what to export:
# noqa https://github.com/jay-johnson/antinex-client/blob/master/examples/example-prediction.env
self.request_dict = {}
if ANTINEX_PUBLISH_ENABLED:
if os.path.exists(ANTINEX_PUBLISH_REQUEST_FILE):
with open(ANTINEX_PUBLISH_REQUEST_FILE, "r") as f:
self.request_dict = json.loads(f.read())
# if publishing is enabled
# end of __init__
def process_ether_frame(self,
id=None,
msg=None):
"""process_ether_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: ether frame for packet
"""
# normalize into a dataframe
df = json_normalize(msg)
# convert to a flattened dictionary
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "eth_{}".format(k)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.eth_keys:
self.eth_keys[new_key] = k
# end of capturing all unique keys
dt["eth_id"] = id
self.all_eth.append(dt)
log.debug("ETHER data updated:")
log.debug(self.eth_keys)
log.debug(self.all_eth)
log.debug("")
return flat_msg
# end of process_ether_frame
def process_ip_frame(self,
id=None,
msg=None):
"""process_ip_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: ip frame for packet
"""
# normalize into a dataframe
df = json_normalize(msg)
# convert to a flattened dictionary
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "ip_{}".format(k)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.ip_keys:
self.ip_keys[new_key] = k
# end of capturing all unique keys
dt["ip_id"] = id
self.all_ip.append(dt)
log.debug("IP data updated:")
log.debug(self.ip_keys)
log.debug(self.all_ip)
log.debug("")
return flat_msg
# end of process_ip_frame
def process_ipvsix_frame(self,
id=None,
msg=None):
"""process_ipvsix_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: ipv6 frame for packet
"""
# normalize into a dataframe
df = json_normalize(msg)
# convert to a flattened dictionary
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "<KEY>)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.ipvsix_keys:
self.ipvsix_keys[new_key] = k
# end of capturing all unique keys
dt["ipv6_id"] = id
self.all_ipvsix.append(dt)
log.debug("IPV6 data updated:")
log.debug(self.ipvsix_keys)
log.debug(self.all_ipvsix)
log.debug("")
return flat_msg
# end of process_ip_frame
def process_tcp_frame(self,
id=None,
msg=None):
"""process_tcp_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: tcp frame for packet
"""
# normalize into a dataframe
df = json_normalize(msg)
# convert to a flattened dictionary
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "tcp_{}".format(k)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.tcp_keys:
self.tcp_keys[new_key] = k
# end of capturing all unique keys
dt["tcp_id"] = id
self.all_tcp.append(dt)
log.debug("TCP data updated:")
log.debug(self.tcp_keys)
log.debug(self.all_tcp)
log.debug("")
return flat_msg
# end of process_tcp_frame
def process_udp_frame(self,
id=None,
msg=None):
"""process_udp_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: udp frame for packet
"""
# normalize into a dataframe
df = json_normalize(msg)
# convert to a flattened dictionary
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "udp_{}".format(k)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.udp_keys:
self.udp_keys[new_key] = k
# end of capturing all unique keys
dt["udp_id"] = id
self.all_udp.append(dt)
log.debug("UDP data updated:")
log.debug(self.udp_keys)
log.debug(self.all_udp)
log.debug("")
return flat_msg
# end of process_udp_frame
def process_dns_frame(self,
id=None,
msg=None):
"""process_dns_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: dns frame for packet
"""
# normalize into a dataframe
df = json_normalize(msg)
# convert to a flattened dictionary
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "dns_{}".format(k)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.dns_keys:
self.dns_keys[new_key] = k
# end of capturing all unique keys
dt["dns_id"] = id
self.all_dns.append(dt)
log.debug("DNS data updated:")
log.debug(self.dns_keys)
log.debug(self.all_dns)
log.debug("")
return flat_msg
# end of process_dns_frame
def process_icmp_frame(self,
id=None,
msg=None):
"""process_icmp_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: icmp frame for packet
"""
# normalize into a dataframe
df = json_normalize(msg)
# convert to a flattened dictionary
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "icmp_{}".format(k)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.icmp_keys:
self.icmp_keys[new_key] = k
# end of capturing all unique keys
dt["icmp_id"] = id
self.all_icmp.append(dt)
log.debug("ICMP data updated:")
log.debug(self.icmp_keys)
log.debug(self.all_icmp)
log.debug("")
return flat_msg
# end of process_icmp_frame
def process_arp_frame(self,
id=None,
msg=None):
"""process_arp_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: arp frame for packet
"""
# normalize into a dataframe
df = json_normalize(msg)
# convert to a flattened dictionary
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "arp_{}".format(k)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.arp_keys:
self.arp_keys[new_key] = k
# end of capturing all unique keys
dt["arp_id"] = id
self.all_arp.append(dt)
log.debug("ARP data updated:")
log.debug(self.arp_keys)
log.debug(self.all_arp)
log.debug("")
return flat_msg
# end of process_arp_frame
def process_raw_frame(self,
id=None,
msg=None):
"""process_raw_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: raw frame for packet
"""
# normalize into a dataframe
df = json_normalize(msg)
# convert to a flattened dictionary
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "raw_{}".format(k)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.raw_keys:
self.raw_keys[new_key] = k
# end of capturing all unique keys
dt["raw_id"] = id
self.all_raw.append(dt)
log.debug("RAW data updated:")
log.debug(self.raw_keys)
log.debug(self.all_raw)
log.debug("")
return flat_msg
# end of process_raw_frame
def process_pad_frame(self,
id=None,
msg=None):
"""process_pad_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: pad frame for packet
"""
# normalize into a dataframe
df = json_normalize(msg)
# convert to a flattened dictionary
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "pad_{}".format(k)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.pad_keys:
self.pad_keys[new_key] = k
# end of capturing all unique keys
dt["pad_id"] = id
self.all_pad.append(dt)
log.debug("PAD data updated:")
log.debug(self.pad_keys)
log.debug(self.all_pad)
log.debug("")
return flat_msg
# end of process_pad_frame
def build_flat_msg(self,
id=None,
msg=None):
"""build_flat_msg
:param id: unique id for this message
:param msg: message dictionary to flatten
"""
flat_msg = {}
if not id:
log.error("Please pass in an id")
return None
if not msg:
log.error("Please pass in a msg")
return None
for k in msg["data"]:
if k == "ether":
flat_msg.update(self.process_ether_frame(
id=id,
msg=msg["data"][k]))
# end of ether
elif k == "ip":
flat_msg.update(self.process_ip_frame(
id=id,
msg=msg["data"][k]))
# end of ip
elif k == "ipv6":
flat_msg.update(self.process_ipvsix_frame(
id=id,
msg=msg["data"][k]))
# end of ipv6
elif k == "tcp":
flat_msg.update(self.process_tcp_frame(
id=id,
msg=msg["data"][k]))
# end of tcp
elif k == "udp":
flat_msg.update(self.process_udp_frame(
id=id,
msg=msg["data"][k]))
# end of udp
elif k == "dns":
flat_msg.update(self.process_dns_frame(
id=id,
msg=msg["data"][k]))
# end of dns
elif k == "icmp":
flat_msg.update(self.process_icmp_frame(
id=id,
msg=msg["data"][k]))
# end of icmp
elif k == "arp":
flat_msg.update(self.process_arp_frame(
id=id,
msg=msg["data"][k]))
# end of arp
elif k == "raw":
flat_msg.update(self.process_raw_frame(
id=id,
msg=msg["data"][k]))
# end of raw
elif k == "padding":
flat_msg.update(self.process_pad_frame(
id=id,
msg=msg["data"][k]))
# end of pad
else:
log.error(("Unsupported frame type={} "
"please file an issue to track this "
"with data={} msg={}")
.format(k,
ppj(msg["data"][k]),
msg["data"]))
# end of processing new message
return flat_msg
# end of build_flat_msg
def build_all_keys_dict(self):
"""build_all_keys_dict"""
log.info("finding keys")
for k in self.eth_keys:
ak = "{}".format(k)
if ak not in self.all_keys:
self.all_keys[ak] = k
# end of building all eths
for k in self.ip_keys:
ak = "{}".format(k)
if ak not in self.all_keys:
self.all_keys[ak] = k
# end of building all ips
for k in self.ipvsix_keys:
ak = "{}".format(k)
if ak not in self.all_keys:
self.all_keys[ak] = k
# end of building all ipvsixs
for k in self.icmp_keys:
ak = "{}".format(k)
if ak not in self.all_keys:
self.all_keys[ak] = k
# end of building all icmps
for k in self.arp_keys:
ak = "{}".format(k)
if ak not in self.all_keys:
self.all_keys[ak] = k
# end of building all arps
for k in self.tcp_keys:
ak = "{}".format(k)
if ak not in self.all_keys:
self.all_keys[ak] = k
# end of building all tcps
for k in self.udp_keys:
ak = "{}".format(k)
if ak not in self.all_keys:
self.all_keys[ak] = k
# end of building all udps
for k in self.dns_keys:
ak = "{}".format(k)
if ak not in self.all_keys:
self.all_keys[ak] = k
# end of building all dnss
for k in self.raw_keys:
ak = "{}".format(k)
if ak not in self.all_keys:
self.all_keys[ak] = k
# end of building all raws
for k in self.pad_keys:
ak = "{}".format(k)
if ak not in self.all_keys:
self.all_keys[ak] = k
# end of building all pads
# this will be the columns for the csv
for k in self.all_keys:
self.all_keys_list.append(k)
log.debug(("unique all_keys keys={} values={}")
.format(len(self.all_keys_list),
self.all_keys))
# end of build_all_keys_dict
def flatten_all(self):
"""flatten_all"""
log.info("flattening - START")
self.all_rows = []
for idx, r in enumerate(self.all_flat):
new_row = {"idx": idx}
for k in self.all_keys_list:
if k in r:
new_row[k] = r[k]
else:
new_row[k] = None
# end of for all keys
self.all_rows.append(new_row)
# end of all_keys
log.info("flattening - END")
# end of flatten_all
def create_json_archive(self):
"""create_json_archive"""
archive_data = {"packets": self.recv_msgs,
"dataset": self.dataset_name,
"num_packets": len(self.recv_msgs),
"created": rnow()}
self.write_to_file(archive_data,
self.archive_file)
# end of create_json_archive
def convert_to_df(self):
"""convert_to_df"""
log.info(("converting={}")
.format(len(self.all_rows)))
if len(self.all_rows) == 0:
return
self.df = pd.DataFrame(self.all_rows).set_index("idx")
if len(self.df) != len(self.all_rows):
log.error(("Failed converting={} to rows={}")
.format(len(self.all_rows),
len(self.df)))
else:
log.info(("converted={} into rows={}")
.format(len(self.all_rows),
len(self.df)))
# end of convert_to_df
def write_to_file(self,
data_dict,
output_file_path):
"""write_to_file
:param data_dict:
:param output_file_path:
"""
log.info("saving={}".format(output_file_path))
with open(output_file_path, "w") as output_file:
output_file.write(str(ppj(data_dict)))
# end of write_to_file
def save_df_as_csv(self):
"""save_df_as_csv"""
if len(self.all_rows) == 0:
log.info(("no df={} to save")
.format(self.df))
return
else:
log.info(("saving "
"packets={} file={} rows={}")
.format(len(self.recv_msgs),
self.save_to_file,
len(self.df)))
self.df.to_csv(self.save_to_file,
sep=",",
encoding="utf-8",
index=True)
log.info(("done saving={}")
.format(self.save_to_file))
# end of saving if the dataframe is there
# end of save_df_as_csv
def save_data(self):
"""save_data"""
state = ""
try:
state = "create_json_archive"
log.info("creating json archive")
self.create_json_archive()
state = "building_unique_keys"
log.info("processing all unique keys")
self.build_all_keys_dict()
state = "flattening"
log.info("flattening all data")
self.flatten_all()
state = "converting"
log.info("converting to df")
self.convert_to_df()
state = "saving"
log.info("saving to df")
self.save_df_as_csv()
if ANTINEX_PUBLISH_ENABLED:
log.info(("publishing stream to rest={}")
.format(
ANTINEX_URL))
self.publish_predictions_to_core()
# end of if publishing to the core
except Exception as e:
log.error(("Failed state={} with ex={} to "
"save={}")
.format(state,
e,
self.save_to_file))
# end of save_data
def handle_msg(self,
body,
org_message):
"""handle_msg
:param body: dictionary contents from the message body
:param org_message: message object can ack, requeue or reject
"""
if os.path.exists(self.stop_for_file):
log.info(("Detected stop_file={} "
"shutting down")
.format(self.stop_for_file))
# drop the message back in the queue
# for next time
org_message.requeue()
sys.exit(1)
# end of stop file detection
try:
log.debug(("handle body={}")
.format(ppj(body)))
msg = body
id = build_packet_key()
recv_time = rnow()
# this could be made into celery tasks...
flat_msg = self.build_flat_msg(
id=id,
msg=msg)
if not flat_msg:
log.error(("Failed to build a flat message "
"for message={}")
.format(msg))
return
msg["id"] = id
msg["received"] = recv_time
if len(flat_msg) > 0:
if self.debug:
log.info(ppj(flat_msg))
flat_msg["id"] = id
flat_msg["received"] = recv_time
self.all_flat.append(flat_msg)
self.recv_msgs.append(msg)
# end of adding all flat messages
already_saved = False
num_recv = len(self.recv_msgs)
if (num_recv % self.save_after_num) == 0:
already_saved = True
self.save_data()
# end of saving a snapshot
if self.stop_after_num:
if num_recv >= self.stop_after_num:
if not already_saved:
self.save_data()
# avoid waiting on the save again
log.info("archive successful - purging buffer")
sys.exit(2)
# shutdown - good for testing
# if not set up for infinite consuming
except Exception as e:
log.error(("Failed processing msg={} "
"ex={}")
.format(body,
e))
# end of processing message
try:
org_message.ack()
except Exception as e:
log.error(("Failed ack-ing msg={} "
"ex={}")
.format(body,
e))
# end of acknowleding message was processed
log.info("done handle")
# end of handle_message
def publish_predictions_to_core(self):
"""publish_predictions_to_core"""
status = FAILED
msg = "not started"
try:
msg = "generating request"
log.info(msg)
# noqa https://stackoverflow.com/questions/29815129/pandas-dataframe-to-list-of-dictionaries
publish_req = generate_ai_request(
predict_rows=self.df.fillna(
ANTINEX_MISSING_VALUE).to_dict("records"),
req_dict=self.request_dict)
if publish_req["status"] != SUCCESS:
log.error(("failed generate_ai_request with err={}")
.format(
publish_req["error"]))
status = ERROR
else:
msg = "publishing as user={} url={} model={}".format(
ANTINEX_USER,
ANTINEX_URL,
ANTINEX_USE_MODEL_NAME)
log.info(msg)
response = self.client.run_job(
body=publish_req["data"])
if response["status"] == SUCCESS:
log.info("predictions sent")
status = SUCCESS
elif response["status"] == FAILED:
log.error(("job failed with error='{}' with response={}")
.format(
response["error"],
response["data"]))
status = ERROR
elif response["status"] == ERROR:
log.error(("job had an error='{}' with response={}")
.format(
response["error"],
response["data"]))
status = ERROR
elif response["status"] == LOGIN_FAILED:
log.error(("job reported user was not able to log in "
"with an error='{}' with response={}")
.format(
response["error"],
response["data"]))
status = ERROR
# logging for good/bad cases during publish
# if generated a good request
except Exception as e:
log.error(("failed generating request last_step='{}' ex={}")
.format(
msg,
e))
# end of try/ex
return status
# end of publish_predictions_to_core
# end of RecordPacketsToCSV
|
11562174
|
import itertools
import torch
import torch.nn.functional as F
import models.stn as stn
from util.tb_visualizer import TensorboardVisualizer
from . import networks
from .base_model import BaseModel
class NEMARModel(BaseModel):
"""
NeMAR: a neural multimodal adversarial image registration network.
This class trains a registration network and a geometry-preserving translation network. This is done
using three networks:
netT - A translation network that translates from modality A --to--> modality B (by default a
netR - A registration network that applies geometric transformation to spatially align modality A --with--> modality B
netD - Adversarial network that discriminates between fake and real images.
Official implementation of:
Unsupervised Multi-Modal Image Registration via Geometry Preserving Image-to-Image Translation paper
https://arxiv.org/abs/2003.08073
Inspired by the implementation of pix2pix:
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/pix2pix_model.py
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Modify the command line."""
parser = stn.modify_commandline_options(parser, is_train)
if is_train:
parser.add_argument('--lambda_GAN', type=float, default=1.0, help='Weight for the GAN loss.')
parser.add_argument('--lambda_recon', type=float, default=100.0,
help='Weight for the L1 reconstruction loss.')
parser.add_argument('--lambda_smooth', type=float, default=0.0, help='Regularization term used by the STN')
parser.add_argument('--enable_tbvis', action='store_true',
help='Enable tensorboard visualizer (default : False)')
parser.add_argument('--multi_resolution', type=int, default=1,
help='Use of multi-resolution discriminator.'
'(if equals to 1 then no multi-resolution training is applied)')
TensorboardVisualizer.modify_commandline_options(parser, is_train)
return parser
def __init__(self, opt):
"""Initialize the CycleGAN class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# Setup the visualizers
self.train_stn = True
self.setup_visualizers()
if self.isTrain and opt.enable_tbvis:
self.tb_visualizer = TensorboardVisualizer(self, ['netR', 'netT', 'netD'], self.loss_names, self.opt)
else:
self.tb_visualizer = None
self.define_networks()
if self.tb_visualizer is not None:
print('Enabling Tensorboard Visualizer!')
self.tb_visualizer.enable()
if self.isTrain:
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss.
self.criterionL1 = torch.nn.L1Loss()
self.setup_optimizers()
def setup_visualizers(self):
# <Loss>_TR denotes the loss for the translation first variant.
# <Loss>_RT denotes the loss for the registration first variant.
loss_names_A = ['L1_TR', 'GAN_TR', 'L1_RT', 'GAN_RT', 'smoothness', 'D_fake_TR', 'D_fake_RT', 'D']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
visual_names_A = ['fake_TR_B', 'fake_RT_B', 'registered_real_A', 'fake_B']
model_names_a = ['T', 'R']
if self.isTrain:
model_names_a += ['D']
self.visual_names = ['real_A', 'real_B']
self.model_names = []
self.loss_names = []
# if self.opt.direction == 'AtoB':
self.visual_names += visual_names_A
self.model_names += model_names_a
self.loss_names += loss_names_A
def define_networks(self):
# define networks:
# netT - is the photometric translation network (i.e the generator)
# netR - is the registration network (i.e STN)
# netD - is the discriminator network
opt = self.opt
# Support two directions (A->B) or (B->A)
AtoB = opt.direction == 'AtoB'
in_c = opt.input_nc if AtoB else opt.output_nc
out_c = opt.output_nc if AtoB else opt.input_nc
self.netT = networks.define_G(in_c, out_c, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
self.netR = stn.define_stn(self.opt, self.opt.stn_type)
if self.isTrain: # define discriminator
self.netD = networks.define_D(opt.output_nc + opt.input_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
# We support multi-resolution discriminator - this could yield better performance for large input images.
self.netD_multiresolution = []
if opt.multi_resolution > 1:
for _ in range(self.opt.multi_resolution - 1):
netD_S = networks.define_D(opt.output_nc + opt.input_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netD_multiresolution.append(netD_S)
def reset_weights(self):
# We have tested what happens if we reset the discriminator/translation network's weights during training.
# This will eventually result in th
opt = self.opt
networks.init_weights(self.netT, opt.init_type, opt.init_gain)
networks.init_weights(self.netD, opt.init_type, opt.init_gain)
for netD_S in self.netD_multiresolution:
networks.init_weights(netD_S, opt.init_type, opt.init_gain)
def setup_optimizers(self):
opt = self.opt
# Define optimizer for the registration network:
self.optimizer_R = torch.optim.Adam(itertools.chain(self.netR.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999), )
# Define optimizer for the translation network:
self.optimizer_T = torch.optim.Adam([{'params': self.netT.parameters(), 'betas': (opt.beta1, 0.999),
'lr': opt.lr}])
# Define optimizer for the discriminator network:
d_params = self.netD.parameters()
if opt.multi_resolution > 1:
d_params = itertools.chain(d_params, *[x.parameters() for x in self.netD_multiresolution])
self.optimizer_D = torch.optim.Adam(d_params, lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_T)
self.optimizers.append(self.optimizer_D)
self.optimizers.append(self.optimizer_R)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
if AtoB:
self.real_A = input['A'].to(self.device)
self.real_B = input['B'].to(self.device)
self.image_paths = input['A_paths']
else:
self.real_A = input['B'].to(self.device)
self.real_B = input['A'].to(self.device)
self.image_paths = input['B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.fake_B = self.netT(self.real_A)
wraped_images, reg_term = self.netR(self.real_A, self.real_B, apply_on=[self.real_A, self.fake_B])
self.stn_reg_term = reg_term
self.registered_real_A = wraped_images[0]
# Registration first -- Then --> Translation
self.fake_TR_B = self.netT(self.registered_real_A)
# Translation first -- Then --> Registration
self.fake_RT_B = wraped_images[1]
if self.tb_visualizer:
with torch.no_grad():
self.deformation_field_A_to_B = self.netR.module.get_grid(self.real_A, self.real_B)
def backward_T_and_R(self):
"""Calculate GAN and L1 loss for the translation and registration networks."""
# Registration first (TR):
# ----> Reconstruction loss:
self.loss_L1_TR = self.opt.lambda_recon * self.criterionL1(self.fake_TR_B, self.real_B)
# ----> GAN loss:
fake_AB_t = torch.cat((self.real_A, self.fake_TR_B), 1)
pred_fake = self.netD(fake_AB_t)
self.loss_GAN_TR = self.opt.lambda_GAN * self.criterionGAN(pred_fake, True)
# --------> Multi-scale discriminator
for i in range(self.opt.multi_resolution - 1):
sh, sw = self.real_A.size(2) // (2 ** (i + 1)), self.real_A.size(3) // (2 ** (i + 1)),
real_A_resized = F.interpolate(self.real_A, (sh, sw), mode='bilinear', align_corners=False)
fake_B_B_resized = F.interpolate(self.fake_TR_B, (sh, sw), mode='bilinear', align_corners=False)
fake_AB_t = torch.cat((real_A_resized, fake_B_B_resized), 1)
pred_fake = self.netD_multiresolution[i](fake_AB_t)
self.loss_GAN_TR += self.opt.lambda_GAN * self.criterionGAN(pred_fake, True)
# Translation First:
# ----> Reconstruction loss:
self.loss_L1_RT = self.opt.lambda_recon * self.criterionL1(self.fake_RT_B, self.real_B)
# ----> GAN loss:
fake_AB_t = torch.cat((self.real_A, self.fake_RT_B), 1)
pred_fake = self.netD(fake_AB_t)
self.loss_GAN_RT = self.opt.lambda_GAN * self.criterionGAN(pred_fake, True)
# --------> Multi-scale discriminator
for i in range(self.opt.multi_resolution - 1):
sh, sw = self.real_A.size(2) // (2 ** (i + 1)), self.real_A.size(3) // (2 ** (i + 1)),
real_A_resized = F.interpolate(self.real_A, (sh, sw), mode='bilinear', align_corners=False)
fake_B_P_resized = F.interpolate(self.fake_RT_B, (sh, sw), mode='bilinear', align_corners=False)
fake_AB_t = torch.cat((real_A_resized, fake_B_P_resized), 1)
pred_fake = self.netD_multiresolution[i](fake_AB_t)
self.loss_GAN_RT += self.opt.lambda_GAN * self.criterionGAN(pred_fake, True)
self.loss_smoothness = self.opt.lambda_smooth * self.stn_reg_term
loss = self.loss_L1_TR + self.loss_L1_RT + self.loss_GAN_TR + self.loss_GAN_RT + self.loss_smoothness
loss.backward()
return loss
def backward_D(self):
"""Calculate GAN loss for the discriminator"""
# Real
real_AB = torch.cat((self.real_A, self.real_B), 1)
pred_real = self.netD(real_AB)
loss_D_real = self.criterionGAN(pred_real, True)
# --------> Multi-scale discriminator
for i in range(self.opt.multi_resolution - 1):
sh, sw = self.real_A.size(2) // (2 ** (i + 1)), self.real_A.size(3) // (2 ** (i + 1)),
real_A_resized = F.interpolate(self.real_A, (sh, sw), mode='bilinear', align_corners=False)
real_B_resized = F.interpolate(self.real_B, (sh, sw), mode='bilinear', align_corners=False)
real_AB = torch.cat((real_A_resized, real_B_resized), 1)
pred_real = self.netD_multiresolution[i](real_AB)
loss_D_real += self.criterionGAN(pred_real, True)
# Registration Firsts (TR):
# ----> Fake
fake_AB = torch.cat((self.real_A, self.fake_TR_B), 1)
pred_fake = self.netD(fake_AB.detach())
self.loss_D_fake_TR = self.criterionGAN(pred_fake, False)
# --------> Multi-scale discriminator
for i in range(self.opt.multi_resolution - 1):
sh, sw = self.real_A.size(2) // (2 ** (i + 1)), self.real_A.size(3) // (2 ** (i + 1)),
real_A_resized = F.interpolate(self.real_A, (sh, sw), mode='bilinear', align_corners=False)
fake_B_B_resized = F.interpolate(self.fake_TR_B, (sh, sw), mode='bilinear', align_corners=False)
fake_AB_t = torch.cat((real_A_resized, fake_B_B_resized), 1)
pred_fake = self.netD_multiresolution[i](fake_AB_t.detach())
self.loss_D_fake_TR += self.criterionGAN(pred_fake, False)
# Translation First (RT):
# ----> Fake
fake_AB = torch.cat((self.real_A, self.fake_RT_B), 1)
pred_fake = self.netD(fake_AB.detach())
self.loss_D_fake_RT = self.criterionGAN(pred_fake, False)
# --------> Multi-scale discriminator
for i in range(self.opt.multi_resolution - 1):
sh, sw = self.real_A.size(2) // (2 ** (i + 1)), self.real_A.size(3) // (2 ** (i + 1)),
real_A_resized = F.interpolate(self.real_A, (sh, sw), mode='bilinear', align_corners=False)
fake_B_P_resized = F.interpolate(self.fake_RT_B, (sh, sw), mode='bilinear', align_corners=False)
fake_AB_t = torch.cat((real_A_resized, fake_B_P_resized), 1)
pred_fake = self.netD_multiresolution[i](fake_AB_t.detach())
self.loss_D_fake_RT += self.criterionGAN(pred_fake, False)
# combine loss and calculate gradients
self.loss_D = 0.5 * self.opt.lambda_GAN * (loss_D_real + self.loss_D_fake_TR + self.loss_D_fake_RT)
self.loss_D.backward()
return self.loss_D
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
# forward
self.forward() # TR(I_a) and RT(I_a)
# Backward D
self.set_requires_grad([self.netT, self.netR], False)
self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero
self.backward_D() # calculate gradients for D_A
self.optimizer_D.step() # update D_A and D_B's weights
self.set_requires_grad([self.netT, self.netR], True)
# Backward translation and registration networks
self.set_requires_grad([self.netD, *self.netD_multiresolution], False)
self.optimizer_R.zero_grad()
self.optimizer_T.zero_grad() # set G_A and G_B's gradients to zero
self.backward_T_and_R() # calculate gradients for translation and registration networks
self.optimizer_R.step()
self.optimizer_T.step()
self.set_requires_grad([self.netD, *self.netD_multiresolution], True)
# Update tb visualizer on each iteration step - if enabled
if self.tb_visualizer is not None:
self.tb_visualizer.iteration_step()
|
11562182
|
from skidl import SKIDL, TEMPLATE, Part, Pin, SchLib
SKIDL_lib_version = '0.0.1'
maxim = SchLib(tool=SKIDL).add_parts(*[
Part(name='DS1267_DIP',dest=TEMPLATE,tool=SKIDL,keywords='Dual Digital Potentiometer Maxim',description='Dual Digital Potentiometer, Serial, 256 Steps, DIP-14',ref_prefix='U',num_units=1,fplist=['DIP*W7.62mm*'],do_erc=True,pins=[
Pin(num='1',name='VB',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='H1',func=Pin.PASSIVE,do_erc=True),
Pin(num='3',name='L1',func=Pin.PASSIVE,do_erc=True),
Pin(num='4',name='W1',func=Pin.PASSIVE,do_erc=True),
Pin(num='5',name='~Reset',do_erc=True),
Pin(num='6',name='CLK',do_erc=True),
Pin(num='7',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='DQ',do_erc=True),
Pin(num='9',name='COUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='10',name='L0',func=Pin.PASSIVE,do_erc=True),
Pin(num='11',name='H0',func=Pin.PASSIVE,do_erc=True),
Pin(num='12',name='W0',func=Pin.PASSIVE,do_erc=True),
Pin(num='13',name='SOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='14',name='VCC',func=Pin.PWRIN,do_erc=True)]),
Part(name='DS1267_SOIC',dest=TEMPLATE,tool=SKIDL,keywords='Dual Digital Potentiometer Maxim',description='Dual Digital Potentiometer, Serial, 256 Steps, SOIC-16',ref_prefix='U',num_units=1,fplist=['SOIC*3.9x9.9mm*1.27mm'],do_erc=True,pins=[
Pin(num='1',name='VB',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='3',name='H1',func=Pin.PASSIVE,do_erc=True),
Pin(num='4',name='L1',func=Pin.PASSIVE,do_erc=True),
Pin(num='5',name='W1',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='~Reset',do_erc=True),
Pin(num='7',name='CLK',do_erc=True),
Pin(num='8',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='DQ',do_erc=True),
Pin(num='10',name='COUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='11',name='L0',func=Pin.PASSIVE,do_erc=True),
Pin(num='12',name='W0',func=Pin.PASSIVE,do_erc=True),
Pin(num='13',name='H0',func=Pin.PASSIVE,do_erc=True),
Pin(num='14',name='SOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='15',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='16',name='VCC',func=Pin.PWRIN,do_erc=True)]),
Part(name='DS1267_TSSOP',dest=TEMPLATE,tool=SKIDL,keywords='Dual Digital Potentiometer Maxim',description='Dual Digital Potentiometer, Serial, 256 Steps, TSSOP-20',ref_prefix='U',num_units=1,fplist=['TSSOP*4.4x6.5mm*0.65mm*'],do_erc=True,pins=[
Pin(num='1',name='VB',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='3',name='H1',func=Pin.PASSIVE,do_erc=True),
Pin(num='4',name='L1',func=Pin.PASSIVE,do_erc=True),
Pin(num='5',name='W1',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='~Reset',do_erc=True),
Pin(num='7',name='CLK',do_erc=True),
Pin(num='8',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='9',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='10',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='20',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='11',name='DQ',do_erc=True),
Pin(num='12',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='13',name='COUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='14',name='L0',func=Pin.PASSIVE,do_erc=True),
Pin(num='15',name='H0',func=Pin.PASSIVE,do_erc=True),
Pin(num='16',name='W0',func=Pin.PASSIVE,do_erc=True),
Pin(num='17',name='SOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='18',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='19',name='NC',func=Pin.NOCONNECT,do_erc=True)]),
Part(name='DS1302',dest=TEMPLATE,tool=SKIDL,do_erc=True),
Part(name='DS1307+',dest=TEMPLATE,tool=SKIDL,do_erc=True,aliases=['DS1307N+', 'DS1307Z+']),
Part(name='DS1602',dest=TEMPLATE,tool=SKIDL,do_erc=True),
Part(name='DS1621',dest=TEMPLATE,tool=SKIDL,do_erc=True),
Part(name='DS1804',dest=TEMPLATE,tool=SKIDL,do_erc=True),
Part(name='DS1822Z',dest=TEMPLATE,tool=SKIDL,keywords='OneWire 1Wire Dallas Maxim',description='High-Precision 1-Wire Digital Thermometer SOIC-8',ref_prefix='U',num_units=1,fplist=['SOIC-8_3.9x4.9mm_Pitch1.27mm', 'SOIC-8_3.9x4.9mm_Pitch1.27mm*'],do_erc=True,aliases=['DS18B20Z', 'DS18S20Z'],pins=[
Pin(num='3',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='DQ',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='GND',func=Pin.PWRIN,do_erc=True)]),
Part(name='DS1825',dest=TEMPLATE,tool=SKIDL,keywords='1Wire OneWire Maxim Dallas',description='Programmable Resolution 1-Wire Digital Thermometer With 4-Bit ID',ref_prefix='U',num_units=1,fplist=['MSOP-8_3x3mm_Pitch0.65mm', 'MSOP-8_3x3mm_Pitch0.65mm*'],do_erc=True,pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='DQ',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='AD0',do_erc=True),
Pin(num='6',name='AD1',do_erc=True),
Pin(num='7',name='AD2',do_erc=True),
Pin(num='8',name='AD3',do_erc=True)]),
Part(name='DS18B20U',dest=TEMPLATE,tool=SKIDL,keywords='OneWire 1-Wire 1Wire Maxim Dallas',description='Programmable Resolution 1-Wire Digital Thermometer MSOP-8',ref_prefix='U',num_units=1,fplist=['MSOP-8_3x3mm_Pitch0.65mm', 'MSOP-8_3x3mm_Pitch0.65mm*'],do_erc=True,pins=[
Pin(num='1',name='DQ',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='VDD',func=Pin.PWRIN,do_erc=True)]),
Part(name='DS2401P',dest=TEMPLATE,tool=SKIDL,keywords='OneWire 1-Wire 1Wire Maxim Dallas ID',description='Silicon Serial Number TSSOP-6',ref_prefix='U',num_units=1,fplist=['TSSOP-6'],do_erc=True,pins=[
Pin(num='1',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='DQ',func=Pin.BIDIR,do_erc=True)]),
Part(name='DS2401Z',dest=TEMPLATE,tool=SKIDL,keywords='OneWire 1-Wire 1Wire Maxim Dallas ID',description='Silicon Serial Number SOT-223',ref_prefix='U',num_units=1,fplist=['SOT-223', 'SOT-223*'],do_erc=True,pins=[
Pin(num='1',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='DQ',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True)]),
Part(name='DS2482-100',dest=TEMPLATE,tool=SKIDL,keywords='1-Wire I2C',description='Single-Channel 1-Wire Master, SOIC-8',ref_prefix='U',num_units=1,fplist=['SOIC*3.9x4.9mm*Pitch1.27mm*'],do_erc=True,pins=[
Pin(num='1',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='IO',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='SCL',do_erc=True),
Pin(num='5',name='SDA',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='PCTLZ',func=Pin.OUTPUT,do_erc=True),
Pin(num='7',name='AD1',do_erc=True),
Pin(num='8',name='AD0',do_erc=True)]),
Part(name='DS28EA00',dest=TEMPLATE,tool=SKIDL,keywords='1Wire OneWire Maxim Dallas',description='1-Wire Digital Thermometer with Sequence Detect and PIO',ref_prefix='U',num_units=1,fplist=['MSOP-8_3x3mm_Pitch0.65mm', 'MSOP-8_3x3mm_Pitch0.65mm*'],do_erc=True,pins=[
Pin(num='1',name='IO',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='PIOA',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='PIOB',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VCC',func=Pin.PWRIN,do_erc=True)]),
Part(name='DS3231',dest=TEMPLATE,tool=SKIDL,keywords='RTC TCXO Realtime Time Clock Crystal Oscillator I2C',description='Extremely Accurate I2C-Integrated RTC/TCXO/Crystal SOIC-16',ref_prefix='U',num_units=1,fplist=['SOIC-*_7.5x10.3mm_Pitch1.27mm*'],do_erc=True,pins=[
Pin(num='1',name='32KHZ',func=Pin.OPENCOLL,do_erc=True),
Pin(num='2',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='~INT~/SQW',func=Pin.OPENCOLL,do_erc=True),
Pin(num='4',name='~RST',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='NC',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='NC',func=Pin.PASSIVE,do_erc=True),
Pin(num='7',name='NC',func=Pin.PASSIVE,do_erc=True),
Pin(num='8',name='NC',func=Pin.PASSIVE,do_erc=True),
Pin(num='9',name='NC',func=Pin.PASSIVE,do_erc=True),
Pin(num='10',name='NC',func=Pin.PASSIVE,do_erc=True),
Pin(num='11',name='NC',func=Pin.PASSIVE,do_erc=True),
Pin(num='12',name='NC',func=Pin.PASSIVE,do_erc=True),
Pin(num='13',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='14',name='VBAT',func=Pin.PWRIN,do_erc=True),
Pin(num='15',name='SDA',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='SCL',do_erc=True)]),
Part(name='DS3231MZ',dest=TEMPLATE,tool=SKIDL,keywords='RTC TCXO Realtime Time Clock MEMS I2C',description='±5ppm, I2C Real-Time Clock SOIC-8',ref_prefix='U',num_units=1,fplist=['SOIC*3.9x4.9mm*Pitch1.27mm*'],do_erc=True,pins=[
Pin(num='1',name='32KHZ',func=Pin.OPENCOLL,do_erc=True),
Pin(num='2',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='~INT~/SQW',func=Pin.OPENCOLL,do_erc=True),
Pin(num='4',name='~RST',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='VBAT',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='SDA',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='SCL',do_erc=True)]),
Part(name='DS3232M',dest=TEMPLATE,tool=SKIDL,keywords='RTC TCXO Realtime Time Clock MEMS SRAM I2C',description='±5ppm, I2C Real-Time Clock with SRAM SOIC-8',ref_prefix='U',num_units=1,fplist=['SOIC-*_3.9x4.9mm_Pitch1.27mm*'],do_erc=True,pins=[
Pin(num='1',name='32KHZ',func=Pin.OUTPUT,do_erc=True),
Pin(num='2',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='~INT~/SQW',func=Pin.OPENCOLL,do_erc=True),
Pin(num='4',name='~RST',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='VBAT',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='SDA',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='SCL',do_erc=True)]),
Part(name='MAX1248',dest=TEMPLATE,tool=SKIDL,keywords='10-Bit ADC Serial 4-Channel Maxim',description='4-Channel 10-Bit ADC with Serial Interface, +2.7V to +5.25V, Low-Power',ref_prefix='U',num_units=1,fplist=['DIP*', 'QSOP*'],do_erc=True,aliases=['MAX1249'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CH0',do_erc=True),
Pin(num='3',name='CH1',do_erc=True),
Pin(num='4',name='CH2',do_erc=True),
Pin(num='5',name='CH3',do_erc=True),
Pin(num='6',name='COM',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='~SHDN',func=Pin.TRISTATE,do_erc=True),
Pin(num='8',name='VREF',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='REFADJ',do_erc=True),
Pin(num='10',name='AGND',func=Pin.PWRIN,do_erc=True),
Pin(num='11',name='DGND',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='DOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='13',name='SSTRB',func=Pin.OUTPUT,do_erc=True),
Pin(num='14',name='DIN',do_erc=True),
Pin(num='15',name='~CS',do_erc=True),
Pin(num='16',name='SCLK',do_erc=True)]),
Part(name='MAX2606',dest=TEMPLATE,tool=SKIDL,do_erc=True,aliases=['MAX2505', 'MAX2507', 'MAX2508', 'MAX2509']),
Part(name='MAX31820',dest=TEMPLATE,tool=SKIDL,keywords='OneWire 1-Wire 1Wire Maxim Dallas',description='1-Wire Ambient Temperature Sensor',ref_prefix='U',num_units=1,fplist=['TO-92_*'],do_erc=True,aliases=['DS1822', 'DS18B20', 'DS18S20', 'DS1821C'],pins=[
Pin(num='1',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='DQ',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='VDD',func=Pin.PWRIN,do_erc=True)]),
Part(name='MAX31820PAR',dest=TEMPLATE,tool=SKIDL,keywords='OneWire 1-Wire 1Wire Maxim Dallas',description='1-Wire, Parasite-Power, Ambient Temperature Sensor',ref_prefix='U',num_units=1,fplist=['TO-92_*'],do_erc=True,aliases=['DS1822-PAR', 'DS18B20-PAR', 'DS18S20-PAR', 'DS2401'],pins=[
Pin(num='1',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='DQ',func=Pin.BIDIR,do_erc=True)]),
Part(name='MAX31826',dest=TEMPLATE,tool=SKIDL,keywords='1Wire OneWire Maxim Dallas',description='1-Wire Digital Temperature Sensor with 1Kb Lockable EEPROM',ref_prefix='U',num_units=1,fplist=['MSOP-8_3x3mm_Pitch0.65mm', 'MSOP-8_3x3mm_Pitch0.65mm*'],do_erc=True,pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='DQ',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='AD0',do_erc=True),
Pin(num='6',name='AD1',do_erc=True),
Pin(num='7',name='AD2',do_erc=True),
Pin(num='8',name='AD3',do_erc=True)]),
Part(name='MAX453',dest=TEMPLATE,tool=SKIDL,do_erc=True),
Part(name='MAX5436',dest=TEMPLATE,tool=SKIDL,do_erc=True),
Part(name='MAX6355',dest=TEMPLATE,tool=SKIDL,do_erc=True),
Part(name='MAX7325AEG+',dest=TEMPLATE,tool=SKIDL,do_erc=True),
Part(name='Max691',dest=TEMPLATE,tool=SKIDL,do_erc=True)])
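# Usage sketch (assumed SKIDL workflow; the part name is only illustrative):
#   from skidl import Part
#   rtc = Part(maxim, 'DS3231')   # instantiate the DS3231 template from this library into the circuit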
|
11562207
|
import os
import sys
import argparse
import re
import xml.etree.ElementTree as xml_et
from xml.dom import minidom
assert sys.version_info >= (3, 5)
def scan_cpp(base_path, rel_project_path):
test_project_path = os.path.join(base_path, rel_project_path)
if not os.path.isdir(test_project_path):
print('Error: there is no folder: %s' % test_project_path)
sys.exit(1)
dict_test_suites = dict()
re_line = re.compile(r'^class\s+([\w\d_]+)\s*:\s*public\s*::testing::Test')
for dirpath, dirnames, filenames in os.walk(test_project_path):
for file in filenames:
if os.path.splitext(file)[1] == '.cpp':
cpp_file_path = os.path.join(dirpath, file)
print(' Found: %s' % os.path.relpath(cpp_file_path))
with open(cpp_file_path) as fd:
for line in fd.readlines():
m = re_line.match(line)
if m:
test_suit_name = m.group(1)
rel_path = os.path.relpath(cpp_file_path, base_path)
dict_test_suites[test_suit_name] = rel_path
break
return dict_test_suites
def handle_gtest_results(input_file, base_path, rel_project_path, xml_root):
dict_test_suites = scan_cpp(base_path, rel_project_path)
with open(input_file, 'r') as fd:
input_content = fd.read()
gtest_xml = xml_et.XML(input_content)
if gtest_xml.tag != 'testsuites':
print('Error: Bad input file: expected "testsuites" section, but found "%s"' % gtest_xml.tag)
sys.exit(1)
test_suits = list(gtest_xml)
for test_suite in test_suits:
assert isinstance(test_suite, xml_et.Element)
if test_suite.tag != 'testsuite':
print('Error: Bad input file: expected "testsuite" section, but found "%s"' % test_suite.tag)
sys.exit(1)
test_suite_name = test_suite.attrib['name']
print(' Test suite name: %s' % test_suite_name)
test_suite_cpp = dict_test_suites[test_suite_name]
xml_elem_file = xml_et.Element('file')
xml_elem_file.set('path', test_suite_cpp)
test_cases = list(test_suite)
for test_case in test_cases:
assert isinstance(test_case, xml_et.Element)
if test_case.tag != 'testcase':
print('Error: Bad input file: expected "testcase" section, but found "%s"' % test_case.tag)
sys.exit(1)
test_case_name = test_case.attrib['name']
test_case_time = test_case.attrib['time']
test_case_time = str(int(float(test_case_time) * 1000))
test_case_status = test_case.attrib['status']
test_case_result = test_case.attrib['result']
xml_elem_test_case = xml_et.Element('testCase')
print(' Test case: %s: %s: %s' % (test_case_name, test_case_status, test_case_result))
xml_elem_test_case.set('name', test_case_name)
xml_elem_test_case.set('duration', test_case_time)
if test_case_status == 'run':
if test_case_result == 'completed':
failures = list(test_case)
for failure in failures:
assert isinstance(failure, xml_et.Element)
if failure.tag != 'failure':
print('Error: Bad input file: expected "failure" section, but found "%s"' % failure.tag)
sys.exit(1)
failure_message = failure.attrib['message']
failure_text = failure.text
failure_message_short = failure_message.partition('\n')[0]
print(' Failure: %s' % failure_message_short)
xml_elem_failure = xml_et.Element('failure')
                        # failure_message = failure_message.replace('\n', '\n')
xml_elem_failure.set('message', failure_message)
xml_elem_test_case.append(xml_elem_failure)
pass
else:
print('Error: Unknown test case result in status "run": %s' % test_case_result)
sys.exit(1)
elif test_case_status == 'notrun':
if test_case_result == 'suppressed':
# <skipped message="short message">other</skipped>
xml_elem_skipped = xml_et.Element('skipped')
xml_elem_skipped.set('message', 'disabled')
xml_elem_test_case.append(xml_elem_skipped)
else:
print('Error: Unknown test case result in status "notrun": %s' % test_case_result)
sys.exit(1)
else:
print('Error: Unknown test case status: %s' % test_case_status)
sys.exit(1)
xml_elem_file.append(xml_elem_test_case)
pass
xml_root.append(xml_elem_file)
def main():
parser = argparse.ArgumentParser(
description='Convert google-test result xml to the sonarcloud generic execution report xml')
parser.add_argument('--cmake_build_path', help='CMake build path')
parser.add_argument('--base_path', help='base path', default='.')
parser.add_argument('--output', help='output xml', required=True)
parser.add_argument('--update_sonar_project_properties',
help='Update "sonar.tests" property in "sonar-project.properties"',
action='store_true')
args = parser.parse_args()
cmake_build_path = args.cmake_build_path
base_path = args.base_path
output_file = args.output
list_of_test_projects = list()
xml_root = xml_et.Element('testExecutions')
xml_root.set('version', '1')
tests_path = os.path.split(os.path.abspath(cmake_build_path))[0]
for dirpath, dirnames, filenames in os.walk(cmake_build_path):
for file in filenames:
if file == 'gtestresults.xml':
project_name = os.path.split(dirpath)[1]
project_path = os.path.join(tests_path, project_name)
rel_project_path = os.path.relpath(project_path, base_path)
list_of_test_projects.append(rel_project_path)
print('Found google-test results: %s' % os.path.join(dirpath, file))
handle_gtest_results(os.path.join(dirpath, file), base_path, rel_project_path, xml_root)
xml_str = minidom.parseString(xml_et.tostring(xml_root)).toprettyxml(indent=" ")
with open(output_file, 'w') as fd:
fd.write(xml_str)
if args.update_sonar_project_properties:
with open('sonar-project.properties', 'w') as fd:
fd.write('sonar.tests=%s\n' % ','.join(list_of_test_projects))
if __name__ == '__main__':
main()
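# Example invocation (hypothetical script/file names; the flags match the argparse setup above):
#   python gtest_to_sonar.py --cmake_build_path build --base_path . --output test-execution.xml
# The generated report is typically passed to sonar-scanner via sonar.testExecutionReportPaths.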
|
11562220
|
from bokeh.embed import json_item
import json
import os
from random import choices
from string import ascii_letters
import streamlit.components.v1 as components
_RELEASE = True
if not _RELEASE:
_component_func = components.declare_component(
"streamlit_bokeh_events", url="http://localhost:3001",
)
else:
parent_dir = os.path.dirname(os.path.abspath(__file__))
build_dir = os.path.join(parent_dir, "frontend/build")
_component_func = components.declare_component("streamlit_bokeh_events", path=build_dir)
def streamlit_bokeh_events(bokeh_plot=None, events="", key=None, debounce_time=1000, refresh_on_update=True, override_height=None):
"""Returns event dict
Keyword arguments:
bokeh_plot -- Bokeh figure object (default None)
events -- Comma separated list of events dispatched by bokeh eg. "event1,event2,event3" (default "")
debounce_time -- Time in ms to wait before dispatching latest event (default 1000)
    refresh_on_update -- Should the chart be re-rendered on update (default True);
        set to False if you are not updating the datasource at runtime
override_height -- Override plot viewport height
"""
if key is None:
raise ValueError("key can not be None.")
div_id = "".join(choices(ascii_letters, k=16))
fig_dict = json_item(bokeh_plot, div_id)
json_figure = json.dumps(fig_dict)
component_value = _component_func(
bokeh_plot=json_figure,
events=events,
key=key,
_id=div_id,
default=None,
debounce_time=debounce_time,
refresh_on_update=refresh_on_update,
override_height=override_height
)
return component_value
if not _RELEASE:
import streamlit as st
import pandas as pd
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, CustomJS
from bokeh.models import DataTable, TableColumn
from bokeh.plotting import figure
st.set_page_config(layout="wide")
# import function
# from streamlit_bokeh_events import streamlit_bokeh_events
col1, col2 = st.beta_columns(2)
df = pd.read_csv('https://raw.githubusercontent.com/mwaskom/seaborn-data/master/iris.csv')
# create plot
cds = ColumnDataSource(df)
columns = list(map(lambda colname: TableColumn(field=colname, title=colname), df.columns))
# define events
cds.selected.js_on_change(
"indices",
CustomJS(
args=dict(source=cds),
code="""
document.dispatchEvent(
new CustomEvent("INDEX_SELECT", {detail: {data: source.selected.indices}})
)
"""
)
)
table = DataTable(source=cds, columns=columns)
with col1:
result = streamlit_bokeh_events(
bokeh_plot=table,
events="INDEX_SELECT",
key="foo",
refresh_on_update=False,
debounce_time=0,
override_height=500
)
if result:
if result.get("INDEX_SELECT"):
st.write(df.iloc[result.get("INDEX_SELECT")["data"]])
plot = figure(tools="lasso_select,zoom_in")
df["colors"] = df.species.replace({"setosa": "#583d72", "versicolor": "#9f5f80", "virginica": "#ffba93"})
cds_lasso = ColumnDataSource(df)
cds_lasso.selected.js_on_change(
"indices",
CustomJS(
args=dict(source=cds_lasso),
code="""
document.dispatchEvent(
new CustomEvent("LASSO_SELECT", {detail: {data: source.selected.indices}})
)
"""
)
)
plot.circle("sepal_length", "sepal_width", fill_alpha=0.5, color="colors", size=10, line_color=None, source=cds_lasso)
with col2:
result_lasso = streamlit_bokeh_events(
bokeh_plot=plot,
events="LASSO_SELECT",
key="bar",
refresh_on_update=False,
debounce_time=0)
if result_lasso:
if result_lasso.get("LASSO_SELECT"):
st.write(df.iloc[result_lasso.get("LASSO_SELECT")["data"]])
|
11562241
|
import pandas as pd
import numpy as np
from sklearn import model_selection
from sklearn.svm import SVC as svc
from sklearn.metrics import accuracy_score
import os
import sys
eps = sys.float_info.epsilon
training_data = pd.read_csv(os.getenv('TRAINING'), header=0)
tournament_data = pd.read_csv(os.getenv('TESTING'), header=0)
features = [f for f in list(training_data) if 'feature' in f]
# this returns four arrays, in the order: features_train, features_test, labels_train, labels_test
features_train, features_test, labels_train, labels_test = model_selection.train_test_split(training_data[features], training_data['target'], test_size=0.3, random_state=0)
clf = svc(C=1.0, probability=True).fit(features_train, labels_train)
# Alternative: calibration.CalibratedClassifierCV(svm.LinearSVC(C=1.0, verbose=True))
# predict the target on the 30% hold-out portion of the training data
predictions = clf.predict(features_test)
print(predictions)
accuracy = accuracy_score(labels_test,predictions, normalize=True, sample_weight=None)
print(accuracy)
#c = 1.0 -> 0.514361849391
#c = 100.0 -> 0.518133997785
prob_predictions_tourney = clf.predict_proba(tournament_data[features])
t_id = tournament_data['id']
results = prob_predictions_tourney[:, 1]
results_df = pd.DataFrame(data={'probability':results})
joined = pd.DataFrame(t_id).join(np.clip(results_df, 0.0 + eps, 1.0 - eps))
joined.to_csv(os.getenv('PREDICTING'), index=False, float_format='%.16f')
|
11562285
|
import numpy as np
import urllib2
import os
import scipy.io as spio
from functools import partial
import multiprocessing
import argparse
from PIL import Image
from StringIO import StringIO
import traceback
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
MAT_URL = 'http://vision.cs.princeton.edu/projects/2010/SUN/urls/SUN397_urls.mat'
######### Importing .mat files ###############################################
######### Reference: http://stackoverflow.com/a/8832212 ######################
def loadmat(filename):
'''
this function should be called instead of direct spio.loadmat
as it cures the problem of not properly recovering python dictionaries
from mat files. It calls the function check keys to cure all entries
which are still mat-objects
'''
data = spio.loadmat(filename, struct_as_record=False, squeeze_me=True)
return _check_keys(data)
def _check_keys(dict):
'''
checks if entries in dictionary are mat-objects. If yes
todict is called to change them to nested dictionaries
'''
for key in dict:
if isinstance(dict[key], spio.matlab.mio5_params.mat_struct):
dict[key] = _todict(dict[key])
return dict
def _todict(matobj):
'''
A recursive function which constructs from matobjects nested dictionaries
'''
dict = {}
for strg in matobj._fieldnames:
elem = matobj.__dict__[strg]
if isinstance(elem, spio.matlab.mio5_params.mat_struct):
dict[strg] = _todict(elem)
# Handle case where elem is an array of mat_structs
elif isinstance(elem, np.ndarray) and len(elem) > 0 and \
isinstance(elem[0], spio.matlab.mio5_params.mat_struct):
dict[strg] = np.array([_todict(subelem) for subelem in elem])
else:
dict[strg] = elem
return dict
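# Usage sketch: loadmat('SUN397_urls.mat')['SUN'] (the file fetched in main() below) yields nested
# dicts / object arrays instead of raw mat_structs, which is what the category loop relies on.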
def download_numbered_file((url, category_name, is_train), dataset_root):
'''
Download a file
'''
# Get file byte string
try:
response = urllib2.urlopen(url)
content = response.read()
# Convert to Image via string buffer
buff = StringIO()
buff.write(content)
buff.seek(0)
image = Image.open(buff)
# Resize image
image = image.resize((64, 64), Image.BICUBIC)
# Convert to RGB
image = image.convert('RGB')
# Save resized image
with open(os.path.join(dataset_root, 'training' if is_train else 'testing', category_name,
os.path.basename(url)), 'w') as f:
image.save(f)
except:
print('Failed to save %s, see traceback' % ((url, category_name, is_train),))
traceback.print_exc()
def main(bg_categories, num_threads):
os.chdir(SCRIPT_DIR)
print('Downloading data...')
# Download URL file
mat_save_path = os.path.join(SCRIPT_DIR, 'SUN397_urls.mat')
if not os.path.exists(mat_save_path):
response = urllib2.urlopen(MAT_URL)
content = response.read()
with open(mat_save_path, 'w') as f:
f.write(content)
# Set background directory
background_dir = os.path.abspath(os.path.join(SCRIPT_DIR, 'sun_bg'))
# Parse URL file
data = loadmat(mat_save_path)['SUN']
# Filter to specified background categories
if bg_categories is not None:
data = [x for x in data if x.category in bg_categories]
print('Found %d categories' % len(data))
# Start pool
pool = multiprocessing.Pool(num_threads)
all_save_info = []
for category_data in data:
# Generate random training and testing split for this category
num_images = len(category_data.images)
split = np.zeros(num_images, dtype=np.bool)
split[:num_images/2] = True
np.random.shuffle(split)
# Convert backslashes in category name to underscores
processed_category_name = category_data.category.replace('\\', '_')
# Make category directories
train_dir = os.path.join(background_dir, 'training', processed_category_name)
test_dir = os.path.join(background_dir, 'testing', processed_category_name)
if not os.path.isdir(train_dir):
os.makedirs(train_dir)
if not os.path.isdir(test_dir):
os.makedirs(test_dir)
save_info = [(url, processed_category_name, split[i])
for i, url in enumerate(category_data.images)]
all_save_info += save_info
# Print category info
print('Found %d images for category %s (%s)' % (num_images, category_data.category,
processed_category_name))
# Save images
print('Downloading a total of %d images...' % len(all_save_info))
fn = partial(download_numbered_file, dataset_root=background_dir)
iter = pool.imap(fn, all_save_info)
# iter = map(fn, all_save_info)
for i, _ in enumerate(iter):
if i % 200 == 0:
print('Finished %d/%d images' % (i, len(all_save_info)))
# Delete URL file
os.remove(mat_save_path)
print('Done.')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('--bg_categories', type=str, nargs='+',
                        help='SUN 397 categories to download (e.g. a\\abbey or '
                             'a\\apartment_building\\outdoor)')
parser.add_argument('--num_threads', type=int, default=multiprocessing.cpu_count(),
help='Number of download threads')
args = parser.parse_args()
main(**vars(args))
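# Example invocation (hypothetical script name; categories use the SUN397 backslash notation and
# should be shell-quoted):
#   python download_sun_backgrounds.py --bg_categories 'a\abbey' 'b\beach' --num_threads 8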
|
11562311
|
import logging
import os
class ModelLogger:
"""A logger specific for the tasks of the ModelAdvancer"""
def __init__(self):
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
self.logger = logging.getLogger('simulation')
self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
self.fh = logging.FileHandler('model_logger.log')
self.fh.setFormatter(self.formatter)
self.logger.addHandler(self.fh)
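# Usage sketch:
#   log = ModelLogger().logger
#   log.info("advancing model one step")   # written to the console (via basicConfig) and to model_logger.log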
|
11562320
|
import base64
import json
import os
import re
import requests
import shutil
import subprocess
import tempfile
def client_error(message):
print('Client error:', message)
return {
'statusCode': 400,
'headers': { 'Content-Type': 'application/json' },
'body': json.dumps({ 'error': message })
}
def server_error(message):
print('Server error:', message)
return {
'statusCode': 500,
'headers': { 'Content-Type': 'application/json' },
'body': json.dumps({ 'error': message })
}
def handle(event, context):
image_file = None
try:
body = json.loads(event['body'])
url = body['url']
print('Fetching image {}'.format(url))
res = requests.get(url, stream=True)
image_file = tempfile.NamedTemporaryFile(delete=False)
print('Writing image to {}'.format(image_file.name))
shutil.copyfileobj(res.raw, image_file)
image_file.close()
print('Identifying image size')
identify_out = subprocess.check_output(['identify', '-format', '%G', image_file.name])
parts = re.split('[x\n]', identify_out.decode('utf-8'))
if len(parts) < 2:
return client_error('could not process image: invalid dimensions')
width = int(parts[0])
height = int(parts[1])
data = { 'width': width, 'height': height }
print('Validating image of size {}x{}'.format(width, height))
subprocess.check_call(['convert', image_file.name, 'NULL:'])
print('Done processing image')
return {
'statusCode': 200,
'headers': { 'Content-Type': 'application/json' },
'body': json.dumps({ 'data': data })
}
except subprocess.CalledProcessError as err:
return client_error('could not process image: {}'.format(str(err)))
finally:
if image_file:
os.unlink(image_file.name)
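# Example request body this handler expects (hypothetical URL):
#   {"url": "https://example.com/photo.jpg"}
# On success it returns {"data": {"width": <int>, "height": <int>}} with status 200; invalid images
# produce a 400 via client_error.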
|
11562321
|
from detectron2.utils.visualizer import Visualizer, GenericMask, _create_text_labels, ColorMode, random_color, _SMALL_OBJECT_AREA_THRESH
import numpy as np
class SORVisualizer(Visualizer):
def __init__(self, image, metadata, instance_mode=ColorMode.IMAGE):
super(SORVisualizer, self).__init__(
image, metadata, instance_mode=instance_mode)
def draw_instance_sor_predictions(self, predictions):
masks = np.asarray(predictions.pred_masks)
masks = [GenericMask(x, self.output.height, self.output.width)
for x in masks]
ranks = predictions.pred_ranks if len(masks) > 0 else None
self.overlay_sor_instances(
masks=masks,
ranks=ranks
)
return self.output
def overlay_sor_instances(
self,
*,
masks=None,
ranks=None
):
num_instances = len(masks)
if num_instances == 0:
return self.output
filtered_masks = []
filtered_ranks = []
for rank, mask in zip(ranks, masks):
if rank >=0:
filtered_masks.append(mask)
filtered_ranks.append(rank)
masks = filtered_masks
ranks = filtered_ranks
num_instances = len(masks)
if num_instances == 0:
return self.output
masks = self._convert_masks(masks)
assigned_colors = []
for r in ranks:
val = (r + 6.0) / 10.0
assigned_colors.append(np.array([val] * 3))
# assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
# Display in largest to smallest order to reduce occlusion.
areas = None
if masks is not None:
areas = np.asarray([x.area() for x in masks])
if areas is not None:
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
masks = [masks[idx]
for idx in sorted_idxs] if masks is not None else None
assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
for i in range(num_instances):
color = assigned_colors[i]
if masks is not None:
for segment in masks[i].polygons:
self.draw_polygon(segment.reshape(-1, 2), color, alpha=1.0)
return self.output
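# Usage sketch (assumes detectron2 predictions carrying pred_masks plus the custom pred_ranks field,
# moved to CPU first; variable names are illustrative):
#   v = SORVisualizer(image_rgb, metadata)
#   vis_output = v.draw_instance_sor_predictions(outputs["instances"].to("cpu"))
#   vis_output.save("ranked_instances.png")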
|
11562393
|
import exceptions
import model
import dataclasses
import typing
import ujson
import http
@dataclasses.dataclass
class Model:
"""Represents a NeuroAI text generation model."""
identifier: str = "60ca2a1e54f6ecb69867c72c"
response_length: int = 200
remove_input: bool = False
temperature: float = 0.9
repetition_penalty: float = 1.0
__documentation__: typing.ClassVar[dict[str, str]] = {
"identifier": "The name of the model on NeuroAI.",
"response_length": "The maximum number of tokens to generate.",
"remove_input": "Whether to remove the input text from the response.",
"temperature": "Determines how wacky the generated text is.",
"repetition_penalty": "Determines whether to penalize repeated tokens.",
}
@classmethod
def documentation(cls, field: dataclasses.Field) -> str:
"""Return the documentation for a specific field."""
return cls.__documentation__[field.name]
async def generate(self, query: str) -> str:
"""Generate text using this model's configuration."""
return await Backend.generate(self, query)
class Backend:
@classmethod
def setup(cls, bot: model.Bakerbot) -> None:
cls.base = "https://api.neuro-ai.co.uk"
cls.session = bot.session
cls.token = bot.secrets.get("neuro-token", None)
@classmethod
async def post(cls, endpoint: str, **kwargs) -> dict:
"""Send a HTTP POST request to the Neuro API."""
async with cls.session.post(f"{cls.base}/{endpoint}", **kwargs) as response:
data = await response.json(encoding="utf-8", loads=ujson.loads)
if response.status != http.HTTPStatus.OK:
error = data.get("error", None)
raise exceptions.HTTPUnexpected(response.status, error)
return data
@classmethod
async def generate(cls, config: Model, query: str) -> str:
"""Generate text using the Neuro API."""
if cls.token is None:
raise model.SecretNotFound("neuro-token not found in secrets.json.")
parameters = {"include_result": "true"}
headers = {"Authorization": f"Bearer {cls.token}"}
payload = {
"modelId": config.identifier,
"data": query,
"input_kwargs": {
"response_length": config.response_length,
"remove_input": config.remove_input,
"temperature": config.temperature,
"repetition_penalty": config.repetition_penalty
}
}
data = await cls.post("SyncPredict", params=parameters, json=payload, headers=headers)
if data["state"] == "ERROR":
# API returned HTTP 200 OK, but there's still an error.
status = http.HTTPStatus.OK
message = data["result"]
raise exceptions.HTTPUnexpected(status, message)
return data["result"][0]["generated_text"]
def setup(bot: model.Bakerbot) -> None:
Backend.setup(bot)
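# Usage sketch (assumes an initialised Bakerbot instance named `bot`, called from async code):
#   setup(bot)
#   reply = await Model(temperature=0.7).generate("Write a haiku about bread.")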
|
11562431
|
import pickle
import codecs
def load_pickle(path):
with codecs.open(path, 'rb') as input_f:
return pickle.load(input_f)
|
11562442
|
from .missing_element_base import MissingElementBase
from .missing_fieldset_entry import MissingFieldsetEntry
class MissingFieldset(MissingElementBase):
def __repr__(self):
if self._key:
return f"<class MissingFieldset key={self._key}>"
return '<class MissingFieldset>'
def entries(self, _key=None):
return []
def entry(self, key=None):
return MissingFieldsetEntry(key, self)
def optional_entry(self, _key=None):
return None
def required_entry(self, _key=None):
self._parent._missing_error(self)
|
11562451
|
import os
from setuptools import setup
here = lambda *a: os.path.join(os.path.dirname(__file__), *a)
# read the long description
with open(here('README.md'), 'r') as readme_file:
long_description = readme_file.read()
# read the requirements.txt
with open(here('requirements.txt'), 'r') as requirements_file:
requirements = [x.strip() for x in requirements_file.readlines()]
setup(
name='pyenergenie',
version='0.0.1',
description='A python interface to the Energenie line of products',
long_description=long_description,
author='whaleygeek',
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6'
],
packages=['pyenergenie', 'pyenergenie.energenie'],
package_dir={
'pyenergenie': 'src/',
'pyenergenie.energenie': 'src/energenie/'
},
install_requires=requirements,
package_data={
'pyenergenie': [
'energenie/drv/*'
]
},
entry_points={
'console_scripts': [
'pyenergenie=pyenergenie.setup_tool:main'
]
}
)
|
11562463
|
from abc import ABC
from dataclasses import InitVar, dataclass, field
from typing import Dict, List, Set, Tuple, Union
import optuna
from typing_extensions import Final
from embeddings.hyperparameter_search.configspace import (
BaseConfigSpace,
Parameter,
SampledParameters,
)
from embeddings.hyperparameter_search.parameters import (
ConstantParameter,
ParameterValues,
SearchableParameter,
)
# Mypy currently doesn't properly handle dataclasses with abstract methods: https://github.com/python/mypy/issues/5374
@dataclass # type: ignore
class AbstractFlairModelTrainerConfigSpace(BaseConfigSpace, ABC):
embedding_name: InitVar[Union[str, List[str]]]
param_embedding_name: Parameter = field(init=False)
param_selection_mode: Parameter = field(
init=False, default=ConstantParameter(name="param_selection_mode", value=True)
)
save_final_model: Parameter = field(
init=False, default=ConstantParameter(name="save_final_model", value=False)
)
learning_rate: Parameter = SearchableParameter(
name="learning_rate", type="log_uniform", low=1e-4, high=1e-1
)
mini_batch_size: Parameter = SearchableParameter(
name="mini_batch_size", type="log_int_uniform", low=16, high=256, step=1
)
max_epochs: Parameter = SearchableParameter(
name="max_epochs", type="int_uniform", low=1, high=5, step=1
)
def __post_init__(self, embedding_name: Union[str, List[str]]) -> None:
if isinstance(embedding_name, str):
self.param_embedding_name: Parameter = ConstantParameter(
name="embedding_name",
value=embedding_name,
)
else:
self.param_embedding_name: Parameter = SearchableParameter(
name="embedding_name",
type="categorical",
choices=embedding_name,
)
@staticmethod
def _parse_model_trainer_parameters(
parameters: Dict[str, ParameterValues]
) -> Tuple[Dict[str, ParameterValues], Dict[str, ParameterValues]]:
task_train_keys: Final = {
"learning_rate",
"mini_batch_size",
"max_epochs",
"param_selection_mode",
"save_final_model",
}
task_train_kwargs = BaseConfigSpace._pop_parameters(
parameters=parameters, parameters_keys=task_train_keys
)
return parameters, task_train_kwargs
class FlairModelTrainerConfigSpace(AbstractFlairModelTrainerConfigSpace):
@staticmethod
def parse_parameters(parameters: Dict[str, ParameterValues]) -> SampledParameters:
embedding_name = parameters.pop("embedding_name")
(
parameters,
task_train_kwargs,
) = FlairModelTrainerConfigSpace._parse_model_trainer_parameters(parameters)
BaseConfigSpace._check_unmapped_parameters(parameters=parameters)
return {"embedding_name": embedding_name, "task_train_kwargs": task_train_kwargs}
@dataclass
class SequenceLabelingConfigSpace(AbstractFlairModelTrainerConfigSpace):
hidden_size: Parameter = SearchableParameter(
name="hidden_size", type="int_uniform", low=128, high=2048, step=128
)
use_rnn: Parameter = SearchableParameter(
name="use_rnn", type="categorical", choices=[True, False]
)
rnn_type: Parameter = SearchableParameter(
name="rnn_type", type="categorical", choices=["LSTM", "GRU"]
)
rnn_layers: Parameter = SearchableParameter(
name="rnn_layers", type="int_uniform", low=1, high=3, step=1
)
dropout: Parameter = SearchableParameter(
name="dropout", type="discrete_uniform", low=0.0, high=0.5, q=0.05
)
word_dropout: Parameter = SearchableParameter(
name="word_dropout", type="discrete_uniform", low=0.0, high=0.5, q=0.05
)
locked_dropout: Parameter = SearchableParameter(
name="locked_dropout", type="discrete_uniform", low=0.0, high=0.5, q=0.05
)
reproject_embeddings: Parameter = SearchableParameter(
name="reproject_embeddings", type="categorical", choices=[True, False]
)
use_crf: Parameter = SearchableParameter(
name="use_crf", type="categorical", choices=[True, False]
)
def _map_task_specific_parameters(
self, trial: optuna.trial.Trial
) -> Tuple[Dict[str, ParameterValues], Set[str]]:
parameters = {}
use_rnn_name, use_rnn_val = self._parse_parameter(trial=trial, param_name="use_rnn")
parameters[use_rnn_name] = use_rnn_val
if use_rnn_val:
for rnn_param in ("rnn_layers", "rnn_type"):
parameters.update([self._parse_parameter(trial=trial, param_name=rnn_param)])
mapped_parameters: Final[Set[str]] = {"rnn_layers", "rnn_type", "use_rnn"}
return parameters, mapped_parameters
@staticmethod
def parse_parameters(parameters: Dict[str, ParameterValues]) -> SampledParameters:
embedding_name = parameters.pop("embedding_name")
assert isinstance(embedding_name, str)
hidden_size = parameters.pop("hidden_size")
assert isinstance(hidden_size, int)
task_model_keys: Final = {
"use_rnn",
"dropout",
"word_dropout",
"locked_dropout",
"reproject_embeddings",
"use_crf",
"rnn_layers",
"rnn_type",
}
task_model_kwargs = BaseConfigSpace._pop_parameters(
parameters=parameters, parameters_keys=task_model_keys
)
parameters, task_train_kwargs = SequenceLabelingConfigSpace._parse_model_trainer_parameters(
parameters=parameters
)
BaseConfigSpace._check_unmapped_parameters(parameters=parameters)
return {
"embedding_name": embedding_name,
"hidden_size": hidden_size,
"task_model_kwargs": task_model_kwargs,
"task_train_kwargs": task_train_kwargs,
}
@dataclass
class TextClassificationConfigSpace(AbstractFlairModelTrainerConfigSpace):
dynamic_document_embedding: Parameter = SearchableParameter(
name="document_embedding",
type="categorical",
choices=[
"FlairDocumentCNNEmbeddings",
"FlairDocumentRNNEmbeddings",
"FlairTransformerDocumentEmbedding",
],
)
static_document_embedding: Parameter = SearchableParameter(
name="document_embedding",
type="categorical",
choices=[
"FlairDocumentCNNEmbeddings",
"FlairDocumentRNNEmbeddings",
"FlairDocumentPoolEmbedding",
],
)
static_pooling: Parameter = SearchableParameter(
name="pooling", type="categorical", choices=["min", "max", "mean"]
)
dynamic_pooling: Parameter = SearchableParameter(
name="pooling", type="categorical", choices=["cls", "max", "mean"]
)
static_fine_tune_mode: Parameter = SearchableParameter(
name="fine_tune_mode", type="categorical", choices=["none", "linear", "nonlinear"]
)
dynamic_fine_tune: Parameter = SearchableParameter(
name="fine_tune", type="categorical", choices=[False, True]
)
    # Optuna categorical choices are expected to be primitives;
    # the tuple choices below therefore trigger an Optuna warning, but the library still works properly.
cnn_pool_kernels: Parameter = SearchableParameter(
name="kernels",
type="categorical",
choices=[((100, 3), (100, 4), (100, 5)), ((200, 4), (200, 5), (200, 6))],
)
hidden_size: Parameter = SearchableParameter(
name="hidden_size", type="int_uniform", low=128, high=2048, step=128
)
rnn_type: Parameter = SearchableParameter(
name="rnn_type", type="categorical", choices=["LSTM", "GRU"]
)
rnn_layers: Parameter = SearchableParameter(
name="rnn_layers", type="int_uniform", low=1, high=3, step=1
)
bidirectional: Parameter = SearchableParameter(
name="bidirectional", type="categorical", choices=[True, False]
)
dropout: Parameter = SearchableParameter(
name="dropout", type="discrete_uniform", low=0.0, high=0.5, q=0.05
)
word_dropout: Parameter = SearchableParameter(
name="word_dropout", type="discrete_uniform", low=0.0, high=0.5, q=0.05
)
reproject_words: Parameter = SearchableParameter(
name="reproject_words", type="categorical", choices=[True, False]
)
def get_embedding_type(self) -> str:
embedding_name_param: Parameter = self.__getattribute__("param_embedding_name")
embedding_name = embedding_name_param.value
assert isinstance(embedding_name, str)
embedding_type = self._retrieve_embedding_type(embedding_name=embedding_name)
assert isinstance(embedding_type, str)
return embedding_type
def _map_task_specific_parameters(
self, trial: optuna.trial.Trial
) -> Tuple[Dict[str, ParameterValues], Set[str]]:
shared_params = ("dropout", "word_dropout", "reproject_words")
param_names_mapping: Final = {
"FlairDocumentCNNEmbeddings": ("cnn_pool_kernels",) + shared_params,
"FlairDocumentRNNEmbeddings": ("hidden_size", "rnn_type", "rnn_layers", "bidirectional")
+ shared_params,
"FlairTransformerDocumentEmbedding": ("dynamic_pooling", "dynamic_fine_tune"),
"FlairDocumentPoolEmbedding": ("static_pooling", "static_fine_tune_mode"),
}
parameters = {}
embedding_name, embedding_name_val = self._parse_parameter(
trial=trial, param_name="param_embedding_name"
)
parameters[embedding_name] = embedding_name_val
embedding_type_param: str = self.get_embedding_type()
assert embedding_type_param in ["dynamic", "static"]
document_embedding_name, document_embedding_val = self._parse_parameter(
trial=trial, param_name=f"{embedding_type_param}_document_embedding"
)
if not isinstance(document_embedding_val, str):
raise TypeError("Variable document_embedding_val must be a str!")
parameters[document_embedding_name] = document_embedding_val
parameter_names = param_names_mapping[document_embedding_val]
parameters.update(self._map_parameters(parameters_names=list(parameter_names), trial=trial))
mapped_parameters: Final[Set[str]] = {
"param_embedding_name",
*list(self.__annotations__.keys()),
}
return parameters, mapped_parameters
@staticmethod
def parse_parameters(parameters: Dict[str, ParameterValues]) -> SampledParameters:
embedding_name = parameters.pop("embedding_name")
assert isinstance(embedding_name, str)
document_embedding = parameters.pop("document_embedding")
assert isinstance(document_embedding, str)
load_model_keys: Final = {
"pooling",
"fine_tune_mode",
"fine_tune",
"kernels",
"hidden_size",
"rnn_type",
"rnn_layers",
"bidirectional",
"dropout",
"word_dropout",
"reproject_words",
}
load_model_kwargs = BaseConfigSpace._pop_parameters(
parameters=parameters, parameters_keys=load_model_keys
)
parameters, task_train_kwargs = SequenceLabelingConfigSpace._parse_model_trainer_parameters(
parameters=parameters
)
BaseConfigSpace._check_unmapped_parameters(parameters=parameters)
return {
"embedding_name": embedding_name,
"document_embedding": document_embedding,
"task_model_kwargs": {},
"task_train_kwargs": task_train_kwargs,
"load_model_kwargs": load_model_kwargs,
}
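# Usage sketch (hypothetical embedding name; only the parsing step defined above is shown, since
# sampling of the raw parameter dict depends on BaseConfigSpace, which lives elsewhere):
#   config_space = SequenceLabelingConfigSpace(embedding_name="allegro/herbert-base-cased")
#   parsed = SequenceLabelingConfigSpace.parse_parameters(sampled_parameter_dict)
#   task_model_kwargs, task_train_kwargs = parsed["task_model_kwargs"], parsed["task_train_kwargs"]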
|
11562469
|
from xenon.base.datatypes import Sweepable
from xenon.base.designsweeptypes import ExhaustiveSweep
from benchmarks import params
class Benchmark(Sweepable):
sweepable_params = [
params.cycle_time,
params.pipelining,
params.cache_size,
params.cache_assoc,
params.cache_hit_latency,
params.cache_line_sz,
params.cache_queue_size,
params.cache_bandwidth,
params.tlb_hit_latency,
params.tlb_miss_latency,
params.tlb_page_size,
params.tlb_entries,
params.tlb_max_outstanding_walks,
params.tlb_assoc,
params.tlb_bandwidth,
params.l2cache_size,
params.enable_l2,
params.perfect_l1,
params.perfect_bus,
params.pipelined_dma,
params.ready_mode,
params.dma_multi_channel,
params.ignore_cache_flush,
]
def __init__(self, name, source_dir):
super(Benchmark, self).__init__(name)
self.sub_dir = source_dir
self.kernels = []
self.main_id = 0
        self.exec_cmd = ""
        self.run_args = ""
        self.required_files = []
def add_array(self, *args):
""" Add an array of this benchmark.
Args:
*args: Array constructor args.
"""
array = Array(*args)
assert(not hasattr(self, array.name))
setattr(self, array.name, array)
def add_host_array(self, *args):
""" Add an host array of this benchmark.
Args:
*args: Array constructor args.
"""
array = Array(*args, is_host_array=True)
assert(not hasattr(self, array.name))
setattr(self, array.name, array)
def add_function_array(self, func, *args):
""" Add an array of this benchmark that does not belong to the kernel.
For example, if an array 'bar' were declared inside an inner function
'foo' (where 'foo' is not the top kernel function), the user would have to
refer to this array as 'foo.bar'.
Args:
func: Function name.
*args: Array constructor args.
"""
array = Array(*args)
self.add_function(func)
getattr(self, func).add_array(func, *args)
def add_loop(self, function_name, *args):
""" Add a loop of this benchmark.
Args:
function_name: The name of the function that contains this loop.
*args: Loop constructor args.
"""
self.add_function(function_name)
getattr(self, function_name).add_loop(*args)
def add_function(self, function_name):
if not hasattr(self, function_name):
f = Function(function_name)
setattr(self, function_name, f)
def set_kernels(self, kernels):
""" Set the kernels to be traced in this benchmark.
If a single kernel is provided, then all functions called by that function
will be called. If multiple kernels are provided, only those functions will
appear in the dynamic trace.
Args:
kernels: A list of function names.
"""
self.kernels = kernels
def set_main_id(self, main_id):
""" Set the id number of this benchmark.
In a system with multiple accelerators, this allows the simulator to distinguish
between them.
TODO: Remove this and replace with a dynamic registration procedure
(BUG=ALADDIN-66).
Args:
main_id: integer id of this benchmark.
"""
self.main_id = main_id
def add_required_files(self, files):
""" Add required files for input/output.
These will be symlinked into the configuration directory. It is assumed
these files are located in the benchmark source directory.
"""
if isinstance(files, list):
self.required_files.extend(files)
else:
self.required_files.append(files)
def set_exec_cmd(self, cmd):
""" If simulating with a CPU, set the command to execute. """
self.exec_cmd = cmd
def set_run_args(self, args):
""" If simulating with a CPU, set the command-line arguments. """
self.run_args = args
class Array(Sweepable):
sweepable_params = [
params.partition_type,
params.partition_factor,
params.memory_type,
]
def __init__(self, name, size, word_length, is_host_array=False):
""" Creates an array. """
super(Array, self).__init__(name)
self.size = size
self.word_length = word_length
self.is_host_array = is_host_array
class Function(Sweepable):
sweepable_params = []
def __init__(self, name):
super(Function, self).__init__(name)
def add_array(self, func, *args):
a = Array(*args)
assert(not hasattr(self, a.name))
setattr(self, a.name, a)
def add_loop(self, *args):
l = Loop(*args)
assert(not hasattr(self, l.name))
setattr(self, l.name, l)
class Loop(Sweepable):
sweepable_params = [
params.unrolling,
]
def __init__(self, name):
""" Creates a loop. """
super(Loop, self).__init__(name)
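# Usage sketch (hypothetical benchmark; names and sizes are illustrative only):
#   bench = Benchmark('fft', 'fft/')
#   bench.set_kernels(['fft_kernel'])
#   bench.add_array('work_x', 1024, 4)                            # name, size, word length
#   bench.add_loop('fft_kernel', 'outer_loop')                    # loop 'outer_loop' inside function 'fft_kernel'
#   bench.add_function_array('twiddle_gen', 'twiddles', 512, 4)   # later referenced as twiddle_gen.twiddles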
|
11562472
|
import json
import os
import sys
import argparse
from jupyter_client.kernelspec import KernelSpecManager
from IPython.utils.tempdir import TemporaryDirectory
from shutil import copyfile
kernel_json = {
"argv":[sys.executable,"-m","seq_kernel", "-f", "{connection_file}"],
"display_name":"Seq",
"language":"seq",
}
def install_my_kernel_spec(user=True, prefix=None):
with TemporaryDirectory() as td:
os.chmod(td, 0o755) # Starts off as 700, not user readable
with open(os.path.join(td, 'kernel.json'), 'w') as f:
json.dump(kernel_json, f, sort_keys=True)
print('Installing IPython kernel spec')
KernelSpecManager().install_kernel_spec(td, 'seq', user=user, prefix=prefix)
def install_my_kernel_javascript():
    # SEQ_PATH is assumed to end with a 7-character suffix (e.g. '/stdlib'); stripping it yields
    # the Seq install root that contains jupyter/seq_kernel/kernel.js.
    seq_js_file = os.path.join(os.environ['SEQ_PATH'][:-7], 'jupyter', 'seq_kernel', 'kernel.js')
    kernel_js_file = os.path.join(KernelSpecManager().get_kernel_spec('seq').resource_dir, 'kernel.js')
    copyfile(seq_js_file, kernel_js_file)
def _is_root():
try:
return os.geteuid() == 0
except AttributeError:
return False # assume not an admin on non-Unix platforms
def main(argv=None):
parser = argparse.ArgumentParser(
description='Install KernelSpec for Seq Kernel'
)
prefix_locations = parser.add_mutually_exclusive_group()
prefix_locations.add_argument(
'--user',
help='Install KernelSpec in user homedirectory',
action='store_true'
)
prefix_locations.add_argument(
'--sys-prefix',
help='Install KernelSpec in sys.prefix. Useful in conda / virtualenv',
action='store_true',
dest='sys_prefix'
)
prefix_locations.add_argument(
'--prefix',
help='Install KernelSpec in this prefix',
default=None
)
args = parser.parse_args(argv)
user = False
prefix = None
if args.sys_prefix:
prefix = sys.prefix
elif args.prefix:
prefix = args.prefix
elif args.user or not _is_root():
user = True
install_my_kernel_spec(user=user, prefix=prefix)
install_my_kernel_javascript()
if __name__ == '__main__':
main()
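# Example (hypothetical file name; the SEQ_PATH environment variable used above must be set):
#   SEQ_PATH=/opt/seq/stdlib python install_seq_kernel.py --sys-prefix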
|
11562524
|
import os
import numpy as np
import torch
import cv2
import argparse
import segmentation_models_pytorch as smp
from segmentation_models_pytorch.encoders import get_preprocessing_fn
import sys
sys.path.append('../Common')
from tool_clean import get_image_patch_deep, get_image_patch, check_is_image
# make step1 prediction image patch, and test image for step2 training.
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=str, default='2', help="GPU number")
parser.add_argument('--image_train_dir', type=str, default='/mnt/nas/data/denoise/DIBCO/train/image/', help='original image train dir')
parser.add_argument('--mask_train_dir', type=str, default='/mnt/nas/data/denoise/DIBCO/train/mask/', help='original mask train dir')
parser.add_argument('--image_test_dir', type=str, default='/mnt/nas/data/denoise/DIBCO/test/image/', help='original image test dir')
parser.add_argument('--mask_test_dir', type=str, default='/mnt/nas/data/denoise/DIBCO/test/mask/', help='original mask test dir')
parser.add_argument('--base_model_name', type=str, default='efficientnet-b4', help='base model name')
parser.add_argument('--lambda_bce', type=float, default=50.0, help='bce weight')
parser.add_argument('--encoder_weights', type=str, default='imagenet', help='none or imagenet')
parser.add_argument('--generator_lr', type=float, default=2e-4, help='generator learning rate')
parser.add_argument('--threshold', type=float, default=0.30, help='threshold for bgr mask')
parser.add_argument('--original_dir', type=str, default='/data/denoise/Label_data', help='original image dir')
opt = parser.parse_args()
device = torch.device("cuda:%s" % opt.gpu)
models = []
base_model_name = opt.base_model_name
lambda_bce = opt.lambda_bce
generator_lr = opt.generator_lr
threshold = opt.threshold
encoder_weights = opt.encoder_weights
weight_folder = './step1_dibco_' + base_model_name + '_' + str(int(lambda_bce)) + '_' + str(generator_lr) + '_' + str(threshold) + '/'
weight_list = sorted(os.listdir(weight_folder))
weight_list = [os.path.join(weight_folder, weight_path) for weight_path in weight_list
if weight_path.endswith('pth') and 'unet' in weight_path]
print(weight_list)
# blue
model = smp.Unet(base_model_name, encoder_weights=encoder_weights, in_channels=3)
model.load_state_dict(torch.load(weight_list[0], map_location='cpu'))
model.to(device)
model.requires_grad_(False)
model.eval()
models.append(model)
# green
model = smp.Unet(base_model_name, encoder_weights=encoder_weights, in_channels=3)
model.load_state_dict(torch.load(weight_list[1], map_location='cpu'))
model.to(device)
model.requires_grad_(False)
model.eval()
models.append(model)
# red
model = smp.Unet(base_model_name, encoder_weights=encoder_weights, in_channels=3)
model.load_state_dict(torch.load(weight_list[2], map_location='cpu'))
model.to(device)
model.requires_grad_(False)
model.eval()
models.append(model)
# gray
model = smp.Unet(base_model_name, encoder_weights=encoder_weights, in_channels=3)
model.load_state_dict(torch.load(weight_list[3], map_location='cpu'))
model.to(device)
model.requires_grad_(False)
model.eval()
models.append(model)
batch_size = 16
preprocess_input = get_preprocessing_fn(base_model_name, pretrained='imagenet')
# make directory
image_save_path = './predicted_image_for_step2_dibco'
os.makedirs(image_save_path, exist_ok=True)
train_image_save_path = os.path.join(image_save_path, 'train')
os.makedirs(train_image_save_path, exist_ok=True)
test_image_save_path = os.path.join(image_save_path, 'test')
os.makedirs(test_image_save_path, exist_ok=True)
# patch directory
patch_save_path = os.path.join(train_image_save_path, 'patch')
os.makedirs(patch_save_path, exist_ok=True)
patch_train_image_save_path = os.path.join(patch_save_path, 'image')
os.makedirs(patch_train_image_save_path, exist_ok=True)
patch_train_mask_save_path = os.path.join(patch_save_path, 'mask')
os.makedirs(patch_train_mask_save_path, exist_ok=True)
# no test patch
# end patch
# end directory
# start train
step2_overlap_ratio = 0.3
scale_list = [0.75, 1.00, 1.25, 1.50] # sample patches with the scale factor and resize patches to 256 * 256 // 192, 256, 320, 384
rotation = [0, 3]
reshape = (256, 256)
predict_overlap_ratio = 0.1
crop_h = 256
crop_w = 256
image_train_dir = opt.image_train_dir
mask_train_dir = opt.mask_train_dir
images = os.listdir(image_train_dir)
for img in images:
if not check_is_image(img):
print('not image', img)
continue
image = cv2.imread(os.path.join(image_train_dir, img))
image_name = img.split('.')[0]
print('processing the image:', img)
# find and read mask file
if os.path.isfile(os.path.join(mask_train_dir, image_name + '.png')):
mask = cv2.imread(os.path.join(mask_train_dir, image_name + '.png'), cv2.IMREAD_GRAYSCALE)
elif os.path.isfile(os.path.join(mask_train_dir, image_name + '.bmp')):
mask = cv2.imread(os.path.join(mask_train_dir, image_name + '.bmp'), cv2.IMREAD_GRAYSCALE)
else:
print(img, 'no mask')
exit(1)
mask[mask < 190] = 0
mask[mask >= 190] = 255
h, w, _ = image.shape
image_patches, poslist = get_image_patch(image, crop_h, crop_w, overlap=predict_overlap_ratio, is_mask=False)
merge_img = np.ones((h, w, 3))
out_imgs = []
for channel in range(4):
color_patches = []
for patch in image_patches:
tmp = patch.astype(np.float32)
if channel != 3:
color_patches.append(preprocess_input(tmp[:, :, channel:channel+1]))
else:
color_patches.append(preprocess_input(np.expand_dims( cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY), axis=-1 )))
step = 0
preds = []
with torch.no_grad():
while step < len(image_patches):
ps = step
pe = step + batch_size
if pe >= len(image_patches):
pe = len(image_patches)
target = torch.from_numpy(np.array(color_patches[ps:pe])).permute(0, 3, 1, 2).float()
preds.extend(torch.sigmoid(models[channel](target.to(device))).cpu())
step += batch_size
# handling overlap
out_img = np.ones((h, w, 1)) * 255
for i in range(len(image_patches)):
patch = preds[i].permute(1, 2, 0).numpy() * 255
start_h, start_w, end_h, end_w, h_shift, w_shift = poslist[i]
h_cut = end_h - start_h
w_cut = end_w - start_w
tmp = np.minimum(out_img[start_h:end_h, start_w:end_w], patch[h_shift:h_shift+h_cut, w_shift:w_shift+w_cut])
out_img[start_h:end_h, start_w:end_w] = tmp
out_imgs.append(out_img)
# save step1 merged color train image
merge_img[:, :, 0:1] = (out_imgs[0] + out_imgs[3]) / 2.
merge_img[:, :, 1:2] = (out_imgs[1] + out_imgs[3]) / 2.
merge_img[:, :, 2:3] = (out_imgs[2] + out_imgs[3]) / 2.
merge_img = merge_img.astype(np.uint8)
cv2.imwrite('%s/%s.png' % (train_image_save_path, image_name), merge_img)
# start patch
scale_cnt = 0
for scale in scale_list:
# (patches, 256, 256, 3)
crpW = int(scale * crop_w)
crpH = int(scale * crop_h)
image_patches, poslist = get_image_patch_deep(merge_img, crpH, crpW, reshape, overlap=step2_overlap_ratio)
mask_patches, poslist = get_image_patch_deep(mask, crpH, crpW, reshape, overlap=step2_overlap_ratio)
for idx in range(len(image_patches)):
image_patch = image_patches[idx]
mask_patch = mask_patches[idx]
            # augmentation
for k in rotation:
img_tmp = np.rot90(image_patch, k)
mask_tmp = np.rot90(mask_patch, k)
cv2.imwrite('%s/%s_s%dr%di%d.png' % (patch_train_image_save_path, image_name, scale_cnt, k, idx), img_tmp)
cv2.imwrite('%s/%s_s%dr%di%d.png' % (patch_train_mask_save_path, image_name, scale_cnt, k, idx), mask_tmp)
scale_cnt += 1
# break
# end patch
# start test
image_test_dir = opt.image_test_dir
images = os.listdir(image_test_dir)
for img in images:
if not check_is_image(img):
print('not image', img)
continue
image = cv2.imread(os.path.join(image_test_dir, img))
image_name = img.split('.')[0]
print('processing the image:', img)
h, w, _ = image.shape
image_patches, poslist = get_image_patch(image, crop_h, crop_w, overlap=predict_overlap_ratio, is_mask=False)
merge_img = np.ones((h, w, 3))
out_imgs = []
for channel in range(4):
color_patches = []
for patch in image_patches:
tmp = patch.astype(np.float32)
if channel != 3:
color_patches.append(preprocess_input(tmp[:, :, channel:channel+1]))
else:
color_patches.append(preprocess_input(np.expand_dims( cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY), axis=-1 )))
step = 0
preds = []
with torch.no_grad():
while step < len(image_patches):
ps = step
pe = step + batch_size
if pe >= len(image_patches):
pe = len(image_patches)
target = torch.from_numpy(np.array(color_patches[ps:pe])).permute(0, 3, 1, 2).float()
preds.extend(torch.sigmoid(models[channel](target.to(device))).cpu())
step += batch_size
# handling overlap
out_img = np.ones((h, w, 1)) * 255
for i in range(len(image_patches)):
patch = preds[i].permute(1, 2, 0).numpy() * 255
start_h, start_w, end_h, end_w, h_shift, w_shift = poslist[i]
h_cut = end_h - start_h
w_cut = end_w - start_w
        # take the per-pixel minimum over overlapping patches
tmp = np.minimum(out_img[start_h:end_h, start_w:end_w], patch[h_shift:h_shift+h_cut, w_shift:w_shift+w_cut])
out_img[start_h:end_h, start_w:end_w] = tmp
out_imgs.append(out_img)
    # save step1 merged color test image
merge_img[:, :, 0:1] = (out_imgs[0] + out_imgs[3]) / 2.
merge_img[:, :, 1:2] = (out_imgs[1] + out_imgs[3]) / 2.
merge_img[:, :, 2:3] = (out_imgs[2] + out_imgs[3]) / 2.
merge_img = merge_img.astype(np.uint8)
cv2.imwrite('%s/%s.png' % (test_image_save_path, image_name), merge_img)
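# Resulting layout under ./predicted_image_for_step2_dibco (created above):
#   train/<name>.png                       merged 3-channel step-1 predictions of the training images
#   train/patch/image/, train/patch/mask/  augmented 256x256 patches for step-2 training
#   test/<name>.png                        merged step-1 predictions of the test images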
|