hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8d60c377538ddae6447654f6c37f24bae517225c
| 3,629
|
py
|
Python
|
convert.py
|
Ellen7ions/bin2mem
|
51e3216cbf5e78547751968ef1619a925f2f55ef
|
[
"MIT"
] | 3
|
2021-05-18T13:07:39.000Z
|
2021-05-24T12:46:43.000Z
|
convert.py
|
Ellen7ions/bin2mem
|
51e3216cbf5e78547751968ef1619a925f2f55ef
|
[
"MIT"
] | null | null | null |
convert.py
|
Ellen7ions/bin2mem
|
51e3216cbf5e78547751968ef1619a925f2f55ef
|
[
"MIT"
] | null | null | null |
import os, sys
import json
class Config:
    """Loads tool paths from a JSON configuration file.

    The config file must contain a 'bin2mem.path' entry pointing at an
    existing bin2mem executable.
    """

    def __init__(self, config_path='./config.json'):
        super(Config, self).__init__()
        self.config_path = config_path
        self.bin2mem_path = None
        # Use a context manager so the config file handle is closed
        # deterministically (the original `json.load(open(...))` leaked it).
        with open(config_path) as config_file:
            self.init_configs(json.load(config_file))

    def init_configs(self, json_data):
        """Pull the bin2mem executable path out of the parsed config data."""
        self.bin2mem_path = json_data['bin2mem.path']
        Config.check_file_exists(self.bin2mem_path)

    @staticmethod
    def check_file_exists(file_name):
        """Raise if *file_name* does not exist on disk."""
        if not os.path.exists(file_name):
            raise Exception(f'{file_name} not found!')
class Convert:
    """Drives the .s -> .o -> .bin -> mem-text toolchain.

    Command-line flags (read from sys.argv):
      -s      keep the intermediate .o/.bin files and show a disassembly
      *.s     the assembly input file (defaults to main.s if present)
      clean   delete generated files and the workspace instead of building
    """

    def __init__(self):
        super(Convert, self).__init__()
        self.config = Config()
        self.FLAG_SAVE_FILES = False
        self.FLAG_FILE_NAME = ''
        self.FLAG_CLEAN_ALL = False
        self.workspace_name = ''
        self.file_name = ''
        self.o_file_path = ''
        self.bin_file_path = ''
        self.coe_file_path = ''
        self.init_flags()
        self.make_workspace()
        self.set_files_path()

    def init_flags(self):
        """Parse sys.argv into the FLAG_* attributes and derived names."""
        for arg in sys.argv:
            if arg == '-s':
                self.FLAG_SAVE_FILES = True
            if arg.endswith('.s'):
                self.FLAG_FILE_NAME = arg
            if arg == 'clean':
                self.FLAG_CLEAN_ALL = True
        if self.FLAG_FILE_NAME == '':
            if os.path.exists('main.s'):
                self.FLAG_FILE_NAME = 'main.s'
            else:
                raise Exception('Where is your input file :(')
        # Strip the '.s' suffix; workspace dir and file stem share the name.
        self.workspace_name = self.FLAG_FILE_NAME[:-2]
        self.file_name = self.FLAG_FILE_NAME[:-2]

    def make_workspace(self):
        """Create the per-input workspace directory if it is missing."""
        if not os.path.exists(self.workspace_name):
            os.mkdir(self.workspace_name)

    def set_files_path(self):
        """Derive Windows-style paths for all intermediate/output files."""
        self.o_file_path = f'.\\{self.workspace_name}\\{self.file_name}.o'
        self.bin_file_path = f'.\\{self.workspace_name}\\{self.file_name}.bin'
        self.coe_file_path = f'.\\{self.workspace_name}\\{self.file_name}.txt'

    def mips_gcc_c(self):
        """Assemble the input .s file into an object file."""
        os.system(f'mips-sde-elf-gcc -c {self.FLAG_FILE_NAME} -o {self.o_file_path}')

    def mips_objcopy(self):
        """Extract the raw binary image from the object file."""
        os.system(f'mips-sde-elf-objcopy -O binary {self.o_file_path} {self.bin_file_path}')

    def mips_bin2mem(self):
        """Convert the raw binary into the memory-text output file."""
        os.system(f'{self.config.bin2mem_path} {self.bin_file_path} {self.coe_file_path}')

    @staticmethod
    def _delete_if_exists(path):
        """Best-effort delete of *path* via the Windows 'del' command."""
        try:
            Config.check_file_exists(path)
            os.system(f'del {path}')
        except Exception:
            # A missing file simply means there is nothing to delete.
            pass

    def clean_process_files(self):
        """Remove the intermediate .o and .bin files."""
        self._delete_if_exists(self.o_file_path)
        self._delete_if_exists(self.bin_file_path)

    def run(self):
        """Execute the full assemble/objcopy/bin2mem pipeline."""
        self.mips_gcc_c()
        self.mips_objcopy()
        self.mips_bin2mem()

    def clean(self):
        """Delete all generated files and the workspace directory."""
        self.clean_process_files()
        self._delete_if_exists(self.coe_file_path)
        try:
            # removedirs raises OSError when the directory is missing or
            # still non-empty; treat that as "nothing to clean" instead of
            # crashing (the original was unguarded here).
            os.removedirs(self.workspace_name)
        except OSError:
            pass

    def mips_objdump(self):
        """Disassemble the object file (if it still exists) to stdout."""
        if os.path.exists(self.o_file_path):
            os.system(f'mips-sde-elf-objdump -d {self.o_file_path}')

    def apply(self):
        """Dispatch on the parsed flags: clean, or build (and maybe keep files)."""
        if self.FLAG_CLEAN_ALL:
            self.clean()
            return
        self.run()
        if not self.FLAG_SAVE_FILES:
            self.clean_process_files()
            return
        self.mips_objdump()
if __name__ == '__main__':
    # Build (or clean) according to the command-line flags; the dead
    # commented-out scratch calls that followed have been removed.
    converter = Convert()
    converter.apply()
| 28.801587
| 92
| 0.590245
| 491
| 3,629
| 4.04888
| 0.160896
| 0.076459
| 0.06841
| 0.052314
| 0.399396
| 0.312374
| 0.234406
| 0.13833
| 0.091549
| 0
| 0
| 0.003882
| 0.290163
| 3,629
| 125
| 93
| 29.032
| 0.767857
| 0.018187
| 0
| 0.135417
| 0
| 0.010417
| 0.155143
| 0.051433
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15625
| false
| 0.03125
| 0.020833
| 0
| 0.21875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d61a4b35ddf035024fe7d951c745cb83a2a9d4d
| 3,161
|
py
|
Python
|
stats.py
|
DisinfoResearch/TwitterCollector
|
183b6761cca54b5db5b98a2f9f86bd8bcc98a7cb
|
[
"MIT"
] | null | null | null |
stats.py
|
DisinfoResearch/TwitterCollector
|
183b6761cca54b5db5b98a2f9f86bd8bcc98a7cb
|
[
"MIT"
] | null | null | null |
stats.py
|
DisinfoResearch/TwitterCollector
|
183b6761cca54b5db5b98a2f9f86bd8bcc98a7cb
|
[
"MIT"
] | null | null | null |
#!/bin/python3
# Copyright (C) 2021, Michigan State University.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import csv
import json
import argparse
import sys
import datetime
from dateutil.parser import parse
def calc_row(u):
    """Compute derived stats for one Twitter user record *u*.

    Returns a dict keyed by the output column names, using the module-level
    `today` date for account age. Both ratio families guard against division
    by zero (zero friends, or an account created today).
    """
    created_date = parse(u['created_at'])
    t = today - created_date.date()
    days = int(t.days)
    # BUGFIX: the original only guarded friends_count; an account created
    # today has t.days == 0 and crashed the per-day ratios.
    safe_days = days if days != 0 else 1
    # Prevent divide by zero
    ff_ratio = 0
    if int(u['friends_count']) != 0:
        ff_ratio = int(u['followers_count'])/int(u['friends_count'])
    # Force conversions to int, as you never know with Twitter
    return {'Twitter_ID': u['id'],
            'Handle': u['screen_name'],
            'Followed': u['friends_count'],
            'Followers': u['followers_count'],
            'Followers/Followed': ff_ratio,
            'Tweets': u['statuses_count'],
            'Days_old': days,
            'Tweets/Days_old': int(u['statuses_count'])/safe_days,
            'Followers/Days_old': int(u['followers_count'])/safe_days}
def process_csv(inp, out):
    """Read JSON lines from *inp* and write one CSV stats row per line."""
    # A tuple of fieldnames pins a specific column order in the output.
    columns = ('Twitter_ID', 'Handle', 'Followed', 'Followers', 'Followers/Followed', 'Tweets', 'Days_old', 'Tweets/Days_old', 'Followers/Days_old')
    writer = csv.DictWriter(out, fieldnames=columns)
    writer.writeheader()
    for raw_line in inp:
        writer.writerow(calc_row(json.loads(raw_line)))
def process_json(inp, out):
    """Read JSON lines from *inp* and emit one computed-stats JSON line each."""
    for raw_line in inp:
        record = calc_row(json.loads(raw_line))
        out.write(json.dumps(record) + "\n")
parser = argparse.ArgumentParser(description='Convert JSON to CSV', epilog='P.S. Trust The Plan')
parser.add_argument('--format', help='either JSON or CSV', required=True)
# BUGFIX: without nargs='?' a positional argument is required and its
# default is never used, contradicting the documented stdin/stdout fallback.
parser.add_argument('input', nargs='?', help='JSON File, or stdin if not specified', type=argparse.FileType('r', encoding='utf-8'), default=sys.stdin)
parser.add_argument('output', nargs='?', help='output to File, or stdout if not specified', type=argparse.FileType('w', encoding='utf-8'), default=sys.stdout)
args = parser.parse_args()
today = datetime.date.today()
fmt = args.format.upper()
if fmt == 'CSV':
    process_csv(args.input, args.output)
elif fmt == 'JSON':
    process_json(args.input, args.output)
else:
    print(f"Error: '{args.format}' is an invalid format, must be CSV or JSON.", end="\n\n")
    parser.print_help()
    # sys.exit instead of the site-injected exit() builtin; same status code.
    sys.exit(-1)
| 45.157143
| 326
| 0.726669
| 479
| 3,161
| 4.716075
| 0.425887
| 0.038955
| 0.017264
| 0.014166
| 0.068172
| 0.030102
| 0
| 0
| 0
| 0
| 0
| 0.003709
| 0.147105
| 3,161
| 70
| 327
| 45.157143
| 0.834199
| 0.382158
| 0
| 0.057143
| 0
| 0
| 0.297255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.171429
| 0
| 0.285714
| 0.057143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d61d1b5d6b0de975b9d576cfadcd886cc44204a
| 10,970
|
py
|
Python
|
Scratch/lstm.py
|
imadtoubal/MultimodalDeepfakeDetection
|
46539e16c988ee9fdfb714893788bbbf72836595
|
[
"MIT"
] | 2
|
2022-03-12T09:18:13.000Z
|
2022-03-23T08:29:10.000Z
|
Scratch/lstm.py
|
imadtoubal/MultimodalDeepfakeDetection
|
46539e16c988ee9fdfb714893788bbbf72836595
|
[
"MIT"
] | null | null | null |
Scratch/lstm.py
|
imadtoubal/MultimodalDeepfakeDetection
|
46539e16c988ee9fdfb714893788bbbf72836595
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from preprocess import *
from torch.utils.data import Dataset, DataLoader
from blazeface import BlazeFace
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
import random
import pickle
# Kaggle deepfake-detection-challenge dataset layout.
DATA_FOLDER = '../input/deepfake-detection-challenge'
TRAIN_SAMPLE_FOLDER = 'train_sample_videos'
TEST_FOLDER = 'test_videos'
# Run on GPU when available. The BlazeFace face detector is loaded once at
# import time and shared as the module-level NET (used by read_video()).
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
NET = BlazeFace().to(device)
NET.load_weights("../input/blazeface.pth")
NET.load_anchors("../input/anchors.npy")
class MyLSTM(nn.Module):
    """Stacked LSTM classifier over 167-dim spatial-frequency features.

    Input: (batch, seq_len, 167) sequences plus an (h0, c0) hidden pair;
    output: (batch, 2) class probabilities taken from the last time step.
    """

    def __init__(self, num_layers=2, num_hidden_nodes=512):
        super(MyLSTM, self).__init__()
        self.num_layers = num_layers
        self.num_hidden_nodes = num_hidden_nodes
        # input dim is 167 (spatial-frequency features per frame)
        self.lstm = nn.LSTM(167, num_hidden_nodes,
                            batch_first=True, num_layers=num_layers)
        # Fully connected head. The original also built unused nn.Sigmoid()
        # and nn.Softmax() modules; they are removed here (they hold no
        # parameters, so existing checkpoints are unaffected).
        self.fc1 = nn.Linear(num_hidden_nodes, num_hidden_nodes)
        self.fc2 = nn.Linear(num_hidden_nodes, 2)

    def forward(self, x, hidden):
        """Run the sequence through the LSTM and classify its last step."""
        y, hidden = self.lstm(x, hidden)  # y: (batch, seq, hidden)
        y = y[:, -1, :]  # get only the last output
        y = self.fc1(y)
        y = self.fc2(y)
        # NOTE(review): softmax here is redundant (and numerically
        # counterproductive) when training with nn.CrossEntropyLoss, which
        # expects raw logits -- kept to preserve existing behavior.
        y = F.softmax(y, dim=1)
        return y, hidden

    def init_hidden(self, batch_size):
        """Return zeroed (h0, c0) states matching the LSTM's geometry."""
        weight = next(self.parameters()).data
        hidden = (weight.new(self.num_layers, batch_size, self.num_hidden_nodes).zero_(),
                  weight.new(self.num_layers, batch_size, self.num_hidden_nodes).zero_())
        return hidden
class FourierDataset(Dataset):
    """Dataset of (spectrogram window, label) pairs.

    Each entry of *data* is a (label, fourier_data, name) triple. The
    per-video spectrogram stack is cut into consecutive, non-overlapping
    24-frame windows, discarding any trailing remainder. Other options
    would be random sampling or overlapping windows; ordering is handled
    later by a shuffling DataLoader.
    """

    def __init__(self, data):
        self.data = []
        for label_str, spects, _name in data:
            target = torch.tensor(0 if label_str == 'FAKE' else 1)
            n_windows = spects.shape[0] // 24
            for w in range(n_windows):
                window = torch.tensor(spects[w * 24:(w + 1) * 24, :])
                self.data.append((window, target))

    def __getitem__(self, idx):
        # (24, 167) spectrogram window and its scalar label
        return self.data[idx]

    def __len__(self):
        return len(self.data)
sequence = 24  # frames per window: 1 sec of video (assumes 24 fps -- TODO confirm)
feature_size = 167  # length of the spatial-frequency feature vector per frame
def read_video(filename):
    """Extract a stack of 240x240 face crops, one per readable frame.

    Runs the module-level BlazeFace NET on a tile of each frame, keeps the
    first detected face, pads it with a small context buffer and resizes to
    240x240. Returns an (n_frames, 240, 240, 3) array, or None (implicitly)
    when no frame is read. Raises IndexError on frames with no detection.
    """
    vidcap = cv2.VideoCapture(filename)
    success, image = vidcap.read()
    count = 0  # NOTE(review): frame counter is incremented but never used
    images = []
    while success:
        # stride_search tiles the frame; tile 1 is the one searched for a
        # face -- presumably the center tile, confirm against preprocess.
        tiles, resize_info = stride_search(image)
        detections = NET.predict_on_image(tiles[1])
        blazeface_endpoints = get_face_endpoints(tiles[1], detections)[
            0]  # take the first face only
        # we need to resize them on the original image and get the amount shifted to prevent negative values
        # in this case it will be 1080
        split_size = 128 * resize_info[1]
        # determine how much we shifted for this tile
        x_shift = (image.shape[1] - split_size) // 2
        # Scale tile-relative (ymin, xmin, ymax, xmax) back to full-frame
        # pixel coordinates; x values also get the tile's horizontal shift.
        face_endpoints = (int(blazeface_endpoints[0] * resize_info[0]),
                          int(blazeface_endpoints[1] *
                              resize_info[0] + x_shift),
                          int(blazeface_endpoints[2] * resize_info[0]),
                          int(blazeface_endpoints[3] * resize_info[0] + x_shift))
        # next we need to expand the rectangle to be 240, 240 pixels (for this training example)
        # we can do this equally in each direction, kind of
        # NOTE(review): face_width/face_height are computed but never used.
        face_width = face_endpoints[3] - face_endpoints[1]
        face_height = face_endpoints[2] - face_endpoints[0]
        buffer = 20  # extra pixels of context, clamped to the image bounds
        face_box = image[max(0, face_endpoints[0] - buffer): min(face_endpoints[2] + buffer, image.shape[0]),
                         max(0, face_endpoints[1] - buffer): min(face_endpoints[3] + buffer, image.shape[1])]
        # print(face_box.shape) # almost a square or very close to it
        face = cv2.resize(face_box, (240, 240))
        images.append(face)
        # cv2.imshow("face", face)
        success, image = vidcap.read()
        count += 1
        # waitKey keeps any OpenCV UI responsive and allows 'q' to abort.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    if images:
        return np.stack(images)
def get_spects(vid):
    """Compute a Fourier spectrum for every frame of *vid* and stack them."""
    # Iterating a numpy array walks its first (frame) axis.
    return np.stack([fourier_tranform(frame, '') for frame in vid])
def get_face_endpoints(img, detections, with_keypoints=False):
    """Convert normalized BlazeFace detections to pixel endpoints on *img*.

    Draws a red rectangle on *img* for every detection (in-place side
    effect) and, when *with_keypoints* is set, also draws the six face
    keypoints. Returns a list of (ymin, xmin, ymax, xmax) tuples in pixels.
    """
    if isinstance(detections, torch.Tensor):
        detections = detections.cpu().numpy()
    if detections.ndim == 1:
        detections = np.expand_dims(detections, axis=0)
    detected_faces_endpoints = []
    for i in range(detections.shape[0]):  # dependent on number of faces found
        ymin = detections[i, 0] * img.shape[0]
        xmin = detections[i, 1] * img.shape[1]
        ymax = detections[i, 2] * img.shape[0]
        xmax = detections[i, 3] * img.shape[1]
        detected_faces_endpoints.append((ymin, xmin, ymax, xmax))
        cv2.rectangle(img, (int(xmin), int(ymin)),
                      (int(xmax), int(ymax)), (0, 0, 255), 2)
        if with_keypoints:
            # BUGFIX: the original built matplotlib patches.Circle objects
            # here with `patches` never imported (NameError) and then
            # discarded them; draw the keypoints directly on the image
            # instead, matching the cv2.rectangle call above.
            for k in range(6):
                kp_x = detections[i, 4 + k*2] * img.shape[1]
                kp_y = detections[i, 4 + k*2 + 1] * img.shape[0]
                cv2.circle(img, (int(kp_x), int(kp_y)), 2, (0, 0, 255), 2)
    return detected_faces_endpoints
def _distinct_extensions(file_list):
    """Return the distinct file extensions found in *file_list*, in order."""
    ext_dict = []
    for file in file_list:
        # Use the last dot-separated component so dotted basenames work.
        file_ext = file.split('.')[-1]
        if file_ext not in ext_dict:
            ext_dict.append(file_ext)
    return ext_dict


def _print_extension_summary(file_list):
    """Print each extension present in *file_list* with its file count."""
    ext_dict = _distinct_extensions(file_list)
    print(f"Extensions: {ext_dict}")
    for file_ext in ext_dict:
        print(
            f"Files with extension `{file_ext}`: {len([file for file in file_list if file.endswith(file_ext)])}")


def prepare_data():
    """Build, shuffle, and pickle the (label, spectra, name) training list.

    Reads the challenge metadata JSON, samples 90 FAKE videos plus all REAL
    ones, converts each to frame spectra via process_video_data, shuffles,
    and pickles the result to 'train_data.txt'. Videos that fail processing
    are skipped. Returns the training list.
    """
    # Here we check the train data files extensions.
    train_list = list(os.listdir(
        os.path.join(DATA_FOLDER, TRAIN_SAMPLE_FOLDER)))
    _print_extension_summary(train_list)
    test_list = list(os.listdir(os.path.join(DATA_FOLDER, TEST_FOLDER)))
    # BUGFIX: the original re-counted train_list here instead of the test
    # files it had just scanned.
    _print_extension_summary(test_list)
    json_file = [file for file in train_list if file.endswith('json')][0]
    print(f"JSON file: {json_file}")
    meta_train_df = get_meta_from_json(TRAIN_SAMPLE_FOLDER, json_file)
    meta_train_df.head()
    fake_train_sample_video = list(
        meta_train_df.loc[meta_train_df.label == 'FAKE'].sample(90).index)
    real_train_sample_video = list(
        meta_train_df.loc[meta_train_df.label == 'REAL'].index)
    training_data = []
    for label, videos in (('FAKE', fake_train_sample_video),
                          ('REAL', real_train_sample_video)):
        for video_file in videos:
            try:
                data = process_video_data(os.path.join(
                    DATA_FOLDER, TRAIN_SAMPLE_FOLDER, video_file))
                training_data.append((label, data, video_file))  # (X, 24, 167)
            except Exception:
                # Best-effort: skip unreadable/faceless videos, but do not
                # swallow KeyboardInterrupt/SystemExit like the bare except.
                continue
    random.shuffle(training_data)
    with open('train_data.txt', 'wb') as fp:  # pickling
        pickle.dump(training_data, fp)
    return training_data
def read_data():
    """Load and return the pickled training data written by prepare_data()."""
    with open("train_data.txt", "rb") as fp:  # Unpickling
        return pickle.load(fp)
def process_video_data(video_file):
    """Read a video's face crops and return their frame spectra."""
    frames = read_video(video_file)
    # Average the RGB channels and scale to [0, 1] before the transform.
    gray = frames.mean(axis=-1) / 255
    return get_spects(gray)
def prepare_spect(spect):
    """Wrap a raw spectrogram array in a (copying) torch tensor."""
    tensor = torch.tensor(spect)
    return tensor
def convert_scores(label):
    """One-hot encode a label string: 'FAKE' -> [1, 0], anything else -> [0, 1]."""
    if label == 'FAKE':
        return torch.tensor([1, 0])
    return torch.tensor([0, 1])
def train(training_data):
    """Train MyLSTM on windowed spectrogram data for 100 epochs.

    *training_data* is a list of (label, spectra, name) triples as produced
    by prepare_data()/read_data(); it is windowed by FourierDataset. Prints
    running loss/accuracy every `print_every` batches.
    """
    batch_size = 69
    model = MyLSTM()
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    training_data = FourierDataset(training_data)
    trainloader = DataLoader(
        training_data, batch_size=batch_size, shuffle=True)
    print_every = 10
    for epoch in range(100):  # again, normally you would NOT do 100 epochs, it is toy data
        running_loss = 0.0
        running_acc = 0.0
        # inp is a batch of sequences, shape (batch, 24, 167)
        for i, (inp, labels) in enumerate(trainloader):
            # Pytorch accumulates gradients; clear them before each batch.
            model.zero_grad()
            inp = inp.float()
            # BUGFIX: size the hidden state to the actual batch. The last
            # batch of an epoch is usually smaller than batch_size, and the
            # original fixed-size hidden state crashed the LSTM on it.
            hidden = model.init_hidden(inp.size(0))
            # Forward pass, then loss/gradients/optimizer step.
            tag_scores, h = model(inp, hidden)
            loss = loss_function(tag_scores, labels)
            loss.backward()
            optimizer.step()
            running_acc += torch.mean((tag_scores.argmax(dim=1)
                                       == labels).float()).item()
            # print statistics
            running_loss += loss.item()
            if i % print_every == print_every-1:
                print('[%d, %5d] loss: %.3f - acc: %.3f' %
                      (epoch + 1, i + 1, running_loss / print_every, running_acc * 100 / print_every))
                running_loss = 0.0
                running_acc = 0.0
def main():
    """Load the pickled training data and run training.

    To rebuild the dataset from the raw challenge videos first, call
    prepare_data() instead of read_data(). (The dead commented-out
    visualization scratch code that lived here has been removed.)
    """
    training_data = read_data()
    train(training_data)


if __name__ == '__main__':
    main()
| 34.388715
| 116
| 0.591978
| 1,424
| 10,970
| 4.375
| 0.249298
| 0.026966
| 0.020225
| 0.011236
| 0.241894
| 0.199037
| 0.158909
| 0.158909
| 0.150241
| 0.124238
| 0
| 0.025695
| 0.301094
| 10,970
| 318
| 117
| 34.496855
| 0.786879
| 0.149772
| 0
| 0.160377
| 0
| 0.009434
| 0.056292
| 0.012472
| 0
| 0
| 0.000449
| 0
| 0
| 1
| 0.075472
| false
| 0
| 0.061321
| 0.018868
| 0.20283
| 0.042453
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d63217e5fdc8f7f711034a43dd2b7d398591281
| 18,373
|
py
|
Python
|
analysis/plot/python/plot_groups/estimator.py
|
leozz37/makani
|
c94d5c2b600b98002f932e80a313a06b9285cc1b
|
[
"Apache-2.0"
] | 1,178
|
2020-09-10T17:15:42.000Z
|
2022-03-31T14:59:35.000Z
|
analysis/plot/python/plot_groups/estimator.py
|
leozz37/makani
|
c94d5c2b600b98002f932e80a313a06b9285cc1b
|
[
"Apache-2.0"
] | 1
|
2020-05-22T05:22:35.000Z
|
2020-05-22T05:22:35.000Z
|
analysis/plot/python/plot_groups/estimator.py
|
leozz37/makani
|
c94d5c2b600b98002f932e80a313a06b9285cc1b
|
[
"Apache-2.0"
] | 107
|
2020-09-10T17:29:30.000Z
|
2022-03-18T09:00:14.000Z
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plots relating to the estimator."""
from makani.analysis.plot.python import mplot
from makani.avionics.common import plc_messages
from makani.control import control_types
from makani.lib.python import c_helpers
from makani.lib.python.h5_utils import numpy_utils
from matplotlib.pyplot import plot
from matplotlib.pyplot import yticks
import numpy as np
from scipy import interpolate
MFig = mplot.PlotGroup.MFig  # pylint: disable=invalid-name
# Enum helpers used to translate integer telemetry values into readable
# tick labels on the plots below.
_WING_GPS_RECEIVER_HELPER = c_helpers.EnumHelper(
    'WingGpsReceiver', control_types)
_GROUND_STATION_MODE_HELPER = c_helpers.EnumHelper(
    'GroundStationMode', plc_messages)
def _QuatToVec(q):
dims = ['q0', 'q1', 'q2', 'q3']
return np.array([q[d] for d in dims])
class Plots(mplot.PlotGroup):
"""Plots of the estimator."""
@MFig(title='Filtered Velocity', ylabel='Velocity [m/s]', xlabel='Time [s]')
def PlotFilteredVelocity(self, e, c, s, params):
mplot.PlotVec3(c['time'], c['state_est']['Vg'], label='Vg', linestyle='-')
mplot.PlotVec3(c['time'], c['state_est']['Vg_f'], label='Vg_f',
linestyle='-.')
if s is not None:
mplot.PlotVec3(s['time'], s['wing']['Vg'], label='sim', linestyle=':')
@MFig(title='Acc Norm f', ylabel='Acc. [m/s^2]', xlabel='Time [s]')
def PlotAccNormF(self, e, c, s, params, imu_index=0):
plot(c['time'], c['state_est']['acc_norm_f'])
@MFig(title='Gyros', ylabel='Rate [rad/s]', xlabel='Time [s]')
def PlotGyros(self, e, c, s, params, imu_index=0):
mplot.PlotVec3(c['time'], c['control_input']['imus']['gyro'][:, imu_index])
@MFig(title='Filtered Body Rates', ylabel='Rate [rad/s]', xlabel='Time [s]')
def PlotBodyRates(self, e, c, s, params):
mplot.PlotVec3(c['time'], c['state_est']['pqr_f'])
@MFig(title='Attitude Error', ylabel='Error [deg]', xlabel='Time [s]')
def PlotAttitudeError(self, e, c, s, params):
for imu_index in range(3):
if s is not None:
dims = ['q0', 'q1', 'q2', 'q3']
q_s = {d: np.zeros(c['time'].shape) for d in dims}
for d in dims:
q_s[d] = interpolate.interp1d(s['time'], s['wing']['q'][d],
bounds_error=False)(c['time'])
q_s = _QuatToVec(q_s)
if 'q_g2b' in e.dtype.names:
q_c = e['q_g2b'][:, imu_index]
q_c = _QuatToVec(q_c)
plot(c['time'], np.rad2deg(
np.arccos(1.0 - 2.0 * (1.0 - np.sum(q_c * q_s, axis=0)**2.0))),
label='Imu %d' % imu_index)
if 'mahony_states' in e.dtype.names:
q_c = e['mahony_states']['q'][:, imu_index]
q_c = _QuatToVec(q_c)
plot(c['time'], np.rad2deg(
np.arccos(1.0 - 2.0 * (1.0 - np.sum(q_c * q_s, axis=0)**2.0))),
label='Imu %d' % imu_index)
@MFig(title='Gyro Biases', ylabel='Biases [rad/s]', xlabel='Time [s]')
def PlotGyroBiases(self, e, c, s, params, imu_index=0):
mplot.PlotVec3(c['time'], e['gyro_biases'][:, imu_index],
label='IMU %d' % imu_index)
if s is not None:
mplot.PlotVec3(s['time'], s['imus']['gyro_bias_b'][:, imu_index],
linestyle=':')
@MFig(title='Acc Biases', ylabel='Biases [m/s^1]', xlabel='Time [s]')
def PlotAccBiases(self, e, c, s, params, imu_index=0):
mplot.PlotVec3(c['time'], e['acc_b_estimates'][:, imu_index],
label='IMU %d' % imu_index)
@MFig(title='Air Speed', ylabel='Speed [m/s]', xlabel='Time [s]')
def PlotAirspeed(self, e, c, s, params):
plot(c['time'], c['state_est']['apparent_wind']['sph']['v'], 'b',
label='est')
plot(c['time'], c['state_est']['apparent_wind']['sph_f']['v'], 'g',
label='filt')
if s is not None:
plot(s['time'], s['wing']['apparent_wind_b']['v'], 'b:', label='sim')
@MFig(title='Magnetometer', ylabel='Field [Gauss]', xlabel='Time [s]')
def PlotMagnetometer(self, e, c, s, params):
mplot.PlotVec3(c['time'], c['control_input']['imus']['mag'][:, 0],
linestyle='-', label='A')
mplot.PlotVec3(c['time'], c['control_input']['imus']['mag'][:, 1],
linestyle=':', label='B')
mplot.PlotVec3(c['time'], c['control_input']['imus']['mag'][:, 2],
linestyle='-.', label='C')
@MFig(title='Specific Force', ylabel='Specific Force [m/s^2]',
xlabel='Time [s]')
def PlotAccelerometer(self, e, c, s, params):
mplot.PlotVec3(c['time'], c['control_input']['imus']['acc'][:, 0],
linestyle='-', label='A')
mplot.PlotVec3(c['time'], c['control_input']['imus']['acc'][:, 1],
linestyle=':', label='B')
mplot.PlotVec3(c['time'], c['control_input']['imus']['acc'][:, 2],
linestyle='-.', label='C')
@MFig(title='Accel.', ylabel='Specific Force [m/s^2]', xlabel='Time [s]')
def PlotSpecificForce(self, e, c, s, params):
mplot.PlotVec3(c['time'], c['estimator']['acc_b_estimates'][:, 0],
linestyle='-', label='A')
mplot.PlotVec3(c['time'], c['estimator']['acc_b_estimates'][:, 1],
linestyle=':', label='B')
mplot.PlotVec3(c['time'], c['estimator']['acc_b_estimates'][:, 2],
linestyle='-.', label='C')
@MFig(title='Magnetometer Diff', ylabel='Field [Gauss]', xlabel='Time [s]')
def PlotMagnetometerDiff(self, e, c, s, params, dimension='x'):
plot(c['time'],
c['control_input']['imus']['mag'][dimension][:, 0]
- c['control_input']['imus']['mag'][dimension][:, 1], 'b',
label='A-B ' + dimension)
plot(c['time'],
c['control_input']['imus']['mag'][dimension][:, 1]
- c['control_input']['imus']['mag'][dimension][:, 2], 'g',
label='B-C ' + dimension)
plot(c['time'],
c['control_input']['imus']['mag'][dimension][:, 2]
- c['control_input']['imus']['mag'][dimension][:, 0], 'r',
label='C-A ' + dimension)
@MFig(title='Current GPS', ylabel='GPS Receiver', xlabel='Time [s]')
def PlotGpsReceiver(self, e, c, s, params):
plot(c['time'], e['current_gps_receiver'], label='current_receiver')
yticks(_WING_GPS_RECEIVER_HELPER.Values(),
_WING_GPS_RECEIVER_HELPER.ShortNames())
def _PlotGpsPositionEcefChannel(self, c, d):
sigma = c['control_input']['wing_gps']['pos_sigma'][d]
wing_gps_pos = np.array(c['control_input']['wing_gps']['pos'][d])
wing_gps_pos[wing_gps_pos == 0] = float('nan')
plot(c['time'], wing_gps_pos[:, 0], 'b', label='0:%s ECEF' % d)
plot(c['time'], wing_gps_pos[:, 0] + sigma[:, 0], 'b:')
plot(c['time'], wing_gps_pos[:, 0] - sigma[:, 0], 'b:')
plot(c['time'], wing_gps_pos[:, 1], 'g', label='1:%s ECEF' % d)
plot(c['time'], wing_gps_pos[:, 1] + sigma[:, 1], 'g:')
plot(c['time'], wing_gps_pos[:, 1] - sigma[:, 1], 'g:')
@MFig(title='GPS Position ECEF', ylabel='Position [m]', xlabel='Time [s]')
def PlotGpsPositionEcefX(self, e, c, s, params):
self._PlotGpsPositionEcefChannel(c, 'x')
@MFig(title='GPS Position ECEF', ylabel='Position [m]', xlabel='Time [s]')
def PlotGpsPositionEcefY(self, e, c, s, params):
self._PlotGpsPositionEcefChannel(c, 'y')
@MFig(title='GPS Position ECEF', ylabel='Position [m]', xlabel='Time [s]')
def PlotGpsPositionEcefZ(self, e, c, s, params):
self._PlotGpsPositionEcefChannel(c, 'z')
@MFig(title='Kite Velocity Sigma', ylabel='Sigma Velocity [m/s]',
xlabel='Time [s]')
def PlotVelocitySigmas(self, e, c, s, params, plot_glas=True):
if 'cov_vel_g' in e.dtype.names:
plot(c['time'], e['cov_vel_g']['x']**0.5, 'b', label='Vg_x est')
plot(c['time'], e['cov_vel_g']['y']**0.5, 'g', label='Vg_y est')
plot(c['time'], e['cov_vel_g']['z']**0.5, 'r', label='Vg_z est')
if 'gps' in e.dtype.names:
aux_indices = np.argwhere(e['current_gps_receiver'] == 1)
vg = e['gps']['sigma_Vg'][:, 0]
vg[aux_indices] = e['gps']['sigma_Vg'][aux_indices, 1]
plot(c['time'], vg['x'], 'b-.', label='Vg_x gps')
plot(c['time'], vg['y'], 'g-.', label='Vg_y gps')
plot(c['time'], vg['z'], 'r-.', label='Vg_z gps')
@MFig(title='Kite Position Sigma', ylabel='Sigma Position [m]',
xlabel='Time [s]')
def PlotPositionSigmas(self, e, c, s, params, plot_glas=True):
if 'cov_vel_g' in e.dtype.names:
plot(c['time'], e['cov_pos_g']['x']**0.5, 'b', label='Xg_x est')
plot(c['time'], e['cov_pos_g']['y']**0.5, 'g', label='Xg_y est')
plot(c['time'], e['cov_pos_g']['z']**0.5, 'r', label='Xg_z est')
if 'gps' in e.dtype.names:
aux_indices = np.argwhere(e['current_gps_receiver'] == 1)
xg = e['gps']['sigma_Xg'][:, 0]
xg[aux_indices] = e['gps']['sigma_Xg'][aux_indices, 1]
plot(c['time'], xg['x'], 'b-.', label='Xg_x gps')
plot(c['time'], xg['y'], 'g-.', label='Xg_y gps')
plot(c['time'], xg['z'], 'r-.', label='Xg_z gps')
@MFig(title='Kite Velocity', ylabel='Velocity [m/s]', xlabel='Time [s]')
def PlotVelocity(self, e, c, s, params, plot_glas=True):
  """Plot estimated, GPS, GLAS, and simulated kite ground velocity.

  Args:
    e: Estimator telemetry (structured array).
    c: Controller telemetry (structured array with 'time').
    s: Simulator telemetry or None.
    params: System parameters (unused here).
    plot_glas: If True, also plot GLAS-derived velocity when present.
  """
  axes = [('x', 'b'), ('y', 'g'), ('z', 'r')]
  for d, clr in axes:
    plot(c['time'], c['state_est']['Vg'][d], clr, label='Vg_%s est' % d)
  if 'Vg_gps' in e.dtype.names:
    for d, clr in axes:
      plot(c['time'], e['Vg_gps'][d], clr + '-.', label='Vg_%s gps' % d)
  if 'gps' in e.dtype.names:
    aux_indices = np.argwhere(e['current_gps_receiver'] == 1)
    # Bug fix: copy before patching in the aux-receiver samples.  Basic
    # slicing returns a view, so the in-place assignment used to mutate
    # the telemetry array itself.
    vg = e['gps']['Vg'][:, 0].copy()
    vg[aux_indices] = e['gps']['Vg'][aux_indices, 1]
    for d, clr in axes:
      plot(c['time'], vg[d], clr + '-.', label='Vg_%s gps' % d)
  if plot_glas and 'Vg_glas' in e.dtype.names:
    for d, clr in axes:
      plot(c['time'], e['Vg_glas'][d], clr + ':', label='Vg_%s glas' % d)
  if s is not None:
    for d, clr in axes:
      plot(s['time'], s['wing']['Vg'][d], clr + '-o', label='Vg_%s sim' % d)
@MFig(title='Payout', ylabel='Payout [m]', xlabel='Time [s]')
def PlotPayout(self, e, c, s, params):
  """Plot the estimated winch payout."""
  payout = c['state_est']['winch']['payout']
  plot(c['time'], payout, label='Payout')
@MFig(title='Tension', ylabel='Tension [N]', xlabel='Time [s]')
def PlotTension(self, e, c, s, params):
  """Plot the estimated tether tension."""
  tension = c['state_est']['tether_force_b']['sph']['tension']
  plot(c['time'], tension, label='Tension est')
@MFig(title='Tether Angles', ylabel='Angles [deg]', xlabel='Time [s]')
def PlotTetherAngles(self, e, c, s, params):
  """Plot estimated tether roll and pitch angles in degrees."""
  sph = c['state_est']['tether_force_b']['sph']
  plot(c['time'], np.rad2deg(sph['roll']), label='Tether Roll')
  plot(c['time'], np.rad2deg(sph['pitch']), label='Tether Pitch')
@MFig(title='Kite Position', ylabel='Position [m]', xlabel='Time [s]')
def PlotPosition(self, e, c, s, params, plot_glas=True):
  """Plot estimated, GPS, GLAS, baro, and sim positions with 1-sigma bands.

  Args:
    e: Estimator telemetry (structured array).
    c: Controller telemetry (structured array with 'time').
    s: Simulator telemetry or None.
    params: System parameters (unused here).
    plot_glas: If True, also plot the GLAS position and its sigma band.
  """
  axes = [('x', 'b'), ('y', 'g'), ('z', 'r')]
  for d, clr in axes:
    est = c['state_est']['Xg'][d]
    est_sigma = c['estimator']['cov_pos_g'][d]**0.5
    plot(c['time'], est, clr, label='Xg_%s est' % d)
    plot(c['time'], est + est_sigma, clr + ':', label='Xg_%s est' % d)
    plot(c['time'], est - est_sigma, clr + ':', label='Xg_%s est' % d)
    gps = e['gps']['Xg'][d][:]
    gps_sigma = e['gps']['sigma_Xg'][d][:]
    plot(c['time'], gps, clr + '--', label='Xg_%s gps' % d)
    plot(c['time'], gps + gps_sigma, clr + ':', label='Xg_%s gps' % d)
    plot(c['time'], gps - gps_sigma, clr + ':', label='Xg_%s gps' % d)
    if plot_glas:
      glas = e['glas']['Xg'][d][:]
      glas_sigma = e['glas']['sigma_Xg'][d][:]
      plot(c['time'], glas, clr + '-.', label='Xg_%s glas' % d)
      plot(c['time'], glas + glas_sigma, clr + ':', label='Xg_%s glas' % d)
      plot(c['time'], glas - glas_sigma, clr + ':', label='Xg_%s glas' % d)
  # The barometer only measures altitude, so it gets the z-channel color.
  plot(c['time'], e['baro']['Xg_z'], 'r-*', label='Xg_z baro')
  if s is not None:
    for d, clr in axes:
      plot(s['time'], s['wing']['Xg'][d], clr + '-o', label='Xg_%s sim' % d)
@MFig(title='GSG Biases', ylabel='Angles [deg]', xlabel='Time [s]')
def PlotGsgBias(self, e, c, s, params):
  """Plot estimated GSG azimuth and elevation biases in degrees."""
  for field, clr, name in [('azi', 'b', 'Azi'), ('ele', 'g', 'Ele')]:
    plot(c['time'], np.rad2deg(e['gsg_bias'][field]), clr,
         label='%s Bias' % name)
@MFig(title='GPS Bias', ylabel='Position [m]', xlabel='Time [s]')
def PlotGpsBias(self, e, c, s, params):
  """Plot the estimated position biases for both GPS receivers."""
  for idx, name in [(0, 'A'), (1, 'B')]:
    mplot.PlotVec3(c['time'], e['Xg_gps_biases'][:, idx],
                   label='GPS %s bias' % name)
@MFig(title='Wind Speed', ylabel='Wind Speed [m/s]', xlabel='Time [s]')
def PlotWindSpeed(self, e, c, s, params):
  """Plot simulated (sensor and kite) and estimated wind speed magnitudes."""
  if s is not None:
    plot(s['time'], numpy_utils.Vec3Norm(s['wind_sensor']['wind_g']), 'C1--',
         label='wind speed at wind sensor [sim]')
    plot(s['time'], numpy_utils.Vec3Norm(s['wing']['wind_g']), 'C2:',
         label='wind speed at kite [sim]')
  # Draw the estimate last so it sits on top of the sim traces.
  plot(c['time'], c['state_est']['wind_g']['speed_f'], 'C0-',
       linewidth=2, label='wind speed [est]')
@MFig(title='Kite Azimuth', ylabel='Azimuth [deg]', xlabel='Time [s]')
def PlotKiteAzimuth(self, e, c, s, params):
  """Plot the kite azimuth derived from the estimated ground position."""
  pos = c['state_est']['Xg']
  plot(c['time'], np.rad2deg(np.arctan2(pos['y'], pos['x'])), 'b')
@MFig(title='Wind Direction (FROM)', ylabel='Direction [deg]',
      xlabel='Time [s]')
def PlotWindDir(self, e, c, s, params):
  """Plot the FROM wind direction from the sim and the estimator.

  Args:
    e: Estimator telemetry (unused here).
    c: Controller telemetry (structured array with 'time').
    s: Simulator telemetry or None.
    params: System parameters (unused here).
  """
  if s is not None:
    wind_g = s['wind_sensor']['wind_g']
    plot(s['time'],
         np.rad2deg(np.arctan2(-wind_g['y'], -wind_g['x'])), 'C1--',
         label='wind direction at wind sensor [sim]')
    wind_g = s['wing']['wind_g']
    # Bug fix: this trace previously reused 'C1--', making the kite trace
    # indistinguishable from the wind-sensor trace; 'C2:' matches the
    # kite-location style used in PlotWindSpeed.
    plot(s['time'],
         np.rad2deg(np.arctan2(-wind_g['y'], -wind_g['x'])), 'C2:',
         label='wind direction at kite [sim]')
  # The estimator's "dir_f" is the TO direction. Here we convert to a
  # FROM direction.
  dir_f = np.rad2deg(c['state_est']['wind_g']['dir_f']) + 180.0
  dir_f[dir_f > 360.0] -= 360.0
  # Plot the estimated wind direction last so that it will be on top.
  plot(c['time'], dir_f, 'C0-',
       linewidth=2, label='wind direction [est]')
@MFig(title='Tether Elevation', ylabel='[deg]', xlabel='Time [s]')
def PlotTetherElevation(self, e, c, s, params):
  """Plot the estimated (and, if available, simulated) tether elevation.

  Invalid samples are masked to NaN so they leave gaps in the trace.
  """
  # Bug fix: copy before masking.  Indexing the structured telemetry
  # array returns a view, so writing NaNs through it used to corrupt the
  # telemetry for any later consumer.
  elevation = c['state_est']['tether_ground_angles']['elevation'].copy()
  valid = c['state_est']['tether_ground_angles']['elevation_valid']
  elevation[np.logical_not(valid)] = float('nan')
  plot(c['time'], np.rad2deg(elevation), label='Est')
  if s is not None:
    plot(s['time'], np.rad2deg(s['tether']['Xv_start_elevation']), '--',
         label='Sim')
@MFig(title='Ground Station Mode', ylabel='Mode [enum]', xlabel='Time [s]')
def PlotGroundStationMode(self, e, c, s, params):
  """Plot GS mode from the controller input, estimator, and simulator."""
  plot(c['time'], c['control_input']['gs_sensors']['mode'], label='ci')
  plot(c['time'], c['state_est']['gs_mode'], label='est')
  if s is not None:
    plot(s['time'], s['gs02']['mode'], '-.', label='Sim')
  # Label the y axis with the mode names rather than raw enum values.
  yticks(_GROUND_STATION_MODE_HELPER.Values(),
         _GROUND_STATION_MODE_HELPER.ShortNames())
@MFig(title='Ground Station Transform Stage', ylabel='Stage [#]',
      xlabel='Time [s]')
def PlotGroundStationTransformStage(self, e, c, s, params):
  """Plot the GS transform stage from controller input and estimator."""
  plot(c['time'], c['control_input']['gs_sensors']['transform_stage'],
       label='ci')
  plot(c['time'], c['state_est']['gs_transform_stage'], label='est')
  # The transform stage is not yet available in simulator telemetry, so
  # there is nothing to plot from s.
# TODO: Create separate 'simulator' plot group.
@MFig(title='Moments', ylabel='Nm', xlabel='Time [s]')
def PlotKiteMoments(self, e, c, s, params, axis='y'):
  """Plot each simulated moment contribution about the given body axis."""
  contributions = ['aero', 'gravity', 'tether', 'rotors',
                   'disturb', 'blown_wing', 'total']
  for name in contributions:
    plot(s['time'], s['wing']['fm_' + name]['moment'][axis],
         label='fm_' + name)
@MFig(title='Kite Azimuth and Elevation', ylabel='Angle [deg]',
      xlabel='Time [s]')
def PlotKiteAzimuthAndElevation(self, e, c, s, params):
  """Plot the simulated kite's azimuth and elevation angles."""
  pos = s['wing']['Xg']
  plot(s['time'], np.rad2deg(np.arctan2(pos['y'], pos['x'])),
       label='kite azimuth')
  horizontal_range = np.hypot(pos['x'], pos['y'])
  plot(s['time'], np.rad2deg(np.arctan2(-pos['z'], horizontal_range)),
       label='kite elevation')
@MFig(title='Air Density (Measured at Ground Station)',
      ylabel='Density [kg/m^3]', xlabel='Time [s]')
def PlotDensity(self, e, c, s, params):
  """Compare the estimated air density against the configured constant."""
  plot(c['time'], c['state_est']['rho'], label='state_est.rho')
  rho_param = params['system_params']['phys']['rho']
  plot(c['time'], np.full_like(c['time'], rho_param),
       label='hard-coded value')
@MFig(title='Tether Anchor Point', ylabel='[m]', xlabel='Time [s]')
def PlotTetherAnchorPoint(self, e, c, s, params):
  """Plot the raw and filtered estimated tether anchor positions."""
  anchor = c['state_est']['tether_anchor']
  mplot.PlotVec3(c['time'], anchor['pos_g'], label='pos_g [est]',
                 linestyle='-')
  mplot.PlotVec3(c['time'], anchor['pos_g_f'], label='pos_g_f [est]',
                 linestyle='--')
| 46.047619
| 80
| 0.56828
| 2,764
| 18,373
| 3.654486
| 0.122287
| 0.043065
| 0.057915
| 0.04851
| 0.581725
| 0.537373
| 0.50302
| 0.422631
| 0.362142
| 0.31185
| 0
| 0.011304
| 0.19594
| 18,373
| 398
| 81
| 46.163317
| 0.672443
| 0.052795
| 0
| 0.236196
| 0
| 0
| 0.254317
| 0
| 0
| 0
| 0
| 0.002513
| 0
| 1
| 0.113497
| false
| 0.003067
| 0.027607
| 0
| 0.147239
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d638991d71730377e930b6afff8fce13cde7b4a
| 4,453
|
py
|
Python
|
siptrackdlib/objectregistry.py
|
sii/siptrackd
|
f124f750c5c826156c31ae8699e90ff95a964a02
|
[
"Apache-2.0"
] | null | null | null |
siptrackdlib/objectregistry.py
|
sii/siptrackd
|
f124f750c5c826156c31ae8699e90ff95a964a02
|
[
"Apache-2.0"
] | 14
|
2016-03-18T13:28:16.000Z
|
2019-06-02T21:11:29.000Z
|
siptrackdlib/objectregistry.py
|
sii/siptrackd
|
f124f750c5c826156c31ae8699e90ff95a964a02
|
[
"Apache-2.0"
] | 7
|
2016-03-18T13:04:54.000Z
|
2021-06-22T10:39:04.000Z
|
from siptrackdlib import errors
from siptrackdlib import log
class ObjectClass(object):
    """A class definition in the object registry.

    Wraps the registered class itself and tracks which class_ids are
    valid children of it.
    """
    def __init__(self, class_reference):
        self.class_reference = class_reference
        # Used as a set: keys are valid child class_ids, values are None.
        self.valid_children = {}

    def registerChild(self, class_reference):
        """Register a class as a valid child class."""
        self.valid_children[class_reference.class_id] = None
class ObjectRegistry(object):
    """Keeps track of registered classes and their valid children.

    The object registry is used to keep track of valid classes and
    what classes are valid children of a class.
    It also allocates object ids and can be used to create new objects
    based on the registry.
    """
    def __init__(self):
        # class_id -> ObjectClass and class_name -> ObjectClass indexes.
        self.object_classes = {}
        self.object_classes_by_name = {}
        # Monotonically increasing counter backing allocateOID().
        self.next_oid = 0

    def registerClass(self, class_reference):
        """Register a new class.

        This creates a new ObjectClass and stores it in the registry
        (indexed both by class_id and class_name), enabling creation of
        objects of the given class.
        The returned ObjectClass object can be used to register valid
        children of the class.
        """
        object_class = ObjectClass(class_reference)
        self.object_classes[class_reference.class_id] = object_class
        self.object_classes_by_name[class_reference.class_name] = object_class
        return object_class

    def isValidChild(self, parent_id, child_id):
        """Check if a class is a valid child of another class."""
        # Idiom fix: 'not x in y' -> 'x not in y'.
        if parent_id not in self.object_classes:
            return False
        parent = self.object_classes[parent_id]
        if child_id not in parent.valid_children:
            return False
        return True

    def getClass(self, class_name):
        """Return the class reference registered under class_name, or None."""
        if class_name in self.object_classes_by_name:
            return self.object_classes_by_name[class_name].class_reference
        return None

    def getClassById(self, class_id):
        """Return the class reference registered under class_id, or None."""
        # Docstring fix: this looks up by class_id, not class_name.
        if class_id in self.object_classes:
            return self.object_classes[class_id].class_reference
        return None

    def getIDByName(self, class_name):
        """Return a class's id given its name, or None if unregistered."""
        if class_name in self.object_classes_by_name:
            object_class = self.object_classes_by_name[class_name]
            return object_class.class_reference.class_id
        return None

    def allocateOID(self):
        """Allocate and return a new oid (stringified counter)."""
        ret = str(self.next_oid)
        self.next_oid += 1
        return ret

    def revertOID(self):
        """Revert the most recent oid allocation."""
        self.next_oid -= 1

    def createObject(self, class_id, parent_branch, *args, **kwargs):
        """Try to create a new object based on a registered class.

        This will try to create a new object of 'class_id' type, allocating
        it its own oid. A new branch will also be created in the object
        tree to hold the object.

        Raises:
            errors.SiptrackError: if class_id is not registered.
        """
        if class_id not in self.object_classes:
            raise errors.SiptrackError(
                'trying to create object with invalid class id \'%s\'' % (class_id))
        object_class = self.object_classes[class_id]
        oid = self.allocateOID()
        branch = parent_branch.add(oid)
        try:
            obj = object_class.class_reference(oid, branch, *args, **kwargs)
        except Exception:
            # Roll back the partially created branch and the oid before
            # propagating the original error.
            branch.remove(recursive=False, callback_data=None)
            self.revertOID()
            raise
        branch.ext_data = obj
        return obj

    def _createObject(self, class_id, branch):
        """Try to create _only_ a new object based on an oid and class id.

        Similar to createObject, but takes a class id and an existing
        branch and only creates a new object, no branch etc.

        Raises:
            errors.SiptrackError: if class_id is not registered.
        """
        if class_id not in self.object_classes:
            raise errors.SiptrackError(
                'trying to create object with invalid class id \'%s\'' % (class_id))
        object_class = self.object_classes[class_id]
        return object_class.class_reference(branch.oid, branch)
# Module-level singleton registry shared by importers of this module.
object_registry = ObjectRegistry()
| 37.108333
| 88
| 0.650348
| 588
| 4,453
| 4.746599
| 0.207483
| 0.047653
| 0.097456
| 0.040846
| 0.347187
| 0.244357
| 0.209961
| 0.194554
| 0.166607
| 0.166607
| 0
| 0.000935
| 0.279362
| 4,453
| 119
| 89
| 37.420168
| 0.868806
| 0.292387
| 0
| 0.275362
| 0
| 0
| 0.031789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.173913
| false
| 0
| 0.028986
| 0
| 0.42029
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d66576529e5704ad9e6b2d90cc87687907b8c91
| 1,139
|
py
|
Python
|
src/kol/request/CombatRequest.py
|
ZJ/pykol
|
c0523a4a4d09bcdf16f8c86c78da96914e961076
|
[
"BSD-3-Clause"
] | 1
|
2016-05-08T13:26:56.000Z
|
2016-05-08T13:26:56.000Z
|
src/kol/request/CombatRequest.py
|
ZJ/pykol
|
c0523a4a4d09bcdf16f8c86c78da96914e961076
|
[
"BSD-3-Clause"
] | null | null | null |
src/kol/request/CombatRequest.py
|
ZJ/pykol
|
c0523a4a4d09bcdf16f8c86c78da96914e961076
|
[
"BSD-3-Clause"
] | null | null | null |
from GenericAdventuringRequest import GenericAdventuringRequest
class CombatRequest(GenericAdventuringRequest):
    """
    A request used for a single round of combat. The user may attack, use an item or skill, or
    attempt to run away.
    """

    # What follows are a list of available actions.
    ATTACK = 0
    USE_ITEM = 1
    USE_SKILL = 2
    RUN_AWAY = 3

    def __init__(self, session, action, param=None):
        """
        In this constructor, action should be set to CombatRequest.ATTACK, CombatRequest.USE_ITEM,
        CombatRequest.USE_SKILL, or CombatRequest.RUN_AWAY. If a skill or item is to be used, the
        caller should also specify param to be the number of the item or skill the user wishes
        to use.
        """
        super(CombatRequest, self).__init__(session)
        self.url = session.serverURL + "fight.php"
        # Bug fix: the action constants are class attributes; comparing
        # against bare ATTACK/USE_ITEM/etc. raised NameError at runtime.
        if action == CombatRequest.ATTACK:
            self.requestData["action"] = "attack"
        elif action == CombatRequest.USE_ITEM:
            self.requestData["action"] = "useitem"
            self.requestData["whichitem"] = param
        elif action == CombatRequest.USE_SKILL:
            self.requestData["action"] = "skill"
            self.requestData["whichskill"] = param
        elif action == CombatRequest.RUN_AWAY:
            self.requestData["action"] = "runaway"
| 32.542857
| 92
| 0.72432
| 155
| 1,139
| 5.212903
| 0.432258
| 0.111386
| 0.10396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004269
| 0.177349
| 1,139
| 34
| 93
| 33.5
| 0.858058
| 0.381036
| 0
| 0
| 0
| 0
| 0.113737
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.052632
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d6a85cb3cf62644daa8bec049af6d5de6f147e2
| 632
|
py
|
Python
|
src/modules/dates/searchDates.py
|
leonardoleyva/api-agenda-uas
|
697740a0a3feebb2ada01133db020fcf5127e1de
|
[
"MIT"
] | 1
|
2022-03-13T02:28:29.000Z
|
2022-03-13T02:28:29.000Z
|
src/modules/dates/searchDates.py
|
leonardoleyva/api-agenda-uas
|
697740a0a3feebb2ada01133db020fcf5127e1de
|
[
"MIT"
] | null | null | null |
src/modules/dates/searchDates.py
|
leonardoleyva/api-agenda-uas
|
697740a0a3feebb2ada01133db020fcf5127e1de
|
[
"MIT"
] | null | null | null |
from .date import Date
from ..response import handleResponse
from datetime import datetime
def searchDates():
    """Return today's and future dates wrapped in a standard response.

    Fetches all dates via Date().searchAll(), keeps only those whose
    YYYY-MM-DD date is today or later, and wraps them with handleResponse.
    Robustness fixes: the status check uses truthiness instead of
    '== True', and a missing 'dates' key no longer raises KeyError.
    """
    req = Date().searchAll()
    message = ("Listado de citas" if req['status']
               else "No se pudo conseguir el listado de citas, inténtelo más tarde")
    dateToday = datetime.now().isoformat().split('T')[0]
    dates = []
    # req.get(...) guards against a missing 'dates' key on failure.
    for date in req.get('dates') or []:
        dateDict = date.to_dict()
        dateYYMMDD = dateDict['date'].split('T')[0]
        if dateYYMMDD >= dateToday:
            dates.append({**dateDict, 'id': date.id})
    return handleResponse(req['status'], message, dates)
| 27.478261
| 126
| 0.643987
| 77
| 632
| 5.272727
| 0.545455
| 0.044335
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004057
| 0.219937
| 632
| 22
| 127
| 28.727273
| 0.819473
| 0
| 0
| 0
| 0
| 0
| 0.161392
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.2
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d6cc5852312640c236532b7026c1ac08efbc30f
| 13,148
|
py
|
Python
|
core/views/misc.py
|
ditttu/gymkhana-Nominations
|
2a0e993c1b8362c456a9369b0b549d1c809a21df
|
[
"MIT"
] | 3
|
2018-02-27T13:48:28.000Z
|
2018-03-03T21:57:50.000Z
|
core/views/misc.py
|
ditttu/gymkhana-Nominations
|
2a0e993c1b8362c456a9369b0b549d1c809a21df
|
[
"MIT"
] | 6
|
2020-02-12T00:07:46.000Z
|
2022-03-11T23:25:59.000Z
|
core/views/misc.py
|
ditttu/gymkhana-Nominations
|
2a0e993c1b8362c456a9369b0b549d1c809a21df
|
[
"MIT"
] | 1
|
2019-03-26T20:19:57.000Z
|
2019-03-26T20:19:57.000Z
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import HttpResponseRedirect
from django.shortcuts import render,HttpResponse
from django.views.generic.edit import CreateView, UpdateView, DeleteView
import csv, json
from datetime import date,datetime
from itertools import chain
from operator import attrgetter
from forms.models import Questionnaire
from forms.views import replicate
from core.models import *
from core.forms import *
from .nomi_cr import get_access_and_post_for_result, get_access_and_post
@login_required
def ratify(request, nomi_pk):
    """Ratify a nomination if the caller holds the ratifying post."""
    nomi = Nomination.objects.get(pk=nomi_pk)
    access, view_post = get_access_and_post_for_result(request, nomi_pk)
    if access and view_post.perms == "can ratify the post":
        nomi.append()
        return HttpResponseRedirect(reverse('applicants', kwargs={'pk': nomi_pk}))
    return render(request, 'no_access.html')
@login_required
def request_ratify(request, nomi_pk):
    """Forward a nomination to the parent post for ratification."""
    nomi = Nomination.objects.get(pk=nomi_pk)
    access, view_post = get_access_and_post_for_result(request, nomi_pk)
    if not access:
        return render(request, 'no_access.html')
    parent = view_post.parent
    if parent:
        nomi.result_approvals.add(parent)
        nomi.nomi_approvals.add(parent)
        nomi.status = 'Sent for ratification'
        nomi.save()
    return HttpResponseRedirect(reverse('applicants', kwargs={'pk': nomi_pk}))
@login_required
def cancel_ratify(request, nomi_pk):
    """Withdraw a pending ratification request from the parent post."""
    nomi = Nomination.objects.get(pk=nomi_pk)
    access, view_post = get_access_and_post_for_result(request, nomi_pk)
    if not access:
        return render(request, 'no_access.html')
    parent = view_post.parent
    if parent:
        nomi.result_approvals.remove(parent)
        nomi.nomi_approvals.remove(parent)
        nomi.status = 'Interview period'
        nomi.save()
    return HttpResponseRedirect(reverse('applicants', kwargs={'pk': nomi_pk}))
@login_required
def cancel_result_approval(request, nomi_pk):
    """Revoke the parent post's result approval, unless the grandparent still approves."""
    nomi = Nomination.objects.get(pk=nomi_pk)
    access, view_post = get_access_and_post_for_result(request, nomi_pk)
    if not access:
        return render(request, 'no_access.html')
    parent = view_post.parent
    if parent.parent not in nomi.result_approvals.all():
        nomi.result_approvals.remove(parent)
    return HttpResponseRedirect(reverse('applicants', kwargs={'pk': nomi_pk}))
@login_required
def result_approval(request, nomi_pk):
    """Approve a nomination's results as the direct parent post."""
    nomi = Nomination.objects.get(pk=nomi_pk)
    access, view_post = get_access_and_post_for_result(request, nomi_pk)
    if not access:
        return render(request, 'no_access.html')
    if view_post == nomi.nomi_post.parent:
        # NOTE(review): show_result is set but nomi.save() is not called
        # here — confirm whether persistence of this flag is handled
        # elsewhere.
        nomi.show_result = True
        nomi.result_approvals.add(view_post.parent)
    return HttpResponseRedirect(reverse('applicants', kwargs={'pk': nomi_pk}))
@login_required
def create_deratification_request(request, post_pk, user_pk, type):
    """Create a deratification request for a post holder.

    Only holders of the post's parent may create the request; the approval
    is routed to that parent post.  (The parameter name 'type' shadows the
    builtin but is part of the URLconf interface, so it stays.)
    """
    post = Post.objects.get(pk=post_pk)
    user = User.objects.get(pk=user_pk)
    if request.user in post.parent.post_holders.all():
        Deratification.objects.create(name=user, post=post, status=type,
                                      deratify_approval=post.parent)
    return HttpResponseRedirect(reverse('child_post', kwargs={'pk': post_pk}))
@login_required
def approve_deratification_request(request, pk):
    """Approve a deratification: either execute it or escalate to the parent.

    A ratifying post removes the holder and updates the post history; any
    other approver forwards the request one level up the hierarchy.
    """
    to_deratify = Deratification.objects.get(pk=pk)
    view = to_deratify.deratify_approval
    if request.user not in view.post_holders.all():
        return render(request, 'no_access.html')
    if view.perms == "can ratify the post":
        to_deratify.post.post_holders.remove(to_deratify.name)
        history = PostHistory.objects.filter(user=to_deratify.name).filter(
            post=to_deratify.post).first()
        if to_deratify.status == 'remove from post':
            # Plain removal: erase the history entry entirely.
            history.delete()
            to_deratify.status = 'removed'
        else:
            # Deratification: close out the history entry instead.
            history.end = date.today()
            history.save()
            to_deratify.status = 'deratified'
    else:
        # Not a ratifier: escalate the request to the parent post.
        to_deratify.deratify_approval = view.parent
    to_deratify.save()
    return HttpResponseRedirect(reverse('post_view', kwargs={'pk': view.pk}))
@login_required
def reject_deratification_request(request, pk):
    """Delete a pending deratification request (approver only)."""
    to_deratify = Deratification.objects.get(pk=pk)
    view = to_deratify.deratify_approval
    if request.user not in view.post_holders.all():
        return render(request, 'no_access.html')
    to_deratify.delete()
    return HttpResponseRedirect(reverse('post_view', kwargs={'pk': view.pk}))
'''
mark_as_interviewed, reject_nomination, accept_nomination: Changes the interview status/ nomination_instance status
of the applicant
'''
def get_access_and_post_for_selection(request, nomi_pk):
    """Return (access, post) for the nomination's result-approval posts.

    access is True if the requesting user holds any post in the
    nomination's result_approvals; post is the first such post (or None).
    """
    nomi = Nomination.objects.get(pk=nomi_pk)
    for post in nomi.result_approvals.all():
        if request.user in post.post_holders.all():
            return True, post
    return False, None
@login_required
def mark_as_interviewed(request, pk):
    """Mark an application's interview as done (approver or panel member)."""
    application = NominationInstance.objects.get(pk=pk)
    nomi_pk = application.nomination.pk
    nomination = Nomination.objects.get(pk=nomi_pk)
    access, _ = get_access_and_post_for_selection(request, nomi_pk)
    if access or request.user in nomination.interview_panel.all():
        application.interview_status = 'Interview Done'
        application.save()
        return HttpResponseRedirect(reverse('nomi_answer', kwargs={'pk': pk}))
    return render(request, 'no_access.html')
@login_required
def accept_nomination(request, pk):
    """Mark an application as accepted and log who accepted it.

    Allowed for result-approval post holders and interview-panel members.
    """
    application = NominationInstance.objects.get(pk=pk)
    id_accept = application.nomination.pk
    nomination = Nomination.objects.get(pk=id_accept)
    access, view_post = get_access_and_post_for_selection(request, id_accept)
    if access or request.user in nomination.interview_panel.all():
        application.status = 'Accepted'
        application.save()
        comment = '<strong>' + str(request.user.userprofile.name) + '</strong>'\
            + ' Accepted '\
            + '<strong>' + str(application.user.userprofile.name) + '</strong>'
        # create() persists the comment; the returned object was previously
        # bound to an unused 'status' variable (removed).
        Commment.objects.create(comments=comment, nomi_instance=application)
        return HttpResponseRedirect(reverse('applicants', kwargs={'pk': id_accept}))
    return render(request, 'no_access.html')
@login_required
def reject_nomination(request, pk):
    """Mark an application as rejected and log who rejected it.

    Allowed for result-approval post holders and interview-panel members.
    """
    application = NominationInstance.objects.get(pk=pk)
    id_reject = application.nomination.pk
    nomination = Nomination.objects.get(pk=id_reject)
    access, view_post = get_access_and_post_for_selection(request, id_reject)
    if access or request.user in nomination.interview_panel.all():
        application.status = 'Rejected'
        application.save()
        comment = '<strong>' + str(request.user.userprofile.name) + '</strong>'\
            + ' Rejected '\
            + '<strong>' + str(application.user.userprofile.name) + '</strong>'
        # create() persists the comment; the returned object was previously
        # bound to an unused 'status' variable (removed).
        Commment.objects.create(comments=comment, nomi_instance=application)
        return HttpResponseRedirect(reverse('applicants', kwargs={'pk': id_reject}))
    return render(request, 'no_access.html')
'''
append_user, replace_user: Adds and Removes the current post-holders according to their selection status
'''
@login_required
def append_user(request, pk):
    """Append accepted applicants to the post if the caller can ratify."""
    can_ratify = any(post.perms == "can ratify the post"
                     for post in request.user.posts.all())
    if not can_ratify:
        return render(request, 'no_access.html')
    nomi = Nomination.objects.get(pk=pk)
    nomi.append()
    return HttpResponseRedirect(reverse('applicants', kwargs={'pk': pk}))
@login_required
def end_tenure(request):
    """Remove post holders whose recorded tenure end date has passed.

    Only available to holders of a ratifying post.  Holders without a
    PostHistory entry are skipped.
    """
    can_ratify = any(post.perms == "can ratify the post"
                     for post in request.user.posts.all())
    if not can_ratify:
        return render(request, 'no_access.html')
    for post in Post.objects.all():
        for holder in post.post_holders.all():
            try:
                history = PostHistory.objects.get(post=post, user=holder)
            except ObjectDoesNotExist:
                continue
            if history.end and date.today() >= history.end:
                post.post_holders.remove(holder)
    return HttpResponseRedirect(reverse('index'))
# Import all posts of all clubs
# Check if their session has expired (31-3-2018 has passed)
# Remove them from the post
# Create the post history (No need, its already created)
## ------------------------------------------------------------------------------------------------------------------ ##
############################################ PROFILE VIEWS ##################################################
## ------------------------------------------------------------------------------------------------------------------ ##
@login_required
def profile_view(request):
    """Render the logged-in user's profile page.

    Collects held posts, post history, and the user's nominations grouped
    by stage (pending, in interview, declared).  Redirects to the profile
    creation page when no UserProfile exists yet.
    """
    pk = request.user.pk
    my_posts = Post.objects.filter(post_holders=request.user)
    history = PostHistory.objects.filter(user=request.user).order_by('start')
    pending_nomi = (
        NominationInstance.objects.filter(user=request.user)
        .filter(nomination__status='Nomination out')
        | NominationInstance.objects.filter(user=request.user)
        .filter(nomination__status='Interview period and Nomination reopened'))
    # Only instances the user actually submitted (not merely saved).
    interview_nomi = (
        NominationInstance.objects.filter(user=request.user)
        .filter(submission_status=True)
        .filter(nomination__status='Interview period')
        | NominationInstance.objects.filter(user=request.user)
        .filter(submission_status=True)
        .filter(nomination__status='Interview period and Reopening initiated'))
    declared_nomi = (NominationInstance.objects.filter(user=request.user)
                     .filter(submission_status=True)
                     .filter(nomination__status='Sent for ratification'))
    try:
        user_profile = UserProfile.objects.get(user__id=pk)
    except ObjectDoesNotExist:
        return HttpResponseRedirect('create')
    post_history = [entry.post for entry in history]
    # Held posts that were never recorded in the post history.
    post_exclude_history = [post for post in my_posts
                            if post not in post_history]
    return render(request, 'profile.html', context={
        'user_profile': user_profile, 'history': history,
        'pending_nomi': pending_nomi, 'declared_nomi': declared_nomi,
        'interview_nomi': interview_nomi, 'my_posts': my_posts,
        'excluded_posts': post_exclude_history})
@login_required
def public_profile(request, pk):
    """Render another user's public profile with their posts and history."""
    student = UserProfile.objects.get(pk=pk)
    student_user = student.user
    history = PostHistory.objects.filter(user=student_user)
    my_posts = Post.objects.filter(post_holders=student_user)
    return render(request, 'public_profile.html', context={
        'student': student, 'history': history, 'my_posts': my_posts})
def UserProfileUpdate(request, pk):
    """Edit a user profile; only the owning user may update it.

    (Function name is not snake_case, but it is referenced by the URLconf
    and therefore kept as-is.)
    """
    profile = UserProfile.objects.get(pk=pk)
    if profile.user != request.user:
        return render(request, 'no_access.html')
    form = ProfileForm(request.POST or None, instance=profile)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect(reverse('profile'))
    return render(request, 'nomi/userprofile_form.html', context={'form': form})
class CommentUpdate(UpdateView):
    """Edit an existing comment; return to its answer page on success."""
    model = Commment
    fields = ['comments']

    def get_success_url(self):
        return reverse('nomi_answer', kwargs={'pk': self.kwargs['form_pk']})
class CommentDelete(DeleteView):
    """Delete a comment; return to its answer page on success."""
    model = Commment

    def get_success_url(self):
        return reverse('nomi_answer', kwargs={'pk': self.kwargs['form_pk']})
def all_nominations(request):
    """List every nomination that has progressed past creation."""
    visible = Nomination.objects.all().exclude(status='Nomination created')
    return render(request, 'all_nominations.html', context={'all_nomi': visible})
| 34.783069
| 179
| 0.6601
| 1,537
| 13,148
| 5.446975
| 0.127521
| 0.015767
| 0.027234
| 0.038462
| 0.579312
| 0.535834
| 0.508958
| 0.484711
| 0.477186
| 0.449833
| 0
| 0.00068
| 0.217143
| 13,148
| 377
| 180
| 34.875332
| 0.812688
| 0.040843
| 0
| 0.503788
| 0
| 0
| 0.082776
| 0.002122
| 0.007576
| 0
| 0
| 0
| 0
| 1
| 0.075758
| false
| 0.003788
| 0.056818
| 0
| 0.287879
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d6deeb2db5e44e12af11dde00260d1e8aae607e
| 29,706
|
py
|
Python
|
make_paper_plots.py
|
mjbasso/asymptotic_formulae_examples
|
a1ba177426bf82e2a58e7b54e1874b088a86595f
|
[
"MIT"
] | 1
|
2021-08-06T14:58:51.000Z
|
2021-08-06T14:58:51.000Z
|
make_paper_plots.py
|
mjbasso/asymptotic_formulae_examples
|
a1ba177426bf82e2a58e7b54e1874b088a86595f
|
[
"MIT"
] | null | null | null |
make_paper_plots.py
|
mjbasso/asymptotic_formulae_examples
|
a1ba177426bf82e2a58e7b54e1874b088a86595f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import logging
import os
import pickle
import time
from os.path import join as pjoin
import matplotlib.pyplot as plt
import numpy as np
import scipy
from matplotlib import rc
from scipy.optimize import least_squares
import asymptotic_formulae
from asymptotic_formulae import GaussZ0
from asymptotic_formulae import GaussZ0_MC
from asymptotic_formulae import nCRZ0
from asymptotic_formulae import nCRZ0_MC
from asymptotic_formulae import nSRZ0
from asymptotic_formulae import nSRZ0_MC
rc('font', **{'family': 'sans-serif','sans-serif': ['Helvetica']})
rc('text', usetex = True)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s : %(name)s : %(levelname)s : %(message)s'))
logger.addHandler(sh)
# For creating a set of uniformly-spaced points on a log scale
# For creating a set of uniformly-spaced points on a log scale
def logVector(low, high, n):
    ''' Return n + 1 points uniformly spaced on a log scale from low to high.
    low := first point (float, > 0)
    high := last point (float, > 0)
    n := number of intervals; n + 1 points are returned
    Returns a numpy array of floats '''
    # The original hand-rolled construction (10 ** linspace(log10(low),
    # log10(high), n + 1)) is exactly what np.logspace computes.
    return np.logspace(np.log10(low), np.log10(high), n + 1)
# As described in Section 2.1.4
# As described in Section 2.1.4
def nCRZ0_DiagTau(s, b, tau):
    ''' Calculate the asymptotic significance for a 1 SR + N CRs, diagonal tau measurement
    s := expected signal yield in SR (float)
    b := expected background yields in SR (vector of floats, size N)
    tau := transfer coefficients, tau[i] carries background i yield in SR to CR i (vector of floats, size N)
    Returns Z0 (float) '''
    # Argument checking
    s = float(s)
    b = np.array(b).astype(float)
    tau = np.array(tau).astype(float)
    assert b.ndim == 1  # b should be a vector
    assert tau.ndim == 1  # tau should be a vector
    assert len(b) == len(tau)
    assert (tau >= 0.).all()  # Assert tau contains transfer factors (i.e., all positive)
    n = s + np.sum(b)

    # Stationarity conditions defining the conditional MLE of the
    # background yields under the background-only hypothesis.
    def stationarity(bhh):
        total = np.sum(bhh)
        return [n / total - 1. + tau[k] * (b[k] / bhh[k] - 1.)
                for k in range(len(b))]

    # Solve the system with the nominal yields as the starting point.
    lower = tuple(len(b) * [0.])
    upper = tuple(len(b) * [np.inf])
    res = least_squares(stationarity, x0 = b, bounds = [lower, upper])
    if not res.success:
        raise RuntimeError('Minimization failed: status = %s, message = \'%s\'' % (res.status, res.message))
    bhh = np.array(res.x)
    # Profile-likelihood-ratio significance.
    ratio = (np.sum(bhh) / n) ** n
    constraint = np.prod([(bhh[k] / b[k]) ** (tau[k] * b[k])
                          for k in range(len(b))])
    return np.sqrt(-2. * np.log(ratio * constraint))
# As described in Section 2.4.2
def GaussZ0_Decorr(s, b, sigma):
    ''' Calculate the asymptotic significance for 1 SR with N Gaussian-constrained backgrounds (decorrelated)
    s := expected signal yield in SR (float)
    b := expected background yields in SR (vector of floats, size N)
    sigma := width of Gaussian constraint ("absolute uncertainty") for each background yield (vector of floats, size N)
    Returns Z0 (float) '''
    # Validate and normalize the inputs
    b = np.array(b).astype(float)
    sigma = np.array(sigma).astype(float)
    s = float(s)
    assert b.ndim == 1  # b should be a vector
    assert sigma.ndim == 1  # sigma should be a vector
    assert len(b) == len(sigma)
    assert (sigma >= 0.).all()  # Assert sigma contains widths (i.e., all positive)
    # Asimov observed count in the SR
    n = s + np.sum(b)
    # Conditional maximum-likelihood equations, vectorized over all backgrounds
    def residuals(bhh):
        return sigma * (n / np.sum(bhh) - 1.) - (bhh - b) / sigma
    # Solve for the conditional MLEs bhh, constrained to non-negative yields
    nb = len(b)
    res = least_squares(residuals, x0 = b, bounds = [tuple(nb * [0.]), tuple(nb * [np.inf])])
    if not res.success:
        raise RuntimeError('Minimization failed: status = %s, message = \'%s\'' % (res.status, res.message))
    bhh = np.array(res.x)
    # Profile-likelihood-ratio significance (Poisson SR term + Gaussian penalties)
    logLR = n * np.log(np.sum(bhh) / n) + n - np.sum(bhh + 0.5 * ((b - bhh) / sigma) ** 2)
    return np.sqrt(-2. * logLR)
def makedir(path):
    ''' Create the directory at path (including parents) if it does not already exist
    path := directory to create (str)
    Returns path unchanged, so the call can be assigned directly '''
    # exist_ok avoids the check-then-create race of os.path.exists + os.makedirs
    os.makedirs(path, exist_ok = True)
    return path
def load_data_from_pickle(path):
    ''' Load a pickled results cache from path
    path := pickle file to read (str)
    Returns the unpickled object, or an empty dict when no file exists '''
    # Guard clause: a missing cache simply means "no results yet"
    if not os.path.exists(path):
        return {}
    with open(path, 'rb') as f:
        return pickle.load(f)
def dump_data_to_pickle(data, path):
    ''' Pickle data to path, unless a file already exists there
    data := object to pickle
    path := destination pickle file (str)
    Existing pickles are deliberately left untouched so cached toy results
    from previous runs are never overwritten. Returns None. '''
    if not os.path.exists(path):
        with open(path, 'wb') as f:
            # pickle.dump returns None: previously the result was (misleadingly)
            # rebound to the local name 'data'
            pickle.dump(data, f)
def main():
basedir = os.path.dirname(os.path.abspath(__file__))
pickledir = makedir(pjoin(basedir, 'pickles/'))
plotdir = makedir(pjoin(basedir, 'plots/'))
#####################
### SECTION 2.1.1 ###
#####################
    def Section2p1p1():
        # Figure: asymptotic significance for 1 SR + 2 CRs where each CR is
        # contaminated by the other CR's background process (off-diagonal tau).
        s = 50.
        b1 = 100.
        b2 = 50.
        tau11 = 60.
        tau22 = 40.
        # Scan the off-diagonal transfer factors from zero up to the point where
        # the contaminating background matches the target background in the CR
        tau12 = np.linspace(0., b1 * tau11 / b2, 100)
        tau21 = np.linspace(0., b2 * tau22 / b1, 100)
        z0 = np.empty((len(tau12), len(tau21)))
        for i in range(len(tau12)):
            for j in range(len(tau21)):
                # nCRZ0 comes from asymptotic_formulae (general 1 SR + N CR formula)
                z0[i, j] = nCRZ0(s, [b1, b2], [[tau11, tau12[i]], [tau21[j], tau22]])
        fig = plt.figure()
        ax = fig.add_subplot(111)
        # Express both axes as contamination ratios rather than raw transfer factors
        pcm = ax.pcolormesh(tau12 * b2 / (tau11 * b1), tau21 * b1 / (tau22 * b2), z0, cmap = 'magma', shading = 'nearest')
        # Paint cell edges with the face colour
        pcm.set_edgecolor('face')
        cbar = plt.colorbar(pcm)
        ax.set_xlabel('($b_2$ in CR 1) / ($b_1$ in CR 1) [a.u.]')
        ax.set_ylabel('($b_1$ in CR 2) / ($b_2$ in CR 2) [a.u.]')
        cbar.set_label('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]', rotation = 270, labelpad = 20)
        # ax.set_title('Asymptotic significance for CRs with mixed background processes', pad = 10)
        plt.savefig(pjoin(plotdir, '1SRNCR_mixed_processes.eps'), format = 'eps', dpi = 1200)
        plt.close()
        # Second figure: scale both diagonal transfer factors (CR statistics)
        # while keeping a fixed 10% relative cross-contamination
        multi = logVector(1, 1000, 100)
        z0 = np.empty((len(multi), len(multi)))
        for i in range(len(multi)):
            for j in range(len(multi)):
                z0[i, j] = nCRZ0(s, [b1, b2], [[multi[i], 0.1 * multi[i] * b1 / b2], [0.1 * multi[j] * b2 / b1, multi[j]]])
        fig = plt.figure()
        ax = fig.add_subplot(111)
        pcm = ax.pcolormesh(multi, multi, z0, cmap = 'magma', shading = 'nearest')
        pcm.set_edgecolor('face')
        cbar = plt.colorbar(pcm)
        ax.set_xlabel('($b_1$ in CR 1) / ($b_1$ in SR) [a.u.]')
        ax.set_ylabel('($b_2$ in CR 2) / ($b_2$ in SR) [a.u.]')
        ax.set_xscale('log')
        ax.set_yscale('log')
        cbar.set_label('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]', rotation = 270, labelpad = 20)
        # ax.set_title('Asymptotic significance for CRs varying transfer factors', pad = 10)
        plt.savefig(pjoin(plotdir, '1SRNCR_varying_tau.eps'), format = 'eps', dpi = 1200)
        plt.close()
#####################
### SECTION 2.1.2 ###
#####################
    def Section2p1p2():
        # Figure: numerical (toy MC) vs. asymptotic vs. simple s/sqrt(s+b)
        # significance for 1 SR + 2 CRs with diagonal transfer factors.
        # Set the seed
        np.random.seed(43)
        datapath = pjoin(pickledir, 'Section2p1p2.pkl')
        s = 10.
        b1 = [round(n) for n in logVector(1., 1000., 10)]
        b2 = [5., 25., 150.]
        tau1 = 8.
        tau2 = 5.
        colours = ['g', 'b', 'r']
        # Toy results are cached in a pickle so re-runs skip the MC generation
        data = load_data_from_pickle(datapath)
        for _b2, c in zip(b2, colours):
            k = str(int(_b2))
            if not data.get(k, {}):
                # No cached results for this b2: generate the toys now
                data[k] = {'z0': [], 't0': [], 't1': []}
                for _b1 in b1:
                    logger.info('On (b1, b2) = (%s, %s).' % (int(_b1), int(_b2)))
                    z0, t0, t1 = nCRZ0_MC(s, [_b1, _b2], [[tau1, 0.], [0., tau2]], return_t0_and_t1 = True, sleep = 0.001, ntoys = 50000)
                    data[k]['z0'].append(z0)
                    data[k]['t0'].append(t0)
                    data[k]['t1'].append(t1)
            plt.plot(b1, data[k]['z0'], marker = 'o', color = c, linewidth = 0, label = 'Numerical: $b_2 = %s$' % int(_b2))
            b1Fine = logVector(b1[0], b1[-1], 1000)
            plt.plot(b1Fine, [nCRZ0_DiagTau(s, [_b1, _b2], [tau1, tau2]) for _b1 in b1Fine], linestyle = '-', markersize = 0, color = c, label = 'Asymptotic: $b_2 = %s$' % int(_b2))
            plt.plot(b1Fine, s / np.sqrt(s + b1Fine + _b2), linestyle = '--', markersize = 0, color = c, label = 'Simple: $b_2 = %s$' % int(_b2))
        plt.xlim((b1[0], b1[-1]))
        plt.ylim((0., 3.5))
        plt.xlabel('Background 1 yield in SR $b_1$ [a.u.]')
        plt.ylabel('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]')
        plt.xscale('log')
        # plt.title('1 SR + 2 CRs Asymptotic Significance: $s = %s$, $\\tau_1 = %s$, $\\tau_2 = %s$' % (int(s), int(tau1), int(tau2)))
        plt.legend(loc = 'upper right')
        plt.savefig(pjoin(plotdir, '1SRplus2CR.eps'), format = 'eps', dpi = 1200)
        plt.close()
        # Test-statistic distributions at the b2 = 5 point, for the smallest
        # and largest scanned b1 values
        axrange = (0., 25.)
        bins = 100
        for _b1 in [1., 1000.]:
            t0, t1 = data['5']['t0'][b1.index(_b1)], data['5']['t1'][b1.index(_b1)]
            plt.hist(t0, weights = len(t0) * [1. / len(t0)], range = axrange, bins = bins, histtype = 'step', color = 'b', label = '$f(t_0|\\mu^\\prime = 0)$')
            plt.hist(t1, weights = len(t1) * [1. / len(t1)], range = axrange, bins = bins, histtype = 'step', color = 'r', label = '$f(t_0|\\mu^\\prime = 1)$')
            plt.xlim(axrange)
            plt.xlabel('Test statistic $t_0$ [a.u.]')
            plt.ylabel('Normalized counts [a.u.]')
            plt.yscale('log')
            plt.legend()
            plt.savefig(pjoin(plotdir, '1SRplus2CR_b1eq%s.eps' % int(_b1)), format = 'eps', dpi = 1200)
            plt.close()
        # Only writes when the pickle does not already exist
        dump_data_to_pickle(data, datapath)
#####################
### SECTION 2.2.1 ###
#####################
    def Section2p2p1():
        # Figure: 3 SRs sharing 1 CR; compare numerical, asymptotic and simple
        # quadrature-combined significances as s1 is scanned.
        # Set the seed
        np.random.seed(44)
        datapath = pjoin(pickledir, 'Section2p2p1.pkl')
        s1 = [round(n) for n in logVector(1., 100., 10)]
        s2 = [25., 10., 10.]
        s3 = 12.
        b = [1000., 1000., 3000.]
        tau1 = 2.
        tau2 = 10.
        tau3 = 20.
        colours = ['g', 'b', 'r']
        # Toy results are cached in a pickle so re-runs skip the MC generation
        data = load_data_from_pickle(datapath)
        for _s2, _b, c in zip(s2, b, colours):
            k = str(int(_s2)) + '_' + str(int(_b))
            if not data.get(k, {}):
                data[k] = {'z0': [], 't0': [], 't1': []}
                for _s1 in s1:
                    logger.info('On (s1, s2, b) = (%s, %s, %s).' % (int(_s1), int(_s2), int(_b)))
                    # More toys above s1 = 75 -- presumably to better populate the
                    # test-statistic tail at larger significance (TODO confirm)
                    ntoys = 100000 if (_s1 > 75.) else 50000
                    logger.info('Using %s toys.' % ntoys)
                    z0, t0, t1 = nSRZ0_MC([_s1, _s2, s3], _b, [tau1, tau2, tau3], return_t0_and_t1 = True, sleep = 0.001, ntoys = ntoys)
                    data[k]['z0'].append(z0)
                    data[k]['t0'].append(t0)
                    data[k]['t1'].append(t1)
            plt.plot(s1, data[k]['z0'], marker = 'o', color = c, linewidth = 0, label = 'Numerical: $(s_2, b) = (%s, %s)$' % (int(_s2), int(_b)))
            s1Fine = logVector(s1[0], s1[-1], 1000)
            plt.plot(s1Fine, [nSRZ0([_s1, _s2, s3], _b, [tau1, tau2, tau3]) for _s1 in s1Fine], linestyle = '-', markersize = 0, color = c, label = 'Asymptotic: $(s_2, b) = (%s, %s)$' % (int(_s2), int(_b)))
            # "Simple" estimate: per-SR s/sqrt(s + b/tau), combined in quadrature
            plt.plot(s1Fine, np.sqrt((s1Fine / np.sqrt(s1Fine + _b / tau1)) ** 2 + (_s2 / np.sqrt(_s2 + _b / tau2)) ** 2 + (s3 / np.sqrt(s3 + _b / tau3)) ** 2), linestyle = '--', markersize = 0, color = c, label = 'Simple: $(s_2, b) = (%s, %s)$' % (int(_s2), int(_b)))
        plt.xlim((s1[0], s1[-1]))
        plt.ylim((0., 5.0))
        plt.xlabel('Signal yield in SR 1 $s_1$ [a.u.]')
        plt.ylabel('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]')
        plt.xscale('log')
        # plt.title('3 SRs + 1 CR Asymptotic Significance: $s_3 = %s$, $\\tau_1 = %s$, $\\tau_2 = %s$, $\\tau_3 = %s$' % (int(s3), int(tau1), int(tau2), int(tau3)))
        plt.legend(loc = 'upper left', bbox_to_anchor = (1.0, 1.02))
        plt.savefig(pjoin(plotdir, '3SRplus1CR.eps'), format = 'eps', dpi = 1200, bbox_inches = 'tight')
        plt.close()
        dump_data_to_pickle(data, datapath)
#####################
### SECTION 2.4.2 ###
#####################
    def Section2p4p2_vsB1():
        # Figure: 1 SR with two Gaussian-constrained (decorrelated) backgrounds;
        # scan b1 and compare numerical, asymptotic and simple significances.
        # Set the seed
        np.random.seed(45)
        datapath = pjoin(pickledir, 'Section2p4p2_vsB1.pkl')
        sigma1 = 5.
        sigma2 = 10.
        s = 10.
        b1 = [round(n) for n in logVector(1., 1000., 10)]
        b2 = [5., 25., 150.]
        # Linear response matrix: background i scales as 1 + sigma_i/100 * theta_i;
        # off-diagonal entries are flat (no cross-talk between nuisances)
        R = [[lambda th: 1. + sigma1 / 100. * th, lambda th: 1.], [lambda th: 1., lambda th: 1. + sigma2 / 100. * th]]
        # Identity S matrix: the two nuisance parameters are decorrelated
        S = [[1., 0.], [0., 1.]]
        colours = ['g', 'b', 'r']
        # Toy results are cached in a pickle so re-runs skip the MC generation
        data = load_data_from_pickle(datapath)
        for _b2, c in zip(b2, colours):
            k = str(int(_b2))
            if not data.get(k, {}):
                data[k] = {'z0': [], 't0': [], 't1': []}
                for _b1 in b1:
                    logger.info('On (b1, b2) = (%s, %s).' % (int(_b1), int(_b2)))
                    z0, t0, t1 = GaussZ0_MC(s, [_b1, _b2], R, S, return_t0_and_t1 = True, sleep = 0.001, ntoys = 50000)
                    data[k]['z0'].append(z0)
                    data[k]['t0'].append(t0)
                    data[k]['t1'].append(t1)
            plt.plot(b1, data[k]['z0'], marker = 'o', color = c, linewidth = 0, label = 'Numerical: $b_2 = %s$' % int(_b2))
            b1Fine = logVector(b1[0], b1[-1], 1000)
            # Percent uncertainties are converted to absolute widths for GaussZ0_Decorr
            plt.plot(b1Fine, [GaussZ0_Decorr(s, [_b1, _b2], [_b1 * sigma1 / 100., _b2 * sigma2 / 100.]) for _b1 in b1Fine], linestyle = '-', markersize = 0, color = c, label = 'Asymptotic: $b_2 = %s$' % int(_b2))
            plt.plot(b1Fine, s / np.sqrt(s + b1Fine + _b2 + (sigma1 / 100. * b1Fine) ** 2 + (sigma2 / 100. * _b2) ** 2), linestyle = '--', markersize = 0, color = c, label = 'Simple: $b_2 = %s$' % int(_b2))
        plt.xlim((b1[0], b1[-1]))
        plt.ylim((0., 3.5))
        plt.xlabel('Background 1 yield in SR $b_1$ [a.u.]')
        plt.ylabel('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]')
        plt.xscale('log')
        # plt.title('1 SR + 2 Gaussian Decorrelated Constraints Asymptotic Significance:\n$s = {}$, $\\sigma_1 = {}\\%$, $\\sigma_2 = {}\\%$'.format(int(s), int(sigma1), int(sigma2)))
        plt.legend(loc = 'upper right')
        plt.savefig(pjoin(plotdir, '1SRplus2GaussConst.eps'), format = 'eps', dpi = 1200)
        plt.close()
        dump_data_to_pickle(data, datapath)
def Section2p4p2_vsSigma():
sigma1 = np.hstack([logVector(0.1, 100., 15), logVector(100., 400., 3)[1:]])
sigma2 = [1., 10., 100.]
s = 10.
b1 = [25., 50., 50., 150.]
b2 = [25., 50., 150., 50.]
colours = ['gold', 'g', 'b', 'r']
fig, axs = plt.subplots(nrows = 2, ncols = 2, sharex = 'col', sharey = 'row', figsize = [2 * 6.0, 2 * 4.0])
axs[1, 1].axis('off')
for i, _sigma2 in enumerate(sigma2):
# Set the seed - let's use a fresh seed on each loop iteration, as we are saving separate pickles
# (this allows us to cleanly reproduce the results, per pickle, without throwing all of the toys in previous)
np.random.seed(60 + i)
# Dump a pickle for each sigma2 loop
datapath = pjoin(pickledir, 'Section2p4p2_vsSigma_sigma2eq%s.pkl' % int(_sigma2))
data = load_data_from_pickle(datapath)
if i == 0:
ax = axs[0, 0]
elif i == 1:
ax = axs[0, 1]
elif i == 2:
ax = axs[1, 0]
elif i == 3:
continue
else:
ax = None
for _b1, _b2, c in zip(b1, b2, colours):
k = str(int(_b1)) + '_' + str(int(_b2))
if not data.get(k, {}):
data[k] = {'z0': [], 't0': [], 't1': []}
for _sigma1 in sigma1:
logger.info('On (sigma1, sigma2, b1, b2) = (%s, %s, %s, %s).' % (round(_sigma1, 5), round(_sigma2, 5), int(_b1), int(_b2)))
z0, t0, t1 = GaussZ0_MC(s, [_b1, _b2], R(_sigma1, _sigma2), S, return_t0_and_t1 = True, sleep = 0.001, ntoys = 50000, retry_first = False, skip_failed_toys = True)
data[k]['z0'].append(z0)
data[k]['t0'].append(t0)
data[k]['t1'].append(t1)
ax.plot(sigma1, data[k]['z0'], marker = 'o', color = c, linewidth = 0, label = 'Numerical: $(b_1, b_2) = (%s, %s)$' % (int(_b1), int(_b2)) if i == 0 else '')
sigma1Fine = logVector(sigma1[0], sigma1[-1] if sigma1[-1] > 1000. else 1000., 1000)
ax.plot(sigma1Fine, [GaussZ0_Decorr(s, [_b1, _b2], [_b1 * _sigma1 / 100., _b2 * _sigma2 / 100.]) for _sigma1 in sigma1Fine], linestyle = '-', markersize = 0, color = c, label = 'Asymptotic: $(b_1, b_2) = (%s, %s)$' % (int(_b1), int(_b2)) if i == 0 else '')
ax.plot(sigma1Fine, s / np.sqrt(s + _b1 + _b2 + (sigma1Fine / 100. * _b1) ** 2 + (_sigma2 / 100. * _b2) ** 2), linestyle = '--', markersize = 0, color = c, label = 'Simple: $(b_1, b_2) = (%s, %s)$' % (int(_b1), int(_b2)) if i == 0 else '')
ax.set_ylim((0., 1.4))
if i != 1: ax.set_ylabel('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]')
ax.text(40, 1.2, '$s = {}$, $\\sigma_2 = {}\\%$'.format(int(s), int(_sigma2)), fontsize = 12, bbox = {'facecolor': 'white', 'pad': 10})
if i != 0:
ax.set_xlim((sigma1[0], sigma1[-1] if sigma1[-1] > 1000. else 1000.))
ax.set_xlabel('Background 1 yield uncertainty in SR $\\sigma_1$ [\\%]')
ax.set_xscale('log')
if i == 1: ax.xaxis.set_tick_params(labelbottom = True)
dump_data_to_pickle(data, datapath)
# fig.suptitle('1 SR + 2 Decorrelated Gaussian Constraints Asymptotic Significance')
axs[0, 0].legend(loc = 'upper left', bbox_to_anchor = (1.05, -0.15))
plt.subplots_adjust(hspace = 0.05, wspace = 0.05) # , top = 0.95, bottom = 0.05)
plt.savefig(pjoin(plotdir, '1SRplus2GaussConst_err.eps'), format = 'eps', dpi = 1200, bbox_inches = 'tight')
plt.close()
#####################
### SECTION 2.4.4 ###
#####################
    def Section2p4p4_Corr():
        # Figure: 1 SR with two Gaussian-constrained backgrounds whose nuisance
        # parameters are coupled via an off-diagonal S matrix (0.75); compared
        # against the decorrelated asymptotic formula and the simple estimate.
        # Set the seed
        np.random.seed(47)
        datapath = pjoin(pickledir, 'Section2p4p4_Corr.pkl')
        s = 10.
        b1 = [round(n) for n in logVector(1., 1000., 10)]
        b2 = 5.
        sigma1 = 35.
        sigma2 = 70.
        # Linear response matrix: background i scales as 1 + sigma_i/100 * theta_i
        R = [[lambda th: 1. + sigma1 / 100. * th, lambda th: 1.], [lambda th: 1., lambda th: 1. + sigma2 / 100. * th]]
        # S matrix with 0.75 off-diagonal coupling between the two nuisances
        S = [[1., 0.75], [0.75, 1.]]
        # Toy results are cached in a pickle so re-runs skip the MC generation
        data = load_data_from_pickle(datapath)
        if not all(data.get(k, []) for k in ['z0', 't0', 't1']):
            data.update({'z0': [], 't0': [], 't1': []})
            for _b1 in b1:
                logger.info('On b1 = %s.' % int(_b1))
                z0, t0, t1 = GaussZ0_MC(s, [_b1, b2], R, S, return_t0_and_t1 = True, sleep = 0.002, ntoys = 50000)
                data['z0'].append(z0)
                data['t0'].append(t0)
                data['t1'].append(t1)
        plt.plot(b1, data['z0'], marker = 'o', color = 'r', linewidth = 0, label = 'Numerical')
        b1Fine = logVector(b1[0], b1[-1], 1000)
        plt.plot(b1Fine, [GaussZ0(s = s, b = [_b1, b2], R = R, S = S) for _b1 in b1Fine], linestyle = '-', markersize = 0, color = 'r', label = 'Asymptotic (corr.)')
        # Same inputs but identity S: shows the effect of ignoring the correlation
        plt.plot(b1Fine, [GaussZ0(s = s, b = [_b1, b2], R = R, S = [[1., 0.], [0., 1.]]) for _b1 in b1Fine], linestyle = ':', markersize = 0, color = 'darkred', label = 'Asymptotic (decorr.)')
        plt.plot(b1Fine, s / np.sqrt(s + b1Fine + b2 + (sigma1 / 100. * b1Fine) ** 2 + (sigma2 / 100. * b2) ** 2), linestyle = '--', markersize = 0, color = 'lightcoral', label = 'Simple')
        plt.xlim((b1[0], b1[-1]))
        plt.ylim((0., 2.))
        plt.xlabel('Background 1 yield in SR $b_1$ [a.u.]')
        plt.ylabel('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]')
        plt.xscale('log')
        # plt.title('1 SR + 2 Gaussian Correlated Constraints Asymptotic Significance:\n$s = {}$, $b_2 = {}$, $\\sigma_1 = {}\\%$, $\\sigma_2 = {}\\%$'.format(int(s), int(b2), int(sigma1), int(sigma2)))
        plt.legend(loc = 'upper right')
        plt.savefig(pjoin(plotdir, '1SRplus2GaussConst_corr.eps'), format = 'eps', dpi = 1200)
        plt.close()
        dump_data_to_pickle(data, datapath)
#####################
### SECTION 2.4.5 ###
#####################
def Section2p4p5_Response():
# Set the seed
np.random.seed(49)
def smooth_interpolate(th, func1, func2, weight):
return weight(th) * func1(th) + (1. - weight(th)) * func2(th)
def heaviside(th, sigma_lo, sigma_hi):
return smooth_interpolate(th, lambda th: 1. + sigma_lo * th, lambda th: 1. + sigma_hi * th, lambda th: 1. - np.heaviside(th, 1.))
def arctan(th, sigma_lo, sigma_hi, k = 10.):
return smooth_interpolate(th, lambda th: 1. + sigma_lo * th, lambda th: 1. + sigma_hi * th, lambda th: (1. - 2. / np.pi * np.arctan(np.pi / 2. * k * th)) / 2.)
def tanh(th, sigma_lo, sigma_hi, k = 10.):
return smooth_interpolate(th, lambda th: 1. + sigma_lo * th, lambda th: 1. + sigma_hi * th, lambda th: (1. - np.tanh(k * th)) / 2.)
def erf(th, sigma_lo, sigma_hi, k = 10.):
return smooth_interpolate(th, lambda th: 1. + sigma_lo * th, lambda th: 1. + sigma_hi * th, lambda th: (1. - scipy.special.erf(k * th)) / 2.)
def sigmoid(th, sigma_lo, sigma_hi, k = 10.):
return smooth_interpolate(th, lambda th: 1. + sigma_lo * th, lambda th: 1. + sigma_hi * th, lambda th: 1. - 1. / (1. + np.exp(-k * th)))
response_functions = {'Heaviside': (heaviside, 'k', '-'), 'arctan': (arctan, 'g', '--'), 'tanh': (tanh, 'b', ':'), 'erf': (erf, 'r', '-.'), 'sigmoid': (sigmoid, 'gold', '-')}
sigma_lo = 0.20
sigma_hi = 0.35
th = np.linspace(-1., +1., 1000)
for l, (f, c, ls) in response_functions.items():
plt.plot(th, f(th, sigma_lo, sigma_hi), color = c, label = l, linestyle = ls)
plt.xlim((th[0], th[-1]))
plt.ylim((1. - sigma_lo, 1. + sigma_hi))
plt.xlabel('Nuisance parameter $\\theta$ [a.u.]')
plt.ylabel('Response function $R(\\theta)$ [a.u.]')
# plt.title('Different Response Functions')
plt.legend(loc = 'upper left')
plt.savefig(pjoin(plotdir, 'response_functions.eps'), format = 'eps', dpi = 1200)
plt.xlim((-0.2, +0.2))
plt.ylim((0.95, 1.075))
plt.savefig(pjoin(plotdir, 'response_functions_zoomed.eps'), format = 'eps', dpi = 1200)
plt.close()
# 1st derivatives:
th = np.linspace(-1., +1., 1000)
for l, (f, c, ls) in response_functions.items():
plt.plot(th, scipy.misc.derivative(lambda th: f(th, sigma_lo, sigma_hi), th, dx = 1e-6), color = c, label = l, linestyle = ls)
plt.xlim((th[0], th[-1]))
plt.ylim((0.15, 0.40))
plt.xlabel('Nuisance parameter $\\theta$ [a.u.]')
plt.ylabel('Derivative of response function $dR(\\theta)/d\\theta$ [a.u.]')
# plt.title('Dervatives of Different Response Functions')
plt.legend(loc = 'upper left')
plt.savefig(pjoin(plotdir, 'response_functions_derivatives.eps'), format = 'eps', dpi = 1200)
plt.close()
s = 10.
b1 = logVector(1., 10000., 100)
b2 = 5.
sigma1_lo = 20. / 100.
sigma1_hi = 35. / 100.
sigma2_lo = 70. / 100.
sigma2_hi = 90. / 100.
R = lambda sigma1_lo, sigma1_hi, sigma2_lo, sigma2_hi: [[lambda th: f(th, sigma1_lo, sigma1_hi), lambda th: 1.], [lambda th: 1., lambda th: f(th, sigma2_lo, sigma2_hi)]]
S = [[1., 0.75], [0.75, 1.]]
for l, (f, c, ls) in response_functions.items():
plt.plot(b1, [GaussZ0(s = s, b = [_b1, b2], R = R(sigma1_lo, sigma1_hi, sigma2_lo, sigma2_hi), S = S) for _b1 in b1], linestyle = ls, markersize = 0, color = c, label = l)
plt.xlim((b1[0], b1[-1]))
plt.ylim((0.001, 10.))
plt.xlabel('Background 1 yield in SR $b_1$ [a.u.]')
plt.ylabel('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]')
plt.xscale('log')
plt.yscale('log')
# plt.title('Sensitivities for Different Response Functions:\n$s = {}$, $b_2 = {}$'.format(int(s), int(b2)))
plt.legend(loc = 'upper right')
plt.savefig(pjoin(plotdir, 'response_functions_z0_b2eq%s.eps' % int(b2)), format = 'eps', dpi = 1200, bbox_inches = 'tight')
plt.close()
s = 100.
b2 = 10000.
for l, (f, c, ls) in response_functions.items():
plt.plot(b1, [GaussZ0(s = s, b = [_b1, b2], R = R(sigma1_lo, sigma1_hi, sigma2_lo, sigma2_hi), S = S) for _b1 in b1], linestyle = ls, markersize = 0, color = c, label = l)
plt.xlim((b1[0], b1[-1]))
plt.ylim((0.008, 0.02))
plt.xlabel('Background 1 yield in SR $b_1$ [a.u.]')
plt.ylabel('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]')
plt.xscale('log')
plt.yscale('log')
# plt.title('Sensitivities for Different Response Functions:\n$s = {}$, $b_2 = {}$'.format(int(s), int(b2)))
plt.legend(loc = 'upper right')
plt.savefig(pjoin(plotdir, 'response_functions_z0_b2eq%s.eps' % int(b2)), format = 'eps', dpi = 1200, bbox_inches = 'tight')
plt.close()
#####################
### SECTION 2.4.6 ###
#####################
def Section2p4p6_CPU():
# Set the seed
np.random.seed(48)
datapath = pjoin(pickledir, 'Section2p4p6_CPU.pkl')
s = 10.
b1 = 10.
b2 = 5.
sigma1 = 35.
sigma2 = 70.
R = [[lambda th: 1. + sigma1 / 100. * th, lambda th: 1.], [lambda th: 1., lambda th: 1. + sigma2 / 100. * th]]
S = [[1., 0.75], [0.75, 1.]]
ntoys = [round(n) for n in logVector(1000, 1000000, 40)]
data = load_data_from_pickle(datapath)
if not all(data.get(k, []) for k in ['z0', 't0', 't1', 'cpu']):
data.update({'z0': [], 't0': [], 't1': [], 'cpu': []})
for _ntoys in ntoys:
logger.info('On ntoys = %s.' % int(_ntoys))
logging.getLogger(asymptotic_formulae.__name__).setLevel(level = logging.WARNING)
start = time.clock()
z0, t0, t1 = GaussZ0_MC(s, [b1, b2], R, S, return_t0_and_t1 = True, sleep = 0.001, ntoys = _ntoys, retry_first = False, skip_failed_toys = True)
stop = time.clock()
logging.getLogger(asymptotic_formulae.__name__).setLevel(level = logging.DEBUG)
data['z0'].append(z0)
data['t0'].append(t0)
data['t1'].append(t1)
delta = stop - start
logger.info('Z0 = %s, CPU time = %s s.' % (z0, delta))
data['cpu'].append(delta)
if not all(data.get(k, []) for k in ['cpu_asymptotic', 'z0_asymptotic']):
data['cpu_asymptotic'] = []
data['z0_asymptotic'] = []
for i in range(len(ntoys)):
logger.info('On iteration %s.' % i)
logging.getLogger(asymptotic_formulae.__name__).setLevel(level = logging.WARNING)
start = time.clock()
z0 = GaussZ0(s = s, b = [b1, b2], R = R, S = S)
stop = time.clock()
logging.getLogger(asymptotic_formulae.__name__).setLevel(level = logging.DEBUG)
delta = stop - start
logger.info('CPU time = %s s.' % delta)
data['cpu_asymptotic'].append(delta)
data['z0_asymptotic'].append(z0)
z0 = GaussZ0(s = s, b = [b1, b2], R = R, S = S)
fig = plt.figure()
fig, axs = plt.subplots(2, 1, sharex = True)
fig.subplots_adjust(hspace = 0.1)
# fig.suptitle('CPU Comparisons: Numerical vs. Asymptotic for Gaussian Constraints')
axs[0].plot(ntoys, data['z0'], color = 'darkorange', label = 'Numerical')
axs[0].plot(ntoys, data['z0_asymptotic'], color = 'navy', label = 'Asymptotic')
axs[0].set_ylabel('Significance of discovery [a.u.]')
axs[0].set_ylim((1.15, 1.30))
axs[0].legend(loc = 'upper right')
axs[1].plot(ntoys, data['cpu'], color = 'darkorange', label = 'Numerical')
axs[1].plot(ntoys, data['cpu_asymptotic'], color = 'navy', label = 'Asymptotic')
axs[1].set_xlabel('Number of toys [a.u.]')
axs[1].set_ylabel('CPU time [s]')
axs[1].set_xlim((ntoys[0], ntoys[-1]))
axs[1].set_ylim((1e-3, 1e4))
axs[1].set_xscale('log')
axs[1].set_yscale('log')
plt.savefig(pjoin(plotdir, 'Section2p4p2_CPU.eps'), format = 'eps', dpi = 1200)
plt.close()
dump_data_to_pickle(data, datapath)
    # Regenerate every figure; each section caches its toy results under pickles/
    # so subsequent runs only redo the plotting.
    Section2p1p1()
    Section2p1p2()
    Section2p2p1()
    Section2p4p2_vsB1()
    Section2p4p2_vsSigma()
    Section2p4p4_Corr()
    Section2p4p5_Response()
    Section2p4p6_CPU()
if __name__ == '__main__':
    main()
| 46.85489
| 272
| 0.523564
| 4,117
| 29,706
| 3.672334
| 0.111732
| 0.006548
| 0.017263
| 0.013096
| 0.650175
| 0.592169
| 0.556783
| 0.512335
| 0.485085
| 0.472849
| 0
| 0.070525
| 0.286878
| 29,706
| 633
| 273
| 46.92891
| 0.643174
| 0.098869
| 0
| 0.408034
| 0
| 0.021142
| 0.126919
| 0.026866
| 0
| 0
| 0
| 0
| 0.016913
| 1
| 0.048626
| false
| 0.004228
| 0.035941
| 0.012685
| 0.112051
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d783ab1b46b55a24509d554110a68bdbb340935
| 11,660
|
py
|
Python
|
montecarlo/mcpy/monte_carlo.py
|
v-asatha/EconML
|
eb9ac829e93abbc8a163ab09d905b40370b21b1a
|
[
"MIT"
] | null | null | null |
montecarlo/mcpy/monte_carlo.py
|
v-asatha/EconML
|
eb9ac829e93abbc8a163ab09d905b40370b21b1a
|
[
"MIT"
] | null | null | null |
montecarlo/mcpy/monte_carlo.py
|
v-asatha/EconML
|
eb9ac829e93abbc8a163ab09d905b40370b21b1a
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import sys
import numpy as np
from joblib import Parallel, delayed
import joblib
import argparse
import importlib
from itertools import product
import collections
from copy import deepcopy
from mcpy.utils import filesafe
from mcpy import plotting
def check_valid_config(config):
    """
    Performs a basic check of the config file, checking if the necessary
    subsections are present.
    If multiple config files are being made that use the same dgps and/or methods,
    it may be helpful to tailor the config check to those dgps and methods. That way,
    one can check that the correct parameters are being provided for those dgps and methods.
    This is specific to one's implementation, however.
    """
    # Required top-level sections, paired with their exact failure messages
    required_sections = [
        ('type', "config dict must specify config type"),
        ('dgps', "config dict must contain dgps"),
        ('dgp_opts', "config dict must contain dgp_opts"),
        ('method_opts', "config dict must contain method_opts"),
        ('mc_opts', "config dict must contain mc_opts"),
        ('metrics', "config dict must contain metrics"),
        ('methods', "config dict must contain methods"),
        ('plots', "config dict must contain plots"),
        ('single_summary_metrics', "config dict must specify which metrics are plotted in a y-x plot vs. as a single value per dgp and method"),
        ('target_dir', "config must contain target_dir"),
        ('reload_results', "config must contain reload_results"),
    ]
    for section, message in required_sections:
        assert section in config, message
    # The monte carlo options must themselves carry the experiment count and seed
    for option in ('n_experiments', 'seed'):
        assert option in config['mc_opts'], "config[mc_opts] must contain " + option
class MonteCarlo:
    """
    This class contains methods to run (multiple) monte carlo experiments
    Experiments are constructed from a config file, which mainly consists of
    references to the implementations of four different kinds of items, in
    addition to various parameters for the experiment. See the README for
    a description of the config file, or look at an example in the configs directory.
    The four main items are:
    - data generating processes (dgps): functions that generate data according to
      some assumed underlying model
    - methods: functions that take in data and produce other data. In our case,
      they train on data produced by DGPs and then produce counterfactual estimates
    - metrics: functions that take in the results of estimators and calculate metrics
    - plots: functions that take in the metric results, etc. and generate plots
    """
    def __init__(self, config):
        # config: dict holding the dgps/methods/metrics/plots and their options;
        # validated up-front so failures happen before any expensive computation
        self.config = config
        check_valid_config(self.config)
        # these param strings are for properly naming results saved to disk
        config['param_str'] = '_'.join(['{}_{}'.format(filesafe(k), v) for k,v in self.config['mc_opts'].items()])
        config['param_str'] += '_' + '_'.join(['{}_{}'.format(filesafe(k), v) for k,v in self.config['dgp_opts'].items()])
        config['param_str'] += '_' + '_'.join(['{}_{}'.format(filesafe(k), v) for k,v in self.config['method_opts'].items()])
    def experiment(self, instance_params, seed):
        """
        Given instance parameters to pass on to the data generating processes,
        runs an experiment on a single randomly generated instance of data and returns the
        parameter estimates for each method and the evaluated metrics for each method.
        Parameters
        ----------
        instance_params : dictionary
            instance paramaters that DGP functions may use
        seed : int
            random seed for random data generation
        Returns
        -------
        experiment_results : dictionary
            results of the experiment, depending on what the methods return.
            These are stored by dgp_name and then by method_name.
        true_params : dictionary
            true parameters of the DGP, indexed by dgp_name, used for metrics
            calculation downstream
        """
        np.random.seed(seed)
        experiment_results = {}
        true_params = {}
        for dgp_name, dgp_fn in self.config['dgps'].items():
            # Generate one data instance per dgp, then run every method on it
            data, true_param = dgp_fn(self.config['dgp_opts'][dgp_name], instance_params[dgp_name], seed)
            true_params[dgp_name] = true_param
            experiment_results[dgp_name] = {}
            for method_name, method in self.config['methods'].items():
                experiment_results[dgp_name][method_name] = method(data, self.config['method_opts'][method_name], seed)
        return experiment_results, true_params
    def run(self):
        """
        Runs multiple experiments in parallel on randomly generated instances and samples and returns
        the results for each method and the evaluated metrics for each method across all
        experiments.
        Returns
        -------
        simulation_results : dictionary
            dictionary indexed by [dgp_name][method_name] for individual experiment results
        metric_results : dictionary
            dictionary indexed by [dgp_name][method_name][metric_name]
        true_param : dictionary
            dictionary indexed by [dgp_name]
        """
        random_seed = self.config['mc_opts']['seed']
        if not os.path.exists(self.config['target_dir']):
            os.makedirs(self.config['target_dir'])
        # Instance parameters are drawn once per dgp and shared by all experiments
        instance_params = {}
        for dgp_name in self.config['dgps']:
            instance_params[dgp_name] = self.config['dgp_instance_fns'][dgp_name](self.config['dgp_opts'][dgp_name], random_seed)
        # results_file = os.path.join(self.config['target_dir'], 'results_{}.jbl'.format(self.config['param_str']))
        results_file = os.path.join(self.config['target_dir'], 'results_seed{}.jbl'.format(random_seed))
        if self.config['reload_results'] and os.path.exists(results_file):
            # Reuse cached experiment results from a previous run
            results = joblib.load(results_file)
        else:
            # Each experiment gets a distinct, reproducible seed offset
            results = Parallel(n_jobs=-1, verbose=1)(
                    delayed(self.experiment)(instance_params, random_seed + exp_id)
                    for exp_id in range(self.config['mc_opts']['n_experiments']))
            joblib.dump(results, results_file)
        simulation_results = {} # note that simulation_results is a vector of individual experiment_results. from experiment()
        metric_results = {}
        true_params = {}
        # Re-index from [experiment][dgp][method] to [dgp][method][experiment]
        for dgp_name in self.config['dgps'].keys():
            simulation_results[dgp_name] = {}
            metric_results[dgp_name] = {}
            for method_name in self.config['methods'].keys():
                simulation_results[dgp_name][method_name] = [results[i][0][dgp_name][method_name] for i in range(self.config['mc_opts']['n_experiments'])]
                true_params[dgp_name] = [results[i][1][dgp_name] for i in range(self.config['mc_opts']['n_experiments'])]
                metric_results[dgp_name][method_name] = {}
                for metric_name, metric_fn in self.config['metrics'].items():
                    # for metric_name, metric_fn in self.config['metrics'][method_name].items(): # for method specific parameters
                    metric_results[dgp_name][method_name][metric_name] = metric_fn(simulation_results[dgp_name][method_name], true_params[dgp_name])
        for plot_name, plot_fn in self.config['plots'].items():
            # for plot_name, plot_fn in self.config['plots'][method_name].items(): # for method specific plots
            if isinstance(plot_fn, dict):
                # Dict-valued plot specs are handled by the generic instance plotter
                plotting.instance_plot(plot_name, simulation_results, metric_results, self.config, plot_fn)
            else:
                plot_fn(plot_name, simulation_results, metric_results, true_params, self.config)
        return simulation_results, metric_results, true_params
class MonteCarloSweep:
    """
    This class contains methods to run sets of multiple monte carlo experiments
    where each set of experiments has different parameters (for the dgps and methods, etc.).
    This enables sweeping through parameter values to generate results for each permutation
    of parameters. For example, running a simulation when the number of samples a specific DGP
    generates is 100, 1000, or 10000.
    """
    def __init__(self, config):
        # config: same shape as for MonteCarlo, except dgp options may be
        # lists/arrays of values, which are swept over in run()
        self.config = config
        check_valid_config(self.config)
        # these param strings are for properly naming results saved to disk;
        # swept (sequence-valued) parameters are rendered as '<min>_to_<max>'
        config['param_str'] = '_'.join(['{}_{}'.format(filesafe(k), self.stringify_param(v)) for k,v in self.config['mc_opts'].items()])
        config['param_str'] += '_' + '_'.join(['{}_{}'.format(filesafe(k), self.stringify_param(v)) for k,v in self.config['dgp_opts'].items()])
        config['param_str'] += '_' + '_'.join(['{}_{}'.format(filesafe(k), self.stringify_param(v)) for k,v in self.config['method_opts'].items()])
    def stringify_param(self, param):
        """
        Parameters
        ----------
        param : list
            list denoting the various values a parameter should take
        Returns
        -------
        A string representation of the range of the values that parameter will take,
        or the parameter itself when it is a scalar (has no length)
        """
        if hasattr(param, "__len__"):
            return '{}_to_{}'.format(np.min(param), np.max(param))
        else:
            return param
    def run(self):
        """
        Runs many monte carlo simulations for all the permutations of parameters
        specified in the config file.
        Returns
        -------
        sweep_keys : list
            list of all the permutations of parameters for each dgp
        sweep_sim_results : list
            list of simulation results for each permutation of parameters for each dgp
        sweep_metrics : list
            list of metric results for each permutation of parameters for each dgp
        sweep_true_params : list
            list of true parameters for each permutation of parameters for each dgp
        """
        # currently duplicates computation for the dgps because all only one dgp param changed each config
        # need to make it so that every inst_config is different for each dgp
        for dgp_name in self.config['dgp_opts'].keys():
            # Sequence-valued dgp options define the sweep axes for this dgp
            dgp_sweep_params = []
            dgp_sweep_param_vals = []
            for dgp_key, dgp_val in self.config['dgp_opts'][dgp_name].items():
                if hasattr(dgp_val, "__len__"):
                    dgp_sweep_params.append(dgp_key)
                    dgp_sweep_param_vals.append(dgp_val)
            # NOTE(review): these accumulators reset on every dgp iteration, so only
            # the last dgp's sweep survives the loop -- related to the TODO above
            sweep_keys = []
            sweep_sim_results = []
            sweep_metrics = []
            sweep_true_params = []
            inst_config = deepcopy(self.config)
            # One full MonteCarlo run per permutation of the swept values
            for vec in product(*dgp_sweep_param_vals):
                setting = list(zip(dgp_sweep_params, vec))
                for k,v in setting:
                    inst_config['dgp_opts'][dgp_name][k] = v
                simulation_results, metrics, true_params = MonteCarlo(inst_config).run()
                sweep_keys.append(setting)
                sweep_sim_results.append(simulation_results)
                sweep_metrics.append(metrics)
                sweep_true_params.append(true_params)
        for plot_name, plot_fn in self.config['sweep_plots'].items():
            if isinstance(plot_fn, dict):
                # BUGFIX: previously passed the undefined name 'plot_key' here
                # (NameError); the loop variable is plot_name
                plotting.sweep_plot(plot_name, sweep_keys, sweep_sim_results, sweep_metrics, self.config, plot_fn)
            else:
                plot_fn(plot_name, sweep_keys, sweep_sim_results, sweep_metrics, sweep_true_params, self.config)
        return sweep_keys, sweep_sim_results, sweep_metrics, sweep_true_params
| 48.786611
| 154
| 0.660806
| 1,521
| 11,660
| 4.879684
| 0.186719
| 0.057936
| 0.029103
| 0.021827
| 0.405416
| 0.361223
| 0.246834
| 0.223255
| 0.20062
| 0.147265
| 0
| 0.001823
| 0.247427
| 11,660
| 238
| 155
| 48.991597
| 0.843989
| 0.357204
| 0
| 0.136752
| 0
| 0.008547
| 0.151813
| 0.003178
| 0.034188
| 0
| 0
| 0
| 0.111111
| 1
| 0.059829
| false
| 0
| 0.102564
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d7ad5ef06de97e8b617443c00cdb60123831b97
| 5,845
|
py
|
Python
|
MusicGame.py
|
kfparri/MusicGame
|
f2914cae7a68585ca1a569c78ac13f68c1adb827
|
[
"MIT"
] | null | null | null |
MusicGame.py
|
kfparri/MusicGame
|
f2914cae7a68585ca1a569c78ac13f68c1adb827
|
[
"MIT"
] | null | null | null |
MusicGame.py
|
kfparri/MusicGame
|
f2914cae7a68585ca1a569c78ac13f68c1adb827
|
[
"MIT"
] | null | null | null |
#------------------------------------------------------------------------------------------------------
# File Name: MusicGame.py
# Author: Kyle Parrish
# Date: 7/4/2014
# Description: This is a simple program that I wrote for the raspberry pi so that my daughter can
#   play with it. It is a simple program that plays a different sound with every keystroke. It also
# displays a simple shape pattern on the screen with each keypress. The pi can also be setup to
# allow users to change the sounds by uploading them to a web form on the pi itself. This code
# will be included when it is finished.
# Change log:
# 4.30.15 - Updated the header to test out Visual Studio Code git integration
# 9.18.15 - Started making some changes to the application. Natalie is starting to enjoy
# the application so I'm starting to make it do more:
# - Updated the code to put circles as well as squares on the screen.
#------------------------------------------------------------------------------------------------------
# Basic imports for the game
import os,sys,datetime, sqlite3
import pygame
# I don't believe that I need the time references anymore, to be removed with next commit
#from time import strftime, localtime
from random import randint
from pygame.locals import *
# Setup basic constants
test = 640  # NOTE(review): only used to initialize SCREEN_WIDTH -- could be inlined
# Screen height and width
SCREEN_WIDTH = test
SCREEN_HEIGHT = 480
#CENTER_POINT = (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2)
#LOWER_CENTER = (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4)
#CENTER_RECT_HEIGHT = 40
#CLOCK_TEXT_FONT = 48
# Colors, any of these can be used in the program; (R, G, B) tuples
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
MATRIX_GREEN = (0, 255, 21)
# Code taken from: http://code.activestate.com/recipes/521884-play-sound-files-with-pygame-in-a-cross-platform-m/
# global constants
FREQ = 44100  # sample rate, same as audio CD
BITSIZE = -16  # 16 bit samples; negative size means signed in pygame -- TODO confirm
CHANNELS = 2  # 1 == mono, 2 == stereo
BUFFER = 1024  # audio buffer size in no. of samples
FRAMERATE = 30  # how often to check if playback has finished
# Sound effect file names (.ogg); loaded into pygame Sound objects in main()
sounds = ["Typewrit-Intermed-538_hifi.ogg",
          "Typewrit-Bell-Patrick-8344_hifi.ogg",
          "Arcade_S-wwwbeat-8528_hifi.ogg",
          "Arcade_S-wwwbeat-8529_hifi.ogg",
          "Arcade_S-wwwbeat-8530_hifi.ogg",
          "Arcade_S-wwwbeat-8531_hifi.ogg",
          "PowerUp-Mark_E_B-8070_hifi.ogg",
          "PulseGun-Mark_E_B-7843_hifi.ogg",
          "PulseSho-Mark_E_B-8071_hifi.ogg",
          "SineySpa-Mark_E_B-7844_hifi.ogg",
          "ToySpace-Mark_E_B-7846_hifi.ogg",
          "ZipUp-Mark_E_B-8079_hifi.ogg"]
# loaded pygame.mixer.Sound objects, populated by main()
soundFiles = []
def playsound(soundfile):
    """Play an already-loaded pygame Sound on a default mixer channel.

    Playback is asynchronous: this returns immediately rather than blocking
    until the sound finishes. (The previous docstring claimed blocking
    playback and loading from disk, which did not match the implementation;
    the dead commented-out blocking variant has been removed.)
    """
    soundfile.play()
def drawMyRect(surface):
    """Draw a red, 5px-outlined 40x40 square at a random on-screen spot."""
    left = randint(0, 600)
    top = randint(0, 440)
    pygame.draw.rect(surface, RED, (left, top, 40, 40), 5)
    return surface
def drawMyCircle(surface):
    """Draw a green, 5px-outlined circle of radius 20 at a random spot."""
    center = (randint(0, 600), randint(0, 440))
    pygame.draw.circle(surface, GREEN, center, 20, 5)
    return surface
def main():
    """Initialize pygame and run the keyboard-driven sound/shape loop.

    Every keypress plays a sound and draws an alternating circle/square;
    Esc clears the screen; Ctrl+Esc (or closing the window) quits.

    Cleanups vs. the original: removed the unreachable `return` after
    sys.exit(), the unused soundfile/soundfile3 locals, dead commented-out
    code, and a stray semicolon.
    """
    pygame.mixer.pre_init(44100, -16, 2, 1024)
    pygame.init()
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
    pygame.display.set_caption('Music Game')
    drawCircle = True  # alternate between circles and rectangles
    # create background
    background = pygame.Surface(screen.get_size())
    background = background.convert()
    # preload all the sound files so playback does not hit the disk
    for file in sounds:
        tempsound = pygame.mixer.Sound(file)
        soundFiles.append(tempsound)
    # hide the mouse -- disabled while developing
    #pygame.mouse.set_visible(False)
    screen.blit(background, (0, 0))
    pygame.display.update()
    # main loop
    while 1:
        # This needs to change to match the new way of checking that I found on the web
        # http://stackoverflow.com/questions/12556535/programming-pygame-so-that-i-can-press-multiple-keys-at-once-to-get-my-character
        updateScreen = False
        resetScreen = False
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == KEYDOWN:
                keys = pygame.key.get_pressed()
                if keys[K_ESCAPE] and keys[K_LCTRL]:
                    # Ctrl+Esc quits the game
                    pygame.quit()
                    sys.exit()
                elif keys[K_ESCAPE]:
                    # Esc alone wipes the drawing and plays the bell sound
                    resetScreen = True
                    soundFiles[1].play()
                else:
                    # any other key draws a shape and plays the typewriter sound
                    updateScreen = True
                    soundFiles[0].play()
        if resetScreen:
            # replace the background with a fresh blank surface
            background = pygame.Surface(screen.get_size())
            background = background.convert()
            screen.blit(background, (0, 0))
            pygame.display.update()
        if updateScreen:
            if drawCircle:
                drawMyCircle(background)
            else:
                drawMyRect(background)
            drawCircle = not drawCircle
            screen.blit(background, (0, 0))
            pygame.display.update()
if __name__ == '__main__': main()
| 34.791667
| 134
| 0.609239
| 753
| 5,845
| 4.64409
| 0.410359
| 0.028024
| 0.010295
| 0.016014
| 0.199028
| 0.136975
| 0.107235
| 0.107235
| 0.09551
| 0.034887
| 0
| 0.047729
| 0.257998
| 5,845
| 167
| 135
| 35
| 0.758589
| 0.441574
| 0
| 0.206897
| 0
| 0
| 0.140713
| 0.135084
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045977
| false
| 0
| 0.045977
| 0
| 0.126437
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d7e6b625734d32d6eb3ec106401a004caa7962c
| 5,763
|
py
|
Python
|
DeepLearning/DeepLearning/07_Deep_LeeYS/Week_1/4. Single-Layer NN/4) Neural Network.py
|
ghost9023/DeepLearningPythonStudy
|
4d319c8729472cc5f490935854441a2d4b4e8818
|
[
"MIT"
] | 1
|
2019-06-27T04:05:59.000Z
|
2019-06-27T04:05:59.000Z
|
DeepLearning/DeepLearning/07_Deep_LeeYS/Week_1/4. Single-Layer NN/4) Neural Network.py
|
ghost9023/DeepLearningPythonStudy
|
4d319c8729472cc5f490935854441a2d4b4e8818
|
[
"MIT"
] | null | null | null |
DeepLearning/DeepLearning/07_Deep_LeeYS/Week_1/4. Single-Layer NN/4) Neural Network.py
|
ghost9023/DeepLearningPythonStudy
|
4d319c8729472cc5f490935854441a2d4b4e8818
|
[
"MIT"
] | null | null | null |
#4) 신경망 구현하기
##########KEYWORD###############
################################
#신경망은 입력층에서 출력층으로 기본적으로 한 방향으로 흐른다. 한 싸이클이 끝나면 역전파 알고리즘을 통해
#계속 학습으 진행하지만 역전파 알고리즘과 같은 고급 알고리즘은 다음장에서..
#한 방향으로만 정보가 전방으로 전달되는 신경망을 피드포워드 신경망(Feed Forward NN)이라고 한다.
#기본적으로 신경망은 입력층에서 데이터 입력을 받은 뒤 은닉층에서 데이터를 학습하고 출력층으로 결과를 내보낸다.
#입력층의 역할은 입력 데이터를 받아들이는 것이고 이를 위해서 입력층의 노드(뉴런) 개수는 입력데이터의 특성 갯수와 일치해야 한다.
#은닉층은 학습을 진행하는 층으로 은닉층의 노드 수와 은닉층 Layer 수는 설계자가 경험으로 얻어내야 한다.
#뉴런의 수가 너무 많으면 오버피팅이 발생하고 너무 적으면 언더피팅이 발생하여 학습이 되지 않음.
#또한 은닉층의 개수가 지나치게 많은 경우 비효율적이다.
#단순히 은닉층의 개수를 2배 늘리면 연산에 걸리는 시간은 400% 증가하지만 학습효율은 10%만 증가하기도 한다.
#출력층은 은닉층을 거쳐서 얻어낸 결과를 해결하고자 하는 문제에 맞게 만들어 준다.
#필기체 숫자 0부터 9까지를 인식하는 신경망이면 출력층이 10개가 될 것이고 개와 고양이를 분류하는 신경망이라면 3개의 출력층이 된다.
#다차원 배열을 이용하여 층이 3개인 다층 신경망을 간단하게 구현하자.행렬곱과 각 행렬의 원소의 위치를 잘 확인하면 어렵지 않다.
#그림25 P35
#
#Layer Node 수 Node Shape Weight Shape Bias Shape 계산식
#입력층 2 2차원 벡터 2 X 3 Matrix 3차원 Vector 은닉층(1) = 활성화함수((입력층*가중치1) + 편향1)
#은닉층(1) 3 3차원 벡터 3 X 2 Matrix 2차원 Vector 은닉층(2) = 활성화함수((은닉층1) * 가중치2 + 편향2)
#은닉층(2) 2 2차원 벡터 2 X 2 Matrix 2차원 Vector 출력층 = 활성화함수((은닉층2) * 가중치3 + 편향3)
#출력층 2 2차원 벡터
#그림을 확인해보면 3층 신경망이 어떻게 구성되어 있는지 확인할 수 있다.
#입력층은 2개이며 각 층마다 편향이 존재한다. 은닉층은 2개 층으로 구성되어 있고 출력층의 출력값은 2개이다.
#위 그림을 확인해보면
#w12^(1), a2(1) 와 같은 형식으로 표기되어 있는 것을 확인 할 수 있다. 우측 상단의 (1)은 1층의 가중치를 의미한다.
#우측 하단의 12에서 1은 다음층의 뉴런번호 2는 앞층의 뉴런 번호를 의미한다. 따라서 w12^(!)은 앞의 1번 뉴런에서 2번 뉴런으로 이동하는 신경망 1층의 가중치
#를 의미한다.
#예제 3층 신경망의 구조를 보면 입력층은 2개로 구성되어 있고 1층에서 편향이 1로 존재한다. 여기서 가중치에 의해
#입력 값은 a1(1) ... 에 입력된다. 이 입력값을 수식으로 나타내면
#a1(1) = w11^(1)x1 + w12^(1)x2 + b1^(1) 으로 표현할 수 있다.
#이를 행렬 내적으로 표현하면 1층의 노드를 A^(1) = (a1^(1),a2^(1),a3^(1)), 1층의 가중치를
#W^(1) ...
#이를 이용해서 numpy의 다차원 배열을 이용하면 신경망 1층을 파이선 코드로 짤 수 있다.
#마찬가지로 1층의 출력값을 다시 2층의 입력값으로 넣고 똑같은 방식으로 입력노드 행렬(1층의 출력노드 행렬), 가중치 행렬, 편향 행렬의
#행렬 연산을 통해 2층의 출력 노드 행렬을 구할 수 있게 된다.
#마찬가지로 신경망 1층에서 행렬 연산식을 통해 출력값을 구했던 것처럼 1층의 출력값을 2층의 입력값으로 연결해주고 2층의 가중치와 2층의 편향을
#더해주면 2층의 출력값이 완성된다.
#마지막으로 그림30 처럼 2층의 출력값을 동일한 방법으로 출력층의 입력값으로 넣고 출력층 사이의 가중치와 편향을 더해준 동일한 방법으로
#식을 계산하면 최정적인 출력값이 뽑히게 된다. 한가지 위 과정과 다른 점이 있다면 출력층의 활성함수는 풀고자하는 문제의 성질에 맞게 정한다.
#회귀가 목적인 신경망은 출력층에 항등함수를 사용하고 이중클래스 분류에는 시그모이드 함수를 다중 클래스에는 소프트맥스 함수를 일반적으로 사용.
#그럼 출력층에 사용하는 활성함수를 알아보자.
#회귀에는 항등함수, 분류에는 소프트맥스 함수를 보통 사용한다. 회귀는 입력데이터의 연속적인 수치를 예측하는 것을 의미하고 분류는 각 데이터가 어떤
#범주에 속하는지 나누는 것을 의미한다. 항등함수는 입력값이 그대로 출력되는 함수로 흔히 알고 있는 f(x) = x 를 의미한다.
#파이선 코드로는
def identity_function(x):
    """Identity activation for the output layer: f(x) = x."""
    return x
#소프트맥스 함수는 자연상수를 밑수로 하는 지수함수로 이루어진 하나의 함수이다.
#소프트맥스 함수가 가지는 의미는 바로 시그모이드 함수를 일반화 한 것.
#이를 통해 각 클래스에 대한 확률을 계산 할 수도 있게 됨.
#시그모이드 함수를 일반화해서 각 클래스에 대한 확률을 계산 할 수 있다는 것은 모든 소프트맥스 함수의 출력값을 더하면 1이 나온다는 의미이다.
#소프트맥스 함수 출력값은 0과 1사이의 값이고 각각의 출력 값은 개별 출력 값에 대한 확률 값이기 때문에 전체 소프트맥스 함수의 합은 항상
#1이 되는 특별한 성질을 가진다.
#때문에 소프트 맥스 함수를 출력층의 활성함수로 사용하면 출력결과를 확률적으로 결론낼 수 있다.
#예를 들어
#y[0] = 0.018, y[1] = 0.245, y[2] = 0.737로 결과가 출력되었다면 1.8%의 확률로 0번 클래스, 24.5%의 확률로 1번 클래스, 73.7%의 확률로 2번
#클래스일 것이므로 2번 클래스일 확률이 가장 높고 따라서 답은 2번 클래스다. 라는 결과를 도출 할 수 있다.
#소프트맥스 함수를 이용해서 통계적(확률적)으로 문제를 대응할 수 있게 되는 것이다. \
#소프트맥스 함수는 단조 증가 함수인 지수함수 exp()를 기반으로 하므로 소프트맥스 함수의 출력값의 대소관계가 그대로 입력된 원소의 대소관계를 이어받는다.
#따라서 역으로 소프트맥스 함수를 통해 나온 출력값의 대소관계를 입력값의 대소관계로 판단해도 된다.
#그래서 신경망 학습과정에선 출력층의 활성함수로 소프트맥스 함수를 사용하고 학습된 모델을 이용해서 추론(분류 및 회귀)하는 과정에선 소프트맥스 함수를
#활성함수에서 생략해도 된다. 이러한 소프트맥스 함수의 구현엔 주의사항이 있다.
#지수함수는 입력값이 커지면 급격하게 무한히 증가한다. 이를 오버플로우(Overflow)라고 한다.
#입력값이 100인 exp(100)은 10의 40승이 넘는 수이다. 오버플로를 해결하기 위해선 해당 값을 전체 데이터 셋에서의 최대값으로 뺀 값으로 치환하는 방법을 사용한다.
#위 과정을 수식으로 나타낼 수 있다. [수식 13] P40
#소프트맥스 함수의 분모 분자에 C라는 상수를 곱해준다. 같은 상수값을 곱해주었으므로 전체 값엔 변화가 없다.
#그리고 여기에 지수함수와 로그함수의 성질 중 하나인 x = a ^ log(a,x)를 이용하여 상수 C를 exp() 함수 안으로 넣는다.
#그럼 상수 C는 exp() 함수 내에서 log(e,C) = ln C 로 변화되고 ln C를 상수 C` 로 받게 되면 아래의 수식으로 변형된다.
# Python code
import numpy as np
a = np.array([1010,1000,990])
np.exp(a) / np.sum(np.exp(a)) # overflow occurs: exp(1010) exceeds the float64 range
# modified softmax expression: subtract the maximum before exponentiating
c = np.max(a)
np.exp(a-c) / np.sum(np.exp(a-c)) # computes correctly
#이처럼 같은 스케일의 변화는 아무런 결과값에 아무런 영향을 주지 않는 점을 이용해서 소프트맥스 함수의 오버플로 현상을 해결할 수 있다.
#이를 이용하여 소프트맥스 함수를 파이썬으로 구현하면 아래와 같다.
def softmax(a):
    """Numerically stable softmax.

    Subtracting the maximum of `a` before exponentiating avoids overflow
    without changing the result; the outputs are positive and sum to 1.
    """
    shifted = np.exp(a - np.max(a))
    return shifted / np.sum(shifted)
#마지막으로 출력층의 노드 개수를 정하는 방법은 간단하다. 입력한 데이터의 클래스 갯수만큼 출력층의 노드 갯수를 정해주면 된다.
#다른 예로 개와 고양이를 분류하고 싶다면 개, 고양이 총 2개의 출력 노드를 만들면 된다.
#은닉층이 2개인 다층 신경망(보통 입력층을 제외한 층수로 신경망을 부른다. 따라서 이 경우는 3층 신경망)
#을 간단하게 파이선으로 코딩.
#이 신경망 모델은 출력층의 활성 함수로 항등함수로 정의한다.
#결과적으로 위 과정을 모두 합한 전체적인 은닉층이 2층인 다층 신경망의 파이썬 구현코드는 아래와 같다.
import numpy as np
# Sigmoid activation used by the hidden layers.
def sigmoid(x):
    """Logistic sigmoid: 1 / (1 + e^-x)."""
    decay = np.exp(-x)
    return 1 / (1 + decay)
# "identify function": identity activation for the output layer
# (the misspelled name is kept because forward() calls it by this name).
def identify_function(x):
    """Return the input unchanged: f(x) = x."""
    return x
# Initialize the network: declare the multi-dimensional weight and bias
# arrays for each of the three layers.
def init_network():
    """Return a dict of fixed weights (w1..w3) and biases (b1..b3)."""
    params = {
        'w1': np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]]),
        'b1': np.array([0.1, 0.2, 0.3]),
        'w2': np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]]),
        'b2': np.array([0.1, 0.2]),
        'w3': np.array([[0.1, 0.3], [0.2, 0.4]]),
        'b3': np.array([0.1, 0.2]),
    }
    return params
# Feed-forward network function. Takes the weights/biases and an input and
# implements a 3-layer network whose hidden layers use the sigmoid
# activation and whose output layer uses the identity activation.
def forward(network, x):
    """Propagate input x through the 3-layer network and return the output.

    Parameters
    ----------
    network : dict with keys 'w1'..'w3' and 'b1'..'b3' (see init_network)
    x : input vector (length must match w1's first dimension)

    Returns
    -------
    Output-layer activations (identity of the last affine layer).
    """
    w1, w2, w3 = network['w1'], network['w2'], network['w3']
    # BUG FIX: the second bias was read as network['bw'], a key that
    # init_network never defines (guaranteed KeyError); it is stored as 'b2'.
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    a1 = np.dot(x, w1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, w2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, w3) + b3
    y = identify_function(a3)
    return y
network = init_network()  # instantiate the network's weights and biases
x = np.array([1.0,0.5])
y = forward(network ,x)
print(y)
#단순한 신경망을 설계하는 것은 어렵지 않다. 다차원 배열을 잘 사용해서 가중치와 입력값 그리고 편향을 잘 도출해서 어떤 활성함수를 사용할지 정해서
#구현한 다음 구현한 활성함수에 값을 잘 넣어준 다음 이전 층의 출력값으로 잘 연결해서 원하는 층만큼 이어주면 된다.
| 36.707006
| 120
| 0.640986
| 1,238
| 5,763
| 2.974152
| 0.519386
| 0.010864
| 0.013036
| 0.014666
| 0.048886
| 0.022542
| 0.006518
| 0
| 0
| 0
| 0
| 0.058905
| 0.248829
| 5,763
| 156
| 121
| 36.942308
| 0.791638
| 0.757418
| 0
| 0.195122
| 0
| 0
| 0.019093
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146341
| false
| 0
| 0.04878
| 0.073171
| 0.341463
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d7eb5aaefc17250eb9787e23ab1f5200d2d65f8
| 466
|
py
|
Python
|
label_gen.py
|
avasid/gaze_detection
|
dbb76a2b3f3eedff5801b53bc95b3a95bc715bc5
|
[
"MIT"
] | 1
|
2020-02-07T21:34:10.000Z
|
2020-02-07T21:34:10.000Z
|
label_gen.py
|
avasid/gaze_detection
|
dbb76a2b3f3eedff5801b53bc95b3a95bc715bc5
|
[
"MIT"
] | 8
|
2020-11-13T18:37:12.000Z
|
2022-03-12T00:14:04.000Z
|
label_gen.py
|
avasid/gaze_detection
|
dbb76a2b3f3eedff5801b53bc95b3a95bc715bc5
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd

# Build a one-hot label vector for every image under ./data/img_data/<label>/
# and write the shuffled mapping to ./data/label_data.csv.
dictt = {}
# enumerate replaces the hand-maintained `i` counter of the original
for i, label in enumerate(['down', 'up', 'left', 'right']):
    img_lst = os.listdir("./data/img_data/" + label + "/")
    temp_label = [0] * 4
    temp_label[i] = 1  # one-hot: position i marks this direction
    for img in img_lst:
        print(label + " " + img)
        dictt[img] = temp_label
label_df = pd.DataFrame(data=dictt, index=['down', 'up', 'left', 'right']).transpose()
label_df = label_df.sample(frac=1)  # shuffle the rows
label_df.to_csv("./data/label_data.csv")
| 23.3
| 86
| 0.592275
| 73
| 466
| 3.616438
| 0.424658
| 0.106061
| 0.075758
| 0.113636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016438
| 0.216738
| 466
| 19
| 87
| 24.526316
| 0.706849
| 0
| 0
| 0
| 0
| 0
| 0.148069
| 0.045064
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d7fb31d8d0c397a081d7685e96fa1bf8414f9a6
| 2,398
|
py
|
Python
|
rubik_race/rubiks_race/solver_test.py
|
ZengLawrence/rubiks_race
|
3d78484f0a68c7e483953cea68130f1edde2739a
|
[
"MIT"
] | null | null | null |
rubik_race/rubiks_race/solver_test.py
|
ZengLawrence/rubiks_race
|
3d78484f0a68c7e483953cea68130f1edde2739a
|
[
"MIT"
] | null | null | null |
rubik_race/rubiks_race/solver_test.py
|
ZengLawrence/rubiks_race
|
3d78484f0a68c7e483953cea68130f1edde2739a
|
[
"MIT"
] | null | null | null |
'''
Created on Jun 27, 2017
@author: lawrencezeng
'''
import unittest
from rubiks_race import solver
class Test(unittest.TestCase):
    """Tests for rubiks_race.solver: solve and solve_pq should both find
    the two-move solution for the shared fixture position/pattern."""

    def setUp(self):
        self.initial_position = [
            ['g', 'g', 'y', 'r', 'r'],
            ['w', 'g', 'w', 'w', 'y'],
            ['g', 'o', ' ', 'r', 'o'],
            ['o', 'b', 'b', 'y', 'y'],
            ['b', 'o', 'w', 'r', 'b']
        ]
        self.pattern = [
            ['g', 'w', 'w'],
            ['g', 'o', 'r'],
            ['b', 'b', 'y']
        ]
        final_positions = [
            ['g', 'g', 'y', 'r', 'r'],
            ['w', 'g', 'w', 'w', 'y'],
            [' ', 'g', 'o', 'r', 'o'],
            ['o', 'b', 'b', 'y', 'y'],
            ['b', 'o', 'w', 'r', 'b']
        ]
        moves = [
            [[2, 1], [2, 2]],
            [[2, 0], [2, 1]],
        ]
        # expected [final board, move list] for both solvers
        self.result = [final_positions, moves]

    def tearDown(self):
        return unittest.TestCase.tearDown(self)

    def test_solve(self):
        # reuse the fixtures built in setUp instead of duplicating them here.
        # assertCountEqual is the Python 3 name of assertItemsEqual, which
        # was removed from unittest and raised AttributeError here.
        self.assertCountEqual(self.result,
                              solver.solve(self.initial_position, self.pattern))

    def test_solve_pq(self):
        self.assertCountEqual(self.result,
                              solver.solve_pq(self.initial_position, self.pattern))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.test_solve']
unittest.main()
| 31.142857
| 96
| 0.27648
| 221
| 2,398
| 2.900452
| 0.20362
| 0.018721
| 0.028081
| 0.024961
| 0.355694
| 0.355694
| 0.355694
| 0.355694
| 0.355694
| 0.355694
| 0
| 0.018257
| 0.497498
| 2,398
| 76
| 97
| 31.552632
| 0.513693
| 0.038365
| 0
| 0.576271
| 0
| 0
| 0.05483
| 0
| 0
| 0
| 0
| 0
| 0.033898
| 1
| 0.067797
| false
| 0
| 0.033898
| 0.016949
| 0.135593
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d80488b5bce65f6332a7212b2c16986023812ef
| 1,625
|
py
|
Python
|
wagtail_translation/migrations/0001_initial.py
|
patroqueeet/wagtail2-translation
|
6a7ad4eea5d900c8640f965ebf7a442dd7bc7e74
|
[
"MIT"
] | null | null | null |
wagtail_translation/migrations/0001_initial.py
|
patroqueeet/wagtail2-translation
|
6a7ad4eea5d900c8640f965ebf7a442dd7bc7e74
|
[
"MIT"
] | null | null | null |
wagtail_translation/migrations/0001_initial.py
|
patroqueeet/wagtail2-translation
|
6a7ad4eea5d900c8640f965ebf7a442dd7bc7e74
|
[
"MIT"
] | 1
|
2021-01-08T19:25:46.000Z
|
2021-01-08T19:25:46.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from modeltranslation import settings as mt_settings
from modeltranslation.utils import build_localized_fieldname, get_translation_fields
from django.db import migrations, models
def url_path_fix(apps, schema_editor):
    """Recompute url_path translations for every existing page."""
    # Import the real model class rather than apps.get_model: the
    # historical model would not have the set_url_path method.
    from wagtail.core.models import Page

    translated_fields = get_translation_fields('url_path')
    # Walk pages in tree order ('path') so parents are fixed before children.
    for page in Page.objects.order_by('path').iterator():
        page.set_url_path(page.get_parent())
        # Restrict the save to the url_path translation fields so that
        # descendant page url paths are not updated at this point,
        # because that would fail.
        page.save(update_fields=translated_fields)
class Migration(migrations.Migration):
    """
    This migration fixes whatever pages you already have in DB
    so that their titles and slugs in default language are not empty
    and url_path field translations are updated accordingly.
    """
    # depends on the migration that added the translation columns
    dependencies = [
        ('wagtailtranslation', '9999_wagtail_translation'),
    ]
    operations = [
        # 1. copy slugs and titles to corresponding default language fields
        migrations.RunSQL(
            ['UPDATE wagtailcore_page SET {}=slug, {}=title'.format(
                build_localized_fieldname('slug', mt_settings.DEFAULT_LANGUAGE),
                build_localized_fieldname('title', mt_settings.DEFAULT_LANGUAGE))],
            # reverse is a no-op: the copied values are harmless to keep
            migrations.RunSQL.noop),
        # 2. update url_path in all existing pages for all translations
        migrations.RunPython(url_path_fix, migrations.RunPython.noop),
    ]
| 37.790698
| 84
| 0.715692
| 204
| 1,625
| 5.490196
| 0.504902
| 0.05625
| 0.061607
| 0.044643
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005456
| 0.210462
| 1,625
| 42
| 85
| 38.690476
| 0.867498
| 0.312
| 0
| 0
| 0
| 0
| 0.099265
| 0.022059
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.227273
| 0
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d8293dd05c195d7acdf3af64d74eb27c71ed3fc
| 99,195
|
py
|
Python
|
WORC/WORC.py
|
MStarmans91/WORC
|
b6b8fc2ccb7d443a69b5ca20b1d6efb65b3f0fc7
|
[
"ECL-2.0",
"Apache-2.0"
] | 47
|
2018-01-28T14:08:15.000Z
|
2022-03-24T16:10:07.000Z
|
WORC/WORC.py
|
JZK00/WORC
|
14e8099835eccb35d49b52b97c0be64ecca3809c
|
[
"ECL-2.0",
"Apache-2.0"
] | 13
|
2018-08-28T13:32:57.000Z
|
2020-10-26T16:35:59.000Z
|
WORC/WORC.py
|
JZK00/WORC
|
14e8099835eccb35d49b52b97c0be64ecca3809c
|
[
"ECL-2.0",
"Apache-2.0"
] | 16
|
2017-11-13T10:53:36.000Z
|
2022-03-18T17:02:04.000Z
|
#!/usr/bin/env python
# Copyright 2016-2021 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
import fastr
import graphviz
import configparser
from pathlib import Path
from random import randint
import WORC.IOparser.file_io as io
from fastr.api import ResourceLimit
from WORC.tools.Slicer import Slicer
from WORC.tools.Elastix import Elastix
from WORC.tools.Evaluate import Evaluate
import WORC.addexceptions as WORCexceptions
import WORC.IOparser.config_WORC as config_io
from WORC.detectors.detectors import DebugDetector
from WORC.export.hyper_params_exporter import export_hyper_params_to_latex
from urllib.parse import urlparse
from urllib.request import url2pathname
class WORC(object):
"""Workflow for Optimal Radiomics Classification.
A Workflow for Optimal Radiomics Classification (WORC) object that
serves as a pipeline spawner and manager for optimizating radiomics
studies. Depending on the attributes set, the object will spawn an
appropriate pipeline and manage it.
Note that many attributes are lists and can therefore contain multiple
instances. For example, when providing two sequences per patient,
the "images" list contains two items. The type of items in the lists
is described below.
All objects that serve as source for your network, i.e. refer to
actual files to be used, should be formatted as fastr sources suited for
one of the fastr plugings, see also
http://fastr.readthedocs.io/en/stable/fastr.reference.html#ioplugin-reference
The objects should be lists of these fastr sources or dictionaries with the
sample ID's, e.g.
images_train = [{'Patient001': vfs://input/CT001.nii.gz,
'Patient002': vfs://input/CT002.nii.gz},
{'Patient001': vfs://input/MR001.nii.gz,
'Patient002': vfs://input/MR002.nii.gz}]
Attributes
------------------
name: String, default 'WORC'
name of the network.
configs: list, required
Configuration parameters, either ConfigParser objects
created through the defaultconfig function or paths of config .ini
files. (list, required)
labels: list, required
Paths to files containing patient labels (.txt files).
network: automatically generated
The FASTR network generated through the "build" function.
images: list, optional
Paths refering to the images used for Radiomics computation. Images
should be of the ITK Image type.
segmentations: list, optional
Paths refering to the segmentations used for Radiomics computation.
Segmentations should be of the ITK Image type.
semantics: semantic features per image type (list, optional)
masks: state which pixels of images are valid (list, optional)
features: input Radiomics features for classification (list, optional)
metadata: DICOM headers belonging to images (list, optional)
Elastix_Para: parameter files for Elastix (list, optional)
fastr_plugin: plugin to use for FASTR execution
fastr_tempdir: temporary directory to use for FASTR execution
additions: additional inputs for your network (dict, optional)
source_data: data to use as sources for FASTR (dict)
sink_data: data to use as sinks for FASTR (dict)
CopyMetadata: Boolean, default True
when using elastix, copy metadata from image to segmentation or not
"""
def __init__(self, name='test'):
    """Initialize WORC object.

    Set the initial variables all to None, except for some defaults.

    Arguments:
        name: name of the network (string, optional); prefixed with 'WORC_'.
    """
    self.name = 'WORC_' + name
    # Initialize several objects: source lists for the train set ...
    self.configs = list()
    self.fastrconfigs = list()
    self.images_train = list()
    self.segmentations_train = list()
    self.semantics_train = list()
    self.labels_train = list()
    self.masks_train = list()
    self.masks_normalize_train = list()
    self.features_train = list()
    self.metadata_train = list()
    # ... and the matching source lists for the optional test set
    self.images_test = list()
    self.segmentations_test = list()
    self.semantics_test = list()
    self.labels_test = list()
    self.masks_test = list()
    self.masks_normalize_test = list()
    self.features_test = list()
    self.metadata_test = list()
    self.Elastix_Para = list()
    self.label_names = 'Label1, Label2'
    self.fixedsplits = list()
    # Set some defaults, name
    self.fastr_plugin = 'LinearExecution'
    if name == '':
        # NOTE(review): this rebinds only the local `name`, after self.name
        # has already been computed above, so the random value is never
        # used anywhere -- looks like dead/ineffective code; confirm intent.
        name = [randint(0, 9) for p in range(0, 5)]
    # fastr temporary directory under the configured 'tmp' mount
    self.fastr_tmpdir = os.path.join(fastr.config.mounts['tmp'], self.name)
    self.additions = dict()
    # when using elastix, copy metadata from image to segmentation or not
    self.CopyMetadata = True
    self.segmode = []
    self._add_evaluation = False
    self.TrainTest = False
    # Memory settings for all fastr nodes
    self.fastr_memory_parameters = dict()
    self.fastr_memory_parameters['FeatureCalculator'] = '14G'
    self.fastr_memory_parameters['Classification'] = '6G'
    self.fastr_memory_parameters['WORCCastConvert'] = '4G'
    self.fastr_memory_parameters['Preprocessing'] = '4G'
    self.fastr_memory_parameters['Elastix'] = '4G'
    self.fastr_memory_parameters['Transformix'] = '4G'
    self.fastr_memory_parameters['Segmentix'] = '6G'
    self.fastr_memory_parameters['ComBat'] = '12G'
    self.fastr_memory_parameters['PlotEstimator'] = '12G'
    # dump the fastr configuration when running under the debug detector
    if DebugDetector().do_detection():
        print(fastr.config)
def defaultconfig(self):
    """Generate a configparser object holding all default configuration values.

    Returns:
        config: configparser configuration file
    """
    config = configparser.ConfigParser()
    # Keep option names case-sensitive; configparser lowercases them by default.
    config.optionxform = str

    # Defaults shared by several options further down.
    n_estimators = '10, 90'       # tree/boosting-round count range (RF, AdaBoost, XGB)
    learning_rate = '0.01, 0.99'  # learning-rate range (AdaBoost, XGB)
    glcm_levels = '16'            # GLCM discretization levels, reused as PyRadiomics binCount

    # General configuration of WORC
    config['General'] = {
        'cross_validation': 'True',
        'Segmentix': 'True',
        'FeatureCalculators': '[predict/CalcFeatures:1.0, pyradiomics/Pyradiomics:1.0]',
        'Preprocessing': 'worc/PreProcess:1.0',
        'RegistrationNode': 'elastix4.8/Elastix:4.8',
        'TransformationNode': 'elastix4.8/Transformix:4.8',
        'Joblib_ncores': '1',
        'Joblib_backend': 'threading',
        'tempsave': 'False',
        'AssumeSameImageAndMaskMetadata': 'False',
        'ComBat': 'False',
    }

    # Options for the object/patient labels that are used
    config['Labels'] = {
        'label_names': 'Label1, Label2',
        'modus': 'singlelabel',
        'url': 'WIP',
        'projectID': 'WIP',
    }

    # Preprocessing
    config['Preprocessing'] = {
        'CheckSpacing': 'False',
        'Clipping': 'False',
        'Clipping_Range': '-1000.0, 3000.0',
        'Normalize': 'True',
        'Normalize_ROI': 'Full',
        'Method': 'z_score',
        'ROIDetermine': 'Provided',
        'ROIdilate': 'False',
        'ROIdilateradius': '10',
        'Resampling': 'False',
        'Resampling_spacing': '1, 1, 1',
        'BiasCorrection': 'False',
        'BiasCorrection_Mask': 'False',
        'CheckOrientation': 'False',
        'OrientationPrimaryAxis': 'axial',
    }

    # Segmentix
    config['Segmentix'] = {
        'mask': 'subtract',
        'segtype': 'None',
        'segradius': '5',
        'N_blobs': '1',
        'fillholes': 'True',
        'remove_small_objects': 'False',
        'min_object_size': '2',
    }

    # PREDICT - Feature calculation: which feature groups are computed,
    # followed by the parameter settings for the calculation itself.
    config['ImageFeatures'] = {
        'shape': 'True',
        'histogram': 'True',
        'orientation': 'True',
        'texture_Gabor': 'True',
        'texture_LBP': 'True',
        'texture_GLCM': 'True',
        'texture_GLCMMS': 'True',
        'texture_GLRLM': 'False',
        'texture_GLSZM': 'False',
        'texture_NGTDM': 'False',
        'coliage': 'False',
        'vessel': 'True',
        'log': 'True',
        'phase': 'True',
        # Defines only naming of modalities
        'image_type': 'CT',
        # Gabor frequencies in pixels; Gabor/GLCM angles in degrees/radians
        'gabor_frequencies': '0.05, 0.2, 0.5',
        'gabor_angles': '0, 45, 90, 135',
        'GLCM_angles': '0, 0.79, 1.57, 2.36',
        # GLCM discretization levels, distances in pixels
        'GLCM_levels': glcm_levels,
        'GLCM_distances': '1, 3',
        # LBP radius, number of points in pixels
        'LBP_radius': '3, 8, 15',
        'LBP_npoints': '12, 24, 36',
        # Phase features minimal wavelength and number of scales
        'phase_minwavelength': '3',
        'phase_nscale': '5',
        # Log features sigma of Gaussian in pixels
        'log_sigma': '1, 5, 10',
        # Vessel features scale range, scale step, and erosion radius
        'vessel_scale_range': '1, 10',
        'vessel_scale_step': '2',
        'vessel_radius': '5',
        # DICOM tags from which to extract features, and how to name them
        'dicom_feature_tags': '0010 1010, 0010 0040',
        'dicom_feature_labels': 'age, sex',
    }

    # PyRadiomics - Feature calculation, in addition to the above.
    # Mostly based on specific MR settings, see:
    # https://github.com/Radiomics/pyradiomics/blob/master/examples/exampleSettings/exampleMR_NoResampling.yaml
    config['PyRadiomics'] = {
        'geometryTolerance': '0.0001',
        'normalize': 'False',
        'normalizeScale': '100',
        'resampledPixelSpacing': 'None',
        'interpolator': 'sitkBSpline',
        'preCrop': 'True',
        # binWidth too sensitive for normalization, thus use binCount
        'binCount': glcm_levels,
        'binWidth': 'None',
        'force2D': 'False',
        'force2Ddimension': '0',  # 0: axial slices, 1: coronal, 2: sagittal
        'voxelArrayShift': '300',
        'Original': 'True',
        'Wavelet': 'False',
        'LoG': 'False',
        # With segmentix enabled the segmentation label is 1, otherwise 255
        'label': '1' if config['General']['Segmentix'] == 'True' else '255',
        # Enabled PyRadiomics feature classes
        'extract_firstorder': 'False',
        'extract_shape': 'True',
        'texture_GLCM': 'False',
        'texture_GLRLM': 'True',
        'texture_GLSZM': 'True',
        'texture_GLDM': 'True',
        'texture_NGTDM': 'True',
    }

    # ComBat Feature Harmonization
    config['ComBat'] = {
        'language': 'python',
        'batch': 'Hospital',
        'mod': '[]',
        'par': '1',
        'eb': '1',
        'per_feature': '0',
        'excluded_features': 'sf_, of_, semf_, pf_',
        'matlab': 'C:\\Program Files\\MATLAB\\R2015b\\bin\\matlab.exe',
    }

    # Feature OneHotEncoding
    config['OneHotEncoding'] = {
        'Use': 'False',
        'feature_labels_tofit': '',
    }

    # Feature imputation
    config['Imputation'] = {
        'use': 'True',
        'strategy': 'mean, median, most_frequent, constant, knn',
        'n_neighbors': '5, 5',
    }

    # Feature scaling options
    config['FeatureScaling'] = {
        'scaling_method': 'robust_z_score',
        'skip_features': 'semf_, pf_',
    }

    # Feature preprocessing before the whole HyperOptimization
    config['FeatPreProcess'] = {
        'Use': 'False',
        'Combine': 'False',
        'Combine_method': 'mean',
    }

    # Feature selection
    config['Featsel'] = {
        'Variance': '1.0',
        'GroupwiseSearch': 'True',
        'SelectFromModel': '0.275',
        'SelectFromModel_estimator': 'Lasso, LR, RF',
        'SelectFromModel_lasso_alpha': '0.1, 1.4',
        'SelectFromModel_n_trees': '10, 90',
        'UsePCA': '0.275',
        'PCAType': '95variance, 10, 50, 100',
        'StatisticalTestUse': '0.275',
        'StatisticalTestMetric': 'MannWhitneyU',
        'StatisticalTestThreshold': '-3, 2.5',
        'ReliefUse': '0.275',
        'ReliefNN': '2, 4',
        'ReliefSampleSize': '0.75, 0.2',
        'ReliefDistanceP': '1, 3',
        'ReliefNumFeatures': '10, 40',
    }

    # Groupwise Featureselection options
    config['SelectFeatGroup'] = {
        'shape_features': 'True, False',
        'histogram_features': 'True, False',
        'orientation_features': 'True, False',
        'texture_Gabor_features': 'True, False',
        'texture_GLCM_features': 'True, False',
        'texture_GLDM_features': 'True, False',
        'texture_GLCMMS_features': 'True, False',
        'texture_GLRLM_features': 'True, False',
        'texture_GLSZM_features': 'True, False',
        'texture_GLDZM_features': 'True, False',
        'texture_NGTDM_features': 'True, False',
        'texture_NGLDM_features': 'True, False',
        'texture_LBP_features': 'True, False',
        'dicom_features': 'False',
        'semantic_features': 'False',
        'coliage_features': 'False',
        'vessel_features': 'True, False',
        'phase_features': 'True, False',
        'fractal_features': 'True, False',
        'location_features': 'True, False',
        'rgrd_features': 'True, False',
        # Select features per toolbox, or simply all
        'toolbox': 'All, PREDICT, PyRadiomics',
        # Select original features, or after transformation of feature space
        'original_features': 'True',
        'wavelet_features': 'True, False',
        'log_features': 'True, False',
    }

    # Resampling options
    config['Resampling'] = {
        'Use': '0.20',
        'Method': ('RandomUnderSampling, RandomOverSampling, NearMiss, '
                   'NeighbourhoodCleaningRule, ADASYN, BorderlineSMOTE, SMOTE, '
                   'SMOTEENN, SMOTETomek'),
        'sampling_strategy': 'auto, majority, minority, not minority, not majority, all',
        'n_neighbors': '3, 12',
        'k_neighbors': '5, 15',
        'threshold_cleaning': '0.25, 0.5',
    }

    # Classification
    config['Classification'] = {
        'fastr': 'True',
        'fastr_plugin': self.fastr_plugin,
        'classifiers': ('SVM, RF, LR, LDA, QDA, GaussianNB, '
                        'AdaBoostClassifier, '
                        'XGBClassifier'),
        'max_iter': '100000',
        'SVMKernel': 'linear, poly, rbf',
        'SVMC': '0, 6',
        'SVMdegree': '1, 6',
        'SVMcoef0': '0, 1',
        'SVMgamma': '-5, 5',
        'RFn_estimators': n_estimators,
        'RFmin_samples_split': '2, 3',
        'RFmax_depth': '5, 5',
        'LRpenalty': 'l1, l2, elasticnet',
        'LRC': '0.01, 0.99',
        'LR_solver': 'lbfgs, saga',
        'LR_l1_ratio': '0, 1',
        'LDA_solver': 'svd, lsqr, eigen',
        'LDA_shrinkage': '-5, 5',
        'QDA_reg_param': '-5, 5',
        'ElasticNet_alpha': '-5, 5',
        'ElasticNet_l1_ratio': '0, 1',
        'SGD_alpha': '-5, 5',
        'SGD_l1_ratio': '0, 1',
        'SGD_loss': 'squared_loss, huber, epsilon_insensitive, squared_epsilon_insensitive',
        'SGD_penalty': 'none, l2, l1',
        'CNB_alpha': '0, 1',
        'AdaBoost_n_estimators': n_estimators,
        'AdaBoost_learning_rate': learning_rate,
        # XGBoost ranges based on:
        # https://towardsdatascience.com/doing-xgboost-hyper-parameter-tuning-the-smart-way-part-1-of-2-f6d255a45dde
        # https://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/
        # https://medium.com/data-design/xgboost-hi-im-gamma-what-can-i-do-for-you-and-the-tuning-of-regularization-a42ea17e6ab6
        'XGB_boosting_rounds': n_estimators,
        'XGB_max_depth': '3, 12',
        'XGB_learning_rate': learning_rate,
        'XGB_gamma': '0.01, 9.99',
        'XGB_min_child_weight': '1, 6',
        'XGB_colsample_bytree': '0.3, 0.7',
    }

    # CrossValidation
    config['CrossValidation'] = {
        'Type': 'random_split',
        'N_iterations': '100',
        'test_size': '0.2',
        'fixed_seed': 'False',
    }

    # Hyperparameter optimization options
    config['HyperOptimization'] = {
        'scoring_method': 'f1_weighted',
        'test_size': '0.2',
        'n_splits': '5',
        'N_iterations': '1000',
        'n_jobspercore': '200',  # only relevant when using fastr in classification
        'maxlen': '100',
        'ranking_score': 'test_score',
        'memory': '3G',
        'refit_workflows': 'False',
    }

    # Ensemble options
    config['Ensemble'] = {
        'Use': '100',
        'Metric': 'Default',
    }

    # Evaluation options
    config['Evaluation'] = {
        'OverfitScaler': 'False',
    }

    # Bootstrap options
    config['Bootstrap'] = {
        'Use': 'False',
        'N_iterations': '1000',
    }

    return config
def add_tools(self):
    """Add several tools to the WORC object.

    Instantiates the ``Tools`` class (imported elsewhere in this file)
    and exposes it as ``self.Tools``.
    """
    self.Tools = Tools()
def build(self, wtype='training'):
    """Build the network based on the given attributes.

    Parameters
    ----------
    wtype: string, default 'training'
        Specify the WORC execution type.
        - testing: use if you have a trained classifier and want to
        train it on some new images.
        - training: use if you want to train a classifier from a dataset.

    Any other value leaves the object untouched apart from ``self.wtype``.
    """
    self.wtype = wtype
    # Dispatch to the matching builder; unknown types are silently ignored,
    # matching the historical behavior of the if/elif chain.
    builders = {
        'training': self.build_training,
        'testing': self.build_testing,
    }
    builder = builders.get(wtype)
    if builder is not None:
        builder()
def build_training(self):
    """Build the training network based on the given attributes.

    Requires training images or features, plus training labels
    (supervised learning). When test images/features are also supplied,
    ``self.TrainTest`` is set and a combined train-test workflow is
    built. Creates the fastr network, sources, sinks, and all nodes
    (preprocessing, feature calculation, segmentation handling,
    optional ComBat harmonization, classification, evaluation).

    Raises:
        WORCexceptions.WORCIOError: when no labels, or neither images
            nor features, are supplied for training.
        WORCexceptions.WORCNotImplementedError: when no segmentations
            are given (automatic segmentation is not implemented).
        WORCexceptions.WORCNotImplementedError /
        WORCexceptions.WORCValueError / WORCexceptions.WORCTypeError:
            on inconsistent segmentation counts, invalid modality
            types, or resampling without segmentix.
    """
    # We either need images or features for Radiomics
    if self.images_test or self.features_test:
        # A test set is supplied: build a combined train-test workflow.
        self.TrainTest = True

    if self.images_train or self.features_train:
        print('Building training network...')

        # We currently require labels for supervised learning
        if self.labels_train:
            if not self.configs:
                # No configs given: one default config per image/feature source
                print("No configuration given, assuming default")
                if self.images_train:
                    self.configs = [self.defaultconfig()] * len(self.images_train)
                else:
                    self.configs = [self.defaultconfig()] * len(self.features_train)

            self.network = fastr.create_network(self.name)

            # BUG: We currently use the first configuration as general config
            image_types = list()
            for c in range(len(self.configs)):
                if type(self.configs[c]) == str:
                    # Probably, c is a configuration file
                    self.configs[c] = config_io.load_config(self.configs[c])
                image_types.append(self.configs[c]['ImageFeatures']['image_type'])

            # Create config source
            self.source_class_config = self.network.create_source('ParameterFile', id='config_classification_source', node_group='conf', step_id='general_sources')

            # Classification tool and label source
            self.source_patientclass_train = self.network.create_source('PatientInfoFile', id='patientclass_train', node_group='pctrain', step_id='train_sources')
            if self.labels_test:
                self.source_patientclass_test = self.network.create_source('PatientInfoFile', id='patientclass_test', node_group='pctest', step_id='test_sources')

            memory = self.fastr_memory_parameters['Classification']
            self.classify = self.network.create_node('worc/TrainClassifier:1.0',
                                                     tool_version='1.0',
                                                     id='classify',
                                                     resources=ResourceLimit(memory=memory),
                                                     step_id='WorkflowOptimization')

            if self.fixedsplits:
                # Fixed train/validation splits supplied as a CSV source
                self.fixedsplits_node = self.network.create_source('CSVFile', id='fixedsplits_source', node_group='conf', step_id='general_sources')
                self.classify.inputs['fixedsplits'] = self.fixedsplits_node.output

            # Constants for the evaluation step, taken from the first config
            self.source_Ensemble =\
                self.network.create_constant('String', [self.configs[0]['Ensemble']['Use']],
                                             id='Ensemble',
                                             step_id='Evaluation')
            self.source_LabelType =\
                self.network.create_constant('String', [self.configs[0]['Labels']['label_names']],
                                             id='LabelType',
                                             step_id='Evaluation')

            memory = self.fastr_memory_parameters['PlotEstimator']
            self.plot_estimator =\
                self.network.create_node('worc/PlotEstimator:1.0', tool_version='1.0',
                                         id='plot_Estimator',
                                         resources=ResourceLimit(memory=memory),
                                         step_id='Evaluation')

            # Outputs
            self.sink_classification = self.network.create_sink('HDF5', id='classification', step_id='general_sinks')
            self.sink_performance = self.network.create_sink('JsonFile', id='performance', step_id='general_sinks')
            self.sink_class_config = self.network.create_sink('ParameterFile', id='config_classification_sink', node_group='conf', step_id='general_sinks')

            # Links
            self.sink_class_config.input = self.source_class_config.output
            self.link_class_1 = self.network.create_link(self.source_class_config.output, self.classify.inputs['config'])
            self.link_class_2 = self.network.create_link(self.source_patientclass_train.output, self.classify.inputs['patientclass_train'])
            self.link_class_1.collapse = 'conf'
            self.link_class_2.collapse = 'pctrain'

            self.plot_estimator.inputs['ensemble'] = self.source_Ensemble.output
            self.plot_estimator.inputs['label_type'] = self.source_LabelType.output

            # Evaluation uses the test patient info when test labels exist
            if self.labels_test:
                pinfo = self.source_patientclass_test.output
            else:
                pinfo = self.source_patientclass_train.output

            self.plot_estimator.inputs['prediction'] = self.classify.outputs['classification']
            self.plot_estimator.inputs['pinfo'] = pinfo

            if self.TrainTest:
                # FIXME: the naming here is ugly
                # NOTE(review): this assumes labels_test was given whenever
                # images_test/features_test are, since source_patientclass_test
                # is only created under `if self.labels_test:` above — confirm.
                self.link_class_3 = self.network.create_link(self.source_patientclass_test.output, self.classify.inputs['patientclass_test'])
                self.link_class_3.collapse = 'pctest'

            self.sink_classification.input = self.classify.outputs['classification']
            self.sink_performance.input = self.plot_estimator.outputs['output_json']

            if self.masks_normalize_train:
                self.sources_masks_normalize_train = dict()
            if self.masks_normalize_test:
                self.sources_masks_normalize_test = dict()

            # -----------------------------------------------------
            # Optionally, add ComBat Harmonization. Currently done
            # on full dataset, not in a cross-validation
            if self.configs[0]['General']['ComBat'] == 'True':
                self.add_ComBat()

            if not self.features_train:
                # Create nodes to compute features
                # General
                self.sources_parameters = dict()
                self.source_config_pyradiomics = dict()
                self.source_toolbox_name = dict()

                # Training only
                self.calcfeatures_train = dict()
                self.featureconverter_train = dict()
                self.preprocessing_train = dict()
                self.sources_images_train = dict()
                self.sinks_features_train = dict()
                self.converters_im_train = dict()
                self.converters_seg_train = dict()
                self.links_C1_train = dict()
                self.featurecalculators = dict()

                if self.TrainTest:
                    # A test set is supplied, for which nodes also need to be created
                    self.calcfeatures_test = dict()
                    self.featureconverter_test = dict()
                    self.preprocessing_test = dict()
                    self.sources_images_test = dict()
                    self.sinks_features_test = dict()
                    self.converters_im_test = dict()
                    self.converters_seg_test = dict()
                    self.links_C1_test = dict()

                # Check which nodes are necessary
                if not self.segmentations_train:
                    message = "No automatic segmentation method is yet implemented."
                    raise WORCexceptions.WORCNotImplementedError(message)
                elif len(self.segmentations_train) == len(image_types):
                    # Segmentations provided
                    self.sources_segmentations_train = dict()
                    self.sources_segmentations_test = dict()
                    self.segmode = 'Provided'
                elif len(self.segmentations_train) == 1:
                    # Assume segmentations need to be registered to other modalities
                    print('\t - Adding Elastix node for image registration.')
                    self.add_elastix_sourcesandsinks()
                    pass
                else:
                    nseg = len(self.segmentations_train)
                    nim = len(image_types)
                    m = f'Length of segmentations for training is ' +\
                        f'{nseg}: should be equal to number of images' +\
                        f' ({nim}) or 1 when using registration.'
                    raise WORCexceptions.WORCValueError(m)

                # BUG: We assume that first type defines if we use segmentix
                if self.configs[0]['General']['Segmentix'] == 'True':
                    # Use the segmentix toolbox for segmentation processing
                    print('\t - Adding segmentix node for segmentation preprocessing.')
                    self.sinks_segmentations_segmentix_train = dict()
                    self.sources_masks_train = dict()
                    self.converters_masks_train = dict()
                    self.nodes_segmentix_train = dict()

                    if self.TrainTest:
                        # Also use segmentix on the tes set
                        self.sinks_segmentations_segmentix_test = dict()
                        self.sources_masks_test = dict()
                        self.converters_masks_test = dict()
                        self.nodes_segmentix_test = dict()

                if self.semantics_train:
                    # Semantic features are supplied
                    self.sources_semantics_train = dict()

                if self.metadata_train:
                    # Metadata to extract patient features from is supplied
                    self.sources_metadata_train = dict()

                if self.semantics_test:
                    # Semantic features are supplied
                    self.sources_semantics_test = dict()

                if self.metadata_test:
                    # Metadata to extract patient features from is supplied
                    self.sources_metadata_test = dict()

                # Create a part of the pipeline for each modality
                self.modlabels = list()
                for nmod, mod in enumerate(image_types):
                    # Create label for each modality/image
                    num = 0
                    label = mod + '_' + str(num)
                    while label in self.calcfeatures_train.keys():
                        # if label already exists, add number to label
                        num += 1
                        label = mod + '_' + str(num)
                    self.modlabels.append(label)

                    # Create required sources and sinks
                    self.sources_parameters[label] = self.network.create_source('ParameterFile', id='config_' + label, step_id='general_sources')
                    self.sources_images_train[label] = self.network.create_source('ITKImageFile', id='images_train_' + label, node_group='train', step_id='train_sources')
                    if self.TrainTest:
                        self.sources_images_test[label] = self.network.create_source('ITKImageFile', id='images_test_' + label, node_group='test', step_id='test_sources')

                    if self.metadata_train and len(self.metadata_train) >= nmod + 1:
                        self.sources_metadata_train[label] = self.network.create_source('DicomImageFile', id='metadata_train_' + label, node_group='train', step_id='train_sources')

                    if self.metadata_test and len(self.metadata_test) >= nmod + 1:
                        self.sources_metadata_test[label] = self.network.create_source('DicomImageFile', id='metadata_test_' + label, node_group='test', step_id='test_sources')

                    if self.masks_train and len(self.masks_train) >= nmod + 1:
                        # Create mask source and convert
                        self.sources_masks_train[label] = self.network.create_source('ITKImageFile', id='mask_train_' + label, node_group='train', step_id='train_sources')
                        memory = self.fastr_memory_parameters['WORCCastConvert']
                        self.converters_masks_train[label] =\
                            self.network.create_node('worc/WORCCastConvert:0.3.2',
                                                     tool_version='0.1',
                                                     id='convert_mask_train_' + label,
                                                     node_group='train',
                                                     resources=ResourceLimit(memory=memory),
                                                     step_id='FileConversion')

                        self.converters_masks_train[label].inputs['image'] = self.sources_masks_train[label].output

                    if self.masks_test and len(self.masks_test) >= nmod + 1:
                        # Create mask source and convert
                        self.sources_masks_test[label] = self.network.create_source('ITKImageFile', id='mask_test_' + label, node_group='test', step_id='test_sources')
                        memory = self.fastr_memory_parameters['WORCCastConvert']
                        self.converters_masks_test[label] =\
                            self.network.create_node('worc/WORCCastConvert:0.3.2',
                                                     tool_version='0.1',
                                                     id='convert_mask_test_' + label,
                                                     node_group='test',
                                                     resources=ResourceLimit(memory=memory),
                                                     step_id='FileConversion')

                        self.converters_masks_test[label].inputs['image'] = self.sources_masks_test[label].output

                    # First convert the images
                    if any(modality in mod for modality in ['MR', 'CT', 'MG', 'PET']):
                        # Use WORC PXCastConvet for converting image formats
                        memory = self.fastr_memory_parameters['WORCCastConvert']
                        self.converters_im_train[label] =\
                            self.network.create_node('worc/WORCCastConvert:0.3.2',
                                                     tool_version='0.1',
                                                     id='convert_im_train_' + label,
                                                     resources=ResourceLimit(memory=memory),
                                                     step_id='FileConversion')

                        if self.TrainTest:
                            self.converters_im_test[label] =\
                                self.network.create_node('worc/WORCCastConvert:0.3.2',
                                                         tool_version='0.1',
                                                         id='convert_im_test_' + label,
                                                         resources=ResourceLimit(memory=memory),
                                                         step_id='FileConversion')
                    else:
                        raise WORCexceptions.WORCTypeError(('No valid image type for modality {}: {} provided.').format(str(nmod), mod))

                    # Create required links
                    self.converters_im_train[label].inputs['image'] = self.sources_images_train[label].output
                    if self.TrainTest:
                        self.converters_im_test[label].inputs['image'] = self.sources_images_test[label].output

                    # -----------------------------------------------------
                    # Preprocessing
                    preprocess_node = str(self.configs[nmod]['General']['Preprocessing'])
                    print('\t - Adding preprocessing node for image preprocessing.')
                    self.add_preprocessing(preprocess_node, label, nmod)

                    # -----------------------------------------------------
                    # Feature calculation
                    feature_calculators =\
                        self.configs[nmod]['General']['FeatureCalculators']
                    # Config value is a string like '[a/b:1.0, c/d:1.0]';
                    # strip the brackets and split into individual tools.
                    feature_calculators = feature_calculators.strip('][').split(', ')
                    self.featurecalculators[label] = [f.split('/')[0] for f in feature_calculators]

                    # Add lists for feature calculation and converter objects
                    self.calcfeatures_train[label] = list()
                    self.featureconverter_train[label] = list()
                    if self.TrainTest:
                        self.calcfeatures_test[label] = list()
                        self.featureconverter_test[label] = list()

                    for f in feature_calculators:
                        print(f'\t - Adding feature calculation node: {f}.')
                        self.add_feature_calculator(f, label, nmod)

                    # -----------------------------------------------------
                    # Create the neccesary nodes for the segmentation
                    if self.segmode == 'Provided':
                        # Segmentation ----------------------------------------------------
                        # Use the provided segmantions for each modality
                        memory = self.fastr_memory_parameters['WORCCastConvert']
                        self.sources_segmentations_train[label] =\
                            self.network.create_source('ITKImageFile',
                                                       id='segmentations_train_' + label,
                                                       node_group='train',
                                                       step_id='train_sources')

                        self.converters_seg_train[label] =\
                            self.network.create_node('worc/WORCCastConvert:0.3.2',
                                                     tool_version='0.1',
                                                     id='convert_seg_train_' + label,
                                                     resources=ResourceLimit(memory=memory),
                                                     step_id='FileConversion')

                        self.converters_seg_train[label].inputs['image'] =\
                            self.sources_segmentations_train[label].output

                        if self.TrainTest:
                            self.sources_segmentations_test[label] =\
                                self.network.create_source('ITKImageFile',
                                                           id='segmentations_test_' + label,
                                                           node_group='test',
                                                           step_id='test_sources')

                            self.converters_seg_test[label] =\
                                self.network.create_node('worc/WORCCastConvert:0.3.2',
                                                         tool_version='0.1',
                                                         id='convert_seg_test_' + label,
                                                         resources=ResourceLimit(memory=memory),
                                                         step_id='FileConversion')

                            self.converters_seg_test[label].inputs['image'] =\
                                self.sources_segmentations_test[label].output

                    elif self.segmode == 'Register':
                        # ---------------------------------------------
                        # Registration nodes: Align segmentation of first
                        # modality to others using registration ith Elastix
                        self.add_elastix(label, nmod)

                    # -----------------------------------------------------
                    # Optionally, add segmentix, the in-house segmentation
                    # processor of WORC
                    if self.configs[nmod]['General']['Segmentix'] == 'True':
                        self.add_segmentix(label, nmod)
                    elif self.configs[nmod]['Preprocessing']['Resampling'] == 'True':
                        raise WORCexceptions.WORCValueError('If you use resampling, ' +
                                                            'have to use segmentix to ' +
                                                            ' make sure the mask is ' +
                                                            'also resampled. Please ' +
                                                            'set ' +
                                                            'config["General"]["Segmentix"]' +
                                                            'to "True".')
                    else:
                        # Provide source or elastix segmentations to
                        # feature calculator
                        for i_node in range(len(self.calcfeatures_train[label])):
                            if self.segmode == 'Provided':
                                self.calcfeatures_train[label][i_node].inputs['segmentation'] =\
                                    self.converters_seg_train[label].outputs['image']
                            elif self.segmode == 'Register':
                                # For registered modes, modality 0 keeps its own
                                # segmentation; others use the transformed one.
                                if nmod > 0:
                                    self.calcfeatures_train[label][i_node].inputs['segmentation'] =\
                                        self.transformix_seg_nodes_train[label].outputs['image']
                                else:
                                    self.calcfeatures_train[label][i_node].inputs['segmentation'] =\
                                        self.converters_seg_train[label].outputs['image']

                            if self.TrainTest:
                                if self.segmode == 'Provided':
                                    self.calcfeatures_test[label][i_node].inputs['segmentation'] =\
                                        self.converters_seg_test[label].outputs['image']
                                elif self.segmode == 'Register':
                                    if nmod > 0:
                                        self.calcfeatures_test[label][i_node].inputs['segmentation'] =\
                                            self.transformix_seg_nodes_test[label].outputs['image']
                                    else:
                                        self.calcfeatures_test[label][i_node].inputs['segmentation'] =\
                                            self.converters_seg_test[label].outputs['image']

                    # -----------------------------------------------------
                    # Optionally, add ComBat Harmonization
                    if self.configs[0]['General']['ComBat'] == 'True':
                        # Link features to ComBat
                        self.links_Combat1_train[label] = list()
                        for i_node, fname in enumerate(self.featurecalculators[label]):
                            self.links_Combat1_train[label].append(self.ComBat.inputs['features_train'][f'{label}_{self.featurecalculators[label][i_node]}'] << self.featureconverter_train[label][i_node].outputs['feat_out'])
                            self.links_Combat1_train[label][i_node].collapse = 'train'

                        if self.TrainTest:
                            self.links_Combat1_test[label] = list()
                            for i_node, fname in enumerate(self.featurecalculators[label]):
                                self.links_Combat1_test[label].append(self.ComBat.inputs['features_test'][f'{label}_{self.featurecalculators[label][i_node]}'] << self.featureconverter_test[label][i_node].outputs['feat_out'])
                                self.links_Combat1_test[label][i_node].collapse = 'test'

                    # -----------------------------------------------------
                    # Classification nodes
                    # Add the features from this modality to the classifier node input
                    self.links_C1_train[label] = list()
                    self.sinks_features_train[label] = list()
                    if self.TrainTest:
                        self.links_C1_test[label] = list()
                        self.sinks_features_test[label] = list()

                    for i_node, fname in enumerate(self.featurecalculators[label]):
                        # Create sink for feature outputs
                        self.sinks_features_train[label].append(self.network.create_sink('HDF5', id='features_train_' + label + '_' + fname, step_id='train_sinks'))

                        # Append features to the classification
                        # (with ComBat enabled, the classifier instead receives
                        # the harmonized features, linked in add_ComBat)
                        if not self.configs[0]['General']['ComBat'] == 'True':
                            self.links_C1_train[label].append(self.classify.inputs['features_train'][f'{label}_{self.featurecalculators[label][i_node]}'] << self.featureconverter_train[label][i_node].outputs['feat_out'])
                            self.links_C1_train[label][i_node].collapse = 'train'

                        # Save output
                        self.sinks_features_train[label][i_node].input = self.featureconverter_train[label][i_node].outputs['feat_out']

                        # Similar for testing workflow
                        if self.TrainTest:
                            # Create sink for feature outputs
                            self.sinks_features_test[label].append(self.network.create_sink('HDF5', id='features_test_' + label + '_' + fname, step_id='test_sinks'))

                            # Append features to the classification
                            if not self.configs[0]['General']['ComBat'] == 'True':
                                self.links_C1_test[label].append(self.classify.inputs['features_test'][f'{label}_{self.featurecalculators[label][i_node]}'] << self.featureconverter_test[label][i_node].outputs['feat_out'])
                                self.links_C1_test[label][i_node].collapse = 'test'

                            # Save output
                            self.sinks_features_test[label][i_node].input = self.featureconverter_test[label][i_node].outputs['feat_out']

            else:
                # Features already provided: hence we can skip numerous nodes
                self.sources_features_train = dict()
                self.links_C1_train = dict()

                if self.features_test:
                    self.sources_features_test = dict()
                    self.links_C1_test = dict()

                # Create label for each modality/image
                self.modlabels = list()
                # NOTE(review): the enumerate index `num` is immediately
                # overwritten with 0 below; kept as-is to preserve behavior.
                for num, mod in enumerate(image_types):
                    num = 0
                    label = mod + str(num)
                    while label in self.sources_features_train.keys():
                        # if label exists, add number to label
                        num += 1
                        label = mod + str(num)
                    self.modlabels.append(label)

                    # Create a node for the feature computation
                    self.sources_features_train[label] = self.network.create_source('HDF5', id='features_train_' + label, node_group='train', step_id='train_sources')

                    # Add the features from this modality to the classifier node input
                    self.links_C1_train[label] = self.classify.inputs['features_train'][str(label)] << self.sources_features_train[label].output
                    self.links_C1_train[label].collapse = 'train'

                    if self.features_test:
                        # Create a node for the feature computation
                        self.sources_features_test[label] = self.network.create_source('HDF5', id='features_test_' + label, node_group='test', step_id='test_sources')

                        # Add the features from this modality to the classifier node input
                        self.links_C1_test[label] = self.classify.inputs['features_test'][str(label)] << self.sources_features_test[label].output
                        self.links_C1_test[label].collapse = 'test'
        else:
            raise WORCexceptions.WORCIOError("Please provide labels.")
    else:
        raise WORCexceptions.WORCIOError("Please provide either images or features.")
def add_ComBat(self):
    """Add ComBat feature harmonization to the network.

    Creates the ComBat fastr node, links the classification config and
    the train (and, in a train-test setup, test) patient-class sources
    to it, and routes the harmonized features both to HDF5 sinks and to
    the classify node.

    NOTE: ComBat is applied on all objects at once, not within a
    train-test or cross-validation setting.
    """
    memory = self.fastr_memory_parameters['ComBat']
    self.ComBat =\
        self.network.create_node('combat/ComBat:1.0',
                                 tool_version='1.0',
                                 id='ComBat',
                                 resources=ResourceLimit(memory=memory),
                                 step_id='ComBat')

    # Create sink for ComBat output
    self.sinks_features_train_ComBat = self.network.create_sink('HDF5', id='features_train_ComBat', step_id='ComBat')

    # Create links for inputs
    self.link_combat_1 = self.network.create_link(self.source_class_config.output, self.ComBat.inputs['config'])
    self.link_combat_2 = self.network.create_link(self.source_patientclass_train.output, self.ComBat.inputs['patientclass_train'])
    self.link_combat_1.collapse = 'conf'
    self.link_combat_2.collapse = 'pctrain'
    # NOTE(review): these dicts are only initialized here; presumably they
    # are filled with per-modality feature links elsewhere — confirm.
    self.links_Combat1_train = dict()
    self.links_Combat1_test = dict()

    # Link Combat output to both sink and classify node
    self.links_Combat_out_train = self.network.create_link(self.ComBat.outputs['features_train_out'], self.classify.inputs['features_train'])
    self.links_Combat_out_train.collapse = 'ComBat'
    self.sinks_features_train_ComBat.input = self.ComBat.outputs['features_train_out']

    if self.TrainTest:
        # Create sink for ComBat output
        self.sinks_features_test_ComBat = self.network.create_sink('HDF5', id='features_test_ComBat', step_id='ComBat')

        # Create links for inputs
        self.link_combat_3 = self.network.create_link(self.source_patientclass_test.output, self.ComBat.inputs['patientclass_test'])
        self.link_combat_3.collapse = 'pctest'

        # Link Combat output to both sink and classify node
        self.links_Combat_out_test = self.network.create_link(self.ComBat.outputs['features_test_out'], self.classify.inputs['features_test'])
        self.links_Combat_out_test.collapse = 'ComBat'
        self.sinks_features_test_ComBat.input = self.ComBat.outputs['features_test_out']
def add_preprocessing(self, preprocess_node, label, nmod):
    """Add nodes required for preprocessing of images.

    Parameters
    ----------
    preprocess_node: str, fastr tool identifier of the preprocessing tool.
    label: str, modality label (e.g. 'CT0') keying the per-modality dicts.
    nmod: int, index of this modality, used to check whether per-modality
        metadata is provided.
    """
    memory = self.fastr_memory_parameters['Preprocessing']
    self.preprocessing_train[label] = self.network.create_node(preprocess_node, tool_version='1.0', id='preprocessing_train_' + label, resources=ResourceLimit(memory=memory), step_id='Preprocessing')
    if self.TrainTest:
        self.preprocessing_test[label] = self.network.create_node(preprocess_node, tool_version='1.0', id='preprocessing_test_' + label, resources=ResourceLimit(memory=memory), step_id='Preprocessing')

    # Create required links: config parameters and converted input image
    self.preprocessing_train[label].inputs['parameters'] = self.sources_parameters[label].output
    self.preprocessing_train[label].inputs['image'] = self.converters_im_train[label].outputs['image']
    if self.TrainTest:
        self.preprocessing_test[label].inputs['parameters'] = self.sources_parameters[label].output
        self.preprocessing_test[label].inputs['image'] = self.converters_im_test[label].outputs['image']

    # Optional metadata input, only when provided for this modality
    if self.metadata_train and len(self.metadata_train) >= nmod + 1:
        self.preprocessing_train[label].inputs['metadata'] = self.sources_metadata_train[label].output

    # NOTE(review): this assumes metadata_test implies TrainTest; if
    # metadata_test is set without a test set, preprocessing_test[label]
    # does not exist — confirm against callers.
    if self.metadata_test and len(self.metadata_test) >= nmod + 1:
        self.preprocessing_test[label].inputs['metadata'] = self.sources_metadata_test[label].output

    # If there are masks to use in normalization, add them here
    if self.masks_normalize_train:
        self.sources_masks_normalize_train[label] = self.network.create_source('ITKImageFile', id='masks_normalize_train_' + label, node_group='train', step_id='Preprocessing')
        self.preprocessing_train[label].inputs['mask'] = self.sources_masks_normalize_train[label].output

    # NOTE(review): same assumption as above for the test branch.
    if self.masks_normalize_test:
        self.sources_masks_normalize_test[label] = self.network.create_source('ITKImageFile', id='masks_normalize_test_' + label, node_group='test', step_id='Preprocessing')
        self.preprocessing_test[label].inputs['mask'] = self.sources_masks_normalize_test[label].output
def add_feature_calculator(self, calcfeat_node, label, nmod):
    """Add a feature calculation node to the network.

    Creates the feature-extraction node (train and, if applicable, test),
    wires its parameter/image inputs, adds toolbox-specific sources
    (PyRadiomics config/format, PREDICT metadata/semantics), and appends a
    FeatureConverter node to make the features WORC compatible.

    Parameters
    ----------
    calcfeat_node: str, fastr tool identifier of the feature extraction
        tool; must contain 'pyradiomics' or 'predict' (case-insensitive).
    label: str, modality label keying the per-modality dicts.
    nmod: int, modality index, used to check per-modality metadata and
        semantics availability.

    Raises
    ------
    WORCexceptions.WORCKeyError if the toolbox is not recognized.
    """
    # Name of fastr node has to exclude some specific symbols, which
    # are used in the node name
    node_ID = '_'.join([calcfeat_node.replace(':', '_').replace('.', '_').replace('/', '_'),
                        label])

    memory = self.fastr_memory_parameters['FeatureCalculator']
    node_train =\
        self.network.create_node(calcfeat_node,
                                 tool_version='1.0',
                                 id='calcfeatures_train_' + node_ID,
                                 resources=ResourceLimit(memory=memory),
                                 step_id='Feature_Extraction')

    if self.TrainTest:
        node_test =\
            self.network.create_node(calcfeat_node,
                                     tool_version='1.0',
                                     id='calcfeatures_test_' + node_ID,
                                     resources=ResourceLimit(memory=memory),
                                     step_id='Feature_Extraction')

    # Check if we need to add pyradiomics specific sources
    if 'pyradiomics' in calcfeat_node.lower():
        # Add a config source
        self.source_config_pyradiomics[label] =\
            self.network.create_source('YamlFile',
                                       id='config_pyradiomics_' + label,
                                       node_group='train',
                                       step_id='Feature_Extraction')

        # Add a format source, which we are going to set to a constant
        # And attach to the tool node
        self.source_format_pyradiomics =\
            self.network.create_constant('String', 'csv',
                                         id='format_pyradiomics_' + label,
                                         node_group='train',
                                         step_id='Feature_Extraction')
        node_train.inputs['format'] =\
            self.source_format_pyradiomics.output

        if self.TrainTest:
            node_test.inputs['format'] =\
                self.source_format_pyradiomics.output

    # Create required links
    # We can have a different config for different tools
    if 'pyradiomics' in calcfeat_node.lower():
        node_train.inputs['parameters'] =\
            self.source_config_pyradiomics[label].output
    else:
        node_train.inputs['parameters'] =\
            self.sources_parameters[label].output

    node_train.inputs['image'] =\
        self.preprocessing_train[label].outputs['image']

    if self.TrainTest:
        if 'pyradiomics' in calcfeat_node.lower():
            node_test.inputs['parameters'] =\
                self.source_config_pyradiomics[label].output
        else:
            node_test.inputs['parameters'] =\
                self.sources_parameters[label].output

        node_test.inputs['image'] =\
            self.preprocessing_test[label].outputs['image']

    # PREDICT can extract semantic and metadata features
    if 'predict' in calcfeat_node.lower():
        if self.metadata_train and len(self.metadata_train) >= nmod + 1:
            node_train.inputs['metadata'] =\
                self.sources_metadata_train[label].output

        if self.metadata_test and len(self.metadata_test) >= nmod + 1:
            node_test.inputs['metadata'] =\
                self.sources_metadata_test[label].output

        # If a semantics file is provided, connect to feature extraction tool
        if self.semantics_train and len(self.semantics_train) >= nmod + 1:
            self.sources_semantics_train[label] =\
                self.network.create_source('CSVFile',
                                           id='semantics_train_' + label,
                                           step_id='train_sources')
            node_train.inputs['semantics'] =\
                self.sources_semantics_train[label].output

        if self.semantics_test and len(self.semantics_test) >= nmod + 1:
            self.sources_semantics_test[label] =\
                self.network.create_source('CSVFile',
                                           id='semantics_test_' + label,
                                           step_id='test_sources')
            node_test.inputs['semantics'] =\
                self.sources_semantics_test[label].output

    # Add feature converter to make features WORC compatible
    conv_train =\
        self.network.create_node('worc/FeatureConverter:1.0',
                                 tool_version='1.0',
                                 id='featureconverter_train_' + node_ID,
                                 resources=ResourceLimit(memory='4G'),
                                 step_id='Feature_Extraction')

    conv_train.inputs['feat_in'] = node_train.outputs['features']

    # Add source to tell converter which toolbox we use
    if 'pyradiomics' in calcfeat_node.lower():
        toolbox = 'PyRadiomics'
    elif 'predict' in calcfeat_node.lower():
        toolbox = 'PREDICT'
    else:
        message = f'Toolbox {calcfeat_node} not recognized!'
        raise WORCexceptions.WORCKeyError(message)

    self.source_toolbox_name[label] =\
        self.network.create_constant('String', toolbox,
                                     id=f'toolbox_name_{toolbox}_{label}',
                                     step_id='Feature_Extraction')

    conv_train.inputs['toolbox'] = self.source_toolbox_name[label].output
    conv_train.inputs['config'] = self.sources_parameters[label].output

    if self.TrainTest:
        conv_test =\
            self.network.create_node('worc/FeatureConverter:1.0',
                                     tool_version='1.0',
                                     id='featureconverter_test_' + node_ID,
                                     resources=ResourceLimit(memory='4G'),
                                     step_id='Feature_Extraction')

        conv_test.inputs['feat_in'] = node_test.outputs['features']
        conv_test.inputs['toolbox'] = self.source_toolbox_name[label].output
        conv_test.inputs['config'] = self.sources_parameters[label].output

    # Append extractor and converter nodes to the per-modality lists
    self.calcfeatures_train[label].append(node_train)
    self.featureconverter_train[label].append(conv_train)
    if self.TrainTest:
        self.calcfeatures_test[label].append(node_test)
        self.featureconverter_test[label].append(conv_test)
def add_elastix_sourcesandsinks(self):
    """Add sources and sinks required for image registration.

    Initializes ``segmode`` to ``'Register'`` and creates empty dicts
    (to be filled per modality by ``add_elastix``) for all registration
    related sources, nodes, converters and sinks, for both the train
    and the test branch of the network.
    """
    self.segmode = 'Register'
    self.sources_segmentation = dict()
    self.source_Elastix_Parameters = dict()

    # Paired train/test containers, one dict per attribute per branch.
    paired_attributes = (
        'elastix_nodes',
        'transformix_seg_nodes',
        'sources_segmentations',
        'sinks_transformations',
        'sinks_segmentations_elastix',
        'sinks_images_elastix',
        'converters_seg',
        'edittransformfile_nodes',
        'transformix_im_nodes',
    )
    for attribute in paired_attributes:
        setattr(self, f'{attribute}_train', dict())
        setattr(self, f'{attribute}_test', dict())
def add_elastix(self, label, nmod):
    """Add image registration through elastix to the network.

    For the first modality (``nmod == 0``) only the segmentation sources
    and converters are created, as the provided segmentation is assumed
    to be on that modality. For every subsequent modality the full
    registration chain is built: elastix registration, transform file
    editing, and transformix resampling of both segmentation and image,
    with corresponding sinks.

    Parameters
    ----------
    label: str, modality label keying the per-modality dicts.
    nmod: int, modality index; 0 means the reference (moving) modality.
    """
    # Create sources and converter only for the given segmentation,
    # which should be on the first modality
    if nmod == 0:
        memory = self.fastr_memory_parameters['WORCCastConvert']
        self.sources_segmentations_train[label] =\
            self.network.create_source('ITKImageFile',
                                       id='segmentations_train_' + label,
                                       node_group='input',
                                       step_id='train_sources')

        self.converters_seg_train[label] =\
            self.network.create_node('worc/WORCCastConvert:0.3.2',
                                     tool_version='0.1',
                                     id='convert_seg_train_' + label,
                                     resources=ResourceLimit(memory=memory),
                                     step_id='FileConversion')

        self.converters_seg_train[label].inputs['image'] =\
            self.sources_segmentations_train[label].output

        if self.TrainTest:
            self.sources_segmentations_test[label] =\
                self.network.create_source('ITKImageFile',
                                           id='segmentations_test_' + label,
                                           node_group='input',
                                           step_id='test_sources')

            self.converters_seg_test[label] =\
                self.network.create_node('worc/WORCCastConvert:0.3.2',
                                         tool_version='0.1',
                                         id='convert_seg_test_' + label,
                                         resources=ResourceLimit(memory=memory),
                                         step_id='FileConversion')

            self.converters_seg_test[label].inputs['image'] =\
                self.sources_segmentations_test[label].output

    # Assume provided segmentation is on first modality
    if nmod > 0:
        # Use elastix and transformix for registration
        # NOTE: Assume elastix node type is on first configuration
        elastix_node =\
            str(self.configs[0]['General']['RegistrationNode'])

        transformix_node =\
            str(self.configs[0]['General']['TransformationNode'])

        memory_elastix = self.fastr_memory_parameters['Elastix']
        self.elastix_nodes_train[label] =\
            self.network.create_node(elastix_node,
                                     tool_version='0.2',
                                     id='elastix_train_' + label,
                                     resources=ResourceLimit(memory=memory_elastix),
                                     step_id='Image_Registration')

        memory_transformix = self.fastr_memory_parameters['Elastix']
        self.transformix_seg_nodes_train[label] =\
            self.network.create_node(transformix_node,
                                     tool_version='0.2',
                                     id='transformix_seg_train_' + label,
                                     resources=ResourceLimit(memory=memory_transformix),
                                     step_id='Image_Registration')

        self.transformix_im_nodes_train[label] =\
            self.network.create_node(transformix_node,
                                     tool_version='0.2',
                                     id='transformix_im_train_' + label,
                                     resources=ResourceLimit(memory=memory_transformix),
                                     step_id='Image_Registration')

        if self.TrainTest:
            self.elastix_nodes_test[label] =\
                self.network.create_node(elastix_node,
                                         tool_version='0.2',
                                         id='elastix_test_' + label,
                                         resources=ResourceLimit(memory=memory_elastix),
                                         step_id='Image_Registration')

            self.transformix_seg_nodes_test[label] =\
                self.network.create_node(transformix_node,
                                         tool_version='0.2',
                                         id='transformix_seg_test_' + label,
                                         resources=ResourceLimit(memory=memory_transformix),
                                         step_id='Image_Registration')

            self.transformix_im_nodes_test[label] =\
                self.network.create_node(transformix_node,
                                         tool_version='0.2',
                                         id='transformix_im_test_' + label,
                                         resources=ResourceLimit(memory=memory_transformix),
                                         step_id='Image_Registration')

        # Create sources_segmentation
        # M1 = moving, others = fixed
        self.elastix_nodes_train[label].inputs['fixed_image'] =\
            self.converters_im_train[label].outputs['image']

        self.elastix_nodes_train[label].inputs['moving_image'] =\
            self.converters_im_train[self.modlabels[0]].outputs['image']

        # Add node that copies metadata from the image to the
        # segmentation if required
        if self.CopyMetadata:
            # Copy metadata from the image which was registered to
            # the segmentation, if it is not created yet
            if not hasattr(self, "copymetadata_nodes_train"):
                # NOTE: Do this for first modality, as we assume
                # the segmentation is on that one
                self.copymetadata_nodes_train = dict()
                self.copymetadata_nodes_train[self.modlabels[0]] =\
                    self.network.create_node('itktools/0.3.2/CopyMetadata:1.0',
                                             tool_version='1.0',
                                             id='CopyMetadata_train_' + self.modlabels[0],
                                             step_id='Image_Registration')

                self.copymetadata_nodes_train[self.modlabels[0]].inputs["source"] =\
                    self.converters_im_train[self.modlabels[0]].outputs['image']

                self.copymetadata_nodes_train[self.modlabels[0]].inputs["destination"] =\
                    self.converters_seg_train[self.modlabels[0]].outputs['image']

            self.transformix_seg_nodes_train[label].inputs['image'] =\
                self.copymetadata_nodes_train[self.modlabels[0]].outputs['output']
        else:
            self.transformix_seg_nodes_train[label].inputs['image'] =\
                self.converters_seg_train[self.modlabels[0]].outputs['image']

        if self.TrainTest:
            self.elastix_nodes_test[label].inputs['fixed_image'] =\
                self.converters_im_test[label].outputs['image']

            self.elastix_nodes_test[label].inputs['moving_image'] =\
                self.converters_im_test[self.modlabels[0]].outputs['image']

            if self.CopyMetadata:
                # Copy metadata from the image which was registered
                # to the segmentation
                if not hasattr(self, "copymetadata_nodes_test"):
                    # NOTE: Do this for first modality, as we assume
                    # the segmentation is on that one
                    self.copymetadata_nodes_test = dict()
                    self.copymetadata_nodes_test[self.modlabels[0]] =\
                        self.network.create_node('itktools/0.3.2/CopyMetadata:1.0',
                                                 tool_version='1.0',
                                                 id='CopyMetadata_test_' + self.modlabels[0],
                                                 step_id='Image_Registration')

                    self.copymetadata_nodes_test[self.modlabels[0]].inputs["source"] =\
                        self.converters_im_test[self.modlabels[0]].outputs['image']

                    self.copymetadata_nodes_test[self.modlabels[0]].inputs["destination"] =\
                        self.converters_seg_test[self.modlabels[0]].outputs['image']

                self.transformix_seg_nodes_test[label].inputs['image'] =\
                    self.copymetadata_nodes_test[self.modlabels[0]].outputs['output']
            else:
                self.transformix_seg_nodes_test[label].inputs['image'] =\
                    self.converters_seg_test[self.modlabels[0]].outputs['image']

        # Apply registration to input modalities
        self.source_Elastix_Parameters[label] =\
            self.network.create_source('ElastixParameterFile',
                                       id='Elastix_Para_' + label,
                                       node_group='elpara',
                                       step_id='Image_Registration')

        self.link_elparam_train =\
            self.network.create_link(self.source_Elastix_Parameters[label].output,
                                     self.elastix_nodes_train[label].inputs['parameters'])
        self.link_elparam_train.collapse = 'elpara'

        if self.TrainTest:
            self.link_elparam_test =\
                self.network.create_link(self.source_Elastix_Parameters[label].output,
                                         self.elastix_nodes_test[label].inputs['parameters'])
            self.link_elparam_test.collapse = 'elpara'

        # Optionally restrict registration to the provided masks
        if self.masks_train:
            self.elastix_nodes_train[label].inputs['fixed_mask'] =\
                self.converters_masks_train[label].outputs['image']

            self.elastix_nodes_train[label].inputs['moving_mask'] =\
                self.converters_masks_train[self.modlabels[0]].outputs['image']

        if self.TrainTest:
            if self.masks_test:
                self.elastix_nodes_test[label].inputs['fixed_mask'] =\
                    self.converters_masks_test[label].outputs['image']

                self.elastix_nodes_test[label].inputs['moving_mask'] =\
                    self.converters_masks_test[self.modlabels[0]].outputs['image']

        # Change the FinalBSpline Interpolation order to 0 as required for
        # binary images: see https://github.com/SuperElastix/elastix/wiki/FAQ
        self.edittransformfile_nodes_train[label] =\
            self.network.create_node('elastixtools/EditElastixTransformFile:0.1',
                                     tool_version='0.1',
                                     id='EditElastixTransformFile_train_' + label,
                                     step_id='Image_Registration')

        self.edittransformfile_nodes_train[label].inputs['set'] =\
            ["FinalBSplineInterpolationOrder=0"]

        self.edittransformfile_nodes_train[label].inputs['transform'] =\
            self.elastix_nodes_train[label].outputs['transform'][-1]

        if self.TrainTest:
            self.edittransformfile_nodes_test[label] =\
                self.network.create_node('elastixtools/EditElastixTransformFile:0.1',
                                         tool_version='0.1',
                                         id='EditElastixTransformFile_test_' + label,
                                         step_id='Image_Registration')

            self.edittransformfile_nodes_test[label].inputs['set'] =\
                ["FinalBSplineInterpolationOrder=0"]

            self.edittransformfile_nodes_test[label].inputs['transform'] =\
                self.elastix_nodes_test[label].outputs['transform'][-1]

        # Link data and transformation to transformix and source
        self.transformix_seg_nodes_train[label].inputs['transform'] =\
            self.edittransformfile_nodes_train[label].outputs['transform']

        self.transformix_im_nodes_train[label].inputs['transform'] =\
            self.elastix_nodes_train[label].outputs['transform'][-1]

        self.transformix_im_nodes_train[label].inputs['image'] =\
            self.converters_im_train[self.modlabels[0]].outputs['image']

        if self.TrainTest:
            self.transformix_seg_nodes_test[label].inputs['transform'] =\
                self.edittransformfile_nodes_test[label].outputs['transform']

            self.transformix_im_nodes_test[label].inputs['transform'] =\
                self.elastix_nodes_test[label].outputs['transform'][-1]

            self.transformix_im_nodes_test[label].inputs['image'] =\
                self.converters_im_test[self.modlabels[0]].outputs['image']

        if self.configs[nmod]['General']['Segmentix'] != 'True':
            # These segmentations serve as input for the feature calculation
            for i_node in range(len(self.calcfeatures_train[label])):
                self.calcfeatures_train[label][i_node].inputs['segmentation'] =\
                    self.transformix_seg_nodes_train[label].outputs['image']
                if self.TrainTest:
                    self.calcfeatures_test[label][i_node].inputs['segmentation'] =\
                        self.transformix_seg_nodes_test[label].outputs['image']

        # Save output for the training set
        self.sinks_transformations_train[label] =\
            self.network.create_sink('ElastixTransformFile',
                                     id='transformations_train_' + label,
                                     step_id='train_sinks')

        self.sinks_segmentations_elastix_train[label] =\
            self.network.create_sink('ITKImageFile',
                                     id='segmentations_out_elastix_train_' + label,
                                     step_id='train_sinks')

        self.sinks_images_elastix_train[label] =\
            self.network.create_sink('ITKImageFile',
                                     id='images_out_elastix_train_' + label,
                                     step_id='train_sinks')

        self.sinks_transformations_train[label].input =\
            self.elastix_nodes_train[label].outputs['transform']

        self.sinks_segmentations_elastix_train[label].input =\
            self.transformix_seg_nodes_train[label].outputs['image']

        self.sinks_images_elastix_train[label].input =\
            self.transformix_im_nodes_train[label].outputs['image']

        # Save output for the test set
        if self.TrainTest:
            self.sinks_transformations_test[label] =\
                self.network.create_sink('ElastixTransformFile',
                                         id='transformations_test_' + label,
                                         step_id='test_sinks')

            self.sinks_segmentations_elastix_test[label] =\
                self.network.create_sink('ITKImageFile',
                                         id='segmentations_out_elastix_test_' + label,
                                         step_id='test_sinks')

            self.sinks_images_elastix_test[label] =\
                self.network.create_sink('ITKImageFile',
                                         id='images_out_elastix_test_' + label,
                                         step_id='test_sinks')

            self.sinks_transformations_test[label].input =\
                self.elastix_nodes_test[label].outputs['transform']

            self.sinks_segmentations_elastix_test[label].input =\
                self.transformix_seg_nodes_test[label].outputs['image']

            self.sinks_images_elastix_test[label].input =\
                self.transformix_im_nodes_test[label].outputs['image']
def add_segmentix(self, label, nmod):
    """Add segmentix to the network.

    Creates a Segmentix node per modality that converts the input
    segmentation into the correct contour, feeding either the original
    converted segmentation or the registration (transformix) output,
    and routes the result to the feature calculation nodes and a sink.

    Parameters
    ----------
    label: str, modality label keying the per-modality dicts.
    nmod: int, modality index, used to check metadata/mask availability.
    """
    # Segmentix nodes -------------------------------------------------
    # Use segmentix node to convert input segmentation into
    # correct contour
    if label not in self.sinks_segmentations_segmentix_train:
        self.sinks_segmentations_segmentix_train[label] =\
            self.network.create_sink('ITKImageFile',
                                     id='segmentations_out_segmentix_train_' + label,
                                     step_id='train_sinks')

    memory = self.fastr_memory_parameters['Segmentix']
    self.nodes_segmentix_train[label] =\
        self.network.create_node('segmentix/Segmentix:1.0',
                                 tool_version='1.0',
                                 id='segmentix_train_' + label,
                                 resources=ResourceLimit(memory=memory),
                                 step_id='Preprocessing')

    # Input the image
    self.nodes_segmentix_train[label].inputs['image'] =\
        self.converters_im_train[label].outputs['image']

    # Input the metadata
    if self.metadata_train and len(self.metadata_train) >= nmod + 1:
        self.nodes_segmentix_train[label].inputs['metadata'] = self.sources_metadata_train[label].output

    # Input the segmentation: prefer the registered segmentation if
    # elastix/transformix was used for this modality
    if hasattr(self, 'transformix_seg_nodes_train'):
        if label in self.transformix_seg_nodes_train.keys():
            # Use output of registration in segmentix
            self.nodes_segmentix_train[label].inputs['segmentation_in'] =\
                self.transformix_seg_nodes_train[label].outputs['image']
        else:
            # Use original segmentation
            self.nodes_segmentix_train[label].inputs['segmentation_in'] =\
                self.converters_seg_train[label].outputs['image']
    else:
        # Use original segmentation
        self.nodes_segmentix_train[label].inputs['segmentation_in'] =\
            self.converters_seg_train[label].outputs['image']

    # Input the parameters
    self.nodes_segmentix_train[label].inputs['parameters'] =\
        self.sources_parameters[label].output

    self.sinks_segmentations_segmentix_train[label].input =\
        self.nodes_segmentix_train[label].outputs['segmentation_out']

    if self.TrainTest:
        self.sinks_segmentations_segmentix_test[label] =\
            self.network.create_sink('ITKImageFile',
                                     id='segmentations_out_segmentix_test_' + label,
                                     step_id='test_sinks')

        self.nodes_segmentix_test[label] =\
            self.network.create_node('segmentix/Segmentix:1.0',
                                     tool_version='1.0',
                                     id='segmentix_test_' + label,
                                     resources=ResourceLimit(memory=memory),
                                     step_id='Preprocessing')

        # Input the image
        self.nodes_segmentix_test[label].inputs['image'] =\
            self.converters_im_test[label].outputs['image']

        # Input the metadata
        if self.metadata_test and len(self.metadata_test) >= nmod + 1:
            self.nodes_segmentix_test[label].inputs['metadata'] = self.sources_metadata_test[label].output

        # Same registered-vs-original fallback as the train branch
        if hasattr(self, 'transformix_seg_nodes_test'):
            if label in self.transformix_seg_nodes_test.keys():
                # Use output of registration in segmentix
                self.nodes_segmentix_test[label].inputs['segmentation_in'] =\
                    self.transformix_seg_nodes_test[label].outputs['image']
            else:
                # Use original segmentation
                self.nodes_segmentix_test[label].inputs['segmentation_in'] =\
                    self.converters_seg_test[label].outputs['image']
        else:
            # Use original segmentation
            self.nodes_segmentix_test[label].inputs['segmentation_in'] =\
                self.converters_seg_test[label].outputs['image']

        self.nodes_segmentix_test[label].inputs['parameters'] =\
            self.sources_parameters[label].output

        self.sinks_segmentations_segmentix_test[label].input =\
            self.nodes_segmentix_test[label].outputs['segmentation_out']

    # Feed the processed segmentation into every feature extractor
    for i_node in range(len(self.calcfeatures_train[label])):
        self.calcfeatures_train[label][i_node].inputs['segmentation'] =\
            self.nodes_segmentix_train[label].outputs['segmentation_out']

        if self.TrainTest:
            self.calcfeatures_test[label][i_node].inputs['segmentation'] =\
                self.nodes_segmentix_test[label].outputs['segmentation_out']

    if self.masks_train and len(self.masks_train) >= nmod + 1:
        # Use masks
        self.nodes_segmentix_train[label].inputs['mask'] =\
            self.converters_masks_train[label].outputs['image']

    # NOTE(review): assumes masks_test implies TrainTest; otherwise
    # nodes_segmentix_test[label] would not exist — confirm.
    if self.masks_test and len(self.masks_test) >= nmod + 1:
        # Use masks
        self.nodes_segmentix_test[label].inputs['mask'] =\
            self.converters_masks_test[label].outputs['image']
def set(self):
    """Set the FASTR source and sink data based on the given attributes.

    Writes the configuration files to disk (``save_config``), then fills
    ``self.source_data`` and ``self.sink_data`` with per-modality entries
    matching the node ids created during ``build``. Sink locations are
    vfs:// URLs under the output mount. Finally delegates to the
    evaluation branch if one was added.
    """
    self.fastrconfigs = list()
    self.source_data = dict()
    self.sink_data = dict()

    # Save the configurations as files
    self.save_config()

    # fixed splits
    if self.fixedsplits:
        self.source_data['fixedsplits_source'] = self.fixedsplits

    # Generate gridsearch parameter files if required
    self.source_data['config_classification_source'] = self.fastrconfigs[0]

    # Set source and sink data
    self.source_data['patientclass_train'] = self.labels_train
    self.source_data['patientclass_test'] = self.labels_test

    self.sink_data['classification'] = ("vfs://output/{}/estimator_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
    self.sink_data['performance'] = ("vfs://output/{}/performance_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
    self.sink_data['config_classification_sink'] = ("vfs://output/{}/config_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
    self.sink_data['features_train_ComBat'] = ("vfs://output/{}/ComBat/features_ComBat_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
    self.sink_data['features_test_ComBat'] = ("vfs://output/{}/ComBat/features_ComBat_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)

    # Set the source data from the WORC objects you created
    for num, label in enumerate(self.modlabels):
        self.source_data['config_' + label] = self.fastrconfigs[num]
        if self.pyradiomics_configs:
            self.source_data['config_pyradiomics_' + label] = self.pyradiomics_configs[num]

        # Add train data sources, only for inputs provided for this modality
        if self.images_train and len(self.images_train) - 1 >= num:
            self.source_data['images_train_' + label] = self.images_train[num]

        if self.masks_train and len(self.masks_train) - 1 >= num:
            self.source_data['mask_train_' + label] = self.masks_train[num]

        if self.masks_normalize_train and len(self.masks_normalize_train) - 1 >= num:
            self.source_data['masks_normalize_train_' + label] = self.masks_normalize_train[num]

        if self.metadata_train and len(self.metadata_train) - 1 >= num:
            self.source_data['metadata_train_' + label] = self.metadata_train[num]

        if self.segmentations_train and len(self.segmentations_train) - 1 >= num:
            self.source_data['segmentations_train_' + label] = self.segmentations_train[num]

        if self.semantics_train and len(self.semantics_train) - 1 >= num:
            self.source_data['semantics_train_' + label] = self.semantics_train[num]

        if self.features_train and len(self.features_train) - 1 >= num:
            self.source_data['features_train_' + label] = self.features_train[num]

        if self.Elastix_Para:
            # First modality does not need to be registered
            if num > 0:
                if len(self.Elastix_Para) > 1:
                    # Each modality has its own registration parameters
                    self.source_data['Elastix_Para_' + label] = self.Elastix_Para[num]
                else:
                    # Use one fileset for all modalities
                    self.source_data['Elastix_Para_' + label] = self.Elastix_Para[0]

        # Add test data sources
        if self.images_test and len(self.images_test) - 1 >= num:
            self.source_data['images_test_' + label] = self.images_test[num]

        if self.masks_test and len(self.masks_test) - 1 >= num:
            self.source_data['mask_test_' + label] = self.masks_test[num]

        if self.masks_normalize_test and len(self.masks_normalize_test) - 1 >= num:
            self.source_data['masks_normalize_test_' + label] = self.masks_normalize_test[num]

        if self.metadata_test and len(self.metadata_test) - 1 >= num:
            self.source_data['metadata_test_' + label] = self.metadata_test[num]

        if self.segmentations_test and len(self.segmentations_test) - 1 >= num:
            self.source_data['segmentations_test_' + label] = self.segmentations_test[num]

        if self.semantics_test and len(self.semantics_test) - 1 >= num:
            self.source_data['semantics_test_' + label] = self.semantics_test[num]

        if self.features_test and len(self.features_test) - 1 >= num:
            self.source_data['features_test_' + label] = self.features_test[num]

        self.sink_data['segmentations_out_segmentix_train_' + label] = ("vfs://output/{}/Segmentations/seg_{}_segmentix_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)
        self.sink_data['segmentations_out_elastix_train_' + label] = ("vfs://output/{}/Elastix/seg_{}_elastix_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)
        self.sink_data['images_out_elastix_train_' + label] = ("vfs://output/{}/Elastix/im_{}_elastix_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)

        if hasattr(self, 'featurecalculators'):
            for f in self.featurecalculators[label]:
                self.sink_data['features_train_' + label + '_' + f] = ("vfs://output/{}/Features/features_{}_{}_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, f, label)

        if self.labels_test:
            self.sink_data['segmentations_out_segmentix_test_' + label] = ("vfs://output/{}/Segmentations/seg_{}_segmentix_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)
            self.sink_data['segmentations_out_elastix_test_' + label] = ("vfs://output/{}/Elastix/seg_{}_elastix_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)
            # NOTE(review): the train counterpart writes to the 'Elastix'
            # subfolder while this writes to 'Images' — possibly unintended;
            # confirm before changing.
            self.sink_data['images_out_elastix_test_' + label] = ("vfs://output/{}/Images/im_{}_elastix_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)
            if hasattr(self, 'featurecalculators'):
                for f in self.featurecalculators[label]:
                    self.sink_data['features_test_' + label + '_' + f] = ("vfs://output/{}/Features/features_{}_{}_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, f, label)

        # Add elastix sinks if used
        if self.segmode:
            # Segmode is only non-empty if segmentations are provided
            if self.segmode == 'Register':
                self.sink_data['transformations_train_' + label] = ("vfs://output/{}/Elastix/transformation_{}_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)
                if self.TrainTest:
                    self.sink_data['transformations_test_' + label] = ("vfs://output/{}/Elastix/transformation_{}_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)

    if self._add_evaluation:
        self.Evaluate.set()
def execute(self):
    """Execute the network through the fastr.network.execute command.

    Also draws the network diagram (best-effort: missing or failing
    Graphviz only prints a warning) and exports the hyperparameter
    search space of each config to a LaTeX table next to the config file.
    """
    # Draw and execute network
    try:
        self.network.draw(file_path=self.network.id + '.svg', draw_dimensions=True)
    except graphviz.backend.ExecutableNotFound:
        print('[WORC WARNING] Graphviz executable not found: not drawing network diagram. Make sure the Graphviz executables are on your systems PATH.')
    except graphviz.backend.CalledProcessError as e:
        print(f'[WORC WARNING] Graphviz executable gave an error: not drawing network diagram. Original error: {e}')

    # export hyper param. search space to LaTeX table
    for config in self.fastrconfigs:
        # Configs are stored as vfs:// / file URIs; convert back to a path
        config_path = Path(url2pathname(urlparse(config).path))
        tex_path = f'{config_path.parent.absolute() / config_path.stem}_hyperparams_space.tex'
        export_hyper_params_to_latex(config_path, tex_path)

    if DebugDetector().do_detection():
        print("Source Data:")
        for k in self.source_data.keys():
            print(f"\t {k}: {self.source_data[k]}.")
        print("\n Sink Data:")
        for k in self.sink_data.keys():
            print(f"\t {k}: {self.sink_data[k]}.")

        # When debugging, set the tempdir to the default of fastr + name
        self.fastr_tmpdir = os.path.join(fastr.config.mounts['tmp'],
                                         self.name)

    self.network.execute(self.source_data, self.sink_data, execution_plugin=self.fastr_plugin, tmpdir=self.fastr_tmpdir)
def add_evaluation(self, label_type, modus='binary_classification'):
    """Attach a performance-evaluation branch to the network.

    Must be called after ``build`` and before ``set``::

        WORC.build()
        WORC.add_evaluation(label_type)
        WORC.set()
        WORC.execute()

    Parameters
    ----------
    label_type: label to evaluate the performance on.
    modus: str, evaluation modus, default 'binary_classification'.
    """
    self.Evaluate = Evaluate(label_type=label_type,
                             parent=self,
                             modus=modus)
    self._add_evaluation = True
def save_config(self):
    """Save the config files to physical files and add to network.

    Writes one .ini per config (converting file paths to ConfigParser
    objects first), plus a PyRadiomics YAML config when PyRadiomics is
    among the feature calculators, and registers the resulting vfs URIs
    in self.fastrconfigs / self.pyradiomics_configs.
    """
    # If the configuration files are configparser objects, write to file
    self.pyradiomics_configs = list()

    # Make sure we can dump blank values for PyRadiomics
    yaml.SafeDumper.add_representer(type(None),
                                    lambda dumper, value: dumper.represent_scalar(u'tag:yaml.org,2002:null', ''))

    for num, c in enumerate(self.configs):
        if type(c) != configparser.ConfigParser:
            # A filepath (not a fastr source) is provided. Hence we read
            # the config file and convert it to a configparser object
            config = configparser.ConfigParser()
            config.read(c)
            c = config

        cfile = os.path.join(self.fastr_tmpdir, f"config_{self.name}_{num}.ini")
        if not os.path.exists(os.path.dirname(cfile)):
            os.makedirs(os.path.dirname(cfile))

        with open(cfile, 'w') as configfile:
            c.write(configfile)

        # If PyRadiomics is used, also write a config for PyRadiomics
        if 'pyradiomics' in c['General']['FeatureCalculators']:
            cfile_pyradiomics = os.path.join(self.fastr_tmpdir, f"config_pyradiomics_{self.name}_{num}.yaml")
            config_pyradiomics = io.convert_config_pyradiomics(c)
            with open(cfile_pyradiomics, 'w') as file:
                yaml.safe_dump(config_pyradiomics, file)
            # Re-express as a pathlib path so .as_uri() yields a valid URI.
            cfile_pyradiomics = Path(self.fastr_tmpdir) / f"config_pyradiomics_{self.name}_{num}.yaml"
            self.pyradiomics_configs.append(cfile_pyradiomics.as_uri().replace('%20', ' '))

        # BUG: Make path with pathlib to create windows double slashes
        cfile = Path(self.fastr_tmpdir) / f"config_{self.name}_{num}.ini"
        self.fastrconfigs.append(cfile.as_uri().replace('%20', ' '))
class Tools(object):
    """
    Create other pipelines besides the default radiomics executions.

    Currently includes:
    1. Registration pipeline
    2. Evaluation pipeline
    3. Slicer pipeline, to create pngs of middle slice of images.
    """

    def __init__(self):
        """Initialize object with all pipelines."""
        # Each attribute wraps one auxiliary fastr pipeline builder.
        self.Elastix = Elastix()
        self.Evaluate = Evaluate()
        self.Slicer = Slicer()
| 53.822572
| 228
| 0.559897
| 9,626
| 99,195
| 5.562331
| 0.098587
| 0.027081
| 0.025718
| 0.019722
| 0.537643
| 0.455316
| 0.388884
| 0.346656
| 0.297387
| 0.257662
| 0
| 0.009288
| 0.330289
| 99,195
| 1,842
| 229
| 53.851792
| 0.796694
| 0.133091
| 0
| 0.313372
| 0
| 0.001641
| 0.193925
| 0.0444
| 0
| 0
| 0
| 0.000543
| 0
| 1
| 0.013126
| false
| 0.00082
| 0.014766
| 0
| 0.030353
| 0.010664
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d86c9a6526d8d524710fa780972b087a3f46ac3
| 7,715
|
py
|
Python
|
causal_rl/environments/multi_typed.py
|
vluzko/causal_rl
|
92ee221bdf1932fa83955441baabb5e28b78ab9d
|
[
"MIT"
] | 2
|
2021-04-02T12:06:13.000Z
|
2022-02-09T06:57:26.000Z
|
causal_rl/environments/multi_typed.py
|
vluzko/causal_rl
|
92ee221bdf1932fa83955441baabb5e28b78ab9d
|
[
"MIT"
] | 11
|
2020-12-28T14:51:31.000Z
|
2021-03-29T19:53:24.000Z
|
causal_rl/environments/multi_typed.py
|
vluzko/causal_rl
|
92ee221bdf1932fa83955441baabb5e28b78ab9d
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from gym import Env
from scipy.spatial import distance
from typing import Optional, Tuple, Any
from causal_rl.environments import CausalEnv
class MultiTyped(CausalEnv):
    """A simulation of two body types (circles and boxes) with elastic collisions.

    Gravity is zero; bodies bounce off each other and off the four walls of a
    square arena of side ``width``. The first half of the bodies are circles,
    the second half are heavier, slightly larger boxes — this ordering is
    relied on by get_state, visualize, detect_collisions and WithTypes.

    Attributes:
        num_obj (int): Total number of bodies (twice the constructor argument).
        obj_dim (int): Dimension of one body's state. Always 2 * dimension_of_space.
        masses (np.ndarray): Base masses of the bodies (boxes use 6x in reset).
        radii (np.ndarray): Base radii of the bodies (boxes use 1.2x in reset).
        space (pymunk.Space): The actual simulation space (created in reset()).
    """
    cls_name = 'multi_typed'

    def __init__(self, num_obj: int=5, mass: float=10, radii: float=10, width: float=400):
        # num_obj circles + num_obj boxes in total.
        self.num_obj = 2 * num_obj
        self.obj_dim = 4
        self.mass = mass
        self.radius = radii
        self.masses = mass * np.ones(self.num_obj)
        self.radii = radii * np.ones(self.num_obj)
        self.width = width
        # Columns of the state that hold the (x, y) position.
        self.location_indices = (0, 1)

    @property
    def name(self) -> str:
        """Unique name encoding the class and its physical parameters."""
        return '{}_{}_{}_{}'.format(self.cls_name, self.mass, self.radius, self.width)

    def reset(self):
        """Build a fresh pymunk space with randomized bodies and return (state, 0, False, None)."""
        import pymunk
        self.space = pymunk.Space()
        self.space.gravity = (0.0, 0.0)
        self.objects = []

        # One random start position/velocity slot per body (circles AND
        # boxes), kept at least 20 px away from the walls.
        x_pos = np.random.rand(self.num_obj, 1) * (self.width - 40) + 20
        y_pos = np.random.rand(self.num_obj, 1) * (self.width - 40) + 20
        x_vel = np.random.rand(self.num_obj, 1) * 300 - 150
        y_vel = np.random.rand(self.num_obj, 1) * 300 - 150

        half = self.num_obj // 2

        # BUGFIX: both creation loops previously ran over range(self.num_obj),
        # producing 2 * num_obj bodies (with each circle/box pair stacked on
        # identical coordinates), while get_state()/detect_collisions()/
        # visualize()/WithTypes all assume exactly num_obj bodies with the
        # first half circles and the second half boxes.

        # Create circles (bodies 0 .. half-1).
        for i in range(half):
            mass = self.masses[i]
            radius = self.radii[i]
            moment = pymunk.moment_for_circle(mass, 0, radius, (0, 0))
            body = pymunk.Body(mass, moment)
            body.position = (x_pos[i], y_pos[i])
            body.velocity = (x_vel[i], y_vel[i])
            shape = pymunk.Circle(body, radius, (0, 0))
            shape.elasticity = 1.0
            self.space.add(body, shape)
            self.objects.append(body)

        # Create boxes (bodies half .. num_obj-1): heavier and slightly larger.
        for i in range(half, self.num_obj):
            mass = self.masses[i] * 6
            radius = self.radii[i] * 1.2
            size = (radius, radius)
            moment = pymunk.moment_for_box(mass, size)
            body = pymunk.Body(mass, moment)
            body.position = (x_pos[i], y_pos[i])
            body.velocity = (x_vel[i], y_vel[i])
            shape = pymunk.Poly.create_box(body, size)
            shape.elasticity = 1.0
            self.space.add(body, shape)
            self.objects.append(body)

        # Four static, perfectly elastic walls enclosing the arena.
        static_lines = [
            pymunk.Segment(self.space.static_body, (0.0, 0.0), (0.0, self.width), 0),
            pymunk.Segment(self.space.static_body, (0.0, 0.0), (self.width, 0.0), 0),
            pymunk.Segment(self.space.static_body, (self.width, 0.0), (self.width, self.width), 0),
            pymunk.Segment(self.space.static_body, (0.0, self.width), (self.width, self.width), 0)
        ]
        for line in static_lines:
            line.elasticity = 1.
        # NOTE(review): pymunk >= 6 requires self.space.add(*static_lines);
        # the list form shown here only works on older pymunk — confirm pin.
        self.space.add(static_lines)
        return self.get_state(), 0, False, None

    def get_state(self) -> np.ndarray:
        """Get the current state.

        Returns:
            A (num_obj, 4) array; each row is one body: [x, y, vx, vy].
        """
        state = np.zeros((self.num_obj, 4))
        for i in range(self.num_obj):
            state[i, :2] = np.array([self.objects[i].position[0], self.objects[i].position[1]])
            state[i, 2:] = np.array([self.objects[i].velocity[0], self.objects[i].velocity[1]])
        return state

    def step(self, dt=0.01) -> Tuple[np.ndarray, float, bool, Any]:
        """Advance the physics by dt seconds. Reward is always 0, never done."""
        self.space.step(dt)
        return self.get_state(), 0, False, None

    def generate_data(self, epochs: int=10000, dt: float=0.01) -> Tuple[np.ndarray, np.ndarray]:
        """Roll out a fresh episode of ``epochs`` steps.

        Velocities in the returned states are recomputed as finite
        differences of positions so they are consistent with dt.

        Returns:
            (states, rewards): (epochs, num_obj, 4) and (epochs, 1) arrays.
        """
        states = np.zeros((epochs, self.num_obj, 4))
        rewards = np.zeros((epochs, 1))
        self.reset()
        for t in range(epochs):
            states[t] = self.get_state()
            if t > 0:
                states[t, :, 2:] = (states[t, :, :2] - states[t - 1, :, :2]) / dt
            self.step(dt=dt)
        return states, rewards

    def visualize(self, state: np.ndarray, save_path: Optional[str]=None):
        """Visualize a single state.

        Circles are drawn for the first half of the bodies and squares for
        the second half; arrows show each body's velocity component.

        Args:
            state: The full (num_obj, obj_dim) state array.
            save_path: Path to save the image to; shown interactively if None.
        """
        pos = state[:, :2]
        momenta = state[:, 2:]
        fig, ax = plt.subplots(figsize=(6, 6))
        arena = plt.Rectangle((0, 0), self.width, self.width, linewidth=5, edgecolor='k', facecolor='none')
        ax.add_patch(arena)
        for i in range(self.num_obj // 2):
            circle = plt.Circle((pos[i, 0], pos[i, 1]), radius=self.radii[i], edgecolor='b')
            ax.annotate('{}'.format(i), xy=(pos[i, 0], pos[i, 1]), fontsize=8, ha='center')
            # Plot the momentum
            plt.arrow(pos[i, 0], pos[i, 1], momenta[i, 0], momenta[i, 1])
            ax.add_patch(circle)
        for i in range(self.num_obj // 2, self.num_obj):
            square = plt.Rectangle((pos[i, 0], pos[i, 1]), self.radii[i], self.radii[i], edgecolor='b')
            ax.annotate('{}'.format(i), xy=(pos[i, 0], pos[i, 1]), fontsize=8, ha='center')
            # Plot the momentum
            plt.arrow(pos[i, 0], pos[i, 1], momenta[i, 0], momenta[i, 1])
            ax.add_patch(square)
        plt.axis([0, self.width, 0, self.width])
        plt.axis('off')
        if save_path is not None:
            plt.savefig(save_path)
        else:
            plt.show()
        plt.close()

    def detect_collisions(self, trajectories: np.ndarray) -> np.ndarray:
        """Mark body pairs that come closer than the sum of their radii.

        Box radii are scaled by sqrt(2) to approximate their half-diagonal.
        A detected overlap at step i is marked at both i-1 and i.

        Returns:
            An (n, num_obj, num_obj) binary array.
        """
        n = trajectories.shape[0]
        k = self.num_obj
        radii = np.copy(self.radii)
        radii[self.num_obj // 2:] = radii[self.num_obj // 2:] * np.sqrt(2)
        min_dist = radii.reshape(k, 1) + radii.reshape(1, k)
        np.fill_diagonal(min_dist, 0)
        collisions = np.zeros((n, k, k))
        for i in range(1, n):
            # The (x, y) coordinates of all bodies at t=i
            locs = trajectories[i, :, :2]
            distances = distance.squareform(distance.pdist(locs))
            collided = np.nonzero(distances < min_dist)
            collisions[i-1][collided] = 1
            collisions[i][collided] = 1
        return collisions

    def wall_collisions(self, states: np.ndarray) -> np.ndarray:
        """Flag position components that penetrate a wall (per step, body, axis)."""
        min_coord = 0 + self.radius
        max_coord = self.width - self.radius
        # Just the position coordinates
        locs = states[:, :, :2]
        has_collision = (locs < min_coord) | (locs > max_coord)
        return has_collision
class WithTypes(MultiTyped):
    """MultiTyped environment whose state vectors carry a type indicator.

    The final state column is 0 for the first half of the bodies and 1 for
    the second half.
    """
    cls_name = 'with_types'

    def __init__(self, num_obj=5, mass: float=10, radii: float=10, width: float=400):
        super().__init__(num_obj, mass, radii, width)
        # One extra column for the type flag.
        self.obj_dim = 5
        self.location_indices = (0, 1)

    def generate_data(self, epochs: int=10000, dt: float=0.01) -> Tuple[np.ndarray, np.ndarray]:
        """Roll out an episode and append the type flag as the last column."""
        base_states, rewards = super().generate_data(epochs, dt)
        tagged = np.zeros((epochs, self.num_obj, self.obj_dim))
        tagged[:, :, :-1] = base_states
        tagged[:, self.num_obj // 2:, -1] = 1
        return tagged, rewards

    def detect_collisions(self, trajectories: np.ndarray) -> np.ndarray:
        """Strip the type column before delegating to the physics-based check."""
        return super().detect_collisions(trajectories[:, :, :-1])
| 36.738095
| 106
| 0.568762
| 1,083
| 7,715
| 3.950139
| 0.196676
| 0.035063
| 0.051426
| 0.015428
| 0.417017
| 0.372137
| 0.335905
| 0.320477
| 0.284245
| 0.259467
| 0
| 0.030557
| 0.287362
| 7,715
| 209
| 107
| 36.913876
| 0.747545
| 0.10512
| 0
| 0.205674
| 0
| 0
| 0.009571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.056738
| 0.014184
| 0.234043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d88e96d4a71ca08ce8d66eee14e65dd7c02396c
| 3,189
|
py
|
Python
|
bin/makeReport.py
|
oxfordmmm/SARS-CoV2_workflows
|
a84cb0a7142684414b2f285dd27cc2ea287eecb9
|
[
"MIT"
] | null | null | null |
bin/makeReport.py
|
oxfordmmm/SARS-CoV2_workflows
|
a84cb0a7142684414b2f285dd27cc2ea287eecb9
|
[
"MIT"
] | null | null | null |
bin/makeReport.py
|
oxfordmmm/SARS-CoV2_workflows
|
a84cb0a7142684414b2f285dd27cc2ea287eecb9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import pandas as pd
import sys
import json
from Bio import SeqIO
# Merge pangolin, nextclade and aln2type outputs for one sample into a
# single report row, annotated with tool/workflow version information.
sample_name=sys.argv[1]

pango=pd.read_csv('pango.csv')
nextclade=pd.read_csv('nextclade.tsv', sep='\t')
aln2type=pd.read_csv('aln2type.csv')

# Tag each table with the sample so they can all be joined on it.
pango['sampleName']=sample_name
nextclade['sampleName']=sample_name
aln2type['sampleName']=sample_name

df=pango.merge(nextclade, on='sampleName', how='left', suffixes=("_pango","_nextclade"))
df=df.merge(aln2type, on='sampleName', how='left', suffixes=(None,"_aln2type"))

# versions
wf=open('workflow_commit.txt').read()
df['workflowCommit']=str(wf).strip()
df['manifestVersion']=sys.argv[2]
nextclade_version=open('nextclade_files/version.txt').read()
df['nextcladeVersion']=str(nextclade_version).strip()
aln2type_variant_commit=open('variant_definitions/aln2type_variant_git_commit.txt').read()
aln2type_variant_version=open('variant_definitions/aln2type_variant_version.txt').read()
aln2type_source_commit=open('variant_definitions/aln2type_commit.txt').read()
df['aln2typeVariantCommit']=str(aln2type_variant_commit).strip()
df['aln2typeVariantVersion']=str(aln2type_variant_version).strip()
# NOTE(review): 'Vommit' looks like a typo for 'Commit', but downstream
# consumers may depend on this exact column name — verify before renaming.
df['aln2typeSourceVommit']=str(aln2type_source_commit).strip()

df.to_csv('{0}_report.tsv'.format(sys.argv[1]), sep='\t', index=False)

### convert to json
pango['program']='pango'
pango.set_index('program',inplace=True)
p=pango.to_dict(orient='index')

nextclade['program']='nextclade'
nextclade['nextcladeVersion']=str(nextclade_version).strip()
nextclade.set_index('program',inplace=True)
n=nextclade.to_dict(orient='index')
# Embed the raw nextclade JSON output alongside the tabular data.
with open('nextclade.json','rt', encoding= 'utf-8') as inf:
    nj=json.load(inf)
n['nextcladeOutputJson']=nj

aln2type['program']='aln2type'
aln2type['label']=aln2type['phe-label']
aln2type['aln2typeVariantCommit']=str(aln2type_variant_commit).strip()
aln2type['aln2typeSourceCommit']=str(aln2type_source_commit).strip()
# One dict entry per phe-label under the 'aln2type' key.
aln2type.set_index(['program','phe-label'],inplace=True)
a={level: aln2type.xs(level).to_dict('index') for level in aln2type.index.levels[0]}

w={'WorkflowInformation':{}}
w['WorkflowInformation']['workflowCommit']=str(wf).strip()
w['WorkflowInformation']['manifestVersion']=sys.argv[2]
w['WorkflowInformation']['sampleIdentifier']=sample_name

# add fasta to json
record = SeqIO.read('ref.fasta', "fasta")
w['WorkflowInformation']['referenceIdentifier']=record.id
#f={'FastaRecord':{'SeqId':record.id,
#    'SeqDescription': record.description,
#    'Sequence':str(record.seq),
#    'sampleName':sample_name}}
def completeness(nextcladeOutputJson, ref_len=29903):
    """Return genome completeness as a percentage, rounded to 1 decimal.

    Args:
        nextcladeOutputJson: Parsed nextclade JSON output; the number of
            missing bases is read from
            ``results[0]['qc']['missingData']['totalMissing']``.
        ref_len: Reference genome length in bases. Defaults to 29903 (the
            SARS-CoV-2 reference length), preserving previous behavior;
            now parameterized so other references can be scored.

    Returns:
        float: percentage of non-missing bases, e.g. 99.5.
    """
    total_missing = nextcladeOutputJson['results'][0]['qc']['missingData']['totalMissing']
    completeness_prop = (ref_len - total_missing) / ref_len
    completeness_pc = round(completeness_prop * 100, 1)
    return completeness_pc
# Summary section: overall genome completeness derived from nextclade QC.
s={'summary':{}}
s['summary']['completeness']=completeness(n['nextcladeOutputJson'])

# Assemble the per-sample JSON report from all the sections built above.
d={sample_name:{}}
d[sample_name].update(p)
d[sample_name].update(n)
d[sample_name].update(a)
d[sample_name].update(w)
#d[sample_name].update(f)
d[sample_name].update(s)

with open('{0}_report.json'.format(sample_name), 'w', encoding='utf-8') as f:
    json.dump(d, f, indent=4, sort_keys=True, ensure_ascii=False)
| 35.831461
| 90
| 0.756977
| 423
| 3,189
| 5.541371
| 0.304965
| 0.059727
| 0.03285
| 0.043515
| 0.195392
| 0.042662
| 0
| 0
| 0
| 0
| 0
| 0.017028
| 0.060834
| 3,189
| 88
| 91
| 36.238636
| 0.765609
| 0.070869
| 0
| 0
| 0
| 0
| 0.303862
| 0.077575
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015873
| false
| 0
| 0.063492
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d8a5d72d65e690dc4c82341ed975187662e4c48
| 1,484
|
py
|
Python
|
webhooks/statuscake/alerta_statuscake.py
|
frekel/alerta-contrib
|
d8f5c93a4ea735085b3689c2c852ecae94924d08
|
[
"MIT"
] | 114
|
2015-02-05T00:22:16.000Z
|
2021-11-25T13:02:44.000Z
|
webhooks/statuscake/alerta_statuscake.py
|
NeilOrley/alerta-contrib
|
69d271ef9fe6542727ec4aa39fc8e0f797f1e8b1
|
[
"MIT"
] | 245
|
2016-01-09T22:29:09.000Z
|
2022-03-16T10:37:02.000Z
|
webhooks/statuscake/alerta_statuscake.py
|
NeilOrley/alerta-contrib
|
69d271ef9fe6542727ec4aa39fc8e0f797f1e8b1
|
[
"MIT"
] | 193
|
2015-01-30T21:22:49.000Z
|
2022-03-28T05:37:14.000Z
|
from alerta.models.alert import Alert
from alerta.webhooks import WebhookBase
from alerta.exceptions import RejectException
import os
import hashlib
class StatusCakeWebhook(WebhookBase):
    """Translate an incoming StatusCake webhook payload into an alerta Alert."""

    def incoming(self, query_string, payload):
        # Severity reported for "down" notifications; overridable via env.
        alert_severity = os.environ.get('STATUSCAKE_DEFAULT_ALERT_SEVERITY', 'major')

        # If both the StatusCake username and API key are configured, the
        # webhook token (md5 of username + apikey) can be verified.
        statuscake_username = os.environ.get('STATUSCAKE_USERNAME')
        statuscake_apikey = os.environ.get('STATUSCAKE_APIKEY')
        if statuscake_username and statuscake_apikey:
            expected_token = hashlib.md5(
                (statuscake_username + statuscake_apikey).encode()).hexdigest()
            if expected_token != payload['Token']:
                raise RejectException("Provided Token couldn't be verified")

        # An 'UP' status clears the alert; anything else raises it.
        severity = 'normal' if payload['Status'] == 'UP' else alert_severity

        return Alert(
            resource=payload['Name'],
            event='AppDown',
            environment='Production',
            severity=severity,
            service=['StatusCake'],
            group='Application',
            value=payload['StatusCode'],
            text="%s is down" % payload['URL'],
            tags=payload['Tags'].split(','),
            origin='statuscake',
            raw_data=str(payload)
        )
| 35.333333
| 85
| 0.624663
| 147
| 1,484
| 6.176871
| 0.517007
| 0.099119
| 0.039648
| 0.072687
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000937
| 0.280997
| 1,484
| 41
| 86
| 36.195122
| 0.850047
| 0.065364
| 0
| 0
| 0
| 0
| 0.150289
| 0.023844
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.15625
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d8b51eaca246cacfde939fcbc4a16b39dba720e
| 3,738
|
py
|
Python
|
ironic_discoverd/main.py
|
enovance/ironic-discoverd
|
d3df6178ca5c95943c93ff80723c86b7080bca0b
|
[
"Apache-2.0"
] | null | null | null |
ironic_discoverd/main.py
|
enovance/ironic-discoverd
|
d3df6178ca5c95943c93ff80723c86b7080bca0b
|
[
"Apache-2.0"
] | null | null | null |
ironic_discoverd/main.py
|
enovance/ironic-discoverd
|
d3df6178ca5c95943c93ff80723c86b7080bca0b
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
# Monkey-patch the stdlib for cooperative green threads; thread=False keeps
# native threading intact. Must run before the imports below.
eventlet.monkey_patch(thread=False)

import json
import logging
import sys

from flask import Flask, request  # noqa
from keystoneclient import exceptions

from ironic_discoverd import conf
from ironic_discoverd import discoverd
from ironic_discoverd import firewall
from ironic_discoverd import node_cache
from ironic_discoverd import utils

# WSGI application exposing the discoverd HTTP API.
app = Flask(__name__)
# Reuse the package-wide logger.
LOG = discoverd.LOG
@app.route('/v1/continue', methods=['POST'])
def post_continue():
    """Accept introspection data posted back by the discovery ramdisk.

    Returns the processing result as JSON on success, or the failure text
    with the exception's HTTP code when processing fails.
    """
    data = request.get_json(force=True)
    LOG.debug("Got JSON %s, going into processing thread", data)
    try:
        res = discoverd.process(data)
    except utils.DiscoveryFailed as exc:
        return str(exc), exc.http_code
    else:
        # BUGFIX: Content-Type was the invalid 'applications/json'.
        return json.dumps(res), 200, {'Content-Type': 'application/json'}
@app.route('/v1/discover', methods=['POST'])
def post_discover():
    """Trigger discovery for the nodes described in the request body.

    When authentication is enabled, the X-Auth-Token header must be present
    and accepted by Keystone; otherwise the request is rejected.
    """
    if conf.getboolean('discoverd', 'authenticate'):
        token = request.headers.get('X-Auth-Token')
        if not token:
            LOG.debug("No X-Auth-Token header, rejecting")
            return 'Authentication required', 401
        try:
            utils.get_keystone(token=token)
        except exceptions.Unauthorized:
            LOG.debug("Keystone denied access, rejecting")
            return 'Access denied', 403
        # TODO(dtanstur): check for admin role

    data = request.get_json(force=True)
    LOG.debug("Got JSON %s", data)
    try:
        discoverd.discover(data)
    except utils.DiscoveryFailed as exc:
        return str(exc), exc.http_code
    return "", 202
def periodic_update(period):
    """Endlessly refresh the firewall filters, sleeping *period* seconds between runs.

    Failures are logged and swallowed so one bad iteration does not kill
    the green thread.
    """
    while True:
        LOG.debug('Running periodic update of filters')
        try:
            firewall.update_filters()
        except Exception:
            LOG.exception('Periodic update failed')
        eventlet.greenthread.sleep(period)
def periodic_clean_up(period):
    """Endlessly expire timed-out nodes, sleeping *period* seconds between runs.

    Filters are only rebuilt when the cache clean-up actually removed
    something. Failures are logged and swallowed.
    """
    while True:
        LOG.debug('Running periodic clean up of timed out nodes')
        try:
            if node_cache.clean_up():
                firewall.update_filters()
        except Exception:
            LOG.exception('Periodic clean up failed')
        eventlet.greenthread.sleep(period)
def main():
    """Entry point: load config, start background tasks and serve the API.

    Usage: prog config-file. Initializes logging, the node cache, the
    firewall and Ironic connectivity, spawns the two periodic green
    threads, then blocks in Flask's development server.
    """
    if len(sys.argv) < 2:
        sys.exit("Usage: %s config-file" % sys.argv[0])

    conf.read(sys.argv[1])
    debug = conf.getboolean('discoverd', 'debug')

    logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)
    # Silence the chatty HTTP connection-pool loggers.
    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
        logging.WARNING)

    if not conf.getboolean('discoverd', 'authenticate'):
        LOG.warning('Starting unauthenticated, please check configuration')

    # Order matters: cache and firewall must be ready before serving,
    # and Ironic availability is verified up front.
    node_cache.init()
    firewall.init()
    utils.check_ironic_available()

    period = conf.getint('discoverd', 'firewall_update_period')
    eventlet.greenthread.spawn_n(periodic_update, period)
    period = conf.getint('discoverd', 'clean_up_period')
    eventlet.greenthread.spawn_n(periodic_clean_up, period)

    app.run(debug=debug,
            host=conf.get('discoverd', 'listen_address'),
            port=conf.getint('discoverd', 'listen_port'))
| 31.411765
| 75
| 0.688604
| 470
| 3,738
| 5.393617
| 0.393617
| 0.023669
| 0.037475
| 0.04931
| 0.257199
| 0.223274
| 0.161736
| 0.131755
| 0.087574
| 0.087574
| 0
| 0.007757
| 0.206795
| 3,738
| 118
| 76
| 31.677966
| 0.847218
| 0.150348
| 0
| 0.259259
| 0
| 0
| 0.206264
| 0.026574
| 0
| 0
| 0
| 0.008475
| 0
| 1
| 0.061728
| false
| 0
| 0.135802
| 0
| 0.271605
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d8c7b2102958e3a921b5b5a1f32ed6750cd5ff4
| 964
|
py
|
Python
|
config_translator.py
|
Charahiro-tan/Jurubot_Translator
|
d0d0db137f3ddfe06d7cd9457d22c418bdeff94c
|
[
"MIT"
] | 1
|
2021-07-26T11:14:05.000Z
|
2021-07-26T11:14:05.000Z
|
config_translator.py
|
Charahiro-tan/Jurubot_Translator
|
d0d0db137f3ddfe06d7cd9457d22c418bdeff94c
|
[
"MIT"
] | null | null | null |
config_translator.py
|
Charahiro-tan/Jurubot_Translator
|
d0d0db137f3ddfe06d7cd9457d22c418bdeff94c
|
[
"MIT"
] | null | null | null |
##################################################
# Translation settings
# Changed settings take effect the next time the bot starts.
##################################################

# Items wrapped in [] accept any number of entries: quote each one with ""
# and separate them with commas.

# Users to ignore.
ignore_user = ["Nightbot","Streamelements","Moobot"]

# Words to delete before translating. Regular expressions are supported.
# URLs and repeated words are already removed by default; add more here
# if that is not enough.
# NOTE(review): the two entries are identical ("88+" twice) — one of them
# may have been meant as a different (e.g. full-width) variant; verify.
del_word = ["88+","88+"]

# Languages to ignore.
# See https://cloud.google.com/translate/docs/languages for language codes.
ignore_lang = ["",""]

# Language used by the streamer. Every language is translated into this one.
home_lang = "ja"

# Target language for messages posted in home_lang above.
default_to_lang = "en"

# Suffix of the translate.google URL. Keep as-is if you are in Japan.
url_suffix = "co.jp"

# True to include the sender's name in the translated output, False otherwise.
sender = True

# Name shown when the above is True:
# "displayname" for the display name
# "loginid" for the login ID
sender_name = "displayname"

# True to append language info (en => ja) to the translated output.
language = True

# True to use an API created with Google Apps Script.
# Be sure to read the Readme before enabling Google Apps Script.
gas = False

# URL of the API created with Google Apps Script.
gas_url = ""
| 22.418605
| 61
| 0.692946
| 82
| 964
| 8.036585
| 0.792683
| 0.045524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004603
| 0.098548
| 964
| 43
| 62
| 22.418605
| 0.752589
| 0.590249
| 0
| 0
| 0
| 0
| 0.195652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d8cd77924dc533eeabb54595050045f0fb725d3
| 1,489
|
py
|
Python
|
wxcloudrun/dao.py
|
lubupang/resume_flask1
|
1ea18e88c0b667e92710096f57973a77d19e8fc6
|
[
"MIT"
] | null | null | null |
wxcloudrun/dao.py
|
lubupang/resume_flask1
|
1ea18e88c0b667e92710096f57973a77d19e8fc6
|
[
"MIT"
] | null | null | null |
wxcloudrun/dao.py
|
lubupang/resume_flask1
|
1ea18e88c0b667e92710096f57973a77d19e8fc6
|
[
"MIT"
] | null | null | null |
import logging

from sqlalchemy.exc import OperationalError

from wxcloudrun import db
from wxcloudrun.model import Counters

# Initialize the module logger.
logger = logging.getLogger('log')
# NOTE(review): leftover debug output emitted at import time — consider removing.
logger.info("aaaaaaa")
def query_counterbyid(id):
    """
    Query a Counters entity by its primary key.

    :param id: primary key of the Counters row
    :return: the matching Counters entity, or None on a database error
    """
    # Removed a leftover debug statement (logger.info("bbbbbbbbb")).
    try:
        return Counters.query.filter(Counters.id == id).first()
    except OperationalError as e:
        # Log and swallow connectivity errors; callers receive None.
        logger.info("query_counterbyid errorMsg= {} ".format(e))
        return None
def delete_counterbyid(id):
    """
    Delete the Counters entity with the given primary key, if it exists.

    :param id: primary key of the Counters row
    """
    try:
        record = Counters.query.get(id)
        if record is None:
            return
        db.session.delete(record)
        db.session.commit()
    except OperationalError as e:
        # Log and swallow connectivity errors; deletion is best-effort.
        logger.info("delete_counterbyid errorMsg= {} ".format(e))
def insert_counter(counter):
    """
    Insert a new Counters entity into the database.

    :param counter: the Counters entity to persist
    """
    try:
        db.session.add(counter)
        db.session.commit()
    except OperationalError as e:
        # Log and swallow connectivity errors; insertion is best-effort.
        logger.info("insert_counter errorMsg= {} ".format(e))
def update_counterbyid(counter):
    """
    Update the stored Counters row matching ``counter.id``.

    :param counter: Counters entity carrying the id of the row to update
    """
    # NOTE(review): this re-fetches the row and then only flush()es and
    # commit()s without copying any field from the passed-in ``counter``,
    # so nothing is written unless the caller already mutated a
    # session-attached object. Verify the intended behavior.
    try:
        counter = query_counterbyid(counter.id)
        if counter is None:
            return
        db.session.flush()
        db.session.commit()
    except OperationalError as e:
        # Log and swallow connectivity errors; update is best-effort.
        logger.info("update_counterbyid errorMsg= {} ".format(e))
| 22.560606
| 65
| 0.633983
| 158
| 1,489
| 5.917722
| 0.316456
| 0.064171
| 0.102674
| 0.106952
| 0.281283
| 0.281283
| 0.24385
| 0.24385
| 0.175401
| 0.121925
| 0
| 0
| 0.257891
| 1,489
| 65
| 66
| 22.907692
| 0.846154
| 0.117529
| 0
| 0.405405
| 0
| 0
| 0.115166
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0.108108
| 0
| 0.324324
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d8db8eca4cacfeb8ce07aa8011f8a4b558400b4
| 7,411
|
py
|
Python
|
src/bpp/tests/tests_legacy/test_views/test_raporty.py
|
iplweb/django-bpp
|
85f183a99d8d5027ae4772efac1e4a9f21675849
|
[
"BSD-3-Clause"
] | 1
|
2017-04-27T19:50:02.000Z
|
2017-04-27T19:50:02.000Z
|
src/bpp/tests/tests_legacy/test_views/test_raporty.py
|
mpasternak/django-bpp
|
434338821d5ad1aaee598f6327151aba0af66f5e
|
[
"BSD-3-Clause"
] | 41
|
2019-11-07T00:07:02.000Z
|
2022-02-27T22:09:39.000Z
|
src/bpp/tests/tests_legacy/test_views/test_raporty.py
|
iplweb/bpp
|
f027415cc3faf1ca79082bf7bacd4be35b1a6fdf
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- encoding: utf-8 -*-
import os
import sys
import uuid
import pytest
from django.apps import apps
from django.contrib.auth.models import Group
from django.core.files.base import ContentFile
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
from django.db import transaction
from django.http import Http404
from django.test.utils import override_settings
from django.utils import timezone
from model_mommy import mommy
from bpp.models import Typ_KBN, Jezyk, Charakter_Formalny, Typ_Odpowiedzialnosci
from bpp.tests.tests_legacy.testutil import UserTestCase, UserTransactionTestCase
from bpp.tests.util import any_jednostka, any_autor, any_ciagle
from bpp.util import rebuild_contenttypes
from bpp.views.raporty import RaportSelector, PodgladRaportu, KasowanieRaportu
from celeryui.models import Report
class TestRaportSelector(UserTestCase):
    """Smoke tests for the report-selector view and report titles."""

    def test_raportselector(self):
        view = RaportSelector()
        view.request = self.factory.get('/')
        view.get_context_data()

    def test_raportselector_with_reports(self):
        # One finished and one unfinished report should both render.
        for finished_on in (timezone.now(), None):
            mommy.make(Report, arguments={}, file=None, finished_on=finished_on)
        self.client.get(reverse('bpp:raporty'))

    def test_tytuly_raportow_kronika_uczelni(self):
        any_ciagle(rok=2000)
        Report.objects.create(
            ordered_by=self.user,
            function="kronika-uczelni",
            arguments={"rok": "2000"})
        response = self.client.get(reverse('bpp:raporty'))
        self.assertContains(
            response,
            "Kronika Uczelni dla roku 2000",
            status_code=200)

    def test_tytuly_raportow_raport_dla_komisji_centralnej(self):
        author = any_autor("Kowalski", "Jan")
        Report.objects.create(
            ordered_by=self.user,
            function="raport-dla-komisji-centralnej",
            arguments={"autor": author.pk})
        response = self.client.get(reverse('bpp:raporty'))
        self.assertContains(
            response,
            "Raport dla Komisji Centralnej - %s" % str(author),
            status_code=200)
class RaportMixin:
    """Shared helper creating a stub 'kronika-uczelni' report."""

    def zrob_raport(self):
        return mommy.make(
            Report,
            file=None,
            function="kronika-uczelni",
            arguments='{"rok":"2013"}')
class TestPobranieRaportu(RaportMixin, UserTestCase):
    """Tests for downloading a generated report file via the nginx sendfile backend."""

    def setUp(self):
        UserTestCase.setUp(self)
        self.r = self.zrob_raport()

        # Deleting a missing file raises WindowsError on Windows, OSError elsewhere.
        error_class = OSError
        if sys.platform.startswith('win'):
            error_class = WindowsError
        try:
            # Remove any leftover file from a previous test run.
            os.unlink(
                os.path.join(settings.MEDIA_ROOT, 'raport', 'test_raport'))
        except error_class:
            pass
        self.r.file.save("test_raport", ContentFile("hej ho"))

    def test_pobranie_nginx(self):
        # The report must be finished, otherwise there is no download.
        self.r.finished_on = timezone.now()
        self.r.save()

        with override_settings(SENDFILE_BACKEND='sendfile.backends.nginx'):
            url = reverse('bpp:pobranie-raportu', kwargs=dict(uid=self.r.uid))
            resp = self.client.get(url)
            self.assertEqual(resp.status_code, 200)
            # The nginx backend serves files via the X-Accel-Redirect header.
            self.assertIn('x-accel-redirect', resp._headers)
class TestPodgladRaportu(RaportMixin, UserTestCase):
    """Tests for the report-preview view."""

    def setUp(self):
        UserTestCase.setUp(self)
        self.r = self.zrob_raport()

    def test_podgladraportu(self):
        view = PodgladRaportu()
        view.kwargs = {'uid': self.r.uid}
        self.assertEqual(view.get_object(), self.r)
        # An unknown uid must raise a 404.
        view.kwargs = {'uid': str(uuid.uuid4())}
        self.assertRaises(Http404, view.get_object)

    def test_podgladraportu_client(self):
        url = reverse('bpp:podglad-raportu', kwargs=dict(uid=self.r.uid))
        response = self.client.get(url)
        self.assertContains(response, 'Kronika Uczelni', status_code=200)
class KasowanieRaportuMixin:
    """Creates a report owned by the current test user."""

    def setUp(self):
        report = self.zrob_raport()
        report.ordered_by = self.user
        report.save()
        self.r = report
class TestKasowanieRaportu(KasowanieRaportuMixin, RaportMixin, UserTestCase):
    """Tests for deleting a report, both directly and via the client."""

    def setUp(self):
        UserTestCase.setUp(self)
        KasowanieRaportuMixin.setUp(self)

    def test_kasowanieraportu(self):
        view = KasowanieRaportu()
        view.kwargs = dict(uid=self.r.uid)

        class FakeRequest:
            user = self.user

        view.request = FakeRequest()
        # A non-owner (here: no user at all) must not be able to fetch it.
        view.request.user = None
        self.assertRaises(Http404, view.get_object)
        # The owner can.
        view.request.user = self.user
        self.assertEqual(view.get_object(), self.r)

    def test_kasowanieraportu_client(self):
        self.assertEqual(Report.objects.count(), 1)
        url = reverse('bpp:kasowanie-raportu', kwargs=dict(uid=self.r.uid))
        response = self.client.get(url)
        self.assertRedirects(response, reverse("bpp:raporty"))
        self.assertEqual(Report.objects.count(), 0)
from django.conf import settings
class TestWidokiRaportJednostek2012(UserTestCase):
    """Tests for the per-unit publication report views.

    BUGFIX: the two test method names were swapped — the "one year" test
    exercised the year-range URL and vice versa. Names now match the URLs
    and assertions they contain.
    """

    def setUp(self):
        UserTestCase.setUp(self)
        self.j = any_jednostka()
        # Dictionary rows the report views rely on.
        Typ_KBN.objects.get_or_create(skrot="PW", nazwa="Praca wieloośrodkowa")
        Jezyk.objects.get_or_create(skrot='pol.', nazwa='polski')
        Charakter_Formalny.objects.get_or_create(skrot='KSZ', nazwa='Książka w języku obcym')
        Charakter_Formalny.objects.get_or_create(skrot='KSP', nazwa='Książka w języku polskim')
        Charakter_Formalny.objects.get_or_create(skrot='KS', nazwa='Książka')
        Charakter_Formalny.objects.get_or_create(skrot='ROZ', nazwa='Rozdział książki')
        Group.objects.get_or_create(name="wprowadzanie danych")

    def test_zakres_lat(self):
        # Year-range report (min-max URL).
        url = reverse("bpp:raport-jednostek-rok-min-max",
                      args=(self.j.pk, 2010, 2013))
        res = self.client.get(url)
        self.assertContains(
            res,
            "Dane o publikacjach za okres 2010 - 2013",
            status_code=200)

    def test_jeden_rok(self):
        # Single-year report.
        url = reverse("bpp:raport-jednostek", args=(self.j.pk, 2013))
        res = self.client.get(url)
        self.assertContains(
            res,
            "Dane o publikacjach za rok 2013",
            status_code=200)
class TestRankingAutorow(UserTestCase):
    """Tests for the author-ranking view (HTML rendering and CSV export)."""

    def setUp(self):
        UserTestCase.setUp(self)
        rebuild_contenttypes()
        Typ_Odpowiedzialnosci.objects.get_or_create(skrot='aut.', nazwa='autor')
        Group.objects.get_or_create(name="wprowadzanie danych")
        # One publication in 2000 by a single author at a single unit.
        unit = any_jednostka()
        author = any_autor(nazwisko="Kowalski")
        publication = any_ciagle(impact_factor=200, rok=2000)
        publication.dodaj_autora(author, unit)

    def test_renderowanie(self):
        url = reverse("bpp:ranking-autorow", args=(2000, 2000))
        response = self.client.get(url)
        self.assertContains(response, "Ranking autorów", status_code=200)
        self.assertContains(response, "Kowalski")

    def test_renderowanie_csv(self):
        url = reverse("bpp:ranking-autorow", args=(2000, 2000))
        response = self.client.get(url, data={"_export": "csv"})
        self.assertContains(
            response,
            '"Kowalski Jan Maria, dr",Jednostka')
| 32.221739
| 95
| 0.643233
| 868
| 7,411
| 5.366359
| 0.268433
| 0.017175
| 0.027909
| 0.034779
| 0.335552
| 0.284242
| 0.256548
| 0.198583
| 0.162731
| 0.14255
| 0
| 0.018029
| 0.244097
| 7,411
| 229
| 96
| 32.362445
| 0.813459
| 0.028336
| 0
| 0.296512
| 0
| 0
| 0.105088
| 0.014596
| 0
| 0
| 0
| 0
| 0.098837
| 1
| 0.116279
| false
| 0.005814
| 0.127907
| 0
| 0.302326
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d8ebb77655b687ce95045239bb38a91c19a2901
| 1,192
|
py
|
Python
|
manager_app/serializers/carousel_serializers.py
|
syz247179876/e_mall
|
f94e39e091e098242342f532ae371b8ff127542f
|
[
"Apache-2.0"
] | 7
|
2021-04-10T13:20:56.000Z
|
2022-03-29T15:00:29.000Z
|
manager_app/serializers/carousel_serializers.py
|
syz247179876/E_mall
|
f94e39e091e098242342f532ae371b8ff127542f
|
[
"Apache-2.0"
] | 9
|
2021-05-11T03:53:31.000Z
|
2022-03-12T00:58:03.000Z
|
manager_app/serializers/carousel_serializers.py
|
syz247179876/E_mall
|
f94e39e091e098242342f532ae371b8ff127542f
|
[
"Apache-2.0"
] | 2
|
2020-11-24T08:59:22.000Z
|
2020-11-24T14:10:59.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2021/4/6 下午9:21
# @Author : 司云中
# @File : carousel_serializers.py
# @Software: Pycharm
from rest_framework import serializers
from Emall.exceptions import DataFormatError
from shop_app.models.commodity_models import Carousel
class ManagerCarouselSerializer(serializers.ModelSerializer):
    """Serializer used by managers to create and update carousel entries."""

    class Meta:
        model = Carousel
        fields = ('pk', 'picture', 'url', 'sort', 'type')
        read_only_fields = ('pk',)

    def add(self):
        """Create a new carousel record from the validated payload."""
        self.Meta.model.objects.create(**self.validated_data)

    def modify(self):
        """Update an existing carousel record; the target pk comes from the request body."""
        request = self.context.get('request')
        pk = request.data.get('pk')
        if not pk:
            # Reject the update when no primary key was supplied.
            raise DataFormatError('缺少数据')
        return self.Meta.model.objects.filter(pk=pk).update(**self.validated_data)
class DeleteCarouselSerializer(serializers.ModelSerializer):
    """Serializer accepting a non-empty list of carousel pks to bulk-delete."""

    pk_list = serializers.ListField(child=serializers.IntegerField(), allow_empty=False)

    class Meta:
        model = Carousel
        fields = ('pk_list',)

    def delete(self):
        """Delete every carousel whose pk appears in the validated pk_list."""
        targets = self.validated_data.pop('pk_list')
        return self.Meta.model.objects.filter(pk__in=targets).delete()
| 27.090909
| 97
| 0.654362
| 136
| 1,192
| 5.625
| 0.544118
| 0.058824
| 0.05098
| 0.078431
| 0.16732
| 0.16732
| 0.088889
| 0
| 0
| 0
| 0
| 0.010549
| 0.204698
| 1,192
| 43
| 98
| 27.72093
| 0.796414
| 0.11745
| 0
| 0.181818
| 0
| 0
| 0.047758
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.136364
| 0
| 0.590909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d9135e1864bf2b1336ddc05e72617edb4057d7b
| 7,312
|
py
|
Python
|
xfbin/structure/nud.py
|
SutandoTsukai181/xfbin_lib
|
8e2c56f354bfd868f9162f816cc528e6f830cdbc
|
[
"MIT"
] | 3
|
2021-07-20T09:13:13.000Z
|
2021-09-06T18:08:15.000Z
|
xfbin/structure/nud.py
|
SutandoTsukai181/xfbin_lib
|
8e2c56f354bfd868f9162f816cc528e6f830cdbc
|
[
"MIT"
] | 1
|
2021-09-06T18:07:48.000Z
|
2021-09-06T18:07:48.000Z
|
xfbin/structure/nud.py
|
SutandoTsukai181/xfbin_lib
|
8e2c56f354bfd868f9162f816cc528e6f830cdbc
|
[
"MIT"
] | null | null | null |
from itertools import chain
from typing import List, Tuple
from .br.br_nud import *
class Nud:
    """In-memory representation of one NUD model chunk."""

    name: str  # chunk name
    mesh_groups: List['NudMeshGroup']

    def init_data(self, name, br_nud: BrNud):
        """Populate this Nud from a parsed BrNud structure."""
        self.name = name
        self.bounding_sphere = br_nud.boundingSphere

        self.mesh_groups = list()
        for raw_group in br_nud.meshGroups:
            converted = NudMeshGroup()
            converted.init_data(raw_group)
            self.mesh_groups.append(converted)

    def get_bone_range(self) -> Tuple[int, int]:
        """Return (lowest, highest) bone index referenced by the first mesh group."""
        # Nothing skinned to inspect -> no bone range.
        if not (self.mesh_groups
                and self.mesh_groups[0].meshes
                and self.mesh_groups[0].meshes[0].bone_type != NudBoneType.NoBones):
            return (0, 0)

        lowest, highest = 0xFF_FF, 0
        skinned = [m for m in self.mesh_groups[0].meshes if m.vertices and m.vertices[0].bone_ids]
        for mesh in skinned:
            all_ids = list(chain(*(v.bone_ids for v in mesh.vertices)))
            lowest = min(lowest, min(all_ids))
            highest = max(highest, max(all_ids))

        # No skinned mesh contributed any ids.
        if lowest > highest:
            return (0, 0)

        return (lowest, highest)
class NudMeshGroup:
    """A named group of meshes inside a Nud."""

    name: str
    meshes: List['NudMesh']

    def init_data(self, br_mesh_group: BrNudMeshGroup):
        """Copy name/flags/bounds and convert every contained mesh."""
        self.name = br_mesh_group.name
        self.bone_flags = br_mesh_group.boneFlags
        self.bounding_sphere = br_mesh_group.boundingSphere

        self.meshes = [self._convert(raw) for raw in br_mesh_group.meshes]

    @staticmethod
    def _convert(br_mesh):
        # One NudMesh per parsed BrNudMesh.
        mesh = NudMesh()
        mesh.init_data(br_mesh)
        return mesh
class NudMesh:
    """A single mesh: vertices, decoded triangle faces, and materials."""

    MAX_VERTICES = 32_767
    MAX_FACES = 16_383

    vertices: List['NudVertex']
    faces: List[Tuple[int, int, int]]
    materials: List['NudMaterial']

    vertex_type: NudVertexType
    bone_type: NudBoneType
    uv_type: NudUvType

    def init_data(self, br_mesh: BrNudMesh):
        """Populate vertices, faces and materials from a parsed BrNudMesh."""
        self.add_vertices(br_mesh.vertices)
        self.add_faces(br_mesh.faces, br_mesh.faceSize)
        self.add_materials(br_mesh.materials)

        # Low nibble of vertexSize selects the vertex layout, high nibble the
        # bone layout; low nibble of uvSize selects the UV layout.
        self.vertex_type = NudVertexType(br_mesh.vertexSize & 0x0F)
        self.bone_type = NudBoneType(br_mesh.vertexSize & 0xF0)
        self.uv_type = NudUvType(br_mesh.uvSize & 0x0F)
        self.face_flag = br_mesh.faceFlag

    def has_bones(self):
        """True when the first vertex carries bone ids (mesh is skinned)."""
        return bool(self.vertices and self.vertices[0].bone_ids)

    def has_color(self):
        """True when the first vertex carries a color."""
        return bool(self.vertices and self.vertices[0].color)

    def get_uv_channel_count(self):
        """Number of UV channels on the first vertex, or 0 when there are none."""
        return len(self.vertices[0].uv) if bool(self.vertices and self.vertices[0].uv) else 0

    def add_vertices(self, vertices: List[BrNudVertex]):
        """Convert raw BrNudVertex entries into NudVertex objects."""
        self.vertices = list()
        for br_vertex in vertices:
            vertex = NudVertex()
            vertex.init_data(br_vertex)
            self.vertices.append(vertex)

    def add_faces(self, faces: List[int], faceSize: int):
        """Decode the face index buffer into (a, b, c) triangles.

        BUGFIX: the 0x40 branch previously assigned a one-shot zip iterator to
        self.faces (declared List[Tuple[int, int, int]]), so a second
        iteration silently yielded nothing; it is now materialized as a list.
        """
        faces = iter(faces)

        if faceSize & 0x40:
            # 0x40 format does not have -1 indices nor changing directions
            self.faces = list(zip(faces, faces, faces))
            return

        # Triangle-strip format: -1 restarts the strip, winding alternates.
        self.faces = list()
        start_dir = 1
        f1 = next(faces)
        f2 = next(faces)
        face_dir = start_dir
        try:
            while True:
                f3 = next(faces)
                if f3 == -1:
                    # Restart the strip from the next two indices.
                    f1 = next(faces)
                    f2 = next(faces)
                    face_dir = start_dir
                else:
                    face_dir = -face_dir
                    # Skip degenerate triangles (repeated indices).
                    if f1 != f2 != f3:
                        # Emit with winding that matches the current direction.
                        if face_dir > 0:
                            self.faces.append((f3, f2, f1))
                        else:
                            self.faces.append((f2, f3, f1))
                    f1 = f2
                    f2 = f3
        except StopIteration:
            # Ran off the end of the index buffer: all strips consumed.
            pass

    def add_materials(self, materials: List[BrNudMaterial]):
        """Convert raw BrNudMaterial entries into NudMaterial objects."""
        self.materials = list()
        for br_material in materials:
            material = NudMaterial()
            material.init_data(br_material)
            self.materials.append(material)
class NudVertex:
    """A single mesh vertex with optional tangent-space, color and skinning data."""

    position: Tuple[float, float, float]
    normal: Tuple[float, float, float]
    bitangent: Tuple[float, float, float]
    tangent: Tuple[float, float, float]

    color: Tuple[int, int, int, int]

    uv: List[Tuple[float, float]]

    bone_ids: Tuple[int, int, int, int]
    bone_weights: Tuple[float, float, float, float]

    def init_data(self, br_vertex: 'BrNudVertex'):
        """Copy vertex attributes from a parsed BrNudVertex; absent ones become None."""
        self.position = br_vertex.position
        self.normal = br_vertex.normals
        self.bitangent = br_vertex.biTangents if br_vertex.biTangents else None
        self.tangent = br_vertex.tangents if br_vertex.tangents else None

        self.color = tuple(map(lambda x: int(x), br_vertex.color)) if br_vertex.color else None
        self.uv = br_vertex.uv

        self.bone_ids = br_vertex.boneIds
        self.bone_weights = br_vertex.boneWeights

    @staticmethod
    def _seq_eq(a, b) -> bool:
        """None-safe elementwise sequence equality.

        BUGFIX: the old __eq__ mapped over these fields directly and raised
        TypeError whenever an optional field (tangent/bitangent/color/bone_ids)
        was None, and map() silently truncated on unequal lengths.
        """
        if a is None or b is None:
            return a is b
        return len(a) == len(b) and all(x == y for x, y in zip(a, b))

    def __eq__(self, o: 'NudVertex') -> bool:
        """Elementwise equality across all vertex attributes (None-safe)."""
        eq = NudVertex._seq_eq
        if self.uv is None or o.uv is None:
            uv_equal = self.uv is o.uv
        else:
            # uv is a list of per-channel (u, v) pairs; compare pairwise.
            uv_equal = len(self.uv) == len(o.uv) and all(
                eq(x, y) for x, y in zip(self.uv, o.uv))
        return (eq(self.position, o.position)
                and eq(self.normal, o.normal)
                and uv_equal
                and eq(self.tangent, o.tangent)
                and eq(self.bitangent, o.bitangent)
                and eq(self.color, o.color)
                and eq(self.bone_ids, o.bone_ids)
                and eq(self.bone_weights, o.bone_weights))

    def __hash__(self) -> int:
        # BUGFIX: tuple(None) raised when color was missing; absent fields now
        # hash as empty tuples instead.
        def as_tuple(v):
            return tuple(v) if v is not None else ()

        return (hash(as_tuple(self.position)) ^ hash(as_tuple(self.normal))
                ^ hash(as_tuple(self.color)) ^ hash(as_tuple(self.uv)))
class NudMaterial:
    """Blend/alpha/cull render state plus the textures and named properties of one material."""

    def init_data(self, material: BrNudMaterial):
        """Copy render-state fields and convert textures and named properties."""
        self.flags = material.flags
        self.sourceFactor = material.sourceFactor
        self.destFactor = material.destFactor
        self.alphaTest = material.alphaTest
        self.alphaFunction = material.alphaFunction
        self.refAlpha = material.refAlpha
        self.cullMode = material.cullMode
        self.unk1 = material.unk1
        self.unk2 = material.unk2
        self.zBufferOffset = material.zBufferOffset

        self.textures = list()
        for raw_texture in material.textures:
            converted = NudMaterialTexture()
            converted.init_data(raw_texture)
            self.textures.append(converted)

        # Only properties that actually carry a name are kept.
        self.properties = list()
        for raw_property in (p for p in material.properties if p.name):
            converted = NudMaterialProperty()
            converted.init_data(raw_property)
            self.properties.append(converted)
class NudMaterialTexture:
    """Sampler settings (map/wrap/filter modes) for one material texture slot."""

    # Raw fields copied one-to-one from the parsed structure, in order.
    _FIELDS = ('unk0', 'mapMode', 'wrapModeS', 'wrapModeT',
               'minFilter', 'magFilter', 'mipDetail', 'unk1', 'unk2')

    def init_data(self, texture: BrNudMaterialTexture):
        """Copy every sampler field from a parsed BrNudMaterialTexture."""
        for field in self._FIELDS:
            setattr(self, field, getattr(texture, field))
class NudMaterialProperty:
    """A named material property with its list of float values."""

    # NOTE(review): the parameter name "property" shadows the builtin; kept
    # unchanged here because it is visible to keyword callers.
    def init_data(self, property: BrNudMaterialProperty):
        """Copy the property's name and float values from the parsed structure."""
        self.name = property.name
        self.values: List[float] = property.values
| 32.642857
| 117
| 0.607358
| 922
| 7,312
| 4.67679
| 0.172451
| 0.025046
| 0.02551
| 0.024119
| 0.14077
| 0.124072
| 0.099258
| 0.091837
| 0.087199
| 0.030148
| 0
| 0.013725
| 0.292533
| 7,312
| 223
| 118
| 32.789238
| 0.819834
| 0.00971
| 0
| 0.071429
| 0
| 0
| 0.006632
| 0
| 0
| 0
| 0.002211
| 0
| 0
| 1
| 0.095238
| false
| 0.005952
| 0.017857
| 0.029762
| 0.327381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d93c9fb2121a519402ceb1deef23ae520c7fdfe
| 1,717
|
py
|
Python
|
utils/event_store_rebuilder_for_segments.py
|
initialed85/eds-cctv-system
|
fcdb7e7e23327bf3a901d23d506b3915833027d1
|
[
"MIT"
] | null | null | null |
utils/event_store_rebuilder_for_segments.py
|
initialed85/eds-cctv-system
|
fcdb7e7e23327bf3a901d23d506b3915833027d1
|
[
"MIT"
] | null | null | null |
utils/event_store_rebuilder_for_segments.py
|
initialed85/eds-cctv-system
|
fcdb7e7e23327bf3a901d23d506b3915833027d1
|
[
"MIT"
] | null | null | null |
import datetime
from pathlib import Path
from typing import Optional, Tuple
from .common import _IMAGE_SUFFIXES, _PERMITTED_EXTENSIONS, PathDetails, rebuild_event_store
def parse_path(path: Path, tzinfo: datetime.tzinfo) -> Optional[PathDetails]:
    """Build PathDetails for a segment file, or None for unsupported extensions.

    Raises ValueError for event files: this rebuilder handles segments only.
    """
    if path.suffix.lower() not in _PERMITTED_EXTENSIONS:
        return None

    if path.name.lower().startswith("event"):
        raise ValueError("cannot process events; only segments")

    # Expected name shape: <prefix>_<YYYY-mm-dd>_<HH-MM-SS>_<camera>.<ext>
    parts = path.name.split('_')
    timestamp = datetime.datetime.strptime(
        f'{parts[1]}_{parts[2]}', "%Y-%m-%d_%H-%M-%S"
    ).replace(tzinfo=tzinfo)

    camera_name = parts[3].split('.')[0]
    # Strip the low-resolution marker so both variants share one camera name.
    if camera_name.endswith('-lowres'):
        camera_name = camera_name.split('-lowres')[0]

    return PathDetails(
        path=path,
        event_id=None,
        camera_id=None,
        timestamp=timestamp,
        camera_name=camera_name,
        is_image=path.suffix.lower() in _IMAGE_SUFFIXES,
        is_lowres="-lowres" in path.name.lower(),
    )
def _get_key(path_details: PathDetails) -> Tuple[str, str]:
    """Key segment entries by camera name plus second-resolution timestamp."""
    when = path_details.timestamp.strftime("%Y-%m-%d %H:%M:%S")
    return (path_details.camera_name, when)
if __name__ == "__main__":
    # CLI entry point: rebuild the event store JSON from segment files on disk.
    import argparse

    from dateutil.tz import tzoffset

    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--root-path", type=str, required=True)
    parser.add_argument("-j", "--json-path", type=str, required=True)

    args = parser.parse_args()

    rebuild_event_store(
        root_path=args.root_path,
        # NOTE(review): timezone is hard-coded to UTC+8 ("WST-8") — confirm
        # this matches the deployment's camera clocks.
        tzinfo=tzoffset(name="WST-8", offset=8 * 60 * 60),
        json_path=args.json_path,
        parse_method=parse_path,
        get_key_methods=[_get_key]
    )
| 28.616667
| 92
| 0.663366
| 220
| 1,717
| 4.936364
| 0.381818
| 0.064457
| 0.031308
| 0.007366
| 0.053407
| 0.01105
| 0
| 0
| 0
| 0
| 0
| 0.008035
| 0.202679
| 1,717
| 59
| 93
| 29.101695
| 0.785245
| 0
| 0
| 0
| 0
| 0
| 0.092021
| 0.012231
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.139535
| 0.023256
| 0.255814
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d94db8d2bb9acc8dbec349c6766ca408545196a
| 599
|
py
|
Python
|
python/distance/HaversineDistanceInMiles.py
|
jigneshoo7/AlgoBook
|
8aecc9698447c0ee561a1c90d5c5ab87c4a07b79
|
[
"MIT"
] | 191
|
2020-09-28T10:00:20.000Z
|
2022-03-06T14:36:55.000Z
|
python/distance/HaversineDistanceInMiles.py
|
jigneshoo7/AlgoBook
|
8aecc9698447c0ee561a1c90d5c5ab87c4a07b79
|
[
"MIT"
] | 210
|
2020-09-28T10:06:36.000Z
|
2022-03-05T03:44:24.000Z
|
python/distance/HaversineDistanceInMiles.py
|
jigneshoo7/AlgoBook
|
8aecc9698447c0ee561a1c90d5c5ab87c4a07b79
|
[
"MIT"
] | 320
|
2020-09-28T09:56:14.000Z
|
2022-02-12T16:45:57.000Z
|
import math
def distanceInMilesOrKilos(milesOrKilos, originLat, originLon, destinationLat, destinationLon):
    """Great-circle (haversine) distance between two lat/lon points.

    Uses Earth radius 3959 when milesOrKilos == "miles", else 6371 (km).
    """
    earth_radius = 3959 if milesOrKilos == "miles" else 6371

    delta_lat = math.radians(destinationLat - originLat)
    delta_lon = math.radians(destinationLon - originLon)

    # Haversine terms for the angular separation.
    sin_half_lat = math.sin(delta_lat / 2)
    sin_half_lon = math.sin(delta_lon / 2)
    a = (sin_half_lat * sin_half_lat
         + math.cos(math.radians(originLat)) * math.cos(math.radians(destinationLat))
         * sin_half_lon * sin_half_lon)

    central_angle = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return earth_radius * central_angle
| 33.277778
| 153
| 0.651085
| 77
| 599
| 5.064935
| 0.415584
| 0.112821
| 0.076923
| 0.061538
| 0.071795
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053533
| 0.220367
| 599
| 18
| 154
| 33.277778
| 0.781585
| 0
| 0
| 0
| 0
| 0
| 0.008333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d95a5da0117840ab07b75457380a92375c5347d
| 8,837
|
py
|
Python
|
i2i/util.py
|
thorwhalen/i2i
|
f967aaba28793029e3fe643c5e17ae9bc7a77732
|
[
"Apache-2.0"
] | 1
|
2019-08-29T01:35:12.000Z
|
2019-08-29T01:35:12.000Z
|
i2i/util.py
|
thorwhalen/i2i
|
f967aaba28793029e3fe643c5e17ae9bc7a77732
|
[
"Apache-2.0"
] | null | null | null |
i2i/util.py
|
thorwhalen/i2i
|
f967aaba28793029e3fe643c5e17ae9bc7a77732
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import division
import inspect
import types
from functools import wraps
function_type = type(lambda x: x) # using this instead of callable() because classes are callable, for instance
class NoDefault(object):
def __repr__(self):
return 'no_default'
no_default = NoDefault()
class imdict(dict):
def __hash__(self):
return id(self)
def _immutable(self, *args, **kws):
raise TypeError('object is immutable')
__setitem__ = _immutable
__delitem__ = _immutable
clear = _immutable
update = _immutable
setdefault = _immutable
pop = _immutable
popitem = _immutable
def inject_method(self, method_function, method_name=None):
"""
method_function could be:
* a function
* a {method_name: function, ...} dict (for multiple injections)
* a list of functions or (function, method_name) pairs
"""
if isinstance(method_function, function_type):
if method_name is None:
method_name = method_function.__name__
setattr(self,
method_name,
types.MethodType(method_function, self))
else:
if isinstance(method_function, dict):
method_function = [(func, func_name) for func_name, func in method_function.items()]
for method in method_function:
if isinstance(method, tuple) and len(method) == 2:
self = inject_method(self, method[0], method[1])
else:
self = inject_method(self, method)
return self
def transform_args(**trans_func_for_arg):
"""
Make a decorator that transforms function arguments before calling the function.
For example:
* original argument: a relative path --> used argument: a full path
* original argument: a pickle filepath --> used argument: the loaded object
:param rootdir: rootdir to be used for all name arguments of target function
:param name_arg: the position (int) or argument name of the argument containing the name
:return: a decorator
>>> def f(a, b, c):
... return "a={a}, b={b}, c={c}".format(a=a, b=b, c=c)
>>>
>>> print(f('foo', 'bar', 3))
a=foo, b=bar, c=3
>>> ff = transform_args()(f)
>>> print(ff('foo', 'bar', 3))
a=foo, b=bar, c=3
>>> ff = transform_args(a=lambda x: 'ROOT/' + x)(f)
>>> print(ff('foo', 'bar', 3))
a=ROOT/foo, b=bar, c=3
>>> ff = transform_args(b=lambda x: 'ROOT/' + x)(f)
>>> print(ff('foo', 'bar', 3))
a=foo, b=ROOT/bar, c=3
>>> ff = transform_args(a=lambda x: 'ROOT/' + x, b=lambda x: 'ROOT/' + x)(f)
>>> print(ff('foo', b='bar', c=3))
a=ROOT/foo, b=ROOT/bar, c=3
"""
def transform_args_decorator(func):
if len(trans_func_for_arg) == 0: # if no transformations were specified...
return func # just return the function itself
else:
@wraps(func)
def transform_args_wrapper(*args, **kwargs):
# get a {argname: argval, ...} dict from *args and **kwargs
# Note: Didn't really need an if/else here but...
# Note: ... assuming getcallargs gives us an overhead that can be avoided if there's only keyword args.
if len(args) > 0:
val_of_argname = inspect.getcallargs(func, *args, **kwargs)
else:
val_of_argname = kwargs
# apply transform functions to argument values
for argname, trans_func in trans_func_for_arg.items():
val_of_argname[argname] = trans_func(val_of_argname[argname])
# call the function with transformed values
return func(**val_of_argname)
return transform_args_wrapper
return transform_args_decorator
def resolve_filepath_of_name(name_arg=None, rootdir=''):
"""
Make a decorator that applies a function to an argument before using it.
For example:
* original argument: a relative path --> used argument: a full path
* original argument: a pickle filepath --> used argument: the loaded object
:param rootdir: rootdir to be used for all name arguments of target function
:param name_arg: the position (int) or argument name of the argument containing the name
:return: a decorator
>>> def f(a, b, c):
... return "a={a}, b={b}, c={c}".format(a=a, b=b, c=c)
>>>
>>> print(f('foo', 'bar', 3))
a=foo, b=bar, c=3
>>> ff = resolve_filepath_of_name()(f)
>>> print(ff('foo', 'bar', 3))
a=foo, b=bar, c=3
>>> ff = resolve_filepath_of_name('a', 'ROOT')(f)
>>> print(ff('foo', 'bar', 3))
a=ROOT/foo, b=bar, c=3
>>> ff = resolve_filepath_of_name('b', 'ROOT')(f)
>>> print(ff('foo', 'bar', 3))
a=foo, b=ROOT/bar, c=3
"""
if name_arg is not None:
return transform_args(**{name_arg: lambda x: os.path.join(rootdir, x)})
else:
return lambda x: x
def arg_dflt_dict_of_callable(f):
"""
Get a {arg_name: default_val, ...} dict from a callable.
See also :py:mint_of_callable:
:param f: A callable (function, method, ...)
:return:
"""
argspec = inspect.getfullargspec(f)
args = argspec.args or []
defaults = argspec.defaults or []
return {arg: dflt for arg, dflt in zip(args, [no_default] * (len(args) - len(defaults)) + list(defaults))}
def add_self_as_first_argument(func):
@wraps(func)
def wrapped_func(self, *args, **kwargs):
return func(*args, **kwargs)
return wrapped_func
def add_cls_as_first_argument(func):
@wraps(func)
def wrapped_func(cls, *args, **kwargs):
return func(*args, **kwargs)
return wrapped_func
def infer_if_function_might_be_intended_as_a_classmethod_or_staticmethod(func):
"""
Tries to infer if the input function is a 'classmethod' or 'staticmethod' (or just 'normal')
When is that? When:
* the function's first argument is called 'cls' and has no default: 'classmethod'
* the function's first argument is called 'self' and has no default: 'staticmethod'
* otherwise: 'normal'
>>> def a_normal_func(x, y=None):
... pass
>>> def a_func_that_is_probably_a_classmethod(cls, y=None):
... pass
>>> def a_func_that_is_probably_a_staticmethod(self, y=None):
... pass
>>> def a_func_that_is_probably_a_classmethod_but_is_not(cls=3, y=None):
... pass
>>> def a_func_that_is_probably_a_staticmethod_but_is_not(self=None, y=None):
... pass
>>> list_of_functions = [
... a_normal_func,
... a_func_that_is_probably_a_classmethod,
... a_func_that_is_probably_a_staticmethod,
... a_func_that_is_probably_a_classmethod_but_is_not,
... a_func_that_is_probably_a_staticmethod_but_is_not,
... ]
>>>
>>> for func in list_of_functions:
... print("{}: {}".format(func.__name__,
... infer_if_function_might_be_intended_as_a_classmethod_or_staticmethod(func)))
...
a_normal_func: normal
a_func_that_is_probably_a_classmethod: classmethod
a_func_that_is_probably_a_staticmethod: staticmethod
a_func_that_is_probably_a_classmethod_but_is_not: normal_with_cls
a_func_that_is_probably_a_staticmethod_but_is_not: normal_with_self
"""
argsspec = inspect.getfullargspec(func)
if len(argsspec.args) > 0:
first_element_has_no_defaults = bool(len(argsspec.args) > len(argsspec.defaults))
if argsspec.args[0] == 'cls':
if first_element_has_no_defaults:
return 'classmethod'
else:
return 'normal_with_cls'
elif argsspec.args[0] == 'self':
if first_element_has_no_defaults:
return 'staticmethod'
else:
return 'normal_with_self'
return 'normal'
def decorate_as_staticmethod_or_classmethod_if_needed(func):
type_of_func = infer_if_function_might_be_intended_as_a_classmethod_or_staticmethod(func)
if type_of_func == 'classmethod':
return classmethod(func)
elif type_of_func == 'staticmethod':
return staticmethod(func)
elif type_of_func == 'normal':
return func
if __name__ == '__main__':
import os
import re
key_file_re = re.compile('setup.py')
def dir_is_a_pip_installable_dir(dirpath):
return any(filter(key_file_re.match, os.listdir(dirpath)))
rootdir = '/D/Dropbox/dev/py/proj'
cumul = list()
for f in filter(lambda x: not x.startswith('.'), os.listdir(rootdir)):
filepath = os.path.join(rootdir, f)
if os.path.isdir(filepath):
if dir_is_a_pip_installable_dir(filepath):
cumul.append(filepath)
for f in cumul:
print(f)
| 34.928854
| 119
| 0.629965
| 1,203
| 8,837
| 4.362427
| 0.176226
| 0.011433
| 0.020579
| 0.025152
| 0.420732
| 0.393293
| 0.38205
| 0.34718
| 0.329078
| 0.308308
| 0
| 0.004092
| 0.253367
| 8,837
| 252
| 120
| 35.06746
| 0.7913
| 0.452303
| 0
| 0.162162
| 0
| 0
| 0.036962
| 0.004958
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144144
| false
| 0
| 0.054054
| 0.045045
| 0.486486
| 0.009009
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d99f51b98aee394d6e4b4f62dcc6cdca1b6db1f
| 10,131
|
py
|
Python
|
tutorials/seq2seq_sated/seq2seq_sated_meminf.py
|
rizwandel/ml_privacy_meter
|
5dc4c300eadccceadd0e664a7e46099f65728628
|
[
"MIT"
] | 294
|
2020-04-13T18:32:45.000Z
|
2022-03-31T10:32:34.000Z
|
tutorials/seq2seq_sated/seq2seq_sated_meminf.py
|
kypomon/ml_privacy_meter
|
c0324e8f74cbd0cde0643a7854fa66eab47bbe53
|
[
"MIT"
] | 26
|
2020-04-29T19:56:21.000Z
|
2022-03-31T10:42:24.000Z
|
tutorials/seq2seq_sated/seq2seq_sated_meminf.py
|
kypomon/ml_privacy_meter
|
c0324e8f74cbd0cde0643a7854fa66eab47bbe53
|
[
"MIT"
] | 50
|
2020-04-16T02:16:24.000Z
|
2022-03-16T00:37:40.000Z
|
import os
import sys
from collections import defaultdict
import tensorflow as tf
import tensorflow.keras.backend as K
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve
from sklearn.linear_model import LogisticRegression
from utils import process_texts, load_texts, load_users, load_sated_data_by_user, \
build_nmt_model, words_to_indices, \
SATED_TRAIN_USER, SATED_TRAIN_FR, SATED_TRAIN_ENG
MODEL_PATH = 'checkpoints/model/'
OUTPUT_PATH = 'checkpoints/output/'
tf.compat.v1.disable_eager_execution()
# ================================ GENERATE RANKS ================================ #
# Code adapted from https://github.com/csong27/auditing-text-generation
def load_train_users_heldout_data(train_users, src_vocabs, trg_vocabs, user_data_ratio=0.5):
    """Load the held-out portion of each training user's parallel sentences.

    Returns (user_src_texts, user_trg_texts) dicts keyed by user, with
    vocabulary filtering applied in place via process_texts.
    """
    src_users = load_users(SATED_TRAIN_USER)
    train_src_texts = load_texts(SATED_TRAIN_ENG)
    train_trg_texts = load_texts(SATED_TRAIN_FR)

    user_src_texts = defaultdict(list)
    user_trg_texts = defaultdict(list)

    # Group sentence pairs by owning user, keeping the requested users only.
    for user, src_sent, trg_sent in zip(src_users, train_src_texts, train_trg_texts):
        if user in train_users:
            user_src_texts[user].append(src_sent)
            user_trg_texts[user].append(trg_sent)

    assert 0. < user_data_ratio < 1.
    # Hold out some fraction of data for testing
    for user in user_src_texts:
        cutoff = int(len(user_src_texts[user]) * user_data_ratio)
        user_src_texts[user] = user_src_texts[user][cutoff:]
        user_trg_texts[user] = user_trg_texts[user][cutoff:]

    for user in train_users:
        process_texts(user_src_texts[user], src_vocabs)
        process_texts(user_trg_texts[user], trg_vocabs)

    return user_src_texts, user_trg_texts
def rank_lists(lists):
    """Return per-row 0-based 'min'-method ranks for each row of *lists*."""
    out = np.empty_like(lists)
    for row_idx, row in enumerate(lists):
        # rankdata is 1-based; shift down so the smallest value ranks 0.
        out[row_idx] = ss.rankdata(row, method='min') - 1
    return out
def get_ranks(user_src_data, user_trg_data, pred_fn, save_probs=False):
    """
    Get ranks from prediction vectors.

    Returns the raw probability arrays when save_probs is True, otherwise a
    (ranks, labels) pair with one entry per sentence.
    """
    ranks = []
    labels = []
    probs = []

    for src_raw, trg_raw in zip(user_src_data, user_trg_data):
        src_text = np.asarray(src_raw, dtype=np.float32).reshape(1, -1)
        trg_text = np.asarray(trg_raw, dtype=np.float32)

        # Teacher forcing: input drops the last token, label shifts left by one.
        trg_input = trg_text[:-1].reshape(1, -1)
        trg_label = trg_text[1:].reshape(1, -1)

        prob = pred_fn([src_text, trg_input, trg_label, 0])[0][0]
        if save_probs:
            probs.append(prob)

        # Rank of the true token among the whole vocabulary, per position.
        all_ranks = rank_lists(-prob)
        sent_ranks = all_ranks[np.arange(len(all_ranks)), trg_label.flatten().astype(int)]

        ranks.append(sent_ranks)
        labels.append(trg_label.flatten())

    if save_probs:
        return probs

    return ranks, labels
def save_users_rank_results(users, user_src_texts, user_trg_texts, src_vocabs, trg_vocabs, prob_fn, save_dir,
                            member_label=1, cross_domain=False, save_probs=False, mask=False, rerun=False):
    """
    Save user ranks in the appropriate format for attacks.
    """
    suffix = '_cd' if cross_domain else ''
    for i, user in enumerate(users):
        save_path = save_dir + 'rank_u{}_y{}{}.npz'.format(i, member_label, suffix)
        prob_path = save_dir + 'prob_u{}_y{}{}.npz'.format(i, member_label, suffix)

        # Skip results already on disk unless probabilities or a rerun were requested.
        if os.path.exists(save_path) and not save_probs and not rerun:
            continue

        user_src_data = words_to_indices(user_src_texts[user], src_vocabs, mask=mask)
        user_trg_data = words_to_indices(user_trg_texts[user], trg_vocabs, mask=mask)

        rtn = get_ranks(user_src_data, user_trg_data, prob_fn, save_probs=save_probs)
        if save_probs:
            np.savez(prob_path, rtn)
        else:
            np.savez(save_path, rtn[0], rtn[1])

        # Periodic progress report for long user lists.
        if (i + 1) % 500 == 0:
            sys.stderr.write('Finishing saving ranks for {} users'.format(i + 1))
def get_target_ranks(num_users=200, num_words=5000, mask=False, h=128, emb_h=128, user_data_ratio=0.,
                     tied=False, save_probs=False):
    """
    Get ranks of target machine translation model.

    Loads the trained target NMT model, computes per-token prediction ranks
    for member (train) and non-member (test) users, and writes them to disk
    for the membership-inference attack.
    """
    user_src_texts, user_trg_texts, test_user_src_texts, test_user_trg_texts, src_vocabs, trg_vocabs \
        = load_sated_data_by_user(num_users, num_words, test_on_user=True, user_data_ratio=user_data_ratio)

    train_users = sorted(user_src_texts.keys())
    test_users = sorted(test_user_src_texts.keys())

    # Get model
    save_dir = OUTPUT_PATH + 'target_{}{}/'.format(num_users, '_dr' if 0. < user_data_ratio < 1. else '')
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    # NOTE(review): .format() on a string without placeholders is a no-op;
    # presumably a 'sated_nmt_{}' template was intended — confirm against the
    # actual checkpoint file names before changing.
    model_path = 'sated_nmt'.format(num_users)
    if 0. < user_data_ratio < 1.:
        model_path += '_dr{}'.format(user_data_ratio)
        # When part of each user's data was held out during training, fold the
        # held-out half back in so ranks cover the users' full data.
        heldout_src_texts, heldout_trg_texts = load_train_users_heldout_data(train_users, src_vocabs, trg_vocabs)
        for u in train_users:
            user_src_texts[u] += heldout_src_texts[u]
            user_trg_texts[u] += heldout_trg_texts[u]

    model = build_nmt_model(Vs=num_words, Vt=num_words, mask=mask, drop_p=0., h=h, demb=emb_h, tied=tied)
    model.load_weights(MODEL_PATH + '{}_{}.h5'.format(model_path, num_users))

    src_input_var, trg_input_var = model.inputs
    prediction = model.output
    trg_label_var = K.placeholder((None, None), dtype='float32')

    # Get predictions
    prediction = K.softmax(prediction)
    # TF1-style callable: [src, trg_input, trg_label, learning_phase] -> [softmax probs]
    prob_fn = K.function([src_input_var, trg_input_var, trg_label_var, K.learning_phase()], [prediction])

    # Save user ranks for train and test dataset
    save_users_rank_results(users=train_users, save_probs=save_probs,
                            user_src_texts=user_src_texts, user_trg_texts=user_trg_texts,
                            src_vocabs=src_vocabs, trg_vocabs=trg_vocabs, cross_domain=False,
                            prob_fn=prob_fn, save_dir=save_dir, member_label=1)
    save_users_rank_results(users=test_users, save_probs=save_probs,
                            user_src_texts=test_user_src_texts, user_trg_texts=test_user_trg_texts,
                            src_vocabs=src_vocabs, trg_vocabs=trg_vocabs, cross_domain=False,
                            prob_fn=prob_fn, save_dir=save_dir, member_label=0)
# ================================ ATTACK ================================ #
def avg_rank_feats(ranks):
    """
    Averages ranks to get features for deciding the threshold for membership inference.
    """
    # One scalar per user: mean rank over all of that user's tokens.
    return [np.mean(np.concatenate(user_ranks)) for user_ranks in ranks]
def load_ranks_by_label(save_dir, num_users=300, cross_domain=False, label=1):
    """
    Helper method to load ranks by train/test dataset.
    If label = 1, train set ranks are loaded. If label = 0, test set ranks are loaded.
    Ranks are generated by running sated_nmt_ranks.py.
    """
    ranks, labels, y = [], [], []
    suffix = '_cd' if cross_domain else ''

    for i in range(num_users):
        save_path = save_dir + 'rank_u{}_y{}{}.npz'.format(i, label, suffix)
        if not os.path.exists(save_path):
            # Missing users are silently skipped.
            continue
        archive = np.load(save_path, allow_pickle=True)
        ranks.append(archive['arr_0'])
        labels.append(archive['arr_1'])
        y.append(label)

    return ranks, labels, y
def load_all_ranks(save_dir, num_users=5000, cross_domain=False):
    """
    Loads all ranks generated by the target model.
    Ranks are generated by running sated_nmt_ranks.py.
    """
    ranks, labels, y = [], [], []

    # Members first (label 1), then non-members (label 0).
    for membership_label in (1, 0):
        part_ranks, part_labels, part_y = load_ranks_by_label(
            save_dir, num_users, cross_domain, membership_label)
        ranks.extend(part_ranks)
        labels.extend(part_labels)
        y.extend(part_y)

    return ranks, labels, np.asarray(y)
def run_average_rank_thresholding(num_users=300, dim=100, prop=1.0, user_data_ratio=0.,
                                  top_words=5000, cross_domain=False, rerun=False):
    """
    Runs average rank thresholding attack on the target model.

    Loads cached average-rank features when available (unless rerun=True),
    otherwise rebuilds them from the saved per-user ranks; then fits a
    logistic-regression threshold and saves a ROC curve plot.
    """
    result_path = OUTPUT_PATH
    if dim > top_words:
        dim = top_words

    attack1_results_save_path = result_path + 'mi_data_dim{}_prop{}_{}{}_attack1.npz'.format(
        dim, prop, num_users, '_cd' if cross_domain else '')

    if not rerun and os.path.exists(attack1_results_save_path):
        f = np.load(attack1_results_save_path)
        # BUGFIX: the cache is written with exactly two arrays (X, y) below,
        # but this branch previously read four ('arr_0'..'arr_3') and unpacked
        # them into two names, so taking the cached path always raised.
        X, y = [f['arr_{}'.format(i)] for i in range(2)]
    else:
        save_dir = result_path + 'target_{}{}/'.format(num_users, '_dr' if 0. < user_data_ratio < 1. else '')

        # Load ranks
        train_ranks, _, train_y = load_ranks_by_label(save_dir, num_users, label=1)
        test_ranks, _, test_y = load_ranks_by_label(save_dir, num_users, label=0)

        # Convert to average rank features
        train_feat = avg_rank_feats(train_ranks)
        test_feat = avg_rank_feats(test_ranks)

        # Create dataset
        X, y = np.concatenate([train_feat, test_feat]), np.concatenate([train_y, test_y])
        np.savez(attack1_results_save_path, X, y)

    # Find threshold using ROC
    clf = LogisticRegression()
    clf.fit(X.reshape(-1, 1), y)
    probs = clf.predict_proba(X.reshape(-1, 1))
    fpr, tpr, thresholds = roc_curve(y, probs[:, 1])

    plt.figure(1)
    plt.plot(fpr, tpr, label='Attack 1')
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.title('ROC curve')
    plt.savefig('sateduser_attack1_roc_curve.png')
if __name__ == '__main__':
    # Attack driver: extract ranks from the target model, then run the
    # average-rank thresholding attack.
    num_users = 300
    save_probs = False
    # NOTE(review): this local is never passed on — run_average_rank_thresholding
    # below receives the literal rerun=True instead.
    rerun = True

    print("Getting target ranks...")
    get_target_ranks(num_users=num_users, save_probs=save_probs)

    print("Running average rank thresholding attack...")
    run_average_rank_thresholding(num_users=num_users, rerun=True)
| 35.672535
| 113
| 0.660251
| 1,490
| 10,131
| 4.160403
| 0.168456
| 0.027101
| 0.03678
| 0.01468
| 0.334731
| 0.267462
| 0.200032
| 0.188901
| 0.144862
| 0.144862
| 0
| 0.013208
| 0.222782
| 10,131
| 283
| 114
| 35.798587
| 0.774067
| 0.099398
| 0
| 0.094444
| 0
| 0
| 0.046003
| 0.007593
| 0
| 0
| 0
| 0
| 0.005556
| 1
| 0.05
| false
| 0
| 0.061111
| 0
| 0.15
| 0.011111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d9d264830cab7159205ed06b41898abec3b84f4
| 2,685
|
py
|
Python
|
app/recipe/tests/test_tags_api.py
|
MohamedAbdelmagid/django-recipe-api
|
229d3a7cff483b3cad76c70aefde6a51250b9bc8
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
MohamedAbdelmagid/django-recipe-api
|
229d3a7cff483b3cad76c70aefde6a51250b9bc8
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
MohamedAbdelmagid/django-recipe-api
|
229d3a7cff483b3cad76c70aefde6a51250b9bc8
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient
from rest_framework import status
from core.models import Tag
from recipe.serializers import TagSerializer
# List/create endpoint for tags under the recipe app namespace.
TAGS_URL = reverse("recipe:tag-list")
class PublicTagsApiTests(TestCase):
    """Tests for the tags API when no user is authenticated."""

    def setUp(self):
        # Plain client — no credentials attached.
        self.client = APIClient()

    def test_login_required(self):
        """Anonymous requests to the tag list must be rejected with 401."""
        res = self.client.get(TAGS_URL)

        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
    """Behaviour of the tags API for an authenticated user."""

    def setUp(self):
        self.user = get_user_model().objects.create_user(
            "test@gmail.com", "testpassword"
        )
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retrieve_tags(self):
        """Listing tags returns all tags, serialized, ordered by name descending."""
        for tag_name in ("Dessert", "Salad"):
            Tag.objects.create(user=self.user, name=tag_name)

        res = self.client.get(TAGS_URL)

        expected = TagSerializer(Tag.objects.all().order_by("-name"), many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, expected.data)

    def test_tags_limited_to_user(self):
        """Only the authenticated user's own tags appear in the listing."""
        other_user = get_user_model().objects.create_user(
            "test2@gmail.com", "test2password"
        )
        Tag.objects.create(user=other_user, name="Candied Yams")
        own_tag = Tag.objects.create(user=self.user, name="Soul Food")

        res = self.client.get(TAGS_URL)

        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]["name"], own_tag.name)

    def test_create_tag_successful(self):
        """POSTing a valid name creates the tag for the current user."""
        payload = {'name': 'Test Tag Name'}
        existed_before = Tag.objects.filter(
            user=self.user, name=payload['name']
        ).exists()

        self.client.post(TAGS_URL, payload)

        exists_now = Tag.objects.filter(
            user=self.user, name=payload['name']
        ).exists()
        self.assertTrue(exists_now)
        self.assertFalse(existed_before)

    def test_create_tag_invalid(self):
        """POSTing an empty name is rejected with HTTP 400."""
        payload = {'name': ''}
        res = self.client.post(TAGS_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| 32.349398
| 85
| 0.672998
| 335
| 2,685
| 5.259701
| 0.298507
| 0.045403
| 0.07832
| 0.045403
| 0.397276
| 0.397276
| 0.266742
| 0.187855
| 0.187855
| 0.187855
| 0
| 0.008503
| 0.211546
| 2,685
| 82
| 86
| 32.743902
| 0.823807
| 0.100186
| 0
| 0.18
| 0
| 0
| 0.059022
| 0
| 0
| 0
| 0
| 0
| 0.18
| 1
| 0.14
| false
| 0.04
| 0.14
| 0
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d9d7d5c7ee0f28e0c8877291fb904e2d8ace2db
| 5,736
|
py
|
Python
|
dtlpy/entities/annotation_definitions/cube_3d.py
|
dataloop-ai/dtlpy
|
2c73831da54686e047ab6aefd8f12a8e53ea97c2
|
[
"Apache-2.0"
] | 10
|
2020-05-21T06:25:35.000Z
|
2022-01-07T20:34:03.000Z
|
dtlpy/entities/annotation_definitions/cube_3d.py
|
dataloop-ai/dtlpy
|
2c73831da54686e047ab6aefd8f12a8e53ea97c2
|
[
"Apache-2.0"
] | 22
|
2019-11-17T17:25:16.000Z
|
2022-03-10T15:14:28.000Z
|
dtlpy/entities/annotation_definitions/cube_3d.py
|
dataloop-ai/dtlpy
|
2c73831da54686e047ab6aefd8f12a8e53ea97c2
|
[
"Apache-2.0"
] | 8
|
2020-03-05T16:23:55.000Z
|
2021-12-27T11:10:42.000Z
|
import numpy as np
# import open3d as o3d
from . import BaseAnnotationDefinition
# from scipy.spatial.transform import Rotation as R
import logging
logger = logging.getLogger(name=__name__)
class Cube3d(BaseAnnotationDefinition):
    """
    3D cube annotation defined by a centre position, per-axis scale and
    per-axis Euler rotation.
    """
    type = "cube_3d"

    def __init__(self, label, position, scale, rotation,
                 attributes=None, description=None):
        """
        :param label: annotation label
        :param position: the XYZ position of the 'center' of the annotation.
        :param scale: the scale of the object by each axis (XYZ).
        :param rotation: an euler representation of the object rotation on each axis (with rotation order 'XYZ'). (rotation in radians)
        :param attributes: optional list of attributes
        :param description: optional free-text description
        """
        super().__init__(description=description, attributes=attributes)
        self.position = position
        self.scale = scale
        self.rotation = rotation
        self.label = label

    def _translate(self, points, translate_x, translate_y, translate_z):
        """Translate 3D points by (translate_x, translate_y, translate_z).

        Points are promoted to homogeneous coordinates, multiplied by a
        row-vector translation matrix, and the w component is dropped.
        """
        translation_matrix = np.array([[1, 0, 0, 0],
                                       [0, 1, 0, 0],
                                       [0, 0, 1, 0],
                                       [translate_x, translate_y, translate_z, 1]])
        homogeneous = [(list(p) + [1]) for p in points]
        transformed = np.dot(homogeneous, translation_matrix)
        return [pt[:3] for pt in transformed]

    @property
    def geo(self):
        """3x3 array stacking position, scale and rotation (one row each)."""
        return np.asarray([
            list(self.position),
            list(self.scale),
            list(self.rotation)
        ])

    def show(self, image, thickness, with_text, height, width, annotation_format, color):
        """
        Show annotation as ndarray.

        Rendering of 3D cubes is not supported: a warning is logged and the
        input image is returned unchanged.

        Bug fix: the previous implementation called ``self.make_points()``,
        a method that exists only as commented-out code, so ``show`` always
        crashed with AttributeError before reaching the warning/return.

        :param image: empty or image to draw on
        :param thickness: not used
        :param with_text: not required
        :param height: item height (not used)
        :param width: item width (not used)
        :param annotation_format: options: list(dl.ViewAnnotationOptions)
        :param color: color (not used)
        :return: the input image, unchanged
        """
        logger.warning('the show for 3d_cube is not supported.')
        return image

    def to_coordinates(self, color=None):
        """Serialize position/scale/rotation to {key: {"x","y","z"}} dicts.

        :param color: unused; kept for interface compatibility.
        """
        keys = ["position", "scale", "rotation"]
        coordinates = {keys[idx]: {"x": float(x), "y": float(y), "z": float(z)}
                       for idx, [x, y, z] in enumerate(self.geo)}
        return coordinates

    @staticmethod
    def from_coordinates(coordinates):
        """Inverse of to_coordinates: build an Nx3 array from {key: xyz-dict}.

        Row order follows the dict's iteration order (insertion order).
        """
        geo = list()
        for key, pt in coordinates.items():
            geo.append([pt["x"], pt["y"], pt["z"]])
        return np.asarray(geo)

    @classmethod
    def from_json(cls, _json):
        """Build a Cube3d from a platform annotation JSON dict.

        Accepts the geometry under either "coordinates" or "data".

        :raises ValueError: if neither key is present.
        """
        if "coordinates" in _json:
            key = "coordinates"
        elif "data" in _json:
            key = "data"
        else:
            raise ValueError('can not find "coordinates" or "data" in annotation. id: {}'.format(_json["id"]))
        return cls(
            position=list(_json[key]['position'].values()),
            scale=list(_json[key]['scale'].values()),
            rotation=list(_json[key]['rotation'].values()),
            label=_json["label"],
            attributes=_json.get("attributes", None)
        )
| 38.496644
| 135
| 0.544107
| 712
| 5,736
| 4.286517
| 0.259831
| 0.076671
| 0.07536
| 0.028834
| 0.116645
| 0.112385
| 0.092726
| 0.086501
| 0.086501
| 0.086501
| 0
| 0.044337
| 0.31189
| 5,736
| 148
| 136
| 38.756757
| 0.728908
| 0.415795
| 0
| 0
| 0
| 0
| 0.095253
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097222
| false
| 0
| 0.083333
| 0.013889
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d9e1079bef17b6514de9131ede3ab7099ea53a4
| 3,702
|
py
|
Python
|
my_module/tools.py
|
roki18d/sphinx_autogen-apidoc
|
67ad9c716c909d89bcd813a5fa871df8850e4fd5
|
[
"Apache-2.0"
] | null | null | null |
my_module/tools.py
|
roki18d/sphinx_autogen-apidoc
|
67ad9c716c909d89bcd813a5fa871df8850e4fd5
|
[
"Apache-2.0"
] | null | null | null |
my_module/tools.py
|
roki18d/sphinx_autogen-apidoc
|
67ad9c716c909d89bcd813a5fa871df8850e4fd5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
from my_module.exceptions import InvalidArgumentsError
class SimpleCalculator(object):
    """SimpleCalculator

    SimpleCalculator is a simple calculator.

    Attributes:
        operator (str):
            String that represents operation type.
            Acceptable values are: {"add": addition, "sub": subtraction,
            "mul": multiplication, "div": divide}
        response (dict):
            Response for API execution.
            This contains conditions (such as operands) and execution results.
    """

    def __init__(self, operator: str) -> None:
        """Initialize instance.

        Args:
            operator (str): one of "add", "sub", "mul", "div".

        Raises:
            InvalidArgumentsError: if `operator` is not a supported value.
        """
        valid_operators = ["add", "sub", "mul", "div"]
        if operator not in valid_operators:
            msg = f"Invalid operator '{operator}' was given, choose from {valid_operators}."
            raise InvalidArgumentsError(msg)
        self.operator = operator
        self.response = dict()

    def __add(self, num1: int, num2: int) -> None:
        self.response['results'] = {"sum": num1 + num2}
        return None

    def __sub(self, num1: int, num2: int) -> None:
        self.response['results'] = {"difference": num1 - num2}
        return None

    def __mul(self, num1: int, num2: int) -> None:
        self.response['results'] = {"product": num1 * num2}
        return None

    def __div(self, num1: int, num2: int) -> None:
        # Integer division: report both quotient and remainder.
        self.response['results'] = {"quotient": num1 // num2, "remainder": num1 % num2}
        return None

    def __handle_exceptions(self, e) -> None:
        # Record the exception object itself in the response.
        self.response['results'] = {"error_message": e}
        return None

    def execute(self, num1: int, num2: int):
        """
        Interface to execute calculation.

        Args:
            num1 (int): 1st operand.
            num2 (int): 2nd operand.

        Returns:
            dict: self.response

        Examples:
            >>> my_adder = SimpleCalculator(operator="add")
            >>> my_adder.execute(4, 2)
            {'operands': {'num1': 4, 'num2': 2}, 'results': {'sum': 6}}
        """
        operands = {"num1": num1, "num2": num2}
        self.response['operands'] = operands
        # Validate first and stop on failure.  Bug fix: previously the
        # validation error was recorded and then immediately overwritten by
        # whatever exception the arithmetic below happened to raise.
        if (not isinstance(num1, int)) or (not isinstance(num2, int)):
            msg = f"All operands should be integer, given: {operands}."
            self.__handle_exceptions(InvalidArgumentsError(msg))
            return self.response
        try:
            if self.operator == "add":
                self.__add(num1, num2)
            elif self.operator == "sub":
                self.__sub(num1, num2)
            elif self.operator == "mul":
                self.__mul(num1, num2)
            elif self.operator == "div":
                self.__div(num1, num2)
        except Exception as e:
            # e.g. ZeroDivisionError from "div" with num2 == 0.
            self.__handle_exceptions(e)
        return self.response
if __name__ == "__main__":
    # Demo of every operator plus the two failure modes.
    my_adder = SimpleCalculator(operator="add")
    print('Case01:', my_adder.execute(4, 2))
    print('Case02:', my_adder.execute(5, "a"))
    my_subtractor = SimpleCalculator(operator="sub")
    print('Case03:', my_subtractor.execute(3, 5))
    my_multiplier = SimpleCalculator(operator="mul")
    print('Case04:', my_multiplier.execute(2, 7))
    my_divider = SimpleCalculator(operator="div")
    print('Case05:', my_divider.execute(17, 5))
    print('Case06:', my_divider.execute(6, 0))
    print('Case07:')
    # Bug fix: the invalid operator raises InvalidArgumentsError, which
    # previously escaped uncaught -- the demo died with a traceback and the
    # sys.exit(0) below was unreachable.
    try:
        my_unknown = SimpleCalculator(operator="unknown")
    except Exception as e:
        print(e)
    import sys
    sys.exit(0)
| 30.85
| 92
| 0.562939
| 387
| 3,702
| 5.22739
| 0.335917
| 0.039545
| 0.027187
| 0.037074
| 0.255067
| 0.119624
| 0.119624
| 0.119624
| 0.081068
| 0
| 0
| 0.028527
| 0.308752
| 3,702
| 119
| 93
| 31.109244
| 0.762016
| 0.218801
| 0
| 0.216667
| 0
| 0
| 0.119497
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116667
| false
| 0
| 0.033333
| 0
| 0.266667
| 0.116667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8da38969800ff2540723920b2ba94670badb3561
| 12,114
|
py
|
Python
|
PCA_ResNet50.py
|
liuyingbin19222/HSI_svm_pca_resNet50
|
cd95d21c81e93f8b873183f10f52416f71a93d07
|
[
"Apache-2.0"
] | 12
|
2020-03-13T02:39:53.000Z
|
2022-02-21T03:28:33.000Z
|
PCA_ResNet50.py
|
liuyingbin19222/HSI_svm_pca_resNet50
|
cd95d21c81e93f8b873183f10f52416f71a93d07
|
[
"Apache-2.0"
] | 14
|
2020-02-17T12:31:08.000Z
|
2022-02-10T01:07:05.000Z
|
PCA_ResNet50.py
|
liuyingbin19222/HSI_svm_pca_resNet50
|
cd95d21c81e93f8b873183f10f52416f71a93d07
|
[
"Apache-2.0"
] | 3
|
2020-09-06T08:19:15.000Z
|
2021-03-08T10:15:40.000Z
|
import keras
from keras.layers import Conv2D, Conv3D, Flatten, Dense, Reshape, BatchNormalization
from keras.layers import Dropout, Input
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, cohen_kappa_score
from operator import truediv
from plotly.offline import init_notebook_mode
import numpy as np
import tensorflow as tf
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from keras.initializers import glorot_uniform
import pydot
from IPython.display import SVG
import scipy.misc
from matplotlib.pyplot import imshow
import keras.backend as K
K.set_image_data_format('channels_last')
K.set_learning_phase(1)
from keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
import os
import spectral
## GLOBAL VARIABLES
dataset = 'IP'  # which hyperspectral dataset to use: 'IP', 'SA' or 'PU'
test_ratio = 0.8  # fraction of samples held out for testing
windowSize = 25  # spatial size of the square patches fed to the network
def loadData(name):
    """Load a hyperspectral cube and its ground-truth labels from ./data.

    :param name: dataset identifier: 'IP' (Indian Pines), 'SA' (Salinas)
        or 'PU' (Pavia University).
    :return: (data, labels) arrays read from the corresponding .mat files.
    :raises ValueError: for an unknown dataset name.  Bug fix: previously an
        unknown name fell through every branch and the function crashed with
        UnboundLocalError on the return statement instead.
    """
    data_path = os.path.join(os.getcwd(), 'data')
    if name == 'IP':
        data = sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected.mat'))['indian_pines_corrected']
        labels = sio.loadmat(os.path.join(data_path, 'Indian_pines_gt.mat'))['indian_pines_gt']
    elif name == 'SA':
        data = sio.loadmat(os.path.join(data_path, 'Salinas_corrected.mat'))['salinas_corrected']
        labels = sio.loadmat(os.path.join(data_path, 'Salinas_gt.mat'))['salinas_gt']
    elif name == 'PU':
        data = sio.loadmat(os.path.join(data_path, 'PaviaU.mat'))['paviaU']
        labels = sio.loadmat(os.path.join(data_path, 'PaviaU_gt.mat'))['paviaU_gt']
    else:
        raise ValueError("Unknown dataset name: %r (expected 'IP', 'SA' or 'PU')" % name)
    return data, labels
def splitTrainTestSet(X, y, testRatio, randomState=345):
    """Stratified train/test split; thin wrapper over sklearn's train_test_split."""
    train_x, test_x, train_y, test_y = train_test_split(
        X, y, test_size=testRatio, random_state=randomState, stratify=y)
    return train_x, test_x, train_y, test_y
def applyPCA(X, numComponents=75):
    """Reduce the spectral axis of cube X to numComponents via whitened PCA.

    Returns the reduced cube (same spatial shape) and the fitted PCA object.
    """
    flattened = np.reshape(X, (-1, X.shape[2]))
    pca = PCA(n_components=numComponents, whiten=True)
    reduced = pca.fit_transform(flattened)
    reduced = np.reshape(reduced, (X.shape[0], X.shape[1], numComponents))
    return reduced, pca
def padWithZeros(X, margin=2):
    """Zero-pad the two spatial axes of X by `margin` on each side.

    The spectral (last) axis is left untouched; output is float64.
    """
    rows, cols, bands = X.shape
    padded = np.zeros((rows + 2 * margin, cols + 2 * margin, bands))
    padded[margin:rows + margin, margin:cols + margin, :] = X
    return padded
# drop zero-labelled (background) pixels
def createImageCubes(X, y, windowSize=5, removeZeroLabels=True):
    """Cut X into one windowSize x windowSize patch per pixel, labelled by y.

    The cube is zero-padded so every pixel gets a full patch centred on it.
    When removeZeroLabels is True, background patches (label 0) are dropped
    and the remaining labels are shifted down by one.
    """
    margin = int((windowSize - 1) / 2)
    padded = padWithZeros(X, margin=margin)
    n_pixels = X.shape[0] * X.shape[1]
    patchesData = np.zeros((n_pixels, windowSize, windowSize, X.shape[2]))
    patchesLabels = np.zeros(n_pixels)
    idx = 0
    for r in range(margin, padded.shape[0] - margin):
        for c in range(margin, padded.shape[1] - margin):
            patchesData[idx] = padded[r - margin:r + margin + 1, c - margin:c + margin + 1]
            patchesLabels[idx] = y[r - margin, c - margin]
            idx += 1
    if removeZeroLabels:
        keep = patchesLabels > 0
        patchesData = patchesData[keep, :, :, :]
        patchesLabels = patchesLabels[keep] - 1
    return patchesData, patchesLabels
# Build the dataset at import time: load cube, reduce bands, cut patches.
X, y = loadData(dataset)
# NOTE(review): this rebinds K, shadowing the `keras.backend as K` alias
# imported above (its set_* calls already ran, so only the alias is lost).
K = 30 if dataset == 'IP' else 15
X,pca = applyPCA(X,numComponents=K)
X, y = createImageCubes(X, y, windowSize=windowSize)
##
Xtrain, Xtest, ytrain, ytest = splitTrainTestSet(X, y, test_ratio)
# print("Xtrain.shape:",Xtrain.shape)
# print("ytrain.shape:",ytrain.shape)
# print("ytrain:",ytrain)
def convert_one_hot(labels,classes=16):
    """One-hot encode integer labels into `classes` columns."""
    return to_categorical(labels,num_classes=classes)
ytrain = convert_one_hot(ytrain,16)
ytest = convert_one_hot(ytest,16)
# print("ytrain.shape:",ytrain.shape)
# ResNet50 network
def identity_block(X, f, filters, stage, block):
    """
    Implements the ResNet identity block (shortcut path has no convolution).

    Args:
        X - input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
        f - integer, kernel size of the middle CONV layer on the main path
        filters - list of integers, number of filters in each conv of the main path
        stage - integer, names each layer by its position; used with `block`
        block - string, names each layer by its position; used with `stage`

    Returns:
        X - output tensor of the identity block, shape (n_H, n_W, n_C)
    """
    # layer-naming convention
    conv_name_base = "res" + str(stage) + block + "_branch"
    bn_name_base = "bn" + str(stage) + block + "_branch"
    # unpack the per-layer filter counts
    F1, F2, F3 = filters
    # save the input; it is added back to the main path as the shortcut
    X_shortcut = X
    # first component of the main path
    ## conv layer
    X = Conv2D(filters=F1, kernel_size=(1,1), strides=(1,1), padding="valid",
               name=conv_name_base+"2a", kernel_initializer=glorot_uniform(seed=0))(X)
    ## batch normalization
    X = BatchNormalization(axis=3,name=bn_name_base+"2a")(X)
    ## ReLU activation
    X = Activation("relu")(X)
    # second component of the main path
    ## conv layer
    X = Conv2D(filters=F2, kernel_size=(f,f),strides=(1,1), padding="same",
               name=conv_name_base+"2b", kernel_initializer=glorot_uniform(seed=0))(X)
    ## batch normalization
    X = BatchNormalization(axis=3,name=bn_name_base+"2b")(X)
    ## ReLU activation
    X = Activation("relu")(X)
    # third component of the main path
    ## conv layer
    X = Conv2D(filters=F3, kernel_size=(1,1), strides=(1,1), padding="valid",
               name=conv_name_base+"2c", kernel_initializer=glorot_uniform(seed=0))(X)
    ## batch normalization
    X = BatchNormalization(axis=3,name=bn_name_base+"2c")(X)
    ## no ReLU here
    # final step:
    ## add the shortcut to the main path
    X = Add()([X,X_shortcut])
    ## ReLU activation
    X = Activation("relu")(X)
    return X
def convolutional_block(X, f, filters, stage, block, s=2):
    """
    Implements the ResNet convolutional block (shortcut path has a conv).

    Args:
        X - input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
        f - integer, kernel size of the middle CONV layer on the main path
        filters - list of integers, number of filters in each conv of the main path
        stage - integer, names each layer by its position; used with `block`
        block - string, names each layer by its position; used with `stage`
        s - integer, stride to use (downsamples both paths)

    Returns:
        X - output tensor of the convolutional block, shape (n_H, n_W, n_C)
    """
    # layer-naming convention
    conv_name_base = "res" + str(stage) + block + "_branch"
    bn_name_base = "bn" + str(stage) + block + "_branch"
    # unpack the per-layer filter counts
    F1, F2, F3 = filters
    # save the input for the shortcut path
    X_shortcut = X
    # main path
    ## first component (strided: performs the downsampling)
    X = Conv2D(filters=F1, kernel_size=(1,1), strides=(s,s), padding="valid",
               name=conv_name_base+"2a", kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3,name=bn_name_base+"2a")(X)
    X = Activation("relu")(X)
    ## second component
    X = Conv2D(filters=F2, kernel_size=(f,f), strides=(1,1), padding="same",
               name=conv_name_base+"2b", kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3,name=bn_name_base+"2b")(X)
    X = Activation("relu")(X)
    ## third component (no ReLU before the addition)
    X = Conv2D(filters=F3, kernel_size=(1,1), strides=(1,1), padding="valid",
               name=conv_name_base+"2c", kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3,name=bn_name_base+"2c")(X)
    # shortcut path: conv + BN so shapes match the main path
    X_shortcut = Conv2D(filters=F3, kernel_size=(1,1), strides=(s,s), padding="valid",
                        name=conv_name_base+"1", kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis=3,name=bn_name_base+"1")(X_shortcut)
    # final step: add the two paths, then ReLU
    X = Add()([X,X_shortcut])
    X = Activation("relu")(X)
    return X
def ResNet50(input_shape=(25,25,30),classes=16):
    """
    Builds ResNet50:
    CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
    -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER

    Args:
        input_shape - shape of the input images
        classes - integer, number of output classes

    Returns:
        model - a Keras Model
    """
    # input tensor
    X_input = Input(input_shape)
    # zero padding
    X = ZeroPadding2D((3,3))(X_input)
    #stage1
    X = Conv2D(filters=64, kernel_size=(7,7), strides=(2,2), name="conv1",
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name="bn_conv1")(X)
    X = Activation("relu")(X)
    X = MaxPooling2D(pool_size=(3,3), strides=(2,2))(X)
    #stage2
    X = convolutional_block(X, f=3, filters=[64,64,256], stage=2, block="a", s=1)
    X = identity_block(X, f=3, filters=[64,64,256], stage=2, block="b")
    X = identity_block(X, f=3, filters=[64,64,256], stage=2, block="c")
    #stage3
    X = convolutional_block(X, f=3, filters=[128,128,512], stage=3, block="a", s=2)
    X = identity_block(X, f=3, filters=[128,128,512], stage=3, block="b")
    X = identity_block(X, f=3, filters=[128,128,512], stage=3, block="c")
    X = identity_block(X, f=3, filters=[128,128,512], stage=3, block="d")
    #stage4
    X = convolutional_block(X, f=3, filters=[256,256,1024], stage=4, block="a", s=2)
    X = identity_block(X, f=3, filters=[256,256,1024], stage=4, block="b")
    X = identity_block(X, f=3, filters=[256,256,1024], stage=4, block="c")
    X = identity_block(X, f=3, filters=[256,256,1024], stage=4, block="d")
    X = identity_block(X, f=3, filters=[256,256,1024], stage=4, block="e")
    X = identity_block(X, f=3, filters=[256,256,1024], stage=4, block="f")
    #stage5
    X = convolutional_block(X, f=3, filters=[512,512,2048], stage=5, block="a", s=2)
    X = identity_block(X, f=3, filters=[512,512,2048], stage=5, block="b")
    X = identity_block(X, f=3, filters=[512,512,2048], stage=5, block="c")
    # average pooling
    X = AveragePooling2D(pool_size=(2,2),padding="same")(X)
    # output layer: flatten + softmax classifier
    X = Flatten()(X)
    X = Dense(classes, activation="softmax", name="fc"+str(classes),
              kernel_initializer=glorot_uniform(seed=0))(X)
    # build the model
    model = Model(inputs=X_input, outputs=X, name="ResNet50")
    return model
# # x_train : (3074,25,25,30) y_train: (3074)
# model = ResNet50(input_shape=(25,25,30),classes=16)
# model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
#
#
# model.fit(Xtrain,ytrain,epochs=2,batch_size=25)
# preds = model.evaluate(Xtest,ytest)
#
# print("误差率:",str(preds[0]))
# print("准确率:",str(preds[1]))
def main():
    """Train ResNet50 on the preprocessed cubes, plot curves, print metrics."""
    # x_train : (3074,25,25,30) y_train: (3074)
    model = ResNet50(input_shape=(25, 25, 30), classes=16)
    model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
    history = model.fit(Xtrain, ytrain, epochs=100, batch_size=25)
    # preds = [loss, accuracy] from model.evaluate
    preds = model.evaluate(Xtest, ytest)
    # accuracy curve
    plt.figure(figsize=(5,5))
    plt.ylim(0,1.1)
    plt.grid()
    plt.plot(history.history['accuracy'])
    #plt.plot(history.history['val_acc'])
    plt.ylabel( dataset+' _Accuracy')
    plt.xlabel('Epochs')
    # NOTE(review): the legend lists 'Validation' but only the training curve
    # is plotted (the val_acc line is commented out) -- confirm intent.
    plt.legend(['Training','Validation'])
    plt.savefig("acc_curve.jpg")
    plt.show()
    # loss curve
    plt.figure(figsize=(7,7))
    plt.grid()
    plt.plot(history.history['loss'])
    #plt.plot(history.history['val_loss'])
    plt.ylabel(dataset+' _Loss')
    plt.xlabel('Epochs')
    plt.legend(['Training','Validation'], loc='upper right')
    plt.savefig("loss_curve.jpg")
    plt.show()
    print("误差率:", str(preds[0]))
    print("准确率:", str(preds[1]))

if __name__ == "__main__":
    main()
| 34.123944
| 159
| 0.630097
| 1,654
| 12,114
| 4.480048
| 0.195889
| 0.014575
| 0.017004
| 0.017274
| 0.517949
| 0.457895
| 0.434953
| 0.394332
| 0.348448
| 0.320378
| 0
| 0.045421
| 0.223956
| 12,114
| 354
| 160
| 34.220339
| 0.742793
| 0.140499
| 0
| 0.235602
| 0
| 0
| 0.056077
| 0.009482
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052356
| false
| 0
| 0.183246
| 0.005236
| 0.282723
| 0.010471
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8da4e24daba79cfc5a237fbfd0bd61228b6bdc1d
| 754
|
py
|
Python
|
tests/test_data/utest/setup.py
|
gordonmessmer/pyp2rpm
|
60145ba6fa49ad5bb29eeffa5765e10ba8417f03
|
[
"MIT"
] | 114
|
2015-07-13T12:38:27.000Z
|
2022-03-23T15:05:11.000Z
|
tests/test_data/utest/setup.py
|
gordonmessmer/pyp2rpm
|
60145ba6fa49ad5bb29eeffa5765e10ba8417f03
|
[
"MIT"
] | 426
|
2015-07-13T12:09:38.000Z
|
2022-01-07T16:41:32.000Z
|
tests/test_data/utest/setup.py
|
Mattlk13/pyp2rpm
|
f9ced95877d88c96b77b2b8c510dc4ceaa10504a
|
[
"MIT"
] | 51
|
2015-07-14T13:11:29.000Z
|
2022-03-31T07:27:32.000Z
|
#!/usr/bin/env python3
from setuptools import setup, find_packages
# Runtime dependencies of the micro test package.
requirements = ["pyp2rpm~=3.3.1"]

setup(
    name="utest",
    version="0.1.0",
    description="Micro test module",
    license="GPLv2+",
    author="pyp2rpm Developers",
    author_email='bkabrda@redhat.com, rkuska@redhat.com, mcyprian@redhat.com, ishcherb@redhat.com',
    url='https://github.com/fedora-python/pyp2rpm',
    install_requires=requirements,
    include_package_data=True,
    packages=find_packages(exclude=["test"]),
    # Fix: setuptools expects `classifiers` to be a list; recent versions
    # warn on (and may reject) other sequence types such as the tuple that
    # was passed here before.
    classifiers=[
        "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
    ],
)
| 30.16
| 99
| 0.66313
| 87
| 754
| 5.678161
| 0.689655
| 0.072874
| 0.101215
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022876
| 0.188329
| 754
| 24
| 100
| 31.416667
| 0.784314
| 0.027851
| 0
| 0
| 0
| 0.05
| 0.493169
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8da621c7d046b3bbba97fe0075833d24a4276a49
| 4,235
|
py
|
Python
|
abstract_nas/train/preprocess.py
|
dumpmemory/google-research
|
bc87d010ab9086b6e92c3f075410fa6e1f27251b
|
[
"Apache-2.0"
] | null | null | null |
abstract_nas/train/preprocess.py
|
dumpmemory/google-research
|
bc87d010ab9086b6e92c3f075410fa6e1f27251b
|
[
"Apache-2.0"
] | null | null | null |
abstract_nas/train/preprocess.py
|
dumpmemory/google-research
|
bc87d010ab9086b6e92c3f075410fa6e1f27251b
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data preprocessing for ImageNet2012 and CIFAR-10."""
from typing import Any, Callable
# pylint: disable=unused-import
from big_vision.pp import ops_general
from big_vision.pp import ops_image
# pylint: enable=unused-import
from big_vision.pp import utils
from big_vision.pp.builder import get_preprocess_fn as _get_preprocess_fn
from big_vision.pp.registry import Registry
import tensorflow as tf
CIFAR_MEAN = [0.4914, 0.4822, 0.4465]
CIFAR_STD = [0.247, 0.243, 0.261]
@Registry.register("preprocess_ops.random_crop_with_pad")
@utils.InKeyOutKey()
def get_random_crop_with_pad(crop_size,
padding):
"""Makes a random crop of a given size.
Args:
crop_size: either an integer H, where H is both the height and width of the
random crop, or a list or tuple [H, W] of integers, where H and W are
height and width of the random crop respectively.
padding: how much to pad before cropping.
Returns:
A function, that applies random crop.
"""
crop_size = utils.maybe_repeat(crop_size, 2)
padding = utils.maybe_repeat(padding, 2)
def _crop(image):
image = tf.image.resize_with_crop_or_pad(image,
crop_size[0] + padding[0],
crop_size[1] + padding[1])
return tf.image.random_crop(image,
[crop_size[0], crop_size[1], image.shape[-1]])
return _crop
def preprocess_cifar(split, **_):
"""Preprocessing functions for CIFAR-10 training."""
mean_str = ",".join([str(m) for m in CIFAR_MEAN])
std_str = ",".join([str(m) for m in CIFAR_STD])
if split == "train":
pp = ("decode|"
"value_range(0,1)|"
"random_crop_with_pad(32,4)|"
"flip_lr|"
f"vgg_value_range(({mean_str}),({std_str}))|"
"onehot(10, key='label', key_result='labels')|"
"keep('image', 'labels')")
else:
pp = ("decode|"
"value_range(0,1)|"
"central_crop(32)|"
f"vgg_value_range(({mean_str}),({std_str}))|"
"onehot(10, key='label', key_result='labels')|"
"keep('image', 'labels')")
return _get_preprocess_fn(pp)
def preprocess_imagenet(split,
autoaugment = False,
label_smoothing = 0.0,
**_):
"""Preprocessing functions for ImageNet training."""
if split == "train":
pp = ("decode_jpeg_and_inception_crop(224)|"
"flip_lr|")
if autoaugment:
pp += "randaug(2,10)|"
pp += "value_range(-1,1)|"
if label_smoothing:
confidence = 1.0 - label_smoothing
low_confidence = (1.0 - confidence) / (1000 - 1)
pp += ("onehot(1000, key='label', key_result='labels', "
f"on_value={confidence}, off_value={low_confidence})|")
else:
pp += "onehot(1000, key='label', key_result='labels')|"
pp += "keep('image', 'labels')"
else:
pp = ("decode|"
"resize_small(256)|"
"central_crop(224)|"
"value_range(-1,1)|"
"onehot(1000, key='label', key_result='labels')|"
"keep('image', 'labels')")
return _get_preprocess_fn(pp)
PREPROCESS = {
"cifar10": preprocess_cifar,
"imagenet2012": preprocess_imagenet,
}
def get_preprocess_fn(dataset, split,
**preprocess_kwargs):
"""Makes a preprocessing function."""
preprocess_fn_by_split = PREPROCESS.get(dataset, lambda _: (lambda x: x))
split = "train" if "train" in split else "val"
preprocess_fn = preprocess_fn_by_split(split, **preprocess_kwargs)
return preprocess_fn
| 32.576923
| 79
| 0.633058
| 568
| 4,235
| 4.536972
| 0.323944
| 0.041909
| 0.025223
| 0.029104
| 0.235933
| 0.223516
| 0.185487
| 0.133489
| 0.089251
| 0.089251
| 0
| 0.034547
| 0.241322
| 4,235
| 129
| 80
| 32.829457
| 0.767507
| 0.273436
| 0
| 0.246753
| 0
| 0
| 0.256461
| 0.104705
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064935
| false
| 0
| 0.090909
| 0
| 0.220779
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8da70610f3402c8b44d3fbdf21a05f4f563b016b
| 488
|
py
|
Python
|
hidb/wrapper.py
|
sk-ip/hidb
|
1394000992c016607e7af15095f058cd9cce007b
|
[
"MIT"
] | null | null | null |
hidb/wrapper.py
|
sk-ip/hidb
|
1394000992c016607e7af15095f058cd9cce007b
|
[
"MIT"
] | null | null | null |
hidb/wrapper.py
|
sk-ip/hidb
|
1394000992c016607e7af15095f058cd9cce007b
|
[
"MIT"
] | null | null | null |
from datetime import datetime
class fileWrapper(object):
    """In-memory store wrapper tracking its keys and size limits."""

    def __init__(self):
        self.data = {}                          # key -> stored record
        self.keys = set()                       # set of known keys
        self.max_data_size = 16 * 1024          # single-record JSON limit: 16 KB in bytes
        self.max_database_size = 1024 ** 3      # whole-database limit: 1 GB in bytes
        self.current_database_size = 0          # bytes currently stored
class dataWrapper:
    """Wraps one stored value together with its creation time and TTL."""

    def __init__(self, data, ttl):
        self.data = data
        self.ttl = ttl
        # Creation instant as a POSIX-style float (naive local time).
        self.timestamp = datetime.today().timestamp()
| 24.4
| 53
| 0.622951
| 60
| 488
| 4.833333
| 0.466667
| 0.082759
| 0.075862
| 0.096552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055233
| 0.295082
| 488
| 19
| 54
| 25.684211
| 0.787791
| 0.120902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.076923
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8da8f86888f2ee041a3f2312c9709ef180e420d0
| 4,504
|
py
|
Python
|
ion-channel-models/compare.py
|
sanmitraghosh/fickleheart-method-tutorials
|
d5ee910258a2656951201d4ada2a412804013bd5
|
[
"BSD-3-Clause"
] | null | null | null |
ion-channel-models/compare.py
|
sanmitraghosh/fickleheart-method-tutorials
|
d5ee910258a2656951201d4ada2a412804013bd5
|
[
"BSD-3-Clause"
] | null | null | null |
ion-channel-models/compare.py
|
sanmitraghosh/fickleheart-method-tutorials
|
d5ee910258a2656951201d4ada2a412804013bd5
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
from __future__ import print_function
import sys
sys.path.append('./method')
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import model as m
"""
Run fit.
"""
# Protocols/datasets this script can predict on.
predict_list = ['sinewave', 'staircase', 'activation', 'ap']

try:
    which_predict = sys.argv[1]
except IndexError:
    # Bug fix: the bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only a missing CLI argument should print usage.
    print('Usage: python %s [str:which_predict]' % os.path.basename(__file__))
    sys.exit()

if which_predict not in predict_list:
    raise ValueError('Input data %s is not available in the predict list' \
            % which_predict)
# Get all input variables
import importlib
sys.path.append('./mmt-model-files')
# Import the two model description modules by name.
info_id_a = 'model_A'
info_a = importlib.import_module(info_id_a)
info_id_b = 'model_B'
info_b = importlib.import_module(info_id_b)
data_dir = './data'
savedir = './fig/compare'
if not os.path.isdir(savedir):
    os.makedirs(savedir)
data_file_name = 'data-%s.csv' % which_predict
print('Predicting ', data_file_name)
saveas = 'compare-sinewave-' + which_predict

# Protocol: CSV with columns (time, voltage); header row skipped.
protocol = np.loadtxt('./protocol-time-series/%s.csv' % which_predict,
        skiprows=1, delimiter=',')
protocol_times = protocol[:, 0]
protocol = protocol[:, 1]

# Load data: CSV with columns (time, current); header row skipped.
data = np.loadtxt(data_dir + '/' + data_file_name,
                  delimiter=',', skiprows=1)  # headers
times = data[:, 0]
data = data[:, 1]

# Model: one instance per model description, temperature converted to Kelvin.
model_a = m.Model(info_a.model_file,
        variables=info_a.parameters,
        current_readout=info_a.current_list,
        set_ion=info_a.ions_conc,
        transform=None,
        temperature=273.15 + info_a.temperature,  # K
        )
model_b = m.Model(info_b.model_file,
        variables=info_b.parameters,
        current_readout=info_b.current_list,
        set_ion=info_b.ions_conc,
        transform=None,
        temperature=273.15 + info_b.temperature,  # K
        )

# Update protocol: drive both models with the same fixed-form voltage clamp.
model_a.set_fixed_form_voltage_protocol(protocol, protocol_times)
model_b.set_fixed_form_voltage_protocol(protocol, protocol_times)

# Load calibrated parameters (fitted to the sinewave protocol).
load_seed = 542811797
fix_idx = [1]  # which calibration run(s) to load
calloaddir_a = './out/' + info_id_a
calloaddir_b = './out/' + info_id_b
cal_params_a = []
cal_params_b = []
for i in fix_idx:
    cal_params_a.append(np.loadtxt('%s/%s-solution-%s-%s.txt' % \
            (calloaddir_a, 'sinewave', load_seed, i)))
    cal_params_b.append(np.loadtxt('%s/%s-solution-%s-%s.txt' % \
            (calloaddir_b, 'sinewave', load_seed, i)))

# Predict: simulate each model with each loaded parameter set.
predictions_a = []
for p in cal_params_a:
    predictions_a.append(model_a.simulate(p, times))
predictions_b = []
for p in cal_params_b:
    predictions_b.append(model_b.simulate(p, times))

# Plot: small top panel (height ratio 1) over a large bottom panel (ratio 3).
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(10, 4),
        gridspec_kw={'height_ratios': [1, 3]})
is_predict = ' prediction' if which_predict != 'sinewave' else ''
sim_protocol = model_a.voltage(times) # model_b should give the same thing
axes[0].plot(times, sim_protocol, c='#7f7f7f')
axes[0].set_ylabel('Voltage\n(mV)', fontsize=16)
axes[1].plot(times, data, alpha=0.5, label='Data')
for i, p in zip(fix_idx, predictions_a):
axes[1].plot(times, p, label='Model A' + is_predict)
for i, p in zip(fix_idx, predictions_b):
axes[1].plot(times, p, label='Model B' + is_predict)
# Zooms
from mpl_toolkits.axes_grid1.inset_locator import inset_axes, mark_inset
sys.path.append('./protocol-time-series')
zoom = importlib.import_module(which_predict + '_to_zoom')
axes[1].set_ylim(zoom.set_ylim)
for i_zoom, (w, h, loc) in enumerate(zoom.inset_setup):
axins = inset_axes(axes[1], width=w, height=h, loc=loc,
axes_kwargs={"facecolor" : "#f0f0f0"})
axins.plot(times, data, alpha=0.5)
for i, p in zip(fix_idx, predictions_a):
axins.plot(times, p)
for i, p in zip(fix_idx, predictions_b):
axins.plot(times, p)
axins.set_xlim(zoom.set_xlim_ins[i_zoom])
axins.set_ylim(zoom.set_ylim_ins[i_zoom])
#axins.yaxis.get_major_locator().set_params(nbins=3)
#axins.xaxis.get_major_locator().set_params(nbins=3)
axins.set_xticklabels([])
axins.set_yticklabels([])
pp, p1, p2 = mark_inset(axes[1], axins, loc1=zoom.mark_setup[i_zoom][0],
loc2=zoom.mark_setup[i_zoom][1], fc="none", lw=0.75, ec='k')
pp.set_fill(True); pp.set_facecolor("#f0f0f0")
axes[1].legend()
axes[1].set_ylabel('Current (pA)', fontsize=16)
axes[1].set_xlabel('Time (ms)', fontsize=16)
plt.subplots_adjust(hspace=0)
plt.savefig('%s/%s' % (savedir, saveas), bbox_inches='tight', dpi=200)
plt.close()
| 31.277778
| 78
| 0.690941
| 699
| 4,504
| 4.213162
| 0.287554
| 0.036672
| 0.006791
| 0.009508
| 0.247199
| 0.179966
| 0.166384
| 0.149406
| 0.065195
| 0.027165
| 0
| 0.0214
| 0.159636
| 4,504
| 143
| 79
| 31.496504
| 0.756671
| 0.062389
| 0
| 0.074074
| 0
| 0
| 0.115036
| 0.023628
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8da906c8ad76ecde7a1bd94e5017709b02a7ce8e
| 7,752
|
py
|
Python
|
examples/services/classifier_service.py
|
bbbdragon/python-pype
|
f0618150cb4d2fae1f959127453fb6eca8db84e5
|
[
"MIT"
] | 8
|
2019-07-12T03:28:10.000Z
|
2019-07-19T20:34:45.000Z
|
examples/services/classifier_service.py
|
bbbdragon/python-pype
|
f0618150cb4d2fae1f959127453fb6eca8db84e5
|
[
"MIT"
] | null | null | null |
examples/services/classifier_service.py
|
bbbdragon/python-pype
|
f0618150cb4d2fae1f959127453fb6eca8db84e5
|
[
"MIT"
] | null | null | null |
'''
python3 classifier_service.py data.csv
This service runs a scikit-learn classifier on data provided by the csv file data.csv.
The idea of this is a simple spam detector. In the file, you will see a number, 1 or
-1, followed by a pipe, followed by a piece of text. The text is designed to be a
subject email, and the number its label: 1 for spam and -1 for not spam.
The service loads the csv file, trains the classifier, and then waits for you to
send it a list of texts via the 'classify' route. This service can be tested using:
./test_classifier_service.sh
'''
from flask import Flask,request,jsonify
from pype import pype as p
from pype import _,_0,_1,_p
from pype import _assoc as _a
from pype import _dissoc as _d
from pype import _do
from statistics import mean,stdev
from pype.vals import lenf
from sklearn.ensemble import RandomForestClassifier as Classifier
from sklearn.feature_extraction.text import TfidfVectorizer as Vectorizer
import sys
import csv
'''
We have to use lambda to define the read function because pype functions can't yet
deal with keyword args.
'''
def read(f):
    """Build a csv reader over file object *f* using '|' as the delimiter."""
    return csv.reader(f, delimiter='|')
def train_classifier(texts,y):
    '''
    Fit the vectorizer and classifier on (texts, y) and return them in a dict
    with keys 'vectorizer' and 'classifier'.

    Here is a perfect example of the "feel it ... func it" philosophy:
    The pype call uses the function arguments and function body to specify
    three variables, texts, a list of strings, y, a list of floats, and vectorizer,
    a scikit-learn object that vectorizes text.  This reiterates the advice that you
    should use the function body and function arguments to declare your scope,
    whenever you can.

    Line-by-line, here we go:

    {'vectorizer':vectorizer.fit,
     'X':vectorizer.transform},

    We build a dict, the first element of which is the fit vectorizer.  Luckily, the
    'fit' function returns an instance of the trained vectorizer, so we do not need to
    use _do.  This vectorizer is then assigned to 'vectorizer'.  Because iterating
    through dictionaries in Python3.6 preserves the order of the keys in which they
    were declared, we can apply the fit function to the vectorizer on the texts,
    assign that to the 'vectorizer' key.  We need this instance of the vectorizer to
    run the classifier for unknown texts.

    After this, we apply the 'transform' to convert the texts into a training matrix
    keyed by 'X', whose rows are texts and whose columns are words.

    _a('classifier',(Classifier().fit,_['X'],y)),

    Finally, we can build a classifier.  _a, or _assoc, means we are adding a
    key-value pair to the previous dictionary.  This will be a new instance of our
    Classifier, which is trained through the fit function on the text-word matrix 'X'
    and the labels vector y.

    _d('X'),

    Since we don't need the X matrix anymore, we delete it from the returned JSON,
    which now only contains 'vectorizer' and 'classifier', the two things we will
    need to classify unknown texts.
    '''
    vectorizer=Vectorizer()
    # Pipeline: fit vectorizer -> build matrix X -> fit classifier -> drop X.
    return p( texts,
              {'vectorizer':vectorizer.fit,
               'X':vectorizer.transform},
              _a('classifier',(Classifier().fit,_['X'],y)),
              _d('X'),
            )
'''
We train the model in a global variable containing our vectorizer and classifier.
This use of global variables is only used for microservices, by the way.
Here is a line-by-line description:
sys.argv[1],
open,
Open the file.
read,
We build a csv reader with the above-defined 'read' function, which builds a csv reader
with a '|' delimiter. I chose this delimeter because the texts often have commas.
list,
Because csv.reader is a generator, it cannot be accessed twice, so I cast it to a list. This list is a list of 2-element lists, of the form [label,text], where label is a
string for the label ('1' or '-1'), and text is a string for the training text. So an
example of this would be ['1','free herbal viagra buy now'].
(train,[_1],[(float,[_0])])
This is a lambda which calls the 'train' function on two arguments, the first being
a list of texts, the second being a list of numerical labels.
We know that the incoming argument is a list of 2-element lists, so [_1] is a map,
which goes through this list - [] - and builds a new list containing only the second
element of each 2-element list, referenced by _1.
With the first elements of the 2-element lists, we must extract the first element and
cast it to a float. In [(float,[_0])], the [] specifies a map over the list of
2-element lists. (float,_0) specifies we are accessing the first element of the
2-element list ('1' or '-1'), and calls the float function on it, to cast it to a
float. If we do not cast it to a float, sklearn will not be able to process it as
a label.
'''
# Train once at service start-up, following the recipe explained above:
# open the csv given on the command line, read its (label, text) rows, and
# fit the classifier.  MODEL is a dict {'vectorizer': ..., 'classifier': ...}.
MODEL=p( sys.argv[1],
         open,
         read,
         list,
         (train_classifier,[_1],[(float,_0)]),
       )

# Flask application object serving the HTTP routes below.
app = Flask(__name__)
@app.route('/classify',methods=['POST'])
def classify():
    '''
    Classify a batch of texts posted as JSON.

    This is the function that is run on a JSON containing one field, 'texts', which
    is a list of strings.  This function will return a list of JSON's containing the
    label for that text given by the classifier (1 or -1), and the original text.

    Line-by-line:

    global MODEL

    We need this to refer to the model we trained at the initialization of the
    microservice.

    texts=request.get_json(force=True)['texts']

    This extracts the 'texts' list from the json embedded in the request.

    MODEL['vectorizer'].transform,

    This uses the vectorizer to convert the list of strings in texts to a text-word
    matrix that can be fed into the classifier.

    MODEL['classifier'].predict,

    This runs the prediction on the text-word matrix, producing an array of 1's and
    -1's, where 1 indicates that the classification is positive (it is spam), and -1
    indicates that the classification is negative (it is not spam).

    (zip,_,texts),

    We know that the n-th label produced by the classifier is for the n-th string in
    texts, so we zip them together to produce an iterable of tuples (label,text).

    [{'label':_0,
      'text':_1,
      'description':{_0 == 1: 'spam',
                     'else':'not spam'}}],

    Here, we are performing a mapping over the (label,text) tuples produced by the
    zip.  For each tuple, we build a dictionary with three items: the numeric label
    (1.0 or -1.0), the original text string, and a human-readable description.

    The description value is a switch dict.  Since _0 is a Getter object, it
    overrides the == operator to produce a LamTup, which the pype interpreter runs
    as an expression.  _0 == 1 means "the label of this tuple is 1", i.e. spam, so
    'description' is set to 'spam'; otherwise it is set to 'not spam'.

    BUGFIX(review): this switch previously mapped label 1 to 'not spam', which
    contradicted the training-data convention stated at the top of this file
    ("1 for spam and -1 for not spam") and this docstring's own explanation of
    the classifier output.

    jsonify

    This just turns the resulting JSON, a list of dictionaries, into something that
    can be returned to the client over HTTP.
    '''
    global MODEL
    texts=request.get_json(force=True)['texts']
    return p( texts,
              MODEL['vectorizer'].transform,
              MODEL['classifier'].predict,
              (zip,_,texts),
              [{'label':_0,
                'text':_1,
                'description':{_0 == 1: 'spam',
                               'else':'not spam'}}],
              jsonify)
if __name__=='__main__':
    # Listen on all interfaces; debug mode enables the reloader and tracebacks.
    app.run(host='0.0.0.0',port=10004,debug=True)
| 36.739336
| 172
| 0.68434
| 1,238
| 7,752
| 4.238288
| 0.267367
| 0.007433
| 0.013341
| 0.006861
| 0.112255
| 0.080046
| 0.041166
| 0.014866
| 0.014866
| 0.014866
| 0
| 0.010645
| 0.236584
| 7,752
| 210
| 173
| 36.914286
| 0.875972
| 0.563983
| 0
| 0.046512
| 0
| 0
| 0.081237
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.27907
| 0
| 0.372093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8da9192128d87d058ba7b763d377c653bfe2eb10
| 2,657
|
py
|
Python
|
ida_plugin/uefi_analyser.py
|
fengjixuchui/UEFI_RETool
|
72c5d54c1dab9f58a48294196bca5ce957f6fb24
|
[
"MIT"
] | 240
|
2019-03-12T21:28:06.000Z
|
2021-02-09T16:20:09.000Z
|
ida_plugin/uefi_analyser.py
|
fengjixuchui/UEFI_RETool
|
72c5d54c1dab9f58a48294196bca5ce957f6fb24
|
[
"MIT"
] | 10
|
2019-09-09T08:38:35.000Z
|
2020-11-30T15:19:30.000Z
|
ida_plugin/uefi_analyser.py
|
fengjixuchui/UEFI_RETool
|
72c5d54c1dab9f58a48294196bca5ce957f6fb24
|
[
"MIT"
] | 53
|
2019-03-16T06:54:18.000Z
|
2020-12-23T06:16:38.000Z
|
# SPDX-License-Identifier: MIT
import os
import idaapi
import idautils
from PyQt5 import QtWidgets
from uefi_analyser import dep_browser, dep_graph, prot_explorer, ui
# Plugin metadata surfaced to IDA (banner, menu entry, and hotkey binding).
AUTHOR = "yeggor"
VERSION = "1.2.0"
NAME = "UEFI_RETool"
# Keyboard shortcut IDA binds to this plugin's run() action.
WANTED_HOTKEY = "Ctrl+Alt+U"
HELP = "This plugin performs automatic analysis of the input UEFI module"
class UefiAnalyserPlugin(idaapi.plugin_t):
    """IDA plugin that drives the UEFI_RETool analysis helpers
    (protocol explorer, dependency browser/graph)."""

    # Plugin may modify the database/processor state and is loaded eagerly.
    flags = idaapi.PLUGIN_MOD | idaapi.PLUGIN_PROC | idaapi.PLUGIN_FIX
    comment = HELP
    help = HELP
    wanted_name = NAME
    wanted_hotkey = WANTED_HOTKEY

    def init(self):
        """Called by IDA on load: install the menu action and print a banner."""
        # Start file dialogs in the directory of the current IDB.
        self._last_directory = idautils.GetIdbDir()
        ui.init_menu(MenuHandler(self))
        self._welcome()
        # Keep the plugin resident for the session.
        return idaapi.PLUGIN_KEEP

    def run(self, arg):
        """Hotkey/menu entry point: run the full analysis.

        Exceptions are printed rather than propagated so IDA stays usable.
        """
        try:
            self._analyse_all()
        except Exception as err:
            import traceback
            print(f"[{NAME} error] {str(err)}\n{traceback.format_exc()}")

    def term(self):
        """Called by IDA on unload; nothing to clean up."""
        pass

    def load_json_log(self):
        """Ask the user for a JSON log file and feed it to both dep viewers."""
        print(f"[{NAME}] try to parse JSON log file")
        log_name = self._select_log()
        print(f"[{NAME}] log name: {log_name}")
        # NOTE(review): log_name may be None/'' if the dialog was cancelled;
        # presumably dep_browser/dep_graph tolerate that — confirm.
        dep_browser.run(log_name)
        dep_graph.run(log_name)

    def _select_log(self):
        """Open a file dialog for a *.json results file.

        Returns:
            The selected path, or None/'' when cancelled or on error.
        """
        file_dialog = QtWidgets.QFileDialog()
        file_dialog.setFileMode(QtWidgets.QFileDialog.ExistingFiles)
        filename = None
        try:
            filename, _ = file_dialog.getOpenFileName(
                file_dialog,
                f"Select the {NAME} log file",
                self._last_directory,
                "Results files (*.json)",
            )
        except Exception as e:
            print(f"[{NAME} error] {str(e)}")
        if filename:
            # Remember the directory so the next dialog opens there.
            self._last_directory = os.path.dirname(filename)
        return filename

    @staticmethod
    def _welcome():
        """Print the plugin banner to IDA's output window."""
        print(f"\n{NAME} plugin by {AUTHOR} ({VERSION})")
        print(f"{NAME} shortcut key is {WANTED_HOTKEY}\n")

    @staticmethod
    def _analyse_all():
        """Run the protocol explorer over the loaded module."""
        prot_explorer.run()
class MenuHandler(idaapi.action_handler_t):
    """Menu action handler that opens the plugin's JSON-log loader."""

    def __init__(self, plugin):
        idaapi.action_handler_t.__init__(self)
        # The owning UefiAnalyserPlugin instance.
        self.plugin = plugin

    def activate(self, ctx):
        """On menu click: load a JSON log; print errors instead of raising."""
        try:
            self.plugin.load_json_log()
        except Exception as err:
            import traceback
            print(f"[{NAME} error] {str(err)}\n{traceback.format_exc()}")
        return True

    def update(self, ctx):
        """Keep the menu action enabled in every IDA context."""
        return idaapi.AST_ENABLE_ALWAYS
def PLUGIN_ENTRY():
    """Entry point required by IDA: build and return the plugin instance.

    On failure the traceback is printed and None is returned.
    """
    try:
        plugin = UefiAnalyserPlugin()
    except Exception as err:
        import traceback
        print(f"[{NAME} error] {str(err)}\n{traceback.format_exc()}")
        return None
    return plugin
| 26.04902
| 73
| 0.616861
| 320
| 2,657
| 4.915625
| 0.359375
| 0.030515
| 0.044501
| 0.038144
| 0.154482
| 0.143039
| 0.143039
| 0.143039
| 0.143039
| 0.143039
| 0
| 0.002087
| 0.27851
| 2,657
| 101
| 74
| 26.306931
| 0.818466
| 0.010538
| 0
| 0.194805
| 0
| 0
| 0.176247
| 0.041112
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.012987
| 0.103896
| 0.012987
| 0.402597
| 0.103896
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8daa3414a09b9f3c7c95225a1a7fdf929b8d3dfe
| 440
|
py
|
Python
|
BPt/default/options/samplers.py
|
sahahn/ABCD_ML
|
a8b1c48c33f3fdc046c8922964f1c456273238da
|
[
"MIT"
] | 1
|
2019-09-25T23:23:49.000Z
|
2019-09-25T23:23:49.000Z
|
BPt/default/options/samplers.py
|
sahahn/ABCD_ML
|
a8b1c48c33f3fdc046c8922964f1c456273238da
|
[
"MIT"
] | 1
|
2020-04-20T20:53:27.000Z
|
2020-04-20T20:53:27.000Z
|
BPt/default/options/samplers.py
|
sahahn/ABCD_ML
|
a8b1c48c33f3fdc046c8922964f1c456273238da
|
[
"MIT"
] | 1
|
2019-06-21T14:44:40.000Z
|
2019-06-21T14:44:40.000Z
|
from ..helpers import get_obj_and_params, all_from_objects
from ...extensions.samplers import OverSampler
# Registry mapping sampler name -> (sampler class, list of param presets).
SAMPLERS = {
    'oversample': (OverSampler, ['default']),
}
def get_sampler_and_params(obj_str, extra_params, params, **kwargs):
    """Resolve *obj_str* in the SAMPLERS registry and instantiate it.

    Returns:
        (sampler instance built with any extra object params, remaining params)
    """
    resolved = get_obj_and_params(obj_str, SAMPLERS, extra_params, params)
    obj, extra_obj_params, obj_params = resolved
    return obj(**extra_obj_params), obj_params
# Keys exposed by the SAMPLERS registry, collected via the shared helper.
all_obj_keys = all_from_objects(SAMPLERS)
| 25.882353
| 68
| 0.756818
| 60
| 440
| 5.133333
| 0.35
| 0.116883
| 0.058442
| 0.097403
| 0.168831
| 0.168831
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138636
| 440
| 17
| 69
| 25.882353
| 0.812665
| 0
| 0
| 0
| 0
| 0
| 0.038549
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5c0fa60cac177d2865547e53143112bdfdc7111
| 1,008
|
py
|
Python
|
testing.py
|
madjabal/morphine
|
2c76b10a7276936042913d609ad773fbc08b0887
|
[
"MIT"
] | 15
|
2017-03-11T18:25:04.000Z
|
2022-03-31T19:54:31.000Z
|
testing.py
|
madjabal/morphine
|
2c76b10a7276936042913d609ad773fbc08b0887
|
[
"MIT"
] | 2
|
2018-10-17T15:08:36.000Z
|
2021-06-08T13:34:56.000Z
|
testing.py
|
madjabal/morphine
|
2c76b10a7276936042913d609ad773fbc08b0887
|
[
"MIT"
] | 2
|
2018-07-25T15:15:54.000Z
|
2019-06-14T11:16:41.000Z
|
# Python modules
import time
from datetime import timedelta
def consistency(func, args, expected, n=10**4):
    """Analyze and report on the consistency of a function."""
    # Banner: the function's docstring formatted with the call arguments.
    print('\n[CONSISTENCY TEST] {0}'.format(func.__doc__.format(*args)))

    def show(num, den, t, p, end='\r'):
        # elapsed|progress: correct/attempted = ratio  (carriage-return refresh)
        print('{3}|{4:.3f}: {0}/{1} = {2}'.format(num, den, num/den, str(timedelta(seconds=t)), p), end=end)

    start = time.time()
    last_refresh = start
    correct = 0
    for i in range(n):
        correct += (1 if func(*args) == expected else 0)
        # Refresh the progress line at most every ~10 ms.
        if time.time() - last_refresh > 0.01:
            last_refresh = time.time()
            show(correct, (i+1), time.time() - start, (i+1)/n)
    # Final line, terminated with a newline instead of a carriage return.
    show(correct, n, time.time() - start, (i+1)/n, '\n')
def max_over(n, func, args=None):
    """Compute the maximum value returned by func(args) in n runs."""
    # Running maximum seeded at 0, so all-negative results still yield 0
    # (matches the historical behavior of this helper).
    best = 0
    for _ in range(n):
        best = max(best, func(*args) if args else func())
    return best
| 30.545455
| 108
| 0.558532
| 154
| 1,008
| 3.623377
| 0.422078
| 0.071685
| 0.057348
| 0.02509
| 0.103943
| 0.103943
| 0
| 0
| 0
| 0
| 0
| 0.027211
| 0.270833
| 1,008
| 33
| 109
| 30.545455
| 0.731973
| 0.126984
| 0
| 0.083333
| 0
| 0
| 0.062069
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.083333
| 0
| 0.25
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5c112fb1800922ae32e15c8c2c3119937a66895
| 520
|
py
|
Python
|
misc/python/fibonacci.py
|
saranshbht/codes-and-more-codes
|
0bd2e46ca613b3b81e1196d393902e86a43aa353
|
[
"MIT"
] | null | null | null |
misc/python/fibonacci.py
|
saranshbht/codes-and-more-codes
|
0bd2e46ca613b3b81e1196d393902e86a43aa353
|
[
"MIT"
] | null | null | null |
misc/python/fibonacci.py
|
saranshbht/codes-and-more-codes
|
0bd2e46ca613b3b81e1196d393902e86a43aa353
|
[
"MIT"
] | null | null | null |
from itertools import permutations
from collections import Counter
import time
print(time.time())
s=["dgajkhdjkjfkl","ahfjkh","jfskoj","hfakljfio","fjfjir","jiosj","jiojf","jriosj","jiorjf","jhhhhaskgasjdfljjriof"]
t=10
while t>0:
S=s[10-t]
c=dict(Counter(S))
Cperm=list(permutations(c.values()))
flag= False
for i in Cperm:
for j in range(2,len(i)):
if i[j]==i[j-1]+i[j-2]:
print("Dynamic")
flag= True
break
if flag==True:
break
else:
print("Not")
t=t-1
print(time.time())
| 18.571429
| 117
| 0.646154
| 82
| 520
| 4.097561
| 0.536585
| 0.017857
| 0.077381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020785
| 0.167308
| 520
| 27
| 118
| 19.259259
| 0.755196
| 0
| 0
| 0.173913
| 0
| 0
| 0.178846
| 0.040385
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.130435
| 0
| 0.130435
| 0.173913
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5c6922a61844f38e222e52aacc04701fb1c3022
| 4,953
|
py
|
Python
|
main.py
|
rodrigobercinimartins/export-import-por-mesorregiao-brasil
|
73b8126e593eec63ae29eb81a2967f566ec93bc9
|
[
"MIT"
] | 1
|
2020-04-06T17:55:04.000Z
|
2020-04-06T17:55:04.000Z
|
main.py
|
rodrigobercini/export-import-por-mesorregiao-brasil
|
73b8126e593eec63ae29eb81a2967f566ec93bc9
|
[
"MIT"
] | null | null | null |
main.py
|
rodrigobercini/export-import-por-mesorregiao-brasil
|
73b8126e593eec63ae29eb81a2967f566ec93bc9
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
import ssl
# I'm getting SSL certificates issues when downloading files from MDIC.
# The code below is a hack to get around this issue.
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
class ExportsByMesoregion:
    """Download Brazilian municipality-level trade data from MDIC's Comex
    Stat service and aggregate it by mesoregion.

    A mesoregion lookup spreadsheet (municipalities_lookup.xlsx, expected
    next to this file) maps municipality codes to mesoregions.
    """

    def __init__(self
                 , start_year:int
                 , end_year:int = None
                 , transaction_type:str='exports'):
        # Inclusive year range; a single year when end_year is omitted.
        self.start_year = start_year
        if end_year is not None:
            self.end_year = end_year
        else:
            self.end_year = start_year
        # Maps the public API ('exports'/'imports') to MDIC file prefixes.
        self.TRANSACTION_TYPES = {
            'exports':'EXP'
            , 'imports':'IMP'
        }
        if transaction_type in self.TRANSACTION_TYPES:
            self.transaction_type = transaction_type
        else:
            raise ValueError(f"Invalid transaction type. Valid values are: {''.join(self.TRANSACTION_TYPES)}")
        self.BASE_URL = 'https://balanca.economia.gov.br/balanca/bd/comexstat-bd/mun/'
        # All paths are anchored at the repository folder containing this file.
        self.REPO_FOLDER_PATH = os.path.dirname(os.path.abspath(__file__))
        self.MUN_FOLDER_PATH = os.path.join(self.REPO_FOLDER_PATH, 'data', 'municipalities',"")
        self.MESO_FOLDER_PATH = os.path.join(self.REPO_FOLDER_PATH, 'data', 'mesoregions',"")
        self.MUN_LOOKUP_FILENAME = os.path.join(self.REPO_FOLDER_PATH, 'municipalities_lookup.xlsx')

    def create_folder_if_not_exists(self, folder_path):
        """Create *folder_path* (and parents) when missing; no-op otherwise."""
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

    def get_file_name(self, transaction_type, year, division_type):
        """Build the MDIC csv file name, e.g. 'EXP_2020_MUN.csv'."""
        return f'{self.TRANSACTION_TYPES[transaction_type]}_{year}_{division_type}.csv'

    def download_mun_data(self):
        """Download one municipality csv per year into MUN_FOLDER_PATH,
        skipping files that are already present."""
        self.create_folder_if_not_exists(self.MUN_FOLDER_PATH)
        for year in range(self.start_year, self.end_year+1):
            file_name = self.get_file_name(self.transaction_type, year, 'MUN')
            file_path = f'{self.MUN_FOLDER_PATH}{file_name}'
            if os.path.isfile(file_path):
                print(f'{year} - Mun {self.transaction_type} already exists. Skipping download...')
                continue
            url = f'{self.BASE_URL}{file_name}'
            # NOTE(review): to_csv without index=False writes the index as an
            # extra leading column, which later reads pick up — confirm intended.
            pd.read_csv(url, sep=';', encoding='UTF-8').to_csv(file_path, sep=';', encoding='UTF-8')
            print(f'{year} - Municipalities {self.transaction_type} finished downloading')

    def add_meso_to_mun_data(self, year):
        """Join one year's municipality data with the mesoregion lookup.

        Returns the merged DataFrame with redundant municipality columns
        dropped.
        """
        mun_exp_filename = self.get_file_name(self.transaction_type, year, 'MUN')
        mun_exports = pd.read_csv(f'{self.MUN_FOLDER_PATH}{mun_exp_filename}', sep=';')
        municip_codes = pd.read_excel(self.MUN_LOOKUP_FILENAME)
        mun_with_meso = mun_exports.merge(municip_codes, left_on= 'CO_MUN',
                                          right_on='Código Município Completo (MDIC)')
        mun_with_meso.drop(['Município', 'CO_MUN', 'Nome_Microrregião',
                            'Microrregião Geográfica',
                            'Código Município Completo (MDIC)'], axis=1, inplace=True)
        print(f'{year} - Mesoregions info added to municipalities data')
        return mun_with_meso

    def aggregate_by_mesoregion(self, year, mun_with_meso):
        """Sum the merged data per mesoregion/month/country/product (SH4)."""
        meso_aggregated = mun_with_meso.groupby(['CO_ANO','Nome_Mesorregião','CD_GEOCME', 'CO_MES', 'CO_PAIS', 'SH4'],as_index=False).sum() # Consolida dados por mesorregião
        meso_aggregated.drop(['UF', 'Mesorregião Geográfica', 'Código Município Completo (IBGE)'], axis=1, inplace=True)
        print(f'{year} - Mesoregions data aggregated')
        return meso_aggregated

    def download_data_and_aggregate_by_meso(self):
        """Full pipeline: download, join with mesoregions, aggregate, and
        save one csv per year into MESO_FOLDER_PATH."""
        self.create_folder_if_not_exists(self.MESO_FOLDER_PATH)
        self.download_mun_data()
        for year in (range(self.start_year, self.end_year+1)):
            mun_with_meso = self.add_meso_to_mun_data(year)
            meso_aggregated = self.aggregate_by_mesoregion(year, mun_with_meso)
            meso_exp_filename = self.get_file_name(self.transaction_type, year, 'MESO')
            meso_aggregated.to_csv(f'{self.MESO_FOLDER_PATH}{meso_exp_filename}', encoding='UTF-8')
            print(f'{year} - Mesoregions data saved')

    def download_data_and_add_meso_info(self):
        """Download and join all years; return one concatenated DataFrame
        (no aggregation, nothing written to disk).

        NOTE(review): DataFrame.append is deprecated/removed in modern
        pandas; pd.concat would be the forward-compatible replacement.
        """
        self.create_folder_if_not_exists(self.MUN_FOLDER_PATH)
        self.download_mun_data()
        final_df = pd.DataFrame()
        for year in (range(self.start_year, self.end_year+1)):
            mun_with_meso = self.add_meso_to_mun_data(year)
            final_df = final_df.append(mun_with_meso)
        return final_df
if __name__ == '__main__':
    # Example run: aggregate 2020 imports by mesoregion.
    ExportsObject = ExportsByMesoregion(start_year=2020, end_year=2020, transaction_type='imports')
    ExportsObject.download_data_and_aggregate_by_meso()
| 43.447368
| 173
| 0.657985
| 639
| 4,953
| 4.749609
| 0.250391
| 0.049423
| 0.032619
| 0.028007
| 0.333114
| 0.283031
| 0.224053
| 0.203624
| 0.16771
| 0.153213
| 0
| 0.004512
| 0.239249
| 4,953
| 114
| 174
| 43.447368
| 0.800955
| 0.030688
| 0
| 0.130952
| 0
| 0
| 0.201334
| 0.065652
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0.011905
| 0.059524
| 0.011905
| 0.214286
| 0.059524
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5ca6ea7872c55e908f6afc4233961e95a90159a
| 1,366
|
py
|
Python
|
sendUAV/recevier.py
|
RobEn-AAST/AI-UAVC
|
732683fd5821d492b772cc5f966e86aed164a68c
|
[
"MIT"
] | 16
|
2022-02-05T15:51:13.000Z
|
2022-02-05T17:38:54.000Z
|
sendUAV/recevier.py
|
RobEn-AAST/AI-UAVC
|
732683fd5821d492b772cc5f966e86aed164a68c
|
[
"MIT"
] | null | null | null |
sendUAV/recevier.py
|
RobEn-AAST/AI-UAVC
|
732683fd5821d492b772cc5f966e86aed164a68c
|
[
"MIT"
] | null | null | null |
from socket import socket, AF_INET, SOCK_STREAM, IPPROTO_TCP
import struct
import pickle
class ServerSock(socket):
    """TCP server socket that receives length-prefixed pickled messages.

    Wire format: a 4-byte big-endian unsigned length header (">L") followed
    by a pickle payload of that many bytes.
    """

    def __init__(self, PORT):
        super().__init__(AF_INET, SOCK_STREAM, IPPROTO_TCP)
        self.bind(("", PORT))
        self.listen()

    def getMessage(self):
        """Accept one connection and return a single unpickled message.

        On any receive error (including timeout or the peer closing the
        connection early) the connection is closed and we wait for the next
        connection — iteratively, so a flood of bad clients cannot exhaust
        the Python recursion limit (the previous implementation recursed).
        """
        payload_size = struct.calcsize(">L")
        while True:
            conn, _ = self.accept()
            conn.settimeout(5)
            try:
                header = b""
                while len(header) < payload_size:
                    bits = conn.recv(4096)
                    # recv() returns b"" once the peer closes; without this
                    # check the loop would spin forever on a dead connection.
                    if not bits:
                        raise ConnectionError("connection closed mid-header")
                    header += bits
                packed_msg_size = header[:payload_size]
                data = header[payload_size:]
                msg_size = struct.unpack(">L", packed_msg_size)[0]
                while len(data) < msg_size:
                    bits = conn.recv(4096)
                    if not bits:
                        raise ConnectionError("connection closed mid-message")
                    data += bits
                frame_data = data[:msg_size]
                # SECURITY: pickle.loads can execute arbitrary code; only use
                # this receiver on a trusted network/link.
                return pickle.loads(frame_data, fix_imports=True, encoding="bytes")
            except Exception:
                conn.close()
                # Fall through and wait for the next connection.
if __name__ == "__main__":
    # Demo: print every message received on port 5500, forever.
    server = ServerSock(5500)
    while True:
        print(server.getMessage())
| 29.695652
| 82
| 0.51757
| 144
| 1,366
| 4.645833
| 0.458333
| 0.06278
| 0.076233
| 0.047833
| 0.137519
| 0.077728
| 0
| 0
| 0
| 0
| 0
| 0.016726
| 0.387262
| 1,366
| 45
| 83
| 30.355556
| 0.782557
| 0.033675
| 0
| 0.114286
| 0
| 0
| 0.012898
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.114286
| 0
| 0.257143
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5cb7a30978758aaea2edade994cdb342894093c
| 21,620
|
py
|
Python
|
pedal/questions/loader.py
|
acbart/python-analysis
|
3cd2cc22d50a414ae6b62c74d2643be4742238d4
|
[
"MIT"
] | 14
|
2019-08-22T03:40:23.000Z
|
2022-03-13T00:30:53.000Z
|
pedal/questions/loader.py
|
pedal-edu/pedal
|
3cd2cc22d50a414ae6b62c74d2643be4742238d4
|
[
"MIT"
] | 74
|
2019-09-12T04:35:56.000Z
|
2022-01-26T19:21:32.000Z
|
pedal/questions/loader.py
|
acbart/python-analysis
|
3cd2cc22d50a414ae6b62c74d2643be4742238d4
|
[
"MIT"
] | 2
|
2021-01-11T06:34:00.000Z
|
2021-07-21T12:48:07.000Z
|
"""
instructions: blah blah blah
settings:
tifa:
enabled: True
unit test by function (bool): Whether to test each function entirely before moving onto the
next one, or to first check that all functions have been defined, and then
checking their parameters, etc. Defaults to True.
show case details (bool): Whether to show the specific args/inputs that caused a test case
to fail.
rubric:
functions:
total: 100
definition: 10
signature: 10
cases: 80
global:
variables:
name:
type:
value:
inputs:
prints:
# Sandbox, type checking
functions:
documentation: "any" or "google"
coverage: 100%
tests: int
name: do_complicated_stuff
arity: int
signature: int, int -> float
signature: int, int, list[int], (int->str), dict[str:list[int]] -> list[int]
parameters:
name: banana
exactly:
regex:
includes:
within:
type: int
cases:
- arguments (list): 5, 4
inputs (list):
returns (Any):
equals: 27.3
is:
is not: _1
name (str): Meaningful name for tracking purposes? Or possibly separate into label/id/code
hint (str): Message to display to user
prints:
exactly:
regex:
startswith:
endswith:
plots:
# Cait
syntax:
prevent:
___ + ___
# Override any of our default feedback messages
messages:
FUNCTION_NOT_DEFINED: "Oops you missed a function"
"""
from pedal.core.commands import set_success, give_partial
from pedal.core.feedback_category import FeedbackCategory
from pedal.questions.constants import TOOL_NAME
from pedal.sandbox.commands import get_sandbox
from pedal.utilities.comparisons import equality_test
# Settings key: whether to reveal the specific args/inputs of a failed case.
SETTING_SHOW_CASE_DETAILS = "show case details"
DEFAULT_SETTINGS = {
    SETTING_SHOW_CASE_DETAILS: True
}

# Example of the question-description format this loader consumes.
EXAMPLE_DATA = {
    'functions': [{
        'name': 'do_complicated_stuff',
        'signature': 'int, int, [int] -> list[int]',
        'cases': [
            {'arguments': "5, 4, 3", 'returns': "12"},
        ]
    }]
}
class FeedbackException(Exception):
    """Exception carrying a feedback category, label, and message fields."""

    def __init__(self, category, label, **fields):
        self.category = category
        self.label = label
        self.fields = fields

    def as_message(self):
        """Render this exception via the FEEDBACK_MESSAGES lookup table.

        Returns:
            The category/label template formatted with this exception's fields.
        """
        template = FEEDBACK_MESSAGES[self.category][self.label]
        return template.format(**self.fields)
def check_function_defined(function, function_definitions, settings=None):
    """Return the syntactic definition matching *function*, or raise.

    Args:
        function: spec dict containing at least 'name'.
        function_definitions: mapping of defined function names to their
            definitions.
        settings: unused; accepted for a uniform checker interface.

    Raises:
        FeedbackException: when the named function was never defined.
    """
    # 1. Is the function defined syntactically, with the right name?
    name = function['name']
    if name in function_definitions:
        return function_definitions[name]
    raise FeedbackException(FeedbackCategory.SPECIFICATION, 'missing_function',
                            function_name=name)
def check_function_signature(function, definition, settings=None):
    """
    Verify the parsed *definition* against the spec dict *function*.

    Supports four (combinable) specification styles: 'arity' (parameter
    count only), 'parameters' (per-parameter name/type), 'returns'
    (return type), and 'signature' (types shortcut, "params -> return").

    Args:
        function: spec dict; must contain 'name', may contain 'arity',
            'parameters', 'returns', and/or 'signature'.
        definition: the parsed function definition node (exposes
            .args.args and .returns).
        settings: unused here; accepted for a uniform checker interface.

    Returns:
        True when every requested check passes.

    Raises:
        FeedbackException: on the first failed check.
    """
    function_name = function['name']
    # 1.2. With the right parameters and return type?
    # 1.2.1 'arity' style - simply checks number of parameters
    if 'arity' in function or 'parameters' in function:
        expected_arity = function['arity'] if 'arity' in function else len(function['parameters'])
        actual_arity = len(definition.args.args)
        if actual_arity < expected_arity:
            raise FeedbackException(FeedbackCategory.SPECIFICATION, 'insufficient_args',
                                    function_name=function_name, expected_arity=expected_arity,
                                    actual_arity=actual_arity)
        elif actual_arity > expected_arity:
            raise FeedbackException(FeedbackCategory.SPECIFICATION, 'excessive_args',
                                    function_name=function_name, expected_arity=expected_arity,
                                    actual_arity=actual_arity)
    # 1.2.2 'parameters' style - checks each parameter's name and type
    if 'parameters' in function:
        expected_parameters = function['parameters']
        actual_parameters = definition.args.args
        for expected_parameter, actual_parameter in zip(expected_parameters, actual_parameters):
            actual_parameter_name = get_arg_name(actual_parameter)
            if 'name' in expected_parameter:
                if actual_parameter_name != expected_parameter['name']:
                    raise FeedbackException(FeedbackCategory.SPECIFICATION, 'wrong_parameter_name',
                                            function_name=function_name,
                                            expected_parameter_name=expected_parameter['name'],
                                            actual_parameter_name=actual_parameter_name
                                            )
            if 'type' in expected_parameter:
                actual_parameter_type = parse_type(actual_parameter)
                # TODO: Handle non-string expected_parameter types (dict)
                expected_parameter_type = parse_type_value(expected_parameter['type'], True)
                if not type_check(expected_parameter_type, actual_parameter_type):
                    raise FeedbackException(FeedbackCategory.SPECIFICATION, 'wrong_parameter_type',
                                            function_name=function_name,
                                            parameter_name=actual_parameter_name,
                                            expected_parameter_type=expected_parameter_type,
                                            actual_parameter_type=actual_parameter_type)
    # 1.2.3. 'returns' style - checks the return type explicitly
    if 'returns' in function:
        expected_returns = parse_type_value(function['returns'], True)
        actual_returns = parse_type(definition.returns)
        if actual_returns != "None":
            if not type_check(expected_returns, actual_returns):
                raise FeedbackException(FeedbackCategory.SPECIFICATION, "wrong_returns",
                                        function_name=function_name, expected_returns=expected_returns,
                                        actual_returns=actual_returns)
        elif expected_returns != "None":
            # No annotation present on the definition but one was expected.
            raise FeedbackException(FeedbackCategory.SPECIFICATION, "missing_returns",
                                    function_name=function_name, expected_returns=expected_returns)
    # 1.2.4. 'signature' style - shortcut for specifying the types
    if 'signature' in function:
        expected_signature = function['signature']
        actual_returns = parse_type(definition.returns)
        actual_parameters = ", ".join(parse_type(actual_parameter.annotation)
                                      for actual_parameter in definition.args.args)
        actual_signature = "{} -> {}".format(actual_parameters, actual_returns)
        if not type_check(expected_signature, actual_signature):
            raise FeedbackException(FeedbackCategory.SPECIFICATION, "wrong_signature",
                                    function_name=function_name, expected_signature=expected_signature,
                                    actual_signature=actual_signature)
    # All good here!
    return True
def check_function_value(function, values, settings):
    """
    2. Does the function exist in the data?

    Looks up the function's name among the student's top-level values and
    verifies that it is bound to something callable.

    :param function: specification dict; must contain a 'name' key.
    :param values: mapping of top-level student names to their values.
    :param settings: grading settings (unused here).
    :return: the callable bound to the function's name.
    :raises FeedbackException: when the name is absent or not callable.
    """
    name = function['name']
    # 2.1. Does the name exist in the values?
    if name not in values:
        raise FeedbackException(FeedbackCategory.SPECIFICATION, "function_not_available", function_name=name)
    candidate = values[name]
    # 2.2. Is the name bound to a callable?
    if not callable(candidate):
        raise FeedbackException(FeedbackCategory.SPECIFICATION, "name_is_not_function", function_name=name)
    # All good here
    return candidate
class TestCase:
    """Record of a single executed test case: expected vs. actual results."""

    # Monotonic counter used to auto-name anonymous cases.
    CASE_COUNT = 0

    def __init__(self, function_name, case_name):
        self.function_name = function_name
        if case_name is None:
            self.case_name = str(TestCase.CASE_COUNT)
            TestCase.CASE_COUNT += 1
        else:
            self.case_name = case_name
        self.arguments = []
        self.has_arguments = False
        self.inputs = []
        self.has_inputs = False
        self.error = None
        self.has_error = False
        self.message = None
        self.has_message = False
        self.expected_prints = None
        self.has_expected_prints = False
        self.expected_returns = None
        self.has_expected_returns = False
        self.prints = []
        self.returns = None
        self.success = True

    def add_message(self, message):
        """Attach an instructor-provided bonus feedback message."""
        self.message = message
        self.has_message = True

    def add_inputs(self, inputs):
        """Record queued stdin inputs; a single value is wrapped in a list."""
        self.inputs = inputs if isinstance(inputs, list) else [inputs]
        self.has_inputs = True

    def add_arguments(self, arguments):
        """Record call arguments; a single value is wrapped in a list."""
        self.arguments = arguments if isinstance(arguments, list) else [arguments]
        self.has_arguments = True

    def add_error(self, error):
        """Record a runtime error; the case is marked as failed."""
        self.error = error
        self.has_error = True
        self.success = False

    def add_expected_prints(self, prints):
        """Record the expected printed output."""
        self.expected_prints = prints
        self.has_expected_prints = True

    def add_expected_returns(self, returns):
        """Record the expected return value."""
        self.expected_returns = returns
        self.has_expected_returns = True

    def add_prints_returns(self, prints, returns):
        """Record the actual printed output and return value."""
        self.prints = prints
        self.returns = returns

    def fail(self):
        """Mark this case as failed."""
        self.success = False
def check_case(function, case, student_function):
    """
    Run one test case against the student's function inside the sandbox.

    :param function: specification dict for the function under test; 'name' is required.
    :param case: specification dict for this case; recognized keys are
        'name', 'message', 'inputs', 'arguments', 'prints', 'returns'.
    :param student_function: the student's callable (unused here; the call
        is dispatched through the sandbox by name).
    :return: a populated TestCase holding expected and actual results.
    """
    function_name = function['name']
    test_case = TestCase(function_name, case.get('name'))
    # Get callable
    sandbox = get_sandbox(MAIN_REPORT)
    sandbox.clear_output()
    # Potential bonus message
    if 'message' in case:
        test_case.add_message(case['message'])
    # Queue up the inputs
    if 'inputs' in case:
        test_case.add_inputs(case['inputs'])
        sandbox.set_input(test_case.inputs)
    else:
        sandbox.clear_input()
    # Pass in the arguments and call the function
    if 'arguments' in case:
        test_case.add_arguments(case['arguments'])
    result = sandbox.call(function_name, *test_case.arguments)
    # Store actual values
    test_case.add_prints_returns(sandbox.output, result)
    # Check for errors
    if sandbox.exception:
        test_case.add_error(sandbox.exception)
    # 4. Check out the output (tolerance .0001 for float comparisons)
    if 'prints' in case:
        test_case.add_expected_prints(case['prints'])
        if not output_test(sandbox.output, case['prints'], False, .0001):
            test_case.fail()
    # 5. Check the return value
    if 'returns' in case:
        test_case.add_expected_returns(case['returns'])
        if not equality_test(result, case['returns'], True, .0001):
            test_case.fail()
    # TODO: Check the plots
    # Return results
    return test_case
# TODO: blockpy-feedback-unit => pedal-test-cases in BlockPy Client
TEST_TABLE_TEMPLATE = """<table class='pedal-test-cases table table-sm table-bordered table-hover'>
<tr class='table-active'>
<th></th>
<th>Arguments</th>
<th>Expected</th>
<th>Returned</th>
</tr>
{body}
</table>"""
TEST_TABLE_FOOTER = "</table>"
TEST_TABLE_ROW_HEADER = "<tr class='table-active'>"
TEST_TABLE_ROW_NORMAL = "<tr>"
TEST_TABLE_ROW_FOOTER = "</tr>"
TEST_TABLE_ROW_INFO = "<tr class='table-info'>"
GREEN_CHECK = " <td class='green-check-mark'>✔</td>"
RED_X = " <td>❌</td>"
CODE_CELL = " <td><code>{}</code></td>"
COLUMN_TITLES = ["", "Arguments", "Inputs", "Errors", "Expected", "Expected", "Returned", "Printed"]
def make_table(cases):
    """Render executed test cases as an HTML results table.

    Args:
        cases: iterable of TestCase results.

    Returns:
        str: complete HTML table markup.
    """
    rows = []
    for case in cases:
        rows.append(" <tr>")
        rows.append(GREEN_CHECK if case.success else RED_X)
        argument_text = ", ".join(repr(arg) for arg in case.arguments)
        rows.append(CODE_CELL.format(argument_text))
        if case.has_error:
            rows.append(" <td colspan='2'>Error: <code>{}</code></td>".format(str(case.error)))
        else:
            rows.append(CODE_CELL.format(repr(case.expected_returns)))
            rows.append(CODE_CELL.format(repr(case.returns)))
        if not case.success and case.has_message:
            rows.append(" </tr><tr><td colspan='4'>{}</td>".format(case.message))
        rows.append(" </tr>")
    return TEST_TABLE_TEMPLATE.format(body="\n".join(rows))
#if ((any(args) and any(inputs)) or
# (any(expected_outputs) and any(expected_returns)) or
# (any(actual_outputs) and any(actual_returns))):
# # Complex cells
# pass
#else:
# Simple table
# Make header
# row_mask = [True, any(args), any(inputs), False,
# any("returns" in reason for reason in reasons),
# any("prints" in reason for reason in reasons),
# any("returns" in reason for reason in reasons),
# any("prints" in reason for reason in reasons)]
# header_cells = "".join("<th>{}</th>".format(title) for use, title in zip(row_mask, COLUMN_TITLES) if use)
# body = [TEST_TABLE_ROW_HEADER.format(header_cells)]
# for case in zip(
# statuses, args, inputs, errors, actual_outputs, actual_returns,
# expected_outputs, expected_returns):
# status, case = case[0], case[1:]
# print(row_mask[1:], case)
# def make_code(values):
# if values == None:
# return "<code>None</code>"
# elif isinstance(values, int):
# return "<code>{!r}</code>".format(values)
# else:
# return ", ".join("<code>{}</code>".format(repr(value)) for value in values)
# body.append(
# TEST_TABLE_ROW_NORMAL+
# (GREEN_CHECK if case[0] else RED_X)+
# "\n".join(" <td>{}</td>".format(make_code(values))
# for use, values in zip(row_mask[1:], case) if use)+
# "</tr>\n"
# )
# # Make each row
# table = "{}\n{}\n{}".format(TEST_TABLE_HEADER, "\n ".join(body), TEST_TABLE_FOOTER)
# return table
def check_cases(function, student_function, settings):
    """Run every case in the function spec; raise if any case fails.

    Args:
        function: specification dict; may contain a 'cases' list.
        student_function: the student's callable, forwarded to check_case.
        settings: grading settings; SETTING_SHOW_CASE_DETAILS controls
            whether the failure feedback includes an HTML results table.

    Raises:
        FeedbackException: when at least one case fails.
    """
    function_name = function['name']
    if 'cases' in function:
        cases = function['cases']
        test_cases = [check_case(function, case, student_function) for case in cases]
        success_cases = sum(test.success for test in test_cases)
        if success_cases < len(cases):
            if settings[SETTING_SHOW_CASE_DETAILS]:
                table = make_table(test_cases)
                raise FeedbackException(FeedbackCategory.SPECIFICATION, "failed_test_cases",
                                        function_name=function_name,
                                        cases_count=len(cases), failure_count=len(cases)-success_cases,
                                        table=table)
            else:
                raise FeedbackException(FeedbackCategory.SPECIFICATION, "failed_test_cases_count",
                                        function_name=function_name,
                                        cases_count=len(cases), failure_count=len(cases) - success_cases)
def get_arg_name(node):
    """Return the identifier of a function-parameter AST node.

    Handles both parameter node shapes: Name-style nodes expose the
    identifier as ``id``, while Python 3 ``ast.arg`` nodes expose it as
    ``arg``. The original accessed ``node.id`` unconditionally, which
    raises AttributeError on nodes that only define ``arg``; ``getattr``
    with a None default covers both shapes.

    Args:
        node: a parameter AST node.

    Returns:
        str: the parameter's name.
    """
    name = getattr(node, "id", None)
    if name is None:
        return node.arg
    return name
def load_question(data):
    """
    Grade one question specification against the student's submission.

    For each function spec, checks (in order) that the function is defined,
    that its signature matches, and that it is callable, then runs its test
    cases. Awards rubric points per step and reports the overall result via
    set_success / give_partial.

    :param data: question specification dict ('functions', 'settings', ...).
    :return: generator of (message, label) feedback tuples, one per failure.
    """
    ast = parse_program()
    student_data = commands.get_student_data()
    # Check that there aren't any invalid syntactical structures
    # Get all of the function ASTs in a dictionary
    function_definitions = {definition._name: definition
                            for definition in ast.find_all("FunctionDef")}
    settings = DEFAULT_SETTINGS.copy()
    settings.update(data.get('settings', {}))
    rubric = settings.get('rubric', {})
    function_points = 0
    if 'functions' in data:
        function_rubric = rubric.get('functions', {})
        successes = []
        for function in data['functions']:
            success = False
            try:
                # Structural checks: defined -> signature -> callable value.
                definition = check_function_defined(function, function_definitions, settings)
                function_points += function_rubric.get('definition', 10)
                check_function_signature(function, definition, settings)
                function_points += function_rubric.get('signature', 10)
                student_function = check_function_value(function, student_data, settings)
                function_points += function_rubric.get('value', 0)
            except FeedbackException as fe:
                yield fe.as_message(), fe.label
            else:
                try:
                    check_cases(function, student_function, settings)
                except FeedbackException as fe:
                    # Partial credit proportional to the cases that passed.
                    success_ratio = (1.0 - fe.fields['failure_count'] / fe.fields['cases_count'])
                    function_points += function_rubric.get('cases', 80*success_ratio)
                    yield fe.as_message(), fe.label
                else:
                    function_points += function_rubric.get('cases', 80)
                    success = True
            successes.append(success)
        # Average the accumulated points across all functions.
        function_points /= len(data['functions'])
        if all(successes):
            set_success()
        else:
            give_partial(function_points, tool=TOOL_NAME,
                         justification="Passed some but not all unit tests")
def check_question(data):
    """Grade `data` and surface only the first feedback message, if any.

    Args:
        data: question specification dict.
    """
    feedback = list(load_question(data))
    if feedback:
        message, label = feedback[0]
        gently(message, label=label)
def check_pool(questions):
    """Grade a pool of alternative questions (not yet implemented).

    Args:
        questions: iterable of question specification dicts.
    """
    pass
def load_file(filename):
    """Load a question specification from a file (not yet implemented).

    Args:
        filename: path to the specification file.
    """
    pass
# Message templates for every specification-feedback label; each template is
# formatted with the keyword fields attached to the FeedbackException that
# carries the matching label.
FEEDBACK_MESSAGES = {
    FeedbackCategory.SPECIFICATION: {
        "missing_function": "No function named `{function_name}` was found.",
        "insufficient_args": ("The function named `{function_name}` "
                              "has fewer parameters ({actual_arity}) "
                              "than expected ({expected_arity})."),
        "excessive_args": ("The function named `{function_name}` "
                           "has more parameters ({actual_arity}) "
                           "than expected ({expected_arity})."),
        # TODO: missing_parameter that checks if parameter name exists, but is in the wrong place
        "wrong_parameter_name": ("Error in definition of `{function_name}`. "
                                 "Expected a parameter named `{expected_parameter_name}`, "
                                 "instead found `{actual_parameter_name}`."),
        "wrong_parameter_type": ("Error in definition of function `{function_name}` "
                                 "parameter `{parameter_name}`. Expected `{expected_parameter_type}`, "
                                 "instead found `{actual_parameter_type}`."),
        "missing_returns": ("Error in definition of function `{function_name}` return type. "
                            "Expected `{expected_returns}`, but there was no return type specified."),
        "wrong_returns": ("Error in definition of function `{function_name}` return type. "
                          "Expected `{expected_returns}`, instead found `{actual_returns}`."),
        "wrong_signature": ("Error in definition of function `{function_name}` signature. "
                            "Expected `{expected_signature}`, instead found `{actual_signature}`."),
        "name_is_not_function": "You defined `{function_name}`, but did not define it as a function.",
        "function_not_available": ("You defined `{function_name}` somewhere in your code, "
                                   "but it was not available in the top-level scope to be called. "
                                   "Perhaps you defined it inside another function or scope?"),
        "failed_test_cases": ("I ran your function <code>{function_name}</code> on my own test cases. "
                              "It failed {failure_count}/{cases_count} of my tests.\n{table}"),
        "failed_test_cases_count": ("I ran your function <code>{function_name}</code> on my own test cases. "
                                    "It failed {failure_count}/{cases_count} of my tests."),
    }
}
| 35.913621
| 118
| 0.592091
| 2,311
| 21,620
| 5.340113
| 0.144526
| 0.05348
| 0.02593
| 0.035005
| 0.27988
| 0.197148
| 0.141318
| 0.092699
| 0.073252
| 0.063366
| 0
| 0.005806
| 0.306892
| 21,620
| 601
| 119
| 35.973378
| 0.817751
| 0.222433
| 0
| 0.121311
| 0
| 0
| 0.181779
| 0.032472
| 0
| 0
| 0
| 0.004992
| 0
| 1
| 0.072131
| false
| 0.009836
| 0.016393
| 0
| 0.12459
| 0.036066
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5cc7ebfb0f671bb1d1aeac6021cc68675439a1a
| 8,732
|
py
|
Python
|
VM/fetchLoop.py
|
djtech-dev/PyVM
|
1edda436ce7073d0cecbf16f5cab2509895d953c
|
[
"MIT"
] | 75
|
2017-09-22T22:36:13.000Z
|
2022-03-20T16:18:27.000Z
|
VM/fetchLoop.py
|
djtech-dev/PyVM
|
1edda436ce7073d0cecbf16f5cab2509895d953c
|
[
"MIT"
] | 7
|
2019-05-10T19:15:08.000Z
|
2021-08-24T16:03:34.000Z
|
VM/fetchLoop.py
|
djtech-dev/PyVM
|
1edda436ce7073d0cecbf16f5cab2509895d953c
|
[
"MIT"
] | 14
|
2018-07-02T02:49:46.000Z
|
2022-02-22T15:24:47.000Z
|
import enum
from .ELF import ELF32, enums
from .util import SegmentRegs, MissingOpcodeError
from .CPU import CPU32
import logging
logger = logging.getLogger(__name__)
class FetchLoopMixin:
    # Members this mixin expects its host class (CPU32) to provide.
    _attrs_ = 'eip', 'mem', 'reg.ebx', 'fmt', 'instr', 'sizes', 'default_mode'

    def execute_opcode(self: CPU32) -> None:
        """Decode and execute the opcode currently held in ``self.opcode``.

        Extends the opcode with a second byte after the 0x0F escape prefix,
        then tries every registered implementation in ``self.instr`` until
        one accepts. If none does, one more byte is read to form a longer
        opcode; a final miss raises MissingOpcodeError (unknown opcode) or
        NotImplementedError (known opcode, no accepting implementation).
        """
        self.eip += 1
        off = 1  # opcode bytes consumed so far (used for error addresses)
        if self.opcode == 0x0F:  # two-byte opcode escape prefix
            op = self.mem.get_eip(self.eip, 1)
            self.eip += 1
            self.opcode = (self.opcode << 8) | op
            off += 1
        if __debug__:
            logger.debug(self.fmt, self.eip - off, self.opcode)
        try:
            impls = self.instr[self.opcode]
        except KeyError:
            ...  # could not find opcode; fall through to the extended lookup
        else:
            for impl in impls:
                if impl():
                    return  # opcode executed
            # could not find suitable implementation
        # read one more byte and retry with the extended opcode
        op = self.mem.get_eip(self.eip, 1)
        self.eip += 1
        self.opcode = (self.opcode << 8) | op
        try:
            impls = self.instr[self.opcode]
        except KeyError:
            raise MissingOpcodeError(f'Opcode {self.opcode:x} is not recognized yet (at 0x{self.eip - off - 1:08x})')
        else:
            for impl in impls:
                if impl():
                    return  # opcode executed
            # could not find suitable implementation
            raise NotImplementedError(f'No suitable implementation found for opcode {self.opcode:x} (@0x{self.eip - off - 1:02x})')

    def run(self: CPU32) -> int:
        """
        Implements the basic CPU instruction cycle (https://en.wikipedia.org/wiki/Instruction_cycle)

        :param self: passed implicitly
        :return: the value of EAX when the loop stops
        """
        # opcode prefixes
        pref_segments = {
            0x2E: SegmentRegs.CS,
            0x36: SegmentRegs.SS,
            0x3E: SegmentRegs.DS,
            0x26: SegmentRegs.ES,
            0x64: SegmentRegs.FS,
            0x65: SegmentRegs.GS
        }
        pref_op_size_override = {0x66, 0x67}
        pref_lock = {0xf0}
        rep = {0xf3}
        prefixes = set(pref_segments) | pref_op_size_override | pref_lock | rep
        self.running = True
        while self.running and self.eip + 1 < self.mem.size:
            overrides = []
            self.opcode = self.mem.get(self.eip, 1)
            # Consume all prefix bytes preceding the actual opcode.
            while self.opcode in prefixes:
                overrides.append(self.opcode)
                self.eip += 1
                self.opcode = self.mem.get(self.eip, 1)
            # apply overrides
            size_override_active = False
            for ov in overrides:
                if ov == 0x66:  # operand-size override
                    if not size_override_active:
                        self.current_mode = not self.current_mode
                        size_override_active = True
                    old_operand_size = self.operand_size
                    self.operand_size = self.sizes[self.current_mode]
                    logger.debug(
                        'Operand size override: %d -> %d',
                        old_operand_size, self.operand_size
                    )
                elif ov == 0x67:  # address-size override
                    if not size_override_active:
                        self.current_mode = not self.current_mode
                        size_override_active = True
                    old_address_size = self.address_size
                    self.address_size = self.sizes[self.current_mode]
                    logger.debug(
                        'Address size override: %d -> %d',
                        old_address_size, self.address_size
                    )
                elif ov in pref_segments:  # segment override
                    is_special = ov >> 6
                    if is_special:
                        sreg_number = 4 + (ov & 1)  # FS or GS
                    else:
                        sreg_number = (ov >> 3) & 0b11
                    self.mem.segment_override = sreg_number
                    logger.debug('Segment override: %s', self.mem.segment_override)
                elif ov == 0xf0:  # LOCK prefix
                    logger.debug('LOCK prefix')  # do nothing; all operations are atomic anyway. Right?
                elif ov == 0xf3:  # REP prefix
                    self.opcode = ov
                    self.eip -= 1  # repeat the previous opcode
            self.execute_opcode()
            # undo all overrides
            for ov in overrides:
                if ov == 0x66:
                    self.current_mode = self.default_mode
                    self.operand_size = self.sizes[self.current_mode]
                elif ov == 0x67:
                    self.current_mode = self.default_mode
                    self.address_size = self.sizes[self.current_mode]
                elif ov in pref_segments:
                    self.mem.segment_override = SegmentRegs.DS
        return self.reg.eax
class ExecutionStrategy(enum.Enum):
    """How a guest program image is supplied to the VM."""
    BYTES = enum.auto()  # raw byte string supplied directly (== 1)
    FLAT = enum.auto()   # flat binary image read from a file (== 2)
    ELF = enum.auto()    # 32-bit ELF executable (== 3)
class ExecutionMixin(FetchLoopMixin):
    # Common base for the Execute* loaders below; subclasses override execute().
    def execute(self, *args, **kwargs):
        """Placeholder; concrete loaders implement actual program loading."""
        return NotImplemented
class ExecuteBytes(ExecutionMixin):
    """Load a raw byte string into guest memory and run it."""
    _attrs_ = 'eip', 'mem', 'code_segment_end'
    _funcs_ = 'run',

    def execute(self: CPU32, data: bytes, offset=0):
        """Copy `data` into memory at `offset`, point EIP there, and run."""
        size = len(data)
        self.mem.set_bytes(offset, size, data)
        self.eip = offset
        self.code_segment_end = offset + size - 1
        self.mem.program_break = self.code_segment_end
        return self.run()
class ExecuteFlat(ExecutionMixin):
    """Load a flat binary image from a file and run it."""
    _attrs_ = 'eip', 'mem', 'code_segment_end'
    _funcs_ = 'run',

    def execute(self: CPU32, fname: str, offset=0):
        """Read `fname` whole, place it at `offset`, point EIP there, and run."""
        with open(fname, 'rb') as binary:
            image = binary.read()
        size = len(image)
        self.mem.set_bytes(offset, size, image)
        self.eip = offset
        self.code_segment_end = offset + size - 1
        self.mem.program_break = self.code_segment_end
        return self.run()
class ExecuteELF(ExecutionMixin):
    # Members this loader relies on from the host CPU class.
    _attrs_ = 'eip', 'mem', 'reg', 'code_segment_end'
    _funcs_ = 'run', 'stack_init', 'stack_push'

    def execute(self: CPU32, fname: str, args=()):
        """Load a 32-bit ELF executable, build the initial stack, and run it.

        :param fname: path to the ELF file; must be of type ET_EXEC.
        :param args: extra argv entries (argv[0] is always `fname` itself).
        :return: whatever ``run`` returns (EAX).
        :raises ValueError: if the file is not an executable ELF.
        """
        with ELF32(fname) as elf:
            if elf.hdr.e_type != enums.e_type.ET_EXEC:
                raise ValueError(f'ELF file {elf.fname!r} is not executable (type: {elf.hdr.e_type})')
            # Highest virtual address touched by any loadable segment.
            max_memsz = max(
                phdr.p_vaddr + phdr.p_memsz
                for phdr in elf.phdrs
                if phdr.p_type == enums.p_type.PT_LOAD
            )
            # Grow guest memory to twice the program footprint if needed.
            if self.mem.size < max_memsz * 2:
                self.mem.size = max_memsz * 2
            self.stack_init()
            for phdr in elf.phdrs:
                if phdr.p_type not in (enums.p_type.PT_LOAD, enums.p_type.PT_GNU_EH_FRAME):
                    continue
                logger.info(f'LOAD {phdr.p_memsz:10,d} bytes at address 0x{phdr.p_vaddr:09_x}')
                elf.file.seek(phdr.p_offset)
                data = elf.file.read(phdr.p_filesz)
                self.mem.set_bytes(phdr.p_vaddr, len(data), data)
                # Zero-fill the segment tail (memsz beyond filesz, i.e. BSS).
                self.mem.set_bytes(phdr.p_vaddr + phdr.p_filesz, phdr.p_memsz - phdr.p_filesz, bytearray(phdr.p_memsz - phdr.p_filesz))
        self.eip = elf.hdr.e_entry
        self.code_segment_end = self.eip + max_memsz - 1
        self.mem.program_break = self.code_segment_end
        # INITIALIZE STACK LAYOUT:
        # http://asm.sourceforge.net/articles/startup.html
        # https://lwn.net/Articles/631631/
        environment = ["USER=ForceBru"]
        args = [fname] + list(args)
        arg_addresses, env_addresses = [], []
        # Copy each argv/env string (NUL-terminated) onto the stack and
        # remember where it landed.
        for arg in args:
            arg = arg.encode() + b'\0'
            l = len(arg)
            self.mem.set_bytes(self.reg.esp - l, l, arg)
            self.reg.esp -= l
            arg_addresses.append(self.reg.esp)
        for env in environment:
            env = env.encode() + b'\0'
            l = len(env)
            self.mem.set_bytes(self.reg.esp - l, l, env)
            self.reg.esp -= l
            env_addresses.append(self.reg.esp)
        # auxiliary vector (just NULL)
        self.stack_push(0)
        # environment (array of pointers + NULL)
        self.stack_push(0)
        for addr in env_addresses[::-1]:
            self.stack_push(addr)
        # argv
        self.stack_push(0)  # end of argv
        for addr in arg_addresses[::-1]:
            self.stack_push(addr)
        # argc
        self.stack_push(len(args))
        logger.info(f'EXEC at 0x{self.eip:09_x}')
        # logger.debug(f'Stack start at 0x{self.reg.esp:08x}')
        # logger.debug(f'Stack end at 0x{self.reg.ebp:08x}')
        return self.run()
| 33.328244
| 135
| 0.535502
| 1,048
| 8,732
| 4.307252
| 0.232824
| 0.031015
| 0.017723
| 0.01595
| 0.452813
| 0.39167
| 0.33097
| 0.286221
| 0.213558
| 0.180771
| 0
| 0.022735
| 0.365323
| 8,732
| 261
| 136
| 33.455939
| 0.791772
| 0.091732
| 0
| 0.389189
| 0
| 0.021622
| 0.07201
| 0.002672
| 0
| 0
| 0.008651
| 0
| 0
| 1
| 0.032432
| false
| 0
| 0.027027
| 0.005405
| 0.183784
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5cef8d918f7406a1dd78059cb13a600f918323a
| 5,897
|
py
|
Python
|
mlpy/regression/logistic_regression.py
|
SNUDerek/MLPy
|
0d47a8ef8522a663716cda6a831855e6482069ba
|
[
"MIT"
] | 1
|
2019-05-10T10:39:12.000Z
|
2019-05-10T10:39:12.000Z
|
mlpy/regression/logistic_regression.py
|
SNUDerek/MLPy
|
0d47a8ef8522a663716cda6a831855e6482069ba
|
[
"MIT"
] | null | null | null |
mlpy/regression/logistic_regression.py
|
SNUDerek/MLPy
|
0d47a8ef8522a663716cda6a831855e6482069ba
|
[
"MIT"
] | null | null | null |
import numpy as np
from ..tools import batchGenerator
# LOGISTIC REGRESSION
# for (binary) categorical data
class LogisticRegression():
    '''
    Binary logistic regression fit with (stochastic) gradient descent.

    Parameters
    ----------
    epochs : int
        maximum epochs of gradient descent
    intercept : bool
        whether to prepend a bias ("x0" == 1.0) column to the data
    lmb : float
        (L2) regularization parameter lambda
    lr : float
        learning rate
    sgd : int
        batch size for stochastic gradient descent (0 = full-batch descent)
    tol : float
        tolerance for convergence

    Attributes
    ----------
    weights : np.ndarray
        weights (coefficients) of the fitted linear model
    costs : list of float
        negative log-likelihood recorded at each iteration of fit()
    '''

    def __init__(self, epochs=1000, intercept=False, lmb=0.0, lr=0.01, sgd=0, tol=1e-5):
        self.epochs = epochs
        self.intercept = intercept
        self.lmb = lmb
        self.lr = lr
        self.sgd = sgd
        self.tol = tol
        self.weights = np.array([])
        self.costs = []

    def _sigmoid(self, estimates):
        # Logistic link: map raw scores onto (0, 1) probabilities.
        return 1 / (1 + np.exp(-estimates))

    def _getestimate(self, x_data, y_data, weights):
        """Return (predictions, residuals, negative log-likelihood)."""
        # Hypothesis scores: features dotted with weights.
        scores = x_data.dot(weights).flatten()
        y_hat = self._sigmoid(scores)
        # Residuals between the true labels and the hypothesis.
        difference = y_data.flatten() - y_hat
        # Negative log-likelihood: -(sum y_i s_i - log(1 + e^{s_i})).
        # np.logaddexp(0, s) == log(1 + e^s) without overflow for large |s|
        # (the direct np.log(1 + np.exp(s)) form overflows past s ~ 709).
        nloglik = -np.sum(y_data * scores - np.logaddexp(0, scores))
        return y_hat, difference, nloglik

    def fit(self, x_data, y_data, verbose=False, print_iters=100):
        """Fit the model to training data.

        :param x_data: 2-D feature array (rows are samples).
        :param y_data: binary (0/1) target array.
        :param verbose: print convergence/cost diagnostics when True.
        :param print_iters: print cost every this many iterations.
        :return: list of per-iteration costs.
        """
        # STEP 1: add an x0 == 1.0 column so the intercept is just a weight.
        if self.intercept:
            x_data = np.hstack((np.ones((x_data.shape[0], 1)), x_data))
        # STEP 2: one weight per feature (+ intercept), initialized to zero.
        weights = np.zeros(x_data.shape[1])
        # STEP 3: per-weight regularization term, with the bias entry forced
        # to 0 so the intercept is never regularized.
        # NOTE(review): this subtracts a constant lmb/m from each weight
        # rather than scaling the weight itself; behavior kept as-is (it is
        # a no-op at the default lmb=0.0).
        lmbda = np.array([self.lmb / x_data.shape[0] for i in range(x_data.shape[1])])
        if self.intercept:
            lmbda[0] = 0.0
        iters = 0
        # Choose between full-batch epochs and minibatch iterations.
        if self.sgd == 0:
            maxiters = self.epochs
        else:
            maxiters = self.epochs * int(len(y_data) / self.sgd)
            minibatch = batchGenerator(x_data, y_data, self.sgd)
        for epoch in range(maxiters):
            if self.sgd == 0:
                # GRADIENT DESCENT: gradient over all training instances.
                # gradient of the negative log-likelihood: -X.T (y - y_hat)
                y_hat, difference, cost = self._getestimate(x_data, y_data, weights)
                gradient = -np.dot(x_data.T, difference)
            else:
                # STOCHASTIC (minibatch) GRADIENT DESCENT: gradient over a
                # random minibatch ("true" SGD is sgd=1).
                x_batch, y_batch = next(minibatch)
                y_hat, difference, cost = self._getestimate(x_batch, y_batch, weights)
                # BUG FIX: the gradient must use the same minibatch as the
                # residuals. The original used x_data.T here, which raises a
                # shape mismatch whenever 0 < sgd < len(y_data).
                gradient = -np.dot(x_batch.T, difference)
            # Step "backwards" along the gradient, applying regularization.
            new_weights = (weights - lmbda) - gradient * self.lr
            # Stop when the total absolute weight change falls below tol.
            if np.sum(abs(new_weights - weights)) < self.tol:
                if verbose:
                    print("converged after {0} iterations".format(iters))
                break
            weights = new_weights
            self.costs.append(cost)
            iters += 1
            if verbose and iters % print_iters == 0:
                print("iteration {0}: cost: {1}".format(iters, cost))
        self.weights = weights
        return self.costs

    def predict_proba(self, x_data):
        """Return P(y == 1) for every row of x_data."""
        if self.intercept:
            x_data = np.hstack((np.ones((x_data.shape[0], 1)), x_data))
        scores = x_data.dot(self.weights).flatten()
        return self._sigmoid(scores)

    def predict(self, x_data):
        """Return hard 0.0/1.0 class labels (decision threshold 0.5)."""
        y_hat = self.predict_proba(x_data)
        preds = []
        for p in y_hat:
            if p > 0.5:
                preds.append(1.0)
            else:
                preds.append(0.0)
        return preds
| 32.944134
| 89
| 0.587248
| 776
| 5,897
| 4.377577
| 0.289948
| 0.035325
| 0.017663
| 0.011775
| 0.162496
| 0.119517
| 0.119517
| 0.0995
| 0.078304
| 0.078304
| 0
| 0.017794
| 0.323385
| 5,897
| 178
| 90
| 33.129213
| 0.833584
| 0.420892
| 0
| 0.202899
| 0
| 0
| 0.016448
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.028986
| 0
| 0.202899
| 0.057971
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5d2df25221764ec5395b74a6c3cb30a216ee3ff
| 12,269
|
py
|
Python
|
server.py
|
satriabw/Tugas_Sisdis
|
b1e152f35834e52806071b9b1424b114dce65148
|
[
"MIT"
] | null | null | null |
server.py
|
satriabw/Tugas_Sisdis
|
b1e152f35834e52806071b9b1424b114dce65148
|
[
"MIT"
] | null | null | null |
server.py
|
satriabw/Tugas_Sisdis
|
b1e152f35834e52806071b9b1424b114dce65148
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from random import randint
from urllib.parse import parse_qs
import socket
import sys
import json
import traceback
import os
import base64
import yaml
import datetime
import requests
import re
class Route:
    """Minimal method+path router: register handlers, look them up later."""

    # Matches /api/plusone/<number>; compiled once instead of per dispatch,
    # and compared with `is not None` rather than the original `!= None`.
    _PLUSONE_RE = re.compile(r'/api/plusone/[0-9]*[0-9]$')

    def __init__(self):
        self._route = []

    def route(self, method, path, handler):
        """Register `handler` for the (method, path) pair."""
        self._route.append({"method": method, "path": path, "handler": handler})

    def dispatch(self, path, method):
        """Return the handler for (method, path), or None if unregistered.

        Numeric /api/plusone/<n> paths are canonicalized onto the
        registered "/api/plusone/<:digit>" placeholder path.
        """
        if Route._PLUSONE_RE.match(path) is not None:
            path = "/api/plusone/<:digit>"
        for item in self._route:
            if item["path"] == path and item["method"] == method:
                return item["handler"]
        return None

    def findPath(self, path):
        """Return True if any route is registered under `path` (any method)."""
        for item in self._route:
            if item["path"] == path:
                return True
        return False
# Module-level router instance shared by main() (registration) and handler()
# (dispatch).
route = Route()
class HTTPRequest:
    """Parsed view over one raw HTTP request received as bytes."""

    def __init__(self, request):
        # request: the raw request bytes, headers and body included.
        self._raw_request = request
        self._build_header()
        self._build_body()

    def _build_header(self):
        """Populate self.header from the request line and header lines."""
        raw_head = self._split_request()[0]
        head = raw_head.split("\n")
        # Get method, path, and http version from the request line.
        temp = head[0].split(" ")
        self.header = {
            "method" : temp[0],
            "path" : temp[1],
            "http_version" : temp[2],
        }
        # Get Content-Type and Content-Length (keys absent when not sent).
        for info in head:
            if "Content-Type" in info:
                self.header["content_type"] = info.split(" ")[1]
                continue
            if "Content-Length" in info:
                self.header["content_length"] = info.split(" ")[1]

    def _build_body(self):
        # Everything after the first blank line is the body.
        self._raw_body = self._split_request()[1]

    def _split_request(self):
        # Strip CR characters, then split head from body on the blank line.
        return self._raw_request.decode(
            "utf-8").replace("\r", "").split("\n\n")

    def body_json(self):
        """Parse the body as JSON, wrapped into a single-element list."""
        return json.loads('[{}]'.format(self._raw_body))

    def body_query(self, query):
        """Return the list of values for `query` in a urlencoded body."""
        return parse_qs(self._raw_body)[query]
def validation(func):
    """Decorator: reject requests whose HTTP version is not 1.0 or 1.1.

    Invalid requests are answered via badRequest; valid ones fall through
    to the wrapped handler.
    """
    def func_wrapper(conn, request):
        # Tuple membership, not a substring test: the original compared with
        # `version not in "HTTP/1.0"`, which accepted any substring of the
        # literal (e.g. "TTP/1.0" or "P/1.") as a valid version.
        if request.header["http_version"] not in ("HTTP/1.0", "HTTP/1.1"):
            badRequest(conn, request)
        else:
            func(conn, request)
    return func_wrapper
@validation
def getRoot(conn, request):
    """Answer GET / with a 302 redirect to /hello-world."""
    print("Hooray getRoot end point is hitted\n")
    status = "302 Found"
    loc = "/hello-world"
    c_type = "text/plain; charset=UTF-8"
    data = '302 Found: Location: /hello-world'
    response = renderMessage(status, str(21+len(loc)), loc, None, c_type, data)
    writeResponse(conn, response)
@validation
def getHelloWorld(conn, request):
    """Serve hello-world.html with the __HELLO__ placeholder set to 'World'."""
    with open("./hello-world.html", "r") as page:
        data = page.read().replace("__HELLO__", "World")
    status = "200 OK"
    c_type = "text/html"
    writeResponse(conn, renderMessage(status, str(len(data)), None, None, c_type, data))
@validation
def getStyle(conn, request):
    """Serve the site stylesheet."""
    with open("./style.css", "r") as sheet:
        stylesheet = sheet.read()
    status = "200 OK"
    c_type = "text/css"
    writeResponse(conn, renderMessage(status, str(len(stylesheet)), None, None, c_type, stylesheet))
@validation
def getBackground(conn, request):
    """Serve background.jpg; headers first, then the raw image bytes."""
    with open("./background.jpg", "rb") as image:
        img = image.read()
    status = "200 OK"
    c_type = "image/jpeg"
    enc = "base64"
    head = renderMessage(status, str(len(img)), None, enc, c_type, "")
    writeResponse(conn, head + img)
@validation
def getSpesifikasi(conn, request):
    """Serve the raw API specification file as plain text."""
    # Local renamed from `yaml` to avoid shadowing the yaml module.
    with open("./spesifikasi.yaml", "r") as spec_file:
        spec = spec_file.read()
    status = "200 OK"
    c_type = "text/plain; charset=UTF-8"
    writeResponse(conn, renderMessage(status, str(len(spec)), None, None, c_type, spec))
@validation
def getInfo(conn, request):
    """Serve /info?type=time|random as plain text; 'No Data' otherwise."""
    query = request.header["path"].split("?")
    data = "No Data"
    try:
        tipe = exctractUrl(query[1], "type")
        if tipe == "time":
            data = "{}".format(datetime.datetime.now())
        elif tipe == "random":
            data = "{}".format(randint(111111,999999))
    except (IndexError, ValueError):
        # Missing/invalid query string: deliberately fall back to "No Data".
        pass
    status = "200 OK"
    c_type = "text/plain; charset=UTF-8"
    writeResponse(conn, renderMessage(status, str(len(data)), None, None, c_type, data))
def notFound(conn, request):
    """Send a 404 response: JSON for /api paths, plain text otherwise."""
    if "/api" in request.header["path"]:
        notFoundJson(conn)
        # BUG FIX: the original fell through after the JSON response and
        # wrote a second, plain-text 404 onto the same connection.
        return
    status = "404 Not Found"
    c_type = "text/plain; charset=UTF-8"
    msgErr = renderMessage(status, str(len(status)), None, None, c_type, status)
    writeResponse(conn, msgErr)
def notImplemented(conn, request):
    """Send a plain-text 501 response."""
    status = "501 Not Implemented"
    content_type = "text/plain; charset=UTF-8"
    writeResponse(conn, renderMessage(status, str(len(status)), None, None, content_type, status))
def badRequest(conn, request):
    """Send a 400 response: JSON for /api paths, plain text otherwise."""
    if "/api" in request.header["path"]:
        badRequestJson(conn, "Please use proper http version")
        # BUG FIX: the original fell through after the JSON response and
        # wrote a second, plain-text 400 onto the same connection.
        return
    status = "400 Bad Request"
    c_type = "text/plain; charset=UTF-8"
    msgErr = renderMessage(status, str(len(status)), None, None, c_type, status)
    writeResponse(conn, msgErr)
@validation
def postHelloWorld(conn, request):
    """Handle POST /hello-world: fill the template with the submitted name.

    Answers 400 (via badRequest) when the body is not urlencoded, the
    'name' field is absent, or required headers are missing.
    """
    debugger = "Hooray postHelloWorld end point is hitted\n"
    print(debugger)
    try:
        if request.header["content_type"] == "application/x-www-form-urlencoded":
            name = request.body_query("name")[0]
            with open("./hello-world.html", "r") as f:
                html = f.read()
            data = html.replace("__HELLO__", str(name))
            status = "200 OK"
            c_type = "text/html; charset=UTF-8"
            msgSuccess = renderMessage(status, str(len(data)), None, None, c_type, data)
            writeResponse(conn, msgSuccess)
        else:
            raise ValueError("Cannot parse the request")
    except (IndexError, KeyError, ValueError) as e:
        # Missing 'name', absent Content-Type header, or wrong body format.
        badRequest(conn, request)
def validateHelloAPI(func):
    """Decorator for /api/hello: enforce HTTP version, POST method, JSON body.

    Each violation is answered with the matching JSON error response; only
    fully valid requests reach the wrapped handler.
    """
    def func_wrapper(conn, request):
        # Tuple membership, not a substring test: the original compared with
        # `version not in "HTTP/1.0"`, which accepted any substring of the
        # literal (e.g. "TTP/1.0") as a valid version.
        if request.header["http_version"] not in ("HTTP/1.0", "HTTP/1.1"):
            badRequestJson(conn, "Please use proper http version")
        elif request.header["method"] != "POST":
            methodNotAllowedJson(conn, "Method is not allowed, please use POST method")
        elif request.header["content_type"] != "application/json":
            methodNotAllowedJson(conn, "please use application/json")
        else:
            func(conn, request)
    return func_wrapper
@validateHelloAPI
def helloAPI(conn, request):
    """POST /api/hello: greet the caller and count the visit.

    Queries a sibling service at 172.22.0.222:5000 for a JSON payload with
    a 'state' field (used in the greeting), increments the persisted visit
    counter, and answers with a JSON body. A body without the required
    'request' property yields a JSON 400.
    """
    req = requests.get(url='http://172.22.0.222:5000')
    data = req.json()
    current_visit = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
    try:
        name = request.body_json()[0]["request"]
        count = getCounter() + 1
        writeCounter(count)
        res = "Good {}, {}".format(data["state"], name)
        json_http_ok(conn, count=count, currentvisit=current_visit, response=res)
    except KeyError:
        badRequestJson(conn, "'request' is a required property")
@validation
def plusOneAPI(conn, request):
    """GET /api/plusone/<n>: respond with n + 1 as JSON."""
    number = int(request.header["path"].split("/")[-1])
    json_http_ok(conn, plusoneret=number + 1)
def getTime(t_raw):
    """Convert a 'YYYY-mm-dd HH:MM:SS' string to ISO-8601 with a Z suffix."""
    parsed = datetime.datetime.strptime(t_raw, "%Y-%m-%d %H:%M:%S")
    return parsed.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
def getCounter():
    """Read the persisted visit counter from counter.json."""
    with open('counter.json', 'r') as json_file:
        return json.load(json_file)["count"]
def writeCounter(c):
    """Persist the visit counter value `c` to counter.json."""
    with open('counter.json', 'w') as json_file:
        json.dump({"count": c}, json_file)
def getApiVersion():
    """Read the API version string from spesifikasi.yaml.

    Uses yaml.safe_load: the original bare yaml.load(f) call requires an
    explicit Loader in PyYAML >= 6 (TypeError otherwise) and is unsafe on
    untrusted input.
    """
    with open('./spesifikasi.yaml', 'r') as f:
        doc = yaml.safe_load(f)
    return doc["info"]["version"]
def notFoundJson(conn):
    """Send the JSON-formatted 404 error body."""
    json_http_error(
        conn,
        "The requested URL was not found on the server. If you entered the URL manually please check your spelling and try again.",
        "404",
        "Not Found",
    )
def methodNotAllowedJson(conn, d):
    """Send a JSON 405 Method Not Allowed response with detail text `d`."""
    json_http_error(conn, d, "405", "Method Not Allowed")
def badRequestJson(conn, d):
    """Send a JSON 400 Bad Request response with detail text `d`."""
    json_http_error(conn, d, "400", "Bad Request")
def json_http_ok(conn, **kwargs):
    """Send a 200 JSON response carrying `kwargs` plus the API version."""
    payload = {'apiversion': getApiVersion()}
    payload.update(kwargs)
    body = json.dumps(payload)
    message = renderMessage(
        "200 OK",
        str(len(body)),
        None,
        None,
        "application/json; charset=UTF-8",
        body,
    )
    writeResponse(conn, message)
def json_http_error(conn, detail, status, title):
    """Send a JSON error response ("<status> <title>") with the given detail."""
    body = json.dumps({'detail': detail, 'status': status, 'title': title})
    message = renderMessage(
        "{} {}".format(status, title),
        str(len(body)),
        None,
        None,
        "application/json; charset=UTF-8",
        body,
    )
    writeResponse(conn, message)
def main():
    """Register all routes and start serving on 0.0.0.0:<argv[1]>."""
    # HOST = socket.gethostbyname(socket.gethostname())
    HOST = "0.0.0.0"
    PORT = int(sys.argv[1])
    # GET routes (registration order preserved from the original).
    get_routes = (
        ("/", getRoot),
        ("/hello-world", getHelloWorld),
        ("/style", getStyle),
        ("/background", getBackground),
        ("/info", getInfo),
        ("/api/hello", helloAPI),
        ("/api/plusone/<:digit>", plusOneAPI),
        ("/api/spesifikasi.yaml", getSpesifikasi),
    )
    for path, handler_func in get_routes:
        route.route("GET", path, handler_func)
    # POST routes
    route.route("POST", "/api/hello", helloAPI)
    route.route("POST", "/hello-world", postHelloWorld)
    # Every remaining verb on /api/hello goes to the same handler, whose
    # decorator rejects anything that is not a JSON POST.
    for verb in ("PUT", "PATCH", "DELETE", "HEAD"):
        route.route(verb, "/api/hello", helloAPI)
    # Serve the connection
    connect(HOST, PORT)
def handler(conn, req):
    """Dispatch a parsed request to its registered route handler.

    A TypeError from dispatch means no handler matched: respond 501 if the
    path exists under another method, 404 otherwise.
    """
    path = cleanURL(req.header["path"])
    try:
        print("=== Got Request ===\n{}\n===Got Header====\n{}\n".format(
            req._raw_request, req.header))
        route.dispatch(path, req.header["method"])(conn, req)
    except TypeError:
        print(traceback.format_exc())
        if route.findPath(path):
            notImplemented(conn, req)
        else:
            notFound(conn, req)
def cleanURL(url):
    """Strip the query string, returning only the path portion of `url`."""
    path, _, _query = url.partition("?")
    return path
def writeResponse(conn, message):
    """Log the outgoing message, then write all of it to the socket."""
    print("=== Got Message ===\n{}\n".format(message))
    conn.sendall(message)
def renderMessage(stat, c_length, location, encoding, c_type, data):
    """Render an HTTP/1.1 response message as UTF-8 bytes.

    Any argument that is None is simply omitted from the output.

    Parameters
    ----------
    stat : status line text, e.g. "200 OK"; also emits "Connection: close"
    c_length : value for the Content-Length header
    location : value for the Location header
    encoding : value for the Content-Transfer-Encoding header
    c_type : value for the Content-Type header
    data : response body text, appended after the blank separator line

    Returns
    -------
    bytes : the encoded response message.
    """
    # Idiom fix: build the message with a parts list + join instead of
    # repeated string concatenation, and compare to None with `is not`.
    parts = []
    if stat is not None:
        parts.append("HTTP/1.1 {}\r\n".format(stat))
        # The server closes the connection after every response (no keep-alive).
        parts.append("Connection: close\r\n")
    if c_length is not None:
        parts.append("Content-Length: {}\r\n".format(c_length))
    if location is not None:
        parts.append("Location: {}\r\n".format(location))
    if encoding is not None:
        parts.append("Content-Transfer-Encoding: {}\r\n".format(encoding))
    if c_type is not None:
        parts.append("Content-Type: {}\r\n".format(c_type))
    if data is not None:
        parts.append("\r\n" + data)
    return bytes("".join(parts), "utf-8")
def exctractUrl(url, query):
    """Return the first value of parameter `query` from a query string.

    Raises KeyError when the parameter is absent.
    """
    values = parse_qs(url)[query]
    return values[0]
def connect(host, port):
    """Bind a TCP listening socket and serve requests forever.

    Each connection is read once (up to 1024 bytes), parsed into an
    HTTPRequest, dispatched via handler(), then shut down.  Any
    per-connection error is logged and the accept loop continues.
    """
    # `with` guarantees the listening socket is closed if the loop ever
    # exits (e.g. KeyboardInterrupt); the original leaked it.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        # Allow quick restarts without "Address already in use".
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((host, port))
        s.listen()
        while True:
            try:
                conn, addr = s.accept()
                # NOTE(review): a single 1024-byte recv may truncate large
                # requests -- confirm that is acceptable for this server.
                data = conn.recv(1024)
                req = HTTPRequest(data)
                handler(conn, req)
                conn.shutdown(socket.SHUT_WR)
                conn.close()
            except Exception:
                print(traceback.format_exc())
                continue
if __name__ == "__main__":
    # Only start the server when run as a script; the original called main()
    # unconditionally, which would also fire on import.
    main()
| 31.060759
| 136
| 0.605102
| 1,530
| 12,269
| 4.764706
| 0.179739
| 0.018519
| 0.036214
| 0.037723
| 0.340055
| 0.274074
| 0.274074
| 0.21893
| 0.185322
| 0.16749
| 0
| 0.013786
| 0.249165
| 12,269
| 395
| 137
| 31.060759
| 0.777573
| 0.017116
| 0
| 0.279365
| 0
| 0.003175
| 0.164937
| 0.015689
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0.003175
| 0.038095
| 0.015873
| 0.231746
| 0.019048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5d9baaf2337daeafdfe9b9a22db73d38a684f6f
| 576
|
py
|
Python
|
functions-and-keda/src/python-function-publisher/QueueTrigger/__init__.py
|
emctl/samples
|
569f81035a6c214d4cda3687173e24003f17f95e
|
[
"MIT"
] | 3
|
2021-11-16T11:24:27.000Z
|
2021-11-21T17:11:24.000Z
|
functions-and-keda/src/python-function-publisher/QueueTrigger/__init__.py
|
emctl/samples
|
569f81035a6c214d4cda3687173e24003f17f95e
|
[
"MIT"
] | 7
|
2021-09-01T06:50:41.000Z
|
2021-09-03T23:12:07.000Z
|
functions-and-keda/src/python-function-publisher/QueueTrigger/__init__.py
|
emctl/samples
|
569f81035a6c214d4cda3687173e24003f17f95e
|
[
"MIT"
] | 4
|
2021-02-05T17:30:28.000Z
|
2021-08-16T21:26:55.000Z
|
import logging
import requests
import json
import azure.functions as func
dapr_url = "http://localhost:3500/v1.0"
def main(msg: func.QueueMessage):
    """Azure Functions queue trigger: republish the queue message via Dapr.

    Decodes the queue message body as UTF-8 and POSTs it to the local Dapr
    sidecar's publish endpoint under topic "myTopic".
    """
    logging.info(f"Python queue-triggered function received a message!")
    message = msg.get_body().decode('utf-8')
    logging.info(f"Message: {message}")
    # Publish an event
    url = f'{dapr_url}/publish/myTopic'
    content = { "message": message }
    logging.info(f'POST to {url} with content {json.dumps(content)}')
    p = requests.post(url, json=content)
    # NOTE(review): the response code is only logged; a non-2xx publish is
    # silently ignored -- confirm that best-effort delivery is intended.
    logging.info(f'Got response code {p.status_code}')
| 28.8
| 72
| 0.697917
| 83
| 576
| 4.795181
| 0.566265
| 0.110553
| 0.120603
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014523
| 0.163194
| 576
| 19
| 73
| 30.315789
| 0.811203
| 0.027778
| 0
| 0
| 0
| 0
| 0.383513
| 0.084229
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.285714
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5dac3d6ca2b3d760f8736d068bcd1c838b5581c
| 2,618
|
py
|
Python
|
tests/unit/test_upstream_dataset.py
|
ianbakst/tamr-client
|
ae7a6190a2251d31f973f9d5a7170ac5dc097f97
|
[
"Apache-2.0"
] | 9
|
2019-08-13T11:07:06.000Z
|
2022-01-14T18:15:13.000Z
|
tests/unit/test_upstream_dataset.py
|
ianbakst/tamr-client
|
ae7a6190a2251d31f973f9d5a7170ac5dc097f97
|
[
"Apache-2.0"
] | 166
|
2019-08-09T18:51:05.000Z
|
2021-12-02T15:24:15.000Z
|
tests/unit/test_upstream_dataset.py
|
ianbakst/tamr-client
|
ae7a6190a2251d31f973f9d5a7170ac5dc097f97
|
[
"Apache-2.0"
] | 21
|
2019-08-12T15:37:31.000Z
|
2021-06-15T14:06:23.000Z
|
import responses
from tamr_unify_client import Client
from tamr_unify_client.auth import UsernamePasswordAuth
@responses.activate
def test_upstream_dataset():
    """Verify Dataset.upstream_datasets() resolves and fetches its upstream.

    Mocks the Tamr versioned API with `responses`: dataset 12 declares
    dataset 8 in upstreamDatasetIds; the test checks the returned URI
    resource and that it dereferences to the full dataset 8 record.
    """
    # Fixture: the project's unified dataset (id 12) pointing upstream to id 8.
    dataset_json = {
        "id": "unify://unified-data/v1/datasets/12",
        "name": "Project_1_unified_dataset_dedup_features",
        "description": "Features for all the rows and values in the source dataset. Used in Dedup Workflow.",
        "version": "543",
        "keyAttributeNames": ["entityId"],
        "tags": [],
        "created": {
            "username": "admin",
            "time": "2019-06-05T18:31:59.327Z",
            "version": "212",
        },
        "lastModified": {
            "username": "admin",
            "time": "2019-07-18T14:19:28.133Z",
            "version": "22225",
        },
        "relativeId": "datasets/12",
        "upstreamDatasetIds": ["unify://unified-data/v1/datasets/8"],
        "externalId": "Project_1_unified_dataset_dedup_features",
    }
    # The upstreamDatasets endpoint returns bare URIs, not full records.
    upstream_json = ["unify://unified-data/v1/datasets/8"]
    # Fixture: the full record behind the upstream URI.
    upstream_ds_json = {
        "id": "unify://unified-data/v1/datasets/8",
        "name": "Project_1_unified_dataset",
        "description": "",
        "version": "529",
        "keyAttributeNames": ["tamr_id"],
        "tags": [],
        "created": {
            "username": "admin",
            "time": "2019-06-05T16:28:11.639Z",
            "version": "83",
        },
        "lastModified": {
            "username": "admin",
            "time": "2019-07-22T20:31:23.968Z",
            "version": "23146",
        },
        "relativeId": "datasets/8",
        "upstreamDatasetIds": ["unify://unified-data/v1/datasets/6"],
        "externalId": "Project_1_unified_dataset",
        "resourceId": "8",
    }
    tamr = Client(UsernamePasswordAuth("username", "password"))
    url_prefix = "http://localhost:9100/api/versioned/v1/"
    dataset_url = url_prefix + "datasets/12"
    upstream_url = url_prefix + "datasets/12/upstreamDatasets"
    upstream_ds_url = url_prefix + "datasets/8"
    # Register the three mocked GETs the client will issue, in call order.
    responses.add(responses.GET, dataset_url, json=dataset_json)
    responses.add(responses.GET, upstream_url, json=upstream_json)
    responses.add(responses.GET, upstream_ds_url, json=upstream_ds_json)
    project_ds = tamr.datasets.by_relative_id("datasets/12")
    actual_upstream_ds = project_ds.upstream_datasets()
    uri_dataset = actual_upstream_ds[0].dataset()
    assert actual_upstream_ds[0].relative_id == upstream_ds_json["relativeId"]
    assert actual_upstream_ds[0].resource_id == upstream_ds_json["resourceId"]
    assert uri_dataset.name == upstream_ds_json["name"]
| 34.906667
| 109
| 0.615737
| 285
| 2,618
| 5.424561
| 0.326316
| 0.071151
| 0.051746
| 0.058215
| 0.402975
| 0.298189
| 0.085382
| 0
| 0
| 0
| 0
| 0.061286
| 0.233384
| 2,618
| 74
| 110
| 35.378378
| 0.709018
| 0
| 0
| 0.15873
| 0
| 0
| 0.377005
| 0.162338
| 0
| 0
| 0
| 0
| 0.047619
| 1
| 0.015873
| false
| 0.031746
| 0.047619
| 0
| 0.063492
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5daeaca530d32aa4078eb1a40a959857dd7e442
| 14,531
|
py
|
Python
|
pmaf/sequence/_multiple/_multiple.py
|
mmtechslv/PhyloMAF
|
bab43dd4a4d2812951b1fdf4f1abb83edb79ea88
|
[
"BSD-3-Clause"
] | 1
|
2021-07-02T06:24:17.000Z
|
2021-07-02T06:24:17.000Z
|
pmaf/sequence/_multiple/_multiple.py
|
mmtechslv/PhyloMAF
|
bab43dd4a4d2812951b1fdf4f1abb83edb79ea88
|
[
"BSD-3-Clause"
] | 1
|
2021-06-28T12:02:46.000Z
|
2021-06-28T12:02:46.000Z
|
pmaf/sequence/_multiple/_multiple.py
|
mmtechslv/PhyloMAF
|
bab43dd4a4d2812951b1fdf4f1abb83edb79ea88
|
[
"BSD-3-Clause"
] | null | null | null |
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from skbio import TabularMSA
from skbio.sequence import GrammaredSequence
from io import StringIO, IOBase
from shutil import copyfileobj
import copy
import numpy as np
from pmaf.internal.io._seq import SequenceIO
from pmaf.sequence._sequence._nucleotide import Nucleotide
from pmaf.sequence._metakit import MultiSequenceMetabase, NucleotideMetabase
from pmaf.sequence._shared import validate_seq_mode
from typing import Union, Optional, Any, Sequence, Generator
from pmaf.internal._typing import AnyGenericIdentifier
class MultiSequence(MultiSequenceMetabase):
    """Class responsible for handling multiple sequences."""
    def __init__(
        self,
        sequences: Any,
        name: Optional[str] = None,
        mode: Optional[str] = None,
        metadata: Optional[dict] = None,
        aligned: bool = False,
        **kwargs: Any
    ):
        """Constructor for :class:`.MultiSequence`
        Parameters
        ----------
        sequences
            Anything that can be parsed as multiple sequences.
        name
            Name of the multi-sequence instance
        mode
            Mode of the sequences. All sequences must have same mode/type.
            Otherwise error will be raised
        metadata
            Metadata of the multi-sequence instance
        aligned
            True if sequences are aligned. Default is False
        kwargs
            Compatibility
        """
        # Validate the scalar name, the metadata dict and the mode string
        # before any sequence parsing is attempted.
        if name is None or np.isscalar(name):
            tmp_name = name
        else:
            raise TypeError("`name` can be any scalar")
        if isinstance(metadata, dict):
            tmp_metadata = metadata
        elif metadata is None:
            tmp_metadata = {}
        else:
            raise TypeError("`metadata` can be dict or None")
        if mode is not None:
            if validate_seq_mode(mode):
                tmp_mode = mode.lower()
            else:
                raise ValueError("`mode` is invalid.")
        else:
            tmp_mode = mode
        # Three accepted input forms: a list of Nucleotide-like instances
        # (used as-is), a list of skbio GrammaredSequence (wrapped), or any
        # raw source readable by SequenceIO (requires an explicit mode).
        tmp_sequences = []
        if isinstance(sequences, list):
            if all(
                [isinstance(sequence, NucleotideMetabase) for sequence in sequences]
            ):
                tmp_sequences = sequences
            elif all(
                [isinstance(sequence, GrammaredSequence) for sequence in sequences]
            ):
                tmp_sequences = [
                    Nucleotide(skbio_seq, mode=None, **kwargs)
                    for skbio_seq in sequences
                ]
            else:
                raise ValueError(
                    "`sequences` must have same type when provided as list."
                )
        else:
            if tmp_mode is not None:
                seq_gen = SequenceIO(sequences, upper=True).pull_parser(
                    parser="simple", id=True, description=True, sequence=True
                )
                for sid, desc, seq_str in seq_gen:
                    tmp_sequences.append(
                        Nucleotide(
                            seq_str,
                            name=sid,
                            mode=tmp_mode,
                            metadata={"description": desc},
                            **kwargs
                        )
                    )
            else:
                raise ValueError("`mode` cannot be None if raw read is performed.")
        # Cross-sequence invariants: equal lengths when aligned, unique
        # names, and a single shared mode consistent with `mode`.
        if aligned:
            if len(set([sequence.length for sequence in tmp_sequences])) != 1:
                raise ValueError("`sequences` must be all of the length if aligned.")
        tmp_indices = [sequence.name for sequence in tmp_sequences]
        if len(tmp_indices) != len(set(tmp_indices)):
            raise ValueError("`sequences` must have unique names.")
        tmp_modes = set([sequence.mode for sequence in tmp_sequences])
        if len(tmp_modes) > 1:
            raise ValueError("`sequences` cannot have different modes.")
        if tmp_mode is not None:
            if tmp_mode not in tmp_modes:
                raise ValueError("`mode` must match modes of sequences.")
        else:
            tmp_mode = tmp_modes.pop()
        tmp_internal_id = kwargs.get("internal_id", None)
        if tmp_internal_id is not None:
            for sequence in tmp_sequences:
                if tmp_internal_id not in sequence.metadata.keys():
                    raise ValueError(
                        "Metadata of all sequences must contain same internal_id."
                    )
        # NOTE(review): the constructor assumes at least one sequence --
        # empty input raises KeyError at tmp_modes.pop() above or IndexError
        # at tmp_sequences[0] below; confirm that is intended.
        self.__indices = np.asarray([seq.name for seq in tmp_sequences])  # sequence names
        self.__sequences = tmp_sequences  # list of Nucleotide instances
        self.__metadata = tmp_metadata  # instance-level metadata dict
        self.__aligned = bool(aligned)  # True when this is an MSA
        self.__internal_id = tmp_internal_id  # shared id used for buckling
        self.__skbio_mode = tmp_sequences[0].skbio_mode  # skbio type of members
        self.__mode = tmp_mode  # shared sequence mode
        self.__name = tmp_name  # scalar instance name
        self.__buckled = bool(kwargs.get("buckled", None))  # buckling state flag
    def __repr__(self):
        # Summary line: count, name, mode, alignment flag, metadata presence.
        class_name = self.__class__.__name__
        name = self.__name if self.__name is not None else "N/A"
        count = len(self.__sequences)
        metadata_state = "Present" if len(self.__metadata) > 0 else "N/A"
        aligned = "Yes" if self.__aligned else "No"
        mode = self.__mode.upper() if self.__mode is not None else "N/A"
        repr_str = (
            "<{}:[{}], Name:[{}], Mode:[{}], Aligned: [{}], Metadata:[{}]>".format(
                class_name, count, name, mode, aligned, metadata_state
            )
        )
        return repr_str
    def to_skbio_msa(
        self, indices: Optional[AnyGenericIdentifier] = None
    ) -> TabularMSA:
        """Convert to :mod:`skbio` :class:`~skbio.alignment.TabularMSA`
        instance.
        Parameters
        ----------
        indices
            List of target sequences to select. Default is None for all sequences.
        Returns
        -------
        Instance of :class:`skbio.alignment.TabularMSA`
        """
        if self.__aligned:
            tmp_sequences = self.__get_seqs_by_index(indices)
            return TabularMSA([sequence.skbio for sequence in tmp_sequences])
        else:
            raise RuntimeError("TabularMSA can only be retrieved for alignment.")
    def __get_seqs_by_index(self, ids: Optional[AnyGenericIdentifier]):
        """Get sequences by indices/ids."""
        if ids is not None:
            target_ids = np.asarray(ids)
        else:
            target_ids = self.__indices
        # Names are unique (enforced in __init__), so the membership count
        # equals len(target_ids) exactly when every requested id exists.
        if np.isin(self.__indices, target_ids).sum() == len(target_ids):
            return [seq for seq in self.__sequences if seq.name in target_ids]
        else:
            raise ValueError("Invalid indices are provided.")
    def get_consensus(
        self, indices: Optional[AnyGenericIdentifier] = None
    ) -> Nucleotide:
        """If sequence are aligned, estimate consensus sequence from the
        :term:`MSA`
        Parameters
        ----------
        indices
            List of target sequences to select. Default is None for all sequences.
        Returns
        -------
        Consensus sequence.
        """
        if self.__aligned:
            tmp_msa = self.to_skbio_msa(indices)
            return Nucleotide(
                tmp_msa.consensus(),
                name=self.__name,
                metadata=self.__metadata,
                mode=self.__mode,
            )
        else:
            raise RuntimeError("Consensus can be retrieved only from alignment.")
    def get_subset(
        self, indices: Optional[AnyGenericIdentifier] = None
    ) -> "MultiSequence":
        """Get subset of the mutli-sequence instance.
        Parameters
        ----------
        indices
            Indices to subset for.
        Returns
        -------
        Subset instance of :class:`.MultiSequence`
        """
        return type(self)(
            self.__get_seqs_by_index(indices),
            name=self.__name,
            metadata=self.__metadata,
            mode=self.__mode,
            aligned=self.__aligned,
        )
    def buckle_for_alignment(self) -> dict:
        """Buckle individual sequences for alignment.
        Returns
        -------
        Packed metadata of all sequences.
        """
        if not self.__buckled:
            from collections import defaultdict
            from random import random
            # A random numeric internal id is minted when none was supplied;
            # it ties the packed metadata back to this instance.
            if self.__internal_id is None:
                self.__internal_id = round(random() * 100000, None)
            packed_metadata = {
                "master-metadata": self.__metadata,
                "__name": self.__name,
                "__internal_id": self.__internal_id,
            }
            children_metadata = defaultdict(dict)
            # Each child is renamed to a temporary positional id so external
            # aligners cannot clash on (or mangle) the original names.
            for tmp_uid, sequence in enumerate(self.__sequences):
                tmp_uid_str = "TMP_ID_{}".format(str(tmp_uid))
                children_metadata[tmp_uid_str] = sequence.buckle_by_uid(tmp_uid_str)
            packed_metadata.update({"children-metadata": dict(children_metadata)})
            self.__buckled = True
            return packed_metadata
        else:
            raise RuntimeError("MultiSequence instance is already buckled.")
    def restore_buckle(self, buckled_pack: dict) -> None:
        """Restore the buckled :class:`MultiSequence` instance.
        Parameters
        ----------
        buckled_pack
            Backed up packed metadata of all individual sequences
        Returns
        -------
        None if success or raise error
        """
        if self.__buckled:
            self.__metadata = buckled_pack["master-metadata"]
            self.__name = buckled_pack["__name"]
            self.__internal_id = buckled_pack["__internal_id"]
            for sequence in self.__sequences:
                tmp_uid = sequence.unbuckle_uid()
                child_packed_metadata = buckled_pack["children-metadata"][tmp_uid]
                sequence.restore_buckle(child_packed_metadata)
            # Re-derive the index array: restoring brings back original names.
            self.__indices = np.asarray([seq.name for seq in self.__sequences])
        else:
            raise RuntimeError("MultiSequence instance is not buckled.")
    def get_iter(self, method: str = "asis") -> Generator:
        """Get generator for the idividual sequences.
        Parameters
        ----------
        method
            Method indicate how generator must yield the sequence data
        Returns
        -------
        Generator for the sequences.
        Depending on `method` result can yield on of following:
        - 'asis' - (name[str], sequence[Instance])
        - 'string' - (name[str], sequence[str])
        - 'skbio' - (name[str], sequence[skbio])
        """
        def make_generator():
            for sequence in self.__sequences:
                if method == "asis":
                    yield sequence.name, sequence
                elif method == "string":
                    yield sequence.name, sequence.text
                elif method == "skbio":
                    yield sequence.name, sequence.skbio
                else:
                    raise ValueError("`method` is invalid.")
        return make_generator()
    def copy(self):
        """Copy current instance."""
        return copy.deepcopy(self)
    def write(self, file: Union[str, IOBase], mode: str = "w", **kwargs: Any) -> None:
        """Write the sequence data into the file.
        Parameters
        ----------
        file
            File path or IO stream to write into
        mode
            File write mode such as "w" or "a" or "w+"
        kwargs
            Compatibility.
        """
        buffer_io = self.__make_fasta_io(**kwargs)
        if isinstance(file, IOBase):
            file_handle = file
        elif isinstance(file, str):
            file_handle = open(file, mode=mode)
        else:
            raise TypeError("`file` has invalid type.")
        # NOTE(review): a path-opened file_handle is never closed here;
        # presumably left to GC -- confirm or wrap in a context manager.
        copyfileobj(buffer_io, file_handle)
        buffer_io.close()
    def get_string_as(self, **kwargs):
        """Get string of all sequences.
        Parameters
        ----------
        kwargs
            Compatibility. Will be passed to :meth:`pmaf.sequence.Nucleotide.write` method.
        Returns
        -------
        String with formatted sequence data
        """
        buffer_io = self.__make_fasta_io(**kwargs)
        ret = buffer_io.getvalue()
        buffer_io.close()
        return ret
    def __make_fasta_io(self, **kwargs):
        """Make a FASTA file IO stream."""
        buffer_io = StringIO()
        # Append each member in order, then rewind so callers can read.
        for sequence in self.__sequences:
            sequence.write(buffer_io, mode="a", **kwargs)
        buffer_io.seek(0)
        return buffer_io
    @classmethod
    def from_buckled(
        cls, sequences: Any, buckled_pack: dict, **kwargs: Any
    ) -> "MultiSequence":
        """Factory method to create :class:`.MultiSequence` using packed
        metadata from buckling.
        Parameters
        ----------
        sequences
            Sequences that will be passed to constructor
        buckled_pack
            Packed metadata produced during buckling
        kwargs
            Compatibility
        Returns
        -------
        New instance of :class:`.MultiSequence`
        """
        if not isinstance(buckled_pack, dict):
            raise TypeError("`buckled_pack` must have dict type.")
        tmp_multiseq = cls(sequences, buckled=True, **kwargs)
        tmp_multiseq.restore_buckle(buckled_pack)
        return tmp_multiseq
    @property
    def count(self):
        """Total number of sequences."""
        return len(self.__sequences)
    @property
    def metadata(self):
        """Instance metadata."""
        return self.__metadata
    @property
    def mode(self):
        """Mode/type of the sequences."""
        return self.__mode
    @property
    def skbio_mode(self):
        """The :mod:`skbio` mode of the sequence."""
        return self.__skbio_mode
    @property
    def sequences(self):
        """List of individual sequence instances."""
        return self.__sequences
    @property
    def name(self):
        """Name of the instance."""
        return self.__name
    @property
    def is_alignment(self):
        """Is mutli-sequence is aligned or not."""
        return self.__aligned
    @property
    def is_buckled(self):
        """Is mulit-sequence instance is buckled or not."""
        return self.__buckled
    @property
    def index(self):
        """Indices of the internals sequences."""
        return self.__indices
| 33.871795
| 91
| 0.566719
| 1,515
| 14,531
| 5.228383
| 0.155776
| 0.019694
| 0.016412
| 0.0101
| 0.14733
| 0.093801
| 0.056054
| 0.048731
| 0.040399
| 0.020452
| 0
| 0.001149
| 0.341064
| 14,531
| 428
| 92
| 33.950935
| 0.82611
| 0.19627
| 0
| 0.199234
| 0
| 0
| 0.088363
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091954
| false
| 0
| 0.057471
| 0
| 0.229885
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5db8882e50338e2cfe3830ff393ba99f5232ba1
| 1,498
|
py
|
Python
|
arvore_derivacao.py
|
rjribeiro/trabalho-formais
|
358de668cc256c696fdc4b426a69cf5a3d17b511
|
[
"MIT"
] | 3
|
2018-04-28T15:55:50.000Z
|
2018-05-11T22:57:20.000Z
|
arvore_derivacao.py
|
rjribeiro/trabalho-formais
|
358de668cc256c696fdc4b426a69cf5a3d17b511
|
[
"MIT"
] | null | null | null |
arvore_derivacao.py
|
rjribeiro/trabalho-formais
|
358de668cc256c696fdc4b426a69cf5a3d17b511
|
[
"MIT"
] | null | null | null |
class ArvoreDerivacao:
    """Binary derivation-tree node: a symbol plus optional left/right subtrees."""
    def __init__(self, conteudo, esquerda=None, direita=None):
        self._conteudo = conteudo
        self._esquerda = esquerda
        self._direita = direita
        # Public view of the two children (either may be None).
        self.children = [self._esquerda, self._direita]
    @property
    def conteudo(self):
        """Symbol stored at this node."""
        return self._conteudo
    def print_arvore(self, nivel=1):
        """Print the whole subtree rooted here, starting at level `nivel`.

        :param nivel: level of this node (also used as print padding).
        :type nivel: int
        :rtype: None
        """
        print("Nível {espacos}: {:>{espacos}}".format(self._conteudo, espacos=nivel))
        # The right subtree is printed before the left one, preserving the
        # original traversal order.
        for filho in (self._direita, self._esquerda):
            if filho:
                filho.print_arvore(nivel + 1)
    def palavra_gerada(self):
        """Return the word derived by this tree (concatenation of its leaves).

        :rtype: str
        """
        if not self._esquerda and not self._direita:
            return self._conteudo
        parte_esq = self._esquerda.palavra_gerada() if self._esquerda else ""
        parte_dir = self._direita.palavra_gerada() if self._direita else ""
        return parte_esq + parte_dir
if __name__ == '__main__':
    # Smoke test: build the derivation S -> A B, A -> a, B -> b and print it.
    a = ArvoreDerivacao('a')
    b = ArvoreDerivacao('b')
    A = ArvoreDerivacao('A', a)
    B = ArvoreDerivacao('B', b)
    S = ArvoreDerivacao('S', A, B)
    S.print_arvore()
| 27.740741
| 85
| 0.579439
| 160
| 1,498
| 5.19375
| 0.31875
| 0.101083
| 0.045728
| 0.040915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002918
| 0.313752
| 1,498
| 53
| 86
| 28.264151
| 0.805447
| 0.138852
| 0
| 0.235294
| 0
| 0
| 0.035774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0
| 0.029412
| 0.235294
| 0.147059
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5dca4db049c83c9e0aaf82c2743e38347886e01
| 1,404
|
py
|
Python
|
src/test.py
|
biqar/hypergraph-study
|
04b54117eb8f684a72259b27b03162efb4c18cd0
|
[
"MIT"
] | 2
|
2021-12-24T12:02:48.000Z
|
2021-12-25T00:00:22.000Z
|
src/test.py
|
biqar/hypergraph-study
|
04b54117eb8f684a72259b27b03162efb4c18cd0
|
[
"MIT"
] | null | null | null |
src/test.py
|
biqar/hypergraph-study
|
04b54117eb8f684a72259b27b03162efb4c18cd0
|
[
"MIT"
] | 1
|
2021-07-19T02:05:13.000Z
|
2021-07-19T02:05:13.000Z
|
import re
import sys
from operator import add
from pyspark.sql import SparkSession
def computeContribs(urls, rank):
    """Yield each URL's equal share of `rank` (rank split over len(urls))."""
    num_urls = len(urls)
    # The inner generator keeps the division lazy, so an empty `urls`
    # yields nothing instead of dividing by zero.
    yield from ((url, rank / num_urls) for url in urls)
def parseNeighbors(urls):
    """Split a whitespace-separated URL string and yield every unordered pair.

    For tokens t0..tn this yields (ti, tj) for all i < j, i.e. all 2-element
    combinations in input order.  (The previous docstring claimed a single
    "urls pair" was parsed, which did not match the implementation.)
    """
    from itertools import combinations
    parts = re.split(r'\s+', urls)
    # combinations(parts, 2) reproduces the original nested i<j loops exactly.
    yield from combinations(parts, 2)
if __name__ == "__main__":
    # Require exactly <file> and <iterations> on the command line.
    if len(sys.argv) != 3:
        print("Usage: pagerank <file> <iterations>", file=sys.stderr)
        sys.exit(-1)
    print("WARN: This is a naive implementation of PageRank and is given as an example!\n" +
        "Please refer to PageRank implementation provided by graphx",
        file=sys.stderr)
    # Initialize the spark context.
    spark = SparkSession\
        .builder\
        .appName("PythonPageRank")\
        .getOrCreate()
    # Loads in input file. It should be in format of:
    # URL neighbor URL
    # URL neighbor URL
    # URL neighbor URL
    # ...
    lines = spark.read.text(sys.argv[1]).rdd.map(lambda r: r[0])
    print("ALL LINKS",lines.collect())
    # Build the distinct neighbor-pair RDD grouped by source token.
    # NOTE(review): sys.argv[2] (<iterations>) is validated but never used
    # below -- the PageRank iteration loop appears to be missing; confirm.
    links = lines.flatMap(lambda urls: parseNeighbors(urls)).distinct().groupByKey().cache()
    print("ALL LINKS",links.collect())
| 29.25
| 92
| 0.608262
| 185
| 1,404
| 4.562162
| 0.513514
| 0.0391
| 0.049763
| 0.040284
| 0.049763
| 0.049763
| 0
| 0
| 0
| 0
| 0
| 0.003906
| 0.270655
| 1,404
| 47
| 93
| 29.87234
| 0.820313
| 0.19302
| 0
| 0
| 0
| 0
| 0.191585
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.137931
| 0
| 0.206897
| 0.137931
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5df0a5e25ad5c8a611b093330f6ecc81a28362f
| 1,312
|
py
|
Python
|
wagtail_lightadmin/wagtail_hooks.py
|
leukeleu/wagtail_lightadmin
|
6aa465e2673f4eb8865f7b4dc6cd2c7c41ed71a5
|
[
"MIT"
] | 4
|
2019-02-22T14:07:26.000Z
|
2020-04-20T05:33:39.000Z
|
wagtail_lightadmin/wagtail_hooks.py
|
leukeleu/wagtail_lightadmin
|
6aa465e2673f4eb8865f7b4dc6cd2c7c41ed71a5
|
[
"MIT"
] | 1
|
2019-05-18T08:04:32.000Z
|
2019-05-20T13:39:14.000Z
|
wagtail_lightadmin/wagtail_hooks.py
|
leukeleu/wagtail_lightadmin
|
6aa465e2673f4eb8865f7b4dc6cd2c7c41ed71a5
|
[
"MIT"
] | 2
|
2017-06-06T09:34:53.000Z
|
2019-09-10T16:16:12.000Z
|
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.templatetags.static import static
from django.utils.html import format_html
from django.utils.module_loading import import_string
from wagtail.core import hooks
@hooks.register('insert_editor_css')
def editor_css():
    """Inject the light-admin editor stylesheet into the Wagtail editor."""
    stylesheet_url = static('css/admin_editor.css')
    return format_html('<link rel="stylesheet" href="{}">', stylesheet_url)
@hooks.register('insert_editor_js')
def editor_js():
    """Inject the admin link widget and page-chooser scripts into the editor."""
    template = """
        <script type="text/javascript" src="{0}"></script>
        <script type="text/javascript" src="{1}"></script>
        """
    return format_html(
        template,
        static('js/wagtailadmin/admin_link_widget.js'),
        static('wagtailadmin/js/page-chooser-modal.js'),
    )
@hooks.register('insert_editor_js')
def editor_js_hallo():
    """
    We need an extra JS file for Wagtail<1.12.x

    Picks the legacy hallo bootstrap for minor versions below 12, the stock
    Wagtail one otherwise.
    """
    import wagtail
    # Index the minor component: the original strict 3-tuple unpacking
    # (`_, version, _, = ...split('.')`) raised ValueError on versions with
    # a different number of components, e.g. "2.16".
    # NOTE(review): the major version is ignored, so e.g. "2.11" also gets
    # the legacy bootstrap -- confirm that matches the original intent.
    version = int(wagtail.__version__.split('.')[1])
    if version < 12:
        # Use our custom hallo-bootstrap
        js = static('js/wagtailadmin/lighter-hallo-bootstrap.js')
    else:
        js = static('wagtailadmin/js/hallo-bootstrap.js')
    return format_html(
        """
        <script type="text/javascript" src="{0}"></script>
        """,
        js
    )
| 26.24
| 65
| 0.641768
| 159
| 1,312
| 5.100629
| 0.396226
| 0.049322
| 0.070284
| 0.092478
| 0.252774
| 0.219482
| 0.219482
| 0.219482
| 0.128237
| 0.128237
| 0
| 0.00782
| 0.220274
| 1,312
| 49
| 66
| 26.77551
| 0.784946
| 0.057165
| 0
| 0.16129
| 0
| 0
| 0.251497
| 0.148703
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.225806
| 0.064516
| 0.419355
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5e4666915212b8f6b0b15dc2449a686ce496e42
| 5,633
|
py
|
Python
|
stackdriver/restapi.py
|
MarkMarine/stackdriver-client-python
|
7e5e5806d02fcf4b8633d19adbce6d64f3082083
|
[
"Apache-2.0"
] | null | null | null |
stackdriver/restapi.py
|
MarkMarine/stackdriver-client-python
|
7e5e5806d02fcf4b8633d19adbce6d64f3082083
|
[
"Apache-2.0"
] | null | null | null |
stackdriver/restapi.py
|
MarkMarine/stackdriver-client-python
|
7e5e5806d02fcf4b8633d19adbce6d64f3082083
|
[
"Apache-2.0"
] | null | null | null |
"""
restapi - base for calling rest resources
Stackdriver Public API, Copyright Stackdriver 2014
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import requests
import copy
import types
import json
import logging
logger = logging.getLogger(__name__)
def _wrap_transport_decorator(transport_func, wrapper, userdata):
def inner(*args, **kwargs):
return wrapper(transport_func, userdata=userdata, func_args=args, func_kwargs=kwargs)
return inner
def transport_func(func):
    """Mark `func` as a transport function eligible for controller wrapping.

    Sets the `_is_transport_func` flag checked by the transport machinery and
    returns the function unchanged.
    """
    setattr(func, '_is_transport_func', True)
    return func
class RestApi(object):
def __init__(self, entrypoint_uri, version=None, apikey=None, username=None, password=None, useragent=None, transport_controller=None, transport_userdata=None):
"""
Base class for accessing REST services
:param entrypoint_path: The http or https uri to the api
:param version: version of the api we support
:param apikey: the stackdriver apikey to use for authentication
:param username: username for basic auth - this is here for completeness but for the stackdriver apis auth should be done using the apikey
:param password: password for basic auth - this is here for completeness but for the stackdriver apis auth should be done using the apikey
:param transport_controller: if defined run this function before each network call
:param transport_userdata: data to send to the transport_controller
"""
# always end with a slash
entrypoint_uri = entrypoint_uri.strip()
if entrypoint_uri[-1] != '/':
entrypoint_uri += '/'
self._entrypoint_uri = entrypoint_uri
self._apikey = apikey
self._username = username
self._password = password
self._version = version
self._useragent = useragent
if transport_controller:
self._decorate_transport_funcs(transport_controller, transport_userdata)
def _decorate_transport_funcs(self, controller, userdata):
""" decorate all methods that have an attribute of _is_transport_func set to True
skip any methods that start with an underscore (_)
SEE @transport_func decorator
"""
for method_name in dir(self):
if method_name.startswith('_'):
continue
method = getattr(self, method_name, None)
if isinstance(method, types.MethodType):
setattr(self, method_name, _wrap_transport_decorator(method, controller, userdata))
def _merge_headers(self, extra, is_post=False):
headers = {}
if extra is not None:
headers = copy.copy(extra)
if self._apikey:
headers['x-stackdriver-apikey'] = self._apikey
headers['x-stackdriver-version'] = self._version
if is_post:
headers['accept'] = 'application/json, text/plain, */*'
headers['content-type'] = 'application/json'
if self._useragent:
headers['user-agent'] = self._useragent
return headers
def _gen_full_endpoint(self, endpoint_path):
if endpoint_path.startswith('/'):
endpoint_path = endpoint_path[1:]
return '%s%s' % (self._entrypoint_uri, endpoint_path)
@transport_func
def get(self, endpoint, params=None, headers=None):
headers = self._merge_headers(headers)
uri = self._gen_full_endpoint(endpoint)
logger.debug('GET %s', uri, extra={'params': params})
r = requests.get(uri, params=params, headers=headers)
r.raise_for_status()
return r.json()
@transport_func
def post(self, endpoint, data=None, headers=None):
headers = self._merge_headers(headers, is_post=True)
uri = self._gen_full_endpoint(endpoint)
logger.debug('POST %s', uri, extra={'data': data})
r = requests.post(uri, data=json.dumps(data), headers=headers)
r.raise_for_status()
return r.json()
@transport_func
def put(self, endpoint, data=None, headers=None):
    """PUT ``data`` serialized as JSON and return the decoded JSON body.

    Raises requests.HTTPError on any non-2xx response.
    """
    uri = self._gen_full_endpoint(endpoint)
    merged = self._merge_headers(headers, is_post=True)
    logger.debug('PUT %s', uri, extra={'data': data})
    response = requests.put(uri, data=json.dumps(data), headers=merged)
    response.raise_for_status()
    return response.json()
@transport_func
def delete(self, endpoint, headers=None):
    """Issue a DELETE and return the decoded JSON body.

    NOTE(review): sends the POST-style accept/content-type headers
    (is_post=True) and always calls .json() - a DELETE returning an empty
    body would raise here. Confirm the API always answers with JSON.
    """
    uri = self._gen_full_endpoint(endpoint)
    merged = self._merge_headers(headers, is_post=True)
    logger.debug('DELETE %s', uri)
    response = requests.delete(uri, headers=merged)
    response.raise_for_status()
    return response.json()
@property
def api_version(self):
    # API version string supplied at construction; also sent in the
    # x-stackdriver-version header on every request.
    return self._version
@property
def entrypoint(self):
    # Base URI for all requests; normalized at construction to end with '/'.
    return self._entrypoint_uri
| 35.427673
| 164
| 0.680987
| 714
| 5,633
| 5.205882
| 0.27591
| 0.034974
| 0.020178
| 0.023675
| 0.256121
| 0.240517
| 0.240517
| 0.226527
| 0.20339
| 0.192628
| 0
| 0.002322
| 0.235399
| 5,633
| 158
| 165
| 35.651899
| 0.860692
| 0.312445
| 0
| 0.238636
| 0
| 0
| 0.044908
| 0.005613
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147727
| false
| 0.022727
| 0.056818
| 0.034091
| 0.340909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5e7acf2b322f72151a720e8d6b6a7577bf377de
| 13,896
|
py
|
Python
|
ventana_perceptron.py
|
musicbiker/ANNT
|
301f1090925c8937f0fd3b4955ec68ff772022ce
|
[
"MIT"
] | null | null | null |
ventana_perceptron.py
|
musicbiker/ANNT
|
301f1090925c8937f0fd3b4955ec68ff772022ce
|
[
"MIT"
] | null | null | null |
ventana_perceptron.py
|
musicbiker/ANNT
|
301f1090925c8937f0fd3b4955ec68ff772022ce
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 15:05:41 2019
@author: jrodriguez119
"""
import tkinter as tk
from tkinter import ttk
import crearcapas
import perceptron_multicapa
from threading import Thread
import sys
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
from tkinter import filedialog as fd
from tkinter import messagebox as mb
import menu
import sklearn
class Display(tk.Frame):
    """Text frame that captures sys.stdout so print() output shows in the GUI."""
    def __init__(self, parent=0):
        tk.Frame.__init__(self, parent)
        self.output = tk.Text(self, width=80, height=15)
        self.output.pack(padx=30, pady=5,)
        # Redirect all subsequent print() calls in the process to this widget.
        sys.stdout = self
        self.pack()
    def flush(self):
        # File-like no-op so code that flushes stdout does not fail.
        pass
    def write(self, txt):
        # Append text and keep the newest output visible.
        self.output.insert(tk.END, str(txt))
        self.output.see("end")
        self.update_idletasks()
# Builds the parameter window for the multilayer perceptron.
def Ventana_perceptron(ventana_seleccion, X_train, Y_train, X_test, Y_test, ventana_inicio):
    """Open the multilayer-perceptron configuration window.

    Collects the training hyper-parameters from tkinter widgets, validates
    them, and launches training (perceptron_multicapa.Perceptron_multicapa)
    in a background thread while a Display widget captures its output.

    :param ventana_seleccion: previous window; hidden while this one is open
    :param X_train, Y_train, X_test, Y_test: train/test data forwarded
        untouched to the training routine
    :param ventana_inicio: root window, passed to the shared menu builder
    """
    # Create the window
    ventana_perceptron = tk.Toplevel(ventana_seleccion)
    ventana_perceptron.geometry('725x600+500+200')
    # Attach the shared application menu
    menu.menu(ventana_perceptron, ventana_inicio)
    # Hide the previous window
    ventana_seleccion.withdraw()
    # Title
    labeltitulo = ttk.Label(ventana_perceptron, text="Parámetros necesarios para el Perceptrón",
                            foreground="#054FAA", font=("Arial Bold", 15))
    labeltitulo.pack(pady=10)
    # Frame holding the input widgets
    lframe = ttk.Frame(ventana_perceptron)
    lframe.pack()
    # ------------------------ input widgets ---------------------------------
    # Batch size
    tamlot = tk.IntVar()
    lbtamlote = ttk.Label(lframe, text="Tamaño lote: ",
                          foreground="#054FAA", font=("Arial Bold", 12))
    lbtamlote.grid(column=0, row=0, pady=5, sticky=tk.W)
    etamlot = ttk.Entry(lframe, width=5, textvariable=tamlot)
    etamlot.grid(column=1, row=0, pady=5, sticky=tk.E)
    # Optimizer
    opt = tk.StringVar()
    lbopt = ttk.Label(lframe, text="Optimizador: ",
                      foreground="#054FAA", font=("Arial Bold", 12))
    lbopt.grid(column=0, row=1, pady=5, sticky=tk.W)
    cbopt = ttk.Combobox(lframe, width=9, state="readonly", textvariable=opt)
    cbopt["values"] = ["SGD", "RMSProp", "Adam", "Adagrad"]
    cbopt.grid(column=1, row=1, pady=5, columnspan=2)
    cbopt.current(0)
    # Validation split (fraction of the training set)
    pv = tk.DoubleVar()
    pv.set(0.2)
    lbpv = ttk.Label(lframe, text="Proporción de Validación :",
                     foreground="#054FAA", font=("Arial Bold", 12))
    lbpv.grid(column=0, row=2, pady=5, sticky=tk.W)
    epv = ttk.Entry(lframe, width=5, textvariable=pv)
    epv.grid(column=1, row=2, pady=5, sticky=tk.E)
    # Number of hidden layers
    nco = tk.IntVar()
    lbnco = ttk.Label(lframe, text="Número capas ocultas :",
                      foreground="#054FAA", font=("Arial Bold", 12))
    lbnco.grid(column=0, row=3, pady=5, sticky=tk.W)
    enco = ttk.Entry(lframe, width=5, textvariable=nco)
    enco.grid(column=1, row=3, pady=5, sticky=tk.E)
    # Loss function
    fl = tk.StringVar()
    lbfl = ttk.Label(lframe, text="Función Loss: ",
                     foreground="#054FAA", font=("Arial Bold", 12))
    lbfl.grid(column=0, row=4, pady=5, sticky=tk.W)
    cbfl = ttk.Combobox(lframe, width=21, state="readonly", textvariable=fl)
    cbfl["values"] = ["kullback_leibler_divergence", "mean_squared_error", "categorical_hinge",
                      "categorical_crossentropy", "binary_crossentropy", "poisson", "cosine_proximity"]
    cbfl.grid(column=1, row=4, pady=5, columnspan=2, sticky=tk.E)
    cbfl.current(3)
    # Stopping method section
    labeltitulo1 = ttk.Label(ventana_perceptron, text="Método de parada",
                             foreground="#054FAA", font=("Arial Bold", 15))
    labeltitulo1.pack(pady=10)
    lframe1 = ttk.Frame(ventana_perceptron)
    lframe1.pack()
    # Stop criterion selector
    # Option 0: stop after a fixed number of iterations
    mp = tk.IntVar()
    bat1 = ttk.Radiobutton(lframe1, value=0, variable=mp)
    bat1.grid(column=0, row=0)
    nui = tk.IntVar()
    lbnui = ttk.Label(lframe1, text="Número de iteraciones: ",
                      foreground="#054FAA", font=("Arial Bold", 12))
    lbnui.grid(column=1, row=0, pady=5, sticky=tk.W)
    enui = ttk.Entry(lframe1, width=5, textvariable=nui)
    enui.grid(column=2, row=0, pady=5, sticky=tk.E)
    # Option 1: early stopping controlled by a monitored metric
    bat2 = ttk.Radiobutton(lframe1, value=1, variable=mp)
    bat2.grid(column=0, row=1)
    lbparada = ttk.Label(lframe1, text="Parada temprana: ",
                         foreground="#054FAA", font=("Arial Bold", 12))
    lbparada.grid(column=1, row=1, sticky=tk.W)
    # Metric to monitor for early stopping
    lbcon = ttk.Label(lframe1, text=" Parámetro a controlar: ",
                      foreground="#054FAA", font=("Arial Bold", 12))
    lbcon.grid(column=1, row=2, pady=5, sticky=tk.W)
    con = tk.StringVar()
    cbcon = ttk.Combobox(lframe1, width=9, state="readonly", textvariable=con)
    cbcon["values"] = ["loss", "val_loss", "acc", "val_acc"]
    cbcon.grid(column=2, row=2, pady=5, sticky=tk.E)
    cbcon.current(0)
    # Minimum improvement delta
    delt = tk.DoubleVar()
    delt.set(0.001)
    lbdelt = ttk.Label(lframe1, text=" Delta min: ",
                       foreground="#054FAA", font=("Arial Bold", 12))
    lbdelt.grid(column=1, row=3, pady=5, sticky=tk.W)
    edelt = ttk.Entry(lframe1, width=5, textvariable=delt)
    edelt.grid(column=2, row=3, pady=5, sticky=tk.E)
    # Patience (epochs without improvement) before stopping
    pat = tk.IntVar()
    pat.set(3)
    lbpat = ttk.Label(lframe1, text=" Paciencia: ",
                      foreground="#054FAA", font=("Arial Bold", 12))
    lbpat.grid(column=1, row=4, pady=5, sticky=tk.W)
    epat = ttk.Entry(lframe1, width=5, textvariable=pat)
    epat.grid(column=2, row=4, pady=5, sticky=tk.E)
    # Opens an auxiliary window that lets the user define the hidden layers.
    def crearmodelo():
        # NOTE(review): the layer definitions are shared through globals so
        # that entrenar() can read them later - fragile across multiple windows.
        global NO, AC, BA, DR, numero_capas
        numero_capas = int(nco.get())
        NO, AC, BA, DR = crearcapas.capas(numero_capas, ventana_perceptron)
    btnmodelo = ttk.Button(ventana_perceptron, text="Crear modelo", style='my.TButton', command=crearmodelo)
    btnmodelo.pack(pady=50)
    lframe2 = ttk.Frame(ventana_perceptron)
    lframe2.pack(side="bottom")
    def entrenar():
        """Validate the widget values and start training in background threads."""
        # Read every parameter from the UI widgets.
        lote = tamlot.get()
        optimizador = opt.get()
        prop_val = pv.get()
        numero_capas_ocultas = int(nco.get())
        loss = fl.get()
        parada = mp.get()
        iteraciones = nui.get()
        control = con.get()
        delta = delt.get()
        paciencia = pat.get()
        # Input validation: each failure pops an error dialog and aborts.
        if lote == 0:
            mb.showerror("Error", "Variable tamaño del lote = 0 ")
            return
        if prop_val == 0:
            mb.showerror("Error", "El algoritmo necesita una parte del conjunto de entrenamiento para su validación ")
            return
        if prop_val > 1:
            mb.showerror("Error", "Proporción de validación no válida ")
            return
        if numero_capas_ocultas == 0:
            mb.showerror("Error", "Variable numero de capas ocultas = 0 ")
            return
        if parada == 0 and iteraciones == 0:
            mb.showerror("Error", "No se ha indicado el número de iteraciones requeridas ")
            return
        if parada == 1 and delta == 0.0:
            mb.showerror("Error", "No se ha indicado el mínimo delta para controlar la evolución ")
            return
        # Checks that crearmodelo() was run (global NO exists).
        # NOTE(review): the `while True` is redundant - the try either breaks
        # or returns on the first pass.
        while True:
            try:
                NO
                break
            except NameError:
                mb.showerror("Error", "No se ha creado el modelo, haga click en crear modelo ")
                return
        for i in range(numero_capas_ocultas):
            if NO[i].get() == 0:
                mb.showerror("Error", "No es posible tener capas con 0 neuronas, asegurese de haber creado el modelo correctamente ")
                return
        for i in range(numero_capas_ocultas):
            if DR[i].get() > 1:
                mb.showerror("Error", "Valor Dropout no válido ")
                return
        # Window that shows training progress plus save/plot buttons.
        ventana_display = tk.Toplevel(ventana_perceptron)
        labeltitulo1 = ttk.Label(ventana_display, text="Entrenamiento",
                                 foreground="#054FAA", font=("Arial Bold", 15))
        labeltitulo1.pack(pady=5)
        # Plots the training history (accuracy and loss curves).
        def plot():
            ventana_plot = tk.Toplevel(ventana_perceptron)
            ventana_plot.geometry('900x600')
            f = Figure(figsize=(5, 5), dpi=100)
            a = f.add_subplot(121)
            b = f.add_subplot(122)
            # Summarize history for accuracy.
            a.plot(entrenamiento.history['acc'])
            a.plot(entrenamiento.history['val_acc'])
            a.set_title('Precisión del modelo')
            a.set_ylabel('Precisión')
            a.set_xlabel('Iteraciones')
            a.legend(['Entrenamiento', 'Validación'], loc='upper left')
            # summarize history for loss
            b.plot(entrenamiento.history['loss'])
            b.plot(entrenamiento.history['val_loss'])
            b.set_title('Loss del modelo')
            b.set_ylabel('Loss')
            b.set_xlabel('Iteraciones')
            b.legend(['Entrenamiento', 'Validación'], loc='upper left')
            canvas1 = FigureCanvasTkAgg(f, ventana_plot)
            canvas1.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
            toolbar = NavigationToolbar2Tk(canvas1, ventana_plot)
            toolbar.update()
            canvas1._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
        def guardarcompl():
            # Save the full model (architecture + weights) to an .h5 file.
            nombrearch = fd.asksaveasfilename(initialdir="/", title="Guardar como", defaultextension='h5')
            model.save(nombrearch)
            mb.showinfo("Información", "Los datos fueron guardados.")
        def guardarpesos():
            # Save only the trained weights to an .h5 file.
            nombrearch = fd.asksaveasfilename(initialdir="/", title="Guardar como", defaultextension='h5')
            model.save_weights(nombrearch)
            mb.showinfo("Información", "Los datos fueron guardados.")
        def atras():
            ventana_display.destroy()
        framebotones = ttk.Frame(ventana_display)
        framebotones.pack(side="bottom")
        btnguardarcompl = ttk.Button(framebotones, text="Modelo completo",
                                     command=guardarcompl, style='my.TButton', width=15)
        btnguardarcompl.grid(row=0, column=0, padx=10, pady=5, sticky=tk.W)
        btnguardarpesos = ttk.Button(framebotones, text="Pesos",
                                     command=guardarpesos, style='my.TButton', width=15)
        btnguardarpesos.grid(row=0, column=1, padx=10, pady=5, sticky=tk.W)
        btnplot = ttk.Button(framebotones, text="Plot",
                             command=plot, style='my.TButton', width=15)
        btnplot.grid(row=1, column=0, padx=10, pady=5, sticky=tk.W)
        btnatras = ttk.Button(framebotones, text="Atrás",
                              command=atras, style='my.TButton', width=15)
        btnatras.grid(row=1, column=1, padx=10, pady=5, sticky=tk.W)
        def pantalla():
            # NOTE(review): `global Display` names the module-level Display
            # class; instantiating it rebinds sys.stdout to the new widget.
            global Display
            Display(ventana_display)
        def run():
            # Runs the training routine; results are published via globals so
            # the plot/save callbacks above can reach them.
            global model, entrenamiento
            while True:
                try:
                    model, entrenamiento = perceptron_multicapa.Perceptron_multicapa(ventana_perceptron, ventana_display, X_train, Y_train, X_test, Y_test,
                                                                                     lote, optimizador, prop_val, numero_capas_ocultas, loss,
                                                                                     parada, iteraciones, control, delta, paciencia, NO, AC, BA, DR)
                    break
                except tk.TclError:
                    mb.showerror("Error desconocido", "Por favor vuelva a intentarlo ")
                    ventana_display.destroy()
                    return
                except RuntimeError:
                    mb.showerror("Error desconocido", "Por favor reinicie la aplicación ")
                    ventana_display.destroy()
                    return
                # NOTE(review): catching a Warning class as an exception only
                # works if warnings are raised as errors; additionally,
                # sklearn.metrics.classification was removed in sklearn >= 0.24,
                # so evaluating this except clause would itself raise
                # AttributeError there - confirm the pinned sklearn version.
                except sklearn.metrics.classification.UndefinedMetricWarning:
                    mb.showerror("Error ", "Algo salió mal con los datos, reinicie la aplicación y vuelva a intentarlo ")
                    ventana_display.destroy()
                    return
        # Display widget and training run on separate threads so the UI stays live.
        t1 = Thread(target=pantalla)
        t2 = Thread(target=run)
        t1.start()
        t2.start()
    btntrain = ttk.Button(lframe2, text="Entrenar", style='my.TButton', command=entrenar)
    btntrain.grid(row=0, column=1, padx=20, pady=15)
    def atras():
        # Close this window and restore the selection window.
        ventana_perceptron.destroy()
        ventana_seleccion.deiconify()
    btnatras = ttk.Button(lframe2, text="Atras", style='my.TButton', command=atras)
    btnatras.grid(row=0, column=0, padx=20, pady=15)
| 40.750733
| 151
| 0.573258
| 1,594
| 13,896
| 4.937265
| 0.235257
| 0.015248
| 0.027954
| 0.033037
| 0.310165
| 0.250445
| 0.149301
| 0.115121
| 0.10953
| 0.020839
| 0
| 0.031192
| 0.307858
| 13,896
| 340
| 152
| 40.870588
| 0.787066
| 0.062608
| 0
| 0.172691
| 0
| 0
| 0.143286
| 0.004031
| 0
| 0
| 0
| 0.002941
| 0
| 1
| 0.052209
| false
| 0.004016
| 0.048193
| 0
| 0.15261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5ea06e0a07718613f62378639588110228f7035
| 728
|
py
|
Python
|
secu/tests/user_post_test.py
|
wancy86/tornado-seed
|
bea842f4ba6b23dda53ec9ae9f1349e1d2b54fd3
|
[
"MIT"
] | null | null | null |
secu/tests/user_post_test.py
|
wancy86/tornado-seed
|
bea842f4ba6b23dda53ec9ae9f1349e1d2b54fd3
|
[
"MIT"
] | null | null | null |
secu/tests/user_post_test.py
|
wancy86/tornado-seed
|
bea842f4ba6b23dda53ec9ae9f1349e1d2b54fd3
|
[
"MIT"
] | null | null | null |
import requests
from ..base.test import BaseTestCase, AuthorizedTestCase
import uuid
import common
class T(AuthorizedTestCase):
    """POST /service/secu/user - creating a user with valid data returns code '000'."""

    @property
    def path(self):
        # Endpoint under test; presumably the base class derives self.url
        # from this - confirm against AuthorizedTestCase.
        return '/service/secu/user'

    def setUp(self):
        super().setUp()
        # A complete, valid user payload.
        self.data = {
            'username': 'myao',
            'email': '1343030803@qq.com',
            'mobile': '18665369920',
            'pwd': '123456',
            'fullname': '姚贯伟',
            'roles': ''
        }

    def test_by_correct_info(self):
        response = requests.post(self.url, json=self.data)
        self.assertNotEqual(response.text, '', '返回值为空!')
        payload = response.json()
        self.assertEqual('000', payload['code'])
| 26.962963
| 59
| 0.542582
| 70
| 728
| 5.6
| 0.7
| 0.045918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060484
| 0.318681
| 728
| 27
| 60
| 26.962963
| 0.729839
| 0
| 0
| 0
| 0
| 0
| 0.152205
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 1
| 0.130435
| false
| 0
| 0.173913
| 0.043478
| 0.391304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5ef7047358651b5620e1896751f01c69ce61941
| 6,404
|
py
|
Python
|
products_and_services_client/models/monthly_price.py
|
pitzer42/opbk-br-quickstart
|
b3f86b2e5f82a6090aaefb563614e174a452383c
|
[
"MIT"
] | 2
|
2021-02-07T23:58:36.000Z
|
2021-02-08T01:03:25.000Z
|
products_and_services_client/models/monthly_price.py
|
pitzer42/opbk-br-quickstart
|
b3f86b2e5f82a6090aaefb563614e174a452383c
|
[
"MIT"
] | null | null | null |
products_and_services_client/models/monthly_price.py
|
pitzer42/opbk-br-quickstart
|
b3f86b2e5f82a6090aaefb563614e174a452383c
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
API's OpenData do Open Banking Brasil
As API's descritas neste documento são referentes as API's da fase OpenData do Open Banking Brasil. # noqa: E501
OpenAPI spec version: 1.0.0-rc5.2
Contact: apiteam@swagger.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MonthlyPrice(object):
    """Swagger-generated model for a monthly price entry.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # Attribute name -> declared swagger type.
    swagger_types = {
        'interval': 'PriceIntervals',
        'monthly_fee': 'str',
        'currency': 'Currency',
        'customers': 'Customer'
    }

    # Attribute name -> key used in the JSON representation.
    attribute_map = {
        'interval': 'interval',
        'monthly_fee': 'monthlyFee',
        'currency': 'currency',
        'customers': 'customers'
    }

    def __init__(self, interval=None, monthly_fee=None, currency=None, customers=None):  # noqa: E501
        """MonthlyPrice - a model defined in Swagger"""  # noqa: E501
        self._interval = None
        self._monthly_fee = None
        self._currency = None
        self._customers = None
        self.discriminator = None
        # Assign through the property setters so the None checks run.
        self.interval = interval
        self.monthly_fee = monthly_fee
        self.currency = currency
        self.customers = customers

    @property
    def interval(self):
        """The interval of this MonthlyPrice (PriceIntervals)."""
        return self._interval

    @interval.setter
    def interval(self, interval):
        """Set the interval; ``None`` is rejected."""
        if interval is None:
            raise ValueError("Invalid value for `interval`, must not be `None`")  # noqa: E501
        self._interval = interval

    @property
    def monthly_fee(self):
        """The monthly fee of this MonthlyPrice (str).

        Median value of the fee band for the offered service, as a monetary
        string using '.' as the only (decimal) separator, e.g. '45.00'.
        """
        return self._monthly_fee

    @monthly_fee.setter
    def monthly_fee(self, monthly_fee):
        """Set the monthly fee; ``None`` is rejected."""
        if monthly_fee is None:
            raise ValueError("Invalid value for `monthly_fee`, must not be `None`")  # noqa: E501
        self._monthly_fee = monthly_fee

    @property
    def currency(self):
        """The currency of this MonthlyPrice (Currency)."""
        return self._currency

    @currency.setter
    def currency(self, currency):
        """Set the currency; ``None`` is rejected."""
        if currency is None:
            raise ValueError("Invalid value for `currency`, must not be `None`")  # noqa: E501
        self._currency = currency

    @property
    def customers(self):
        """The customers of this MonthlyPrice (Customer)."""
        return self._customers

    @customers.setter
    def customers(self, customers):
        """Set the customers; ``None`` is rejected."""
        if customers is None:
            raise ValueError("Invalid value for `customers`, must not be `None`")  # noqa: E501
        self._customers = customers

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        if issubclass(MonthlyPrice, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is a MonthlyPrice with identical state."""
        if not isinstance(other, MonthlyPrice):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 32.841026
| 392
| 0.600562
| 767
| 6,404
| 4.920469
| 0.238592
| 0.044515
| 0.076312
| 0.069952
| 0.45416
| 0.398516
| 0.380763
| 0.222311
| 0.172231
| 0.172231
| 0
| 0.029405
| 0.304341
| 6,404
| 194
| 393
| 33.010309
| 0.817733
| 0.394441
| 0
| 0.065934
| 0
| 0
| 0.108908
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.032967
| 0
| 0.32967
| 0.021978
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5f31b512ae3b988c292e1211f6d15cfb61624fc
| 839
|
py
|
Python
|
suppy/simulator/atomics/divergence_atomic.py
|
bmaris98/suppy
|
8450c6d25ffa492cdedfbbb4c111d22e7f2788a7
|
[
"BSD-3-Clause"
] | null | null | null |
suppy/simulator/atomics/divergence_atomic.py
|
bmaris98/suppy
|
8450c6d25ffa492cdedfbbb4c111d22e7f2788a7
|
[
"BSD-3-Clause"
] | null | null | null |
suppy/simulator/atomics/divergence_atomic.py
|
bmaris98/suppy
|
8450c6d25ffa492cdedfbbb4c111d22e7f2788a7
|
[
"BSD-3-Clause"
] | null | null | null |
from suppy.utils.stats_constants import DIVERGENCE, TYPE
from typing import Any, Dict
from suppy.simulator.atomics.atomic import Atomic
class DivergenceAtomic(Atomic):
    """Zero-duration atomic that hands its loaded resource to a free output stream.

    NOTE(review): ``_all_output_clear`` returns True as soon as *any* output
    stream is free, despite its name suggesting *all* - confirm the intended
    semantics against the Atomic base class.
    """

    def __init__(self, uid: str, seh, name: str):
        # Both timing parameters are fixed to 0 for a divergence node.
        Atomic.__init__(self, uid, seh, name, 0, 0)

    def get_stats(self) -> Dict[str, Any]:
        # Extend the base stats with this node's type tag.
        stats = Atomic.get_stats(self)
        stats[TYPE] = DIVERGENCE
        return stats

    def _all_output_clear(self) -> bool:
        # True when at least one output stream can accept input.
        return any(not stream.has_input for stream in self._output_streams)

    def _do_process(self) -> None:
        # Hand the single loaded resource to the first free output stream.
        resource = self._loaded_input[0]
        for stream in self._output_streams:
            if not stream.has_input:
                stream.try_load(resource)
                return
| 31.074074
| 56
| 0.647199
| 108
| 839
| 4.75
| 0.435185
| 0.116959
| 0.042885
| 0.066277
| 0.230019
| 0.230019
| 0.230019
| 0.230019
| 0.230019
| 0.230019
| 0
| 0.004926
| 0.274136
| 839
| 27
| 57
| 31.074074
| 0.837438
| 0
| 0
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.190476
| false
| 0
| 0.142857
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5f7a4ecfa05bf78a585981771c76de8e093cf7a
| 5,180
|
py
|
Python
|
database/BuildDatabase.py
|
chanzuckerberg/scoreboard
|
7ebf783819d0f5b4dd54092201f709b8644c85a4
|
[
"MIT"
] | 8
|
2017-11-28T22:36:37.000Z
|
2020-10-20T06:46:19.000Z
|
database/BuildDatabase.py
|
chanzuckerberg/scoreboard
|
7ebf783819d0f5b4dd54092201f709b8644c85a4
|
[
"MIT"
] | 25
|
2017-12-27T19:05:41.000Z
|
2022-03-15T18:35:22.000Z
|
database/BuildDatabase.py
|
chanzuckerberg/scoreboard
|
7ebf783819d0f5b4dd54092201f709b8644c85a4
|
[
"MIT"
] | 1
|
2018-04-23T11:16:41.000Z
|
2018-04-23T11:16:41.000Z
|
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, Boolean, String, DateTime, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import func
import datetime
import os
import json
# Connection settings read from the environment. Username and password are
# required (KeyError if missing); host, port and database name fall back to
# local defaults.
database = {
    'pg_user': os.environ['SCOREBOARD_PG_USERNAME'],
    'pg_pass': os.environ['SCOREBOARD_PG_PASSWORD'],
    'pg_host': os.environ.get('SCOREBOARD_PG_HOST', 'localhost'),
    'pg_port': os.environ.get('SCOREBOARD_PG_PORT', 5432),
    'pg_database': os.environ.get('SCOREBOARD_PG_DATABASE', 'scoreboard')
}

# Build database
engine = create_engine(
    "postgresql://{pg_user}:{pg_pass}@{pg_host}:{pg_port}/{pg_database}".format(**database))
Base = declarative_base()
class User(Base):
    """A scoreboard account, identified by GitHub username."""
    __tablename__ = 'users'
    __table_args__ = {'extend_existing': True}
    id = Column(Integer, primary_key=True, nullable=False)
    github_username = Column(String, nullable=False)
    name = Column(String)
    email = Column(String)
    # Flags accounts created from initialize.json's "admins" list.
    is_admin = Column(Boolean, nullable=False)
    create_date = Column(DateTime, nullable=False, server_default=func.now())
class Challenge(Base):
    """A competition: metadata, presentation assets and scoring configuration."""
    __tablename__ = 'challenges'
    __table_args__ = {'extend_existing': True}
    id = Column(Integer, primary_key=True, nullable=False)
    name = Column(String, nullable=False)
    description = Column(String)
    docker_container = Column(String, nullable=False)
    image = Column(String)
    data_path = Column(String)
    data_size = Column(String)
    color = Column(String)
    about = Column(String)
    example_file = Column(String)
    # Expected header and field separator for uploaded submission files.
    submission_header = Column(JSONB)
    submission_separator = Column(String, default=",")
    scores = Column(JSONB)
    subscores = Column(JSONB)
    start_date = Column(DateTime, nullable=False, server_default=func.now())
    end_date = Column(DateTime)
    is_open = Column(Boolean, nullable=False, default=True)
    create_date = Column(DateTime, nullable=False, server_default=func.now())
class Dataset(Base):
    """A dataset belonging to a challenge; removed when its challenge is deleted."""
    __tablename__ = 'datasets'
    __table_args__ = {'extend_existing': True}
    id = Column(Integer, primary_key=True, nullable=False)
    name = Column(String, nullable=False)
    description = Column(String)
    tree = Column(JSONB)
    challenge_id = Column(Integer, ForeignKey("challenges.id", onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
    create_date = Column(DateTime, nullable=False, server_default=func.now())
class Submission(Base):
    """A user's entry to a challenge, pointing at a code repository."""
    __tablename__ = 'submissions'
    __table_args__ = {'extend_existing': True}
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("users.id", onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
    challenge_id = Column(Integer, ForeignKey("challenges.id", onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
    name = Column(String, nullable=False)
    repository = Column(String, nullable=False)
    is_private = Column(Boolean, nullable=False)
    institution = Column(String)
    publication = Column(String)
    is_accepted = Column(Boolean, nullable=False)
    create_date = Column(DateTime, nullable=False, server_default=func.now())
class Result(Base):
    """A scored run of a submission; removed when its submission is deleted."""
    __tablename__ = 'results'
    __table_args__ = {'extend_existing': True}
    id = Column(Integer, primary_key=True)
    submission_id = Column(Integer, ForeignKey("submissions.id", onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
    results_path = Column(String, nullable=False)
    score_data = Column(JSONB)
    # Marks the result currently displayed for its submission.
    is_current = Column(Boolean, nullable=False)
    submission_date = Column(DateTime, nullable=False, server_default=func.now())
    create_date = Column(DateTime, nullable=False, server_default=func.now())
class AdminEmailSettings(Base):
    """Outbound email account used by the application.

    NOTE(review): email_pass is stored in plain text in the database -
    consider a secrets store instead.
    """
    __tablename__ = 'email_settings'
    __table_args__ = {'extend_existing': True}
    id = Column(Integer, primary_key=True, nullable=False)
    email_provider = Column(String, nullable=False)
    email_address = Column(String, nullable=False)
    email_pass = Column(String, nullable=False)
# Create all tables declared above, then seed them from initialize.json.
Base.metadata.create_all(engine)

# Load Data
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()

with open("initialize.json") as fh:
    initialize_data = json.load(fh)

# Each challenge entry may carry a nested "datasets" list; pop it so the
# remaining keys map 1:1 onto Challenge columns.
for challenge in initialize_data["challenges"]:
    datasets = challenge.pop('datasets', [])
    new_challenge = Challenge(**challenge)
    session.add(new_challenge)
    # Flush + refresh to obtain the generated challenge id for the datasets.
    session.flush()
    session.refresh(new_challenge)
    challenge_id = new_challenge.id
    for dataset in datasets:
        dataset["challenge_id"] = challenge_id
        new_dataset = Dataset(**dataset)
        session.add(new_dataset)

# "admins" is a list of GitHub usernames.
for admin in initialize_data["admins"]:
    new_user = User(github_username=admin, is_admin=True)
    session.add(new_user)

email_settings = initialize_data["email_settings"]
settings = AdminEmailSettings(email_provider=email_settings["email_provider"],
                              email_address= email_settings["admin_email"],
                              email_pass=email_settings["admin_pass"])
session.add(settings)
session.commit()
| 37
| 121
| 0.72471
| 610
| 5,180
| 5.898361
| 0.196721
| 0.108394
| 0.04169
| 0.069483
| 0.392996
| 0.348249
| 0.348249
| 0.315175
| 0.315175
| 0.286826
| 0
| 0.000917
| 0.157915
| 5,180
| 139
| 122
| 37.266187
| 0.823934
| 0.004633
| 0
| 0.210526
| 0
| 0
| 0.111866
| 0.025636
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.035088
| 0.078947
| 0
| 0.684211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5fbd11dbc9a0e80007cdb92a40b5c8dd7191ce7
| 8,387
|
py
|
Python
|
packages/w3af/w3af/core/data/url/HTTPRequest.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
packages/w3af/w3af/core/data/url/HTTPRequest.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
packages/w3af/w3af/core/data/url/HTTPRequest.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
"""
HTTPRequest.py
Copyright 2010 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import copy
import socket
import urllib2
from w3af.core.data.dc.headers import Headers
from w3af.core.data.dc.utils.token import DataToken
from w3af.core.data.parsers.doc.url import URL
from w3af.core.data.request.request_mixin import RequestMixIn
from w3af.core.data.url.constants import MAX_HTTP_RETRIES
class HTTPRequest(RequestMixIn, urllib2.Request):
def __init__(self, url,
             data=None,
             headers=None,
             origin_req_host=None,
             unverifiable=False,
             cookies=True,
             session=None,
             cache=False,
             method=None,
             error_handling=True,
             retries=MAX_HTTP_RETRIES,
             timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
             new_connection=False,
             follow_redirects=False,
             use_basic_auth=True,
             use_proxy=True,
             debugging_id=None,
             binary_response=False):
    """
    This is a simple wrapper around a urllib2 request object which helps
    with some common tasks like serialization, cache, etc.

    :param url: URL object for the target (must provide url_encode())
    :param method: None means choose the default (POST if data is not None)
    :param data: The post_data as a string
    :param headers: Headers instance or dict; DataToken values are collapsed
                    to plain strings before being handed to urllib2
    """
    headers = headers or Headers()

    #
    # Save some information for later access in an easier way
    #
    self.url_object = url
    self.cookies = cookies
    self.session = session
    self.get_from_cache = cache
    self.error_handling = error_handling
    self.retries_left = retries
    self.timeout = timeout
    self.new_connection = new_connection
    self.follow_redirects = follow_redirects
    self.use_basic_auth = use_basic_auth
    self.use_proxy = use_proxy
    self.debugging_id = debugging_id
    self._binary_response = binary_response

    self.method = method
    if self.method is None:
        # A request body implies POST, otherwise GET.
        self.method = 'POST' if data else 'GET'

    if isinstance(headers, Headers):
        # Collapse DataToken header values to strings, then convert to the
        # plain dict urllib2 expects.
        headers.tokens_to_value()
        headers = dict(headers)

    # Call the base class constructor
    urllib2.Request.__init__(self, url.url_encode(), data,
                             headers, origin_req_host, unverifiable)
    RequestMixIn.__init__(self)
def __eq__(self, other):
return (self.get_method() == other.get_method() and
self.get_uri() == other.get_uri() and
self.get_headers() == other.get_headers() and
self.get_data() == other.get_data() and
self.get_timeout() == other.get_timeout())
def with_binary_response(self):
    # True when the caller asked for the raw (binary) response body.
    return self._binary_response
def set_data(self, data):
    # Replace the request body (urllib2 stores it in the ``data`` attribute).
    self.data = data
def add_header(self, key, val):
    """
    Override mostly to avoid having header values of DataToken type

    :param key: The header name as a string
    :param val: The header value (a string of a DataToken)
    :return: None
    """
    value = val.get_value() if isinstance(val, DataToken) else val
    self.headers[key.capitalize()] = value
def get_method(self):
    # HTTP verb for this request ('GET', 'POST', ...).
    return self.method
def set_method(self, method):
    # Override the HTTP verb chosen at construction time.
    self.method = method
def get_netloc(self):
    """Return the network location of this request as 'domain:port'."""
    domain = self.get_uri().get_domain()
    port = self.get_uri().get_port()
    return '%s:%s' % (domain, port)
def get_domain(self):
    """Return the domain part of this request's URI."""
    return self.get_uri().get_domain()
def get_uri(self):
    """Return the URL object this request targets."""
    return self.url_object
def set_uri(self, url_object):
    """Replace the URL object this request targets."""
    self.url_object = url_object
def get_headers(self):
    """Return a Headers instance merging regular and unredirected headers."""
    merged = Headers(self.headers.items())
    merged.update(self.unredirected_hdrs.items())
    return merged
def set_headers(self, headers):
    """Replace all request headers with a plain dict copy of `headers`."""
    self.headers = dict(headers)
def get_timeout(self):
    """Return the socket timeout configured for this request."""
    return self.timeout
def set_timeout(self, timeout):
    """Set the socket timeout for this request."""
    self.timeout = timeout
def set_new_connection(self, new_connection):
    """Set whether this request must be sent over a fresh connection."""
    self.new_connection = new_connection
def get_new_connection(self):
    """Return whether this request must be sent over a fresh connection."""
    return self.new_connection
def to_dict(self):
    """
    Serialize this request into a plain dict (msgpack friendly).

    The default-timeout sentinel is stored as None so the resulting dict
    only contains serializable values; from_dict() reverses the mapping.
    """
    uses_default_timeout = self.timeout is socket._GLOBAL_DEFAULT_TIMEOUT
    return {'method': self.get_method(),
            'uri': self.get_uri().url_string,
            'headers': dict(self.get_headers()),
            'data': self.get_data(),
            'cookies': self.cookies,
            'session': self.session,
            'cache': self.get_from_cache,
            'timeout': None if uses_default_timeout else self.timeout,
            'new_connection': self.new_connection,
            'follow_redirects': self.follow_redirects,
            'use_basic_auth': self.use_basic_auth,
            'use_proxy': self.use_proxy,
            'debugging_id': self.debugging_id,
            'binary_response': self._binary_response}
@classmethod
def from_fuzzable_request(cls, fuzzable_request):
    """
    Build an HTTPRequest carrying all the information contained in the
    FuzzableRequest passed as parameter.

    :param fuzzable_request: The FuzzableRequest
    :return: A new HTTPRequest instance
    """
    headers = fuzzable_request.get_headers()
    # Headers may contain DataToken instances; flatten them to strings
    headers.tokens_to_value()
    return cls(fuzzable_request.get_uri(),
               data=fuzzable_request.get_data(),
               headers=headers,
               origin_req_host=fuzzable_request.get_url().get_domain())
@classmethod
def from_dict(cls, unserialized_dict):
    """
    Rebuild an HTTPRequest from the dict produced by to_dict().

    msgpack is much faster than cPickle but can't serialize arbitrary
    python objects, hence the dict round-trip.

    :param unserialized_dict: A dict just as returned by to_dict()
    """
    udict = unserialized_dict

    timeout = udict['timeout']
    if timeout is None:
        # None is the marker to_dict() stores for the default timeout
        timeout = socket.getdefaulttimeout()

    return cls(URL(udict['uri']),
               data=udict['data'],
               headers=Headers(udict['headers'].items()),
               cookies=udict['cookies'],
               session=udict['session'],
               cache=udict['cache'],
               method=udict['method'],
               timeout=timeout,
               new_connection=udict['new_connection'],
               follow_redirects=udict['follow_redirects'],
               use_basic_auth=udict['use_basic_auth'],
               use_proxy=udict['use_proxy'],
               debugging_id=udict['debugging_id'],
               binary_response=udict['binary_response'])
def copy(self):
    """Return a deep copy of this request (headers, data and flags included)."""
    return copy.deepcopy(self)
def __repr__(self):
    """Debug representation; the default-timeout sentinel is displayed as 3s."""
    fmt = '<HTTPRequest "%s" (cookies:%s, cache:%s, did:%s, timeout:%.2f, new_connection:%s)>'
    if self.timeout is socket._GLOBAL_DEFAULT_TIMEOUT:
        shown_timeout = 3
    else:
        shown_timeout = self.timeout
    return fmt % (self.url_object.url_string,
                  self.cookies,
                  self.get_from_cache,
                  self.debugging_id,
                  shown_timeout,
                  self.new_connection)
| 34.514403
| 99
| 0.623465
| 1,009
| 8,387
| 4.982161
| 0.224975
| 0.043963
| 0.021484
| 0.015914
| 0.109011
| 0.042968
| 0.02228
| 0.02228
| 0.02228
| 0.02228
| 0
| 0.005393
| 0.292476
| 8,387
| 242
| 100
| 34.657025
| 0.841759
| 0.193871
| 0
| 0.065789
| 0
| 0.006579
| 0.053868
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144737
| false
| 0
| 0.052632
| 0.052632
| 0.296053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5ff1935416c4a799dc3631e3b180db7559793bf
| 817
|
py
|
Python
|
moldesign/_notebooks/nbscripts/gen_toc.py
|
Autodesk/molecular-design-toolkit
|
5f45a47fea21d3603899a6366cb163024f0e2ec4
|
[
"Apache-2.0"
] | 147
|
2016-07-15T18:53:55.000Z
|
2022-01-30T04:36:39.000Z
|
moldesign/_notebooks/nbscripts/gen_toc.py
|
cherishyli/molecular-design-toolkit
|
5f45a47fea21d3603899a6366cb163024f0e2ec4
|
[
"Apache-2.0"
] | 151
|
2016-07-15T21:35:11.000Z
|
2019-10-10T08:57:29.000Z
|
moldesign/_notebooks/nbscripts/gen_toc.py
|
cherishyli/molecular-design-toolkit
|
5f45a47fea21d3603899a6366cb163024f0e2ec4
|
[
"Apache-2.0"
] | 33
|
2016-08-02T00:04:51.000Z
|
2021-09-02T10:05:04.000Z
|
#!/usr/bin/env python
from __future__ import print_function
import sys, os
from nbformat import v4
def parse_line(line):
    """Parse a markdown heading line.

    Returns a (level, title) tuple for a line starting with '#' characters,
    or None for any non-heading line.
    """
    if not line.startswith('#'):
        return None
    remainder = line.lstrip('#')
    level = len(line) - len(remainder)
    return level, remainder.strip()
if __name__ == '__main__':
    # Read the notebook given on the command line and parse it as nbformat v4
    with open(sys.argv[1], 'r') as nbfile:
        nb = v4.reads(nbfile.read())

    # Emit a markdown table of contents built from the notebook's headings
    print('Contents\n=======\n---')
    for cell in nb.cells:
        if cell['cell_type'] != 'markdown':
            continue
        for line in cell['source'].splitlines():
            header = parse_line(line)
            if header is None:
                continue
            ilevel, name = header
            anchor = name.replace(' ', '-')
            print(' '*(ilevel-1) + ' - [%s](#%s)' % (name, anchor))
| 19.452381
| 85
| 0.532436
| 101
| 817
| 4.148515
| 0.534653
| 0.042959
| 0.062053
| 0.071599
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010638
| 0.30967
| 817
| 41
| 86
| 19.926829
| 0.73227
| 0.02448
| 0
| 0
| 0
| 0
| 0.091371
| 0.027919
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.130435
| 0
| 0.26087
| 0.130435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
570168b655cd4c5fe01f67c0408794d1cfd928aa
| 2,306
|
py
|
Python
|
tests/test_persona.py
|
holnburger/persine
|
cb26d1e275f7ed7e1048bc1e6b66b71386c3e602
|
[
"MIT"
] | 84
|
2020-12-20T20:39:19.000Z
|
2022-02-02T01:01:12.000Z
|
tests/test_persona.py
|
holnburger/persine
|
cb26d1e275f7ed7e1048bc1e6b66b71386c3e602
|
[
"MIT"
] | 1
|
2020-12-25T01:07:09.000Z
|
2020-12-25T04:05:19.000Z
|
tests/test_persona.py
|
holnburger/persine
|
cb26d1e275f7ed7e1048bc1e6b66b71386c3e602
|
[
"MIT"
] | 9
|
2020-12-23T03:10:35.000Z
|
2021-09-08T14:44:18.000Z
|
import pytest
from persine import Persona
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from unittest.mock import Mock
@pytest.fixture
def engine():
    """Build a mocked Persine engine whose launch() starts headless Chrome."""
    def launch_chrome(user_data_dir):
        opts = Options()
        opts.add_argument("--headless")
        return webdriver.Chrome(options=opts)

    mock_engine = Mock()
    mock_engine.data_dir = "/tmp/data_dir"
    mock_engine.history_path = "/tmp/history.json"
    mock_engine.launch = launch_chrome
    # run() just echoes the requested action back as a history entry
    mock_engine.run = lambda driver, action: {'action': action}
    return mock_engine
def test_context(engine):
    """The context manager opens a driver on entry and closes it on exit."""
    with Persona(engine=engine) as persona:
        assert persona.driver is not None
    assert persona.driver is None
def test_history(engine):
    """update_history() appends entries and accumulates recommendations."""
    persona = Persona(engine=engine)
    assert len(persona.history) == 0

    first_entry = {
        "key": "test-key-1",
        "url": "sample",
        "action": "test:sample",
        "recommendations": [{"number": 1}, {"number": 2}, {"number": 3}],
    }
    second_entry = {
        "key": "test-key-2",
        "url": "sample2",
        "action": "test:sample",
        "recommendations": [{"number": 3}, {"number": 2}, {"number": 1}],
    }
    persona.update_history(first_entry)
    persona.update_history(second_entry)

    assert len(persona.history) == 2
    assert len(persona.recommendations) == 6
def test_history_notes(engine):
    """Notes passed to update_history() are merged into the stored entry."""
    persona = Persona(engine=engine)
    assert len(persona.history) == 0

    entry = {
        "key": "test-key-1",
        "url": "sample",
        "action": "test:sample",
        "recommendations": [{"number": 1}, {"number": 2}, {"number": 3}],
    }
    notes = {"note_key": "note_value"}
    persona.update_history(entry, notes)

    assert persona.history[-1]['note_key'] == 'note_value'
def test_run_notes(engine):
    """run() records both the action and any extra notes in history."""
    with Persona(engine=engine) as persona:
        persona.run('http://jonathansoma.com', {'note_key': 'note_value'})
        latest = persona.history[-1]
        assert latest['action'] == 'http://jonathansoma.com'
        assert latest['note_key'] == 'note_value'
def test_startup_shutdown(engine):
    """launch() creates the driver and quit() tears it down again."""
    persona = Persona(engine=engine)
    assert persona.driver is None

    persona.launch()
    assert persona.driver is not None

    persona.quit()
    assert persona.driver is None
| 26.204545
| 77
| 0.598439
| 262
| 2,306
| 5.164122
| 0.232824
| 0.076866
| 0.070214
| 0.077605
| 0.54915
| 0.470806
| 0.379157
| 0.322986
| 0.283814
| 0.283814
| 0
| 0.011689
| 0.258023
| 2,306
| 87
| 78
| 26.505747
| 0.779077
| 0
| 0
| 0.376812
| 0
| 0
| 0.167823
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 1
| 0.101449
| false
| 0
| 0.072464
| 0
| 0.202899
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
57018df18d3cbc94d73679782950464b4f793c17
| 26,556
|
py
|
Python
|
inference.py
|
QuPengfei/learnable-triangulation-pytorch
|
861d9ccf8b06bd2f130697cd40b7ac57d7f7d9f2
|
[
"MIT"
] | null | null | null |
inference.py
|
QuPengfei/learnable-triangulation-pytorch
|
861d9ccf8b06bd2f130697cd40b7ac57d7f7d9f2
|
[
"MIT"
] | null | null | null |
inference.py
|
QuPengfei/learnable-triangulation-pytorch
|
861d9ccf8b06bd2f130697cd40b7ac57d7f7d9f2
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
import cv2
import os
import h5py
from collections import defaultdict
from mvn.models.triangulation import RANSACTriangulationNet, AlgebraicTriangulationNet, VolumetricTriangulationNet
from mvn.models.loss import KeypointsMSELoss, KeypointsMSESmoothLoss, KeypointsMAELoss, KeypointsL2Loss, VolumetricCELoss
from mvn.utils import img, multiview, op, vis, misc, cfg
from mvn.utils.img import get_square_bbox, resize_image, crop_image, normalize_image, scale_bbox
from mvn.utils.multiview import Camera
from mvn.utils.multiview import project_3d_points_to_image_plane_without_distortion as project
from mvn.datasets import utils as dataset_utils
from mvn.utils.img import image_batch_to_torch
# Human3.6M dataset layout: subject ids, the four capture-camera serials and
# the action names used to index the bounding-box / calibration structures.
retval = {
    'subject_names': ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11'],
    'camera_names': ['54138969', '55011271', '58860488', '60457274'],
    'action_names': [
        'Directions-1', 'Directions-2',
        'Discussion-1', 'Discussion-2',
        'Eating-1', 'Eating-2',
        'Greeting-1', 'Greeting-2',
        'Phoning-1', 'Phoning-2',
        'Posing-1', 'Posing-2',
        'Purchases-1', 'Purchases-2',
        'Sitting-1', 'Sitting-2',
        'SittingDown-1', 'SittingDown-2',
        'Smoking-1', 'Smoking-2',
        'TakingPhoto-1', 'TakingPhoto-2',
        'Waiting-1', 'Waiting-2',
        'Walking-1', 'Walking-2',
        'WalkingDog-1', 'WalkingDog-2',
        'WalkingTogether-1', 'WalkingTogether-2']
}
# Camera calibration (h5) and ground-truth bounding boxes (npy) shipped with
# the Human3.6M "extra" data; consumed by fill_cameras*/fill_bbox* below.
h5_file="data/human36m/extra/una-dinosauria-data/h36m/cameras.h5"
bbox_file="data/human36m/extra/bboxes-Human36M-GT.npy"
def square_the_bbox(bbox):
    """Grow a TLBR bounding box along its shorter side so it becomes square.

    The box is expanded symmetrically around its center; the longer side is
    kept unchanged. Returns a (top, left, bottom, right) tuple of ints/floats.
    """
    top, left, bottom, right = bbox
    width = right - left
    height = bottom - top

    if height < width:
        # Too wide: stretch vertically around the vertical midpoint
        middle = (top + bottom) * 0.5
        top = int(round(middle - width * 0.5))
        bottom = top + width
    else:
        # Too tall (or already square): stretch horizontally
        middle = (left + right) * 0.5
        left = int(round(middle - height * 0.5))
        right = left + height

    return top, left, bottom, right
def fill_bbox(bb_file):
    """Load the GT bounding boxes and square every TLBR box in place.

    The npy file holds a nested dict: subject -> action -> camera -> array
    of per-frame TLBR boxes. Every box is replaced by its squared version.
    """
    bboxes = np.load(bb_file, allow_pickle=True).item()

    for per_subject in bboxes.values():
        for per_action in per_subject.values():
            for bbox_array in per_action.values():
                for bbox in bbox_array:
                    bbox[:] = square_the_bbox(bbox)

    return bboxes
def fill_bbox_subject_action(bb_file, subject, action):
    """Load GT bounding boxes and square the TLBR boxes of one subject/action.

    Returns the camera -> per-frame-bbox-array dict for that subject/action,
    with every box squared in place.
    """
    bboxes = np.load(bb_file, allow_pickle=True).item()
    per_camera = bboxes[subject][action]

    for bbox_array in per_camera.values():
        for bbox in bbox_array:
            bbox[:] = square_the_bbox(bbox)

    return per_camera
def get_bbox_subject_action(bboxes, idx):
    """Pick frame `idx`'s bounding box for every camera as {camera_name: bbox}."""
    return {camera: bboxes[camera][idx]
            for camera in retval['camera_names']}
def fill_cameras(h5_cameras):
    # Load per-subject / per-camera calibration from the una-dinosauria h5
    # file into a numpy structured array of shape (n_subjects, n_cameras).
    info = np.empty(
        (len(retval['subject_names']), len(retval['camera_names'])),
        dtype=[
            ('R', np.float32, (3,3)),   # rotation
            ('t', np.float32, (3,1)),   # translation
            ('K', np.float32, (3,3)),   # intrinsics
            ('dist', np.float32, 5)     # distortion: k1, k2, p1, p2, k3
        ]
    )
    cameras_params = h5py.File(h5_cameras, 'r')
    # Fill retval['cameras']
    for subject_idx, subject in enumerate(retval['subject_names']):
        for camera_idx, camera in enumerate(retval['camera_names']):
            # h5 groups are named 'subjectN' while we use 'SN'
            assert len(cameras_params[subject.replace('S', 'subject')]) == 4
            camera_params = cameras_params[subject.replace('S', 'subject')]['camera%d' % (camera_idx+1)]
            camera_retval = info[subject_idx][camera_idx]

            def camera_array_to_name(array):
                # Camera names are stored as arrays of character codes
                return ''.join(chr(int(x[0])) for x in array)
            assert camera_array_to_name(camera_params['Name']) == camera

            # NOTE(review): R is transposed and t derived as -R @ T, which
            # suggests the h5 stores the inverse convention -- confirm.
            camera_retval['R'] = np.array(camera_params['R']).T
            camera_retval['t'] = -camera_retval['R'] @ camera_params['T']

            # Assigning 0 zero-fills the whole (3,3) K field before setting entries
            camera_retval['K'] = 0
            camera_retval['K'][:2, 2] = camera_params['c'][:, 0]    # principal point
            camera_retval['K'][0, 0] = camera_params['f'][0]        # focal x
            camera_retval['K'][1, 1] = camera_params['f'][1]        # focal y
            camera_retval['K'][2, 2] = 1.0

            camera_retval['dist'][:2] = camera_params['k'][:2, 0]   # radial k1, k2
            camera_retval['dist'][2:4] = camera_params['p'][:, 0]   # tangential p1, p2
            camera_retval['dist'][4] = camera_params['k'][2, 0]     # radial k3
    return info
def fill_cameras_subject(h5_cameras,subject):
    # Load the calibration of one subject as {camera_name: structured record}.
    info = np.empty(
        len(retval['camera_names']),
        dtype=[
            ('R', np.float32, (3,3)),   # rotation
            ('t', np.float32, (3,1)),   # translation
            ('K', np.float32, (3,3)),   # intrinsics
            ('dist', np.float32, 5)     # distortion: k1, k2, p1, p2, k3
        ]
    )
    cameras = {}
    # NOTE(review): subject_idx is computed but never used below
    subject_idx = retval['subject_names'].index(subject)
    cameras_params = h5py.File(h5_cameras, 'r')
    # Fill retval['cameras']
    for camera_idx, camera in enumerate(retval['camera_names']):
        # h5 groups are named 'subjectN' while we use 'SN'
        assert len(cameras_params[subject.replace('S', 'subject')]) == 4
        camera_params = cameras_params[subject.replace('S', 'subject')]['camera%d' % (camera_idx+1)]
        camera_retval = info[camera_idx]

        def camera_array_to_name(array):
            # Camera names are stored as arrays of character codes
            return ''.join(chr(int(x[0])) for x in array)
        assert camera_array_to_name(camera_params['Name']) == camera

        # NOTE(review): R is transposed and t derived as -R @ T, which
        # suggests the h5 stores the inverse convention -- confirm.
        camera_retval['R'] = np.array(camera_params['R']).T
        camera_retval['t'] = -camera_retval['R'] @ camera_params['T']

        # Assigning 0 zero-fills the whole (3,3) K field before setting entries
        camera_retval['K'] = 0
        camera_retval['K'][:2, 2] = camera_params['c'][:, 0]    # principal point
        camera_retval['K'][0, 0] = camera_params['f'][0]        # focal x
        camera_retval['K'][1, 1] = camera_params['f'][1]        # focal y
        camera_retval['K'][2, 2] = 1.0

        camera_retval['dist'][:2] = camera_params['k'][:2, 0]   # radial k1, k2
        camera_retval['dist'][2:4] = camera_params['p'][:, 0]   # tangential p1, p2
        camera_retval['dist'][4] = camera_params['k'][2, 0]     # radial k3
        cameras[camera] = camera_retval
    return cameras
#retval['bboxes'] = fill_bbox(bbox_file)
#retval['cameras'] = fill_cameras(h5_file)
class Detector:
    """Wraps one of the triangulation models and runs 3D pose inference.

    :param config: experiment config; ``config.model.name`` selects the
        architecture ("ransac" / "alg" / "vol") and ``config.model.checkpoint``
        the pretrained weights file.
    :param device: torch device string the model is moved to.
    """

    def __init__(self, config, device = "cuda:0"):
        super().__init__()

        self.model = {
            "ransac": RANSACTriangulationNet,
            "alg": AlgebraicTriangulationNet,
            "vol": VolumetricTriangulationNet
        }[config.model.name](config, device=device).to(device)

        if config.model.init_weights:
            state_dict = torch.load(config.model.checkpoint)
            # Checkpoints saved from nn.DataParallel prefix every key with
            # "module." -- strip it so the keys match the bare model.
            for key in list(state_dict.keys()):
                new_key = key.replace("module.", "")
                state_dict[new_key] = state_dict.pop(key)
            # BUG FIX: the original re-loaded the checkpoint here, which
            # silently discarded the key renaming done above.
            self.model.load_state_dict(state_dict, strict=True)
            print("Successfully loaded pretrained weights for whole model")

    def _collect_output(self, keypoints_3d_pred, heatmaps_pred, volumes_pred,
                        confidences_pred, cuboids_pred, coord_volumes_pred,
                        base_points_pred):
        """Pack the model outputs into the dict returned to callers."""
        return {
            "keypoints_3d_pred": keypoints_3d_pred,
            "heatmaps_pred": heatmaps_pred,
            "volumes_pred": volumes_pred,
            "confidences_pred": confidences_pred,
            # BUG FIX: this used to store confidences_pred under "cuboids_pred"
            "cuboids_pred": cuboids_pred,
            "coord_volumes_pred": coord_volumes_pred,
            "base_points_pred": base_points_pred,
        }

    def infer(self, batch, model_type, device, config):
        """
        For a single image inference.

        :param batch: dict with 'images' and 'cameras' for one frame.
        :param model_type: "alg", "ransac" or "vol".
        :return: (outputBatch, inputBatch) dicts with model predictions and
            the tensors that were fed to the model.
        """
        image_batch = image_batch_to_torch(batch['images']).to(device)
        # Add the leading batch dimension: (1, n_views, ...)
        images_batch = torch.stack([image_batch], dim=0)

        proj_matricies_batch = torch.stack(
            [torch.from_numpy(camera.projection) for camera in batch['cameras']],
            dim=0)
        proj_matricies_batch = proj_matricies_batch.float().to(device)
        # shape (batch_size, n_views, 3, 4)
        proj_matricies_batchs = torch.stack([proj_matricies_batch], dim=0)

        keypoints_2d_pred = cuboids_pred = base_points_pred = None
        volumes_pred = coord_volumes_pred = None
        heatmaps_pred = confidences_pred = None
        keypoints_3d_pred = None
        if model_type == "alg" or model_type == "ransac":
            keypoints_3d_pred, keypoints_2d_pred, heatmaps_pred, confidences_pred = \
                self.model(images_batch, proj_matricies_batchs, batch)
        elif model_type == "vol":
            (keypoints_3d_pred, heatmaps_pred, volumes_pred, confidences_pred,
             cuboids_pred, coord_volumes_pred, base_points_pred) = \
                self.model(images_batch, proj_matricies_batchs, batch)

        outputBatch = self._collect_output(
            keypoints_3d_pred, heatmaps_pred, volumes_pred, confidences_pred,
            cuboids_pred, coord_volumes_pred, base_points_pred)
        inputBatch = {"images_batch": images_batch}
        return outputBatch, inputBatch

    def inferHuman36Data(self, batch, model_type, device, config, randomize_n_views,
                         min_n_views,
                         max_n_views):
        """
        For batch inferences: collate a list of Human3.6M samples and run
        the model on them.

        :return: (outputBatch, inputBatch) dicts; inputBatch additionally
            carries the projection matrices used.
        """
        collate = dataset_utils.make_collate_fn(randomize_n_views,
                                                min_n_views,
                                                max_n_views)
        batch = collate(batch)
        images_batch, keypoints_3d_gt, keypoints_3d_validity_gt, proj_matricies_batch = \
            dataset_utils.prepare_batch(batch, device, config)

        keypoints_2d_pred = cuboids_pred = base_points_pred = None
        volumes_pred = coord_volumes_pred = None
        heatmaps_pred = confidences_pred = None
        keypoints_3d_pred = None
        if model_type == "alg" or model_type == "ransac":
            keypoints_3d_pred, keypoints_2d_pred, heatmaps_pred, confidences_pred = \
                self.model(images_batch, proj_matricies_batch, batch)
        elif model_type == "vol":
            (keypoints_3d_pred, heatmaps_pred, volumes_pred, confidences_pred,
             cuboids_pred, coord_volumes_pred, base_points_pred) = \
                self.model(images_batch, proj_matricies_batch, batch)

        outputBatch = self._collect_output(
            keypoints_3d_pred, heatmaps_pred, volumes_pred, confidences_pred,
            cuboids_pred, coord_volumes_pred, base_points_pred)
        inputBatch = {
            "images_batch": images_batch,
            "proj_matricies_batch": proj_matricies_batch,
        }
        return outputBatch, inputBatch
def viewSample(sample, idx=0):
    """Project the GT 3D keypoints into camera 0 and save the overlay as PNG."""
    cam = 0
    image = sample['images'][cam]
    camera = sample['cameras'][cam]
    subject = sample['subject'][cam]
    action = sample['action'][cam]

    canvas = image.copy()
    keypoints_2d = project(camera.projection, sample['keypoints_3d'][:, :3])
    for x, y in keypoints_2d:
        cv2.circle(canvas, (int(x), int(y)), 3, (0, 0, 255), -1)

    out_path = f"./result/{subject}-{action}-{camera.name}-{idx}.png"
    cv2.imwrite(out_path, canvas)
def viewHeatmaps(sample, idx, prediction, config):
    """Render the predicted heatmaps for one sample and save them as PNGs.

    BUG FIX: the original referenced an undefined name ``inputBatch``
    (NameError at runtime); the locally collected images are used instead.
    TODO(review): confirm visualize_heatmaps accepts this image container --
    callers may need to pass the batched tensor from Detector instead.
    """
    images_batch = []
    for image_batch in sample['images']:
        images_batch.append(image_batch)

    heatmaps_vis = vis.visualize_heatmaps(
        images_batch, prediction["heatmaps_pred"],
        kind=config.kind,
        batch_index=0, size=5,
        max_n_rows=10, max_n_cols=10)
    # channel-last -> channel-first so each heatmap can be written separately
    heatmaps_vis = heatmaps_vis.transpose(2, 0, 1)
    for i in range(0, 4):
        cv2.imwrite(f"./result/heatmaps_test_{idx}_{i}.png", heatmaps_vis[i, :, :])
def viewVideo(sample):
    """Label every camera view with its index and stack them side by side."""
    labelled = []
    for cam_idx in range(len(sample['cameras'])):
        frame = sample['images'][cam_idx]
        cv2.putText(frame, f"Cam {cam_idx:02}", (10, 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
        labelled.append(frame)

    # Stack all views horizontally into a single wide image
    for j, frame in enumerate(labelled):
        if j == 0:
            combined = frame
        else:
            combined = np.concatenate((combined, frame), axis=1)
    return combined
def viewVideoResult(sample,idx, prediction,config,size=(384,384)):
    """Draw the predicted pose on every camera view plus a rendered 3D view.

    :param sample: dict with 'images' and 'cameras' for one frame.
    :param idx: frame index (currently unused inside this function).
    :param prediction: Detector output; reads 'keypoints_3d_pred'.
    :param config: experiment config; kind / pred_kind select the skeleton.
    :param size: output size of the rendered 3D view.
    :return: all views stacked horizontally into one image.
    """
    displays = []
    keypoints3d_pred = prediction['keypoints_3d_pred'].cpu()
    # first sample of the batch, xyz coordinates only
    keypoints_3d_pred = keypoints3d_pred[0,:, :3].detach().numpy()

    # Project and draw keypoints on images
    for camera_idx in range(len(sample['cameras'])): #camera_indexes_to_show:
        camera = sample['cameras'][camera_idx]
        keypoints_2d_pred = project(camera.projection, keypoints_3d_pred)

        # import ipdb; ipdb.set_trace()
        # NOTE(review): this local shadows the mvn.utils.img module import
        img = sample['images'][camera_idx]
        pred_kind = config.pred_kind if hasattr(config, "pred_kind") else config.kind
        display = vis.draw_2d_pose_cv2(keypoints_2d_pred, img, kind=pred_kind)
        cv2.putText(display, f"Cam {camera_idx:02}", (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
        displays.append(display)

    # NOTE(review): pred_kind is bound inside the loop above, so this relies
    # on at least one camera being present.
    display3 = vis.draw_3d_pose_image(keypoints_3d_pred,kind=pred_kind,radius=450)
    display3 = cv2.cvtColor(display3,cv2.COLOR_RGBA2RGB)
    display3 = cv2.resize(display3, size, interpolation=cv2.INTER_AREA)
    cv2.putText(display3, f"3D prediction", (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
    displays.append(display3)

    # Fancy stacked images
    for j, display in enumerate(displays):
        if j == 0:
            combined = display
        else:
            combined = np.concatenate((combined, display), axis=1)
    return combined
def viewResult(sample,idx,prediction,config,save_images_instead=1,size=(384,384)):
    """Visualize prediction vs ground truth for one sample.

    Draws the predicted 2D pose on every camera view, renders 3D views of
    both the prediction and the GT keypoints, stacks everything horizontally
    and either saves it under ./result or shows it in an OpenCV window.

    :param save_images_instead: truthy -> write a PNG; falsy -> cv2.imshow.
    :param size: output size of the rendered 3D views.
    """
    displays = []
    camera_idx = 0
    camera = sample['cameras'][camera_idx]
    subject = sample['subject'][camera_idx]
    action = sample['action'][camera_idx]

    keypoints3d_pred = prediction['keypoints_3d_pred'].cpu()
    # first sample of the batch, xyz coordinates only
    keypoints_3d_pred = keypoints3d_pred[0,:, :3].detach().numpy()
    keypoints_3d_gt = sample['keypoints_3d'][:, :3]

    # Project and draw keypoints on images
    for camera_idx in range(len(sample['cameras'])): #camera_indexes_to_show:
        camera = sample['cameras'][camera_idx]
        keypoints_2d_pred = project(camera.projection, keypoints_3d_pred)
        # NOTE(review): projected GT is computed but only the commented-out
        # draw call below would use it
        keypoints_2d_gt = project(camera.projection, keypoints_3d_gt)

        # import ipdb; ipdb.set_trace()
        # NOTE(review): this local shadows the mvn.utils.img module import
        img = sample['images'][camera_idx]
        pred_kind = config.pred_kind if hasattr(config, "pred_kind") else config.kind
        display = vis.draw_2d_pose_cv2(keypoints_2d_pred, img, kind=pred_kind)
        #display = vis.draw_2d_pose_cv2(keypoints_2d_gt, img, kind=config.kind)
        cv2.putText(display, f"Cam {camera_idx:02}", (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
        displays.append(display)

    # Rendered 3D view of the prediction (pred_kind bound in the loop above)
    display3 = vis.draw_3d_pose_image(keypoints_3d_pred,kind=pred_kind,radius=450)
    display3 = cv2.cvtColor(display3,cv2.COLOR_RGBA2RGB)
    display3 = cv2.resize(display3, size, interpolation=cv2.INTER_AREA)
    cv2.putText(display3, f"3D prediction", (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
    displays.append(display3)

    # Rendered 3D view of the ground truth
    display3_gt = vis.draw_3d_pose_image(sample['keypoints_3d'][:, :3],kind=pred_kind,radius=450)
    display3_gt = cv2.cvtColor(display3_gt,cv2.COLOR_RGBA2RGB)
    display3_gt = cv2.resize(display3_gt, size, interpolation=cv2.INTER_AREA)
    cv2.putText(display3_gt, f"3D GT", (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
    displays.append(display3_gt)

    # Fancy stacked images
    for j, display in enumerate(displays):
        if j == 0:
            combined = display
        else:
            combined = np.concatenate((combined, display), axis=1)

    # Load
    if save_images_instead:
        file = f"./result/result-{subject}-{action}-{camera.name}-{idx}.png"
        cv2.imwrite(file, combined)
    else:
        cv2.imshow('w', combined)
        cv2.setWindowTitle('w', f"Index {idx}")
        c = cv2.waitKey(0) % 256
        if c == ord('q') or c == 27:
            print('Quitting...')
            cv2.destroyAllWindows()
def prepareSample(idx, labels, human36mRoot, keyPoint3d = None , imageShape = None, scaleBox = 1.0, crop = True, normImage = False):
    """Build one multi-view Human3.6M sample from extracted image frames.

    :param idx: row index into labels['table'].
    :param labels: labels dict as returned by loadHuman36mLabel().
    :param human36mRoot: root directory of the extracted image sequences.
    :param keyPoint3d: optional precomputed 3D keypoints indexed by idx.
    :param imageShape: target (h, w) to resize each view to, or None.
    :param scaleBox: scale factor applied to the GT bounding box.
    :param crop: crop each view to its (scaled) bounding box.
    :param normImage: normalize pixel values.
    :return: dict with per-camera lists ('images', 'cameras', ...) plus
        'keypoints_3d' (with a dummy confidence column) and 'indexes'.
    """
    sample = defaultdict(list) # return value
    shot = labels['table'][idx]
    subject = labels['subject_names'][shot['subject_idx']]
    action = labels['action_names'][shot['action_idx']]
    frame_idx = shot['frame_idx']

    for camera_idx, camera_name in enumerate(labels['camera_names']):
        bbox = shot['bbox_by_camera_tlbr'][camera_idx][[1,0,3,2]] # TLBR to LTRB
        bbox_height = bbox[2] - bbox[0]
        if bbox_height == 0:
            # convention: if the bbox is empty, then this view is missing
            continue

        # scale the bounding box
        bbox = scale_bbox(bbox, scaleBox)

        # load image
        image_path = os.path.join(human36mRoot, subject, action, 'imageSequence', camera_name, 'img_%06d.jpg' % (frame_idx+1))
        assert os.path.isfile(image_path), '%s doesn\'t exist' % image_path
        image = cv2.imread(image_path)

        # load camera
        shot_camera = labels['cameras'][shot['subject_idx'], camera_idx]
        #print(shot_camera)
        retval_camera = Camera(shot_camera['R'], shot_camera['t'], shot_camera['K'], shot_camera['dist'], camera_name)

        if crop:
            # crop image
            image = crop_image(image, bbox)
            retval_camera.update_after_crop(bbox)

        if imageShape is not None:
            # resize
            image_shape_before_resize = image.shape[:2]
            image = resize_image(image, imageShape)
            retval_camera.update_after_resize(image_shape_before_resize, imageShape)
            sample['image_shapes_before_resize'].append(image_shape_before_resize)

        if normImage:
            image = normalize_image(image)

        sample['images'].append(image)
        sample['detections'].append(bbox + (1.0,)) # TODO add real confidences
        sample['cameras'].append(retval_camera)
        sample['proj_matrices'].append(retval_camera.projection)
        sample["action"].append(action)
        sample["subject"].append(subject)
        sample["frameId"].append(frame_idx)

    # 3D keypoints
    # add dummy confidences
    sample['keypoints_3d'] = np.pad(
        shot['keypoints'][:17],
        ((0,0), (0,1)), 'constant', constant_values=1.0)

    # build cuboid
    # base_point = sample['keypoints_3d'][6, :3]
    # sides = np.array([self.cuboid_side, self.cuboid_side, self.cuboid_side])
    # position = base_point - sides / 2
    # sample['cuboids'] = volumetric.Cuboid3D(position, sides)

    # save sample's index
    sample['indexes'] = idx

    if keyPoint3d is not None:
        sample['pred_keypoints_3d'] = keyPoint3d[idx]

    # freeze the defaultdict so missing keys raise instead of growing silently
    sample.default_factory = None
    return sample
def prepareVideoSample(info, images, cameras, bboxes, subject = 'S1', imageShape = [384, 384], scaleBox = 1.0, crop = True, normImage = False):
    """Build one multi-view sample directly from decoded video frames.

    :param info: dataset metadata dict (reads 'subject_names', 'camera_names').
    :param images: {camera_name: frame} decoded frames for this time step.
    :param cameras: {camera_name: calibration record with R/t/K/dist}.
    :param bboxes: {camera_name: TLBR bounding box} for this frame.
    :param imageShape: target (h, w) to resize each view to, or None.
    :param scaleBox: scale factor applied to each bounding box.
    :param crop: crop each view to its (scaled) bounding box.
    :param normImage: accepted for symmetry with prepareSample; not applied here.
    :return: dict with per-camera lists: 'images', 'cameras', 'proj_matrices'.
    """
    sample = defaultdict(list) # return value
    # NOTE(review): subject_idx is computed but never used below
    subject_idx = info['subject_names'].index(subject)

    for camera_idx, camera_name in enumerate(info['camera_names']):
        bbox = bboxes[camera_name][[1,0,3,2]] # TLBR to LTRB
        bbox_height = bbox[2] - bbox[0]
        if bbox_height == 0:
            # convention: if the bbox is empty, then this view is missing
            continue

        # scale the bounding box
        bbox = scale_bbox(bbox, scaleBox)

        # load camera
        shot_camera = cameras[camera_name]
        image = images[camera_name]
        #print(shot_camera)
        retval_camera = Camera(shot_camera['R'], shot_camera['t'], shot_camera['K'], shot_camera['dist'], camera_name)

        if crop:
            # crop image
            image = crop_image(image, bbox)
            retval_camera.update_after_crop(bbox)

        if imageShape is not None:
            # resize
            image_shape_before_resize = image.shape[:2]
            image = resize_image(image, imageShape)
            retval_camera.update_after_resize(image_shape_before_resize, imageShape)

        sample['images'].append(image)
        sample['cameras'].append(retval_camera)
        sample['proj_matrices'].append(retval_camera.projection)

    # projection matricies
    #print(sample['proj_matrices'])
    # freeze the defaultdict so missing keys raise instead of growing silently
    sample.default_factory = None
    return sample
def loadHuman36mLabel(path,train = True, withDamageAction=True, retain_every_n_frames_in_test=1):
    """Load the Human3.6M labels file and keep either the train or test split.

    :param path: path to the .npy labels file (bounding boxes, cameras and
        3D keypoints packed in a structured 'table').
    :param train: True -> keep frames of training subjects (S1,S5,S6,S7,S8);
        False -> keep test subjects (S9, S11).
    :param withDamageAction: when False, drop the S9 sequences with damaged
        annotations (Greeting-2, SittingDown-2, Waiting-1).
    :param retain_every_n_frames_in_test: subsampling stride applied to the
        test split only.
    :return: the labels dict with labels['table'] filtered accordingly.
    """
    # train and test are mutually exclusive, so exactly one branch below runs
    test = not train
    labels = np.load(path, allow_pickle=True).item()
    train_subjects = ['S1', 'S5', 'S6', 'S7', 'S8']
    test_subjects = ['S9', 'S11']

    # map subject names to their indices in the labels table
    train_subjects = list(labels['subject_names'].index(x) for x in train_subjects)
    test_subjects = list(labels['subject_names'].index(x) for x in test_subjects)
    indices = []
    if train:
        mask = np.isin(labels['table']['subject_idx'], train_subjects, assume_unique=True)
        indices.append(np.nonzero(mask)[0])
    if test:
        mask = np.isin(labels['table']['subject_idx'], test_subjects, assume_unique=True)
        if not withDamageAction:
            mask_S9 = labels['table']['subject_idx'] == labels['subject_names'].index('S9')

            damaged_actions = 'Greeting-2', 'SittingDown-2', 'Waiting-1'
            damaged_actions = [labels['action_names'].index(x) for x in damaged_actions]
            mask_damaged_actions = np.isin(labels['table']['action_idx'], damaged_actions)

            mask &= ~(mask_S9 & mask_damaged_actions)
        indices.append(np.nonzero(mask)[0][::retain_every_n_frames_in_test])
    labels['table'] = labels['table'][np.concatenate(indices)]
    return labels
def loadPrePelvis(path):
    """Load precomputed 3D keypoints and reorder them by their sample index."""
    results = np.load(path, allow_pickle=True)
    order = np.argsort(results['indexes'])
    return results['keypoints_3d'][order]
def infer(model_type="alg",max_num=5, save_images_instead=1, crop=True):
    """Run pose inference on a few Human3.6M frames and save visualizations.

    :param model_type: "alg" or "vol" -- selects the experiment config file.
    :param max_num: number of frames (starting at table index 100) to process.
    :param save_images_instead: truthy -> write PNGs under ./result instead of
        opening a display window.
    :param crop: crop each view to the (squared) GT bounding box.
    """
    if model_type == "alg":
        config = cfg.load_config("./experiments/human36m/train/human36m_alg.yaml")
    elif model_type == "vol":
        config = cfg.load_config("./experiments/human36m/train/human36m_vol_softmax.yaml")
    # NOTE(review): pelvis3d is loaded but never used below -- samples are
    # built with keyPoint3d=None; confirm before removing the load.
    pelvis3d = loadPrePelvis(config.dataset.train.pred_results_path)
    device = torch.device(0)
    labels = loadHuman36mLabel(config.dataset.train.labels_path)
    detector = Detector(config, device=device)
    for idx in range(max_num):
        # the 100+idx offset skips the first frames of the sequence
        sample = [prepareSample(100+idx, labels, config.dataset.train.h36m_root, keyPoint3d=None, crop=crop, imageShape=config.image_shape)]
        viewSample(sample[0],idx)
        prediction, inputBatch = detector.inferHuman36Data(sample, model_type, device, config,
                                                           randomize_n_views=config.dataset.val.randomize_n_views,
                                                           min_n_views=config.dataset.val.min_n_views,
                                                           max_n_views=config.dataset.val.max_n_views)
        viewResult(sample[0],idx,prediction,config,save_images_instead=save_images_instead)
def infer_videos(model_type="alg",subject="S1", action="Sitting-1", max_num=5, save_images_instead=True, crop=True):
    """Run pose inference directly on the Human3.6M videos of one action.

    :param model_type: "alg" or "vol" -- selects the experiment config file.
    :param subject: Human3.6M subject id (e.g. "S1").
    :param action: action sequence name (e.g. "Sitting-1").
    :param max_num: maximum number of frames written when saving to video.
    :param save_images_instead: truthy -> write an output .mp4 under
        ./result; falsy -> show each combined frame in an OpenCV window.
    :param crop: kept for interface compatibility; cropping is controlled by
        the crop argument passed to prepareVideoSample below.
    """
    if model_type == "alg":
        config = cfg.load_config("./experiments/human36m/train/human36m_alg.yaml")
    elif model_type == "vol":
        config = cfg.load_config("./experiments/human36m/train/human36m_vol_softmax.yaml")
    # NOTE(review): pelvis3d is loaded but never used below
    pelvis3d = loadPrePelvis(config.dataset.train.pred_results_path)
    device = torch.device(0)
    detector = Detector(config, device=device)
    bboxes = fill_bbox_subject_action(bbox_file, subject, action)
    cameras = fill_cameras_subject(h5_file, subject)

    # Open one capture per camera view of the requested action
    cap = {}
    wri = None
    human36mRoot = "/dataset/experiment-dataset/extracted/"
    video_path = os.path.join(human36mRoot, subject, 'Videos')
    for (camera_idx, camera) in enumerate(retval['camera_names']):
        video_name = video_path+'/'+action.replace("-"," ")+'.'+camera+'.mp4'
        assert os.path.isfile(video_name), '%s doesn\'t exist' % video_name
        cap[camera] = cv2.VideoCapture(video_name)
        size = (int(cap[camera].get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap[camera].get(cv2.CAP_PROP_FRAME_HEIGHT)))

    if save_images_instead:
        wri = cv2.VideoWriter(
            f'./result/result-{subject}-{action}.mp4',
            cv2.VideoWriter_fourcc('m','p','4','v'),
            30, (1920, 384))

    idx = 0
    while True:
        # Grab one frame from every camera; stop when any stream runs out
        frames = {}
        all_read = True
        for (camera_idx, camera) in enumerate(retval['camera_names']):
            success, frames[camera] = cap[camera].read()
            if not success:
                all_read = False
                break
        # BUG FIX: the original only broke out of the camera loop on a failed
        # read and then processed incomplete/None frames; stop cleanly instead.
        if not all_read:
            break

        bbox = get_bbox_subject_action(bboxes, idx)
        sample = prepareVideoSample(info=retval, images=frames, cameras=cameras,
                                    bboxes=bbox, subject=subject,
                                    imageShape=[384, 384], scaleBox=1.0,
                                    crop=True, normImage=False)
        prediction, inputBatch = detector.infer(sample, model_type, device, config)
        combined = viewVideoResult(sample, idx, prediction, config)
        idx = idx + 1

        if save_images_instead:
            if idx < max_num:
                wri.write(combined)
            else:
                break
        else:
            cv2.imshow('w', combined)
            cv2.setWindowTitle('w', f"Index {idx}")
            c = cv2.waitKey(0) % 256
            if c == ord('q') or c == 27:
                print('Quitting...')
                break
    cv2.destroyAllWindows()

    for (camera_idx, camera) in enumerate(retval['camera_names']):
        cap[camera].release()
    if save_images_instead:
        wri.release()
if __name__ == "__main__":
    #infer("alg",max_num=2, crop=True)
    # Show live results for up to 1000 frames of the default S1 / Sitting-1
    # sequence using the algebraic triangulation model.
    infer_videos("alg",max_num=1000, save_images_instead=False, crop=True)
| 42.018987
| 185
| 0.640496
| 3,313
| 26,556
| 4.907335
| 0.121944
| 0.019929
| 0.016607
| 0.008857
| 0.604502
| 0.548838
| 0.505228
| 0.484254
| 0.477673
| 0.458851
| 0
| 0.02905
| 0.231322
| 26,556
| 631
| 186
| 42.085578
| 0.767403
| 0.07102
| 0
| 0.483801
| 0
| 0.006479
| 0.10244
| 0.022158
| 0
| 0
| 0
| 0.001585
| 0.012959
| 1
| 0.047516
| false
| 0
| 0.030238
| 0.00432
| 0.114471
| 0.006479
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
57028ca06deb47d996805621dce315dc63a9dc8f
| 4,846
|
py
|
Python
|
survol/sources_types/Linux/tcp_sockets.py
|
AugustinMascarelli/survol
|
7a822900e82d1e6f016dba014af5741558b78f15
|
[
"BSD-3-Clause"
] | null | null | null |
survol/sources_types/Linux/tcp_sockets.py
|
AugustinMascarelli/survol
|
7a822900e82d1e6f016dba014af5741558b78f15
|
[
"BSD-3-Clause"
] | null | null | null |
survol/sources_types/Linux/tcp_sockets.py
|
AugustinMascarelli/survol
|
7a822900e82d1e6f016dba014af5741558b78f15
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
TCP Linux sockets with netstat
"""
import re
import sys
import socket
import lib_util
import lib_common
from lib_properties import pc
from sources_types import addr as survol_addr
# Many advantages compared to psutil:
# The Python module psutil is not needed
# psutil gives only sockets if the process is accessible.
# It is much faster.
# On the other it is necessary to run netstat in the shell.
# $ netstat -aptn
# (Not all processes could be identified, non-owned process info
# will not be shown, you would have to be root to see it all.)
# Active Internet connections (servers and established)
# Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
# tcp 0 0 192.168.0.17:8000 0.0.0.0:* LISTEN 25865/python
# tcp 0 0 127.0.0.1:427 0.0.0.0:* LISTEN -
# tcp 0 0 0.0.0.0:5900 0.0.0.0:* LISTEN 4119/vino-server
# tcp 0 0 192.168.122.1:53 0.0.0.0:* LISTEN -
# tcp 0 0 192.168.0.17:44634 192.168.0.14:60685 ESTABLISHED 4118/rygel
# tcp 0 0 192.168.0.17:22 192.168.0.14:60371 ESTABLISHED -
# tcp 0 0 192.168.0.17:44634 192.168.0.14:58478 ESTABLISHED 4118/rygel
# tcp 0 0 192.168.0.17:44634 192.168.0.15:38960 TIME_WAIT -
# tcp 0 0 192.168.0.17:44634 192.168.0.14:58658 ESTABLISHED 4118/rygel
# tcp 0 0 192.168.0.17:44634 192.168.0.14:59694 ESTABLISHED 4118/rygel
# tcp 0 0 fedora22:44634 192.168.0.14:58690 ESTABLISHED 4118/rygel
# tcp 0 0 fedora22:ssh 192.168.0.14:63599 ESTABLISHED -
# tcp 0 0 fedora22:42042 176.103.:universe_suite ESTABLISHED 23512/amule
# tcp6 0 0 [::]:wbem-http [::]:* LISTEN -
# tcp6 0 0 [::]:wbem-https [::]:* LISTEN -
# tcp6 0 0 [::]:mysql [::]:* LISTEN -
# tcp6 0 0 [::]:rfb [::]:* LISTEN 4119/vino-server
# tcp6 0 0 [::]:50000 [::]:* LISTEN 23512/amule
# tcp6 0 0 [::]:43056 [::]:* LISTEN 4125/httpd
# tcp6 0 0 [::]:http [::]:* LISTEN -
# tcp6 0 0 [::]:ssh [::]:* LISTEN -
# tcp6 0 0 localhost:ipp [::]:* LISTEN -
# tcp6 0 0 [::]:telnet [::]:* LISTEN -
#
def Main():
    """Build an RDF graph of local TCP sockets by parsing `netstat -aptn`.

    Every IPv4 socket in ESTABLISHED or TIME_WAIT state becomes an AddrUri
    node.  When netstat reports the owning pid, the socket is linked to that
    process node; otherwise it is linked to the local machine node.
    """
    cgiEnv = lib_common.CgiEnv()
    # -a: all sockets, -p: owning pid/program, -t: TCP only, -n: numeric output.
    args = ["netstat", '-aptn', ]
    p = lib_common.SubProcPOpen(args)
    grph = cgiEnv.GetGraph()
    (netstat_last_output, netstat_err) = p.communicate()
    # Converts to string for Python3.
    netstat_str = netstat_last_output.decode("utf-8")
    netstat_lines = netstat_str.split('\n')
    seenHeader = False
    for lin in netstat_lines:
        # By default, consecutive spaces are treated as one.
        linSplit = lin.split()
        if len(linSplit) == 0:
            continue
        # Skip everything up to and including the "Proto Recv-Q ..." header.
        if not seenHeader:
            if linSplit[0] == "Proto":
                seenHeader = True
            continue
        # TODO: "tcp6"
        if linSplit[0] != "tcp":
            continue
        # sys.stderr.write("tcp_sockets.py lin=%s\n"%lin)
        sockStatus = linSplit[5]
        if sockStatus not in ["ESTABLISHED","TIME_WAIT"]:
            continue
        addrLocal = linSplit[3]
        ipLocal, portLocal = survol_addr.SplitAddrPort(addrLocal)
        # It does not use survol_addr.PsutilAddSocketToGraphOne(node_process,cnt,grph)
        # because sometimes we do not have the process id.
        localSocketNode = lib_common.gUriGen.AddrUri( ipLocal, portLocal )
        grph.add( ( localSocketNode, pc.property_information, lib_common.NodeLiteral(sockStatus) ) )
        addrRemot = linSplit[4]
        # This is different for IPV6
        if addrRemot != "0.0.0.0:*":
            ipRemot, portRemot = survol_addr.SplitAddrPort(addrRemot)
            remotSocketNode = lib_common.gUriGen.AddrUri( ipRemot, portRemot )
            grph.add( ( localSocketNode, pc.property_socket_end, remotSocketNode ) )
        # Last column is "pid/program", or "-" when the process is not visible
        # to the current user (netstat prints a warning about that).
        pidCommand = linSplit[6]
        if pidCommand != "-":
            procPid, procNam = pidCommand.split("/")
            procNode = lib_common.gUriGen.PidUri(procPid)
            grph.add( ( procNode, pc.property_host, lib_common.nodeMachine ) )
            grph.add( ( procNode, pc.property_pid, lib_common.NodeLiteral(procPid) ) )
            grph.add( ( procNode, pc.property_has_socket, localSocketNode ) )
        else:
            # If the local process is not known, just link the local socket to the local machine.
            grph.add( ( lib_common.nodeMachine, pc.property_host, localSocketNode ) )
    cgiEnv.OutCgiRdf()
if __name__ == '__main__':
    # CGI script entry point.
    Main()
| 38.460317
| 98
| 0.577383
| 621
| 4,846
| 4.429952
| 0.362319
| 0.031261
| 0.038168
| 0.023264
| 0.211923
| 0.138495
| 0.115231
| 0.08615
| 0.077063
| 0.077063
| 0
| 0.12024
| 0.311804
| 4,846
| 125
| 99
| 38.768
| 0.704648
| 0.60648
| 0
| 0.08
| 0
| 0
| 0.035541
| 0
| 0
| 0
| 0
| 0.008
| 0
| 1
| 0.02
| false
| 0
| 0.14
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
57033e68edf1bc714421c03684cc8349a3a89d3f
| 5,832
|
py
|
Python
|
models.py
|
JiaMingLin/residual_adapters
|
a3d32b4fb6c3c252f5adc1ad178b026a111c1a08
|
[
"Apache-2.0"
] | 137
|
2018-03-22T15:45:30.000Z
|
2022-03-17T09:39:07.000Z
|
models.py
|
JiaMingLin/residual_adapters
|
a3d32b4fb6c3c252f5adc1ad178b026a111c1a08
|
[
"Apache-2.0"
] | 5
|
2018-09-25T19:44:34.000Z
|
2020-12-19T11:26:41.000Z
|
models.py
|
JiaMingLin/residual_adapters
|
a3d32b4fb6c3c252f5adc1ad178b026a111c1a08
|
[
"Apache-2.0"
] | 40
|
2018-04-04T12:36:54.000Z
|
2022-02-19T05:46:36.000Z
|
# models.py
# created by Sylvestre-Alvise Rebuffi [srebuffi@robots.ox.ac.uk]
# Copyright © The University of Oxford, 2017-2020
# This code is made available under the Apache v2.0 licence, see LICENSE.txt for details
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import config_task
import math
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 Conv2d; padding 1 keeps spatial size at stride 1."""
    conv_kwargs = dict(kernel_size=3, stride=stride, padding=1, bias=False)
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
def conv1x1_fonc(in_planes, out_planes=None, stride=1, bias=False):
    """Return a 1x1 Conv2d; omitting out_planes keeps the channel count."""
    target_planes = in_planes if out_planes is None else out_planes
    return nn.Conv2d(in_planes, target_planes,
                     kernel_size=1, stride=stride, padding=0, bias=bias)
class conv1x1(nn.Module):
    """Mode-dependent 1x1 adapter built on conv1x1_fonc.

    In 'series_adapters' mode the adapter is BN -> 1x1 conv with a residual
    connection added in forward(); in 'parallel_adapters' mode it is a plain
    (possibly strided, channel-changing) 1x1 conv; otherwise a same-channel
    1x1 conv with no residual.
    """

    def __init__(self, planes, out_planes=None, stride=1):
        super(conv1x1, self).__init__()
        mode = config_task.mode
        if mode == 'series_adapters':
            self.conv = nn.Sequential(nn.BatchNorm2d(planes), conv1x1_fonc(planes))
        elif mode == 'parallel_adapters':
            self.conv = conv1x1_fonc(planes, out_planes, stride)
        else:
            self.conv = conv1x1_fonc(planes)

    def forward(self, x):
        out = self.conv(x)
        # Residual connection only makes sense for the series adapters,
        # where input and output shapes are guaranteed to match.
        if config_task.mode == 'series_adapters':
            out = out + x
        return out
class conv_task(nn.Module):
    """3x3 convolution followed by per-task adapters and batch norm.

    One BN (and, in parallel mode, one 1x1 adapter conv) is kept per task;
    the active task index is read from config_task.task at forward time.
    """

    def __init__(self, in_planes, planes, stride=1, nb_tasks=1, is_proj=1, second=0):
        super(conv_task, self).__init__()
        self.is_proj = is_proj
        self.second = second
        self.conv = conv3x3(in_planes, planes, stride)
        mode = config_task.mode
        if mode == 'series_adapters' and is_proj:
            self.bns = nn.ModuleList(
                [nn.Sequential(conv1x1(planes), nn.BatchNorm2d(planes))
                 for _ in range(nb_tasks)])
        elif mode == 'parallel_adapters' and is_proj:
            self.parallel_conv = nn.ModuleList(
                [conv1x1(in_planes, planes, stride) for _ in range(nb_tasks)])
            self.bns = nn.ModuleList(
                [nn.BatchNorm2d(planes) for _ in range(nb_tasks)])
        else:
            self.bns = nn.ModuleList(
                [nn.BatchNorm2d(planes) for _ in range(nb_tasks)])

    def forward(self, x):
        task = config_task.task
        out = self.conv(x)
        # Dropout acts on the *input* x, which from here on only feeds the
        # parallel adapter; the main conv has already consumed x.
        wants_dropout = (config_task.isdropout1 if self.second == 0
                         else config_task.isdropout2)
        if wants_dropout:
            x = F.dropout2d(x, p=0.5, training=self.training)
        if config_task.mode == 'parallel_adapters' and self.is_proj:
            out = out + self.parallel_conv[task](x)
        out = self.bns[task](out)
        return out
# No projection: identity shortcut
class BasicBlock(nn.Module):
    """Two conv_task layers with an identity or avg-pool + zero-pad shortcut."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, shortcut=0, nb_tasks=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv_task(in_planes, planes, stride, nb_tasks,
                               is_proj=int(config_task.proj[0]))
        self.conv2 = nn.Sequential(
            nn.ReLU(True),
            conv_task(planes, planes, 1, nb_tasks,
                      is_proj=int(config_task.proj[1]), second=1))
        self.shortcut = shortcut
        if self.shortcut == 1:
            self.avgpool = nn.AvgPool2d(2)

    def forward(self, x):
        out = self.conv2(self.conv1(x))
        if self.shortcut == 1:
            # Downsample spatially, then double channels by concatenating
            # zeros (parameter-free alternative to a projection shortcut).
            skip = self.avgpool(x)
            skip = torch.cat((skip, skip * 0), 1)
        else:
            skip = x
        out += skip
        return F.relu(out)
class ResNet(nn.Module):
    """Multi-task ResNet trunk with per-task BN heads and linear classifiers."""

    def __init__(self, block, nblocks, num_classes=[10]):
        # NOTE(review): the mutable default [10] is only read (len/indexing),
        # never mutated, so the shared-default pitfall does not bite here.
        super(ResNet, self).__init__()
        nb_tasks = len(num_classes)  # one classifier head per task
        blocks = [block, block, block]
        factor = config_task.factor  # global width multiplier
        self.in_planes = int(32*factor)
        self.pre_layers_conv = conv_task(3,int(32*factor), 1, nb_tasks)
        self.layer1 = self._make_layer(blocks[0], int(64*factor), nblocks[0], stride=2, nb_tasks=nb_tasks)
        self.layer2 = self._make_layer(blocks[1], int(128*factor), nblocks[1], stride=2, nb_tasks=nb_tasks)
        self.layer3 = self._make_layer(blocks[2], int(256*factor), nblocks[2], stride=2, nb_tasks=nb_tasks)
        # Final BN+ReLU is per-task; the convolutional trunk above is shared.
        self.end_bns = nn.ModuleList([nn.Sequential(nn.BatchNorm2d(int(256*factor)),nn.ReLU(True)) for i in range(nb_tasks)])
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.linears = nn.ModuleList([nn.Linear(int(256*factor), num_classes[i]) for i in range(nb_tasks)])
        # He-style init for convs; BN starts as identity (weight=1, bias=0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, nblocks, stride=1, nb_tasks=1):
        # Use the pooled/zero-padded shortcut whenever the shape changes.
        shortcut = 0
        if stride != 1 or self.in_planes != planes * block.expansion:
            shortcut = 1
        layers = []
        layers.append(block(self.in_planes, planes, stride, shortcut, nb_tasks=nb_tasks))
        self.in_planes = planes * block.expansion
        for i in range(1, nblocks):
            layers.append(block(self.in_planes, planes, nb_tasks=nb_tasks))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.pre_layers_conv(x)
        task = config_task.task  # selects the per-task BN head and classifier
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.end_bns[task](x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.linears[task](x)
        return x
def resnet26(num_classes=10, blocks=BasicBlock):
    """Build the 26-layer multi-task ResNet (4 blocks in each of 3 stages)."""
    stage_depths = [4, 4, 4]
    return ResNet(blocks, stage_depths, num_classes)
| 39.945205
| 133
| 0.627743
| 837
| 5,832
| 4.201912
| 0.185185
| 0.045778
| 0.035826
| 0.021894
| 0.372477
| 0.287745
| 0.194484
| 0.141882
| 0.106625
| 0.077339
| 0
| 0.03175
| 0.249314
| 5,832
| 145
| 134
| 40.22069
| 0.771357
| 0.041152
| 0
| 0.196581
| 0
| 0
| 0.017192
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0
| 0.059829
| 0.017094
| 0.282051
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
57053e08159134b657dc6dde4b49efc028c6a0a2
| 2,196
|
py
|
Python
|
main.py
|
GauravP2001/courseSniperBot
|
c3e05d2890f10177ee847a961b957d5e63e7d0ec
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
main.py
|
GauravP2001/courseSniperBot
|
c3e05d2890f10177ee847a961b957d5e63e7d0ec
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
main.py
|
GauravP2001/courseSniperBot
|
c3e05d2890f10177ee847a961b957d5e63e7d0ec
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
import discord
import os
import requests
import asyncio
import psycopg2
import logging
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from discord.ext import commands

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    datefmt=f"%m/%d/%Y %H:%M:%S %Z")
logger = logging.getLogger("Snipe Bot")

# Discord bot; commands are invoked with a "." prefix (e.g. ".addCourse").
client = commands.Bot(command_prefix=".")
# Drives the periodic open-section poll.
scheduler = AsyncIOScheduler()

# NOTE(review): the Postgres connection is opened at import time, so merely
# importing this module requires DATABASE_URL and a reachable database.
DATABASE_URL = os.environ.get("DATABASE_URL")
conn = psycopg2.connect(DATABASE_URL, sslmode="require")
cur = conn.cursor()
# One-off maintenance snippets kept for reference:
# with conn:
#     cur.execute("CREATE TABLE coursesToBeFound (index VARCHAR primary key);")
#     cur.execute("INSERT INTO coursesToBeFound (index) VALUES (%s)", ("00150",))
#     cur.execute("DELETE FROM coursesToBeFound where index = %s", ("00150",))
#     cur.execute("SELECT * from coursesToBeFound;")
#     for row in cur:
#         print(row[0])

# Course indexes that have already been reported; appended to by check_courses().
sectionsFound = []
@client.event
async def on_ready():
    """Log once the Discord gateway connection has been established."""
    logger.info("Bot is ready")
@client.command()
async def addCourse(ctx, arg):
    """Discord command ".addCourse <index>": register a course index to snipe."""
    logger.info(arg)
    await ctx.send("Successfully Added the Course to Snipe!")
    # `with conn:` commits on success and rolls back on error (psycopg2 semantics).
    with conn:
        cur.execute("INSERT INTO coursesToBeFound (index) VALUES (%s)", (arg,))
async def check_courses():
    """Poll the Rutgers open-sections feed and report newly opened courses.

    Reads the watched indexes from Postgres, messages the configured Discord
    channel for every index that is currently open, then removes those
    indexes from the table. A failed fetch is logged and the cycle skipped.
    """
    logger.info("Searching")
    url = "https://sis.rutgers.edu/soc/api/openSections.json?year=2022&term=1&campus=NB"
    try:
        dataJSON = requests.get(url).json()
    except Exception as e:
        # Best-effort poll: log and wait for the next scheduled run.
        logger.error(e)
        return
    # Track this cycle's hits locally. The original appended to the
    # module-level sectionsFound and then deleted *every* historical hit
    # again on each cycle, re-issuing redundant DELETEs forever.
    found_now = []
    # Set membership is O(1) versus the original nested scan of the feed.
    open_indexes = set(dataJSON)
    cur.execute("SELECT * from coursesToBeFound;")
    for row in cur:
        logger.info(row)
        index = row[0]
        if index in open_indexes:
            found_now.append(index)
            sectionsFound.append(index)  # keep the module-level history intact
            logger.info(f"Found index: {index}")
            await client.get_channel(int(os.environ.get("CHANNEL_ID"))).send(f"Found Index: {index}")
    for index in found_now:
        cur.execute("DELETE FROM coursesToBeFound where index = %s", (index,))
    conn.commit()
if __name__ == "__main__":
    # Schedule the 10-second polling job, then hand control to the bot loop.
    logger.info("Starting")
    scheduler.add_job(check_courses, "interval", seconds=10)
    scheduler.start()
    client.run(os.environ.get("token"))
| 28.519481
| 105
| 0.658015
| 276
| 2,196
| 5.17029
| 0.460145
| 0.049054
| 0.025228
| 0.025228
| 0.199019
| 0.199019
| 0.199019
| 0.199019
| 0.065872
| 0
| 0
| 0.012521
| 0.199909
| 2,196
| 76
| 106
| 28.894737
| 0.799659
| 0.142987
| 0
| 0
| 0
| 0.02
| 0.235326
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.16
| 0
| 0.18
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
57075fadbef4087df6eac236abcbc48b853a6d54
| 619
|
py
|
Python
|
Python_Exercicios/Mundo2/Condições em Python (if..elif)/python_038.py
|
jbauermanncode/Curso_Em_Video_Python
|
330c207d7bed4e663fe1b9ab433ab57a9828b7f1
|
[
"MIT"
] | null | null | null |
Python_Exercicios/Mundo2/Condições em Python (if..elif)/python_038.py
|
jbauermanncode/Curso_Em_Video_Python
|
330c207d7bed4e663fe1b9ab433ab57a9828b7f1
|
[
"MIT"
] | null | null | null |
Python_Exercicios/Mundo2/Condições em Python (if..elif)/python_038.py
|
jbauermanncode/Curso_Em_Video_Python
|
330c207d7bed4e663fe1b9ab433ab57a9828b7f1
|
[
"MIT"
] | null | null | null |
"""Read two integers and report which one is larger, or that they are equal.

Curso em Video, exercise 038: compare two numbers and print
- "the first value is larger",
- "the second value is larger", or
- "there is no larger value, both are equal" (messages shown in Portuguese).
"""

# Read the two integers from the user (prompts kept in Portuguese).
n1 = int(input('Informe o primeiro número: '))
n2 = int(input('Informe o segundo número: '))

# Compare and report the result.
if n1 > n2:
    print('O número {} é o maior!'.format(n1))
elif n2 > n1:
    print('O número {} é o maior!'.format(n2))
else:
    print('Os números são iguais!')
| 22.925926
| 101
| 0.663974
| 95
| 619
| 4.284211
| 0.442105
| 0.054054
| 0.093366
| 0.078624
| 0.14742
| 0.14742
| 0.14742
| 0.14742
| 0
| 0
| 0
| 0.025105
| 0.227787
| 619
| 27
| 102
| 22.925926
| 0.82636
| 0.455574
| 0
| 0
| 0
| 0
| 0.386364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.3
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
57088093d1d0b3cfd26c3d3201f0bca2db2decb3
| 324
|
py
|
Python
|
ABS/ABC085C.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
ABS/ABC085C.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
ABS/ABC085C.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
def main():
    """Solve ABC085C: find counts of 10000/5000/1000-yen bills totalling Y.

    Prints the first valid (i, j, k) with i+j+k == N bills, or -1 -1 -1
    when no combination exists.
    """
    N, Y = map(int, input().split())
    for tens in range(N + 1):
        for fives in range(N + 1):
            ones = N - tens - fives
            if ones >= 0 and 10000 * tens + 5000 * fives + 1000 * ones == Y:
                print(tens, fives, ones)
                exit()
    # No combination of bills reaches Y.
    print(-1, -1, -1)
if __name__ == '__main__':
    # Script entry point for the AtCoder submission.
    main()
| 18
| 59
| 0.435185
| 53
| 324
| 2.509434
| 0.471698
| 0.06015
| 0.067669
| 0.135338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093596
| 0.373457
| 324
| 17
| 60
| 19.058824
| 0.561576
| 0.061728
| 0
| 0
| 0
| 0
| 0.026667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.1
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5708df6ade016849aefe1a0044ec7ee2d375c82f
| 10,853
|
py
|
Python
|
testing/test_pulse_prop.py
|
ibegleris/w-fopo
|
e44b83b8ec54d01bb34b89805378a2b0659dfe6f
|
[
"BSD-3-Clause"
] | null | null | null |
testing/test_pulse_prop.py
|
ibegleris/w-fopo
|
e44b83b8ec54d01bb34b89805378a2b0659dfe6f
|
[
"BSD-3-Clause"
] | null | null | null |
testing/test_pulse_prop.py
|
ibegleris/w-fopo
|
e44b83b8ec54d01bb34b89805378a2b0659dfe6f
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
sys.path.append('src')
from functions import *
import numpy as np
from numpy.testing import assert_allclose
"-----------------------Full soliton--------------------------------------------"
def get_Qs(nm, gama,fv, a_vec, dnerr, index, master_index,lamda, n2):
    """Build the mode-coupling matrices (Q_large, M1, M2) for nm modes.

    For nm == 1 they come from a bundled .mat file; otherwise they are
    computed by fibre_parameter_loader from the step-index test data.
    In both cases Q is overwritten so it matches the requested nonlinear
    parameter gama.
    """
    if nm == 1:
        D = loadmat('loading_data/M1_M2_1m_new.mat')
        M1_temp, M2 = D['M1'], D['M2']
        # Convert MATLAB 1-based indices to Python 0-based.
        M2[:, :] -= 1
        M1 = np.empty([np.shape(M1_temp)[0]-2,
                       np.shape(M1_temp)[1]], dtype=np.int64)
        M1[:4] = M1_temp[:4] - 1
        Q_large = M1_temp[np.newaxis, 4:6, :]
        M1[-1] = M1_temp[6, :] - 1
        # Rescale Q to the value implied by gama.
        Q_large[:,:,:] = gama / (3*n2*(2*pi/lamda))
    else:
        M1, M2, dump, Q_large = \
            fibre_parameter_loader(fv, a_vec, dnerr, index, master_index,
                                   filename='step_index_2m', filepath='testing/testing_data/step_index/')
        print(Q_large.shape)
        # NOTE(review): the 0/1 masks presumably select which tensor terms
        # survive for each polarisation pairing — confirm against
        # fibre_parameter_loader's ordering.
        Q_large[0,0,:] = gama / (3*n2*(2*pi/lamda)) * np.array([1,1,0,0,0,0,1,1])
        Q_large[0,1,:] = gama / (3*n2*(2*pi/lamda)) * np.array([1,0,0,1,1,0,0,1])
    return Q_large, M1, M2
def pulse_propagations(ram, ss, nm, N_sol=1, cython = True, u = None):
    """SOLITON TEST. IF THIS FAILS GOD HELP YOU!

    Propagates an N_sol-order soliton over 70 m and returns (u, U, maxerr):
    the final time-domain field, its spectrum, and the per-step error bound
    given to the integrator. ram ('on'/'off') toggles the Raman response,
    ss the self-steepening term, nm the number of modes (1 or 2), and
    cython selects the compiled versus pure-Python integrand.
    """
    n2 = 2.5e-20  # n2 for silica [m/W]
    # 0.0011666666666666668 # loss [dB/m]
    alphadB = np.array([0 for i in range(nm)])  # lossless fibre for the test
    gama = 1e-3  # w/m
    "-----------------------------General options------------------------------"
    maxerr = 1e-13  # maximum tolerable error per step
    "----------------------------Simulation parameters-------------------------"
    N = 10
    z = np.array([0,70])  # total distance [m]
    nplot = 10  # number of plots
    nt = 2**N  # number of grid points
    #dzstep = z/nplot # distance per step
    dz_less = 1
    dz = 1  # starting guess value of the step
    lam_p1 = 1550
    lamda_c = 1550e-9
    lamda = lam_p1*1e-9  # pump wavelength [m]
    beta2 = -1e-3  # anomalous dispersion so a bright soliton exists
    P0_p1 = 1
    betas = np.array([0, 0, beta2])
    # Soliton width for the given dispersion/nonlinearity and soliton order.
    T0 = (N_sol**2 * np.abs(beta2) / (gama * P0_p1))**0.5
    TFWHM = (2*np.log(1+2**0.5)) * T0
    int_fwm = sim_parameters(n2, nm, alphadB)
    int_fwm.general_options(maxerr, raman_object, ss, ram)
    int_fwm.propagation_parameters(N, z, nplot, dz_less, 1)
    int_fwm.woble_propagate(0)
    fv, where = fv_creator(lam_p1,lam_p1 + 25,0, 100, int_fwm)
    #fv, where = fv_creator(lam_p1, , int_fwm, prot_casc=0)
    sim_wind = sim_window(fv, lamda, lamda_c, int_fwm, fv_idler_int=1)
    loss = Loss(int_fwm, sim_wind, amax=int_fwm.alphadB)
    alpha_func = loss.atten_func_full(sim_wind.fv, int_fwm)
    int_fwm.alphadB = alpha_func
    int_fwm.alpha = int_fwm.alphadB
    dnerr = [0]
    index = 1
    master_index = 0
    a_vec = [2.2e-6]
    Q_large,M1,M2 = get_Qs(nm, gama, fv, a_vec, dnerr, index, master_index, lamda, n2)
    if nm ==1:
        # Single-mode: collapse the coupling matrices to scalars.
        M1, M2, Q_large= np.array([1]), np.array([1]), Q_large[:,0,0]
        betas = betas[np.newaxis, :]
    # sys.exit()
    Dop = dispersion_operator(betas, int_fwm, sim_wind)
    print(Dop.shape)
    integrator = Integrator(int_fwm)
    # BUG FIX: the cython flag was hard-coded to False here, so the
    # Test_cython_* comparisons compared the Python integrand with itself.
    # Pass the caller's flag through.
    integrand = Integrand(int_fwm.nm,ram, ss, cython = cython, timing = False)
    RK = integrator.RK45mm
    dAdzmm = integrand.dAdzmm  # (the original assigned this twice)
    pulse_pos_dict_or = ('after propagation', "pass WDM2",
                         "pass WDM1 on port2 (remove pump)",
                         'add more pump', 'out')
    #M1, M2, Q = Q_matrixes(1, n2, lamda, gama=gama)
    raman = raman_object(int_fwm.ram, int_fwm.how)
    raman.raman_load(sim_wind.t, sim_wind.dt, M2, nm)
    if raman.on == 'on':
        hf = raman.hf
    else:
        hf = None
    # Initial condition: sech soliton, one copy per mode.
    u = np.empty(
        [ int_fwm.nm, len(sim_wind.t)], dtype='complex128')
    U = np.empty([int_fwm.nm,
                  len(sim_wind.t)], dtype='complex128')
    sim_wind.w_tiled = np.tile(sim_wind.w + sim_wind.woffset, (int_fwm.nm, 1))
    u[:, :] = ((P0_p1)**0.5 / np.cosh(sim_wind.t/T0)) * \
        np.exp(-1j*(sim_wind.woffset)*sim_wind.t)
    U[:, :] = fftshift(sim_wind.dt*fft(u[:, :]))
    gam_no_aeff = -1j*int_fwm.n2*2*pi/sim_wind.lamda
    u, U = pulse_propagation(u, U, int_fwm, M1, M2.astype(np.int64), Q_large[0].astype(np.complex128),
                             sim_wind, hf, Dop[0], dAdzmm, gam_no_aeff,RK)
    U_start = np.abs(U[ :, :])**2
    # Undo the soliton phase and the frequency offset for comparison.
    u[:, :] = u[:, :] * \
        np.exp(1j*z[-1]/2)*np.exp(-1j*(sim_wind.woffset)*sim_wind.t)
    """
    fig1 = plt.figure()
    plt.plot(sim_wind.fv,np.abs(U[1,:])**2)
    plt.savefig('1.png')

    fig2 = plt.figure()
    plt.plot(sim_wind.fv,np.abs(U[1,:])**2)
    plt.savefig('2.png')

    fig3 = plt.figure()
    plt.plot(sim_wind.t,np.abs(u[1,:])**2)
    plt.xlim(-10*T0, 10*T0)
    plt.savefig('3.png')

    fig4 = plt.figure()
    plt.plot(sim_wind.t,np.abs(u[1,:])**2)
    plt.xlim(-10*T0, 10*T0)
    plt.savefig('4.png')

    fig5 = plt.figure()
    plt.plot(fftshift(sim_wind.w),(np.abs(U[1,:])**2 - np.abs(U[1,:])**2 ))
    plt.savefig('error.png')

    fig6 = plt.figure()
    plt.plot(sim_wind.t,np.abs(u[1,:])**2 - np.abs(u[1,:])**2)
    plt.xlim(-10*T0, 10*T0)
    plt.savefig('error2.png')
    plt.show()
    """
    return u, U, maxerr
class Test_cython_nm2(object):
    """Compare Cython and pure-Python integrands for the nm=2 propagation.

    Each test runs the same pulse twice (cython=True / cython=False) and
    requires the total pulse energies to agree.
    """

    def _assert_energies_match(self, ram, ss):
        # Run both code paths and compare the summed |u|^2 energy.
        u_c, U_c, maxerr = pulse_propagations(ram, ss, nm=2, cython=True)
        u_p, U_p, maxerr = pulse_propagations(ram, ss, nm=2, cython=False)
        a, b = np.sum(np.abs(u_c)**2), np.sum(np.abs(u_p)**2)
        assert np.allclose(a, b)

    def test_ramoff_s0_nm2(self):
        self._assert_energies_match('off', 0)

    def test_ramon_s0_nm2(self):
        self._assert_energies_match('on', 0)

    def test_ramoff_s1_nm2(self):
        self._assert_energies_match('off', 1)

    def test_ramon_s1_nm2(self):
        self._assert_energies_match('on', 1)
class Test_cython_nm1(object):
    """Compare Cython and pure-Python integrands for the nm=1 propagation.

    NOTE(review): the method names say nm2 but the calls use nm=1 — kept
    unchanged to preserve the test identifiers.
    """

    def _assert_energies_match(self, ram, ss):
        # Run both code paths and compare the summed |u|^2 energy.
        u_c, U_c, maxerr = pulse_propagations(ram, ss, nm=1, cython=True)
        u_p, U_p, maxerr = pulse_propagations(ram, ss, nm=1, cython=False)
        a, b = np.sum(np.abs(u_c)**2), np.sum(np.abs(u_p)**2)
        assert np.allclose(a, b)

    def test_ramoff_s0_nm2(self):
        self._assert_energies_match('off', 0)

    def test_ramon_s0_nm2(self):
        self._assert_energies_match('on', 0)

    def test_ramoff_s1_nm2(self):
        self._assert_energies_match('off', 1)

    def test_ramon_s1_nm2(self):
        self._assert_energies_match('on', 1)
class Test_pulse_prop(object):
    """Soliton-invariance and energy checks for pulse_propagations.

    NOTE(review): several assertions here are vacuous as written —
    assert_allclose compares an array with itself, and
    `np.all(x == E[0] for x in E)` evaluates the truthiness of the generator
    object (always True) rather than the elementwise condition. Behaviour is
    preserved byte-for-byte; flagged for the maintainers.
    """

    def _propagate_random(self, ram, ss, nm):
        # One propagation with a random soliton order.
        return pulse_propagations(
            ram, ss, nm=nm, N_sol=np.abs(10*np.random.randn()))

    def _slice_energies(self, u):
        # Squared L2 norm of each time slice taken across the mode axis.
        return [np.linalg.norm(u[:, i], 2)**2 for i in range(np.shape(u)[1])]

    def test_solit_r0_ss0(self):
        u, U, maxerr = pulse_propagations('off', 0, nm=1)
        assert_allclose(np.abs(u[:, :])**2,
                        np.abs(u[:, :])**2, atol=9e-4)

    def test_solit_r0_ss0_2(self):
        u, U, maxerr = pulse_propagations('off', 0, nm=2)
        #print(np.linalg.norm(np.abs(u[:, 0])**2 - np.abs(u[:, -1])**2, 2))
        assert_allclose(np.abs(u[:, :])**2,
                        np.abs(u[:, :])**2, atol=9e-3)

    def test_energy_r0_ss0(self):
        u, U, maxerr = self._propagate_random('off', 0, 1)
        E = self._slice_energies(u)
        assert np.all(x == E[0] for x in E)

    def test_energy_r0_ss1(self):
        u, U, maxerr = self._propagate_random('off', 1, 1)
        E = self._slice_energies(u)
        assert np.all(x == E[0] for x in E)

    def test_energy_r1_ss0(self):
        u, U, maxerr = self._propagate_random('on', 0, 1)
        E = self._slice_energies(u)
        assert np.all(x == E[0] for x in E)

    def test_energy_r1_ss1(self):
        u, U, maxerr = self._propagate_random('on', 1, 1)
        E = self._slice_energies(u)
        assert np.all(x == E[0] for x in E)

    def test_energy_r0_ss0_2(self):
        u, U, maxerr = self._propagate_random('off', 0, 2)
        E = self._slice_energies(u)
        assert np.all(x == E[0] for x in E)

    def test_energy_r0_ss1_2(self):
        u, U, maxerr = self._propagate_random('off', 1, 2)
        E = self._slice_energies(u)
        assert np.all(x == E[0] for x in E)

    def test_energy_r1_ss0_2(self):
        u, U, maxerr = self._propagate_random('on', 0, 2)
        E = self._slice_energies(u)
        assert np.all(x == E[0] for x in E)

    def test_energy_r1_ss1_2(self):
        u, U, maxerr = self._propagate_random('on', 1, 2)
        E = self._slice_energies(u)
        assert np.all(x == E[0] for x in E)
def test_bire_pass():
    """Repeated birefringence passes must leave |u|^2 unchanged elementwise.

    NOTE(review): elementwise equality of |u|^2 suggests bire_pass applies a
    pure phase (not a polarisation rotation) — confirm against
    birfeg_variation's implementation.
    """
    Da = np.random.uniform(0, 2*pi, 100)
    b = birfeg_variation(Da,2)
    # Random two-mode complex field, scaled up to a non-trivial amplitude.
    u = np.random.randn(2, 2**14) + 1j * np.random.randn(2, 2**14)
    u *= 10
    for i in range(100):
        ut = b.bire_pass(u,i)
        assert_allclose(np.abs(u)**2, np.abs(ut)**2)
        # Multiplying by 1 yields a fresh array, so the next iteration
        # starts from the passed field rather than aliasing ut.
        u = 1 * ut
| 36.056478
| 105
| 0.540404
| 1,815
| 10,853
| 3.074931
| 0.140496
| 0.037628
| 0.034402
| 0.028669
| 0.60491
| 0.590396
| 0.572478
| 0.566386
| 0.56119
| 0.508869
| 0
| 0.057186
| 0.268497
| 10,853
| 301
| 106
| 36.056478
| 0.645799
| 0.042385
| 0
| 0.339623
| 0
| 0
| 0.052882
| 0.029494
| 0
| 0
| 0
| 0
| 0.09434
| 1
| 0.099057
| false
| 0.018868
| 0.018868
| 0
| 0.141509
| 0.009434
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
570a3a32cbbdc85ab026871552208d720276a1d7
| 1,089
|
py
|
Python
|
download.py
|
wujushan/AndroidHeatMap
|
1d6ecff8d810ffd63ba84f56c1a44ee5e7770c59
|
[
"Apache-2.0"
] | 1
|
2019-06-13T16:05:36.000Z
|
2019-06-13T16:05:36.000Z
|
download.py
|
wujushan/AndroidHeatMap
|
1d6ecff8d810ffd63ba84f56c1a44ee5e7770c59
|
[
"Apache-2.0"
] | null | null | null |
download.py
|
wujushan/AndroidHeatMap
|
1d6ecff8d810ffd63ba84f56c1a44ee5e7770c59
|
[
"Apache-2.0"
] | null | null | null |
import os
import requests
def download(url):
download_path = '/root/AndroidHeatMap/download/'
if not os.path.exists(download_path):
os.mkdir(download_path)
all_content = requests.get(url).text
file_line = all_content.split("\n")
if file_line[0] != "#EXTM3U":
raise BaseException(u"not M3U8link")
else:
unknow = True
for index, line in enumerate(file_line):
if "EXTINF" in line:
unknow = False
pd_url = url.rsplit("/", 1)[0] + "/" + file_line[index + 1]
res = requests.get(pd_url)
c_fule_name = str(file_line[index + 1])
with open(download_path + "/" + c_fule_name, 'ab') as f:
f.write(res.content)
f.flush()
if unknow:
raise BaseException("cannot find link")
else:
print("finish downloading")
if __name__ == '__main__':
url = 'https://jjdong5.com/get_file/4/1fa69b06c6276768e95cc0c04d85feec693488a588/13000/13287/13287_360p.m3u8'
download(url)
| 34.03125
| 113
| 0.575758
| 129
| 1,089
| 4.651163
| 0.51938
| 0.066667
| 0.043333
| 0.046667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07672
| 0.305785
| 1,089
| 31
| 114
| 35.129032
| 0.716931
| 0
| 0
| 0.071429
| 0
| 0
| 0.188246
| 0.027548
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.071429
| 0
| 0.107143
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
570a7fbde091be0d15c77144e4caa11f184860d3
| 4,945
|
py
|
Python
|
tests/watermarks_test.py
|
yujialuo/erdos
|
7a631b55895f1a473b0f4d38a0d6053851e65b5d
|
[
"Apache-2.0"
] | null | null | null |
tests/watermarks_test.py
|
yujialuo/erdos
|
7a631b55895f1a473b0f4d38a0d6053851e65b5d
|
[
"Apache-2.0"
] | null | null | null |
tests/watermarks_test.py
|
yujialuo/erdos
|
7a631b55895f1a473b0f4d38a0d6053851e65b5d
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
from time import sleep
from absl import app
from absl import flags
import erdos.graph
from erdos.op import Op
from erdos.utils import frequency
from erdos.message import Message
from erdos.data_stream import DataStream
from erdos.timestamp import Timestamp
from erdos.message import WatermarkMessage
INTEGER_FREQUENCY = 10 # The frequency at which to send the integers.
class FirstOperator(Op):
""" Source operator that publishes increasing integers at a fixed frequency.
The operator also inserts a watermark after a fixed number of messages.
"""
def __init__(self, name, batch_size):
""" Initializes the attributes to be used by the source operator."""
super(FirstOperator, self).__init__(name)
self.batch_size = batch_size
self.counter = 1
self.batch_number = 1
@staticmethod
def setup_streams(input_streams):
""" Outputs a single stream where the messages are sent. """
return [DataStream(data_type = int, name = "integer_out")]
@frequency(INTEGER_FREQUENCY)
def publish_numbers(self):
""" Sends an increasing count of numbers
to the downstream operators. """
output_msg = Message(self.counter,
Timestamp(coordinates = [self.batch_number]))
self.get_output_stream("integer_out").send(output_msg)
# Decide if the watermark needs to be sent.
if self.counter % self.batch_size == 0:
# The batch has completed. We need to send a watermark now.
watermark_msg = WatermarkMessage(Timestamp(coordinates =
[self.batch_number]))
self.batch_number += 1
self.get_output_stream("integer_out").send(watermark_msg)
# Update the counters.
self.counter += 1
def execute(self):
""" Execute the publish number loop. """
self.publish_numbers()
self.spin()
class SecondOperator(Op):
    """ Second operator that listens in on the numbers and reports their
    sum when the watermark is received. """

    def __init__(self, name):
        """ Initializes the per-batch buffers.

        Args:
            name: Name of this operator instance.
        """
        super(SecondOperator, self).__init__(name)
        # Maps a batch number to the list of integers received for it.
        self.windows = defaultdict(list)

    @staticmethod
    def setup_streams(input_streams):
        """ Subscribes all the input streams to the save numbers callback. """
        input_streams.add_callback(SecondOperator.save_numbers)
        input_streams.add_completion_callback(SecondOperator.execute_sum)
        return [DataStream(data_type = int, name = "sum_out")]

    def save_numbers(self, message):
        """ Save all the numbers corresponding to a window. """
        batch_number = message.timestamp.coordinates[0]
        self.windows[batch_number].append(message.data)

    def execute_sum(self, message):
        """ Sum all the numbers in this window and send out the aggregate. """
        batch_number = message.timestamp.coordinates[0]
        # Bug fix: the previous default of None made sum(None) raise a
        # TypeError when a watermark arrived for a batch with no messages;
        # an empty window now yields a sum of 0.
        window_data = self.windows.pop(batch_number, [])
        output_msg = Message(sum(window_data),
                             Timestamp(coordinates = [batch_number]))
        self.get_output_stream("sum_out").send(output_msg)

    def execute(self):
        """ Execute the spin() loop to continue processing messages. """
        self.spin()
class ThirdOperator(Op):
    """ Third operator that listens in on the sum and verifies correctness."""

    def __init__(self, name, batch_size=10):
        """Initializes the attributes to be used.

        Args:
            name: Name of this operator instance.
            batch_size: Number of integers per batch. Defaults to 10, the
                value that was previously hard-coded, so existing callers
                are unaffected.
        """
        super(ThirdOperator, self).__init__(name)
        self.batch_size = batch_size

    @staticmethod
    def setup_streams(input_streams):
        """ Subscribes all the input streams to the assert callback."""
        input_streams.add_callback(ThirdOperator.assert_correctness)
        return []

    def assert_correctness(self, message):
        """ Assert the correctness of the results."""
        batch_number = message.timestamp.coordinates[0]
        # Expected sum of the consecutive integers forming this batch.
        first = (batch_number - 1) * self.batch_size + 1
        sum_data = sum(range(first, batch_number * self.batch_size + 1))
        print("Received sum: {} for the batch_number {}, expected {}".format(
            message.data, batch_number, sum_data))
def main(argv):
    """ Builds the three-operator pipeline and executes it on the Ray
    backend: source -> per-batch sum -> correctness check. """
    # Set up the graph.
    graph = erdos.graph.get_current_graph()
    # Add the operators.
    source_op = graph.add(FirstOperator, name = "gen_op", init_args = {'batch_size' : 10})
    sum_op = graph.add(SecondOperator, name = "sum_op")
    assert_op = graph.add(ThirdOperator, name = "assert_op")
    # Connect the operators.
    graph.connect([source_op], [sum_op])
    graph.connect([sum_op], [assert_op])
    # Execute the graph.
    graph.execute('ray')
# Delegate to the app runner (presumably absl-style: parses flags, then
# calls main with the remaining argv) — confirm against the file's imports.
if __name__ == "__main__":
    app.run(main)
| 37.462121
| 90
| 0.658038
| 603
| 4,945
| 5.207297
| 0.242123
| 0.052548
| 0.019108
| 0.014331
| 0.264331
| 0.229299
| 0.1
| 0.078981
| 0.078981
| 0.078981
| 0
| 0.005087
| 0.244692
| 4,945
| 131
| 91
| 37.748092
| 0.835609
| 0.272801
| 0
| 0.194805
| 0
| 0
| 0.04091
| 0
| 0
| 0
| 0
| 0
| 0.051948
| 1
| 0.168831
| false
| 0
| 0.142857
| 0
| 0.38961
| 0.012987
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
570a9547e24dbd1a28701e76c97396c34016c792
| 1,436
|
py
|
Python
|
test/test_shop/views.py
|
blakelockley/django-base-shop
|
455a2f4465e90cde57719ac29dc090b14f0bd324
|
[
"MIT"
] | 1
|
2020-01-12T04:05:42.000Z
|
2020-01-12T04:05:42.000Z
|
test/test_shop/views.py
|
blakelockley/django-base-shop
|
455a2f4465e90cde57719ac29dc090b14f0bd324
|
[
"MIT"
] | 14
|
2020-03-24T18:11:07.000Z
|
2022-03-12T00:15:20.000Z
|
test/test_shop/views.py
|
blakelockley/django-base-shop
|
455a2f4465e90cde57719ac29dc090b14f0bd324
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
from django_base_shop.models import ShippingTag
from .models import ConcreteCart, ConcreteProduct
def index(request):
    """Simple liveness view: always responds with a plain greeting."""
    greeting = b"Hello world"
    return HttpResponse(greeting)
def check_cart(request):
    """Report the cart token, or the literal "None" for an unpersisted cart."""
    current_cart = request.cart
    if not current_cart.is_persisted:
        return HttpResponse(b"None")
    token_bytes = current_cart.cart_token.encode("utf-8")
    return HttpResponse(token_bytes)
def check_cart_items(request):
    """List the cart token followed by each item's product name and quantity.

    Responds with the literal "None" when the cart is not yet persisted.
    """
    cart = request.cart
    if not cart.is_persisted:
        return HttpResponse(b"None")
    # Build the body with a single join instead of repeated string
    # concatenation inside the loop (quadratic in the worst case).
    parts = [f"{cart.cart_token}<br /><br />"]
    parts.extend(
        f"{item.product.name} {item.quantity}<br />" for item in cart.items.all()
    )
    return HttpResponse("".join(parts).encode("utf-8"))
def add_cart_item(request, pk):
    """Add the product identified by ``pk`` to the request's cart.

    When the product table is empty, first seeds a single demo product
    (and its shipping tag) so the view works on a fresh test database.
    """
    cart = request.cart
    if ConcreteProduct.objects.count() == 0:
        medium_tag = ShippingTag.objects.create(
            name="Medium", category="Size", order=1
        )
        ConcreteProduct.objects.create(
            handle="ANV-001",
            name="Anvil",
            price=100.0,
            shipping_tag=medium_tag,
        )
    cart.add_item(ConcreteProduct.objects.get(pk=pk))
    return HttpResponse(b"Item added! <a href='/check_cart_items'>Check items</a>")
def remove_cart_item(request, pk):
    """Remove the product identified by ``pk`` from the request's cart."""
    target = ConcreteProduct.objects.get(pk=pk)
    request.cart.remove_item(target)
    return HttpResponse(b"Item removed! <a href='/check_cart_items'>Check items</a>")
| 24.338983
| 85
| 0.660167
| 184
| 1,436
| 5.043478
| 0.353261
| 0.135776
| 0.102371
| 0.054957
| 0.43319
| 0.43319
| 0.359914
| 0.204741
| 0.140086
| 0.140086
| 0
| 0.009778
| 0.216574
| 1,436
| 58
| 86
| 24.758621
| 0.815111
| 0
| 0
| 0.27027
| 0
| 0
| 0.162256
| 0.041783
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135135
| false
| 0
| 0.081081
| 0.027027
| 0.405405
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
570cfc314b92388cc92855fea7600f5e8b1e443e
| 11,600
|
py
|
Python
|
q3/q3/drivers/ui/pyqt5.py
|
virtimus/makaronLab
|
10b9be7d7d65d3da6219f929ea7070dd5fed3a81
|
[
"0BSD"
] | 2
|
2021-03-16T05:48:36.000Z
|
2021-10-11T01:55:48.000Z
|
q3/q3/drivers/ui/pyqt5.py
|
virtimus/makaronLab
|
10b9be7d7d65d3da6219f929ea7070dd5fed3a81
|
[
"0BSD"
] | null | null | null |
q3/q3/drivers/ui/pyqt5.py
|
virtimus/makaronLab
|
10b9be7d7d65d3da6219f929ea7070dd5fed3a81
|
[
"0BSD"
] | 1
|
2021-03-16T05:48:39.000Z
|
2021-03-16T05:48:39.000Z
|
# PYQT
import sys
#from ...TabPanel import TabPanel
import sip
from q3.ui.engine import qtw,qtc,qtg
from ... import consts, prop, direction
from ...ui import orientation, colors
from ...moduletype import ModuleType
from ...nodeiotype import NodeIoType
from ...q3vector import Q3Vector
from ...EventSignal import EventProps
from ..driverBase import Q3DriverBase
from enum import Enum
from ...valuetype import ValueType
from .IoLinkView import IoLinkView
from .IoNodeView import IoNodeView
from .ModuleViewImpl import ModuleViewImpl
from .GraphViewImpl import GraphViewImpl
#class IoNode:
# pass
class Q3Scene(qtw.QGraphicsScene):
    """Graphics scene that forwards context-menu events to the item under
    the cursor when that item provides its own handler."""

    def __init__(self, *args, **kwargs):
        super(Q3Scene, self).__init__(*args, **kwargs)

    def contextMenuEvent(self, event):
        """Dispatch the context menu to the item at the event position."""
        # Check if an item exists at the event position.
        item = self.itemAt(event.scenePos(), qtg.QTransform())
        if item:
            # Try the item's own context menu first. The catch is narrowed
            # to Exception: the previous bare ``except:`` also swallowed
            # KeyboardInterrupt/SystemExit.
            try:
                item.contextMenuEvent(event)
                return
            except Exception:
                pass
        menu = qtw.QMenu()
        action = menu.addAction('ACTION')
        # NOTE(review): the menu is built but never shown (no menu.exec_()),
        # same as before — confirm whether it should actually pop up.
class DetailWindowBaseImpl(qtw.QWidget):
    """QWidget backing a detail window; relays resize/close notifications
    to the owning framework object's event bus."""

    def __init__(self, parent):
        self._parent = parent
        # Bug fix: the original called super(qtw.QWidget, self).__init__(),
        # which starts the MRO walk *after* QWidget and therefore skips
        # QWidget's own initializer. Name this class instead.
        super(DetailWindowBaseImpl, self).__init__()

    def resizeEvent(self, event):
        # Forward the resize to framework-level listeners.
        self._parent.parent().events().detailWindowResized.emit(EventProps({'event': event}))

    def closeEvent(self, event):
        # Give close-request listeners a synchronous chance to react, then
        # accept the close unconditionally (previous behavior preserved).
        evs = self._parent.parent().events()
        if evs.callDetailWindowCloseReq.hasHandlers():
            evs.callDetailWindowCloseReq.sync()
        event.accept()
class Q3Driver(Q3DriverBase):
    """PyQt5 implementation of the Q3 UI driver.

    Each ``do<Element>_<Action>`` method creates or manipulates the Qt
    widget backing the corresponding framework-level element.  ``self.s()``
    is the framework-side object, ``self.impl()`` its Qt widget, and
    ``self.pimpl()`` the parent's Qt widget (names per Q3DriverBase usage
    visible here — semantics beyond that should be confirmed upstream).
    """

    def doModuleView_Init(self):
        # Root package -> a GraphViewImpl with its own scene; nested
        # modules -> ModuleViewImpl graphics items.
        if self.s().isRoot():#@s:PackageView::PackageView
            #sc = qtw.QGraphicsScene(self.pimpl())
            sc = Q3Scene(self.pimpl())
            #result = qtw.QGraphicsView(sc,self.pimpl())
            package = self.s().module().impl()
            result = GraphViewImpl(sc,self.pimpl(),self.p(), package) #'''EditorFrame'''
            result._self = self.s()
            result._scene = sc
            '''
            wheelEvent = getattr(self.s(), "wheelEvent", None)
            if callable(wheelEvent):
                result.wheelEvent = wheelEvent
            drawBackground = getattr(self.s(), "drawBackground", None)
            if callable(drawBackground):
                result.drawBackground = drawBackground
            '''
        else:
            if isinstance(self.pimpl(), qtw.QGraphicsView): #//MODULES FIRST LEVEL
                result = ModuleViewImpl(None)
                result._self = self.s()
                self.pimpl()._scene.addItem(result)
                el = self.s().module().impl()
                result.setElement(el)
            else:
                result = ModuleViewImpl(self.pimpl()) # next levels
                result._self = self.s()
            # NOTE(review): duplicate assignment — _self was already set on
            # both branches above.
            result._self = self.s()
        return result;

    def doModuleView_AfterInit(self):
        # Attach the model element, then apply root- or node-specific setup.
        tImpl = self.impl()
        #tImpl._self = self.s()
        #tImpl._element = self.s().module().impl()
        tImpl.setElement(self.s().module().impl())
        if self.s().isRoot():#@s:PackageView::PackageView
            #self.s()._inputsView = self.s().addModuleView('moduleInputs', type=ModuleType.INPUTS)
            #self.s()._outputsView = self.s().addModuleView('moduleOutputs', type=ModuleType.OUTPUTS)
            #vec2d m_inputsPosition{ -400.0, 0.0 };
            # Place the inputs/outputs pseudo-modules at the left/right edges.
            self.s()._inputsView.setProp(prop.PositionX,-400.0)
            self.s()._inputsView.setProp(prop.PositionY,0.0)
            self.s()._outputsView.setProp(prop.PositionX,400.0)
            self.s()._outputsView.setProp(prop.PositionY,0.0)
        else: #Node::Node
            # Non-root nodes: monospace label, movable/selectable item,
            # collapsed by default.
            tImpl._nameFont.setFamily("Consolas")
            tImpl._nameFont.setPointSize(8)
            tImpl.setFlags(qtw.QGraphicsItem.ItemIsMovable | qtw.QGraphicsItem.ItemIsSelectable | qtw.QGraphicsItem.ItemSendsGeometryChanges)
            tImpl.collapse()
            tImpl.setGraphView(self.pimpl())
            pass #nop
        self.callAfterInit(tImpl)
        #if iscallable(tImpl)

    def doApp_Init(self):
        # Create the QApplication and install a Fusion-based dark palette.
        result = qtw.QApplication(sys.argv)
        app = result
        app.setStyle(qtw.QStyleFactory.create("Fusion"));
        darkPalette=qtg.QPalette()
        c1 = qtg.QColor(55, 55, 55);
        c2 = qtg.QColor(25, 25, 25);
        c3 = qtg.QColor(45, 130, 220);
        darkPalette.setColor(qtg.QPalette.Window, c1);
        darkPalette.setColor(qtg.QPalette.WindowText, qtc.Qt.white);
        darkPalette.setColor(qtg.QPalette.Base, c2);
        darkPalette.setColor(qtg.QPalette.AlternateBase, c1);
        darkPalette.setColor(qtg.QPalette.ToolTipBase, qtc.Qt.white);
        darkPalette.setColor(qtg.QPalette.ToolTipText, qtc.Qt.white);
        darkPalette.setColor(qtg.QPalette.Text, qtc.Qt.white);
        darkPalette.setColor(qtg.QPalette.Button, c1);
        darkPalette.setColor(qtg.QPalette.ButtonText, qtc.Qt.white);
        darkPalette.setColor(qtg.QPalette.BrightText, qtc.Qt.red);
        darkPalette.setColor(qtg.QPalette.Link, c3);
        darkPalette.setColor(qtg.QPalette.Highlight, c3);
        darkPalette.setColor(qtg.QPalette.HighlightedText, qtc.Qt.white);
        app.setPalette(darkPalette);
        app.setStyleSheet("QToolTip { color: #ffffff; background-color: #2b8bdb; border: 1px solid white; }");
        '''
        palette = app.palette()
        palette.setColor(QPalette.Window, QColor(239, 240, 241))
        palette.setColor(QPalette.WindowText, QColor(49, 54, 59))
        palette.setColor(QPalette.Base, QColor(252, 252, 252))
        palette.setColor(QPalette.AlternateBase, QColor(239, 240, 241))
        palette.setColor(QPalette.ToolTipBase, QColor(239, 240, 241))
        palette.setColor(QPalette.ToolTipText, QColor(49, 54, 59))
        palette.setColor(QPalette.Text, QColor(49, 54, 59))
        palette.setColor(QPalette.Button, QColor(239, 240, 241))
        palette.setColor(QPalette.ButtonText, QColor(49, 54, 59))
        palette.setColor(QPalette.BrightText, QColor(255, 255, 255))
        palette.setColor(QPalette.Link, QColor(41, 128, 185))
        # palette.setColor(QPalette.Highlight, QColor(126, 71, 130))
        # palette.setColor(QPalette.HighlightedText, Qt.white)
        palette.setColor(QPalette.Disabled, QPalette.Light, Qt.white)
        palette.setColor(QPalette.Disabled, QPalette.Shadow, QColor(234, 234, 234))
        app.setPalette(palette)
        '''
        return result

    def doMainWindow_Init(self):
        # Build the top-level main window, honoring an optional 'title' kwarg.
        result = qtw.QMainWindow()
        if 'title' in self._self._kwargs:
            result.setWindowTitle(self._self._kwargs['title'])
        #result = qtw.QFrame()
        result.resize(1400, 980)
        '''
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(result.sizePolicy().hasHeightForWidth())
        result.setSizePolicy(sizePolicy)
        '''
        # Allow the framework object to override showEvent if it defines one.
        showEvent = getattr(self._self, "showEvent", None)
        if callable(showEvent):
            result.showEvent = showEvent
        return result

    def doMainWindow_Show(self):
        result = self.impl().show()
        return result

    def doMenu_Init(self):
        # Create a new Qt menu under the parent, or adopt an existing one.
        if self._impl == None:
            self._self._qtMenu = qtw.QMenu(self._parent.implObject())
            self.s()._menu = self._self._qtMenu
            pass
        else:
            self.s()._menu = self._impl
        return self._self._menu

    def doMenu_AddSeparator(self):
        result = self._self.implObject().addSeparator()
        return result

    def doMenu_addAction(self, label,id,helpStr,onClick):
        # Special-case the Exit entry (icon, shortcut, quit wiring);
        # otherwise add a plain action.
        if (label == None and consts.ID_EXIT == id):
            exitAct = qtw.QAction(qtg.QIcon('exit.png'), '&Exit', self._self.implObject())
            exitAct.setShortcut('Ctrl+Q')
            exitAct.setStatusTip('Exit application')
            exitAct.triggered.connect(qtw.qApp.quit)
            result = self._self.implObject().addAction(exitAct)
        else:
            # NOTE(review): addAction(label, onClick) already connects the
            # slot, and triggered.connect(onClick) connects it again —
            # onClick may fire twice per activation; confirm intent.
            result = self._self.implObject().addAction(label, onClick)
            if onClick != None:
                result.triggered.connect(onClick)
                #!TODO!result.onClick = onClick
        return result

    def doMenuBar_Init(self):
        return self.pimpl().menuBar()

    def doMenuBar_AddMenu(self,menuTitle):
        return self.impl().addMenu(menuTitle)

    # Dead wx-era variant kept for reference (string literal, never executed).
    '''
    else:
        result = Menu(self._parent)
        self._wxMenuBar.Append(result.implObject(),menuTitle)
        return result
    '''

    def doMdiPanel_Init(self):
        result = qtw.QMdiArea(self._parent.impl())
        return result

    def doTabPanel_Init(self):
        result = qtw.QTabWidget(self._parent.impl())
        '''
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(10)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(result.sizePolicy().hasHeightForWidth())
        result.setSizePolicy(sizePolicy)
        '''
        #result.setMinimumSize(QtCore.QSize(2080, 1630))
        result.setTabsClosable(True)
        return result

    def doTabPanel_AddTab(self, obj, title):
        return self.impl().addTab(obj.impl(),title)

    def doTabPanel_CurrentIndex(self):
        return self.impl().currentIndex()

    def doTab_Init(self):
        result = qtw.QWidget()
        self._parent.impl().addTab(result,"test")
        return result

    def doLayout_Init(self):
        # Vertical layout only when explicitly requested; horizontal default.
        orient = self.s()._kwargs['orient'] if 'orient' in self.s()._kwargs else None
        result = qtw.QVBoxLayout() if orient == orientation.VERTICAL else qtw.QHBoxLayout()
        return result

    def doLayout_AddElement(self, element):
        result = self.impl().addWidget(element.impl())
        return result

    def doLayout_Add(self,label, sizerFlags):
        # NOTE(review): ``label.inpl()`` looks like a typo for ``label.impl()``
        # (every other method uses .impl()) — confirm before changing.
        result = self.impl().addWidget(label.inpl())
        return result

    def doElement_Init(self):
        result = qtw.QWidget(self.pimpl())
        return result

    def doElement_Resize(self,w,h):
        result = self.impl().resize(w,h)
        return result

    def doElement_SizePolicy(self):
        result = self.impl().sizePolicy()
        return result

    def doElement_SetSizePolicy(self, sizePolicy):
        result = self.impl().setSizePolicy(sizePolicy)
        return result

    def doPanel_Init(self):
        result = qtw.QFrame(self.pimpl())
        return result

    def doLabel_Init(self):
        result = qtw.QLabel(self.pimpl())
        if 'label' in self.s()._kwargs:
            result.setText(self.s()._kwargs['label'])
        return result

    def doLabel_GetFont(self):
        # NOTE(review): ``self.impl`` is not called here (missing parens?);
        # sibling methods use ``self.impl()`` — confirm before changing.
        result = self.impl.font()
        return result

    def doLabel_SetFont(self, font):
        result = self.impl().setFont(font)
        return result

    def doDetailWindow_Init(self):
        result = DetailWindowBaseImpl(self.s())
        result._self = self.s()
        return result

    def doDetailWindow_Show(self):
        result = self.impl().show()
        return result
| 34.017595
| 141
| 0.617931
| 1,199
| 11,600
| 5.899083
| 0.253545
| 0.0205
| 0.046656
| 0.055139
| 0.231019
| 0.165418
| 0.146755
| 0.045808
| 0.009897
| 0
| 0
| 0.021635
| 0.262845
| 11,600
| 340
| 142
| 34.117647
| 0.80552
| 0.071379
| 0
| 0.192893
| 0
| 0.005076
| 0.021793
| 0
| 0
| 0
| 0
| 0.002941
| 0
| 1
| 0.167513
| false
| 0.015228
| 0.081218
| 0.015228
| 0.406091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
570eadcaa613e66d764e81bda74fc4c5ac38c715
| 2,538
|
py
|
Python
|
2. ExaminingBivariateandMultivariateRelationships/2. scatter_plots.py
|
michaelbwalker/Data-Cleaning-and-Exploration-with-Machine-Learning
|
9de44e5ad2e8d197b0a3c1b362b0377339278bd2
|
[
"MIT"
] | 7
|
2021-10-02T03:19:59.000Z
|
2022-03-21T21:24:14.000Z
|
2. ExaminingBivariateandMultivariateRelationships/2. scatter_plots.py
|
michaelbwalker/Data-Cleaning-and-Exploration-with-Machine-Learning
|
9de44e5ad2e8d197b0a3c1b362b0377339278bd2
|
[
"MIT"
] | null | null | null |
2. ExaminingBivariateandMultivariateRelationships/2. scatter_plots.py
|
michaelbwalker/Data-Cleaning-and-Exploration-with-Machine-Learning
|
9de44e5ad2e8d197b0a3c1b362b0377339278bd2
|
[
"MIT"
] | 6
|
2021-08-30T02:58:02.000Z
|
2022-02-01T07:46:49.000Z
|
# import pandas, matplotlib, and seaborn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Compact pandas display settings for the book's console output.
pd.set_option('display.width', 53)
pd.set_option('display.max_columns', 5)
pd.set_option('display.max_rows', 200)
pd.options.display.float_format = '{:,.0f}'.format
# Load the per-country covid totals (indexed by ISO code) and the 2019
# average land temperatures per station.
covidtotals = pd.read_csv("data/covidtotals.csv")
covidtotals.set_index("iso_code", inplace=True)
landtemps = pd.read_csv("data/landtemps2019avgs.csv")
# do a scatterplot of total_cases by total_deaths (with regression line)
ax = sns.regplot(x="total_cases_mill", y="total_deaths_mill", data=covidtotals)
ax.set(xlabel="Cases Per Million", ylabel="Deaths Per Million", title="Total Covid Cases and Deaths by Country")
plt.show()
# two side-by-side regressions sharing the y axis: cases vs. age 65+, GDP
fig, axes = plt.subplots(1,2, sharey=True)
sns.regplot(x=covidtotals.aged_65_older, y=covidtotals.total_cases_mill, ax=axes[0])
sns.regplot(x=covidtotals.gdp_per_capita, y=covidtotals.total_cases_mill, ax=axes[1])
axes[0].set_xlabel("Aged 65 or Older")
axes[0].set_ylabel("Cases Per Million")
axes[1].set_xlabel("GDP Per Capita")
axes[1].set_ylabel("")
plt.suptitle("Age 65 Plus and GDP with Cases Per Million")
plt.tight_layout()
fig.subplots_adjust(top=0.92)
plt.show()
# show the high elevation points (>1000) in a different color
low, high = landtemps.loc[landtemps.elevation<=1000], landtemps.loc[landtemps.elevation>1000]
low.shape[0], low.avgtemp.mean()
high.shape[0], high.avgtemp.mean()
plt.scatter(x="latabs", y="avgtemp", c="blue", data=low)
plt.scatter(x="latabs", y="avgtemp", c="red", data=high)
plt.legend(('low elevation', 'high elevation'))
plt.xlabel("Latitude (N or S)")
plt.ylabel("Average Temperature (Celsius)")
plt.title("Latitude and Average Temperature in 2019")
plt.show()
# show scatter plot with different regression lines by elevation group
landtemps['elevation_group'] = np.where(landtemps.elevation<=1000,'low','high')
sns.lmplot(x="latabs", y="avgtemp", hue="elevation_group", palette=dict(low="blue", high="red"), legend_out=False, data=landtemps)
plt.xlabel("Latitude (N or S)")
plt.ylabel("Average Temperature")
plt.legend(('low elevation', 'high elevation'), loc='lower left')
plt.yticks(np.arange(-60, 40, step=20))
plt.title("Latitude and Average Temperature in 2019")
plt.tight_layout()
plt.show()
# show this as a 3D plot: elevation x latitude x average temperature
fig = plt.figure()
plt.suptitle("Latitude, Temperature, and Elevation in 2019")
ax = plt.axes(projection='3d')
ax.set_xlabel("Elevation")
ax.set_ylabel("Latitude")
ax.set_zlabel("Avg Temp")
ax.scatter3D(landtemps.elevation, landtemps.latabs, landtemps.avgtemp)
plt.show()
| 39.046154
| 130
| 0.754137
| 403
| 2,538
| 4.657568
| 0.330025
| 0.018647
| 0.017581
| 0.028769
| 0.256793
| 0.198189
| 0.161961
| 0.10016
| 0.10016
| 0.051145
| 0
| 0.027214
| 0.087864
| 2,538
| 64
| 131
| 39.65625
| 0.783585
| 0.090623
| 0
| 0.211538
| 0
| 0
| 0.300608
| 0.011295
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
570f7be4fc6a73c331b26ffda6ddfc47a075df88
| 1,252
|
py
|
Python
|
minifyoperation.py
|
seece/cbpp
|
b6771c7933fa07444e660eafda6f06cf60edce01
|
[
"MIT"
] | null | null | null |
minifyoperation.py
|
seece/cbpp
|
b6771c7933fa07444e660eafda6f06cf60edce01
|
[
"MIT"
] | null | null | null |
minifyoperation.py
|
seece/cbpp
|
b6771c7933fa07444e660eafda6f06cf60edce01
|
[
"MIT"
] | null | null | null |
import re
from util import *
from operation import Operation, OperationResult
class Replacement:
    """Pairs a compiled regex with the substitution applied to its matches."""

    def __init__(self, regex, substitution):
        self.substitution = substitution
        self.regex = regex
class MinifyOperation(Operation):
    """Strips comments and squeezes whitespace around operators on a line.

    Only active when the ``minify`` command-line flag is set; otherwise
    each line passes through unchanged.
    """

    def __init__(self):
        # Tracked across lines; not read anywhere in this class — presumably
        # reserved for multi-line comment support (TODO confirm).
        self.inMultilineComment = False
        pass

    def apply(self, line, state):
        """Return an OperationResult whose line is the minified ``line``."""
        result = OperationResult(line, False)
        if not state.args.minify:
            return result
        # Work on the comment-free portion; remember where it ends so the
        # minified text can be spliced back over the same span.
        l = stripComments(line)
        strings = scanForStrings(l)
        commentStart = len(l)
        # Optional prefix: either a double-quoted string or arbitrary
        # non-whitespace text (NOTE(review): the empty alternations ``(|``
        # look suspicious — verify the pattern does what was intended).
        stringRegex = r'(("[^"]+")|(|[^"]*?)([^\s]*?))?'
        comments = r'(?P<comment>(|(\'|//)*$))'
        def string(s):
            # Normalize a missing (None) capture group to the empty string.
            if not s:
                return ""
            return s
        def replace(m, group):
            # Leave matches inside string literals untouched; otherwise keep
            # the prefix and the named group, dropping surrounding whitespace.
            if checkIfInsideString(m.start(group), strings):
                return string(m.group(0))
            return string(m.group(1)) + string(m.group(group))
        ops = []
        # Collapse whitespace around 1-2 character operators.
        ops.append(Replacement(re.compile(r'' + stringRegex + '\s*(?P<op>[=+\-*/\><,\^]{1,2})\s*'), lambda m: replace(m, "op")))
        # Turn a leading-zero float like ``0.5`` into ``.5``.
        ops.append(Replacement(re.compile(r'' + stringRegex + r'(?<=\D)(0)(?P<digit>\.\d+)'), lambda m: replace(m, "digit") ))
        #l = l.lstrip("\t")
        for o in ops:
            l = o.regex.sub(o.substitution, l)
        l = l.rstrip("\r\n")
        # Splice the minified text back over the original code span,
        # preserving any trailing comment.
        result.line = strInsert(result.line, 0, commentStart-1, l)
        return result
| 24.076923
| 122
| 0.626997
| 166
| 1,252
| 4.680723
| 0.379518
| 0.030888
| 0.046332
| 0.046332
| 0.105534
| 0.105534
| 0.105534
| 0
| 0
| 0
| 0
| 0.006776
| 0.17492
| 1,252
| 51
| 123
| 24.54902
| 0.745402
| 0.014377
| 0
| 0.055556
| 0
| 0
| 0.094968
| 0.073052
| 0
| 0
| 0
| 0
| 0
| 1
| 0.138889
| false
| 0.027778
| 0.083333
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
570fe23611397bcc46c1ab733771a0e34fdc4ba4
| 1,302
|
py
|
Python
|
ep004_helper.py
|
jpch89/effectivepython
|
97ba297bf987f346219bf8de5198c0817f5146e0
|
[
"MIT"
] | null | null | null |
ep004_helper.py
|
jpch89/effectivepython
|
97ba297bf987f346219bf8de5198c0817f5146e0
|
[
"MIT"
] | null | null | null |
ep004_helper.py
|
jpch89/effectivepython
|
97ba297bf987f346219bf8de5198c0817f5146e0
|
[
"MIT"
] | null | null | null |
from urllib.parse import parse_qs

# Parse the query string.
my_values = parse_qs('red=5&blue=0&green=',
                     keep_blank_values=True)
# print(repr(my_values))  # the original book's version
print(my_values)  # parse_qs returns a dict, so printing it directly is fine
# >>>
# {'red': ['5'], 'blue': ['0'], 'green': ['']}

# Query-string parameters can carry multiple values or blank values,
# and some parameters may not appear at all.
# dict.get retrieves a value without raising when the key is missing.
print('Red: ', my_values.get('red'))
print('Green: ', my_values.get('green'))
print('Opacity: ', my_values.get('opacity'))
print('-' * 50)

# Goal: return 0 when a parameter is missing from the query string
# or when its value is blank.
# Idea: empty strings and zero values are both falsy.
red = my_values.get('red', [''])[0] or 0
green = my_values.get('green', [''])[0] or 0
opacity = my_values.get('opacity', [''])[0] or 0
print('Red: %r' % red)
print('Green: %r' % green)
print('Opacity: %r' % opacity)
print('-' * 50)

# Goal: the final value should be an integer.
# Idea: convert the type explicitly.
red = int(my_values.get('red', [''])[0] or 0)
# This long expression is hard to read!
# Improvement 1: use the conditional expression added in Python 2.5.
red = my_values.get('red', [''])
red = int(red[0]) if red[0] else 0
# Improvement 2: use a multi-line if/else statement.
green = my_values.get('green', [''])
if green[0]:
    green = int(green[0])
else:
    green = 0
# Improvement 3: frequently used logic belongs in a helper function.
def get_first_value(values, key, default=0):
    """Return the first value for ``key`` converted to int, or ``default``.

    A missing key and a blank first value both yield ``default``.
    """
    first = values.get(key, [''])[0]
    return int(first) if first else default
| 23.25
| 48
| 0.609831
| 192
| 1,302
| 4.041667
| 0.333333
| 0.123711
| 0.127577
| 0.072165
| 0.275773
| 0.046392
| 0.046392
| 0
| 0
| 0
| 0
| 0.029245
| 0.185868
| 1,302
| 55
| 49
| 23.672727
| 0.70283
| 0.264977
| 0
| 0.133333
| 0
| 0
| 0.130203
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.033333
| 0
| 0.1
| 0.3
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5712c5f2bba3745161134c95e4c1fe8d35033684
| 5,808
|
py
|
Python
|
sc_cost_meter/utils.py
|
zaro0508/lambda-sc-cost-meter
|
2e10fa102af983f61a352ae633651fc3eaf64b19
|
[
"Apache-2.0"
] | null | null | null |
sc_cost_meter/utils.py
|
zaro0508/lambda-sc-cost-meter
|
2e10fa102af983f61a352ae633651fc3eaf64b19
|
[
"Apache-2.0"
] | null | null | null |
sc_cost_meter/utils.py
|
zaro0508/lambda-sc-cost-meter
|
2e10fa102af983f61a352ae633651fc3eaf64b19
|
[
"Apache-2.0"
] | null | null | null |
import boto3
import logging
import os
from datetime import datetime
# Module-level logger; DEBUG level so per-call details reach the handlers.
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def get_ec2_client():
    """Return a boto3 EC2 client."""
    return boto3.client('ec2')
def get_ssm_client():
    """Return a boto3 SSM client."""
    return boto3.client('ssm')
def get_ce_client():
    """Return a boto3 Cost Explorer client."""
    return boto3.client('ce')
def get_meteringmarketplace_client():
    """Return a boto3 Marketplace Metering client."""
    return boto3.client('meteringmarketplace')
def get_dynamo_client():
    """Return a boto3 DynamoDB client."""
    return boto3.client('dynamodb')
def get_env_var_value(env_var):
    '''Look up an environment variable by name.

    :param env_var: the environment variable
    :returns: its value, or None when the variable is not found (a warning
        is logged for unset or empty variables)
    '''
    result = os.getenv(env_var)
    if not result:
        log.warning(f'cannot get environment variable: {env_var}')
    return result
def get_marketplace_synapse_ids():
    '''Get the Synapse IDs of the Marketplace customers.

    Assumes a Dynamo DB table maps Synapse IDs to SC subscriber data.

    :return a list of synapse IDs; empty when no customers are in the DB
    '''
    id_attribute = "SynapseUserId"
    table_name = get_env_var_value('MARKETPLACE_ID_DYNAMO_TABLE_NAME')
    if not table_name:
        return []
    response = get_dynamo_client().scan(
        TableName=table_name,
        ProjectionExpression=id_attribute,
    )
    # A scan with no matches may omit "Items" entirely.
    return [item[id_attribute]["S"] for item in response.get("Items", [])]
def get_marketplace_customer_id(synapse_id):
    '''Get the Service Catalog customer ID from the Marketplace Dynamo DB.

    Assumes a Dynamo DB table maps Synapse IDs to SC subscriber data.

    :param synapse_id: synapse user id
    :return the Marketplace customer ID; None when no registration exists
    '''
    table_name = get_env_var_value('MARKETPLACE_ID_DYNAMO_TABLE_NAME')
    if not table_name:
        return None
    attribute = 'MarketplaceCustomerId'
    response = get_dynamo_client().get_item(
        Key={'SynapseUserId': {'S': synapse_id}},
        TableName=table_name,
        ConsistentRead=True,
        AttributesToGet=[attribute],
    )
    if "Item" not in response.keys():
        log.info(f'cannot find registration for synapse user: {synapse_id}')
        return None
    return response["Item"][attribute]["S"]
def get_marketplace_product_code(synapse_id):
    '''Get the registered Service Catalog customer product code.

    Assumes a Dynamo DB table maps Synapse IDs to SC subscriber data.

    :param synapse_id: synapse user id
    :return the Marketplace product code; None when no registration exists
    '''
    table_name = get_env_var_value('MARKETPLACE_ID_DYNAMO_TABLE_NAME')
    if not table_name:
        return None
    attribute = 'ProductCode'
    response = get_dynamo_client().get_item(
        Key={'SynapseUserId': {'S': synapse_id}},
        TableName=table_name,
        ConsistentRead=True,
        AttributesToGet=[attribute],
    )
    if "Item" not in response.keys():
        log.info(f'cannot find registration for synapse user: {synapse_id}')
        return None
    return response["Item"][attribute]["S"]
def get_customer_cost(customer_id, time_period, granularity):
    '''
    Get the total cost of all resources tagged with the customer_id for a
    given time period. The time_period and time granularity must match.

    :param customer_id: the Marketplace customer ID
    :param time_period: the cost time period
    :param granularity: the granularity of time HOURLY|DAILY|MONTHLY
    :return: (total cost of all resources as float, currency unit)
    '''
    cost_filter = {
        "Tags": {
            "Key": "marketplace:customerId",
            "Values": [customer_id],
        }
    }
    response = get_ce_client().get_cost_and_usage(
        TimePeriod=time_period,
        Granularity=granularity,
        Filter=cost_filter,
        Metrics=["UnblendedCost"],
    )
    unblended = response['ResultsByTime'][0]["Total"]["UnblendedCost"]
    return float(unblended["Amount"]), unblended["Unit"]
def report_cost(cost, customer_id, product_code):
    '''
    Report the incurred cost of the customer's resources to the AWS Marketplace

    :param cost: the cost (as a float value)
    :param customer_id: the Marketplace customer ID
    :param product_code: the Marketplace product code
    '''
    # TODO: use the marketplace get_dimension API to fetch this rate.
    cost_accrued_rate = 0.001
    quantity = int(cost / cost_accrued_rate)
    usage_record = {
        'Timestamp': datetime.utcnow(),
        'CustomerIdentifier': customer_id,
        'Dimension': 'costs_accrued',
        'Quantity': quantity,
    }
    response = get_meteringmarketplace_client().batch_meter_usage(
        UsageRecords=[usage_record],
        ProductCode=product_code,
    )
    log.debug(f'batch_meter_usage response: {response}')
    results = response["Results"][0]
    if results["Status"] == 'Success':
        log.info(f'usage record: {results}')
    else:
        # TODO: need to add a retry mechanism for failed reports
        unprocessed_records = response["UnprocessedRecords"][0]
        log.error(f'unprocessed record: {unprocessed_records}')
| 31.394595
| 89
| 0.719697
| 760
| 5,808
| 5.261842
| 0.214474
| 0.050013
| 0.042761
| 0.051763
| 0.385096
| 0.357839
| 0.27857
| 0.27857
| 0.256564
| 0.256564
| 0
| 0.003421
| 0.194731
| 5,808
| 184
| 90
| 31.565217
| 0.851614
| 0.292355
| 0
| 0.252033
| 0
| 0
| 0.17182
| 0.0399
| 0
| 0
| 0
| 0.005435
| 0
| 1
| 0.089431
| false
| 0
| 0.03252
| 0.04065
| 0.203252
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5714de071955ec101c9d0bd2f8b9cad2f55c7b5c
| 8,000
|
py
|
Python
|
source/metadata.py
|
sanmik/brain-network-viz
|
9c881e49c14c94e3f7ef4b7776d98c930716ee91
|
[
"MIT"
] | 5
|
2017-09-01T14:05:03.000Z
|
2019-07-13T07:52:49.000Z
|
source/metadata.py
|
sanmik/brain-network-viz
|
9c881e49c14c94e3f7ef4b7776d98c930716ee91
|
[
"MIT"
] | null | null | null |
source/metadata.py
|
sanmik/brain-network-viz
|
9c881e49c14c94e3f7ef4b7776d98c930716ee91
|
[
"MIT"
] | 1
|
2017-09-01T14:05:03.000Z
|
2017-09-01T14:05:03.000Z
|
# Library Imports
from itertools import islice
import csv
# Local Module Imports
import config
class Metadata(object):
    """
    Parses and stores the metadata header of a Node/Edge CSV input file.

    The first row of the file names the properties; each of the following
    ``num_rows`` rows describes one attribute of those properties. Values
    are accessed by property/attribute name::

        m = NodeMetadata('sample_nodes.csv', 3, 'Id')
        m.get('X', 'MIN_VAL')
    """

    def __init__(self, in_file, num_rows, prime_key):
        """
        Construct a Metadata object.

        Args:
          in_file: a file handle for an input csv file
          num_rows: the number of rows of the csv file defining metadata
          prime_key: the name of the column of primary keys (attribute
            names or item IDs)
        """
        # Property-name row plus all metadata rows, each stored as a list.
        self.data = []
        # Property name -> column index.
        self.prop_indices = {}
        # Attribute name (first cell of each stored row) -> row index.
        self.attr_indices = {}
        # Detect the delimiter (comma or tab are supported), then rewind.
        dialect = csv.Sniffer().sniff(in_file.read(1024), delimiters=",\t")
        in_file.seek(0)
        reader = csv.reader(in_file, dialect)
        # Consume the header row plus num_rows metadata rows.
        while reader.line_num < num_rows + 1:
            row = next(reader)
            if reader.line_num == 1:
                self.prop_indices = {name: col for col, name in enumerate(row)}
            self.attr_indices[row[0]] = reader.line_num - 1
            self.data.append(row)

    def get(self, prop, attr):
        """
        Return the value of attribute ``attr`` for property ``prop``.

        ``prop`` selects the column and ``attr`` the row of the stored
        metadata table; the raw string cell is returned.
        """
        return self.data[self.getAttrIdx(attr)][self.getPropIdx(prop)]

    def getPropIdx(self, prop):
        """Return the column index of metadata property ``prop``."""
        return self.prop_indices[prop]

    def getAttrIdx(self, attr):
        """Return the row index of metadata attribute ``attr``."""
        return self.attr_indices[attr]
class NodeMetadata(Metadata):
    """
    Subclass to implement Node specific Metadata functionality.
    """
    def __init__(self, in_file, num_rows, prime_key):
        """
        Construct a NodeMetadata object and build the per-layer lookup table.

        self.layers is a list of dicts for looking up Property names by layer:
          self.layers[0] =>
          {
            'C': (Property Name, CSV column index, min val, max val),
            'D': (Property Name, CSV column index, min val, max val),
            ...
          }
        To get the property name used for color in the first layer:
          self.layers[0]['C'][0]
        In the value tuples, Property Name and CSV column index will be None if
        no such property is specified in the input file.

        Args:
          in_file: A file handle for an input csv file
          num_rows: The number of rows of the csv file defining metadata
          prime_key: The name of the column of primary keys
        """
        super(NodeMetadata, self).__init__(in_file, num_rows, prime_key)
        self.layers = [{k: None for k in config.NODE_USE_AS_KEYS}]
        # Populate self.layers from the USE_AS metadata row: each property
        # column goes to the first layer with a free slot for its USE_AS key;
        # a new layer is created when every existing one already holds it.
        row_i = self.attr_indices['USE_AS']
        for col_i in range(config.NODE_LAYER_COLS_BEGIN, len(self.data[0])):
            prop_use_as = self.data[row_i][col_i]
            assert prop_use_as in config.NODE_USE_AS_KEYS
            # Find or create the destination layer object
            dest_layer = None
            for layer in self.layers:
                if not layer[prop_use_as]:
                    dest_layer = layer
                    break
            if not dest_layer:
                dest_layer = {k: None for k in config.NODE_USE_AS_KEYS}
                self.layers.append(dest_layer)
            min_val = self.data[self.getAttrIdx('MIN_VAL')][col_i]
            max_val = self.data[self.getAttrIdx('MAX_VAL')][col_i]
            prop_name = self.data[self.getAttrIdx(prime_key)][col_i]
            dest_layer[prop_use_as] = (prop_name, col_i, min_val, max_val)
        # Fill in any gaps in self.layers. If a layer didn't have property
        # metadata explicitly set, it takes on the default metadata values.
        for layer in self.layers:
            for use_as_key, v in layer.items():
                if not v:
                    layer[use_as_key] = config.NODE_DEFAULT_META[use_as_key]
    def getPropertyName(self, use_as, layer_i):
        """
        Get the Property name associated with the given USE_AS string for the given
        layer.
        Args:
          use_as: A USE_AS value. EG: C, D, etc.
          layer_i: The layer index
        Return:
          The string name of the associated property. None if that property wasn't
          set in the input file.
        """
        return self.layers[layer_i][use_as][0]
    def getPropertyIdx(self, use_as, layer_i):
        """
        Get the CSV column of the Property associated with the given USE_AS value
        for every node's layer i.
        Return:
          Numeric index of the CSV column. None if that property was not set in the
          input file.
        """
        return self.layers[layer_i][use_as][1]
    def getPropertyMinVal(self, use_as, layer_i):
        """
        Get the minimum value of the Property associated with the given USE_AS
        value for every node's layer i.
        Return:
          String minimum value for the property.
        """
        return self.layers[layer_i][use_as][2]
    def getPropertyMaxVal(self, use_as, layer_i):
        """
        Get the maximum value of the Property associated with the given USE_AS
        value for every node's layer i.
        Return:
          String maximum value for the property.
        """
        return self.layers[layer_i][use_as][3]
    def numLabeledLayers(self):
        """
        Return the number of node layers that have a label property explicitly set.
        """
        # BUG FIX: the previous len(filter(...)) raises TypeError on Python 3
        # because filter() returns a lazy iterator; count matches explicitly.
        # (sum(...) is also correct on Python 2, so this is version-agnostic.)
        return sum(1 for layer in self.layers if layer['L'][0] is not None)
# TODO: Write Unit Test
class EdgeMetadata(Metadata):
    """
    Subclass to implement Edge specific Metadata functionality.
    (Docstring previously said "Node specific" — copy-paste error.)
    TODO: Consider constructing a lookup table in the same way NodeMetadata does.
    """
    def getPropertyName(self, use_as):
        """
        Get the Property name associated with the given USE_AS string.
        Args:
          use_as: A USE_AS value. EG: C, D, etc.
        Return:
          The string name of the associated property, or None when no column
          carries the given USE_AS value.
        """
        use_as_row = self.data[self.getAttrIdx('USE_AS')]
        # Scan the USE_AS row; the header row (row 0) holds the property names.
        for col_i, val in enumerate(use_as_row):
            if val == use_as:
                return self.data[0][col_i]
        return None
| 33.057851
| 79
| 0.58775
| 1,061
| 8,000
| 4.303487
| 0.223374
| 0.036137
| 0.009855
| 0.024091
| 0.307709
| 0.272887
| 0.246605
| 0.229523
| 0.184845
| 0.154183
| 0
| 0.006065
| 0.278625
| 8,000
| 241
| 80
| 33.195021
| 0.785133
| 0.511875
| 0
| 0.028986
| 0
| 0
| 0.010601
| 0
| 0
| 0
| 0
| 0.008299
| 0.014493
| 1
| 0.15942
| false
| 0
| 0.043478
| 0
| 0.376812
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
571574f8dd7e9bd961d512815c9fd6535e05f1d8
| 20,165
|
py
|
Python
|
src/src_python/antares_xpansion/driver.py
|
pelefebvre/antares-xpansion
|
c62ed1a982e970325dec6007eb57a9c6288ef0c7
|
[
"Apache-2.0"
] | null | null | null |
src/src_python/antares_xpansion/driver.py
|
pelefebvre/antares-xpansion
|
c62ed1a982e970325dec6007eb57a9c6288ef0c7
|
[
"Apache-2.0"
] | null | null | null |
src/src_python/antares_xpansion/driver.py
|
pelefebvre/antares-xpansion
|
c62ed1a982e970325dec6007eb57a9c6288ef0c7
|
[
"Apache-2.0"
] | 1
|
2021-05-27T13:06:26.000Z
|
2021-05-27T13:06:26.000Z
|
"""
Class to control the execution of the optimization session
"""
import shutil
import configparser
import glob
import os
import subprocess
import sys
from pathlib import Path
from antares_xpansion.input_checker import check_candidates_file
from antares_xpansion.input_checker import check_settings_file
from antares_xpansion.xpansion_utils import read_and_write_mps
class XpansionDriver():
    """
    Class to control the execution of the optimization session.

    Chains the antares simulation step, the lp_namer problem-generation step
    and the benders optimization step, driven by an XpansionConfig object.
    """
    def __init__(self, config):
        """
        Initialise driver with a given antaresXpansion configuration,
        the system platform and parses the arguments
        :param config: configuration to use for the optimization
        :type config: XpansionConfig object
        """
        self.platform = sys.platform
        self.config = config
        self.candidates_list = []
        self.check_candidates()
        self.check_settings()
        print(self.candidates_list)

    def exe_path(self, exe):
        """
        prefixes the input exe with the install directory containing the binaries
        :param exe: executable name
        :return: path to specified executable
        """
        return os.path.normpath(os.path.join(self.config.installDir, exe))

    def solver_cmd(self, solver):
        """
        returns the command line used to launch the required solver
        (path to the executable plus its launching options)
        """
        assert solver in [self.config.MERGE_MPS,
                          self.config.BENDERS_MPI,
                          self.config.BENDERS_SEQUENTIAL]
        if solver == self.config.MERGE_MPS:
            return self.exe_path(solver) + " " + self.config.OPTIONS_TXT
        elif solver == self.config.BENDERS_MPI:
            return self.config.MPI_LAUNCHER + " " + \
                self.config.MPI_N + " " + str(self.config.n_mpi) + \
                " " + self.exe_path(solver) + " " + self.config.OPTIONS_TXT
        # solver == self.config.BENDERS_SEQUENTIAL (guaranteed by the assert above)
        return self.exe_path(solver) + " " + self.config.OPTIONS_TXT

    def antares(self):
        """
        returns antares binaries location
        """
        return os.path.normpath(os.path.join(self.config.installDir, self.config.ANTARES))

    def general_data(self):
        """
        returns path to general data ini file
        """
        return os.path.normpath(os.path.join(self.data_dir(),
                                             self.config.SETTINGS, self.config.GENERAL_DATA_INI))

    def settings(self):
        """
        returns path to settings ini file
        """
        return os.path.normpath(os.path.join(self.data_dir(), self.config.USER,
                                             self.config.EXPANSION, self.config.SETTINGS_INI))

    def candidates(self):
        """
        returns path to candidates ini file
        """
        return os.path.normpath(os.path.join(self.data_dir(), self.config.USER,
                                             self.config.EXPANSION, self.config.CANDIDATES_INI))

    def capacity_file(self, filename):
        """
        returns path to input capacity file
        """
        return os.path.normpath(os.path.join(self.data_dir(), self.config.USER,
                                             self.config.EXPANSION, self.config.CAPADIR, filename))

    def weights_file(self, filename):
        """
        returns the path to a yearly-weights file
        :param filename: name of the yearly-weights file
        :return: path to input yearly-weights file
        """
        return os.path.normpath(os.path.join(self.data_dir(), self.config.USER,
                                             self.config.EXPANSION, filename))

    def antares_output(self):
        """
        returns path to antares output data directory
        """
        return os.path.normpath(os.path.join(self.data_dir(), self.config.OUTPUT))

    def data_dir(self):
        """
        returns path to the data directory
        """
        return self.config.dataDir

    def _read_settings_options(self):
        """
        Parses the expansion settings file into a {key: value} dict.

        Each line is expected to look like ``key = value``; whitespace around
        both parts is stripped. (Factored out of the four reader methods below,
        which previously each duplicated this parsing.)
        """
        with open(self.settings(), 'r') as file_l:
            return {line.strip().split('=')[0].strip(): line.strip().split('=')[1].strip()
                    for line in file_l.readlines()}

    def is_accurate(self):
        """
        indicates if method to use is accurate by reading the uc_type in the settings file
        """
        # NOTE: the unreachable `assert False` that followed the return in the
        # original was dead code and has been removed.
        options = self._read_settings_options()
        uc_type = options.get(self.config.UC_TYPE,
                              self.config.settings_default[self.config.UC_TYPE])
        assert uc_type in [self.config.EXPANSION_ACCURATE, self.config.EXPANSION_FAST]
        return uc_type == self.config.EXPANSION_ACCURATE

    def is_relaxed(self):
        """
        indicates if method to use is relaxed by reading the relaxation_type
        from the settings file
        """
        options = self._read_settings_options()
        relaxation_type = options.get('master',
                                      self.config.settings_default["master"])
        assert relaxation_type in ['integer', 'relaxed', 'full_integer']
        return relaxation_type == 'relaxed'

    def optimality_gap(self):
        """
        prints and returns the optimality gap read from the settings file
        :return: gap value or 0 if the gap is set to -Inf
        """
        options = self._read_settings_options()
        optimality_gap_str = options.get('optimality_gap',
                                         self.config.settings_default["optimality_gap"])
        assert '%' not in optimality_gap_str
        print('optimality_gap_str :', optimality_gap_str)
        return float(optimality_gap_str) if optimality_gap_str != '-Inf' else 0

    def max_iterations(self):
        """
        prints and returns the maximum iterations read from the settings file
        :return: max iterations value or -1 if the parameter is set to +Inf
        """
        options = self._read_settings_options()
        max_iterations_str = options.get('max_iteration',
                                         self.config.settings_default["max_iteration"])
        assert '%' not in max_iterations_str
        print('max_iterations_str :', max_iterations_str)
        if max_iterations_str in ('+Inf', '+infini'):
            return -1
        return float(max_iterations_str)

    def additional_constraints(self):
        """
        returns path to additional constraints file, or "" when none is configured
        """
        options = self._read_settings_options()
        additional_constraints_filename = options.get("additional-constraints",
                                                      self.config.settings_default["additional-constraints"])
        if additional_constraints_filename == "":
            return ""
        return os.path.normpath(os.path.join(self.data_dir(), self.config.USER,
                                             self.config.EXPANSION, additional_constraints_filename))

    def nb_years(self):
        """
        returns the nbyears parameter value read from the general data file
        """
        ini_file = configparser.ConfigParser()
        ini_file.read(self.general_data())
        return float(ini_file['general']['nbyears'])

    def launch(self):
        """
        launch antares xpansion steps according to self.config.step
        """
        self.clear_old_log()
        if self.config.step == "full":
            lp_path = self.generate_mps_files()
            self.launch_optimization(lp_path)
        elif self.config.step == "antares":
            self.pre_antares()
            self.launch_antares()
        elif self.config.step == "getnames":
            if self.config.simulationName:
                self.get_names(self.config.simulationName)
            else:
                print("Missing argument simulationName")
                sys.exit(1)
        elif self.config.step == "lp":
            if self.config.simulationName:
                self.lp_step(self.config.simulationName)
                output_path = os.path.normpath(os.path.join(self.antares_output(),
                                                            self.config.simulationName))
                self.set_options(output_path)
            else:
                print("Missing argument simulationName")
                sys.exit(1)
        elif self.config.step == "optim":
            if self.config.simulationName:
                lp_path = os.path.normpath(os.path.join(self.antares_output(),
                                                        self.config.simulationName, 'lp'))
                self.launch_optimization(lp_path)
            else:
                print("Missing argument simulationName")
                sys.exit(1)
        else:
            print("Launching failed")
            sys.exit(1)

    def clear_old_log(self):
        """
        clears old log files for antares and the lp_namer
        """
        if (self.config.step in ["full", "antares"]) and (os.path.isfile(self.antares() + '.log')):
            os.remove(self.antares() + '.log')
        if (self.config.step in ["full", "lp"])\
                and (os.path.isfile(self.exe_path(self.config.LP_NAMER) + '.log')):
            os.remove(self.exe_path(self.config.LP_NAMER) + '.log')

    def check_candidates(self):
        """
        checks that candidates file has correct format
        """
        # check file existence
        if not os.path.isfile(self.candidates()):
            print('Missing file : %s was not retrieved.' % self.candidates())
            sys.exit(1)
        check_candidates_file(self)

    def check_settings(self):
        """
        checks that settings file has correct format
        """
        # check file existence
        if not os.path.isfile(self.settings()):
            print('Missing file : %s was not retrieved.' % self.settings())
            sys.exit(1)
        check_settings_file(self)

    def pre_antares(self):
        """
        modifies the general data file to configure antares execution
        """
        ini_file = configparser.ConfigParser()
        ini_file.read(self.general_data())
        ini_file[self.config.OPTIMIZATION][self.config.EXPORT_MPS] = "true"
        ini_file[self.config.OPTIMIZATION][self.config.EXPORT_STRUCTURE] = "true"
        # NOTE(review): USE_XPRS is set then immediately removed below — this
        # looks redundant; confirm the intent before simplifying.
        ini_file[self.config.OPTIMIZATION][self.config.USE_XPRS] = "false"
        ini_file.remove_option(self.config.OPTIMIZATION, self.config.USE_XPRS)
        ini_file.remove_option(self.config.OPTIMIZATION, self.config.INBASIS)
        ini_file.remove_option(self.config.OPTIMIZATION, self.config.OUTBASIS)
        if self.is_accurate():
            ini_file['general']['mode'] = 'expansion'
            ini_file['other preferences']['unit-commitment-mode'] = 'accurate'
            ini_file[self.config.OPTIMIZATION]['include-tc-minstablepower'] = 'true'
            ini_file[self.config.OPTIMIZATION]['include-tc-min-ud-time'] = 'true'
            ini_file[self.config.OPTIMIZATION]['include-dayahead'] = 'true'
        else:
            ini_file['general']['mode'] = 'Economy'
            ini_file['other preferences']['unit-commitment-mode'] = 'fast'
            ini_file[self.config.OPTIMIZATION]['include-tc-minstablepower'] = 'false'
            ini_file[self.config.OPTIMIZATION]['include-tc-min-ud-time'] = 'false'
            ini_file[self.config.OPTIMIZATION]['include-dayahead'] = 'false'
        with open(self.general_data(), 'w') as out_file:
            ini_file.write(out_file)

    def launch_antares(self):
        """
        launch antares
        :return: name of the new simulation's directory
        """
        # The new simulation directory is identified by diffing the output
        # directory listing before and after the run.
        old_output = os.listdir(self.antares_output())
        print([self.antares(), self.data_dir()])
        with open(self.antares() + '.log', 'w') as output_file:
            returned_l = subprocess.call(self.antares() + " " + self.data_dir(), shell=True,
                                         stdout=output_file,
                                         stderr=output_file)
            if returned_l != 0:
                print("WARNING: exited antares with status %d" % returned_l)
        new_output = os.listdir(self.antares_output())
        print(old_output)
        print(new_output)
        assert len(old_output) + 1 == len(new_output)
        diff = list(set(new_output) - set(old_output))
        return diff[0]

    def post_antares(self, antares_output_name):
        """
        creates necessary files for simulation using the antares simulation output files,
        the existing configuration files, get_names and the lpnamer executable
        :param antares_output_name: name of the antares simulation output directory
        :return: path to the lp output directory
        """
        output_path = os.path.normpath(os.path.join(self.antares_output(), antares_output_name))
        self.get_names(antares_output_name)
        lp_path = self.lp_step(antares_output_name)
        self.set_options(output_path)
        return lp_path

    def get_names(self, antares_output_name):
        """
        produces a .txt file describing the weekly problems:
        each line of the file contains :
        - mps file name
        - variables file name
        - constraints file name
        :param antares_output_name: name of the antares simulation output directory
        produces a file named with xpansionConfig.MPS_TXT
        """
        output_path = os.path.normpath(os.path.join(self.antares_output(), antares_output_name))
        mps_txt = read_and_write_mps(output_path)
        with open(os.path.normpath(os.path.join(output_path, self.config.MPS_TXT)), 'w') as file_l:
            for line in mps_txt.items():
                file_l.write(line[1][0] + ' ' + line[1][1] + ' ' + line[1][2] + '\n')
        glob_path = Path(output_path)
        area_files = [str(pp) for pp in glob_path.glob("area*.txt")]
        interco_files = [str(pp) for pp in glob_path.glob("interco*.txt")]
        assert len(area_files) == 1
        assert len(interco_files) == 1
        shutil.copy(area_files[0], os.path.normpath(os.path.join(output_path, 'area.txt')))
        shutil.copy(interco_files[0], os.path.normpath(os.path.join(output_path, 'interco.txt')))

    def lp_step(self, antares_output_name):
        """
        copies area and interco files and launches the lp_namer
        :param antares_output_name: name of the antares simulation output directory
        produces a file named with xpansionConfig.MPS_TXT
        :return: path to the lp output directory
        """
        output_path = os.path.normpath(os.path.join(self.antares_output(), antares_output_name))
        lp_path = os.path.normpath(os.path.join(output_path, 'lp'))
        # Start from a clean lp directory
        if os.path.isdir(lp_path):
            shutil.rmtree(lp_path)
        os.makedirs(lp_path)
        is_relaxed = 'relaxed' if self.is_relaxed() else 'integer'
        with open(self.exe_path(self.config.LP_NAMER) + '.log', 'w') as output_file:
            lp_cmd = self.exe_path(self.config.LP_NAMER) + " " + output_path + " " + \
                is_relaxed + " " + self.additional_constraints()
            returned_l = subprocess.call(lp_cmd,
                                         shell=True,
                                         stdout=output_file,
                                         stderr=output_file)
            if returned_l != 0:
                print("ERROR: exited lpnamer with status %d" % returned_l)
                sys.exit(1)
        return lp_path

    def launch_optimization(self, lp_path):
        """
        launch the optimization of the antaresXpansion problem using the solver
        selected by self.config.method
        :param lp_path: path to the lp directory containing input files
        (c.f. generate_mps_files)
        """
        old_cwd = os.getcwd()
        os.chdir(lp_path)
        print('Current directory is now : ', os.getcwd())
        solver = None
        if self.config.method == "mpibenders":
            solver = self.config.BENDERS_MPI
        elif self.config.method == "mergeMPS":
            solver = self.config.MERGE_MPS
            mergemps_lp_log = "log_merged.lp"
            if os.path.isfile(mergemps_lp_log):
                os.remove(mergemps_lp_log)
            mergemps_mps_log = "log_merged.mps"
            if os.path.isfile(mergemps_mps_log):
                # BUG FIX: previously removed mergemps_lp_log here a second
                # time, leaving the stale .mps log in place.
                os.remove(mergemps_mps_log)
        elif self.config.method == "sequential":
            solver = self.config.BENDERS_SEQUENTIAL
        elif self.config.method == "both":
            print("method both is not handled yet")
            sys.exit(1)
        else:
            print("Illegal optim method")
            sys.exit(1)
        # delete execution logs
        logfile_list = glob.glob('./' + solver + 'Log*')
        for file_path in logfile_list:
            try:
                os.remove(file_path)
            except OSError:
                print("Error while deleting file : ", file_path)
        if os.path.isfile(solver + '.log'):
            os.remove(solver + '.log')
        print('Launching {}, logs will be saved to {}.log'.format(solver,
                                                                  os.path.normpath(os.path.join(
                                                                      os.getcwd(), solver))))
        with open(solver + '.log', 'w') as output_file:
            returned_l = subprocess.call(self.solver_cmd(solver), shell=True,
                                         stdout=output_file,
                                         stderr=output_file)
            if returned_l != 0:
                print("ERROR: exited solver with status %d" % returned_l)
                sys.exit(1)
        os.chdir(old_cwd)

    def set_options(self, output_path):
        """
        generates a default option file for the solver
        """
        # computing the weight of slaves. Work on a copy so repeated calls do
        # not mutate the shared defaults stored on the config object.
        options_values = dict(self.config.options_default)
        options_values["SLAVE_WEIGHT_VALUE"] = str(self.nb_years())
        print('Number of years is {}, setting SLAVE_WEIGHT_VALUE to {} '.
              format(self.nb_years(), options_values["SLAVE_WEIGHT_VALUE"]))
        options_values["GAP"] = self.optimality_gap()
        options_values["MAX_ITERATIONS"] = self.max_iterations()
        # generate options file for the solver
        options_path = os.path.normpath(os.path.join(output_path, 'lp', self.config.OPTIONS_TXT))
        with open(options_path, 'w') as options_file:
            options_file.writelines(["%30s%30s\n" % (kvp[0], kvp[1])
                                     for kvp in options_values.items()])

    def generate_mps_files(self):
        """
        launches antares to produce mps files
        :return: path to the lp output directory
        """
        print("starting mps generation")
        # setting antares options
        print("-- pre antares")
        self.pre_antares()
        # launching antares
        print("-- launching antares")
        antares_output_name = self.launch_antares()
        # writing things
        print("-- post antares")
        lp_path = self.post_antares(antares_output_name)
        return lp_path
| 41.069246
| 129
| 0.579965
| 2,326
| 20,165
| 4.865864
| 0.125967
| 0.079519
| 0.024739
| 0.028274
| 0.436296
| 0.379042
| 0.363668
| 0.324439
| 0.276992
| 0.237144
| 0
| 0.003246
| 0.312571
| 20,165
| 490
| 130
| 41.153061
| 0.81323
| 0.161716
| 0
| 0.272109
| 0
| 0
| 0.086957
| 0.008759
| 0
| 0
| 0
| 0
| 0.040816
| 1
| 0.098639
| false
| 0
| 0.034014
| 0
| 0.214286
| 0.085034
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
57199578121fb89b3db3e976c4737dd3dcc14bf5
| 2,258
|
py
|
Python
|
lambdas/get_users.py
|
charvi-a/320-S20-Track1
|
ac97504fc1fdedb1c311773b015570eeea8a8663
|
[
"BSD-3-Clause"
] | 9
|
2019-12-30T16:32:22.000Z
|
2020-03-03T20:14:47.000Z
|
lambdas/get_users.py
|
charvi-a/320-S20-Track1
|
ac97504fc1fdedb1c311773b015570eeea8a8663
|
[
"BSD-3-Clause"
] | 283
|
2020-02-03T15:16:03.000Z
|
2020-05-05T03:18:59.000Z
|
lambdas/get_users.py
|
charvi-a/320-S20-Track1
|
ac97504fc1fdedb1c311773b015570eeea8a8663
|
[
"BSD-3-Clause"
] | 3
|
2020-04-16T15:23:29.000Z
|
2020-05-12T00:38:41.000Z
|
from package.query_db import query
from package.lambda_exception import LambdaException
def _parse_flag(raw_value, default):
    """
    Convert a raw event role flag to a boolean.

    Args:
        raw_value: The raw string from the event ('' when absent).
        default: The boolean to use when raw_value is the empty string.
    Return:
        True when raw_value equals 'true' (case-insensitive), the given
        default when raw_value is empty, False otherwise.
    """
    if raw_value == "":
        return default
    return raw_value.lower() == "true"


def handler(event, context):
    """
    Lambda entry point: list users, optionally filtered by role flags.

    When all three flags are empty every user is returned; otherwise each flag
    is parsed as a boolean and used in the WHERE clause. (The previous version
    triplicated the string-to-bool parsing inline; it is factored into
    _parse_flag with behavior preserved.)

    Args:
        event: dict with 'is_admin', 'is_supporter' and 'is_student' string values.
        context: Lambda context object (unused).
    Return:
        {'users': [{'user_id', 'first_name', 'last_name', 'email'}, ...]}
    Raises:
        LambdaException: when the database query fails.
    """
    is_admin = event['is_admin']
    is_supporter = event['is_supporter']
    is_student = event['is_student']

    if is_admin == "" and is_supporter == "" and is_student == "":
        get_users_sql = "SELECT id, first_name, last_name, email FROM users;"
        params = []
    else:
        is_admin = _parse_flag(is_admin, False)
        is_supporter = _parse_flag(is_supporter, False)
        # NOTE(review): is_student defaults to True when empty while the other
        # flags default to False — preserved from the original; confirm intent.
        is_student = _parse_flag(is_student, True)

        is_admin_param = {'name' : 'is_admin', 'value' : {'booleanValue' : is_admin}}
        is_supporter_param = {'name' : 'is_supporter', 'value' : {'booleanValue' : is_supporter}}
        is_student_param = {'name' : 'is_student', 'value' : {'booleanValue' : is_student}}
        get_users_sql = "SELECT id, first_name, last_name, email FROM users WHERE is_admin = :is_admin AND is_supporter = :is_supporter AND is_student = :is_student;"
        params = [is_admin_param, is_supporter_param, is_student_param]

    try:
        users = query(get_users_sql, params)['records']
    except Exception as e:
        raise LambdaException("500: Failed to get users, " + str(e))

    # Each record is a 4-tuple of typed value dicts from the Data API.
    response = {'users' : []}
    for u_id, f_name, l_name, email in users:
        response['users'].append({'user_id' : u_id["longValue"],
                                  'first_name' : f_name["stringValue"],
                                  'last_name' : l_name["stringValue"],
                                  'email' : email["stringValue"]})
    return response
| 34.738462
| 171
| 0.574402
| 261
| 2,258
| 4.662835
| 0.226054
| 0.09203
| 0.029581
| 0.023007
| 0.266228
| 0.09696
| 0.09696
| 0.09696
| 0.09696
| 0.09696
| 0
| 0.001926
| 0.310009
| 2,258
| 64
| 172
| 35.28125
| 0.779204
| 0
| 0
| 0.25
| 0
| 0.019231
| 0.211249
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019231
| false
| 0
| 0.038462
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
571ab14954af261729cb1d3fc0d5e206657e96fa
| 705
|
py
|
Python
|
leetCode/swap_nodes_in_pairs.py
|
yskang/AlgorithmPracticeWithPython
|
f7129bd1924a7961489198f0ee052d2cd1e9cf40
|
[
"MIT"
] | null | null | null |
leetCode/swap_nodes_in_pairs.py
|
yskang/AlgorithmPracticeWithPython
|
f7129bd1924a7961489198f0ee052d2cd1e9cf40
|
[
"MIT"
] | 1
|
2019-11-04T06:44:04.000Z
|
2019-11-04T06:46:55.000Z
|
leetCode/swap_nodes_in_pairs.py
|
yskang/AlgorithmPractice
|
31b76e38b4c2f1e3e29fb029587662a745437912
|
[
"MIT"
] | null | null | null |
# Title: Swap Nodes in Pairs
# Link: https://leetcode.com/problems/swap-nodes-in-pairs
class ListNode:
    """Singly linked list node holding a value and a reference to the next node."""

    def __init__(self, val=0, next=None):
        # Parameter name 'next' shadows the builtin but is kept for
        # compatibility with the conventional LeetCode signature.
        self.val = val
        self.next = next
class Problem:
    """LeetCode 24: swap every two adjacent nodes of a linked list."""

    def swap_pairs(self, head: ListNode) -> ListNode:
        """
        Swap adjacent node pairs in place and return the new head.

        Uses ``self`` as the dummy predecessor of ``head`` (so ``self.next``
        ends up pointing at the rewired list's head).
        """
        prev = self
        prev.next = head
        while prev.next and prev.next.next:
            first = prev.next
            second = first.next
            # Rewire prev -> second -> first -> (rest); the right-hand side is
            # evaluated before any assignment, so second.next is the old rest.
            prev.next, second.next, first.next = second, first, second.next
            prev = first
        return self.next
def solution():
    """Build the sample list 1->2->3->4 and return it with pairs swapped."""
    sample = ListNode(1, ListNode(2, ListNode(3, ListNode(4))))
    return Problem().swap_pairs(sample)
def main():
    """Entry point: print the head node of the pair-swapped sample list."""
    print(solution())


if __name__ == '__main__':
    main()
| 22.741935
| 61
| 0.58156
| 97
| 705
| 4.082474
| 0.371134
| 0.088384
| 0.055556
| 0.080808
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00998
| 0.289362
| 705
| 31
| 62
| 22.741935
| 0.780439
| 0.116312
| 0
| 0
| 0
| 0
| 0.012882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.190476
| false
| 0
| 0
| 0
| 0.380952
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
571ac253ee844d994243e9c2e1443c9c4aa20002
| 16,967
|
py
|
Python
|
detect_actions.py
|
CTewan/ACAM_Demo
|
b76cf4ce1289b8c311dbad1588f299ff67f7eaf3
|
[
"MIT"
] | null | null | null |
detect_actions.py
|
CTewan/ACAM_Demo
|
b76cf4ce1289b8c311dbad1588f299ff67f7eaf3
|
[
"MIT"
] | null | null | null |
detect_actions.py
|
CTewan/ACAM_Demo
|
b76cf4ce1289b8c311dbad1588f299ff67f7eaf3
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import imageio
import tensorflow as tf
import json
import csv
import os
import sys
# Make the bundled detector packages importable when running from the repo root.
sys.path.append("object_detection")
sys.path.append("object_detection/deep_sort")
sys.path.append("action_detection")
import argparse
import object_detection.object_detector as obj
import action_detection.action_detector as act
import time
# NOTE(review): DISPLAY appears unused in the visible code (main() derives its
# own `display` flag from the --display argument) — confirm before removing.
DISPLAY = False
# When True, output paths get a '_cams_actor_NN' suffix and extra tensors
# (cropped frames, i3d features, classifier weights) are fetched for class
# activation map visualization of a single actor.
SHOW_CAMS = False
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--video_path', type=str, required=False, default="")
parser.add_argument('-d', '--display', type=str, required=False, default="True")
args = parser.parse_args()
display = (args.display == "True" or args.display == "true")
#actor_to_display = 6 # for cams
video_path = args.video_path
basename = os.path.basename(video_path).split('.')[0]
out_vid_path = "./output_videos/%s_output.mp4" % (basename if not SHOW_CAMS else basename+'_cams_actor_%.2d' % actor_to_display)
clf_out_path = "./clf_output/{}_output.csv".format(basename if not SHOW_CAMS else basename+'_cams_actor_{}'.format(actor_to_display))
#out_vid_path = './output_videos/testing.mp4'
# video_path = "./tests/chase1Person1View3Point0.mp4"
# out_vid_path = 'output.mp4'
main_folder = './'
# NAS
obj_detection_model = 'ssd_mobilenet_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03'
obj_detection_graph = os.path.join("object_detection", "weights", obj_detection_model, "frozen_inference_graph.pb")
print("Loading object detection model at %s" % obj_detection_graph)
obj_detector = obj.Object_Detector(obj_detection_graph)
tracker = obj.Tracker()
print("Reading video file %s" % video_path)
reader = imageio.get_reader(video_path, 'ffmpeg')
action_freq = 8
# fps_divider = 1
print('Running actions every %i frame' % action_freq)
fps = reader.get_meta_data()['fps'] #// fps_divider
print("FPS: {}".format(fps))
W, H = reader.get_meta_data()['size']
T = tracker.timesteps
#if not display:
writer = imageio.get_writer(out_vid_path, fps=fps)
csv_file = open(clf_out_path, 'w', newline='')
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['Time', 'Person', 'Action', 'Probability'])
print("Writing output to %s" % out_vid_path)
# act_detector = act.Action_Detector('i3d_tail')
# ckpt_name = 'model_ckpt_RGB_i3d_pooled_tail-4'
act_detector = act.Action_Detector('soft_attn')
#ckpt_name = 'model_ckpt_RGB_soft_attn-16'
#ckpt_name = 'model_ckpt_soft_attn_ava-23'
ckpt_name = 'model_ckpt_soft_attn_pooled_cosine_drop_ava-130'
#input_frames, temporal_rois, temporal_roi_batch_indices, cropped_frames = act_detector.crop_tubes_in_tf([T,H,W,3])
memory_size = act_detector.timesteps - action_freq
updated_frames, temporal_rois, temporal_roi_batch_indices, cropped_frames = act_detector.crop_tubes_in_tf_with_memory([T,H,W,3], memory_size)
rois, roi_batch_indices, pred_probs = act_detector.define_inference_with_placeholders_noinput(cropped_frames)
ckpt_path = os.path.join(main_folder, 'action_detection', 'weights', ckpt_name)
act_detector.restore_model(ckpt_path)
prob_dict = {}
frame_cnt = 0
# Tewan
min_teacher_features = 3
teacher_identified = 0
#missed_frame_cnt = 0
#max_age = 120
#frame_skips = 60
#next_frame = 0
teacher_ids = []
matched_id = None
# Tewan
for cur_img in reader:
frame_cnt += 1
#if frame_cnt < next_frame:
# continue
# Detect objects and make predictions every 8 frames (0.3 seconds)
#if frame_cnt % action_freq == 0:
# Object Detection
expanded_img = np.expand_dims(cur_img, axis=0)
detection_list = obj_detector.detect_objects_in_np(expanded_img)
detection_info = [info[0] for info in detection_list]
# Updates active actors in tracker
tracker.update_tracker(detection_info, cur_img)
no_actors = len(tracker.active_actors)
"""
if no_actors == 0:
missed_frame_cnt += 1
if missed_frame_cnt >= max_age:
tracker.update_tracker(detection_info, cur_img)
no_actors = len(tracker.active_actors)
teacher_identified = False
tracker.set_invalid_track()
missed_frame_cnt = 0
print("Reset active actors. Current number: {}".format(no_actors))
"""
if frame_cnt % action_freq == 0 and frame_cnt > 16:
if no_actors == 0:
print("No actor found.")
continue
video_time = round(frame_cnt / fps, 1)
valid_actor_ids = [actor["actor_id"] for actor in tracker.active_actors]
print("frame count: {}, video time: {}s".format(frame_cnt, video_time))
probs = []
cur_input_sequence = np.expand_dims(np.stack(tracker.frame_history[-action_freq:], axis=0), axis=0)
rois_np, temporal_rois_np = tracker.generate_all_rois()
if teacher_identified < min_teacher_features:
prompt_img = visualize_detection_results(img_np=tracker.frame_history[-16],
active_actors=tracker.active_actors,
prob_dict=None)
cv2.imshow('prompt_img', prompt_img[:,:,::-1])
cv2.waitKey(500)
teacher_present = False
teacher_id = _prompt_user_input()
if not _check_teacher_in_frame(teacher_id=teacher_id):
print("Teacher not in this frame. Continuing.")
cv2.destroyWindow("prompt_img")
pass
else:
if _check_valid_teacher_id(teacher_id=teacher_id, valid_actor_ids=valid_actor_ids):
teacher_id = int(teacher_id)
teacher_identified += 1
teacher_present = True
else:
while not teacher_present:
print("Invalid ID.")
teacher_id = _prompt_user_input()
if not _check_teacher_in_frame(teacher_id=teacher_id):
print("Teacher not in this frame. Continuing.")
cv2.destroyWindow("prompt_img")
break
else:
if _check_valid_teacher_id(teacher_id=teacher_id, valid_actor_ids=valid_actor_ids):
teacher_id = int(teacher_id)
teacher_identified += 1
teacher_present = True
else:
pass
# Move on to next frame if teacher not in current frame
if not teacher_present:
continue
cv2.destroyWindow("prompt_img")
if teacher_id not in teacher_ids:
teacher_ids.append(teacher_id)
tracker.update_teacher_candidate_ids(teacher_candidate_id=teacher_id)
else:
tracker.set_valid_track()
# Identify idx of teacher for ROI selection
roi_idx = None
found_id = False
for idx, actor_info in enumerate(tracker.active_actors):
actor_id = actor_info["actor_id"]
for i in range(len(teacher_ids)-1, -1, -1):
if actor_id == teacher_ids[i]:
roi_idx = idx
matched_id = actor_info["actor_id"]
found_id = True
break
if found_id:
break
# Identify ROI and temporal ROI using ROI idx
if roi_idx is not None:
rois_np = rois_np[roi_idx]
temporal_rois_np = temporal_rois_np[roi_idx]
rois_np = np.expand_dims(rois_np, axis=0)
temporal_rois_np = np.expand_dims(temporal_rois_np, axis=0)
no_actors = 1
# If teacher not found (i.e. roi_idx is None) in current frame, move on to next frame
else:
continue
#max_actors = 5
#if no_actors > max_actors:
# no_actors = max_actors
# rois_np = rois_np[:max_actors]
# temporal_rois_np = temporal_rois_np[:max_actors]
# Might have issue of not using attention map because only predict action for 1 actor (memory issue)
feed_dict = {updated_frames:cur_input_sequence, # only update last #action_freq frames
temporal_rois: temporal_rois_np,
temporal_roi_batch_indices: np.zeros(no_actors),
rois:rois_np,
roi_batch_indices:np.arange(no_actors)}
run_dict = {'pred_probs': pred_probs}
if SHOW_CAMS:
run_dict['cropped_frames'] = cropped_frames
run_dict['final_i3d_feats'] = act_detector.act_graph.get_collection('final_i3d_feats')[0]
run_dict['cls_weights'] = act_detector.act_graph.get_collection('variables')[-2] # this is the kernel
out_dict = act_detector.session.run(run_dict, feed_dict=feed_dict)
probs = out_dict['pred_probs']
# associate probs with actor ids
print_top_k = 5
for bb in range(no_actors):
#act_probs = probs[bb]
#order = np.argsort(act_probs)[::-1]
#cur_actor_id = tracker.active_actors[bb]['actor_id']
act_probs = probs[bb]
order = np.argsort(act_probs)[::-1]
cur_actor_id = tracker.active_actors[roi_idx]["actor_id"]
#print(cur_actor_id == actor_id)
#print("Person %i" % cur_actor_id)
#print("act_probs: {}".format(act_probs))
#print("order: {}".format(order))
#print("tracker.active_actors[bb]: {}".format(tracker.active_actors[bb]))
cur_results = []
for pp in range(print_top_k):
#print('\t %s: %.3f' % (act.ACTION_STRINGS[order[pp]], act_probs[order[pp]]))
cur_results.append((act.ACTION_STRINGS[order[pp]], act_probs[order[pp]]))
csv_writer.writerow([video_time, cur_actor_id, act.ACTION_STRINGS[order[pp]], act_probs[order[pp]]])
prob_dict[cur_actor_id] = cur_results
if frame_cnt > 16:
out_img = visualize_detection_results(tracker.frame_history[-16],
tracker.active_actors,
prob_dict=prob_dict,
teacher_id=matched_id)
if SHOW_CAMS:
if tracker.active_actors:
actor_indices = [ii for ii in range(no_actors) if tracker.active_actors[ii]['actor_id'] == actor_to_display]
if actor_indices:
out_img = visualize_cams(out_img, cur_input_sequence, out_dict, actor_indices[0])
else:
continue
else:
continue
if display:
cv2.imshow('results', out_img[:,:,::-1])
cv2.waitKey(10)
writer.append_data(out_img)
#if not display:
writer.close()
csv_file.close()
def _prompt_user_input():
    """Ask the operator (via stdin) for the teacher's actor id.

    Returns the raw string that was typed; callers validate it.  Typing
    "None" indicates the teacher is absent from the current frame.
    """
    return input("Enter the id of the teacher (type None if teacher is not present in this frame): ")
def _check_teacher_in_frame(teacher_id):
if teacher_id == "None" or teacher_id == "none":
return False
return True
def _check_valid_teacher_id(teacher_id, valid_actor_ids):
try:
teacher_id = int(teacher_id)
if teacher_id in valid_actor_ids:
return True
else:
return False
except:
return False
# Fixed seed so the per-actor box colors are stable across runs.
np.random.seed(10)
# One random RGB color per actor id, indexed as COLORS[actor_id].
# Assumes actor ids stay below 1000 — TODO confirm with the tracker.
COLORS = np.random.randint(0, 255, [1000, 3])
def visualize_detection_results(img_np, active_actors, prob_dict=None, teacher_id=None):
    """Draw actor boxes (and, optionally, action probabilities) on a copy of a frame.

    img_np        : RGB frame; not modified (a copy is annotated).
    active_actors : list of dicts with 'actor_id', 'all_boxes', 'all_scores'.
    prob_dict     : optional {actor_id: [(action_name, prob), ...]}.
    teacher_id    : when given, only that actor is drawn.

    Returns the annotated image.
    """
    score_th = 0.30
    action_th = 0.20
    # copy the original image first
    disp_img = np.copy(img_np)
    H, W, C = img_np.shape
    #for ii in range(len(active_actors)):
    for ii in range(len(active_actors)):
        cur_actor = active_actors[ii]
        actor_id = cur_actor['actor_id']
        # Restrict drawing to the requested teacher, if any.
        if teacher_id is not None:
            if actor_id != teacher_id:
                continue
        if prob_dict:
            cur_act_results = prob_dict[actor_id] if actor_id in prob_dict else []
        try:
            # NOTE(review): [-16] presumably aligns the box with the
            # 16-frame-old image the caller passes in; actors tracked for
            # fewer than 16 frames fall into the IndexError guard below —
            # confirm this is the intended behavior.
            if len(cur_actor["all_boxes"]) > 0:
                cur_box, cur_score, cur_class = cur_actor['all_boxes'][-16], cur_actor['all_scores'][0], 1
            else:
                cur_box, cur_score, cur_class = cur_actor['all_boxes'][0], cur_actor['all_scores'][0], 1
        except IndexError:
            continue
        if cur_score < score_th:
            continue
        # Box coordinates appear normalized to [0,1]; scale to pixels.
        top, left, bottom, right = cur_box
        left = int(W * left)
        right = int(W * right)
        top = int(H * top)
        bottom = int(H * bottom)
        conf = cur_score
        label = obj.OBJECT_STRINGS[cur_class]['name']
        message = '%s_%i: %% %.2f' % (label, actor_id,conf)
        if prob_dict:
            # Keep only confident actions; action names truncated to 7 chars.
            action_message_list = ["%s:%.3f" % (actres[0][0:7], actres[1]) for actres in cur_act_results if actres[1]>action_th]
        color = COLORS[actor_id]
        color = (int(color[0]), int(color[1]), int(color[2]))
        cv2.rectangle(disp_img, (left,top), (right,bottom), color, 3)
        # Scale the label font with box width so the header text fits.
        font_size = max(0.5,(right - left)/50.0/float(len(message)))
        cv2.rectangle(disp_img, (left, top-int(font_size*40)), (right,top), color, -1)
        cv2.putText(disp_img, message, (left, top-12), 0, font_size, (255-color[0], 255-color[1], 255-color[2]), 1)
        if prob_dict:
            #action message writing
            cv2.rectangle(disp_img, (left, top), (right,top+10*len(action_message_list)), color, -1)
            for aa, action_message in enumerate(action_message_list):
                offset = aa*10
                cv2.putText(disp_img, action_message, (left, top+5+offset), 0, 0.5, (255-color[0], 255-color[1], 255-color[2]), 1)
    return disp_img
def visualize_cams(image, input_frames, out_dict, actor_idx):
    """Render class-activation maps for a few action classes next to the frame.

    Builds a 400-px-tall strip: the resized input image on the left and,
    for each matched action class, a row of CAM/frame overlays across the
    temporal feature steps, with a probability caption under each row.
    Returns the concatenated image with channels flipped back (BGR<->RGB).
    """
    #classes = ["walk", "bend", "carry"]
    #classes = ["sit", "ride"]
    classes = ["talk to", "watch (a", "listen to"]
    # Indices (0..59) of actions whose name contains one of `classes`.
    action_classes = [cc for cc in range(60) if any([cname in act.ACTION_STRINGS[cc] for cname in classes])]
    feature_activations = out_dict['final_i3d_feats']
    cls_weights = out_dict['cls_weights']
    input_frames = out_dict['cropped_frames'].astype(np.uint8)
    probs = out_dict["pred_probs"]
    # CAM = feature maps projected through the classifier weights.
    class_maps = np.matmul(feature_activations, cls_weights)
    # Normalize over ALL maps jointly so colors are comparable across classes.
    min_val = np.min(class_maps[:,:, :, :, :])
    max_val = np.max(class_maps[:,:, :, :, :]) - min_val
    normalized_cmaps = np.uint8((class_maps-min_val)/max_val * 255.)
    t_feats = feature_activations.shape[1]
    t_input = input_frames.shape[1]
    # Stride mapping each temporal feature step to an input frame index.
    index_diff = (t_input) // (t_feats+1)
    img_new_height = 400
    img_new_width = int(image.shape[1] / float(image.shape[0]) * img_new_height)
    img_to_show = cv2.resize(image.copy(), (img_new_width,img_new_height))[:,:,::-1]
    #img_to_concat = np.zeros((400, 800, 3), np.uint8)
    img_to_concat = np.zeros((400, 400, 3), np.uint8)
    for cc in range(len(action_classes)):
        cur_cls_idx = action_classes[cc]
        act_str = act.ACTION_STRINGS[action_classes[cc]]
        message = "%s:%%%.2d" % (act_str[:20], 100*probs[actor_idx, cur_cls_idx])
        for tt in range(t_feats):
            cur_cam = normalized_cmaps[actor_idx, tt,:,:, cur_cls_idx]
            cur_frame = input_frames[actor_idx, (tt+1) * index_diff, :,:,::-1]
            resized_cam = cv2.resize(cur_cam, (100,100))
            colored_cam = cv2.applyColorMap(resized_cam, cv2.COLORMAP_JET)
            overlay = cv2.resize(cur_frame.copy(), (100,100))
            overlay = cv2.addWeighted(overlay, 0.5, colored_cam, 0.5, 0)
            # Each class gets a 125-px row: 100 px of tiles plus caption space.
            img_to_concat[cc*125:cc*125+100, tt*100:(tt+1)*100, :] = overlay
        cv2.putText(img_to_concat, message, (20, 13+100+125*cc), 0, 0.5, (255,255,255), 1)
    final_image = np.concatenate([img_to_show, img_to_concat], axis=1)
    return final_image[:,:,::-1]
# Script entry point; main() is defined earlier in this file.
if __name__ == '__main__':
    main()
| 38.386878
| 145
| 0.590263
| 2,166
| 16,967
| 4.309326
| 0.17036
| 0.031819
| 0.024427
| 0.013499
| 0.269124
| 0.195093
| 0.152025
| 0.136276
| 0.136276
| 0.115063
| 0
| 0.025602
| 0.304768
| 16,967
| 441
| 146
| 38.473923
| 0.765683
| 0.114045
| 0
| 0.192727
| 0
| 0
| 0.075996
| 0.015488
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021818
| false
| 0.007273
| 0.043636
| 0
| 0.094545
| 0.043636
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
571af6febfc1dc4cb09b37f0fb44cc848ccf1059
| 5,556
|
py
|
Python
|
tests/test_parametric_shapes/test_SweepMixedShape.py
|
RemDelaporteMathurin/paramak
|
10552f1b89820dd0f7a08e4a126834877e3106b4
|
[
"MIT"
] | null | null | null |
tests/test_parametric_shapes/test_SweepMixedShape.py
|
RemDelaporteMathurin/paramak
|
10552f1b89820dd0f7a08e4a126834877e3106b4
|
[
"MIT"
] | null | null | null |
tests/test_parametric_shapes/test_SweepMixedShape.py
|
RemDelaporteMathurin/paramak
|
10552f1b89820dd0f7a08e4a126834877e3106b4
|
[
"MIT"
] | null | null | null |
import os
import unittest
from pathlib import Path
import pytest
from paramak import SweepMixedShape
class test_object_properties(unittest.TestCase):
    """Construction and volume tests for paramak.SweepMixedShape.

    Fix: the second test method was also named ``test_solid_construction``,
    which shadowed the first definition so that test never ran; it is
    renamed ``test_solid_construction_workplane_YZ``.  The six-point mixed
    profile repeated in every test is factored into ``_mixed_points``.
    """

    @staticmethod
    def _mixed_points(scale=1):
        # Standard profile mixing straight/spline/circle connections,
        # optionally scaled (scale=2 doubles every coordinate).
        return [
            (-10 * scale, -10 * scale, "straight"),
            (-10 * scale, 10 * scale, "spline"),
            (0, 20 * scale, "spline"),
            (10 * scale, 10 * scale, "circle"),
            (0, 0, "circle"),
            (10 * scale, -10 * scale, "straight"),
        ]

    def test_solid_construction(self):
        """checks that a SweepMixedShape solid can be created"""
        test_shape = SweepMixedShape(
            points=self._mixed_points(),
            path_points=[(50, 0), (20, 50), (50, 100)]
        )
        test_shape.create_solid()
        assert test_shape.solid is not None

    def test_solid_construction_workplane_YZ(self):
        """checks that a SweepMixedShape solid can be created with workplane
        YZ (renamed: previously duplicated test_solid_construction)"""
        test_shape = SweepMixedShape(
            points=self._mixed_points(),
            path_points=[(50, 0), (20, 50), (50, 100)],
            workplane='YZ',
            path_workplane="YX"
        )
        assert test_shape.solid is not None

    def test_solid_construction_workplane_XZ(self):
        """checks that a SweepMixedShape solid can be created with workplane
        XZ"""
        test_shape = SweepMixedShape(
            points=self._mixed_points(),
            path_points=[(50, 0), (20, 50), (50, 100)],
            workplane='XZ',
            path_workplane="XY"
        )
        assert test_shape.solid is not None

    def test_relative_shape_volume(self):
        """creates two SweepMixedShapes and checks that their relative volumes
        are correct"""
        path_points = [(50, 0), (30, 50), (50, 100)]
        test_shape_1 = SweepMixedShape(
            points=self._mixed_points(),
            path_points=path_points
        )
        test_shape_1.create_solid()
        test_shape_2 = SweepMixedShape(
            points=self._mixed_points(scale=2),
            path_points=path_points
        )
        test_shape_2.create_solid()
        # Doubling both profile dimensions quadruples the swept volume.
        assert test_shape_1.volume == pytest.approx(
            test_shape_2.volume * 0.25, rel=0.01)

    def test_iterable_azimuthal_placement(self):
        """checks that swept solids can be placed at multiple azimuth placement angles"""
        test_shape = SweepMixedShape(
            points=self._mixed_points(),
            path_points=[(50, 0), (30, 50), (60, 100), (50, 150)]
        )
        test_shape.create_solid()
        test_volume = test_shape.volume
        test_shape.azimuth_placement_angle = [0, 90, 180, 270]
        assert test_shape.volume == pytest.approx(test_volume * 4, rel=0.01)

    def test_workplane_path_workplane_error_raises(self):
        """checks that errors are raised when disallowed workplane and path_workplane
        combinations are used"""

        def workplane_and_path_workplane_equal():
            # Same plane for profile and path is disallowed.
            SweepMixedShape(
                points=self._mixed_points(),
                path_points=[(50, 0), (30, 50), (60, 100), (50, 150)],
                workplane="XZ",
                path_workplane="XZ"
            )

        def invalid_relative_workplane_and_path_workplane():
            # These two planes are an invalid combination.
            SweepMixedShape(
                points=self._mixed_points(),
                path_points=[(50, 0), (30, 50), (60, 100), (50, 150)],
                workplane="XZ",
                path_workplane="YZ"
            )

        self.assertRaises(ValueError, workplane_and_path_workplane_equal)
        self.assertRaises(
            ValueError,
            invalid_relative_workplane_and_path_workplane)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 29.089005
| 89
| 0.430166
| 508
| 5,556
| 4.519685
| 0.187008
| 0.04878
| 0.073171
| 0.04878
| 0.634146
| 0.576655
| 0.541812
| 0.541812
| 0.541812
| 0.526132
| 0
| 0.098446
| 0.444204
| 5,556
| 190
| 90
| 29.242105
| 0.645078
| 0.079374
| 0
| 0.660131
| 0
| 0
| 0.068092
| 0
| 0
| 0
| 0
| 0
| 0.045752
| 1
| 0.052288
| false
| 0
| 0.03268
| 0
| 0.091503
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
571dbed119712d82f6343f841d5c39a1d78ee427
| 996
|
py
|
Python
|
run_rnn.py
|
iqbaalmuhmd/CNNnumpyTest
|
eaecf5bc53a7b5c932a82d38cc6ca2a40430af4b
|
[
"MIT"
] | 332
|
2017-06-13T10:40:05.000Z
|
2022-03-11T15:10:02.000Z
|
run_rnn.py
|
iqbaalmuhmd/CNNnumpyTest
|
eaecf5bc53a7b5c932a82d38cc6ca2a40430af4b
|
[
"MIT"
] | 9
|
2017-06-16T02:36:06.000Z
|
2021-05-09T06:01:34.000Z
|
run_rnn.py
|
iqbaalmuhmd/CNNnumpyTest
|
eaecf5bc53a7b5c932a82d38cc6ca2a40430af4b
|
[
"MIT"
] | 105
|
2017-06-15T06:40:44.000Z
|
2022-03-09T06:38:59.000Z
|
import numpy as np
from deepnet.nnet import RNN
from deepnet.solver import sgd_rnn
def text_to_inputs(path):
    """
    Converts the given text into X and y vectors
    X : contains the index of all the characters in the text vocab
    y : y[i] contains the index of next character for X[i] in the text vocab

    Returns (X, y, vocab_size, char_to_idx, idx_to_char).

    Fix: the vocabulary set is now built once and shared by both mappings
    (the original called set(txt) twice, relying on two independent sets
    iterating in the same order).

    Note: the label for the final character is the index of '.', so the
    text must contain at least one '.' (KeyError otherwise).
    """
    with open(path) as f:
        txt = f.read()
    vocab = set(txt)
    char_to_idx = {char: i for i, char in enumerate(vocab)}
    idx_to_char = {i: char for i, char in enumerate(vocab)}
    X = np.array([char_to_idx[c] for c in txt])
    # y[i] is the index of the character following position i; the last
    # position wraps to '.' as an end-of-text marker.
    y = np.array([char_to_idx[c] for c in txt[1:]] + [char_to_idx['.']])
    vocab_size = len(char_to_idx)
    return X, y, vocab_size, char_to_idx, idx_to_char
if __name__ == "__main__":
    # Train a character-level RNN on the bundled text corpus.
    X, y, vocab_size, char_to_idx, idx_to_char = text_to_inputs('data/Rnn.txt')
    rnn = RNN(vocab_size,vocab_size,char_to_idx,idx_to_char)
    # Positional args to sgd_rnn presumably: epochs=10, batch/seq len=10,
    # learning rate=0.1 — TODO confirm against deepnet.solver.sgd_rnn.
    rnn = sgd_rnn(rnn,X,y,10,10,0.1)
| 27.666667
| 79
| 0.625502
| 179
| 996
| 3.240223
| 0.307263
| 0.082759
| 0.124138
| 0.077586
| 0.298276
| 0.298276
| 0.298276
| 0.212069
| 0.1
| 0.1
| 0
| 0.009485
| 0.259036
| 996
| 35
| 80
| 28.457143
| 0.776423
| 0.180723
| 0
| 0
| 0
| 0
| 0.026515
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.157895
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
571ddbe314e19b402b88195037ee31e371ecdddf
| 5,421
|
py
|
Python
|
lcclassifier/experiments/attnstats.py
|
oscarpimentel/astro-lightcurves-classifier
|
f697b43e22bd8c92c1b9df514be8565c736dd7cc
|
[
"MIT"
] | 1
|
2021-12-31T18:00:08.000Z
|
2021-12-31T18:00:08.000Z
|
lcclassifier/experiments/attnstats.py
|
oscarpimentel/astro-lightcurves-classifier
|
f697b43e22bd8c92c1b9df514be8565c736dd7cc
|
[
"MIT"
] | null | null | null |
lcclassifier/experiments/attnstats.py
|
oscarpimentel/astro-lightcurves-classifier
|
f697b43e22bd8c92c1b9df514be8565c736dd7cc
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import division
from . import _C
import torch
from fuzzytorch.utils import TDictHolder, tensor_to_numpy, minibatch_dict_collate
import numpy as np
from fuzzytools.progress_bars import ProgressBar, ProgressBarMulti
import fuzzytools.files as files
import fuzzytools.datascience.metrics as fcm
from fuzzytools.matplotlib.utils import save_fig
import matplotlib.pyplot as plt
import fuzzytorch.models.seq_utils as seq_utils
from scipy.optimize import curve_fit
from lchandler import _C as _Clchandler
from lchandler.plots.lc import plot_lightcurve
from .utils import check_attn_scores
EPS = _C.EPS
###################################################################################################################################################
def local_slope_f(time, m, n):
    """Linear model m*time + n, used as the curve_fit target function."""
    return m * time + n
def get_local_slope(days, obs, j, dj,
                    p0=(0, 0),
                    ):
    """Least-squares fit a line to a window of up to dj points centred on index j.

    days, obs : 1-D sequences of equal length (time steps / observations).
    j  : centre index of the window.
    dj : window width; must be odd and >= 3 so the window is symmetric.
    p0 : initial (slope, intercept) guess for curve_fit.  Fix: was the
         mutable default ``[0, 0]``; now an immutable tuple.

    Returns (slope, intercept, window_days, window_obs).  The window is
    clipped at sequence boundaries, so fewer than dj points may be used.
    """
    assert dj % 2 == 1, 'dj must be odd'
    assert dj >= 3
    lo = max(0, j - dj // 2)
    hi = min(j + dj // 2 + 1, len(obs))
    sub_days = days[lo:hi]  # window of time steps
    sub_obs = obs[lo:hi]    # window of observations
    # Fit obs ~ m*t + n over the window only.
    (local_slope_m, local_slope_n), _ = curve_fit(
        lambda t, m, n: t * m + n, sub_days, sub_obs, p0=p0)
    return local_slope_m, local_slope_n, sub_days, sub_obs
###################################################################################################################################################
def save_attnstats(train_handler, data_loader, save_rootdir,
                   eps:float=EPS,
                   dj=3,
                   min_len=3,
                   **kwargs):
    """Compute per-observation attention statistics and pickle them to disk.

    For every band and every light curve with at least ``min_len``
    observations, records the head-averaged attention score per time step
    together with local-slope (window ``dj``) and peak-distance features.
    Only runs for 'Parallel' models whose encoder returns attention scores;
    otherwise returns early without writing anything.
    """
    train_handler.load_model() # important, refresh to best model
    train_handler.model.eval() # important, model eval mode
    dataset = data_loader.dataset # get dataset
    is_parallel = 'Parallel' in train_handler.model.get_name()
    if not is_parallel:
        return
    attn_scores_collection = {b:[] for kb,b in enumerate(dataset.band_names)}
    with torch.no_grad():
        # Run the whole loader once with the encoder's extra (attention)
        # outputs enabled, then collate the minibatch dicts into one.
        tdicts = []
        for ki,in_tdict in enumerate(data_loader):
            train_handler.model.autoencoder['encoder'].add_extra_return = True
            _tdict = train_handler.model(TDictHolder(in_tdict).to(train_handler.device))
            train_handler.model.autoencoder['encoder'].add_extra_return = False
            tdicts += [_tdict]
        tdict = minibatch_dict_collate(tdicts)
        for kb,b in enumerate(dataset.band_names):
            p_onehot = tdict[f'input/onehot.{b}'][...,0] # (n,t)
            #p_rtime = tdict[f'input/rtime.{b}'][...,0] # (n,t)
            #p_dtime = tdict[f'input/dtime.{b}'][...,0] # (n,t)
            #p_x = tdict[f'input/x.{b}'] # (n,t,i)
            #p_rerror = tdict[f'target/rerror.{b}'] # (n,t,1)
            #p_rx = tdict[f'target/recx.{b}'] # (n,t,1)
            # print(tdict.keys())
            uses_attn = any([f'attn_scores' in k for k in tdict.keys()])
            if not uses_attn:
                return
            ### attn scores
            attn_scores = tdict[f'model/attn_scores/encz.{b}'] # (n,h,qt)
            assert check_attn_scores(attn_scores)
            attn_scores_mean = attn_scores.mean(dim=1)[...,None] # (n,h,qt)>(n,qt)>(n,qt,1) # mean attention score among the heads: not a distribution
            attn_scores_min_max = seq_utils.seq_min_max_norm(attn_scores_mean, p_onehot) # (n,qt,1)
            ### stats
            lcobj_names = dataset.get_lcobj_names()
            bar = ProgressBar(len(lcobj_names))
            for k,lcobj_name in enumerate(lcobj_names):
                lcobj = dataset.lcset[lcobj_name]
                lcobjb = lcobj.get_b(b) # complete lc
                p_onehot_k = tensor_to_numpy(p_onehot[k]) # (n,t)>(t)
                b_len = p_onehot_k.sum()
                assert b_len<=len(lcobjb), f'{b_len}<={len(lcobjb)}'
                # Skip light curves with too few observations in this band.
                if not b_len>=min_len:
                    continue
                attn_scores_k = tensor_to_numpy(attn_scores_mean[k,:b_len,0]) # (n,qt,1)>(t)
                attn_scores_min_max_k = tensor_to_numpy(attn_scores_min_max[k,:b_len,0]) # (n,qt,1)>(t)
                days = lcobjb.days[:b_len] # (t)
                obs = lcobjb.obs[:b_len] # (t)
                obse = lcobjb.obse[:b_len] # (t)
                snr = lcobjb.get_snr(max_len=b_len)
                max_obs = np.max(obs)
                peak_day = days[np.argmax(obs)]
                duration = days[-1]-days[0]
                bar(f'b={b}; lcobj_name={lcobj_name}; b_len={b_len}; snr={snr}; max_obs={max_obs}')
                lc_features = []
                for j in range(0, b_len):
                    # Per-time-step features: attention + local light-curve shape.
                    j_features = {
                        f'j':j,
                        f'attn_scores_k.j':attn_scores_k[j],
                        f'attn_scores_min_max_k.j':attn_scores_min_max_k[j],
                        f'days.j':days[j],
                        f'obs.j':obs[j],
                        f'obse.j':obse[j],
                        }
                    local_slope_m, local_slope_n, sub_days, sub_obs = get_local_slope(days, obs, j, dj) # get local slope
                    j_features.update({
                        f'local_slope_m.j~dj={dj}':local_slope_m,
                        f'local_slope_n.j~dj={dj}':local_slope_n,
                        f'peak_distance.j~dj={dj}~mode=local':days[j]-peak_day,
                        f'peak_distance.j~dj={dj}~mode=mean':np.mean(sub_days)-peak_day,
                        f'peak_distance.j~dj={dj}~mode=median':np.median(sub_days)-peak_day,
                        })
                    lc_features += [j_features]
                attn_scores_collection[b] += [{
                    f'c':dataset.class_names[lcobj.y],
                    f'b_len':b_len,
                    f'peak_day':peak_day,
                    f'duration':duration,
                    f'snr':snr,
                    f'max_obs':max_obs,
                    f'lc_features':lc_features,
                    }]
            bar.done()
    results = {
        'model_name':train_handler.model.get_name(),
        'survey':dataset.survey,
        'band_names':dataset.band_names,
        'class_names':dataset.class_names,
        'max_day':dataset.max_day,
        'attn_scores_collection':attn_scores_collection,
        }
    ### save file
    save_filedir = f'{save_rootdir}/{dataset.lcset_name}/id={train_handler.id}.d'
    files.save_pickle(save_filedir, results) # save file
    dataset.reset_max_day() # very important!!
    dataset.calcule_precomputed() # very important!!
    return
| 35.431373
| 147
| 0.65855
| 861
| 5,421
| 3.876887
| 0.212544
| 0.068904
| 0.030557
| 0.023966
| 0.180647
| 0.146195
| 0.11444
| 0.094068
| 0.038346
| 0.020971
| 0
| 0.00627
| 0.146836
| 5,421
| 153
| 148
| 35.431373
| 0.715459
| 0.110312
| 0
| 0.02459
| 0
| 0
| 0.121212
| 0.072193
| 0
| 0
| 0
| 0
| 0.032787
| 1
| 0.02459
| false
| 0
| 0.131148
| 0.008197
| 0.196721
| 0.008197
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
571ea096124b732422144c10209f4cc5cb3c06c7
| 1,473
|
py
|
Python
|
get_item_by_key.py
|
flyco2016/my_python_module_project
|
6e1ac7f074f7b57403d7b7c6adadab17a26fc27d
|
[
"Apache-2.0"
] | null | null | null |
get_item_by_key.py
|
flyco2016/my_python_module_project
|
6e1ac7f074f7b57403d7b7c6adadab17a26fc27d
|
[
"Apache-2.0"
] | 1
|
2019-01-04T06:37:06.000Z
|
2019-01-04T06:37:06.000Z
|
get_item_by_key.py
|
flyco2016/my_python_module_project
|
6e1ac7f074f7b57403d7b7c6adadab17a26fc27d
|
[
"Apache-2.0"
] | null | null | null |
# Retrieve values by key from nested dict/list structures
def getItemByKey(obj, key, result=None):
    """Recursively collect the value(s) stored under `key` in nested dicts/lists.

    Matches accumulate into `result`: list-valued matches are flattened into
    the accumulator.  When a key matches at some level, its value is NOT
    searched further.  Returns None when the key is absent, the lone value
    for a single match, otherwise a list of matches.  (Note: a single match
    whose value is a one-element list is unwrapped to that element.)

    Idiom fix: `isinstance(x, dict) or isinstance(x, list)` collapsed into
    the tuple form `isinstance(x, (dict, list))`.
    """
    if isinstance(obj, dict):
        for k in obj:
            if key == k:
                if isinstance(result, list):
                    if isinstance(obj[k], list):
                        result.extend(obj[k])
                    else:
                        result.append(obj[k])
                elif result is None:
                    result = obj[k]
                else:
                    # Second match: promote the scalar accumulator to a list.
                    result = [result]
                    result.append(obj[k])
            elif isinstance(obj[k], (dict, list)):
                # Descend only when the key did not match at this level.
                result = getItemByKey(obj[k], key, result)
    elif isinstance(obj, list):
        for item in obj:
            if isinstance(item, (dict, list)):
                result = getItemByKey(item, key, result)
    return result[0] if isinstance(result, list) and len(result) == 1 else result
def getItemByKeyInMyMethod(dict_obj, key, default=None):
    """Depth-first search nested dicts for `key`; return the first value found.

    Unlike getItemByKey this returns only the first match and does not
    descend into lists.  Returns `default` when the key is absent.

    Fixes: removed the unused `import types`; `type(v) is dict` replaced by
    `isinstance(v, dict)` (also covers dict subclasses); dropped the
    redundant `else` after a `return`.
    """
    for k, v in dict_obj.items():
        if k == key:
            return v
        if isinstance(v, dict):
            ret = getItemByKeyInMyMethod(v, key, default)
            # Propagate only a real hit (identity check, as before).
            if ret is not default:
                return ret
    return default
if __name__ == "__main__":
    # Smoke-test both lookup helpers against the same nested dict.
    sample = {'a': 1, 'b': 2, 'c': {'a': 1, 'b': {'b': 4}}}
    collected = getItemByKey(sample, 'b')
    first_hit = getItemByKeyInMyMethod(sample, 'b')
    print(collected, first_hit, sep='\n')
| 33.477273
| 81
| 0.491514
| 172
| 1,473
| 4.133721
| 0.284884
| 0.045007
| 0.063291
| 0.061885
| 0.067511
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011173
| 0.392396
| 1,473
| 44
| 82
| 33.477273
| 0.78324
| 0.006789
| 0
| 0.157895
| 0
| 0
| 0.012312
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.026316
| 0
| 0.184211
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
571f77622a48c2fb03cc44698429e534d7932593
| 7,166
|
py
|
Python
|
calories.py
|
davidsvaughn/har-pytorch
|
334733a1e870637c9077d16fc15e0b1954a6dfc5
|
[
"MIT"
] | 5
|
2020-09-17T12:17:13.000Z
|
2022-02-28T08:07:49.000Z
|
calories.py
|
davidsvaughn/har-pytorch
|
334733a1e870637c9077d16fc15e0b1954a6dfc5
|
[
"MIT"
] | null | null | null |
calories.py
|
davidsvaughn/har-pytorch
|
334733a1e870637c9077d16fc15e0b1954a6dfc5
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import json
from datetime import datetime
import psycopg2
import functools
import requests
##############################################################
## https://www.exrx.net/Calculators/WalkRunMETs
## https://www.cdc.gov/growthcharts/clinical_charts.htm
## https://help.fitbit.com/articles/en_US/Help_article/1141
##############################################################
URL = 'https://f73lzrw31i.execute-api.us-west-2.amazonaws.com/default/demo_data_server'
HEADER = {'x-api-key': 'XXXXXX'}
class adict(dict):
    """Dict whose entries are also reachable as attributes (d.x == d['x'])."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Aliasing __dict__ to the dict itself makes attribute and item
        # access share the same storage.
        self.__dict__ = self
def tofloat(x):
    """Parse a string to float, returning None for malformed or non-string input.

    The original bare `except:` is narrowed to the exceptions `.strip()` /
    `float()` can actually raise, so unrelated errors are not swallowed.
    """
    try:
        return float(x.strip())
    except (AttributeError, TypeError, ValueError):
        return None
# Cache per-pid lookups to avoid repeated HTTP round-trips.
@functools.lru_cache(maxsize=250)
def request_demo_data(pid):
    """Fetch demographic data for `pid` from the demo-data HTTP endpoint.

    Parses the response text — assumed to look like "{'key': value, ...}" —
    into an adict of floats (None where a value does not parse).
    NOTE(review): the [2:-2] slice and comma/colon splitting are brittle;
    confirm the endpoint never returns nested structures or quoted commas.
    """
    payload = {'pid': pid}
    r = requests.post(URL, headers=HEADER, data=json.dumps(payload))
    return adict((k.strip("' "), tofloat(v)) for k,v in (item.split(':') for item in r.text[2:-2].split(',')))
#############################################################################################
#############################################################################################
# Postgres connection settings for the 'revibe' database.
# SECURITY(review): credentials are hard-coded in source — move them to
# environment variables or a secrets manager and rotate the password.
revibe = adict()
revibe.DBNAME = 'revibe'
revibe.HOST = 'prd.c5fw7irdcxik.us-west-2.rds.amazonaws.com'
#revibe.PORT = '5432'
revibe.USER = 'dave'
revibe.PASS = 'tnoiSLoHjEBZE6JKsFgY'
revibe.SSLMODE = 'require'
# Lazily-created module-level DB connection; populated by run_sql().
CONN = None
def get_conn_string(creds):
    """Build a libpq-style connection string from a credentials mapping.

    SSLMODE and PORT are optional and appended only when present.
    """
    parts = [
        'host=' + creds.HOST,
        'dbname=' + creds.DBNAME,
        'user=' + creds.USER,
        'password=' + creds.PASS,
    ]
    if 'SSLMODE' in creds:
        parts.append('sslmode=' + creds.SSLMODE)
    if 'PORT' in creds:
        parts.append('port=' + creds.PORT)
    return ' '.join(parts)
def get_conn(creds):
    """Open and return a new psycopg2 connection for the given credentials."""
    return psycopg2.connect(get_conn_string(creds))
def run_sql(sql, verbose=False):
    """Execute `sql` against the shared connection and return a DataFrame.

    Opens the module-level CONN on first use and reuses it afterwards;
    the `with CONN` block commits/rolls back the implicit transaction.
    """
    global CONN
    if CONN is None:
        CONN = get_conn(revibe)
    if verbose:
        print(sql)
    with CONN:
        data = pd.read_sql(sql, CONN)
    if verbose:
        print(data.shape)
    return data
def get_pid_data(pid):
    """Load one person's demographic row from the DB and normalize it.

    Zero values in the DB are treated as "not recorded" and mapped to None;
    age is derived from the birthday, in months.  Raises ValueError when no
    row exists for `pid`.

    NOTE(review): pid is interpolated directly into the SQL string —
    ensure it is always an int before it reaches this function.
    """
    table = 'private.person_demographic_view'
    sql_command = "SELECT * FROM {} WHERE (person_id={});".format(table, pid)
    df = run_sql(sql_command)
    if df.size==0:
        raise ValueError('SQL returned no records:\n\t{}'.format(sql_command))
    data = adict()
    bday = df.birthday.values[0]
    sex = df.sex_id.values[0]
    grade = df.grade.values[0]
    ht = df.height.values[0]
    wt = df.weight.values[0]
    wrist = df.wrist_id.values[0]
    data.pid = pid
    data.age = None
    if bday is not None:
        data.bday = str(bday)
        bday = pd.Timestamp(str(bday)).to_pydatetime()
        # 60*60*2*365 seconds == one average month (3600*24*365/12).
        data.age = np.round((datetime.now()-bday).total_seconds() / (60*60*2*365), 2) ## in months
    # sex ids above 2 (and all zeros below) are treated as unknown.
    data.sex = None if sex==0 or sex>2 else sex
    data.grade = None if grade==0 else grade
    data.ht = None if ht==0 else ht
    data.wt = None if wt==0 else wt
    data.wrist = None if wrist==0 else wrist
    return data
## revised Harris-Benedict BMR equations...
def bmr_hb(dd, sex=None):
try:
sex = dd.sex if sex is None else sex
if sex==1:
return 6.078*dd.wt + 12.192*dd.ht - 0.473*dd.age + 88.4
if sex==2:
return 4.196*dd.wt + 7.874*dd.ht - 0.36*dd.age + 447.6
return None
except ex:
return None
## basal metabolic rate (kCals per day)
def BMR(dd):
try:
if dd.sex is None:
bmr = (bmr_hb(dd,1) + bmr_hb(dd,2))/2
else:
bmr = bmr_hb(dd)
return int(round(bmr))
except ex:
return None
## find index j into y that minimizes abs(x-y[j])
def xminy(x, y):
return abs(x-y).argmin(axis=-1)
class GrowthChart(object):
    """Imputes missing height/weight from a growth-chart TSV and derives strides."""
    ## columns: age ht_boy ht_girl wt_boy wt_girl
    def __init__(self, fn='growth.tsv'):#, path=None):
        # if path is None: path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
        # fn = os.path.join(path, fn)
        df = pd.read_csv(fn, sep='\t')
        # G: chart rows (age, ht/wt by sex).  S: stride-length factors —
        # presumably [[walk_boy, walk_girl], [run_boy, run_girl]], inferred
        # from the fill_data indexing below; TODO confirm.
        self.G = df.values
        self.S = np.array([[0.415, 0.413], [0.675, 0.57]])
    def fill_data(self, d):
        """Fill d.ht / d.wt from the chart when missing; set d.ws / d.rs strides.

        Requires d.age, or both d.ht and d.wt; raises ValueError otherwise.
        NOTE(review): strides are only derivable when d.age is known, since
        they depend on the sex columns resolved here.
        """
        if d.age is None:
            if d.ht is None or d.wt is None:
                raise ValueError('Either birthday, or both height and weight, must be non-null')
        else:
            # Nearest chart row by age; average both sexes when sex unknown.
            row = xminy(d.age, self.G[:,0])
            cols = np.array([d.sex] if d.sex is not None else [1, 2])
            if d.ht is None:
                d.ht = self.G[row, cols].mean()
            if d.wt is None:
                d.wt = self.G[row, cols+2].mean()
            d.ws = np.round(d.ht * self.S[0, cols-1].mean(), 2) ## walk stride
            d.rs = np.round(d.ht * self.S[1, cols-1].mean(), 2) ## run stride
        #d.bmr = BMR(d) ## basal metabolic rate (kCals per day)
GC = None
@functools.lru_cache(maxsize=250)
def get_demo_data(pid):
    """Cached: DB demographics for `pid` with chart-imputed missing fields."""
    global GC
    # Lazily build the shared growth chart on first use.
    if GC is None:
        GC = GrowthChart()
    data = get_pid_data(pid)
    GC.fill_data(data)
    return data
def fixnum(x, dtype=float):
    """Coerce x with dtype, mapping both None and zero to None ("unknown")."""
    if x is None:
        return None
    value = dtype(x)
    return None if value == 0 else value
def validate_demo_data(data):
    """Normalize raw demographic fields in place; derive age in months.

    Coerces ht/wt to float and sex to int (0 and unknown ids become None),
    then computes age from data.bday.  Raises ValueError when neither a
    birthday nor both height and weight are available.
    """
    data.ht = fixnum(data.ht)
    data.wt = fixnum(data.wt)
    data.sex = fixnum(data.sex, int)
    if data.sex is not None and data.sex > 2:
        data.sex = None
    data.age = None
    if data.bday is not None:
        parsed = pd.Timestamp(str(data.bday)).to_pydatetime()
        # 60*60*2*365 seconds == one average month (3600*24*365/12).
        data.age = np.round((datetime.now() - parsed).total_seconds() / (60*60*2*365), 2)
    elif data.ht is None or data.wt is None:
        raise ValueError('Either birthday, or both height and weight, must be non-null')
    # data.bday = data.bday.strftime('%Y-%m-%d')
@functools.lru_cache(maxsize=250)
def make_demo_data(bday=None, ht=None, wt=None, sex=None):
    """Build a cached demographic record from raw (possibly string) fields.

    Empty strings/zeros are mapped to None, the fields are validated, and
    missing height/weight are imputed from the shared growth chart.
    """
    data = adict(
        bday=bday or None,
        ht=ht or None,
        wt=wt or None,
        sex=sex or None,
    )
    validate_demo_data(data)
    #########
    global GC
    if GC is None:
        GC = GrowthChart()
    GC.fill_data(data)
    return data
## s : speed in mph... sec by second vector of speeds....
## w : weight in lbs
## mode : 2=='walk', 3=='run'
## returns : calories summed across all seconds
def calsum(s, w, mode=2):
    """Total calories burned over a per-second speed trace.

    s    : speeds in mph, one value per second (array-like).
    w    : weight in lbs.
    mode : 2 == walk, 3 == run.
    Returns the calorie total summed across all seconds.
    """
    SPEED_UNIT, WEIGHT_UNIT = 26.8, 2.2
    speed = s * SPEED_UNIT      # mph -> m/min
    weight = w / WEIGHT_UNIT    # lbs -> kg
    if mode == 3:  ## run mode
        vo = 0.2 * speed
    else:  ## walk mode == 2
        # Blend the walking and fast-walking VO2 formulas around ~100 m/min.
        fast_vo = 21.11 - 0.3593*speed + 0.003*speed*speed - 3.5
        walk_vo = 0.1 * speed
        band = 30
        blend = np.clip((speed - (100 - band)) / (2*band), 0, 1)
        vo = walk_vo*(1. - blend) + fast_vo*blend
    #############################
    return np.sum(vo * weight) / 12000.0
###################################
if __name__ == "__main__":
    # NOTE(review): pid is reassigned repeatedly; only the final value
    # (10885) survives — the earlier lines look like leftover experiments,
    # and pid itself is unused below since the request/get calls are
    # commented out.
    pid = 135 ## 135,"1974-05-28",1,0,74,196,1
    pid = 169 ## 169,"1980-12-01",1,12,72,170,2
    pid = 18947 ## 18947,"2010-08-28",0,0,0,0,0
    pid = 10885 ##
    # dd = request_demo_data(pid)
    # print(dd)
    # dd = get_demo_data(pid)
    # print(dd)
    #############
    dd = make_demo_data(bday='2010-08-28', ht='54.035', wt='69.69', sex='3')
    # dd = make_demo_data(ht='70', wt='120', sex='2')
    print(dd)
| 30.887931
| 110
| 0.558889
| 1,099
| 7,166
| 3.561419
| 0.272975
| 0.022994
| 0.011242
| 0.018396
| 0.169647
| 0.164027
| 0.123659
| 0.106285
| 0.106285
| 0.106285
| 0
| 0.048942
| 0.241557
| 7,166
| 232
| 111
| 30.887931
| 0.671205
| 0.144711
| 0
| 0.202312
| 0
| 0.00578
| 0.087707
| 0.013209
| 0
| 0
| 0
| 0
| 0
| 1
| 0.098266
| false
| 0.011561
| 0.040462
| 0.00578
| 0.254335
| 0.017341
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5721e5bf810d647e593fd1d82e6a86cb2fa7e570
| 14,744
|
py
|
Python
|
alphad3m/alphad3m/metalearning/grammar_builder.py
|
VIDA-NYU/alphad3m
|
db40193a448300d87442c451f9da17fa5cb845fd
|
[
"Apache-2.0"
] | null | null | null |
alphad3m/alphad3m/metalearning/grammar_builder.py
|
VIDA-NYU/alphad3m
|
db40193a448300d87442c451f9da17fa5cb845fd
|
[
"Apache-2.0"
] | null | null | null |
alphad3m/alphad3m/metalearning/grammar_builder.py
|
VIDA-NYU/alphad3m
|
db40193a448300d87442c451f9da17fa5cb845fd
|
[
"Apache-2.0"
] | null | null | null |
import logging
import numpy as np
from scipy import stats
from collections import OrderedDict
from alphad3m.metalearning.resource_builder import load_metalearningdb
from alphad3m.metalearning.dataset_similarity import get_similar_datasets
from alphad3m.primitive_loader import load_primitives_by_name, load_primitives_by_id
logger = logging.getLogger(__name__)
def load_related_pipelines(dataset_path, target_column, task_keywords):
    """Collect metalearning-DB pipelines that ran on datasets similar to this one.

    Returns a list of dicts with keys: 'pipeline' (list of primitive names),
    'score', 'metric', 'dataset', 'pipeline_repr'.  Pipelines containing
    primitives no longer available locally are skipped.
    """
    available_primitives = load_primitives_by_id()
    all_pipelines = load_metalearningdb()
    similar_datasets = get_similar_datasets('dataprofiles', dataset_path, target_column, task_keywords)
    task_pipelines = []
    for similar_dataset in similar_datasets.keys():
        # Skip similar datasets with no recorded pipeline runs.
        if similar_dataset not in all_pipelines['pipeline_performances']:
            continue
        for pipeline_id, pipeline_performances in all_pipelines['pipeline_performances'][similar_dataset].items():
            primitive_ids = all_pipelines['pipeline_structure'][pipeline_id]
            # NOTE(review): is_available_primitive is not among the visible
            # imports — presumably defined elsewhere in this module.
            if is_available_primitive(primitive_ids, available_primitives):
                # One entry per recorded (score, metric) run of this pipeline.
                for index in range(len(pipeline_performances['score'])):
                    primitives = [available_primitives[p] for p in primitive_ids] # Use the current names of primitives
                    score = pipeline_performances['score'][index]
                    metric = pipeline_performances['metric'][index]
                    task_pipelines.append({'pipeline': primitives, 'score': score, 'metric': metric, 'dataset': similar_dataset,
                                           'pipeline_repr': '_'.join(primitives)})
    logger.info('Found %d related pipelines', len(task_pipelines))
    return task_pipelines
def create_metalearningdb_grammar(task_name, dataset_path, target_column, task_keywords):
    """Derive a task grammar (and its primitive pool) from related pipelines.

    Returns (grammar, primitives); grammar is None when no patterns exist.
    """
    related = load_related_pipelines(dataset_path, target_column, task_keywords)
    patterns, primitives = extract_patterns(related)
    merged, optional_elements = merge_patterns(patterns)
    grammar = format_grammar(task_name, merged, optional_elements)
    return grammar, primitives
def format_grammar(task_name, patterns, empty_elements):
    """Render merged patterns as a CFG string rooted at ``S -> task_name``.

    Each distinct pattern element gets a ``'primitive_terminal'`` production;
    elements listed in `empty_elements` may additionally derive ``'E'``.
    Returns None (and logs) when `patterns` is empty.

    Fixes: ungrammatical log message; ``set([...])`` replaced with a set
    comprehension.
    """
    if len(patterns) == 0:
        logger.info('Empty patterns, no grammar has been generated')
        return None
    grammar = 'S -> %s\n' % task_name
    grammar += task_name + ' -> ' + ' | '.join([' '.join(p) for p in patterns])
    # Sort to have a deterministic grammar
    for element in sorted({e for sublist in patterns for e in sublist}):
        production_rule = element + " -> 'primitive_terminal'"
        if element in empty_elements:
            production_rule += " | 'E'"
        grammar += '\n' + production_rule
    logger.info('Grammar obtained:\n%s', grammar)
    return grammar
def extract_patterns(pipelines, max_nro_patterns=15, min_frequency=3, adtm_threshold=0.5, mean_score_threshold=0.5, ratio_datasets=0.2):
    """Mine frequent, well-performing pipeline patterns (sequences of primitive types).

    pipelines: list of dicts as produced by load_related_pipelines().
    max_nro_patterns: keep at most this many patterns (best mean score first).
    min_frequency: minimum number of pipelines a pattern must come from.
    adtm_threshold: drop pipelines whose ADTM exceeds this value.
    mean_score_threshold: drop patterns whose mean score is below this value.
    ratio_datasets: a pattern must appear in at least this fraction of datasets.
    Returns (patterns, primitive_info): primitive-type sequences plus a dict
    with the primitive hierarchy and global/local correlation probabilities.
    """
    available_primitives = load_primitives_by_name()
    pipelines = calculate_adtm(pipelines)
    patterns = {}
    for pipeline_data in pipelines:
        if pipeline_data['adtm'] > adtm_threshold:
            # Skip pipelines with average distance to the minimum higher than the threshold
            continue
        # A pattern is the sequence of primitive *types* in a pipeline
        primitive_types = [available_primitives[p]['type'] for p in pipeline_data['pipeline']]
        pattern_id = ' '.join(primitive_types)
        if pattern_id not in patterns:
            patterns[pattern_id] = {'structure': primitive_types, 'primitives': set(), 'datasets': set(), 'pipelines': [], 'scores': [], 'adtms': [], 'frequency': 0}
        patterns[pattern_id]['primitives'].update(pipeline_data['pipeline'])
        patterns[pattern_id]['datasets'].add(pipeline_data['dataset'])
        patterns[pattern_id]['pipelines'].append(pipeline_data['pipeline'])
        patterns[pattern_id]['scores'].append(pipeline_data['score'])
        patterns[pattern_id]['adtms'].append(pipeline_data['adtm'])
        patterns[pattern_id]['frequency'] += 1
    logger.info('Found %d different patterns, after creating the portfolio', len(patterns))
    # TODO: Group these removing conditions into a single loop
    # Remove patterns with fewer elements than the minimum frequency
    patterns = {k: v for k, v in patterns.items() if v['frequency'] >= min_frequency}
    logger.info('Found %d different patterns, after removing uncommon patterns', len(patterns))
    # Remove patterns with undesirable primitives (AlphaD3M doesn't have support to handle some of these primitives)
    blacklist_primitives = {'d3m.primitives.data_transformation.dataframe_to_ndarray.Common',
                            'd3m.primitives.data_transformation.list_to_dataframe.DistilListEncoder',
                            'd3m.primitives.data_transformation.ndarray_to_dataframe.Common',
                            'd3m.primitives.data_transformation.horizontal_concat.DSBOX',
                            'd3m.primitives.data_transformation.horizontal_concat.DataFrameCommon',
                            'd3m.primitives.data_transformation.multi_horizontal_concat.Common',
                            'd3m.primitives.data_transformation.conditioner.Conditioner',
                            'd3m.primitives.data_transformation.remove_semantic_types.Common',
                            'd3m.primitives.data_transformation.replace_semantic_types.Common',
                            'd3m.primitives.data_transformation.remove_columns.Common',
                            'd3m.primitives.operator.dataset_map.DataFrameCommon',
                            'd3m.primitives.data_transformation.i_vector_extractor.IVectorExtractor'}
    patterns = {k: v for k, v in patterns.items() if not blacklist_primitives & v['primitives']}
    logger.info('Found %d different patterns, after blacklisting primitives', len(patterns))
    unique_datasets = set()
    # Aggregate per-pattern statistics and count all datasets seen
    for pattern_id in patterns:
        scores = patterns[pattern_id]['scores']
        adtms = patterns[pattern_id]['adtms']
        patterns[pattern_id]['mean_score'] = np.mean(scores)
        patterns[pattern_id]['mean_adtm'] = np.mean(adtms)
        unique_datasets.update(patterns[pattern_id]['datasets'])
    # Remove patterns with low performances
    patterns = {k: v for k, v in patterns.items() if v['mean_score'] >= mean_score_threshold}
    logger.info('Found %d different patterns, after removing low-performance patterns', len(patterns))
    # Remove patterns with low variability
    patterns = {k: v for k, v in patterns.items() if len(v['datasets']) >= len(unique_datasets) * ratio_datasets}
    logger.info('Found %d different patterns, after removing low-variability patterns', len(patterns))
    if len(patterns) > max_nro_patterns:
        logger.info('Found many patterns, selecting top %d (max_nro_patterns)' % max_nro_patterns)
        sorted_patterns = sorted(patterns.items(), key=lambda x: x[1]['mean_score'], reverse=True)
        patterns = {k: v for k, v in sorted_patterns[:max_nro_patterns]}
    primitive_hierarchy = {}
    all_pipelines = []
    all_performances = []
    all_primitives = []
    local_probabilities = {}
    for pattern_id, pattern in patterns.items():
        # Group each pattern's primitives by their type
        for primitive in pattern['primitives']:
            primitive_type = available_primitives[primitive]['type']
            if primitive_type not in primitive_hierarchy:
                primitive_hierarchy[primitive_type] = set()
            primitive_hierarchy[primitive_type].add(primitive)
        performances = [1 - x for x in pattern['adtms']]  # Use adtms as performances because they are already scaled
        all_pipelines += pattern['pipelines']
        all_primitives += pattern['primitives']
        all_performances += performances
        # Per-pattern (local) primitive/performance correlations
        correlations = calculate_correlations(pattern['primitives'], pattern['pipelines'], performances)
        local_probabilities[pattern_id] = {}
        for primitive, correlation in correlations.items():
            primitive_type = available_primitives[primitive]['type']
            if primitive_type not in local_probabilities[pattern_id]:
                local_probabilities[pattern_id][primitive_type] = {}
            local_probabilities[pattern_id][primitive_type][primitive] = correlation
    # Global correlations are computed over all surviving patterns at once
    correlations = calculate_correlations(set(all_primitives), all_pipelines, all_performances)
    global_probabilities = {}
    for primitive, correlation in correlations.items():
        primitive_type = available_primitives[primitive]['type']
        if primitive_type not in global_probabilities:
            global_probabilities[primitive_type] = {}
        global_probabilities[primitive_type][primitive] = correlation
    # Make deterministic the order of the patterns and hierarchy
    patterns = sorted(patterns.values(), key=lambda x: x['mean_score'], reverse=True)
    primitive_hierarchy = OrderedDict({k: sorted(v) for k, v in sorted(primitive_hierarchy.items(), key=lambda x: x[0])})
    logger.info('Patterns:\n%s', patterns_repr(patterns))
    logger.info('Hierarchy:\n%s', '\n'.join(['%s:\n%s' % (k, ', '.join(v)) for k, v in primitive_hierarchy.items()]))
    patterns = [p['structure'] for p in patterns]
    primitive_probabilities = {'global': global_probabilities, 'local': local_probabilities, 'types': available_primitives}
    primitive_info = {'hierarchy': primitive_hierarchy, 'probabilities': primitive_probabilities}
    return patterns, primitive_info
def calculate_correlations(primitives, pipelines, scores, normalize=True):
    """Point-biserial correlation between each primitive's presence and the scores.

    primitives: iterable of primitive ids to evaluate.
    pipelines: list of pipelines (each a list of primitive ids).
    scores: one performance value per pipeline.
    normalize: map the coefficient from [-1, 1] to [0, 1].
    Returns {primitive: coefficient rounded to 4 decimals}.
    """
    result = {}
    for prim in primitives:
        presence = [int(prim in pipe) for pipe in pipelines]
        coefficient, _p_value = stats.pointbiserialr(presence, scores)
        if np.isnan(coefficient):
            # Constant presence makes the correlation undefined; treat as positive (1)
            coefficient = 1
        if normalize:
            coefficient = (coefficient + 1) / 2  # (x - min) / (max - min) with min=-1, max=1
        result[prim] = round(coefficient, 4)
    return result
def calculate_adtm(pipelines):
    """Attach an 'adtm' (average distance to the minimum) value to every pipeline.

    Each pipeline's score on a dataset is rescaled with that dataset's
    observed min/max, and the rescaled distances to the best score are
    averaged over all datasets the pipeline ran on. Mutates and returns
    the input list.
    """
    dataset_extremes = {}
    best_by_pipeline = {}
    for entry in pipelines:
        # A dataset can be run under different metrics, so the metric is
        # part of the dataset identifier
        dataset_key = entry['dataset'] + '_' + entry['metric']
        extremes = dataset_extremes.setdefault(dataset_key, {'min': float('inf'), 'max': float('-inf')})
        score = entry['score']
        if score > extremes['max']:
            extremes['max'] = score
        if score < extremes['min']:
            extremes['min'] = score
        pipeline_key = entry['pipeline_repr']
        per_dataset = best_by_pipeline.setdefault(pipeline_key, {})
        # A pipeline can have several scores on one dataset; keep the best
        if dataset_key not in per_dataset or score > per_dataset[dataset_key]:
            per_dataset[dataset_key] = score
    for entry in pipelines:
        pipeline_key = entry['pipeline_repr']
        own_dataset_key = entry['dataset'] + '_' + entry['metric']
        total_distance = 0
        # Iterate over every dataset where this pipeline was used
        for dataset_key, best_score in best_by_pipeline[pipeline_key].items():
            low = dataset_extremes[dataset_key]['min']
            high = dataset_extremes[dataset_key]['max']
            score = entry['score'] if dataset_key == own_dataset_key else best_score
            if low != high:
                total_distance += (high - score) / (high - low)
        entry['adtm'] = total_distance / len(best_by_pipeline[pipeline_key])
    return pipelines
def merge_patterns(grammar_patterns):
    """Collapse patterns that differ from a longer one only by one element.

    If removing every occurrence of an element from a pattern reproduces
    another existing pattern, the shorter pattern is dropped and the
    element is marked as allowed to be empty in the grammar.
    Returns (patterns, empty_elements).
    """
    ordered = sorted(grammar_patterns, key=len, reverse=True)
    optional_elements = set()
    redundant = []
    for pattern in ordered:
        for element in pattern:
            reduced = [item for item in pattern if item != element]
            for candidate in ordered:
                if reduced == candidate:
                    optional_elements.add(element)
                    redundant.append(reduced)
    for reduced in redundant:
        if reduced in ordered:
            ordered.remove(reduced)
    return ordered, optional_elements
def is_available_primitive(pipeline_primitives, available_primitives, verbose=False):
    """Return True when every primitive of a pipeline is still installed.

    pipeline_primitives: iterable of primitive ids used by a pipeline.
    available_primitives: mapping of the currently installed primitive ids.
    verbose: when True, log the first missing primitive.
    """
    for primitive in pipeline_primitives:
        if primitive not in available_primitives:
            if verbose:
                # Fixed message typo ("not longer" -> "no longer") and use lazy
                # %-style logger args so the string is built only when emitted.
                logger.warning('Primitive %s is no longer available', primitive)
            return False
    return True
def patterns_repr(patterns):
    """Return one human-readable line per pattern (structure, frequency, stats)."""
    lines = []
    for pattern in patterns:
        parts = ['structure: [%s]' % ', '.join(pattern['structure']),
                 'frequency: %d' % pattern['frequency']]
        if 'mean_score' in pattern:
            parts.append('mean_score: %.3f' % pattern['mean_score'])
        if 'mean_adtm' in pattern:
            parts.append('mean_adtm: %.3f' % pattern['mean_adtm'])
        lines.append(', '.join(parts))
    return '\n'.join(lines)
def test_dataset(dataset_id, task_name='TASK'):
    """Run grammar creation end to end against one local D3M seed dataset.

    NOTE(review): the dataset root is hard-coded to a developer machine —
    adjust the path before running elsewhere.
    """
    from os.path import join
    import json
    dataset_folder_path = join('/Users/rlopez/D3M/datasets/seed_datasets_current/', dataset_id)
    dataset_path = join(dataset_folder_path, 'TRAIN/dataset_TRAIN/tables/learningData.csv')
    problem_path = join(dataset_folder_path, 'TRAIN/problem_TRAIN/problemDoc.json')
    with open(problem_path) as fin:
        problem_doc = json.load(fin)
    # Task keywords and target column come from the D3M problem description
    task_keywords = problem_doc['about']['taskKeywords']
    target_column = problem_doc['inputs']['data'][0]['targets'][0]['colName']
    logger.info('Evaluating dataset %s with task keywords=%s' % (dataset_id, str(task_keywords)))
    create_metalearningdb_grammar(task_name, dataset_path, target_column, task_keywords)
# Smoke-test entry point: run grammar creation end to end on one dataset.
if __name__ == '__main__':
    test_dataset('185_baseball_MIN_METADATA')
| 48.983389
| 165
| 0.686788
| 1,690
| 14,744
| 5.756213
| 0.16213
| 0.027138
| 0.02097
| 0.035053
| 0.282072
| 0.190687
| 0.122944
| 0.094058
| 0.08933
| 0.056435
| 0
| 0.004397
| 0.213375
| 14,744
| 300
| 166
| 49.146667
| 0.834196
| 0.064433
| 0
| 0.079295
| 0
| 0
| 0.167308
| 0.068302
| 0
| 0
| 0
| 0.003333
| 0
| 1
| 0.044053
| false
| 0
| 0.039648
| 0
| 0.132159
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5723328e5cd271a82c8d25b908bc2b420246795d
| 4,512
|
py
|
Python
|
deap_learning.py
|
fzjcdt/Genetic-CNN
|
6bd53f3f429434557b7fbf1122020259d910f618
|
[
"Apache-2.0"
] | 2
|
2019-10-08T08:27:41.000Z
|
2021-12-02T07:37:27.000Z
|
deap_learning.py
|
fzjcdt/Genetic-CNN
|
6bd53f3f429434557b7fbf1122020259d910f618
|
[
"Apache-2.0"
] | null | null | null |
deap_learning.py
|
fzjcdt/Genetic-CNN
|
6bd53f3f429434557b7fbf1122020259d910f618
|
[
"Apache-2.0"
] | null | null | null |
from deap import base, creator, tools
import random
"""
每个individual是一个list,包含10个元素,需要演化到元素和最小
"""
# ****************************Types********************************
# def create(name, base, **kargs):
# Creates a new class named *name* inheriting from *base*
# A negative weight element corresponds to the minimization of
# the associated objective and positive weight to the maximization.
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
# an Individual class that is derived from a list with a fitness attribute set
# to the just created fitness
"""
create("Foo", list, bar=dict, spam=1)
This above line is exactly the same as defining in the :mod:`creator`
module something like the following. ::
class Foo(list):
spam = 1
def __init__(self):
self.bar = dict()
"""
creator.create("Individual", list, fitness=creator.FitnessMin)
# ****************************Initialization********************************
IND_SIZE = 10
toolbox = base.Toolbox()
# def register(self, alias, function, *args, **kargs):
# Register a *function* in the toolbox under the name *alias*.
# *args当function被调用时自动作为function相应参数
"""
>>> def func(a, b, c=3):
... print(a, b, c)
...
>>> tools = Toolbox()
>>> tools.register("myFunc", func, 2, c=4)
>>> tools.myFunc(3)
2 3 4
"""
toolbox.register("attribute", random.random)
# def initRepeat(container, func, n):
# Call the function *container* with a generator function corresponding
# to the calling *n* times the function *func*.
"""
>>> initRepeat(list, random.random, 2) # doctest: +ELLIPSIS,
... # doctest: +NORMALIZE_WHITESPACE
[0.6394..., 0.0250...]
"""
# 将IND_SIZE个 random.random()加入到Individual里,即初始化Individual,每个Individual list里共IND_SIZE个初始值
toolbox.register("individual", tools.initRepeat, creator.Individual,
toolbox.attribute, n=IND_SIZE)
# 将individual加入到population里,n由初始化时指定
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# ****************************Operators********************************
def evaluate(individual):
    """Fitness of an individual: the sum of its values.

    DEAP expects a tuple here because the fitness weights are a tuple,
    hence the one-element tuple return.
    """
    total = sum(individual)
    return (total,)
# def cxTwoPoint(ind1, ind2):
#     Executes a two-point crossover on the input :term:`sequence` individuals.
toolbox.register("mate", tools.cxTwoPoint)
# gaussian mutation with mu and sigma
# The *indpb* argument is the probability of each attribute to be mutated.
# Each element is nudged up or down by a small random amount
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.1)
# def selTournament(individuals, k, tournsize, fit_attr="fitness"):
#     Select the best individual among *tournsize* randomly chosen
#     individuals, *k* times. The list returned contains
#     references to the input *individuals*.
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("evaluate", evaluate)
def main():
    """Run the genetic algorithm for NGEN generations; return the final population."""
    pop = toolbox.population(n=50)
    # Crossover probability, mutation probability, number of generations
    CXPB, MUTPB, NGEN = 0.5, 0.2, 40
    # map(func, *iterables) --> map object
    # Make an iterator that computes the function using arguments from
    # each of the iterables.
    # map() calls the function on every element and yields each return value.
    fitnesses = map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    for g in range(NGEN):
        # Tournament selection: pick 3 at random, keep the best, len(pop) times
        offspring = toolbox.select(pop, len(pop))
        # The selected offspring still reference the same objects in pop, so
        # clone them; an individual may have been selected two or more times
        offspring = list(map(toolbox.clone, offspring))
        # Mate consecutive pairs with probability CXPB
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values
        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values
        # Fitness values were invalidated above; re-evaluate only those
        # individuals instead of the whole population
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        pop[:] = offspring
    return pop
# Run the GA and print the fitness of every individual in the final population.
for ind in main():
    print(evaluate(ind))
| 33.176471
| 89
| 0.629876
| 516
| 4,512
| 5.48062
| 0.408915
| 0.037129
| 0.002122
| 0.019095
| 0.029703
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014245
| 0.222074
| 4,512
| 135
| 90
| 33.422222
| 0.791453
| 0.407801
| 0
| 0.047619
| 0
| 0
| 0.038747
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.047619
| 0.02381
| 0.142857
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
57262781980201cf7735ba35e8965dd0cb76ade8
| 1,674
|
py
|
Python
|
pacman/utils/replay_buffer.py
|
i-rme/openai-pacman
|
4a80ed023ed2bdf031990147acbbeea904b9fc8e
|
[
"MIT"
] | 2
|
2020-01-26T23:06:57.000Z
|
2021-04-12T08:36:55.000Z
|
pacman/utils/replay_buffer.py
|
i-rme/openai-pacman
|
4a80ed023ed2bdf031990147acbbeea904b9fc8e
|
[
"MIT"
] | null | null | null |
pacman/utils/replay_buffer.py
|
i-rme/openai-pacman
|
4a80ed023ed2bdf031990147acbbeea904b9fc8e
|
[
"MIT"
] | null | null | null |
from collections import deque
import random
import numpy as np
class ReplayBuffer:
    '''
    Fixed-capacity experience replay buffer.

    Stores past transitions and samples random minibatches from them.
    Uses a bounded deque (maxlen) so the oldest experience is evicted
    automatically once buffer_size is reached, instead of a manual
    count/popleft dance.
    '''

    def __init__(self, buffer_size):
        # Maximum number of experiences kept
        self.buffer_size = buffer_size
        # Number of experiences currently stored (kept for API compatibility)
        self.count = 0
        # maxlen makes append() drop the oldest element when full
        self.buffer = deque(maxlen=buffer_size)

    def add(self, s, a, r, d, s2):
        '''
        Add one experience to the buffer.

        s: current state,
        a: action, r: reward,
        d: done, s2: next state
        '''
        self.buffer.append((s, a, r, d, s2))
        self.count = len(self.buffer)

    def size(self):
        '''Return the number of stored experiences.'''
        return self.count

    def clear(self):
        '''Remove all stored experiences.'''
        self.buffer.clear()
        self.count = 0

    def sample(self, batch_size):
        '''
        Sample up to batch_size experiences uniformly without replacement.

        Returns five numpy arrays (s, a, r, d, s2), one row per sampled
        experience. If fewer than batch_size experiences are stored, all
        of them are returned (in random order).
        '''
        batch = random.sample(self.buffer, min(self.count, batch_size))
        # Transpose the list of transitions into per-field arrays:
        # [array([s1, ..., sN]), ..., array([s21, ..., s2N])]
        s_batch, a_batch, r_batch, d_batch, s2_batch = map(np.array, zip(*batch))
        return s_batch, a_batch, r_batch, d_batch, s2_batch
| 27.442623
| 93
| 0.548387
| 214
| 1,674
| 4.186916
| 0.364486
| 0.111607
| 0.046875
| 0.040179
| 0.196429
| 0.069196
| 0.069196
| 0.069196
| 0.069196
| 0.069196
| 0
| 0.021858
| 0.344086
| 1,674
| 61
| 94
| 27.442623
| 0.794171
| 0.299881
| 0
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.172414
| false
| 0
| 0.103448
| 0.034483
| 0.37931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5726ab8f943f02dfa0eee1936447786383a1ce72
| 9,126
|
py
|
Python
|
tests/entities/test_creature.py
|
Flame753/ARPG
|
f931d3437a83995b43bdddc68cb5ba89922dc259
|
[
"MIT"
] | null | null | null |
tests/entities/test_creature.py
|
Flame753/ARPG
|
f931d3437a83995b43bdddc68cb5ba89922dc259
|
[
"MIT"
] | null | null | null |
tests/entities/test_creature.py
|
Flame753/ARPG
|
f931d3437a83995b43bdddc68cb5ba89922dc259
|
[
"MIT"
] | null | null | null |
# Standard library imports
from pprint import pprint
import unittest
# Local application imports
from context import entities
from entities import creatures
from entities import items
from entities import currency
from entities import slots
class TestCreature(unittest.TestCase):
    """Unit tests for creatures.Creature: inventory, coin pouch and equipment slots."""

    def setUp(self):
        # Fresh item fixtures for every test
        self.dagger = items.Dagger()
        self.copper_coin = currency.CopperCoin()
        self.bread = items.Bread()

    def equipment_slot_helper(self, creature_obj, answer):
        """Assert that every equipment slot's inventory equals *answer*."""
        list_of_slots = [slots.Head, slots.Body, slots.Legs, slots.Boots, slots.OneHanded, slots.TwoHanded]
        for slot in list_of_slots:
            creature_obj.equippable_slots.slots.get(slot)._ensure_inventory()
            self.assertDictEqual(creature_obj.equippable_slots.slots.get(slot).inventory, answer)

    def test_class_initializer(self):
        """Two creatures must not share inventory, coin pouch or slot objects."""
        creature_A = creatures.Creature()
        creature_B = creatures.Creature()
        self.assertFalse(creature_A is creature_B)
        self.assertFalse(creature_A.equippable_slots is creature_B.equippable_slots)
        self.assertEqual(creature_A.inventory, creature_B.inventory)
        self.assertFalse(creature_A.inventory is creature_B.inventory)
        self.assertEqual(creature_A.coin_pouch, creature_B.coin_pouch)
        self.assertFalse(creature_A.coin_pouch is creature_B.coin_pouch)

    def test_add_item(self):
        """Items land in the inventory, coins in the pouch, amounts accumulate."""
        creature = creatures.Creature()
        creature.add_item(self.dagger)
        self.assertDictEqual(creature.inventory.inventory,
                             {self.dagger: {'amount': 1}})
        creature.add_item(self.copper_coin, 2)
        self.assertDictEqual(creature.coin_pouch.inventory,
                             {self.copper_coin: {'amount': 2}})
        creature.add_item(self.bread, 6)
        self.assertDictEqual(creature.inventory.inventory,
                             {self.bread: {'amount': 6}, self.dagger: {'amount': 1}})
        creature.add_item(self.dagger, 3)
        self.assertDictEqual(creature.inventory.inventory,
                             {self.bread: {'amount': 6}, self.dagger: {'amount': 4}})

    def test_remove_item(self):
        """Removal is a no-op (falsy) on empty stores and decrements amounts."""
        creature = creatures.Creature()
        # Testing when removing an item from a empty dict
        result = creature.remove_item(self.dagger)
        self.assertFalse(result)
        creature.add_item(self.dagger)
        creature.remove_item(self.dagger)
        self.assertDictEqual(creature.inventory.inventory, {})
        creature.add_item(self.copper_coin, 8)
        creature.remove_item(self.copper_coin, 3)
        self.assertDictEqual(creature.coin_pouch.inventory,
                             {self.copper_coin: {'amount': 5}})

    def test_equip(self):
        """Only owned, equippable items can be equipped."""
        creature = creatures.Creature()
        # Equipping dagger that is not in creature
        result = creature.equip(self.dagger)
        self.assertFalse(result)
        # Verifying that there is no inventory was added
        answer = {}
        self.equipment_slot_helper(creature, answer)
        result = creature.inventory.inventory
        self.assertDictEqual(result, answer)
        self.assertFalse(hasattr(creature.coin_pouch, 'inventory'))
        # Equipping non equipable item
        creature.add_item(self.copper_coin)
        result = creature.equip(self.copper_coin)
        self.assertFalse(result)
        # Verifying that there is no inventory was added
        answer = {self.copper_coin: {'amount': 1}}
        result = creature.coin_pouch.inventory
        self.assertDictEqual(result, answer)
        answer = {}
        self.equipment_slot_helper(creature, answer)
        result = creature.inventory.inventory
        self.assertDictEqual(result, answer)
        # Equipping a dagger
        creature.add_item(self.dagger)
        result = creature.equip(self.dagger)
        self.assertTrue(result)
        answer = {self.dagger: {'amount': 1}}
        result = creature.inventory.inventory
        self.assertDictEqual(result, answer)
        answer = {self.dagger: {'amount': 1}}
        result = creature.equippable_slots.slots.get(slots.OneHanded).inventory
        self.assertDictEqual(result, answer)
        # equipping a non equipable item
        creature.add_item(self.bread)
        result = creature.equip(self.bread)
        self.assertFalse(result)

    def test_unequip(self):
        """Unequipping moves an equipped item's slot entry back out."""
        creature = creatures.Creature()
        # Unequipping a item that doesn't exist
        result = creature.unequip(self.dagger)
        self.assertFalse(result)
        # Verifying that there is no inventory was added
        answer = {}
        self.equipment_slot_helper(creature, answer)
        self.assertFalse(hasattr(creature.inventory, 'inventory'))
        creature.add_item(self.copper_coin)
        result = creature.unequip(self.copper_coin)
        self.assertFalse(result)
        # Verifying that there is no inventory was added
        answer = {}
        self.equipment_slot_helper(creature, answer)
        self.assertFalse(hasattr(creature.inventory, 'inventory'))
        answer = {self.copper_coin: {'amount': 1}}
        result = creature.coin_pouch.inventory
        self.assertDictEqual(result, answer)
        # Preparing for next test case
        creature.remove_item(self.copper_coin)
        # Actually tesing the removal of a item
        creature.add_item(self.dagger)
        creature.equip(self.dagger)
        result = creature.unequip(self.dagger)
        self.assertTrue(result)
        answer = {}
        self.equipment_slot_helper(creature, answer)
        result = creature.coin_pouch.inventory
        self.assertDictEqual(result, answer)
        answer = {self.dagger: {'amount': 1}}
        result = creature.inventory.inventory
        self.assertDictEqual(result, answer)

    def test_calculate_item_worth(self):
        """Worth of a single item kind = amount * unit worth (equipped included)."""
        creature = creatures.Creature()
        copper_amount = 10
        bread_amount = 5
        dagger_amount = 5
        creature.add_item(self.copper_coin, copper_amount)
        creature.add_item(self.bread, bread_amount)
        creature.add_item(self.dagger, dagger_amount)
        creature.equip(self.dagger)
        result = creature.calculate_item_worth(self.copper_coin)
        self.assertEqual(result, copper_amount*self.copper_coin.worth)
        result = creature.calculate_item_worth(self.dagger)
        self.assertEqual(result, dagger_amount*self.dagger.worth)
        result = creature.calculate_item_worth(self.bread)
        self.assertEqual(result, bread_amount*self.bread.worth)

    def test_calculate_total_worth(self):
        """Total worth sums coins, carried items and equipped items."""
        creature = creatures.Creature()
        copper_amount = 10
        bread_amount = 5
        dagger_amount = 5
        creature.add_item(self.copper_coin, copper_amount)
        creature.add_item(self.bread, bread_amount)
        creature.add_item(self.dagger, dagger_amount)
        creature.equip(self.dagger)
        result = creature.calculate_total_worth()
        answer = (self.copper_coin.worth * copper_amount) + \
                 (self.dagger.worth * dagger_amount) + \
                 (self.bread.worth * bread_amount)
        self.assertEqual(result, answer)

    def test_type_error(self):
        """Every public method rejects non-item arguments with TypeError."""
        creature = creatures.Creature()
        test_num = 2
        test_string = 'Test'
        test_list = [7]
        test_dict = {"s":2}
        test_tuple = (2, "2")
        test_case = [test_num, test_string, test_list, test_dict, test_tuple, [], {}, ()]
        for case in test_case:
            func = creature.add_item
            self.assertRaises(TypeError, func, case)
            self.assertRaises(TypeError, func, (self.dagger, case))
            func = creature.remove_item
            self.assertRaises(TypeError, func, case)
            self.assertRaises(TypeError, func, (self.dagger, case))
            func = creature.equip
            self.assertRaises(TypeError, func, case)
            func = creature.unequip
            self.assertRaises(TypeError, func, case)
            func = creature.calculate_item_worth
            self.assertRaises(TypeError, func, case)
            func = creature.calculate_total_worth
            self.assertRaises(TypeError, func, case)

    def test_value_error(self):
        """Negative amounts are rejected.

        NOTE(review): the name suggests ValueError but TypeError is asserted —
        confirm which exception the Creature API actually raises here.
        """
        creature = creatures.Creature()
        test_case = -32
        func = creature.add_item
        self.assertRaises(TypeError, func, (self.dagger, test_case))
        func = creature.remove_item
        self.assertRaises(TypeError, func, (self.dagger, test_case))

    def test_EquippedItemRemovealError(self):
        """Removing an item that is currently equipped must raise."""
        creature = creatures.Creature()
        # Tesing after removing item from inventory item should not exist in equipment slot
        creature.add_item(self.dagger)
        creature.equip(self.dagger)
        self.assertRaises(creatures.EquippedItemRemovealError, creature.remove_item, self.dagger)
def suite():
    """Build a TestSuite running the add-item test.

    Fixed the registered test name: the class defines 'test_add_item',
    not 'test_addItem' — the old name failed at run time.
    """
    suite = unittest.TestSuite()
    suite.addTest(TestCreature('test_add_item'))
    return suite
# Run the whole TestCreature suite when executed directly.
if __name__ == '__main__':
    unittest.main()
    # runner = unittest.TextTestRunner()
    # runner.run(suite())
| 36.504
| 107
| 0.655928
| 1,013
| 9,126
| 5.743337
| 0.126357
| 0.056721
| 0.039704
| 0.065315
| 0.638192
| 0.599691
| 0.538329
| 0.474046
| 0.415091
| 0.355105
| 0
| 0.004676
| 0.250055
| 9,126
| 250
| 108
| 36.504
| 0.845412
| 0.071225
| 0
| 0.527778
| 0
| 0
| 0.014777
| 0
| 0
| 0
| 0
| 0
| 0.261111
| 1
| 0.072222
| false
| 0
| 0.038889
| 0
| 0.122222
| 0.005556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
572995aff10ad23755f80a0359fa3ca259ee111e
| 199
|
py
|
Python
|
testfiles/benchmarks/send_multiple.py
|
marcolamartina/PASTEL
|
8e1e0fd086a26b7c50f15fe87ffe5dbd007cf925
|
[
"MIT"
] | null | null | null |
testfiles/benchmarks/send_multiple.py
|
marcolamartina/PASTEL
|
8e1e0fd086a26b7c50f15fe87ffe5dbd007cf925
|
[
"MIT"
] | null | null | null |
testfiles/benchmarks/send_multiple.py
|
marcolamartina/PASTEL
|
8e1e0fd086a26b7c50f15fe87ffe5dbd007cf925
|
[
"MIT"
] | 1
|
2020-07-08T11:23:22.000Z
|
2020-07-08T11:23:22.000Z
|
import binascii
from pwn import *
def send(r, num):
    """Send the decimal/string form of *num* as one line over connection *r*."""
    payload = str(num)
    r.sendline(payload)
# Benchmark driver: open 10000 sequential connections to a local server,
# send the loop index on each, then close it.
port = 1234
server = '127.0.0.1'
sleep(1)  # give the server a moment to start listening
for i in range(10000):
    r = remote(server, port)
    send(r,i)
    r.close()
| 15.307692
| 28
| 0.623116
| 36
| 199
| 3.444444
| 0.666667
| 0.080645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 0.21608
| 199
| 12
| 29
| 16.583333
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
573671e14e06512a6056d7ef96ce655d220e4a19
| 2,857
|
py
|
Python
|
Run_exphydro_distributed_type1_pso.py
|
sopanpatil/exp-hydro
|
7295dddc4df1028f669a223e1b631a4a91669515
|
[
"MIT"
] | 11
|
2016-11-25T13:05:26.000Z
|
2022-03-25T03:24:16.000Z
|
Run_exphydro_distributed_type1_pso.py
|
sopanpatil/exp-hydro
|
7295dddc4df1028f669a223e1b631a4a91669515
|
[
"MIT"
] | null | null | null |
Run_exphydro_distributed_type1_pso.py
|
sopanpatil/exp-hydro
|
7295dddc4df1028f669a223e1b631a4a91669515
|
[
"MIT"
] | 6
|
2017-03-28T12:06:00.000Z
|
2021-09-16T17:50:34.000Z
|
#!/usr/bin/env python
# Programmer(s): Sopan Patil.
""" MAIN PROGRAM FILE
Run this file to optimise the model parameters of the spatially distributed
version of EXP-HYDRO model using Particle Swarm Optimisation (PSO) algorithm.
Type 1 Model:
- This type of distributed model is pixel based (i.e., all sub-components
have the same drainage area).
- All pixels receive the same meteorological inputs.
- Channel routing is ignored and it is assumed that streamflow generated from
each pixel reaches the catchment outlet on same day.
"""
import numpy
import os
import time
import matplotlib.pyplot as plt
from exphydro.distributed import ExphydroDistrParameters
from exphydro.distributed.type1 import ExphydroDistrModel
from hydroutils import Calibration, ObjectiveFunction

start_time = time.time()

######################################################################
# SET WORKING DIRECTORY

# Getting current directory, i.e., directory containing this file
dir1 = os.path.dirname(os.path.abspath('__file__'))

# Setting to current directory
os.chdir(dir1)

######################################################################
# MAIN PROGRAM

# Load meteorological and observed flow data
P = numpy.genfromtxt('SampleData/P_test.txt')  # Observed rainfall (mm/day)
T = numpy.genfromtxt('SampleData/T_test.txt')  # Observed air temperature (deg C)
PET = numpy.genfromtxt('SampleData/PET_test.txt')  # Potential evapotranspiration (mm/day)
Qobs = numpy.genfromtxt('SampleData/Q_test.txt')  # Observed streamflow (mm/day)

# Specify the number of pixels in the catchment
npixels = 5

# Specify the no. of parameter sets (particles) in a PSO swarm
npart = 10

# Generate 'npart' initial EXP-HYDRO model parameters
params = [ExphydroDistrParameters(npixels) for j in range(npart)]

# Initialise the model by loading its climate inputs
model = ExphydroDistrModel(P, PET, T, npixels)

# Specify the start and end day numbers of the calibration period.
# This is done separately for the observed and simulated data
# because they might not be of the same length in some cases.
calperiods_obs = [365, 2557]
calperiods_sim = [365, 2557]

# Calibrate the model to identify optimal parameter set
# (maximises the Kling-Gupta efficiency over the calibration window)
paramsmax = Calibration.pso_maximise(model, params, Qobs, ObjectiveFunction.klinggupta, calperiods_obs, calperiods_sim)
print ('Calibration run KGE value = ', paramsmax.objval)

# Run the optimised model for validation period
# (everything after the calibration end day is the validation window)
Qsim = model.simulate(paramsmax)
kge = ObjectiveFunction.klinggupta(Qobs[calperiods_obs[1]:], Qsim[calperiods_sim[1]:])
print ('Independent run KGE value = ', kge)
print("Total runtime: %s seconds" % (time.time() - start_time))

# Plot the observed and simulated hydrographs
plt.plot(Qobs[calperiods_obs[0]:], 'b-')
plt.plot(Qsim[calperiods_sim[0]:], 'r-')
plt.show()

######################################################################
| 35.7125
| 119
| 0.716136
| 374
| 2,857
| 5.419786
| 0.47861
| 0.0296
| 0.049334
| 0.022694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010036
| 0.128106
| 2,857
| 79
| 120
| 36.164557
| 0.803693
| 0.476024
| 0
| 0
| 0
| 0
| 0.142176
| 0.068308
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.241379
| 0
| 0.241379
| 0.103448
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5738d01ad1ed866e8e47c9a1f5dadbf2cfce3611
| 11,104
|
py
|
Python
|
multi_input_multi_output/train.py
|
alt113/CS591-Multimodal-Spring2021
|
f28bade729818aa51fd131e86f1ba2271cca8947
|
[
"MIT"
] | null | null | null |
multi_input_multi_output/train.py
|
alt113/CS591-Multimodal-Spring2021
|
f28bade729818aa51fd131e86f1ba2271cca8947
|
[
"MIT"
] | 1
|
2021-05-03T18:59:43.000Z
|
2021-05-03T19:04:19.000Z
|
multi_input_multi_output/train.py
|
alt113/CS591-Multimodal-Spring2021
|
f28bade729818aa51fd131e86f1ba2271cca8947
|
[
"MIT"
] | null | null | null |
import os
from multi_input_multi_output.models import MultiNet
from shared_weights.helpers import config, utils
from shared_weights.helpers.siamese_network import create_encoder
from data.data_tf import fat_dataset
import tensorflow as tf
from tensorflow import keras
# ----------------------
def flatten_model(model_nested):
    """Flatten a model containing nested sub-models into one Sequential.

    Layers that are themselves models (i.e. expose a ``layers`` attribute)
    contribute their inner layers; plain layers are kept as-is.
    """
    collected = []
    for sub in model_nested.layers:
        if hasattr(sub, 'layers'):
            collected.extend(sub.layers)
        else:
            collected.append(sub)
    return keras.models.Sequential(collected)
""" Data augmentation"""
augmentation_input = keras.layers.Input(shape=config.IMG_SHAPE)
data_augmentation = keras.layers.experimental.preprocessing.RandomTranslation(
height_factor=(-0.2, 0.2),
width_factor=(-0.2, 0.2),
fill_mode="constant"
)(augmentation_input)
data_augmentation = keras.layers.experimental.preprocessing.RandomFlip(mode="horizontal")(data_augmentation)
data_augmentation = keras.layers.experimental.preprocessing.RandomRotation(factor=0.15,
fill_mode="constant")(data_augmentation)
augmentation_output = keras.layers.experimental.preprocessing.RandomZoom(height_factor=(-0.3, 0.1),
width_factor=(-0.3, 0.1),
fill_mode="constant")(data_augmentation)
data_augmentation = keras.Model(augmentation_input, augmentation_output)
""" Unsupervised contrastive loss"""
class RepresentationLearner(keras.Model):
def __init__(
self,
encoder,
projection_units,
num_augmentations,
temperature=1.0,
dropout_rate=0.1,
l2_normalize=False,
**kwargs
):
super(RepresentationLearner, self).__init__(**kwargs)
self.encoder = encoder
# Create projection head.
self.projector = keras.Sequential(
[
keras.layers.Dropout(dropout_rate),
keras.layers.Dense(units=projection_units, use_bias=False),
keras.layers.BatchNormalization(),
keras.layers.ReLU(),
]
)
self.num_augmentations = num_augmentations
self.temperature = temperature
self.l2_normalize = l2_normalize
self.loss_tracker = keras.metrics.Mean(name="loss")
@property
def metrics(self):
return [self.loss_tracker]
def compute_contrastive_loss(self, feature_vectors, batch_size):
num_augmentations = tf.shape(feature_vectors)[0] // batch_size
if self.l2_normalize:
feature_vectors = tf.math.l2_normalize(feature_vectors, -1)
# The logits shape is [num_augmentations * batch_size, num_augmentations * batch_size].
logits = (
tf.linalg.matmul(feature_vectors, feature_vectors, transpose_b=True)
/ self.temperature
)
# Apply log-max trick for numerical stability.
logits_max = tf.math.reduce_max(logits, axis=1)
logits = logits - logits_max
# The shape of targets is [num_augmentations * batch_size, num_augmentations * batch_size].
# targets is a matrix consits of num_augmentations submatrices of shape [batch_size * batch_size].
# Each [batch_size * batch_size] submatrix is an identity matrix (diagonal entries are ones).
targets = tf.tile(tf.eye(batch_size), [num_augmentations, num_augmentations])
# Compute cross entropy loss
return keras.losses.categorical_crossentropy(
y_true=targets, y_pred=logits, from_logits=True
)
def call(self, inputs):
# Create augmented versions of the images.
augmented = []
for _ in range(self.num_augmentations):
x = data_augmentation(inputs)
augmented.append(x)
augmented = keras.layers.Concatenate(axis=0)(augmented)
# Generate embedding representations of the images.
features = self.encoder(augmented)
# Apply projection head.
return self.projector(features)
def train_step(self, data):#inputs):
inputs = data[0]
batch_size = tf.shape(inputs)[0]
# Run the forward pass and compute the contrastive loss
with tf.GradientTape() as tape:
feature_vectors = self(inputs, training=True)
loss = self.compute_contrastive_loss(feature_vectors, batch_size)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update loss tracker metric
self.loss_tracker.update_state(loss)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
def test_step(self, data):#inputs):
inputs = data[0]
batch_size = tf.shape(inputs)[0]
feature_vectors = self(inputs, training=False)
loss = self.compute_contrastive_loss(feature_vectors, batch_size)
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
""" Train the model"""
network_input = keras.layers.Input(shape=config.IMG_SHAPE)
# Load RGB vision encoder.
r_encoder = create_encoder(base='resnet50', pretrained=True)(network_input)
encoder_output = keras.layers.Dense(config.HIDDEN_UNITS)(r_encoder)
r_encoder = keras.Model(network_input, encoder_output)
# Create representation learner.
r_representation_learner = RepresentationLearner(
r_encoder, config.PROJECTION_UNITS, num_augmentations=2, temperature=0.1
)
r_representation_learner.build((None, 128, 128, 3))
# base_path = os.environ['PYTHONPATH'].split(os.pathsep)[1]
# representation_learner.load_weights(base_path + '/multi_input_multi_output/simclr/weights/simclr_resnet50_rgb_scratch_weights.h5')
r_representation_learner.load_weights(config.RGB_MODALITY_WEIGHT_PATH)
functional_model = flatten_model(r_representation_learner.layers[0])
rgb_encoder = functional_model.layers[1]
# Load Depth vision encoder.
d_encoder = create_encoder(base='resnet50', pretrained=True)(network_input)
encoder_output = keras.layers.Dense(config.HIDDEN_UNITS)(d_encoder)
d_encoder = keras.Model(network_input, encoder_output)
# Create representation learner.
d_representation_learner = RepresentationLearner(
d_encoder, config.PROJECTION_UNITS, num_augmentations=2, temperature=0.1
)
d_representation_learner.build((None, 128, 128, 3))
# base_path = os.environ['PYTHONPATH'].split(os.pathsep)[1]
# representation_learner.load_weights(base_path + '/multi_input_multi_output/simclr/weights/simclr_resnet50_rgb_scratch_weights.h5')
d_representation_learner.load_weights(config.DEPTH_MODALITY_WEIGHT_PATH)
functional_model = flatten_model(d_representation_learner.layers[0])
depth_encoder = functional_model.layers[1]
# ----------------------
# RGB
rgb_input = keras.layers.Input(shape=config.IMG_SHAPE)
# rgb_encoder = keras.applications.ResNet50V2(include_top=False,
# weights=None,
# input_shape=config.IMG_SHAPE,
# pooling="avg")
rgb = rgb_encoder(rgb_input)
rgb = keras.layers.Dropout(config.DROPOUT_RATE)(rgb)
rgb = keras.layers.Dense(config.HIDDEN_UNITS, activation="relu")(rgb)
rgb = keras.layers.Dropout(config.DROPOUT_RATE)(rgb)
rgb = keras.layers.Flatten()(rgb)
rgb = keras.layers.Dense(config.NUM_OF_CLASSES, activation="softmax")(rgb)
rgb_classifier = keras.models.Model(inputs=rgb_input, outputs=rgb, name='rgb_classifier')
for layer in rgb_classifier.layers:
layer._name += '_rgb'
layer.trainable = True
print('[INFO] built rgb classifier')
print(rgb_classifier.summary())
# Depth
depth_input = keras.layers.Input(shape=config.IMG_SHAPE)
# depth_encoder = keras.applications.ResNet50V2(include_top=False,
# weights=None,
# input_shape=config.IMG_SHAPE,
# pooling="avg")
depth = depth_encoder(depth_input)
depth = keras.layers.Dropout(config.DROPOUT_RATE)(depth)
depth = keras.layers.Dense(config.HIDDEN_UNITS, activation="relu")(depth)
depth = keras.layers.Dropout(config.DROPOUT_RATE)(depth)
depth = keras.layers.Flatten()(depth)
depth = keras.layers.Dense(config.NUM_OF_CLASSES, activation="softmax")(depth)
depth_classifier = keras.models.Model(inputs=depth_input, outputs=depth, name='depth_classifier')
for layer in depth_classifier.layers:
layer._name += '_depth'
layer.trainable = True
print('[INFO] built depth classifier')
print(depth_classifier.summary())
# Build and compile MultiNet
multinet_class = MultiNet(rgb_classifier=rgb_classifier,
rgb_output_branch=rgb,
depth_classifier=depth_classifier,
depth_output_branch=depth)
multinet_class.compile()
multinet_model = multinet_class.model
print('[INFO] built MultiNet classifier')
# train the network to perform multi-output classification
train_ds = fat_dataset(split='train',
data_type='all',
batch_size=config.BATCH_SIZE,
shuffle=True,
pairs=False)
val_ds = fat_dataset(split='validation',
data_type='all',
batch_size=config.BATCH_SIZE,
shuffle=True,
pairs=False)
print("[INFO] training MultiNet...")
counter = 0
history = None
toCSV = []
while counter <= config.EPOCHS:
counter += 1
print(f'* Epoch: {counter}')
data_batch = 0
for imgs, labels in train_ds:
data_batch += 1
history = multinet_model.train_on_batch(x=[imgs[:, 0], imgs[:, 1]],
y={'dense_5_rgb': labels[:], 'dense_7_depth': labels[:]},
reset_metrics=False,
return_dict=True)
print(f'* Data Batch: {data_batch}')
print(f'\t{history}')
break
if counter % 10 == 0:
print("[VALUE] Testing model on batch")
for val_data, val_labels in val_ds:
val_results = multinet_model.test_on_batch(x=[val_data[:, 0], val_data[:, 1]],
y={'dense_5_rgb': val_labels[:], 'dense_7_depth': val_labels[:]})
print(val_results)
toCSV.append(val_results)
print('Saving MultiNet validation results as CSV file')
utils.save_model_history(H=toCSV, path_to_csv=config.FROZEN_SIAMESE_TRAINING_HISTORY_CSV_PATH)
rgb_classifier.save_weights(config.MIMO_RGB_WEIGHTS)
print("Saved RGB model weights to disk")
# serialize weights to HDF5
depth_classifier.save_weights(config.MIMO_DEPTH_WEIGHTS)
print("Saved Depth model weights to disk")
| 40.525547
| 132
| 0.665616
| 1,296
| 11,104
| 5.462191
| 0.200617
| 0.038847
| 0.015821
| 0.016104
| 0.409521
| 0.340726
| 0.307388
| 0.284503
| 0.25173
| 0.207656
| 0
| 0.011048
| 0.23379
| 11,104
| 273
| 133
| 40.673993
| 0.820992
| 0.173811
| 0
| 0.125654
| 0
| 0
| 0.055132
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036649
| false
| 0
| 0.036649
| 0.005236
| 0.109948
| 0.073298
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
573a1fa313f96c01ab6df0ada017abeca301701e
| 856
|
py
|
Python
|
tools/rebuild_caches.py
|
newbdoc/lookyloo
|
53a8952fccaf9ae42fa582d3475283babd55d08a
|
[
"BSD-3-Clause"
] | 148
|
2020-06-14T06:55:42.000Z
|
2022-03-19T05:37:02.000Z
|
tools/rebuild_caches.py
|
newbdoc/lookyloo
|
53a8952fccaf9ae42fa582d3475283babd55d08a
|
[
"BSD-3-Clause"
] | 261
|
2020-06-16T22:29:27.000Z
|
2022-03-31T10:40:52.000Z
|
tools/rebuild_caches.py
|
newbdoc/lookyloo
|
53a8952fccaf9ae42fa582d3475283babd55d08a
|
[
"BSD-3-Clause"
] | 27
|
2020-06-08T12:28:33.000Z
|
2022-02-15T18:50:50.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import logging
from lookyloo.lookyloo import Indexing, Lookyloo
logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s:%(message)s',
level=logging.INFO)
def main():
    """Rebuild the lookyloo redis cache, optionally regenerating every pickle."""
    arg_parser = argparse.ArgumentParser(description='Rebuild the redis cache.')
    arg_parser.add_argument('--rebuild_pickles', default=False, action='store_true', help='Delete and rebuild the pickles. Count 20s/pickle, it can take a very long time.')
    options = arg_parser.parse_args()
    instance = Lookyloo()
    if not options.rebuild_pickles:
        instance.rebuild_cache()
    else:
        instance.rebuild_all()
    index = Indexing()
    index.clear_indexes()
    # This call will rebuild all the caches as needed.
    instance.sorted_capture_cache()


if __name__ == '__main__':
    main()
| 25.939394
| 168
| 0.684579
| 106
| 856
| 5.358491
| 0.632075
| 0.056338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005797
| 0.193925
| 856
| 32
| 169
| 26.75
| 0.817391
| 0.107477
| 0
| 0
| 0
| 0
| 0.241787
| 0.032852
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.157895
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
573b50d93fdcd613c5e4eb9cd5d3608413327c07
| 633
|
py
|
Python
|
src/game.py
|
LuisMarques99/Number-Guesser-Terminal
|
6abfac23268022f7ce3776a20d1d6f550955d6c8
|
[
"MIT"
] | null | null | null |
src/game.py
|
LuisMarques99/Number-Guesser-Terminal
|
6abfac23268022f7ce3776a20d1d6f550955d6c8
|
[
"MIT"
] | null | null | null |
src/game.py
|
LuisMarques99/Number-Guesser-Terminal
|
6abfac23268022f7ce3776a20d1d6f550955d6c8
|
[
"MIT"
] | null | null | null |
from random import randrange
def main():
    """Run an interactive number-guessing game on the terminal."""
    MIN = 1
    MAX = 100
    NUMBER = randrange(MIN, MAX + 1)
    guesses = 9
    print(f"Guess a number from {MIN} to {MAX}.\nYou have {guesses} chances. Start now!\n")
    # Count down the remaining chances; the prompt shows the count before
    # the current attempt is consumed.
    for remaining in range(guesses, 0, -1):
        raw = input(f"Guess ({remaining}): ")
        try:
            attempt = int(raw)
        except ValueError:
            # Non-numeric input still costs a chance, matching the countdown.
            print("Enter just the number.\n")
            continue
        if attempt == NUMBER:
            print("You won!")
            break
        if remaining - 1 == 0:
            print(f"\nYou ran out of guesses... Best luck next time.\nThe number was [{NUMBER}].")
        else:
            print("Smaller\n" if attempt > NUMBER else "Bigger\n")


if __name__ == "__main__":
    main()
| 21.827586
| 90
| 0.624013
| 94
| 633
| 4.117021
| 0.542553
| 0.031008
| 0.067183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01833
| 0.224329
| 633
| 29
| 91
| 21.827586
| 0.769857
| 0
| 0
| 0
| 0
| 0.043478
| 0.361199
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.086957
| 0.217391
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
573b7032640a85abec559a72d8a9edcb24834621
| 378
|
py
|
Python
|
Data Structures and Algorithms/HackerRank Algo Solutions/EASY PROBLEMS/Arrays.py
|
akkik04/Python-DataStructures-and-Algorithms
|
8db63173218e5a9205dbb325935c71fec93b695c
|
[
"MIT"
] | 1
|
2022-01-22T18:19:07.000Z
|
2022-01-22T18:19:07.000Z
|
Data Structures and Algorithms/HackerRank Algo Solutions/EASY PROBLEMS/Arrays.py
|
akkik04/Python-DataStructures-and-Algorithms
|
8db63173218e5a9205dbb325935c71fec93b695c
|
[
"MIT"
] | null | null | null |
Data Structures and Algorithms/HackerRank Algo Solutions/EASY PROBLEMS/Arrays.py
|
akkik04/Python-DataStructures-and-Algorithms
|
8db63173218e5a9205dbb325935c71fec93b695c
|
[
"MIT"
] | null | null | null |
# ARRAYS-DS HACKERANK SOLUTION:
# creating a function to reverse the array.
def reverseArray(arr):
    """Return a new list with the elements of ``arr`` in reverse order.

    The input list is not modified. Fixes the original's local variable
    named ``reversed``, which shadowed the builtin of the same name.
    """
    # A negative-step slice copies the sequence back-to-front.
    return arr[::-1]
# receiving input.
arr_count = int(input().strip())  # declared element count; not used below
arr = list(map(int, input().rstrip().split()))
# printing the output.
# NOTE(review): this prints the Python list repr (e.g. "[3, 2, 1]"); if the
# judge expects space-separated values, print(*reverseArray(arr)) — confirm.
print(reverseArray(arr))
| 22.235294
| 47
| 0.653439
| 46
| 378
| 5.347826
| 0.673913
| 0.065041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00339
| 0.219577
| 378
| 17
| 48
| 22.235294
| 0.830508
| 0.42328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.333333
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
574587d505f7c19dabd0452d40b6544e75b9a682
| 10,136
|
py
|
Python
|
processing_scripts/database_update/pokedex_entry.py
|
CorentG/Pokecube-Issues-and-Wiki
|
690af5d8499561f65f761fd49fbf5fc2bc85c4c3
|
[
"MIT"
] | 24
|
2019-02-02T20:37:53.000Z
|
2022-02-09T13:51:41.000Z
|
processing_scripts/database_update/pokedex_entry.py
|
CorentG/Pokecube-Issues-and-Wiki
|
690af5d8499561f65f761fd49fbf5fc2bc85c4c3
|
[
"MIT"
] | 671
|
2018-08-20T08:46:35.000Z
|
2022-03-26T00:11:43.000Z
|
processing_scripts/database_update/pokedex_entry.py
|
CorentG/Pokecube-Issues-and-Wiki
|
690af5d8499561f65f761fd49fbf5fc2bc85c4c3
|
[
"MIT"
] | 68
|
2018-09-25T21:03:40.000Z
|
2022-02-25T19:59:51.000Z
|
import csv_loader
import moves_names
def getSingle(name, data, file, value):
    """Look up the single field ``value`` for ``name`` in csv table ``file``."""
    info = data.get_info(name, value, expected_file=file, use_names_map=True)
    return info[file][0]
def getExpYield(name, data):
    """Return the base experience yield for ``name`` as an int."""
    raw = getSingle(name, data, "pokemon", "base_experience")
    return int(raw)
def getHeight(name, data):
    """Return the height for ``name`` scaled by 1/10.

    Presumably converts decimetres (csv unit) to metres — verify against the
    source data.
    """
    raw = getSingle(name, data, "pokemon", "height")
    return int(raw) / 10.0
def getWeight(name, data):
    """Return the weight for ``name`` scaled by 1/10.

    Presumably converts hectograms (csv unit) to kilograms — verify against
    the source data.
    """
    raw = getSingle(name, data, "pokemon", "weight")
    return int(raw) / 10.0
def getGenderRatio(name, data):
    """Translate the csv ``gender_rate`` into a 0-255 ratio byte.

    The csv value counts female eighths (-1 meaning genderless).
    NOTE(review): presumably this matches the consumer's 0-255 gender
    encoding (255 = genderless) — confirm. Rates 3 and 5 are unmapped and
    raise KeyError, exactly as the original did.
    """
    rate_to_ratio = {
        -1: 255,
        0: 0,
        1: 30,
        2: 62,
        4: 127,
        6: 191,
        7: 225,
        8: 254,
    }
    val = int(getSingle(name, data, "pokemon_species", "gender_rate"))
    return rate_to_ratio[val]
def getCaptureRate(name, data):
    """Return the capture rate for ``name`` as an int."""
    raw = getSingle(name, data, "pokemon_species", "capture_rate")
    return int(raw)
def getBaseFriendship(name, data):
    """Return the base happiness/friendship for ``name`` as an int."""
    raw = getSingle(name, data, "pokemon_species", "base_happiness")
    return int(raw)
def getExpMode(name, data):
    """Return the growth-rate prose for ``name``, or None when absent.

    Only rows whose second column is '9' are used — presumably the English
    language id in the prose table; verify against the csv schema.
    """
    species = data.get_info(name, "growth_rate_id", expected_file="pokemon_species", use_names_map=True)["pokemon_species"]
    growth_id = species[0]
    prose_rows = data.get_entry(growth_id, expected_file="growth_rate_prose", use_names_map=True)["growth_rate_prose"]
    for row in prose_rows:
        if row[1] == '9':
            return row[2]
    return None
def getLevelMoves(name, data):
    """Return ``(moves, names)`` of level-up moves for ``name``.

    ``moves`` maps level (string) -> comma-separated move names; ``names``
    lists the distinct move names encountered. Returns ``({}, [])`` when the
    pokemon has no move rows at all.
    """
    moves = {}
    names = []
    try:
        moves_entries = data.get_entry(name, expected_file="pokemon_moves", use_names_map=True)["pokemon_moves"]
        version = 1
        # First, locate the most recent version group that has lvl up moves
        # (entries whose level column, index 4, is non-zero).
        for entry in moves_entries:
            if entry[4] == "0":
                continue
            vers = int(entry[1])
            if vers > version:
                version = vers
        version = str(version)
        # Now we can actually parse the moves of that version group.
        for entry in moves_entries:
            # TODO figure out if a move is an evolution move, is that info here?
            if entry[4] == "0":
                continue
            if entry[1] != version:
                continue
            level = entry[4]
            move_id = entry[2]
            move = data.get_info(move_id, "identifier", expected_file="moves")["moves"][0]
            move_, conf = csv_loader.match_name(move, moves_names.moves)
            if conf < 80:
                # Low-confidence fuzzy match: keep the raw name and log it.
                print("{} -> {} ({})".format(move, move_, conf))
            else:
                move = move_
            if level in moves:
                moves[level] = moves[level] + "," + move
            else:
                moves[level] = move
            if move not in names:
                names.append(move)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; pokemon without move data simply yield nothing.
        # print("No moves found for {}".format(name))
        pass
    return moves, names
def getAllMoves(name, data, exclude=None):
    """Return all distinct move names for ``name``, skipping ``exclude``.

    ``exclude`` defaults to no exclusions; the original used a mutable
    ``[]`` default (a Python anti-pattern), replaced here by ``None``.
    Returns ``[]`` when the pokemon has no move rows at all.
    """
    if exclude is None:
        exclude = ()
    names = []
    try:
        moves_entries = data.get_entry(name, expected_file="pokemon_moves", use_names_map=True)["pokemon_moves"]
        for entry in moves_entries:
            move_id = entry[2]
            move = data.get_info(move_id, "identifier", expected_file="moves")["moves"][0]
            move_, conf = csv_loader.match_name(move, moves_names.moves)
            if conf < 80:
                # Low-confidence fuzzy match: keep the raw name and log it.
                print("{} -> {} ({})??".format(move, move_, conf))
            else:
                move = move_
            if move in exclude or move in names:
                continue
            names.append(move)
    except Exception:
        # Narrowed from a bare ``except:``; pokemon without move data yield [].
        # print("No moves found for {}".format(name))
        pass
    return names
def getTypes(name, data):
    """Return the type identifiers (e.g. "grass") for ``name``."""
    type_ids = data.get_info(name, "type_id", expected_file="pokemon_types", use_names_map=True)["pokemon_types"]
    return [
        data.get_info(tid, "identifier", expected_file="types")["types"][0]
        for tid in type_ids
    ]
def getStats(name, data):
    """Return the base-stat values for ``name`` from the pokemon_stats table."""
    # TODO maybe validate that these are in the correct order, the index is
    # also stored in the csv file, so that validation can be done if needed!
    info = data.get_info(name, "base_stat", expected_file="pokemon_stats", use_names_map=True)
    return info["pokemon_stats"]
def getEVs(name, data):
    """Return the effort-value (EV) yields for ``name``."""
    # TODO maybe validate that these are in the correct order, the index is
    # also stored in the csv file, so that validation can be done if needed!
    info = data.get_info(name, "effort", expected_file="pokemon_stats", use_names_map=True)
    return info["pokemon_stats"]
def getAbilities(name, data):
    """Return ``(abilities, hidden)`` for ``name``.

    ``abilities`` is a two-slot list of normal abilities (empty string for a
    vacant slot); ``hidden`` collects hidden abilities. Row layout: index 1
    is the ability id, index 2 the hidden flag, index 3 the 1-based slot.
    """
    hidden = []
    abilities = ["", ""]
    rows = data.get_entry(name, expected_file="pokemon_abilities", use_names_map=True)["pokemon_abilities"]
    for row in rows:
        slot = int(row[3]) - 1
        ability_name = data.get_info(row[1], "identifier", expected_file="abilities")["abilities"][0]
        if ability_name == '':
            continue
        if row[2] == "1":
            hidden.append(ability_name)
        elif slot < len(abilities):
            abilities[slot] = ability_name
    return abilities, hidden
def sorter(e):
    # Sort key for level strings: compare numerically so "10" sorts after "9".
    return int(e)
class Pokedex(object):
    """Collects the pokedex entry maps for a set of pokemon names."""

    def __init__(self, names, data, originals, do_moves, do_stats):
        """Build an entry per name; ``originals`` supplies per-name fallbacks."""
        self.pokemon = []
        for name in names:
            fallback = originals.get(name)
            try:
                entry = PokedexEntry(name, data, fallback, do_moves, do_stats)
                # When moves were requested but none were produced, skip.
                if do_moves and "moves" not in entry.map:
                    continue
                self.pokemon.append(entry.map)
            except Exception as err:
                print("Error with {} {}, Using default if present? {}".format(name, err, fallback is not None))
                if fallback is not None and do_stats:
                    self.pokemon.append(fallback)
class PokedexEntry(object):
    """Builds the serializable ``map`` describing one pokemon entry.

    The constructor fills ``self.map`` from the csv-backed ``data`` source.
    ``do_stats`` controls the stats/abilities/misc section and ``do_moves``
    the move lists. ``defaults`` is a previously generated entry (or None)
    used to carry over values that cannot be derived from the csvs.
    """
    def __init__(self, name, data, defaults, do_moves, do_stats):
        _map = self.map = {}
        _map["name"] = name
        if do_stats:
            # species_id is shared by all forms; the form whose own id equals
            # the species id is treated as the base form.
            _map["number"] = int(getSingle(name, data, "pokemon", "species_id"))
            id = int(getSingle(name, data, "pokemon", "id"))
            is_default = id == _map["number"]
            if(is_default):
                _map["base"] = True
            _map["stats"] = {}
            # Output order of the six stats.
            statsOrder = ["hp", "atk", "def", "spatk", "spdef", "spd"]
            # Do the base stats
            stats = getStats(name, data)
            _map["stats"]["stats"] = {}
            values = _map["stats"]["stats"]["values"] = {}
            for i in range(len(statsOrder)):
                values[statsOrder[i]] = stats[i]
            # Sizes are not present in the csv data, so copy from the previous
            # entry when one exists.
            if defaults is not None:
                _map["stats"]["sizes"] = defaults["stats"]["sizes"]
            else:
                print("Cannot copy sizes for {}".format(name))
            # Do the evs (stats with zero effort are omitted)
            stats = getEVs(name, data)
            _map["stats"]["evs"] = {}
            values = _map["stats"]["evs"]["values"] = {}
            for i in range(len(statsOrder)):
                if stats[i] == "0":
                    continue
                values[statsOrder[i]] = stats[i]
            # Get the types, keyed "type1", "type2", ...
            types = getTypes(name,data)
            _map["stats"]["types"] = {}
            values = _map["stats"]["types"]["values"] = {}
            for i in range(len(types)):
                ident = "type{}".format(i+1)
                values[ident] = types[i]
            # Get Abilities: normals joined with ", ", hiddens likewise.
            abilities, hidden = getAbilities(name, data)
            _map["stats"]["abilities"] = {}
            values = _map["stats"]["abilities"]["values"] = {}
            if len(abilities) > 0:
                normals = abilities[0]
                if len(abilities) > 1:
                    for i in range(1, len(abilities)):
                        if abilities[i] != "":
                            normals = normals +", "+abilities[i]
                values["normal"] = normals
            if len(hidden) > 0:
                hiddens = hidden[0]
                if len(hidden) > 1:
                    for i in range(1, len(hidden)):
                        if hidden[i] != "":
                            hiddens = hiddens +", "+hidden[i]
                values["hidden"] = hiddens
            # Get the simple values
            _map["stats"]["mass"] = getWeight(name, data)
            _map["stats"]["baseExp"] = getExpYield(name, data)
            # This set is not defined for all targets, so try/except them
            try:
                _map["stats"]["captureRate"] = getCaptureRate(name, data)
            except:
                pass
            try:
                _map["stats"]["baseFriendship"] = getBaseFriendship(name, data)
            except:
                pass
            try:
                _map["stats"]["genderRatio"] = getGenderRatio(name, data)
            except:
                pass
            try:
                _map["stats"]["expMode"] = getExpMode(name, data)
            except:
                pass
        if do_moves:
            # Do the moves
            # First lvl up moves
            moves, names = getLevelMoves(name, data)
            moves_list = getAllMoves(name, data, exclude=names)
            if len(moves) != 0 or len(moves_list) != 0:
                _map["moves"] = {}
            elif defaults is not None:
                # No move data at all: fall back to the previous entry.
                _map["moves"] = defaults["moves"]
                print("Not Updating moves for {}".format(name))
            if len(moves) > 0:
                # Levels sorted numerically via sorter().
                lvlMoves = _map["moves"]["lvlupMoves"] = {}
                levels = [x for x in moves.keys()]
                levels.sort(key=sorter)
                for level in levels:
                    lvlMoves[level] = moves[level]
            # Then remainder: all other moves as one comma-separated string.
            moves = ""
            if len(moves_list)>0:
                moves = moves_list[0]
                for i in range(1, len(moves_list)):
                    moves = moves +", "+moves_list[i]
            misc = _map["moves"]["misc"] = {}
            misc["moves"] = moves
| 37.128205
| 120
| 0.530979
| 1,159
| 10,136
| 4.505608
| 0.178602
| 0.056683
| 0.029299
| 0.025852
| 0.384719
| 0.325163
| 0.290885
| 0.238414
| 0.21486
| 0.196476
| 0
| 0.011726
| 0.343725
| 10,136
| 272
| 121
| 37.264706
| 0.773301
| 0.070837
| 0
| 0.259912
| 0
| 0
| 0.10299
| 0
| 0
| 0
| 0
| 0.003676
| 0
| 1
| 0.07489
| false
| 0.026432
| 0.008811
| 0.030837
| 0.162996
| 0.022026
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5746c4fc2776ee414b40d5372100f22e8a3258f4
| 25,539
|
py
|
Python
|
tests/test_add.py
|
open-contracting/kingfisher-views
|
7887610a144493f2ccd0d9a22cf43157dc180479
|
[
"BSD-3-Clause"
] | 2
|
2019-02-19T16:15:19.000Z
|
2020-07-25T04:05:45.000Z
|
tests/test_add.py
|
open-contracting/kingfisher-views
|
7887610a144493f2ccd0d9a22cf43157dc180479
|
[
"BSD-3-Clause"
] | 142
|
2019-03-11T15:14:22.000Z
|
2020-11-11T19:26:09.000Z
|
tests/test_add.py
|
open-contracting/kingfisher-views
|
7887610a144493f2ccd0d9a22cf43157dc180479
|
[
"BSD-3-Clause"
] | 5
|
2019-04-11T14:11:10.000Z
|
2020-07-30T22:45:59.000Z
|
import datetime
import decimal
from unittest.mock import patch
import pytest
from click.testing import CliRunner
from psycopg2 import sql
from manage import SUMMARIES, cli, construct_where_fragment
from tests import assert_bad_argument, assert_log_records, assert_log_running, fixture, noop
command = 'add'
# Relations always created by the `add` command, regardless of options.
TABLES = {
    'note',
}
# Partitions of SUMMARIES, filled by the loop below:
# tables vs. views, and their *_field_list / *_no_field_list variants.
SUMMARY_TABLES = set()
SUMMARY_VIEWS = set()
FIELD_LIST_TABLES = set()
NO_FIELD_LIST_TABLES = set()
NO_FIELD_LIST_VIEWS = set()
for table_name, table in SUMMARIES.items():
    FIELD_LIST_TABLES.add(f'{table_name}_field_list')
    if table.is_table:
        SUMMARY_TABLES.add(table_name)
        NO_FIELD_LIST_TABLES.add(f'{table_name}_no_field_list')
    else:
        SUMMARY_VIEWS.add(table_name)
        NO_FIELD_LIST_VIEWS.add(f'{table_name}_no_field_list')
    TABLES.add(f'{table_name}_no_data')
def test_construct_where_fragment(db):
    """Dotted keys become PostgreSQL JSON-path fragments (->/->> operators)."""
    cases = [
        ('a', 'z', " AND d.data->>'a' = 'z'"),
        ('a.b', 'z', " AND d.data->'a'->>'b' = 'z'"),
        ('a.b.c', 'z', " AND d.data->'a'->'b'->>'c' = 'z'"),
        ('a.b.c.d', 'z', " AND d.data->'a'->'b'->'c'->>'d' = 'z'"),
        ('a.b.c', '', " AND d.data->'a'->'b'->>'c' = ''"),
        ('', 'z', " AND d.data->>'' = 'z'"),
    ]
    for field, value, expected in cases:
        assert construct_where_fragment(db.cursor, field, value) == expected
@pytest.mark.parametrize('collections, message', [
    ('a', 'Collection IDs must be integers'),
    ('1,10,100', 'Collection IDs {10, 100} not found'),
])
def test_validate_collections(collections, message, caplog):
    """Invalid COLLECTIONS arguments exit with code 2 and a helpful message."""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(cli, [command, collections])
    assert outcome.exit_code == 2
    assert_bad_argument(outcome, 'COLLECTIONS', message)
    assert_log_running(caplog, command)
def test_validate_name(caplog):
    """A non-lowercase --name is rejected with exit code 2."""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(cli, [command, '1', '--name', 'camelCase'])
    assert outcome.exit_code == 2
    assert_bad_argument(outcome, '--name', 'value must be lowercase')
    assert_log_running(caplog, command)
@patch('manage.summary_tables', noop)
@patch('manage.field_counts', noop)
@patch('manage.field_lists', noop)
@pytest.mark.parametrize('kwargs, name, collections', [
    ({}, 'collection_1', (1,)),
    ({'collections': '1,2'}, 'collection_1_2', (1, 2)),
    ({'name': 'custom'}, 'custom', (1,)),
])
def test_command_name(kwargs, name, collections, db, caplog):
    # The add command derives the schema name from the collection ids unless
    # an explicit --name is given; the heavy routines are patched to no-ops.
    schema = f'view_data_{name}'
    identifier = sql.Identifier(schema)
    with fixture(db, **kwargs) as result:
        # Schema is created and the selected collections are recorded.
        assert db.schema_exists(schema)
        assert db.all('SELECT collection_id, schema FROM summaries.selected_collections WHERE schema=%(schema)s',
                      {'schema': schema}) == [(collection, schema,) for collection in collections]
        # The default note is stored in the new schema.
        assert db.all(sql.SQL('SELECT id, note FROM {schema}.note').format(schema=identifier)) == [
            (1, 'Default'),
        ]
        assert result.exit_code == 0
        assert result.output == ''
        assert_log_records(caplog, command, [
            f'Arguments: collections={collections!r} note=Default name={kwargs.get("name")} tables_only=False '
            'filters=()',
            f'Added {name}',
            'Running summary-tables routine',
            'Running field-counts routine',
            'Running field-lists routine',
        ])
@pytest.mark.parametrize('filters', [(), (('ocid', 'dolore'),)])
@pytest.mark.parametrize('tables_only, field_counts, field_lists, tables, views', [
(False, True, False,
TABLES | SUMMARY_TABLES, SUMMARY_VIEWS),
(True, True, False,
TABLES | SUMMARY_TABLES | SUMMARY_VIEWS, set()),
(False, False, True,
TABLES | FIELD_LIST_TABLES | NO_FIELD_LIST_TABLES, SUMMARY_TABLES | SUMMARY_VIEWS | NO_FIELD_LIST_VIEWS),
(True, False, True,
TABLES | FIELD_LIST_TABLES | NO_FIELD_LIST_TABLES | SUMMARY_TABLES | SUMMARY_VIEWS | NO_FIELD_LIST_VIEWS, set()),
])
def test_command(db, tables_only, field_counts, field_lists, tables, views, filters, caplog):
# Load collection 2 first, to check that existing collections aren't included when we load collection 1.
with fixture(db, collections='2', tables_only=tables_only, field_counts=field_counts, field_lists=field_lists,
filters=filters), fixture(db, tables_only=tables_only, field_counts=field_counts,
field_lists=field_lists, filters=filters) as result:
# Check existence of schema, tables and views.
if field_counts:
tables.add('field_counts')
assert db.schema_exists('view_data_collection_1')
assert db.schema_exists('view_data_collection_2')
assert set(db.pluck("SELECT table_name FROM information_schema.tables WHERE table_schema = %(schema)s "
"AND table_type = 'BASE TABLE'", {'schema': 'view_data_collection_1'})) == tables
assert set(db.pluck("SELECT table_name FROM information_schema.tables WHERE table_schema = %(schema)s "
"AND table_type = 'VIEW'", {'schema': 'view_data_collection_1'})) == views
# Check contents of summary relations.
rows = db.all("""
SELECT
award_index,
release_type,
collection_id,
ocid,
release_id,
award_id,
title,
status,
description,
value_amount,
value_currency,
date,
contractperiod_startdate,
contractperiod_enddate,
contractperiod_maxextentdate,
contractperiod_durationindays,
total_suppliers,
total_documents,
document_documenttype_counts,
total_items
FROM view_data_collection_1.awards_summary
ORDER BY id, award_index
""")
assert rows[0] == (
0, # award_index
'release', # release_type
1, # collection_id
'dolore', # ocid
'ex laborumsit autein magna veniam', # release_id
'reprehenderit magna cillum eu nisi', # award_id
'laborum aute nisi eiusmod', # award_title
'pending', # award_status
'ullamco in voluptate', # award_description
decimal.Decimal('-95099396'), # award_value_amount
'AMD', # award_value_currency
datetime.datetime(3263, 12, 5, 21, 24, 19, 161000), # award_date
datetime.datetime(4097, 9, 16, 5, 55, 19, 125000), # award_contractperiod_startdate
datetime.datetime(4591, 4, 29, 6, 34, 28, 472000), # award_contractperiod_enddate
datetime.datetime(3714, 8, 9, 7, 21, 37, 544000), # award_contractperiod_maxextentdate
decimal.Decimal('72802012'), # award_contractperiod_durationindays
2, # total_suppliers
4, # total_documents
{
'Excepteur nisi et': 1,
'proident exercitation in': 1,
'ut magna dolore velit aute': 1,
'veniam enim aliqua d': 1,
}, # document_documenttype_counts
5, # total_items
)
if filters:
assert len(rows) == 4
else:
assert len(rows) == 301
rows = db.all("""
SELECT
party_index,
release_type,
collection_id,
ocid,
release_id,
party_id,
roles,
identifier,
unique_identifier_attempt,
additionalidentifiers_ids,
total_additionalidentifiers
FROM view_data_collection_1.parties_summary
ORDER BY id, party_index
""")
assert rows[0] == (
0, # party_index
'release', # release_type
1, # collection_id
'dolore', # ocid
'ex laborumsit autein magna veniam', # release_id
'voluptate officia tempor dolor', # party_id
[
'ex ',
'in est exercitation nulla Excepteur',
'ipsum do',
], # roles
'ad proident dolor reprehenderit veniam-in quis exercitation reprehenderit', # identifier
'voluptate officia tempor dolor', # unique_identifier_attempt
[
'exercitation proident voluptate-sed culpa eamollit consectetur dolor l',
'magna-dolor ut indolorein in tempor magna mollit',
'ad occaecat amet anim-laboris ea Duisdeserunt quis sed pariatur mollit',
'elit mollit-officia proidentmagna',
'ex-minim Ut consectetur',
], # additionalidentifiers_ids
5, # total_additionalidentifiers
)
if filters:
assert len(rows) == 4
else:
assert len(rows) == 296
if field_counts:
# Check contents of field_counts table.
rows = db.all('SELECT * FROM view_data_collection_1.field_counts')
if filters:
assert len(rows) == 1046
assert rows[0] == (1, 'release', 'awards', 1, 4, 1)
else:
assert len(rows) == 65235
assert rows[0] == (1, 'release', 'awards', 100, 301, 100)
if field_lists:
# Check the count of keys in the field_list field for the lowest primary keys in each summary relation.
statement = """
SELECT
count(*)
FROM
(SELECT
jsonb_each(field_list)
FROM (
SELECT
field_list
FROM
view_data_collection_1.{table}
ORDER BY
{primary_keys}
LIMIT 1) AS field_list
) AS each
"""
expected = {
'award_documents_summary': 11,
'award_items_summary': 26,
'award_suppliers_summary': 28,
'awards_summary': 469,
'buyer_summary': 28,
'contract_documents_summary': 11,
'contract_implementation_documents_summary': 11,
'contract_implementation_milestones_summary': 29,
'contract_implementation_transactions_summary': 83,
'contract_items_summary': 26,
'contract_milestones_summary': 27,
'contracts_summary': 469,
'parties_summary': 34,
'planning_documents_summary': 11,
'planning_milestones_summary': 29,
'planning_summary': 61,
'procuringentity_summary': 32,
'relatedprocesses_summary': 6,
'release_summary': 1046,
'tender_documents_summary': 15,
'tender_items_summary': 25,
'tender_milestones_summary': 23,
'tender_summary': 228,
'tenderers_summary': 31,
}
for table_name, table in SUMMARIES.items():
count = db.one(db.format(statement, table=table_name, primary_keys=table.primary_keys))[0]
assert count == expected[table_name], f'{table_name}: {count} != {expected[table_name]}'
def result_dict(statement):
result = db.one(statement)
return {column.name: result for column, result in zip(db.cursor.description, result)}
statement = """
SELECT
count(*) total,
sum(coalesce((field_list ->> 'contracts')::int, 0)) contracts,
sum(coalesce((field_list ->> 'awards')::int, 0)) awards,
sum(coalesce((field_list ->> 'awards/id')::int, 0)) awards_id,
sum(coalesce((field_list ->> 'awards/value/amount')::int, 0)) awards_amount
FROM
view_data_collection_1.contracts_summary
"""
if filters:
assert result_dict(statement) == {
'awards': 1,
'awards_amount': 1,
'awards_id': 1,
'contracts': 0,
'total': 1,
}
else:
assert result_dict(statement) == {
'awards': 213,
'awards_amount': 213,
'awards_id': 213,
'contracts': 0,
'total': 285,
}
statement = """
SELECT
count(*) total,
sum(coalesce((field_list ->> 'awards')::int, 0)) awards,
sum(coalesce((field_list ->> 'contracts')::int, 0)) contracts,
sum(coalesce((field_list ->> 'contracts/id')::int, 0)) contracts_id,
sum(coalesce((field_list ->> 'contracts/value/amount')::int, 0)) contracts_amount
FROM
view_data_collection_1.awards_summary
"""
if filters:
assert result_dict(statement) == {
'contracts': 1,
'contracts_amount': 1,
'contracts_id': 1,
'awards': 0,
'total': 4,
}
else:
assert result_dict(statement) == {
'contracts': 213,
'contracts_amount': 213,
'contracts_id': 213,
'awards': 0,
'total': 301,
}
# All columns have comments.
assert not db.all("""
SELECT
isc.table_name,
isc.column_name,
isc.data_type
FROM
information_schema.columns isc
WHERE
isc.table_schema = %(schema)s
AND LOWER(isc.table_name) NOT IN ('selected_collections', 'note')
AND LOWER(isc.table_name) NOT LIKE '%%_no_data'
AND LOWER(isc.table_name) NOT LIKE '%%_field_list'
AND pg_catalog.col_description(format('%%s.%%s',isc.table_schema,isc.table_name)::regclass::oid,
isc.ordinal_position) IS NULL
""", {'schema': 'view_data_collection_1'})
expected = []
for collection_id in [2, 1]:
expected.extend([
f'Arguments: collections=({collection_id},) note=Default name=None tables_only={tables_only!r} '
f'filters={filters!r}',
f'Added collection_{collection_id}',
'Running summary-tables routine',
])
if field_counts:
expected.append('Running field-counts routine')
if field_lists:
expected.append('Running field-lists routine')
assert result.exit_code == 0
assert result.output == ''
assert_log_records(caplog, command, expected)
@pytest.mark.parametrize('filters', [
    # A single filter on tender.procurementMethod.
    (('tender.procurementMethod', 'direct'),),
    # Two filters combined: tender.procurementMethod AND tender.status.
    (('tender.procurementMethod', 'direct'), ('tender.status', 'planned'),),
])
@pytest.mark.parametrize('tables_only, field_counts, field_lists, tables, views', [
    (False, True, False,
     TABLES | SUMMARY_TABLES, SUMMARY_VIEWS),
    (True, True, False,
     TABLES | SUMMARY_TABLES | SUMMARY_VIEWS, set()),
    (False, False, True,
     TABLES | FIELD_LIST_TABLES | NO_FIELD_LIST_TABLES, SUMMARY_TABLES | SUMMARY_VIEWS | NO_FIELD_LIST_VIEWS),
    (True, False, True,
     TABLES | FIELD_LIST_TABLES | NO_FIELD_LIST_TABLES | SUMMARY_TABLES | SUMMARY_VIEWS | NO_FIELD_LIST_VIEWS, set()),
])
def test_command_filter(db, tables_only, field_counts, field_lists, tables, views, filters, caplog):
    """
    Run the summarize command with release-level filters and check the results.

    Verifies that:

    - The expected schemas, base tables and views are created for the filtered collection.
    - Only releases matching ALL filters contribute rows to the summary tables
      (checked directly in ``tender_summary`` and cross-checked against the ``data`` table
      via ``release_summary.data_id``).
    - The contents and row counts of ``awards_summary`` and ``parties_summary`` match
      the fixture data under each filter combination.
    - When enabled, the ``field_counts`` table and the ``field_list`` columns contain
      the expected values.
    - The command exits cleanly and logs the expected messages for each collection.
    """
    # Load collection 2 first, to check that existing collections aren't included when we load collection 1.
    with fixture(db, collections='2', tables_only=tables_only, field_counts=field_counts, field_lists=field_lists,
                 filters=filters), fixture(db, tables_only=tables_only, field_counts=field_counts,
                                           field_lists=field_lists, filters=filters) as result:
        # Check existence of schema, tables and views.
        if field_counts:
            # The field-counts routine adds one extra base table to the schema.
            tables.add('field_counts')
        assert db.schema_exists('view_data_collection_1')
        assert db.schema_exists('view_data_collection_2')
        assert set(db.pluck("SELECT table_name FROM information_schema.tables WHERE table_schema = %(schema)s "
                            "AND table_type = 'BASE TABLE'", {'schema': 'view_data_collection_1'})) == tables
        assert set(db.pluck("SELECT table_name FROM information_schema.tables WHERE table_schema = %(schema)s "
                            "AND table_type = 'VIEW'", {'schema': 'view_data_collection_1'})) == views

        # Check that the tender_summary table only has correctly filtered items
        rows = db.all("""
            SELECT
                procurementmethod
            FROM view_data_collection_1.tender_summary
        """)
        for row in rows:
            # Every summarized tender must match the procurementMethod filter.
            assert row[0] == 'direct'
        if len(filters) > 1:
            # Both filters applied: fewer releases survive.
            assert len(rows) == 2
        else:
            assert len(rows) == 19

        # Check data_id's in the summary against the data table
        # This allows us to check that missing data doesn't have the filtered value
        rows = db.all("""
            SELECT
                data_id
            FROM view_data_collection_1.release_summary
        """)
        if len(filters) > 1:
            assert len(rows) == 2
        else:
            assert len(rows) == 19
        data_ids = [row[0] for row in rows]
        rows = db.all("""
            SELECT
                data.id,
                data.data->'tender'->'procurementMethod',
                data.data->'tender'->'status'
            FROM data
            JOIN release ON release.data_id=data.id
            WHERE release.collection_id=1
        """)
        for row in rows:
            # A release is summarized if and only if it matches every active filter.
            if row[1] == 'direct' and (len(filters) == 1 or row[2] == 'planned'):
                assert row[0] in data_ids
            else:
                assert row[0] not in data_ids

        # Check contents of summary relations.
        rows = db.all("""
            SELECT
                award_index,
                release_type,
                collection_id,
                ocid,
                release_id,
                award_id,
                title,
                status,
                description,
                value_amount,
                value_currency,
                date,
                contractperiod_startdate,
                contractperiod_enddate,
                contractperiod_maxextentdate,
                contractperiod_durationindays,
                total_suppliers,
                total_documents,
                document_documenttype_counts,
                total_items
            FROM view_data_collection_1.awards_summary
            ORDER BY id, award_index
        """)
        # The first awards_summary row, column by column (values come from the randomized fixture data).
        assert rows[0] == (
            0,  # award_index
            'release',  # release_type
            1,  # collection_id
            'officia dolore non',  # ocid
            'laborum irure consectetur fugiat',  # release_id
            'dolorLorem fugiat ut',  # award_id
            'et',  # award_title
            'pending',  # award_status
            'adipisicing ame',  # award_description
            decimal.Decimal('-7139109'),  # award_value_amount
            'AUD',  # award_value_currency
            datetime.datetime(3672, 10, 26, 4, 38, 28, 786000),  # award_date
            datetime.datetime(2192, 8, 27, 0, 9, 1, 626000),  # award_contractperiod_startdate
            datetime.datetime(4204, 1, 22, 22, 4, 18, 268000),  # award_contractperiod_enddate
            datetime.datetime(5117, 12, 26, 11, 33, 27, 496000),  # award_contractperiod_maxextentdate
            decimal.Decimal('-30383739'),  # award_contractperiod_durationindays
            5,  # total_suppliers
            4,  # total_documents
            {
                'in sint enim labore': 1,
                'mollit labore Lorem': 1,
                'minim incididunt sed ipsum': 1,
                'ad reprehenderit sit dolor enim': 1
            },  # document_documenttype_counts
            5,  # total_items
        )
        if len(filters) > 1:
            assert len(rows) == 7
        else:
            assert len(rows) == 55

        rows = db.all("""
            SELECT
                party_index,
                release_type,
                collection_id,
                ocid,
                release_id,
                party_id,
                roles,
                identifier,
                unique_identifier_attempt,
                additionalidentifiers_ids,
                total_additionalidentifiers
            FROM view_data_collection_1.parties_summary
            ORDER BY id, party_index
        """)
        # The first parties_summary row, column by column.
        assert rows[0] == (
            0,  # party_index
            'release',  # release_type
            1,  # collection_id
            'officia dolore non',  # ocid
            'laborum irure consectetur fugiat',  # release_id
            'eu voluptateeiusmod ipsum ea',  # party_id
            [
                'laborum',
                'tempor',
            ],  # roles
            'cupidatat consequat in ullamco-in incididunt commodo elit',  # identifier
            'eu voluptateeiusmod ipsum ea',  # unique_identifier_attempt
            [
                'non ei-commododolor laborum',
            ],  # additionalidentifiers_ids
            1,  # total_additionalidentifiers
        )
        if len(filters) > 1:
            assert len(rows) == 5
        else:
            assert len(rows) == 56

        if field_counts:
            # Check contents of field_counts table.
            rows = db.all('SELECT * FROM view_data_collection_1.field_counts')
            if len(filters) > 1:
                assert len(rows) == 1515
                assert rows[0] == (1, 'release', 'awards', 2, 7, 2)
            else:
                assert len(rows) == 13077
                assert rows[0] == (1, 'release', 'awards', 19, 55, 19)

        if field_lists:
            # Check the count of keys in the field_list field for the lowest primary keys in each summary relation.
            # {table} and {primary_keys} are filled in per-table by db.format() below.
            statement = """
                SELECT
                    count(*)
                FROM
                    (SELECT
                        jsonb_each(field_list)
                    FROM (
                        SELECT
                            field_list
                        FROM
                            view_data_collection_1.{table}
                        ORDER BY
                            {primary_keys}
                        LIMIT 1) AS field_list
                    ) AS each
            """
            # Expected number of distinct field paths in the first row of each summary relation.
            expected = {
                'award_documents_summary': 11,
                'award_items_summary': 29,
                'award_suppliers_summary': 30,
                'awards_summary': 492,
                'buyer_summary': 31,
                'contract_documents_summary': 11,
                'contract_implementation_documents_summary': 11,
                'contract_implementation_milestones_summary': 23,
                'contract_implementation_transactions_summary': 83,
                'contract_items_summary': 26,
                'contract_milestones_summary': 26,
                'contracts_summary': 492,
                'parties_summary': 30,
                'planning_documents_summary': 11,
                'planning_milestones_summary': 27,
                'planning_summary': 99,
                'procuringentity_summary': 30,
                'relatedprocesses_summary': 6,
                'release_summary': 987,
                'tender_documents_summary': 13,
                'tender_items_summary': 28,
                'tender_milestones_summary': 27,
                'tender_summary': 265,
                'tenderers_summary': 32,
            }
            for table_name, table in SUMMARIES.items():
                count = db.one(db.format(statement, table=table_name, primary_keys=table.primary_keys))[0]
                assert count == expected[table_name], f'{table_name}: {count} != {expected[table_name]}'

        # Build the expected log output: collection 2 is processed first, then collection 1.
        expected = []
        for collection_id in [2, 1]:
            expected.extend([
                f'Arguments: collections=({collection_id},) note=Default name=None tables_only={tables_only!r} '
                f'filters={filters!r}',
                f'Added collection_{collection_id}',
                'Running summary-tables routine',
            ])
            if field_counts:
                expected.append('Running field-counts routine')
            if field_lists:
                expected.append('Running field-lists routine')

        assert result.exit_code == 0
        assert result.output == ''
        assert_log_records(caplog, command, expected)
| 40.092622
| 118
| 0.539958
| 2,582
| 25,539
| 5.113478
| 0.139814
| 0.025903
| 0.02863
| 0.027342
| 0.655912
| 0.588578
| 0.567068
| 0.527304
| 0.510187
| 0.500568
| 0
| 0.030244
| 0.360429
| 25,539
| 636
| 119
| 40.15566
| 0.77807
| 0.075179
| 0
| 0.590747
| 0
| 0.003559
| 0.449877
| 0.132487
| 0
| 0
| 0
| 0
| 0.119217
| 1
| 0.012456
| false
| 0
| 0.014235
| 0
| 0.02847
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|