hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
46f529ee223628b6a5b01d5ac087af9e7d7bb5b3 | 7,863 | py | Python | baseline_models/slowvae/train.py | slachapelle/anon_disentanglement_via_mechanism_sparsity | 677f7e160f3532e1357a3c7f35f9f8f8529b389a | [
"Apache-2.0"
] | null | null | null | baseline_models/slowvae/train.py | slachapelle/anon_disentanglement_via_mechanism_sparsity | 677f7e160f3532e1357a3c7f35f9f8f8529b389a | [
"Apache-2.0"
] | null | null | null | baseline_models/slowvae/train.py | slachapelle/anon_disentanglement_via_mechanism_sparsity | 677f7e160f3532e1357a3c7f35f9f8f8529b389a | [
"Apache-2.0"
] | null | null | null | import argparse
import shutil
import os, json, sys, traceback, time
import pathlib
try:
from comet_ml import Experiment
COMET_AVAIL = True
except ImportError:
COMET_AVAIL = False
import numpy as np
import torch
import datetime
sys.path.insert(0, str(pathlib.Path(__file__).parent))
from scripts.solver import Solver
sys.path.insert(0, str(pathlib.Path(__file__).parent.parent.parent))
from train import get_dataset, get_loader
from universal_logger.logger import UniversalLogger
from metrics import mean_corr_coef, get_linear_score
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def main(args, writer=None):
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
device = torch.device('cuda:0' if args.cuda else 'cpu')
## ---- Data ---- ##
args.no_norm = False
    args.n_lag = 0  # get_dataset expects this argument, but it has no effect here.
args.num_workers = args.n_workers
image_shape, cont_c_dim, disc_c_dim, disc_c_n_values, train_dataset, valid_dataset, test_dataset, = get_dataset(args)
train_loader, valid_loader, test_loader = get_loader(args, train_dataset, valid_dataset, test_dataset)
data_loader = train_loader
if len(image_shape) == 3:
args.num_channel = image_shape[-1]
else:
args.num_channel = None
## ---- Logging ---- ##
if COMET_AVAIL and args.comet_key is not None and args.comet_workspace is not None and args.comet_project_name is not None:
comet_exp = Experiment(api_key=args.comet_key, project_name=args.comet_project_name,
workspace=args.comet_workspace, auto_metric_logging=False, auto_param_logging=False)
comet_exp.log_parameters(vars(args))
if args.comet_tag is not None:
comet_exp.add_tag(args.comet_tag)
else:
comet_exp = None
logger = UniversalLogger(comet=comet_exp,
stdout=(not args.no_print),
json=args.output_dir, throttle=None)
t0 = time.time()
    ## ---- Save hparams ---- ##
args.mode = 'slowvae'
kwargs = vars(args)
with open(os.path.join(args.output_dir, "hparams.json"), "w") as fp:
json.dump(kwargs, fp, sort_keys=True, indent=4)
with open(os.path.join(args.output_dir, "args"), "w") as f:
json.dump(args.__dict__, f)
net = Solver(args, image_shape, data_loader=data_loader, logger=logger, z_dim=train_dataset.z_dim)
failure = net.train(writer)
if failure:
print('failed in %.2fs' % (time.time() - t0))
#shutil.rmtree(args.output_dir)
else:
print('done in %.2fs' % (time.time() - t0))
## ---- Evaluate performance ---- ##
# compute MCC and save representation
mcc, cc_program_perm, assignments, z, z_hat = mean_corr_coef(net.net, test_loader, device, opt=args)
linear_score = get_linear_score(z_hat, z)
## ---- Save ---- ##
# save scores
logger.log_metrics(step=0, metrics={"mcc": mcc, "linear_score": linear_score})
# save both ground_truth and learned latents
np.save(os.path.join(args.output_dir, "z_hat.npy"), z_hat)
np.save(os.path.join(args.output_dir, "z_gt.npy"), z)
### For Random Search ###
def randint(low, high):
    return int(np.random.randint(low, high, 1)[0])  # np.int was removed in NumPy 1.24; the builtin int is equivalent here
def uniform(low, high):
return np.random.uniform(low, high, 1)[0]
def loguniform(low, high):
return np.exp(np.random.uniform(np.log(low), np.log(high), 1))[0]
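# Illustrative usage of the random-search helpers above (an assumption; they
# are not called anywhere in this script). A search driver could sample
# hyperparameters like:
#   args.lr = loguniform(1e-5, 1e-2)
#   args.batch_size = randint(64, 2048)
#   args.beta = uniform(0.5, 4.0)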
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='slowVAE')
parser.add_argument("--output_dir", required=True,
help="Directory to output logs and model checkpoints")
parser.add_argument("--dataset", type=str, required=True,
help="Type of the dataset to be used 'toy-MANIFOLD/TRANSITION_MODEL'")
parser.add_argument("--dataroot", type=str, default="./",
help="path to dataset")
parser.add_argument("--gt_z_dim", type=int, default=10,
help="ground truth dimensionality of z (for TRANSITION_MODEL == 'linear_system')")
parser.add_argument("--gt_x_dim", type=int, default=20,
help="ground truth dimensionality of x (for MANIFOLD == 'nn')")
parser.add_argument("--num_samples", type=float, default=int(1e6),
help="number of trajectories in toy dataset")
parser.add_argument("--architecture", type=str, default='ilcm_tabular', choices=['ilcm_tabular', 'standard_conv'],
help="VAE encoder/decoder architecture.")
parser.add_argument("--train_prop", type=float, default=None,
                        help="proportion of all samples used in training set")
parser.add_argument("--valid_prop", type=float, default=0.10,
help="proportion of all samples used in validation set")
parser.add_argument("--test_prop", type=float, default=0.10,
help="proportion of all samples used in test set")
parser.add_argument("--n_workers", type=int, default=4)
parser.add_argument("--time_limit", type=float, default=None,
help="After this amount of time, terminate training. (in hours)")
parser.add_argument("--max_iter", type=int, default=int(1e6),
help="Maximal amount of iterations")
parser.add_argument("--seed", type=int, default=0,
help="manual seed")
parser.add_argument('--no_print', action="store_true",
help='do not print')
parser.add_argument('--comet_key', type=str, default=None,
help="comet api-key")
parser.add_argument('--comet_tag', type=str, default=None,
help="comet tag, to ease comparison")
parser.add_argument('--comet_workspace', type=str, default=None,
help="comet workspace")
parser.add_argument('--comet_project_name', type=str, default=None,
help="comet project_name")
parser.add_argument("--add_noise", type=float, default=0.0,
help="Add normal noise sigma = add_noise on images (only training data)")
parser.add_argument("--no_cuda", action="store_false", dest="cuda",
help="Disables cuda")
parser.add_argument("--batch_size", type=int, default=1024,
help="batch size used during training")
parser.add_argument("--eval_batch_size", type=int, default=1024,
help="batch size used during evaluation")
parser.add_argument('--beta', default=1, type=float,
help='weight for kl to normal')
parser.add_argument('--gamma', default=10, type=float,
help='weight for kl to laplace')
parser.add_argument('--rate_prior', default=6, type=float,
help='rate (or inverse scale) for prior laplace (larger -> sparser).')
parser.add_argument('--lr', default=1e-4, type=float, help='learning rate')
parser.add_argument('--beta1', default=0.9, type=float,
help='Adam optimizer beta1')
parser.add_argument('--beta2', default=0.999, type=float,
help='Adam optimizer beta2')
parser.add_argument('--ckpt-name', default='last', type=str,
help='load previous checkpoint. insert checkpoint filename')
parser.add_argument('--log_step', default=100, type=int,
                        help='number of iterations after which data is logged')
parser.add_argument('--save_step', default=10000, type=int,
help='number of iterations after which a checkpoint is saved')
args = parser.parse_args()
    main(args)  # main() does not return a value
| 46.526627 | 127 | 0.6336 | 1,031 | 7,863 | 4.652764 | 0.267701 | 0.060038 | 0.113404 | 0.011674 | 0.212216 | 0.160934 | 0.115072 | 0.104232 | 0.091307 | 0.062956 | 0 | 0.01154 | 0.239603 | 7,863 | 168 | 128 | 46.803571 | 0.790768 | 0.038789 | 0 | 0.036765 | 0 | 0 | 0.215444 | 0.00412 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.088235 | 0.022059 | 0.139706 | 0.036765 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46f57addab66e45928b367affb186d84c36a11cb | 854 | py | Python | Lottery/loterry.py | camicasii/Casino | 0534092c1b2746d5561761b65bad1a97982a54f6 | [
"MIT"
] | null | null | null | Lottery/loterry.py | camicasii/Casino | 0534092c1b2746d5561761b65bad1a97982a54f6 | [
"MIT"
] | null | null | null | Lottery/loterry.py | camicasii/Casino | 0534092c1b2746d5561761b65bad1a97982a54f6 | [
"MIT"
] | null | null | null | from TicketGenerator import TicketGenerator as Tickes
import random
class Lottery:
def __init__(self,seed=18,size=6,maxTickes=10,all=False):
self.maxTickes=maxTickes
self.tickes =Tickes(seed=seed,size=size,maxTickes=maxTickes,all=all).tickes
self.seller=[]
self.sell()
def randomTicket(self):
[test] =random.sample(self.tickes,1)
return test
def sell(self):
num = random.randint(3,self.maxTickes)
for _ in range(num):
self.seller.append(self.randomTicket())
def Winner(self):
winner=self.randomTicket()
if winner in self.seller:
print("hay ganador")
else:
print("nadie gano")
if __name__== "__main__":
a=Lottery()
print(
a.Winner()
)
| 23.081081 | 89 | 0.571429 | 95 | 854 | 5 | 0.463158 | 0.063158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012027 | 0.318501 | 854 | 36 | 90 | 23.722222 | 0.804124 | 0 | 0 | 0 | 0 | 0 | 0.033958 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.076923 | 0 | 0.307692 | 0.115385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46f79c6995e3596cb2225c315e33d1b7fa73ac8b | 291 | py | Python | FacialRecognition/Media Manipulation/open_camera.py | markgacoka/micro-projects | e8115c8270a115282e7dfda6e24620b3333f8c6b | [
"MIT"
] | 1 | 2021-03-19T10:42:07.000Z | 2021-03-19T10:42:07.000Z | Media Manipulation/open_camera.py | markgacoka/FacialRecognition | af3e4e37f40f7995f2e276c35283bbe3b73a2a27 | [
"MIT"
] | null | null | null | Media Manipulation/open_camera.py | markgacoka/FacialRecognition | af3e4e37f40f7995f2e276c35283bbe3b73a2a27 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
video = cv2.VideoCapture(0)
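# Grab frames in a loop until 'q' is pressed; each frame is resized
# to 1000x700 before being displayed.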
while(True):
ret, frame = video.read()
resized_video = cv2.resize(frame, (1000, 700))
cv2.imshow('Video Capture', resized_video)
    if cv2.waitKey(10) & 0xFF == ord('q'):  # mask to the low byte so the key compare works across platforms
break
video.release()
cv2.destroyAllWindows() | 20.785714 | 50 | 0.666667 | 40 | 291 | 4.8 | 0.675 | 0.083333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.067797 | 0.189003 | 291 | 14 | 51 | 20.785714 | 0.745763 | 0 | 0 | 0 | 0 | 0 | 0.047945 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46fbd511834efaa0b005f2300675fbcff5d8801c | 4,193 | py | Python | robot.py | 3299/2018 | b886b1409d3240ad1e2dad7ecee963e401c7bfec | [
"MIT"
] | null | null | null | robot.py | 3299/2018 | b886b1409d3240ad1e2dad7ecee963e401c7bfec | [
"MIT"
] | null | null | null | robot.py | 3299/2018 | b886b1409d3240ad1e2dad7ecee963e401c7bfec | [
"MIT"
] | null | null | null | """
Main logic code
"""
import wpilib
from inits import Component
import helpers
from components.chassis import Chassis
from autonomous import Autonomous
from components.lights import Lights
from components.metabox import MetaBox
from components.winch import Winch
from components.pdb import Power
from networktables import NetworkTables
class Randy(wpilib.TimedRobot):
def robotInit(self):
self.C = Component() # Components inits all connected motors, sensors, and joysticks. See inits.py.
# Setup subsystems
self.driverStation = wpilib.DriverStation.getInstance()
self.drive = Chassis(self.C.driveTrain, self.C.gyroS, self.C.driveYEncoderS)
self.lights = Lights()
self.metabox = MetaBox(self.C.elevatorEncoderS,
self.C.elevatorLimitS,
self.C.jawsLimitS,
self.C.metaboxLimitS,
self.C.jawsM,
self.C.elevatorM,
self.C.intakeM,
self.C.jawsSol)
self.winch = Winch(self.C.winchM)
self.power = Power()
# Joysticks
self.joystick = wpilib.XboxController(0)
self.leftJ = wpilib.Joystick(1)
# default to rainbow effect
self.lights.run({'effect': 'rainbow'})
self.sd = NetworkTables.getTable('SmartDashboard')
self.sd.putNumber('station', 2)
def teleopPeriodic(self):
"""This function is called periodically during operator control."""
'''Components'''
# Rumble
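        # rumble when the drive motors (PDP channels 0, 1, 14, 15) draw more
        # than 8 A on average, as a tactile cue that the drivetrain is straining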
averageDriveCurrent = self.power.getAverageCurrent([0, 1, 14, 15])
if (averageDriveCurrent > 8):
self.joystick.setRumble(0, 1)
else:
self.joystick.setRumble(0, 0)
print(self.metabox.getEncoder())
'''
TODO: calibrate sparks
'''
# Drive
self.drive.run(self.joystick.getRawAxis(0), self.joystick.getRawAxis(1), self.joystick.getRawAxis(4))
# MetaBox
self.metabox.run(self.leftJ.getY(), # elevator rate of change
self.leftJ.getRawButton(1), # run intake wheels in
self.leftJ.getRawButton(3), # open jaws
self.leftJ.getRawButton(2), # run intake wheels out
self.leftJ.getRawButton(4), # go to bottom
self.leftJ.getRawAxis(2), # set angle of jaws
self.leftJ.getRawButton(8)) # calibrate elevator
# Lights
self.lights.setColor(self.driverStation.getAlliance())
if (self.driverStation.getMatchTime() < 30 and self.driverStation.getMatchTime() != -1):
self.lights.run({'effect': 'flash', 'fade': True, 'speed': 200})
elif (helpers.deadband(self.leftJ.getY(), 0.1) != 0):
self.lights.run({'effect': 'stagger'})
elif (self.leftJ.getRawButton(1) or self.leftJ.getRawButton(2)):
self.lights.run({'effect': 'flash', 'fade': False, 'speed': 20})
else:
self.lights.run({'effect': 'rainbow'})
def teleopInit(self):
"""This function is run once each time the robot enters teleop mode."""
# reset gyro
self.C.gyroS.reset()
# reset encoder
self.C.driveYEncoderS.reset()
def autonomousInit(self):
"""This function is run once each time the robot enters autonomous mode."""
self.lights.run({'effect': 'flash', 'fade': True, 'speed': 400})
# reset gyro
self.C.gyroS.reset()
# reset encoder
self.C.driveYEncoderS.reset()
# Init autonomous
self.autonomousRoutine = Autonomous(self.drive, self.C.driveYEncoderS, self.C.gyroS, self.metabox, self.driverStation)
# reset state
self.autonomousRoutine.state = 0
def autonomousPeriodic(self):
self.autonomousRoutine.run() # see autonomous.py
    def testInit(self):
        # wpilib calls testInit() when entering test mode; the original name
        # `test` is not a TimedRobot hook and would never have been invoked
# reset gyro
self.C.gyroS.reset()
# reset encoder
self.C.driveYEncoderS.reset()
if __name__ == "__main__":
wpilib.run(Randy)
| 35.235294 | 126 | 0.589554 | 446 | 4,193 | 5.524664 | 0.320628 | 0.042614 | 0.059659 | 0.046266 | 0.173701 | 0.152597 | 0.141234 | 0.141234 | 0.111201 | 0.111201 | 0 | 0.013596 | 0.298354 | 4,193 | 118 | 127 | 35.533898 | 0.823929 | 0.147865 | 0 | 0.140845 | 0 | 0 | 0.036941 | 0 | 0 | 0 | 0 | 0.008475 | 0 | 1 | 0.084507 | false | 0 | 0.140845 | 0 | 0.239437 | 0.014085 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46fc0e73c34a3b1f86eeecfd31ad3ce03445be4e | 6,496 | py | Python | iotFlaskPro/apis/aep_device_management.py | ChrisLeff/IOT-BUAA_2021 | e8220350a156daba93e6baf91c793a81629aa956 | [
"MIT"
] | 2 | 2022-02-28T15:07:53.000Z | 2022-03-01T06:57:20.000Z | iotFlaskPro/apis/aep_device_management.py | ChrisLeff/IOT-BUAA_2021 | e8220350a156daba93e6baf91c793a81629aa956 | [
"MIT"
] | null | null | null | iotFlaskPro/apis/aep_device_management.py | ChrisLeff/IOT-BUAA_2021 | e8220350a156daba93e6baf91c793a81629aa956 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# encoding=utf-8
import sys
if sys.version_info[0] == 2:
# Python2
import core.AepSdkRequestSend as AepSdkRequestSend
else:
# Python3
from apis.core import AepSdkRequestSend
# Parameter MasterKey: type String, must not be empty
#   Description: the MasterKey can be found in the overview page of the product the device belongs to
# Parameter productId: type long, must not be empty
#   Description:
# Parameter searchValue: type String, may be empty
#   Description: T-link protocol accepts: device name, device number, device Id
#       MQTT protocol accepts: device name, device number, device Id
#       LWM2M protocol accepts: device name, device Id, IMEI number
#       TUP protocol accepts: device name, device Id, IMEI number
#       TCP protocol accepts: device name, device number, device Id
#       HTTP protocol accepts: device name, device number, device Id
#       JT/T808 protocol accepts: device name, device number, device Id
# Parameter pageNow: type long, may be empty
#   Description: current page number
# Parameter pageSize: type long, may be empty
#   Description: number of records per page, at most 100
def QueryDeviceList(appKey, appSecret, MasterKey, productId, searchValue, pageNow, pageSize):
path = '/aep_device_management/devices'
head = {}
param = {'productId':productId, 'searchValue':searchValue, 'pageNow':pageNow, 'pageSize':pageSize}
version = '20190507012134'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, None, version, application, MasterKey, key, 'GET')
if response is not None:
return response.read()
return None
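# Illustrative call with hypothetical placeholder values (not taken from the SDK docs):
#   QueryDeviceList(appKey, appSecret, masterKey, 12345, None, 1, 20)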
# Parameter MasterKey: type String, must not be empty
#   Description: the MasterKey can be found in the overview page of the product the device belongs to
# Parameter deviceId: type String, must not be empty
#   Description:
# Parameter productId: type long, must not be empty
#   Description:
def QueryDevice(appKey, appSecret, MasterKey, deviceId, productId):
path = '/aep_device_management/device'
head = {}
param = {'deviceId':deviceId, 'productId':productId}
version = '20181031202139'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, None, version, application, MasterKey, key, 'GET')
if response is not None:
return response.read()
return None
# Parameter MasterKey: type String, must not be empty
#   Description: the MasterKey can be found in the overview page of the product the device belongs to
# Parameter productId: type long, must not be empty
#   Description:
# Parameter deviceIds: type String, must not be empty
#   Description: multiple devices can be deleted at once (at most 200). Separate the device ids with commas ",". Example: 05979394b88a45b0842de729c03d99af,06106b8e1d5a458399326e003bcf05b4
def DeleteDevice(appKey, appSecret, MasterKey, productId, deviceIds):
path = '/aep_device_management/device'
head = {}
param = {'productId':productId, 'deviceIds':deviceIds}
version = '20181031202131'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, None, version, application, MasterKey, key, 'DELETE')
if response is not None:
return response.read()
return None
# Parameter MasterKey: type String, must not be empty
#   Description:
# Parameter deviceId: type String, must not be empty
#   Description:
# Parameter body: type json, must not be empty
#   Description: request body; see the platform API documentation for details
def UpdateDevice(appKey, appSecret, MasterKey, deviceId, body):
path = '/aep_device_management/device'
head = {}
param = {'deviceId':deviceId}
version = '20181031202122'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, body, version, application, MasterKey, key, 'PUT')
if response is not None:
return response.read()
return None
# Parameter MasterKey: type String, must not be empty
#   Description: the MasterKey can be found in the overview page of the product the device belongs to
# Parameter body: type json, must not be empty
#   Description: request body; see the platform API documentation for details
def CreateDevice(appKey, appSecret, MasterKey, body):
path = '/aep_device_management/device'
head = {}
param = {}
version = '20181031202117'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, body, version, application, MasterKey, key, 'POST')
if response is not None:
return response.read()
return None
# Parameter MasterKey: type String, must not be empty
#   Description:
# Parameter body: type json, must not be empty
#   Description: request body; see the platform API documentation for details
def BindDevice(appKey, appSecret, MasterKey, body):
path = '/aep_device_management/bindDevice'
head = {}
param = {}
version = '20191024140057'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, body, version, application, MasterKey, key, 'POST')
if response is not None:
return response.read()
return None
# Parameter MasterKey: type String, must not be empty
#   Description:
# Parameter body: type json, must not be empty
#   Description: request body; see the platform API documentation for details
def UnbindDevice(appKey, appSecret, MasterKey, body):
path = '/aep_device_management/unbindDevice'
head = {}
param = {}
version = '20191024140103'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, body, version, application, MasterKey, key, 'POST')
if response is not None:
return response.read()
return None
# Parameter imei: type String, must not be empty
#   Description:
def QueryProductInfoByImei(appKey, appSecret, imei):
path = '/aep_device_management/device/getProductInfoFormApiByImei'
head = {}
param = {'imei':imei}
version = '20191213161859'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, None, version, application, None, key, 'GET')
if response is not None:
return response.read()
return None
# Parameter MasterKey: type String, must not be empty
#   Description:
# Parameter body: type json, must not be empty
#   Description: request body; see the platform API documentation for details
def ListDeviceInfo(appKey, appSecret, MasterKey, body):
path = '/aep_device_management/listByDeviceIds'
head = {}
param = {}
version = '20210828062945'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, body, version, application, MasterKey, key, 'POST')
if response is not None:
return response.read()
return None
# Parameter MasterKey: type String, must not be empty
#   Description:
# Parameter body: type json, must not be empty
#   Description: request body; see the platform API documentation for details
def DeleteDeviceByPost(appKey, appSecret, MasterKey, body):
path = '/aep_device_management/deleteDevice'
head = {}
param = {}
version = '20211009132842'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, body, version, application, MasterKey, key, 'POST')
if response is not None:
return response.read()
return None
# Parameter MasterKey: type String, must not be empty
#   Description:
# Parameter body: type json, must not be empty
#   Description: request body; see the platform API documentation for details
def ListDeviceActiveStatus(appKey, appSecret, MasterKey, body):
path = '/aep_device_management/listActiveStatus'
head = {}
param = {}
version = '20211010063104'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, body, version, application, MasterKey, key, 'POST')
if response is not None:
return response.read()
return None
| 31.533981 | 120 | 0.704895 | 671 | 6,496 | 6.789866 | 0.177347 | 0.04741 | 0.052239 | 0.055531 | 0.664618 | 0.658253 | 0.658253 | 0.641352 | 0.570018 | 0.523266 | 0 | 0.040867 | 0.190117 | 6,496 | 205 | 121 | 31.687805 | 0.825128 | 0.227371 | 0 | 0.666667 | 0 | 0 | 0.133428 | 0.077311 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087302 | false | 0 | 0.02381 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46fd902b540497f93d018433534679e3a97042d2 | 8,933 | py | Python | examples/Random/Random.py | aniknarayan/ioticiser_new | 9886f1ba5c249ebbcebfb0e1f4434fecdf8c0680 | [
"Apache-2.0"
] | null | null | null | examples/Random/Random.py | aniknarayan/ioticiser_new | 9886f1ba5c249ebbcebfb0e1f4434fecdf8c0680 | [
"Apache-2.0"
] | null | null | null | examples/Random/Random.py | aniknarayan/ioticiser_new | 9886f1ba5c249ebbcebfb0e1f4434fecdf8c0680 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Iotic Labs Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/py-IoticBulkData/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Test Feed Generator, example source for Ioticiser
"""
from __future__ import unicode_literals
from datetime import datetime
import logging
import math
import string
import random
logging.basicConfig(format='%(asctime)s,%(msecs)03d %(levelname)s [%(name)s] {%(threadName)s} %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
from IoticAgent import Datatypes
from IoticAgent.Core.compat import monotonic
from Ioticiser import SourceBase
LANG = 'en'
HOURLY_SINE_SLEEP_SECS = 10  # 3600 s per hour / 360 degrees = one degree every 10 seconds for a one-hour cycle
SAW_TOOTH_SLEEP_SECS = 10
ALPHA_LOWER_SLEEP_SECS = 10
ALPHA_UPPER_SLEEP_SECS = 10
ALPHA_RANDOM_SLEEP_SECS = 10
RANDOM_NUMBER_SLEEP_SECS = 10
RANDOM_NUMBER = "random number"
RANDOM_ALPHA = "alphabet random"
ALPHA_LOWER = "alphabet lower"
ALPHA_UPPER = "alphabet upper"
SAW_TOOTH = "saw tooth"
HOURLY_SINE = "hourly sine"
class Random(SourceBase): # pylint: disable=too-many-instance-attributes
def __init__(self, stash, config, stop):
super(Random, self).__init__(stash, config, stop)
self.__thing = None
self.__number_start = 0
self.__alpha_start = 0
self.__alpha_idx = 0
self.__alpha_list = list(string.ascii_lowercase)
self.__lower_start = 0
self.__lower_idx = 0
self.__upper_start = 0
self.__upper_list = list(string.ascii_uppercase)
self.__upper_idx = 0
self.__saw_start = 0
self.__saw_count_up = True
self.__saw_value = 0
self.__sine_start = 0
self.__sine_degrees = 0
def run(self):
self.__thing = self.__create_thing()
self.__create_hourly_sine()
self.__create_saw_tooth()
self.__create_alphabet_lower()
self.__create_alphabet_upper()
self.__create_alphabet_random()
self.__create_random_number()
self.__thing.set_public(public=True)
while not self._stop.is_set():
with self.__thing:
self.__run_hourly_sine()
self.__run_saw_tooth()
self.__run_alphabet_lower()
self.__run_alphabet_upper()
self.__run_alphabet_random()
self.__run_random_number()
self._stop.wait(timeout=1)
logger.info("Finished")
def __create_thing(self):
t_feed_generator = self._stash.create_thing("Test feed generator")
t_feed_generator.set_label("Test feed generator", lang=LANG)
t_feed_generator.set_description("Generates Wave forms for testing", lang=LANG)
        t_feed_generator.create_tag(["test", "feed", "generator", "iotics"])
return t_feed_generator
def __create_random_number(self):
f_alphabet = self.__thing.create_feed(RANDOM_NUMBER)
f_alphabet.set_recent_config(max_samples=1)
f_alphabet.set_label("Random number generator", lang=LANG)
f_alphabet.set_description("Generates a random number from 0 - 10", lang=LANG)
def __run_random_number(self):
if monotonic() - self.__number_start >= RANDOM_NUMBER_SLEEP_SECS:
feed = self.__thing.create_feed(RANDOM_NUMBER)
feed.create_value("value",
Datatypes.INTEGER,
"en",
"random number",
data=random.randint(0, 10))
feed.share(time=datetime.utcnow())
self.__number_start = monotonic()
def __create_alphabet_random(self):
f_alphabet = self.__thing.create_feed(RANDOM_ALPHA)
f_alphabet.set_recent_config(max_samples=1)
f_alphabet.set_label("Alphabet generator - random letter", lang=LANG)
f_alphabet.set_description("generates a random letter from the alphabet", lang=LANG)
def __run_alphabet_random(self):
if monotonic() - self.__alpha_start >= ALPHA_LOWER_SLEEP_SECS:
feed = self.__thing.create_feed(RANDOM_ALPHA)
feed.create_value("value",
Datatypes.STRING,
"en",
"random letter",
data=random.choice(self.__alpha_list))
feed.share(time=datetime.utcnow())
self.__alpha_idx += 1
if self.__alpha_idx >= len(self.__alpha_list):
self.__alpha_idx = 0
self.__alpha_start = monotonic()
def __create_alphabet_lower(self):
f_alphabet = self.__thing.create_feed(ALPHA_LOWER)
f_alphabet.set_recent_config(max_samples=1)
f_alphabet.set_label("Alphabet generator - lower case", lang=LANG)
f_alphabet.set_description("Cycles Through a-z and then starts at a again", lang=LANG)
def __run_alphabet_lower(self):
if monotonic() - self.__lower_start >= ALPHA_LOWER_SLEEP_SECS:
feed = self.__thing.create_feed(ALPHA_LOWER)
feed.create_value("value",
Datatypes.STRING, "en", "value of letter",
data=self.__alpha_list[self.__lower_idx])
feed.share(time=datetime.utcnow())
self.__lower_idx += 1
if self.__lower_idx >= len(self.__alpha_list):
self.__lower_idx = 0
self.__lower_start = monotonic()
def __create_alphabet_upper(self):
f_alphabet = self.__thing.create_feed(ALPHA_UPPER)
f_alphabet.set_recent_config(max_samples=1)
f_alphabet.set_label("Alphabet generator - upper case", lang=LANG)
f_alphabet.set_description("Cycles Through A-Z and then starts at A again", lang=LANG)
def __run_alphabet_upper(self):
if monotonic() - self.__upper_start >= ALPHA_UPPER_SLEEP_SECS:
feed = self.__thing.create_feed(ALPHA_UPPER)
feed.create_value("value",
Datatypes.STRING,
"en",
"value of letter",
data=self.__upper_list[self.__upper_idx])
feed.share(time=datetime.utcnow())
self.__upper_idx += 1
if self.__upper_idx >= len(self.__upper_list):
self.__upper_idx = 0
self.__upper_start = monotonic()
def __create_saw_tooth(self):
f_saw_tooth = self.__thing.create_feed(SAW_TOOTH)
f_saw_tooth.set_recent_config(max_samples=1)
f_saw_tooth.set_label("Saw tooth wave", lang=LANG)
f_saw_tooth.set_description("Cycles from 0 to 10 and back down again", lang=LANG)
def __run_saw_tooth(self):
if monotonic() - self.__saw_start >= SAW_TOOTH_SLEEP_SECS:
feed = self.__thing.create_feed(SAW_TOOTH)
feed.create_value("value",
Datatypes.DECIMAL,
"en",
"value of sawtooth",
data=self.__saw_value)
feed.share(time=datetime.utcnow())
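            # bounce between the ends: count up to 10, then back down to 0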
if self.__saw_count_up:
self.__saw_value += 1
else:
self.__saw_value -= 1
if self.__saw_value > 10:
self.__saw_value = 9
self.__saw_count_up = False
elif self.__saw_value < 0:
self.__saw_value = 1
self.__saw_count_up = True
self.__saw_start = monotonic()
def __create_hourly_sine(self):
f_hourly_sine = self.__thing.create_feed(HOURLY_SINE)
f_hourly_sine.set_recent_config(max_samples=1)
f_hourly_sine.set_label("Sine wave", lang=LANG)
f_hourly_sine.set_description("Cycles through 360 degrees of a sine wave in one hour", lang=LANG)
def __run_hourly_sine(self):
if monotonic() - self.__sine_start >= HOURLY_SINE_SLEEP_SECS:
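            # convert the current angle from degrees to radians for math.sin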
radians = self.__sine_degrees * (math.pi / 180)
feed = self.__thing.create_feed(HOURLY_SINE)
feed.create_value("value", Datatypes.DECIMAL, "en", "value of sine function", data=math.sin(radians))
feed.share(time=datetime.utcnow())
self.__sine_degrees += 1
if self.__sine_degrees >= 360:
self.__sine_degrees = 0
self.__sine_start = monotonic()
| 38.175214 | 113 | 0.629912 | 1,102 | 8,933 | 4.669691 | 0.188748 | 0.027983 | 0.034979 | 0.044306 | 0.403031 | 0.330159 | 0.255733 | 0.2089 | 0.163039 | 0.127283 | 0 | 0.012009 | 0.282212 | 8,933 | 233 | 114 | 38.339056 | 0.790549 | 0.084182 | 0 | 0.177143 | 0 | 0.005714 | 0.097672 | 0.002819 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.051429 | 0 | 0.148571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46fda291ce75700d6c941a1210e9051ba1026be6 | 400 | py | Python | hermione/module_templates/__IMPLEMENTED_BASE__/src/api/myrequests.py | RodrigoATorres/hermione | 6cbed73e309f8025a48f33165d8f29561c6a3cc7 | [
"Apache-2.0"
] | 183 | 2020-06-03T22:43:14.000Z | 2022-03-17T22:39:07.000Z | hermione/module_templates/__IMPLEMENTED_BASE__/src/api/myrequests.py | RodrigoATorres/hermione | 6cbed73e309f8025a48f33165d8f29561c6a3cc7 | [
"Apache-2.0"
] | 31 | 2020-06-03T22:55:18.000Z | 2022-03-27T20:06:17.000Z | hermione/module_templates/__IMPLEMENTED_BASE__/src/api/myrequests.py | RodrigoATorres/hermione | 6cbed73e309f8025a48f33165d8f29561c6a3cc7 | [
"Apache-2.0"
] | 43 | 2020-06-03T22:45:03.000Z | 2021-12-29T19:43:54.000Z | import requests
import json
url = 'http://localhost:5000/invocations'
data = {
'Pclass':[3,3,3],
'Sex': ['male', 'female', 'male'],
'Age':[4, 22, 28]
}
j_data = json.dumps(data)
headers = {'Content-Type': 'application/json'}
print("Sending request for model...")
print(f"Data: {j_data}")
# j_data is already a JSON-encoded string, so send it as the raw request body;
# json=j_data would serialize the string a second time and double-encode it
r = requests.post(url, data=j_data, headers=headers)
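# Optional hardening (an assumption, not in the original script):
# r.raise_for_status() here would surface HTTP errors before the body is read.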
print(f"Response: {r.text}") | 23.529412 | 52 | 0.6175 | 56 | 400 | 4.357143 | 0.607143 | 0.061475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036254 | 0.1725 | 400 | 17 | 53 | 23.529412 | 0.700906 | 0 | 0 | 0 | 0 | 0 | 0.366584 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.214286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
200010fc68a811414b58910a066945d5e6e9b2e8 | 2,027 | py | Python | onlinejudge/_implementation/command/split_input.py | btk15049/online-judge-tools | 22505e98359c50df06e7cc1d53a7d253cb096b14 | [
"MIT"
] | null | null | null | onlinejudge/_implementation/command/split_input.py | btk15049/online-judge-tools | 22505e98359c50df06e7cc1d53a7d253cb096b14 | [
"MIT"
] | null | null | null | onlinejudge/_implementation/command/split_input.py | btk15049/online-judge-tools | 22505e98359c50df06e7cc1d53a7d253cb096b14 | [
"MIT"
] | null | null | null | # Python Version: 3.x
import subprocess
import sys
import time
from typing import *
from typing.io import *
import onlinejudge._implementation.format_utils as format_utils
import onlinejudge._implementation.logging as log
if TYPE_CHECKING:
import argparse
def non_block_read(fh: IO[Any]) -> str:
    # workaround: put the pipe into non-blocking mode so read() returns immediately
import fcntl
import os
fd = fh.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
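    # with O_NONBLOCK set, read() no longer waits for the child process; when
    # no output is available it raises, which the except below maps to ''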
try:
return fh.read()
    except Exception:
return ''
split_input_auto_footer = ('__AUTO_FOOTER__', ) # this shouldn't be a string, so a tuple
def split_input(args: 'argparse.Namespace') -> None:
with open(args.input) as fh:
inf = fh.read()
if args.footer == split_input_auto_footer:
args.footer = inf.splitlines(keepends=True)[-1]
with subprocess.Popen(args.command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=sys.stderr) as proc:
index = 0
acc = ''
for line in inf.splitlines(keepends=True):
if args.ignore:
args.ignore -= 1
else:
acc += line
proc.stdin.write(line.encode())
proc.stdin.flush()
time.sleep(args.time)
if non_block_read(proc.stdout): # if output exists
index += 1
path = format_utils.percentformat(args.output, {'i': str(index)})
log.info('case found: %d', index)
if args.header:
if args.header == args.header.strip():
acc = '\n' + acc
acc = args.header + acc
if args.footer:
acc = acc + args.footer
log.emit(log.bold(acc))
with open(path, 'w') as fh:
fh.write(acc)
log.success('saved to: %s', path)
acc = ''
while non_block_read(proc.stdout): # consume all
pass
| 31.671875 | 126 | 0.556487 | 248 | 2,027 | 4.439516 | 0.435484 | 0.027248 | 0.032698 | 0.030881 | 0.072661 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003726 | 0.337938 | 2,027 | 63 | 127 | 32.174603 | 0.816692 | 0.048347 | 0 | 0.037736 | 0 | 0 | 0.032761 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037736 | false | 0.018868 | 0.188679 | 0 | 0.264151 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
200358a6a93b6e371ca0faec84207e445afd7915 | 24,656 | py | Python | linkwarner/linkwarner.py | jack1142/JackCogs | ad3b5c43e4597f92816db8f0974f8f61d511abc3 | [
"Apache-2.0"
] | 18 | 2019-01-18T07:00:26.000Z | 2021-09-22T00:12:40.000Z | linkwarner/linkwarner.py | jack1142/JackCogs | ad3b5c43e4597f92816db8f0974f8f61d511abc3 | [
"Apache-2.0"
] | 43 | 2019-04-28T01:31:17.000Z | 2022-03-08T02:17:55.000Z | linkwarner/linkwarner.py | jack1142/JackCogs | ad3b5c43e4597f92816db8f0974f8f61d511abc3 | [
"Apache-2.0"
] | 20 | 2020-01-21T10:49:37.000Z | 2022-03-21T02:16:45.000Z | # Copyright 2018-2021 Jakub Kuczys (https://github.com/jack1142)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Any, Dict, Literal
import discord
from redbot.core import commands, modlog
from redbot.core.bot import Red
from redbot.core.commands import GuildContext
from redbot.core.config import Config
from redbot.core.utils.chat_formatting import humanize_list, inline
from redbot.core.utils.common_filters import URL_RE
from .converters import DomainName
from .data_classes import ChannelData, DomainsMode, GuildData, GuildDomainsMode
log = logging.getLogger("red.jackcogs.linkwarner")
RequestType = Literal["discord_deleted_user", "owner", "user", "user_strict"]
class LinkWarner(commands.Cog):
"""Remove messages containing links and warn users for it."""
def __init__(self, bot: Red) -> None:
self.bot = bot
self.config = Config.get_conf(
self, identifier=176070082584248321, force_registration=True
)
self.config.register_guild(
enabled=False,
check_edits=True,
use_dms=False,
delete_delay=None,
excluded_roles=[],
domains_mode=DomainsMode.ALLOW_FROM_SCOPE_LIST.value,
domains_list=[],
warn_message="",
)
self.config.register_channel(
ignored=False,
domains_mode=DomainsMode.INHERIT_MODE_AND_UNION_LISTS.value,
domains_list=[],
warn_message="",
)
self.guild_cache: Dict[int, GuildData] = {}
async def initialize(self) -> None:
try:
await modlog.register_casetype(
name="linkwarn",
default_setting=True,
image="\N{WARNING SIGN}",
case_str="Link Warning",
)
except RuntimeError:
pass
async def red_get_data_for_user(self, *, user_id: int) -> Dict[str, Any]:
        # this cog does not store any data
return {}
async def red_delete_data_for_user(
self, *, requester: RequestType, user_id: int
) -> None:
        # this cog does not store any data
pass
async def get_guild_data(self, guild: discord.Guild) -> GuildData:
try:
return self.guild_cache[guild.id]
except KeyError:
pass
data = await GuildData.from_guild(self.config, guild)
self.guild_cache[guild.id] = data
return data
async def get_channel_data(self, channel: discord.TextChannel) -> ChannelData:
guild_data = await self.get_guild_data(channel.guild)
return await guild_data.get_channel_data(channel)
@commands.admin()
@commands.guild_only()
@commands.group()
async def linkwarner(self, ctx: GuildContext) -> None:
"""Settings for LinkWarner cog."""
@linkwarner.command(name="showsettings")
async def linkwarner_showsettings(self, ctx: GuildContext) -> None:
"""Show settings for the current guild."""
guild_data = await self.get_guild_data(ctx.guild)
enabled = "Yes" if guild_data.enabled else "No"
use_dms = "Yes" if guild_data.use_dms else "No"
delete_delay = guild_data.delete_delay
auto_deletion = f"After {delete_delay} seconds" if delete_delay else "Disabled"
excluded_roles = (
humanize_list(
[
r.mention
for r in ctx.guild.roles
if r.id in guild_data.excluded_roles
]
)
or "*None*"
)
domains_mode = (
"Only allow domains from the domains list"
if guild_data.domains_mode is DomainsMode.ALLOW_FROM_SCOPE_LIST
else "Allow all domains except the domains from the domains list"
)
# purposefully not using humanize_list() here to avoid confusion
domains_list = ", ".join(guild_data.domains_list) or "*Empty*"
await ctx.send(
"**LinkWarner's Guild Settings**\n\n"
">>> "
f"**Enabled:** {enabled}\n"
f"**Send warning message in DMs:** {use_dms}\n"
f"**Auto-deletion of warning messages:** {auto_deletion}\n"
f"**Excluded roles:** {excluded_roles}\n"
f"**Domains list mode:** {domains_mode}\n"
f"**Domains list:** {domains_list}"
)
@linkwarner.group(name="channel")
async def linkwarner_channel(self, ctx: GuildContext) -> None:
"""Channel-specific settings for LinkWarner."""
@linkwarner_channel.command(name="showsettings")
async def linkwarner_channel_showsettings(
self, ctx: GuildContext, channel: discord.TextChannel
) -> None:
"""Show settings for the given channel."""
channel_data = await self.get_channel_data(channel)
guild_data = channel_data.guild_data
ignored = "Yes" if channel_data.ignored else "No"
if channel_data.domains_mode is DomainsMode.ALLOW_FROM_SCOPE_LIST:
domains_mode = "Only allow domains from the channel's domains list"
elif channel_data.domains_mode is DomainsMode.DISALLOW_FROM_SCOPE_LIST:
domains_mode = (
"Allow all domains except the domains from the channel's domains list"
)
else:
if guild_data.domains_mode is DomainsMode.ALLOW_FROM_SCOPE_LIST:
domains_mode = (
"Only allow domains from the guild's and channel's domains list"
)
else:
domains_mode = (
"Allow all domains except the domains"
" from the guild's and channel's domains list"
)
# purposefully not using humanize_list() here to avoid confusion
domains_list = ", ".join(channel_data.scoped_domains_list) or "*Empty*"
await ctx.send(
f"**LinkWarner's Channel Settings for {channel.mention}**\n\n"
">>> "
f"**Ignored:** {ignored}\n"
f"**Domains list mode:** {domains_mode}\n"
f"**Channel's domains list:** {domains_list}"
)
# Enabled/ignored state commands
@linkwarner.command(name="state")
async def linkwarner_state(self, ctx: GuildContext, new_state: bool) -> None:
"""
Set if LinkWarner should be enabled for this guild.
"""
guild_data = await self.get_guild_data(ctx.guild)
await guild_data.set_enabled_state(new_state)
if new_state:
message = "Bot will now filter links in this server."
else:
message = "Bot will no longer filter links in this server."
await ctx.send(message)
@linkwarner_channel.command(name="ignore")
async def linkwarner_channel_ignore(
self, ctx: GuildContext, channel: discord.TextChannel, new_state: bool
) -> None:
"""Set if LinkWarner should ignore links in provided channel."""
channel_data = await self.get_channel_data(channel)
await channel_data.set_ignored_state(new_state)
if new_state:
message = f"Bot will now ignore links in {channel.mention} channel."
else:
message = f"Bot will now filter links in {channel.mention} channel."
await ctx.send(message)
# Command for Use DMs setting
@linkwarner.command(name="usedms")
async def linkwarner_usedms(self, ctx: GuildContext, new_state: bool) -> None:
"""
Set if LinkWarner should use DMs for warning messages.
Note: This is NOT recommended as the user might block the bot or all DMs
from the server and the warning might not get sent to the offender at all.
This also means that the bot is more likely to get ratelimited for repeatedly
trying to DM the user when they spam links.
If you're trying to minimize spam that the warning messages cause,
you should consider enabling delete delay instead.
"""
guild_data = await self.get_guild_data(ctx.guild)
await guild_data.set_use_dms(new_state)
if new_state:
message = "Bot will now send the warning message in DMs."
else:
message = (
"Bot will now send the warning message in the channel"
" where the link was sent in."
)
await ctx.send(message)
# Delete delay commands
@linkwarner.group(name="deletedelay", invoke_without_command=True)
async def linkwarner_deletedelay(self, ctx: GuildContext, new_value: int) -> None:
"""
Set the delete delay (in seconds) for the warning message.
Use `[p]linkwarner deletedelay disable` to disable auto-deletion.
Note: This does not work when the warning messages are sent through DMs.
"""
if new_value < 1:
command = inline(f"{ctx.clean_prefix}linkwarner deletedelay disable")
await ctx.send(
"The delete delay cannot be lower than 1 second."
f" If you want to disable auto-deletion, use {command}."
)
return
if new_value > 300:
await ctx.send(
"The delete delay cannot be higher than 5 minutes (300 seconds)."
)
return
guild_data = await self.get_guild_data(ctx.guild)
await guild_data.set_delete_delay(new_value)
plural = "s" if new_value > 1 else ""
await ctx.send(
"Bot will now auto-delete the warning message"
f" after {new_value} second{plural}."
)
@linkwarner_deletedelay.command(name="disable")
async def linkwarner_deletedelay_disable(self, ctx: GuildContext) -> None:
"""Disable auto-deletion of the warning messages."""
guild_data = await self.get_guild_data(ctx.guild)
await guild_data.set_delete_delay(None)
await ctx.send("Bot will no longer delete the warning messages automatically.")
# Excluded roles commands
@linkwarner.group(name="excludedroles")
async def linkwarner_excludedroles(self, ctx: GuildContext) -> None:
"""Settings for roles that are excluded from getting filtered."""
@linkwarner_excludedroles.command(name="add", require_var_positional=True)
async def linkwarner_excludedroles_add(
self, ctx: GuildContext, *roles: discord.Role
) -> None:
"""Add roles that will be excluded from getting filtered."""
guild_data = await self.get_guild_data(ctx.guild)
await guild_data.add_excluded_roles(role.id for role in roles)
await ctx.send("Excluded roles updated.")
@linkwarner_excludedroles.command(
name="remove", aliases=["delete"], require_var_positional=True
)
async def linkwarner_excludedroles_remove(
self, ctx: GuildContext, *roles: discord.Role
) -> None:
"""Remove roles that will be excluded from getting filtered."""
guild_data = await self.get_guild_data(ctx.guild)
await guild_data.remove_excluded_roles(role.id for role in roles)
await ctx.send("Excluded roles updated.")
# Domains list commands
@linkwarner.group(name="domains")
async def linkwarner_domains(self, ctx: GuildContext) -> None:
"""Configuration for allowed/disallowed domains in the guild."""
@linkwarner_channel.group(name="domains")
async def linkwarner_channel_domains(self, ctx: GuildContext) -> None:
"""Configuration for allowed/disallowed domains in the specific channel."""
@linkwarner_domains.command(name="setmode")
async def linkwarner_domains_setmode(
self, ctx: GuildContext, new_mode: GuildDomainsMode
) -> None:
"""
Change current domains list mode.
Available modes:
`1` - Only domains on the domains list can be sent.
`2` - All domains can be sent except the ones on the domains list.
"""
guild_data = await self.get_guild_data(ctx.guild)
await guild_data.set_domains_mode(new_mode)
if new_mode is DomainsMode.ALLOW_FROM_SCOPE_LIST:
message = "Bot will now only allow domains from the domains list."
else:
message = "Bot will now only allow domains that aren't on the domains list."
await ctx.send(message)
@linkwarner_channel_domains.command(name="setmode")
async def linkwarner_channel_domains_setmode(
self,
ctx: GuildContext,
channel: discord.TextChannel,
new_mode: DomainsMode,
) -> None:
"""
Change current domains list mode.
Available modes:
`0` - Inherit the guild setting and use domains
from both guild's and channel's domain list.
`1` - Only domains on the channel's domains list can be sent.
`2` - All domains can be sent except the ones on the channel's domains list.
"""
channel_data = await self.get_channel_data(channel)
guild_data = channel_data.guild_data
await channel_data.set_domains_mode(new_mode)
if new_mode is DomainsMode.ALLOW_FROM_SCOPE_LIST:
message = (
f"For {channel.mention}, bot will now only allow domains"
" from the channel's domains list."
)
elif new_mode is DomainsMode.DISALLOW_FROM_SCOPE_LIST:
message = (
f"For {channel.mention}, bot will now only allow domains"
" that aren't on the channel's domains list."
)
else:
if guild_data.domains_mode is DomainsMode.ALLOW_FROM_SCOPE_LIST:
message = (
f"For {channel.mention}, bot will now only allow domains"
" from the guild's and channel's domains list."
)
else:
message = (
f"For {channel.mention}, bot will now only allow domains"
" that aren't on the guild's nor channel's domains list."
)
await ctx.send(message)
@linkwarner_domains.command(name="add", require_var_positional=True)
async def linkwarner_domains_add(
self, ctx: GuildContext, *domains: DomainName
) -> None:
"""
Add domains to the domains list.
Note: The cog is using exact matching for domain names
which means that domain names like youtube.com and www.youtube.com
are treated as 2 different domains.
Example:
`[p]linkwarner domains add google.com youtube.com`
"""
guild_data = await self.get_guild_data(ctx.guild)
await guild_data.add_domains(domains)
await ctx.send("Domains list updated.")
@linkwarner_channel_domains.command(name="add", require_var_positional=True)
async def linkwarner_channel_domains_add(
self, ctx: GuildContext, channel: discord.TextChannel, *domains: DomainName
) -> None:
"""
Add domains to the domains list of the provided channel.
Note: The cog is using exact matching for domain names
which means that domain names like youtube.com and www.youtube.com
are treated as 2 different domains.
Example:
`[p]linkwarner channel domains add #channel youtube.com discord.com`
"""
channel_data = await self.get_channel_data(channel)
await channel_data.add_domains(domains)
await ctx.send("Domains list updated.")
@linkwarner_domains.command(
name="remove", aliases=["delete"], require_var_positional=True
)
async def linkwarner_domains_remove(
self, ctx: GuildContext, *domains: DomainName
) -> None:
"""
Remove domains from the domains list.
Example:
`[p]linkwarner domains remove youtube.com discord.com`
"""
guild_data = await self.get_guild_data(ctx.guild)
await guild_data.remove_domains(domains)
await ctx.send("Domains list updated.")
@linkwarner_channel_domains.command(
name="remove", aliases=["delete"], require_var_positional=True
)
async def linkwarner_channel_domains_remove(
self, ctx: GuildContext, channel: discord.TextChannel, *domains: DomainName
) -> None:
"""
Remove domains from the domains list of the provided channel.
Example:
`[p]linkwarner channel domains remove #channel youtube.com discord.com`
"""
channel_data = await self.get_channel_data(channel)
await channel_data.remove_domains(domains)
await ctx.send("Domains list updated.")
@linkwarner_domains.command(name="clear")
async def linkwarner_domains_clear(self, ctx: GuildContext) -> None:
"""Clear domains from the domains list."""
guild_data = await self.get_guild_data(ctx.guild)
await guild_data.clear_domains()
await ctx.send("Domains list cleared.")
@linkwarner_channel_domains.command(name="clear")
async def linkwarner_channel_domains_clear(
self, ctx: GuildContext, channel: discord.TextChannel
) -> None:
"""Clear domains from the domains list of the provided channel."""
channel_data = await self.get_channel_data(channel)
await channel_data.clear_domains()
await ctx.send("Domains list cleared.")
# Warning message commands
@linkwarner.command(name="setmessage")
async def linkwarner_setmessage(self, ctx: GuildContext, *, message: str) -> None:
"""
Set link warning message.
Those fields will get replaced automatically:
$mention - Mention the user who sent the message with a link
$username - The user's display name
$server - The name of the server
"""
guild_data = await self.get_guild_data(ctx.guild)
await guild_data.set_warn_message(message)
content = guild_data.format_warn_message(ctx.message)
# we've just set the template, content can't be None
assert content is not None, "mypy"
await ctx.send("Link warning message set, sending a test message here...")
await ctx.send(content)
@linkwarner_channel.command(name="setmessage")
async def linkwarner_channel_setmessage(
self, ctx: GuildContext, channel: discord.TextChannel, *, message: str
) -> None:
"""
Set link warning message for provided channel.
Those fields will get replaced automatically:
$mention - Mention the user who sent the message with a link
$username - The user's display name
$server - The name of the server
"""
channel_data = await self.get_channel_data(channel)
await channel_data.set_warn_message(message)
content = channel_data.format_warn_message(ctx.message)
# we've just set the template, content can't be None
assert content is not None, "mypy"
await ctx.send("Link warning message set, sending a test message here...")
await ctx.send(content)
@linkwarner.command(name="unsetmessage")
async def linkwarner_unsetmessage(self, ctx: GuildContext) -> None:
"""Unset link warning message."""
guild_data = await self.get_guild_data(ctx.guild)
await guild_data.set_warn_message("")
await ctx.send("Link warning message unset.")
@linkwarner_channel.command(name="unsetmessage")
async def linkwarner_channel_unsetmessage(
self, ctx: GuildContext, channel: discord.TextChannel
) -> None:
"""Unset link warning message for provided channel."""
channel_data = await self.get_channel_data(channel)
await channel_data.set_warn_message("")
await ctx.send("Link warning message unset.")
async def _should_ignore(
self, message: discord.Message, *, edit: bool = False
) -> bool:
"""
Checks whether message should be ignored in the `on_message` listener.
This checks whether:
- message has been sent in guild
- message author is a bot
- cog is disabled in guild
- message author is on Red's immunity list for automated moderator actions
- channel is ignored in cog's settings
- message author has any role that is excluded from the filter in cog's settings
Returns
-------
bool
`True` if message should be ignored, `False` otherwise
"""
guild = message.guild
if guild is None or message.author.bot:
return True
if await self.bot.cog_disabled_in_guild(self, guild):
return True
if await self.bot.is_automod_immune(message):
return True
assert isinstance(message.channel, discord.TextChannel), "mypy"
assert isinstance(message.author, discord.Member), "mypy"
channel_data = await self.get_channel_data(message.channel)
if not channel_data.enabled:
return True
if channel_data.guild_data.has_excluded_roles(message.author):
return True
if edit and not channel_data.guild_data.check_edits:
return True
return False
# listener
@commands.Cog.listener()
async def on_message(self, message: discord.Message, *, edit: bool = False) -> None:
if await self._should_ignore(message, edit=edit):
return
guild = message.guild
channel = message.channel
author = message.author
assert guild is not None, "mypy"
assert isinstance(channel, discord.TextChannel), "mypy"
channel_data = await self.get_channel_data(channel)
guild_data = channel_data.guild_data
assert guild.me is not None, "mypy"
for match in URL_RE.finditer(message.content):
if channel_data.is_url_allowed(match.group(2)):
continue
try:
if not channel.permissions_for(guild.me).manage_messages:
raise RuntimeError
await message.delete()
except (discord.Forbidden, RuntimeError):
log.error(
"Bot can't delete messages in channel with ID %s (guild ID: %s)",
channel.id,
guild.id,
)
except discord.NotFound:
# message had been removed before we got to it
pass
msg = channel_data.format_warn_message(message)
if msg is not None:
if guild_data.use_dms:
try:
await author.send(msg)
except discord.Forbidden:
log.info(
"Bot couldn't send a message to the user with ID %s",
author.id,
)
else:
try:
if not channel.permissions_for(guild.me).send_messages:
raise RuntimeError
await channel.send(msg, delete_after=guild_data.delete_delay)
except (discord.Forbidden, RuntimeError):
log.error(
"Bot can't send messages in channel with ID %s"
" (guild ID: %s)",
channel.id,
guild.id,
)
await modlog.create_case(
bot=self.bot,
guild=guild,
created_at=message.created_at,
action_type="linkwarn",
user=author,
moderator=guild.me,
reason=f"Warned for posting a link - {match.group(0)}",
channel=channel,
)
return
@commands.Cog.listener()
async def on_message_edit(
self, _before: discord.Message, after: discord.Message
) -> None:
await self.on_message(after, edit=True)
| 39.4496 | 88 | 0.624108 | 2,959 | 24,656 | 5.061507 | 0.124704 | 0.036656 | 0.031248 | 0.025639 | 0.576417 | 0.533618 | 0.482006 | 0.407959 | 0.379782 | 0.327435 | 0 | 0.003042 | 0.293316 | 24,656 | 624 | 89 | 39.512821 | 0.85652 | 0.046317 | 0 | 0.356627 | 0 | 0 | 0.154506 | 0.003873 | 0 | 0 | 0 | 0 | 0.016867 | 1 | 0.00241 | false | 0.009639 | 0.026506 | 0 | 0.06747 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
20037c2305209c3714382f992c3dbf0aa31b9950 | 384 | py | Python | app/commands/base.py | AndersonSMed/DiscordBot | f42bae852b72d486d347416c7594d1631158631a | [
"MIT"
] | 2 | 2021-02-05T18:43:10.000Z | 2021-02-09T01:23:27.000Z | app/commands/base.py | AndersonSMed/DiscordBot | f42bae852b72d486d347416c7594d1631158631a | [
"MIT"
] | 5 | 2021-02-05T17:15:21.000Z | 2021-06-23T00:39:51.000Z | app/commands/base.py | AndersonSMed/EducaBot | f42bae852b72d486d347416c7594d1631158631a | [
"MIT"
] | null | null | null | import abc
class Base(abc.ABC):
command_name = None
def __init__(self, client, message, payload=None):
self.client = client
self.message = message
self.payload = payload
if not self.command_name:
raise ValueError('Should have a command name!')
@abc.abstractmethod
async def run(self):
raise NotImplementedError
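# Illustrative only: a minimal concrete command, assuming the surrounding bot
# dispatches on `command_name` and awaits `run()`. The names below are
# hypothetical and not part of this module.
class PingCommand(Base):
    command_name = 'ping'
    async def run(self):
        # discord.py-style reply; `self.message` is set by Base.__init__
        await self.message.channel.send('pong')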
| 21.333333 | 59 | 0.638021 | 45 | 384 | 5.311111 | 0.533333 | 0.138075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.283854 | 384 | 17 | 60 | 22.588235 | 0.869091 | 0 | 0 | 0 | 0 | 0 | 0.070313 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2003a339006e0e413215fedb71b8a6d01e840444 | 2,375 | py | Python | src/spaceone/inventory/connector/aws_sqs_connector/connector.py | spaceone-dev/plugin-aws-cloud-service-inven-collector | aa252a41940e0941d4b0f7be7fc05d152da654dd | [
"Apache-2.0"
] | null | null | null | src/spaceone/inventory/connector/aws_sqs_connector/connector.py | spaceone-dev/plugin-aws-cloud-service-inven-collector | aa252a41940e0941d4b0f7be7fc05d152da654dd | [
"Apache-2.0"
] | 1 | 2022-02-10T04:38:11.000Z | 2022-02-10T04:38:11.000Z | src/spaceone/inventory/connector/aws_sqs_connector/connector.py | spaceone-dev/plugin-aws-cloud-service-inven-collector | aa252a41940e0941d4b0f7be7fc05d152da654dd | [
"Apache-2.0"
] | 1 | 2021-11-15T05:19:44.000Z | 2021-11-15T05:19:44.000Z | import time
import logging
from typing import List
import json
from spaceone.core.utils import *
from spaceone.inventory.connector.aws_sqs_connector.schema.data import QueData, RedrivePolicy
from spaceone.inventory.connector.aws_sqs_connector.schema.resource import SQSResponse, QueResource
from spaceone.inventory.connector.aws_sqs_connector.schema.service_type import CLOUD_SERVICE_TYPES
from spaceone.inventory.libs.connector import SchematicAWSConnector
_LOGGER = logging.getLogger(__name__)
class SQSConnector(SchematicAWSConnector):
service_name = 'sqs'
cloud_service_group = 'SQS'
cloud_service_type = 'Queue'
def get_resources(self) -> List[SQSResponse]:
_LOGGER.debug("[get_resources] START: SQS")
resources = []
start_time = time.time()
collect_resource = {
'request_method': self.request_data,
'resource': QueResource,
'response_schema': SQSResponse
}
# init cloud service type
for cst in CLOUD_SERVICE_TYPES:
resources.append(cst)
# merge data
for region_name in self.region_names:
self.reset_region(region_name)
resources.extend(self.collect_data_by_region(self.service_name, region_name, collect_resource))
_LOGGER.debug(f'[get_resources] FINISHED: SQS ({time.time() - start_time} sec)')
return resources
def request_data(self, region_name) -> List[QueData]:
resource = self.session.resource('sqs')
for que in resource.queues.all():
try:
attr = que.attributes
if 'RedrivePolicy' in attr:
attr['RedrivePolicy'] = RedrivePolicy(json.loads(attr.get('RedrivePolicy')), strict=False)
result = QueData(attr)
result.region_name = region_name
result.url = que.url
yield {
'data': result,
'name': result.name,
'launched_at': self.datetime_to_iso8601(result.created_timestamp),
'account': self.account_id
}
except Exception as e:
resource_id = ''
error_resource_response = self.generate_error(region_name, resource_id, e)
yield {'data': error_resource_response}
| 35.984848 | 110 | 0.634526 | 252 | 2,375 | 5.746032 | 0.349206 | 0.048343 | 0.058011 | 0.062155 | 0.105663 | 0.105663 | 0.105663 | 0.105663 | 0 | 0 | 0 | 0.002342 | 0.280842 | 2,375 | 65 | 111 | 36.538462 | 0.845433 | 0.014316 | 0 | 0 | 0 | 0 | 0.088965 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.18 | 0 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
20059b820157bb2d0ee1f766dc27bdd85fb8b399 | 936 | py | Python | Code in Place/Assignment2/nimm.py | LuisAdolfoAlves/Stanford | 8358d315a4c475359742a70eb862a3ac71abd042 | [
"MIT"
] | null | null | null | Code in Place/Assignment2/nimm.py | LuisAdolfoAlves/Stanford | 8358d315a4c475359742a70eb862a3ac71abd042 | [
"MIT"
] | null | null | null | Code in Place/Assignment2/nimm.py | LuisAdolfoAlves/Stanford | 8358d315a4c475359742a70eb862a3ac71abd042 | [
"MIT"
] | null | null | null | """
File: nimm.py
-------------------------
Console game of Nimm: two players alternately remove 1 or 2 stones
from a pile of 20. The player whose turn begins with the pile empty
wins (i.e. whoever takes the last stone loses).
"""
def main():
stones = 20
while stones > -1:
for player in range(1, 3):
if stones == 0:
print(f'Player {player} wins!')
stones -= 1
break
print(f'There are {stones} stones left.')
answer = int(input(f'Player {player} would you like to remove 1 or 2 stones?'))
            while answer < 1 or answer > 2:
                answer = int(input('Enter 1 or 2: '))
if stones == 1:
while answer != 1:
answer = int(input('2 is not available : '))
stones -= answer
if stones == -1:
break
# This provided line is required at the end of a Python file
# to call the main() function.
if __name__ == '__main__':
main()
| 24.631579 | 91 | 0.470085 | 113 | 936 | 3.823009 | 0.477876 | 0.064815 | 0.12963 | 0.087963 | 0.194444 | 0.194444 | 0.106481 | 0 | 0 | 0 | 0 | 0.033569 | 0.395299 | 936 | 37 | 92 | 25.297297 | 0.729682 | 0.162393 | 0 | 0.181818 | 0 | 0 | 0.211613 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0 | 0 | 0.045455 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
200887615ac67e1a7a3968d230b1ee9b68acdafe | 1,371 | py | Python | ssidstat/ssidstatd/monitor.py | putrasattvika/ssidstat | 90bc4a52702ec314a0385f669d68446fa46fe153 | [
"Apache-2.0"
] | null | null | null | ssidstat/ssidstatd/monitor.py | putrasattvika/ssidstat | 90bc4a52702ec314a0385f669d68446fa46fe153 | [
"Apache-2.0"
] | null | null | null | ssidstat/ssidstatd/monitor.py | putrasattvika/ssidstat | 90bc4a52702ec314a0385f669d68446fa46fe153 | [
"Apache-2.0"
] | null | null | null | import time
import daemon
from ssidstat.common import db
from ssidstat.common import sysutils
class MonitorDaemon(daemon.Daemon):
def __init__(self, dbfile, pidfile, interval=10, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
daemon.Daemon.__init__(self, pidfile, stdin=stdin, stdout=stdout, stderr=stderr)
self.dbfile = dbfile
self.boot_id = sysutils.get_boot_id()
self.interval = interval
def run(self):
self.db = db.SSIDStatDB(self.dbfile)
while True:
self.monitor()
time.sleep(self.interval)
def monitor(self):
adapters_ssid = sysutils.get_adapters_ssid()
adapters_stat = sysutils.get_adapters_traffic()
for adapter in adapters_ssid:
ssid = adapters_ssid[adapter]
recorded_adapter_usage = self.db.query_boot_traffic_history(self.boot_id, adapter)
if not recorded_adapter_usage:
self.db.clear_boot_traffic_history(adapter)
recorded_adapter_usage = {
'boot_id': self.boot_id,
'adapter': adapter,
'rx': 0,
'tx': 0
}
delta_rx = adapters_stat[adapter]['rx'] - recorded_adapter_usage['rx']
delta_tx = adapters_stat[adapter]['tx'] - recorded_adapter_usage['tx']
self.db.update_boot_traffic_history(
self.boot_id, adapter,
adapters_stat[adapter]['rx'],
adapters_stat[adapter]['tx']
)
self.db.add_ssid_traffic_history(adapter, ssid, delta_rx, delta_tx)
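# Minimal launch sketch (assumption: the `daemon.Daemon` base class follows the
# common python daemonizer recipe and exposes `start()`; the paths below are
# placeholders, not project defaults).
if __name__ == '__main__':
    MonitorDaemon('/var/lib/ssidstat/stats.db', '/var/run/ssidstatd.pid',
                  interval=10).start()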
| 26.882353 | 109 | 0.726477 | 188 | 1,371 | 5.015957 | 0.271277 | 0.038176 | 0.106045 | 0.054083 | 0.129374 | 0.074231 | 0.074231 | 0 | 0 | 0 | 0 | 0.003445 | 0.153173 | 1,371 | 50 | 110 | 27.42 | 0.808786 | 0 | 0 | 0 | 0 | 0 | 0.041575 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.108108 | 0 | 0.216216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
200c3ab143b719e01a8471eeab8c7b2a5fb55afb | 1,322 | py | Python | operating_system/first_in_first_out/Python/FCFS.py | CarbonDDR/al-go-rithms | 8e65affbe812931b7dde0e2933eb06c0f44b4130 | [
"CC0-1.0"
] | 1,253 | 2017-06-06T07:19:25.000Z | 2022-03-30T17:07:58.000Z | operating_system/first_in_first_out/Python/FCFS.py | rishabh99-rc/al-go-rithms | 4df20d7ef7598fda4bc89101f9a99aac94cdd794 | [
"CC0-1.0"
] | 554 | 2017-09-29T18:56:01.000Z | 2022-02-21T15:48:13.000Z | operating_system/first_in_first_out/Python/FCFS.py | rishabh99-rc/al-go-rithms | 4df20d7ef7598fda4bc89101f9a99aac94cdd794 | [
"CC0-1.0"
] | 2,226 | 2017-09-29T19:59:59.000Z | 2022-03-25T08:59:55.000Z | '''
Author - Ronak Vadhaiya
FCFS
'''
def FCFS(n_process, data):
ttr, ttw = 0, 0
print("="*20 + "FCFS" + "="*20)
tr = data[0][1]
wt = 0
ttr += tr
prev = data[0][0] + data[0][1]
print("P1", "Start Time: ", data[0][0], "End Time: ",
data[0][0]+data[0][1], "TR : ", tr, "WT : ", wt)
for i in range(1, n_process):
process_name = "P"+str(i+1)
if data[i][0] >= prev:
start_time = data[i][0]
end_time = data[i][0] + data[i][1]
else:
start_time = prev
end_time = prev + data[i][1]
tr = end_time - data[i][0]
wt = start_time - data[i][0]
ttr += tr
ttw += wt
print(process_name, "Start Time: ", start_time,
"End Time: ", end_time, "TR : ", tr, "WT : ", wt)
prev = end_time
print("Average TR: ", ttr/float(n_process))
print("Average TW: ", ttw/float(n_process))
if __name__ == "__main__":
data = []
n_process = int(input("Number of Process: "))
for _ in range(n_process):
arrival_time, service_time, priority = map(
int, input("Arrival | Service | Priority").split())
data.append([arrival_time, service_time, priority])
# sort by arrival time
data.sort()
FCFS(n_process, data)
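# Worked example of the trace above (priority is read but ignored by FCFS):
# input: P1 -> "0 4 1", P2 -> "1 3 2"
# P1 Start Time: 0 End Time: 4 TR : 4 WT : 0
# P2 Start Time: 4 End Time: 7 TR : 6 WT : 3
# Average TR: 5.0, Average TW: 1.5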
| 26.44 | 63 | 0.516641 | 187 | 1,322 | 3.486631 | 0.256684 | 0.08589 | 0.046012 | 0.06135 | 0.214724 | 0.03681 | 0 | 0 | 0 | 0 | 0 | 0.031798 | 0.310136 | 1,322 | 49 | 64 | 26.979592 | 0.683114 | 0.037821 | 0 | 0.055556 | 0 | 0 | 0.120348 | 0 | 0.027778 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.027778 | 0 | 0.055556 | 0.138889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
200c3d5ac5bbf2a6efa0995f9b7a1d5bf57885ff | 2,418 | py | Python | ps1/src/linearclass/gda.py | Ziggareto/cs229 | 10b03b68b24d252dad3e3437561976d9509ebdd0 | [
"MIT"
] | null | null | null | ps1/src/linearclass/gda.py | Ziggareto/cs229 | 10b03b68b24d252dad3e3437561976d9509ebdd0 | [
"MIT"
] | null | null | null | ps1/src/linearclass/gda.py | Ziggareto/cs229 | 10b03b68b24d252dad3e3437561976d9509ebdd0 | [
"MIT"
] | null | null | null | import numpy as np
import util
def main(train_path, valid_path, save_path):
"""Problem: Gaussian discriminant analysis (GDA)
Args:
train_path: Path to CSV file containing dataset for training.
valid_path: Path to CSV file containing dataset for validation.
save_path: Path to save predicted probabilities using np.savetxt().
"""
# Load dataset
x_train, y_train = util.load_dataset(train_path, add_intercept=False)
# *** START CODE HERE ***
# Train a GDA classifier
# Plot decision boundary on validation set
# Use np.savetxt to save outputs from validation set to save_path
# *** END CODE HERE ***
class GDA:
"""Gaussian Discriminant Analysis.
Example usage:
> clf = GDA()
> clf.fit(x_train, y_train)
> clf.predict(x_eval)
"""
def __init__(self, step_size=0.01, max_iter=10000, eps=1e-5,
theta_0=None, verbose=True):
"""
Args:
step_size: Step size for iterative solvers only.
max_iter: Maximum number of iterations for the solver.
eps: Threshold for determining convergence.
theta_0: Initial guess for theta. If None, use the zero vector.
verbose: Print loss values during training.
"""
self.theta = theta_0
self.step_size = step_size
self.max_iter = max_iter
self.eps = eps
self.verbose = verbose
def fit(self, x, y):
"""Fit a GDA model to training set given by x and y by updating
self.theta.
Args:
x: Training example inputs. Shape (n_examples, dim).
y: Training example labels. Shape (n_examples,).
"""
# *** START CODE HERE ***
# Find phi, mu_0, mu_1, and sigma
# Write theta in terms of the parameters
# *** END CODE HERE ***
def predict(self, x):
"""Make a prediction given new inputs x.
Args:
x: Inputs of shape (n_examples, dim).
Returns:
Outputs of shape (n_examples,).
"""
# *** START CODE HERE ***
        # *** END CODE HERE ***
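# --- Illustrative sketch, not part of the starter template above ---
# One standard way to fill in the fit() blanks, assuming binary labels
# y in {0, 1}: closed-form maximum-likelihood estimates of the GDA parameters.
def _gda_closed_form(x, y):
    """Return (phi, mu_0, mu_1, sigma) MLE estimates for binary-label GDA."""
    phi = np.mean(y)
    mu_0 = x[y == 0].mean(axis=0)
    mu_1 = x[y == 1].mean(axis=0)
    # pooled covariance of each example around its own class mean
    centered = x - np.where(y[:, None] == 1, mu_1, mu_0)
    sigma = centered.T @ centered / x.shape[0]
    return phi, mu_0, mu_1, sigma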
if __name__ == '__main__':
main(train_path='ds1_train.csv',
valid_path='ds1_valid.csv',
save_path='gda_pred_1.txt')
main(train_path='ds2_train.csv',
valid_path='ds2_valid.csv',
save_path='gda_pred_2.txt')
| 30.225 | 75 | 0.597601 | 317 | 2,418 | 4.375394 | 0.391167 | 0.034607 | 0.040375 | 0.018745 | 0.125451 | 0.125451 | 0.053353 | 0.053353 | 0 | 0 | 0 | 0.012522 | 0.306452 | 2,418 | 79 | 76 | 30.607595 | 0.81455 | 0.551696 | 0 | 0 | 0 | 0 | 0.101734 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.190476 | false | 0 | 0.095238 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
200f1d4b82c58a0465fa2637a27c721e431a980f | 2,532 | py | Python | tracardi/process_engine/action/v1/flow/property_exists/plugin.py | DawidekZagajnik/tracardi | 979015b7b14cb87fb639efb1eee6537932319b61 | [
"MIT"
] | 153 | 2021-11-02T00:35:41.000Z | 2022-03-25T16:37:44.000Z | tracardi/process_engine/action/v1/flow/property_exists/plugin.py | DawidekZagajnik/tracardi | 979015b7b14cb87fb639efb1eee6537932319b61 | [
"MIT"
] | 243 | 2021-10-17T17:00:22.000Z | 2022-03-28T10:13:34.000Z | tracardi/process_engine/action/v1/flow/property_exists/plugin.py | DawidekZagajnik/tracardi | 979015b7b14cb87fb639efb1eee6537932319b61 | [
"MIT"
] | 14 | 2021-10-17T11:39:04.000Z | 2022-03-14T14:44:02.000Z | from tracardi.process_engine.action.v1.flow.property_exists.model.configuration import Configuration
from tracardi.service.plugin.domain.register import Plugin, Spec, MetaData, Documentation, PortDoc, Form, FormGroup, \
FormField, FormComponent
from tracardi.service.plugin.domain.result import Result
from tracardi.service.plugin.runner import ActionRunner
def validate(config: dict):
return Configuration(**config)
class PropertyExistsAction(ActionRunner):
def __init__(self, **kwargs):
self.config = validate(kwargs)
async def run(self, payload):
dot = self._get_dot_accessor(payload)
if self.config.property in dot:
return Result(port="true", value=payload), Result(port="false", value=None)
return Result(port="false", value=payload), Result(port="true", value=None)
def register() -> Plugin:
return Plugin(
start=False,
spec=Spec(
module=__name__,
className='PropertyExistsAction',
author="Risto Kowaczewski",
inputs=["payload"],
outputs=["true", "false"],
version="0.6.2",
init={
'property': 'event@context.page.url'
},
form=Form(groups=[
FormGroup(
fields=[
FormField(
id="property",
name="Data property to check",
description="Type data to validate if exists.",
component=FormComponent(type="dotPath", props={
"defaultSourceValue": "event",
"defaultMode": 1
})
),
]
),
]),
manual=None
),
metadata=MetaData(
name='Data exists',
desc='Checks if the data property exists and is not null.',
icon='exists',
group=["Operations"],
documentation=Documentation(
inputs={
"payload": PortDoc(desc="This port takes any payload object.")
},
outputs={
"true": PortDoc(desc="This port is triggered with input payload if data property exists."),
"false": PortDoc(desc="This port is triggered with input payload if data property does not exist.")
}
)
)
)
| 35.661972 | 119 | 0.525276 | 226 | 2,532 | 5.827434 | 0.446903 | 0.036446 | 0.04328 | 0.056948 | 0.138193 | 0.091116 | 0.091116 | 0.091116 | 0.091116 | 0.091116 | 0 | 0.003157 | 0.374408 | 2,532 | 70 | 120 | 36.171429 | 0.828283 | 0 | 0 | 0.04918 | 0 | 0 | 0.188784 | 0.008689 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04918 | false | 0 | 0.065574 | 0.032787 | 0.196721 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
200fe5bb13aa8a362c4acb4df1a5ad2b2c8c36a2 | 8,178 | py | Python | experiments/s3dis/train.py | corochann/chainer-pointnet | 4b0350122c6a704ebea9bf206896a6f18e1ab4d7 | [
"MIT"
] | 37 | 2018-06-01T21:10:58.000Z | 2021-11-14T15:42:33.000Z | experiments/s3dis/train.py | KosukeArase/chainer-pointnet | 4b0350122c6a704ebea9bf206896a6f18e1ab4d7 | [
"MIT"
] | 3 | 2018-07-20T10:16:07.000Z | 2019-10-01T01:36:52.000Z | experiments/s3dis/train.py | KosukeArase/chainer-pointnet | 4b0350122c6a704ebea9bf206896a6f18e1ab4d7 | [
"MIT"
] | 11 | 2018-08-01T07:05:41.000Z | 2022-03-23T06:07:00.000Z | #!/usr/bin/env python
from __future__ import print_function
import argparse
from distutils.util import strtobool
import os
import chainer
from chainer import serializers
from chainer import iterators
from chainer import optimizers
from chainer import training
from chainer.dataset import to_device, concat_examples
from chainer.datasets import TransformDataset
from chainer.training import extensions as E
from chainer_pointnet.models.kdcontextnet.kdcontextnet_seg import \
KDContextNetSeg
from chainer_pointnet.models.kdnet.kdnet_seg import KDNetSeg
from chainer_pointnet.models.pointnet.pointnet_seg import PointNetSeg
from chainer_pointnet.models.pointnet2.pointnet2_seg_ssg import PointNet2SegSSG
from s3dis_dataset import get_dataset
from chainer_pointnet.utils.kdtree import calc_max_level, TransformKDTreeSeg
def main():
parser = argparse.ArgumentParser(
description='S3DIS segmentation')
parser.add_argument('--method', '-m', type=str, default='point_seg')
parser.add_argument('--batchsize', '-b', type=int, default=32)
parser.add_argument('--dropout_ratio', type=float, default=0.0)
parser.add_argument('--num_point', type=int, default=4096)
parser.add_argument('--gpu', '-g', type=int, default=-1)
parser.add_argument('--out', '-o', type=str, default='result')
parser.add_argument('--epoch', '-e', type=int, default=250)
parser.add_argument('--seed', '-s', type=int, default=777)
parser.add_argument('--protocol', type=int, default=2)
parser.add_argument('--model_filename', type=str, default='model.npz')
parser.add_argument('--resume', type=str, default='')
parser.add_argument('--trans', type=strtobool, default='false')
parser.add_argument('--use_bn', type=strtobool, default='true')
parser.add_argument('--normalize', type=strtobool, default='false')
parser.add_argument('--residual', type=strtobool, default='false')
args = parser.parse_args()
seed = args.seed
out_dir = args.out
method = args.method
num_point = args.num_point
try:
os.makedirs(out_dir, exist_ok=True)
import chainerex.utils as cl
fp = os.path.join(out_dir, 'args.json')
cl.save_json(fp, vars(args))
print('save args to', fp)
except ImportError:
pass
# S3DIS dataset has 13 labels
num_class = 13
in_dim = 9
# Dataset preparation
train, val = get_dataset(num_point=num_point)
if method == 'kdnet_seg' or method == 'kdcontextnet_seg':
max_level = calc_max_level(num_point)
print('kdnet max_level {}'.format(max_level))
return_split_dims = (method == 'kdnet_seg')
train = TransformDataset(train, TransformKDTreeSeg(
max_level=max_level, return_split_dims=return_split_dims))
val = TransformDataset(val, TransformKDTreeSeg(
max_level=max_level, return_split_dims=return_split_dims))
if method == 'kdnet_seg':
# Debug print
points, split_dims, t = train[0]
print('converted to kdnet dataset train', points.shape, split_dims.shape, t.shape)
points, split_dims, t = val[0]
print('converted to kdnet dataset val', points.shape, split_dims.shape, t.shape)
if method == 'kdcontextnet_seg':
# Debug print
points, t = train[0]
print('converted to kdcontextnet dataset train', points.shape, t.shape)
points, t = val[0]
print('converted to kdcontextnet dataset val', points.shape, t.shape)
# Network
trans = args.trans
use_bn = args.use_bn
dropout_ratio = args.dropout_ratio
normalize = args.normalize
residual = args.residual
converter = concat_examples
if method == 'point_seg':
print('Train PointNetSeg model... trans={} use_bn={} dropout={}'
.format(trans, use_bn, dropout_ratio))
model = PointNetSeg(
out_dim=num_class, in_dim=in_dim, middle_dim=64,
dropout_ratio=dropout_ratio,
trans=trans, trans_lam1=0.001, trans_lam2=0.001, use_bn=use_bn,
residual=residual)
elif method == 'point2_seg_ssg':
print('Train PointNet2SegSSG model... use_bn={} dropout={}'
.format(use_bn, dropout_ratio))
model = PointNet2SegSSG(
out_dim=num_class, in_dim=in_dim,
dropout_ratio=dropout_ratio, use_bn=use_bn, residual=residual)
elif method == 'kdnet_seg':
print('Train KDNetSeg model... use_bn={} dropout={}'
.format(use_bn, dropout_ratio))
model = KDNetSeg(
out_dim=num_class, in_dim=in_dim,
dropout_ratio=dropout_ratio, use_bn=use_bn, max_level=max_level,
residual=residual)
def kdnet_converter(batch, device=None, padding=None):
# concat_examples to CPU at first.
result = concat_examples(batch, device=None, padding=padding)
out_list = []
for elem in result:
if elem.dtype != object:
# Send to GPU for int/float dtype array.
out_list.append(to_device(device, elem))
else:
# Do NOT send to GPU for dtype=object array.
out_list.append(elem)
return tuple(out_list)
converter = kdnet_converter
elif method == 'kdcontextnet_seg':
print('Train KDContextNetSeg model... use_bn={} dropout={} '
'normalize={} residual={}'
.format(use_bn, dropout_ratio, normalize, residual))
model = KDContextNetSeg(
out_dim=num_class, in_dim=in_dim,
            dropout_ratio=dropout_ratio, use_bn=use_bn, normalize=normalize,
residual=residual)
else:
raise ValueError('[ERROR] Invalid method {}'.format(method))
train_iter = iterators.SerialIterator(train, args.batchsize)
val_iter = iterators.SerialIterator(
val, args.batchsize, repeat=False, shuffle=False)
device = args.gpu
# classifier = Classifier(model, device=device)
classifier = model
load_model = False
if load_model:
serializers.load_npz(
os.path.join(args.out, args.model_filename), classifier)
if device >= 0:
chainer.cuda.get_device_from_id(device).use()
classifier.to_gpu() # Copy the model to the GPU
optimizer = optimizers.Adam()
optimizer.setup(classifier)
updater = training.StandardUpdater(
train_iter, optimizer, device=args.gpu, converter=converter)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
from chainerex.training.extensions import schedule_optimizer_value
from chainer.training.extensions import observe_value
# trainer.extend(observe_lr)
observation_key = 'lr'
trainer.extend(observe_value(
observation_key,
lambda trainer: trainer.updater.get_optimizer('main').alpha))
trainer.extend(schedule_optimizer_value(
[10, 20, 100, 150, 200, 230],
[0.003, 0.001, 0.0003, 0.0001, 0.00003, 0.00001]))
trainer.extend(E.Evaluator(
val_iter, classifier, device=args.gpu, converter=converter))
trainer.extend(E.snapshot(), trigger=(args.epoch, 'epoch'))
trainer.extend(E.LogReport())
trainer.extend(E.PrintReport(
['epoch', 'main/loss', 'main/cls_loss', 'main/trans_loss1',
'main/trans_loss2', 'main/accuracy', 'validation/main/loss',
# 'validation/main/cls_loss',
# 'validation/main/trans_loss1', 'validation/main/trans_loss2',
'validation/main/accuracy', 'lr', 'elapsed_time']))
trainer.extend(E.ProgressBar(update_interval=10))
if args.resume:
serializers.load_npz(args.resume, trainer)
trainer.run()
# --- save classifier ---
# protocol = args.protocol
# classifier.save_pickle(
# os.path.join(args.out, args.model_filename), protocol=protocol)
serializers.save_npz(
os.path.join(args.out, args.model_filename), classifier)
if __name__ == '__main__':
main()
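# Example invocation (flag values are illustrative; see the argparse setup above):
#   python train.py --method point_seg --gpu 0 --batchsize 16 --num_point 4096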
| 39.892683 | 94 | 0.663732 | 1,004 | 8,178 | 5.210159 | 0.22012 | 0.018161 | 0.048748 | 0.016249 | 0.214491 | 0.201491 | 0.144523 | 0.116613 | 0.093672 | 0.093672 | 0 | 0.015576 | 0.222793 | 8,178 | 204 | 95 | 40.088235 | 0.807426 | 0.066642 | 0 | 0.08642 | 0 | 0 | 0.118319 | 0.003152 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012346 | false | 0.006173 | 0.141975 | 0 | 0.160494 | 0.067901 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
20105ad6c1b6144c00a1af858c386181e3dcb971 | 4,617 | py | Python | tests/image/test_init.py | PyExplorer/shub | dc38191e6593f3c012cb89ed1551f8b0dd2981d8 | [
"BSD-3-Clause"
] | 111 | 2015-02-05T15:24:15.000Z | 2022-03-31T03:31:22.000Z | tests/image/test_init.py | PyExplorer/shub | dc38191e6593f3c012cb89ed1551f8b0dd2981d8 | [
"BSD-3-Clause"
] | 355 | 2015-01-01T16:18:46.000Z | 2022-03-18T15:41:10.000Z | tests/image/test_init.py | PyExplorer/shub | dc38191e6593f3c012cb89ed1551f8b0dd2981d8 | [
"BSD-3-Clause"
] | 79 | 2015-02-23T17:07:32.000Z | 2022-01-03T09:15:39.000Z | import os
import pytest
from click.testing import CliRunner
from shub.exceptions import BadConfigException
from shub.image.init import cli
from shub.image.init import _format_system_deps
from shub.image.init import _format_system_env
from shub.image.init import _format_requirements
from shub.image.init import _wrap
from .utils import add_fake_requirements
@pytest.fixture
def project_dir(project_dir):
"""Overriden project_dir fixture without Dockerfile"""
os.remove(os.path.join(project_dir, 'Dockerfile'))
return project_dir
def test_cli_default_settings(project_dir):
dockerfile_path = os.path.join(project_dir, 'Dockerfile')
assert not os.path.exists(dockerfile_path)
runner = CliRunner()
result = runner.invoke(cli, [])
assert result.exit_code == 0
msg = 'Dockerfile is saved to {}'.format(dockerfile_path)
assert msg in result.output
assert os.path.exists(dockerfile_path)
@pytest.mark.usefixtures('project_dir')
def test_cli_list_recommended_reqs():
runner = CliRunner()
result = runner.invoke(cli, ["--list-recommended-reqs"])
assert result.exit_code == 0
assert "Recommended Python deps list:" in result.output
def test_cli_abort_if_dockerfile_exists(project_dir):
dockerfile_path = os.path.join(project_dir, 'Dockerfile')
open(dockerfile_path, 'w').close()
runner = CliRunner()
result = runner.invoke(cli, [], input='yes\n')
assert result.exit_code == 1
assert 'Found a Dockerfile in the project directory, aborting' in result.output
assert os.path.exists(os.path.join(project_dir, 'Dockerfile'))
with open(dockerfile_path) as f:
assert f.read() == ''
def test_cli_create_setup_py(project_dir):
setup_py_path = os.path.join(project_dir, 'setup.py')
os.remove(setup_py_path)
runner = CliRunner()
result = runner.invoke(cli, [], input='yes\n')
assert result.exit_code == 0
assert os.path.isfile(setup_py_path)
def test_wrap():
short_cmd = "run short command wrapping another one short"
assert _wrap(short_cmd) == short_cmd
assert _wrap(short_cmd + ' ' + short_cmd) == (
short_cmd + ' ' + ' '.join(short_cmd.split()[:3]) +
" \\\n " + ' '.join(short_cmd.split()[3:]))
def test_format_system_deps():
# no deps at all
assert _format_system_deps('-', None) is None
# base deps only
assert _format_system_deps('a,b,cd', None) == (
"RUN apt-get update -qq && \\\n"
" apt-get install -qy a b cd && \\\n"
" rm -rf /var/lib/apt/lists/*")
# base & additional deps only
assert _format_system_deps('a,b,cd', 'ef,hk,b') == (
"RUN apt-get update -qq && \\\n"
" apt-get install -qy a b cd ef hk && \\\n"
" rm -rf /var/lib/apt/lists/*")
# additional deps only
assert _format_system_deps('-', 'ef,hk,b') == (
"RUN apt-get update -qq && \\\n"
" apt-get install -qy b ef hk && \\\n"
" rm -rf /var/lib/apt/lists/*")
def test_format_system_env():
assert _format_system_env(None) == 'ENV TERM xterm'
assert _format_system_env('test.settings') == (
"ENV TERM xterm\n"
"ENV SCRAPY_SETTINGS_MODULE test.settings")
def test_format_requirements(project_dir):
add_fake_requirements(project_dir)
basereqs = os.path.join(project_dir, 'requirements.txt')
if os.path.exists(basereqs):
os.remove(basereqs)
# use given requirements
assert _format_requirements(
os.getcwd(), 'fake-requirements.txt') == (
"COPY ./fake-requirements.txt /app/requirements.txt\n"
"RUN pip install --no-cache-dir -r requirements.txt")
assert not os.path.exists(basereqs)
# using base requirements
assert _format_requirements(
os.getcwd(), 'requirements.txt') == (
"COPY ./requirements.txt /app/requirements.txt\n"
"RUN pip install --no-cache-dir -r requirements.txt")
assert os.path.exists(basereqs)
os.remove(basereqs)
def test_no_scrapy_cfg(project_dir):
os.remove(os.path.join(project_dir, 'scrapy.cfg'))
runner = CliRunner()
result = runner.invoke(cli, [])
assert result.exit_code == BadConfigException.exit_code
error_msg = (
'Error: Cannot find Scrapy project settings. Please ensure that current '
'directory contains scrapy.cfg with settings section, see example at '
'https://doc.scrapy.org/en/latest/topics/commands.html#default-structure-of-scrapy-projects'
)
assert error_msg in result.output
assert not os.path.exists(os.path.join(project_dir, 'Dockerfile'))
| 35.515385 | 100 | 0.675114 | 631 | 4,617 | 4.757528 | 0.234548 | 0.063291 | 0.026649 | 0.045303 | 0.551299 | 0.474017 | 0.383078 | 0.274151 | 0.274151 | 0.208195 | 0 | 0.001621 | 0.198397 | 4,617 | 129 | 101 | 35.790698 | 0.809511 | 0.037903 | 0 | 0.262626 | 0 | 0.010101 | 0.264726 | 0.030467 | 0 | 0 | 0 | 0 | 0.272727 | 1 | 0.10101 | false | 0 | 0.10101 | 0 | 0.212121 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2014bfd95f78901caeb13c9daf7444041662fd7c | 6,195 | py | Python | ufo2otf/compilers.py | debian-janitor/ufo2otf-debian | 1203d95f454d918ab74ffef0f7f00addbd410ded | [
"BSD-3-Clause"
] | 21 | 2015-05-04T14:15:01.000Z | 2021-11-15T03:17:58.000Z | ufo2otf/compilers.py | debian-janitor/ufo2otf-debian | 1203d95f454d918ab74ffef0f7f00addbd410ded | [
"BSD-3-Clause"
] | 4 | 2018-07-31T10:07:29.000Z | 2020-09-10T11:13:45.000Z | ufo2otf/compilers.py | fonts/ufo2otf | 9025ba292c2a17e7dc8010c4fed79ab1a036403a | [
"BSD-3-Clause"
] | 3 | 2016-01-26T04:01:13.000Z | 2017-12-08T13:16:37.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import mkdir
from os.path import splitext, dirname, sep, join, exists, basename
from subprocess import Popen
from diagnostics import diagnostics, known_compilers, FontError
import codecs
import re
diagnostics = diagnostics()
class Compiler:
def __init__(self,infiles,webfonts=False,afdko=False):
# we strip trailing slashes from ufo names,
# otherwise we get confused later on when
# generating filenames:
self.infiles = [i.strip(sep) for i in infiles]
self.webfonts = webfonts
self.css = ''
if afdko:
if diagnostics['afdko']:
self.compile = self.afdko
else:
raise FontError("afdko", diagnostics)
else:
if diagnostics['fontforge']:
self.compile = self.fontforge
else:
raise FontError("fontforge", diagnostics)
def fontforge(self):
import fontforge
eot = False
if diagnostics['mkeot']:
eot = True
for infile in self.infiles:
outdir = dirname(infile)
name = splitext(infile)[0]
font = fontforge.open(infile)
otf_file_name = join(outdir, basename(name) + '.otf')
            if splitext(infile)[1].lower() != '.otf':
"""
                Even though the tool is called Ufo2Otf, it can be used on otfs too:
                in that case it only generates webfonts. If an otf file is the
                infile, we skip otf generation.
"""
font.generate(otf_file_name, flags=("round"))
if self.webfonts:
# Optimise for Web
font.autoHint()
# Generate Webfonts
webfonts_path = join(outdir, 'webfonts')
if not exists(webfonts_path):
mkdir(webfonts_path)
woff_file_name = join(outdir, 'webfonts', basename(name) + '.woff')
ttf_file_name = join(outdir, 'webfonts', basename(name) + '.ttf')
eot_file_name = join(outdir, 'webfonts', basename(name) + '.eot')
font.generate(woff_file_name, flags=("round"))
font.generate(ttf_file_name, flags=("round"))
if eot:
                    with open(eot_file_name, 'wb') as eot_file:
                        pipe = Popen(['mkeot', ttf_file_name], stdout=eot_file)
                        pipe.wait()
# Generating CSS
#
# CSS can only cover a limited set of styles:
# it knows about font weight, and about the difference between
# regular and italic.
# It also knows font-style: oblique, but most browser will take
# the regular variant and slant it.
font_style = "normal"
# This tends to work quite well, as long as you have one kind of
# italic in your font family:
if font.italicangle != 0:
font_style = "italic"
# CSS weights map quite well to Opentype, so including families
# with lots of different weights is no problem.
#
# http://www.microsoft.com/typography/otspec/os2ver0.htm#wtc
# ->
# http://www.w3.org/TR/CSS21/fonts.html#font-boldness
font_weight = font.os2_weight
#
# Anything else, like condensed, for example, will need to be
# be put into a different font family, because there is no way
# to encode it into CSS.
#
# What we do here, is try to determine whether this is the case.
# ie:
# >>> font.fullname
# 'Nimbus Sans L Bold Condensed Italic'
# >>> font.familyname
# 'Nimbus Sans L'
# >>> font.weight
# 'Bold'
# >>> re.findall("italic|oblique", f.fullname, re.I)
# ['Italic']
#
# By then removing all these components from the full name,
# we find out there is a specific style such as, in this case,
# 'Condensed'
font_family = font.familyname
specifics = re.sub("italic|oblique", '',
font.fullname.
replace(font.familyname, '').
replace(font.weight, ''),
flags=re.I).strip()
if specifics:
font_family = "%s %s" % (font.familyname, specifics)
if eot:
self.css += """@font-face {
font-family: '%s';
font-style: '%s';
font-weight: '%s';
src: url('%s'); /* IE9 Compat Modes */
src: url('%s?#iefix') format('embedded-opentype'),
url('%s') format('woff'),
url('%s') format('truetype');
}
""" % (font_family,
font_style,
font_weight,
basename(eot_file_name),
basename(eot_file_name),
basename(woff_file_name),
basename(ttf_file_name) )
else:
self.css += """@font-face {
font-family: '%s';
font-style: '%s';
font-weight: '%s';
src: url('%s') format('woff'),
url('%s') format('truetype');
}
""" % (font_family,
font_style,
font_weight,
basename(woff_file_name),
basename(ttf_file_name) )
if self.css:
c = codecs.open(join(dirname(self.infiles[0]), 'webfonts', 'style.css'),'w','UTF-8')
c.write(self.css)
c.close()
def afdko(self):
import ufo2fdk
from robofab.objects.objectsRF import RFont
compiler = ufo2fdk.OTFCompiler()
for infile in self.infiles:
outfile = splitext(infile)[0] + '.otf'
font = RFont(infile)
compiler.compile(font, outfile, releaseMode=True)
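# Minimal usage sketch ('MyFont.ufo' is a placeholder): the constructor picks
# the fontforge or afdko backend, and compile() runs it.
if __name__ == '__main__':
    Compiler(['MyFont.ufo'], webfonts=True).compile()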
| 36.875 | 96 | 0.504762 | 656 | 6,195 | 4.6875 | 0.355183 | 0.039024 | 0.017886 | 0.023415 | 0.17561 | 0.138537 | 0.138537 | 0.101463 | 0.078699 | 0.078699 | 0 | 0.004516 | 0.392413 | 6,195 | 167 | 97 | 37.095808 | 0.812434 | 0.201453 | 0 | 0.294118 | 0 | 0 | 0.124464 | 0.005998 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.088235 | 0 | 0.127451 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2014e723afe7cd6d380a497e7707c33b3a2406d2 | 1,999 | py | Python | model_converter.py | dylancrockett/classical-music-generator-ai | 8f8416c022cecd238be80473acbf497d052516c5 | [
"MIT"
] | null | null | null | model_converter.py | dylancrockett/classical-music-generator-ai | 8f8416c022cecd238be80473acbf497d052516c5 | [
"MIT"
] | null | null | null | model_converter.py | dylancrockett/classical-music-generator-ai | 8f8416c022cecd238be80473acbf497d052516c5 | [
"MIT"
] | null | null | null | from midi_converter import char_to_note, note_to_char
import random
def load_data(filename):
data = []
# load the file
with open(filename, "r") as f:
file = f.readlines()
for line in file:
line = line.replace("\n", "")
line = line.replace(",", "")
data.append(line)
return data
# generate a seed for the model to use to generate a song sequence
def generate_seed(size):
seed = []
for i in range(size):
arr = []
for x in range(88):
arr.append(0)
for x in range(random.randint(5, 10)):
arr[random.randint(0, 81)] = 1
seed.append(arr)
print(seed)
return seed
def str_to_mask(string: str):
arr = []
for x in range(88):
arr.append(0)
if string == " ":
return arr
else:
for note in string:
arr[char_to_note(note) - 1] = 1
return arr
def bool_to_mask(boolean_list: list):
arr = []
for x in range(88):
        if boolean_list[0][x]:
arr.append(1)
else:
arr.append(0)
return arr
def mask_to_string(mask: list):
string = ""
for x, val in enumerate(mask):
if val == 1:
string += note_to_char(x + 1)
return string
# convert a dataset into a sample for the AI to train off of
class Sample:
def __init__(self, song: list):
self.song = song
self.data = []
for beat in song:
arr = []
for x in range(88):
arr.append(0)
if beat == " ":
self.data.append(arr)
continue
else:
for note in beat:
arr[char_to_note(note) - 1] = 1
self.data.append(arr)
continue
def get_unique_list(origin: list):
unique = []
for item in origin:
if item not in unique:
unique.append(item)
return unique
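# Round-trip sketch (assumption: midi_converter maps characters to 1-indexed
# piano notes, so note_to_char(char_to_note(c)) == c for valid note chars):
#   mask = str_to_mask("abc")   # 88-slot 0/1 mask with three notes set
#   mask_to_string(mask)        # -> the same notes, in ascending note order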
| 18.682243 | 66 | 0.511756 | 266 | 1,999 | 3.744361 | 0.281955 | 0.042169 | 0.03012 | 0.055221 | 0.186747 | 0.136546 | 0.120482 | 0.082329 | 0.082329 | 0.056225 | 0 | 0.02282 | 0.386193 | 1,999 | 106 | 67 | 18.858491 | 0.788916 | 0.068534 | 0 | 0.352941 | 0 | 0 | 0.003229 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102941 | false | 0 | 0.029412 | 0 | 0.25 | 0.014706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2015cd2bc695dff119d87cd94a61638c234d79af | 2,978 | py | Python | src/baseline_main.py | NaiboWang/HFL-CS6203-NaiboShiqi | 4bab35a20f1ec1229b0011c952d93c341579c402 | [
"MIT"
] | null | null | null | src/baseline_main.py | NaiboWang/HFL-CS6203-NaiboShiqi | 4bab35a20f1ec1229b0011c952d93c341579c402 | [
"MIT"
] | null | null | null | src/baseline_main.py | NaiboWang/HFL-CS6203-NaiboShiqi | 4bab35a20f1ec1229b0011c952d93c341579c402 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.8
"""
This script runs the baseline experiments, configured via command-line arguments.
Please see readme.md for the supported arguments.
"""
import pickle
import time
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from utils import *
from options import *
from update import *
from models import *
if __name__ == '__main__':
start_time = time.time()
args = args_parser()
    if args.gpu >= 0 and torch.cuda.is_available():
        torch.cuda.set_device(int(args.gpu))
    device = 'cuda' if args.gpu >= 0 and torch.cuda.is_available() else 'cpu'
# load datasets
train_dataset, test_dataset, _ = get_dataset(args)
global_model = get_model(args)
global_model.to(device)
# Set model to training mode
global_model.train()
print("Dataset: ", args.dataset)
print(global_model)
# Training
optimizer = get_optimizer(global_model,args)
if args.dataset=="COVID19_twitter" or args.dataset == "heartbeat":
batch_size = 1
else:
batch_size = 64
trainloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
criterion = torch.nn.CrossEntropyLoss().to(device)
train_accuracy, test_accuracy = [], []
train_losses, test_losses = [], []
for epoch in tqdm(range(args.epochs)):
global_model.train()
for batch_idx, (data, labels) in enumerate(trainloader):
data, labels = data.to(device), labels.to(device)
optimizer.zero_grad()
outputs = global_model(data)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# Train Accuracy
train_acc, train_loss, _, _ = test_inference(args, global_model, train_dataset)
print('\r\nTraining on', len(train_dataset), 'samples')
print("Training Accuracy: {:.2f}%, loss: {} for epoch {}".format(100 * train_acc, train_loss, epoch + 1))
# testing
test_acc, test_loss, _, _ = test_inference(args, global_model, test_dataset)
print('Test on', len(test_dataset), 'samples')
print("Test Accuracy: {:.2f}%, loss: {} for epoch {}".format(100 * test_acc, test_loss, epoch + 1))
train_accuracy.append(train_acc)
test_accuracy.append(test_acc)
train_losses.append(train_loss)
test_losses.append(test_loss)
# Saving the objects train_loss and train_accuracy:
file_name = '../save/objects/baseline_{}_{}_O[{}]_C[{}]_E[{}]_B[{}].pkl'. \
format(args.dataset, args.epochs, args.optimizer, args.frac, args.local_ep, args.local_bs)
print("file_name:", file_name)
with open(file_name, 'wb') as f:
pickle.dump(
{"train_accuracy": train_accuracy, "test_accuracy": test_accuracy, "train_losses": train_losses, "test_losses": test_losses, "runtime": time.time() - start_time}, f)
print('\n Total Run Time: {0:0.4f}'.format(time.time() - start_time))
| 36.765432 | 178 | 0.659167 | 389 | 2,978 | 4.81491 | 0.344473 | 0.052856 | 0.032034 | 0.014949 | 0.124933 | 0.067272 | 0.033102 | 0 | 0 | 0 | 0 | 0.009374 | 0.211887 | 2,978 | 80 | 179 | 37.225 | 0.788666 | 0.10779 | 0 | 0.036364 | 0 | 0 | 0.12571 | 0.021961 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.145455 | 0 | 0.145455 | 0.145455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
201664b688293c2f210424f82b9dea760c6421c4 | 2,160 | py | Python | openmdao/test_suite/groups/parametric_group.py | ryanfarr01/blue | a9aac98c09cce0f7cadf26cf592e3d978bf4e3ff | [
"Apache-2.0"
] | null | null | null | openmdao/test_suite/groups/parametric_group.py | ryanfarr01/blue | a9aac98c09cce0f7cadf26cf592e3d978bf4e3ff | [
"Apache-2.0"
] | null | null | null | openmdao/test_suite/groups/parametric_group.py | ryanfarr01/blue | a9aac98c09cce0f7cadf26cf592e3d978bf4e3ff | [
"Apache-2.0"
] | null | null | null | """Define the test group classes."""
from __future__ import division, print_function
from openmdao.core.group import Group
class ParametericTestGroup(Group):
"""
Test Group expected by `ParametricInstance`. Groups inheriting from this should extend
`default_params` to include valid parametric options for that model.
Attributes
----------
expected_totals : dict or None
Dictionary mapping (out, in) pairs to the associated total derivative. Optional
total_of : iterable
Iterable containing which outputs to take the derivative of.
total_wrt : iterable
Iterable containing which variables with which to take the derivative of the above.
expected_values : dict or None
Dictionary mapping variable names to expected values. Optional.
default_params : dict
Dictionary containing the available options and default values for parametric sweeps.
"""
def __init__(self, **kwargs):
self.expected_totals = None
self.total_of = None
self.total_wrt = None
self.expected_values = None
self.default_params = {
'vector_class': ['default', 'petsc'],
'assembled_jac': [True, False],
'jacobian_type': ['matvec', 'dense', 'sparse-coo', 'sparse-csr',
'sparse-csc'],
}
super(ParametericTestGroup, self).__init__()
self.metadata.declare('vector_class', default='default',
values=['default', 'petsc'],
type_=str,
desc='Which vector implementation to use.')
self.metadata.declare('assembled_jac', default=True,
type_=bool,
desc='If an assemebled Jacobian should be used.')
self.metadata.declare('jacobian_type', default='matvec',
type_=str,
values=['dense', 'matvec', 'sparse-coo', 'sparse-csr', 'sparse-csc'],
desc='Controls the type of the assembled jacobian.')
self.metadata.update(kwargs)
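# Hypothetical subclass sketch: how a concrete model plugs into this harness
# per the docstring contract above. Component/variable names are illustrative.
class ExampleParametricGroup(ParametericTestGroup):
    def __init__(self, **kwargs):
        super(ExampleParametricGroup, self).__init__(**kwargs)
        # values a `ParametricInstance` would be expected to verify
        self.expected_values = {'comp.y': 2.0}
        self.total_of = ['comp.y']
        self.total_wrt = ['comp.x']
        self.expected_totals = {('comp.y', 'comp.x'): 2.0}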
| 40.754717 | 99 | 0.598611 | 223 | 2,160 | 5.650224 | 0.430493 | 0.025397 | 0.045238 | 0.031746 | 0.119048 | 0.042857 | 0 | 0 | 0 | 0 | 0 | 0 | 0.310185 | 2,160 | 52 | 100 | 41.538462 | 0.845638 | 0.3375 | 0 | 0.074074 | 0 | 0 | 0.231959 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.074074 | 0 | 0.148148 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2016f524e2a1b9db4f8a8bef592b86cb754e2948 | 2,577 | py | Python | CSE4309/assignment 6/knn_classify.py | theoneineed/MLandVision | b9bbef887ed63829e958daee660dd7a900e55f8f | [
"MIT"
] | null | null | null | CSE4309/assignment 6/knn_classify.py | theoneineed/MLandVision | b9bbef887ed63829e958daee660dd7a900e55f8f | [
"MIT"
] | null | null | null | CSE4309/assignment 6/knn_classify.py | theoneineed/MLandVision | b9bbef887ed63829e958daee660dd7a900e55f8f | [
"MIT"
] | null | null | null | #Nabin Chapagain
#1001551151
#knn_classify(<training_file>, <test_file>, <k>)
# Importing all needed libraries
import numpy as np
import math
import sys
import random
from scipy import stats
from scipy.spatial import distance
import statistics as s
from statistics import mean, median, mode, stdev
fname = sys.argv[1]
fname1 = sys.argv[2]
k_nearest = int(sys.argv[3])
mat_train = np.loadtxt(fname)
mat_train_prime = mat_train[:,0:-1]
mat_test= np.loadtxt(fname1)
mat_test_prime = mat_test[:,0:-1]
no_train_mat_row = len(mat_train)
no_train_mat_col = len(mat_train[0])
mean_arr = np.zeros(no_train_mat_col-1)
std_arr = np.zeros(no_train_mat_col-1)
std_in_between = 0
#getting mean and standard deviation for each attribute before normalizing
for i in range (0,no_train_mat_col-1):
mean_arr[i] = mean(mat_train[:,i])
std_in_between = stdev(mat_train[:,i])
if (std_in_between == 0):
std_in_between = 1
std_arr[i] = std_in_between
#next step, normalizing the values
for j in range(0,no_train_mat_col-1):
mat_train[:,j] = (mat_train[:,j] - mean_arr[j])/std_arr[j]
# F(v) = (v - mean)/std
for j in range(0,no_train_mat_col-1):
mat_test[:,j] = (mat_test[:,j] - mean_arr[j])/std_arr[j]
# F(v) = (v - mean)/std
#here, we have normalized training matrix and test matrix
classification_accuracy = 0
for i in range(0,len(mat_test)):
E_dist = np.zeros(no_train_mat_row)
for k in range(len(E_dist)):
E_dist[k]= distance.euclidean(mat_test_prime[i],mat_train_prime[k])
accuracy = 0
true_class = mat_test[i][-1]
k_nearest_points = E_dist.argsort()[:k_nearest]
    #Indices of the k_nearest lowest distances in the list E_dist
predicted_array = mat_train[k_nearest_points,-1]
mode_class = s.multimode(predicted_array)
mode_class.sort()
#print(mode_class,"\n")
if(len(mode_class) == 1):
predicted_class = mode_class[0]
if(predicted_class == true_class):
accuracy = 1
else:
        #now to deal with ties: if the true class is among the tied modes,
        #award partial credit of 1/(number of tied classes)
        for m in range(0,len(mode_class)):
            predicted_class = mode_class[m]
            if(true_class == predicted_class):
                accuracy = 1/len(mode_class)
                break
object_id = i+1
print('ID=%5d, predicted=%3d, true=%3d, accuracy=%4.2f'%( object_id, predicted_class, true_class, accuracy))
classification_accuracy+=accuracy
print('classification accuracy=%6.4f\n'% (classification_accuracy/len(mat_test)))
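#Example invocation (file names are illustrative):
#python knn_classify.py pendigits_training.txt pendigits_test.txt 5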
| 25.019417 | 112 | 0.683741 | 419 | 2,577 | 3.947494 | 0.279236 | 0.053204 | 0.048368 | 0.047158 | 0.169891 | 0.112455 | 0.112455 | 0.112455 | 0.099154 | 0.066505 | 0 | 0.023614 | 0.1948 | 2,577 | 102 | 113 | 25.264706 | 0.773494 | 0.168413 | 0 | 0.071429 | 0 | 0 | 0.03681 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
201aafc09a27272df9e65b3abf61a1c4c43849ac | 3,218 | py | Python | aileen/box/management/commands/monitor_tmux.py | aileenproject/aileen | ea3cee33658e8f0a32edc806b4aad22a75227f26 | [
"MIT"
] | 11 | 2018-12-16T10:59:19.000Z | 2019-04-13T09:35:25.000Z | aileen/box/management/commands/monitor_tmux.py | aileenproject/aileen-core | ea3cee33658e8f0a32edc806b4aad22a75227f26 | [
"MIT"
] | 6 | 2019-07-08T09:21:50.000Z | 2019-11-08T08:21:54.000Z | aileen/box/management/commands/monitor_tmux.py | aileenproject/aileen-core | ea3cee33658e8f0a32edc806b4aad22a75227f26 | [
"MIT"
] | 3 | 2019-01-17T23:18:27.000Z | 2019-04-13T09:55:05.000Z | import logging
import os
import time
from datetime import datetime
import pytz
from django.conf import settings
from django.core.management.base import BaseCommand
import libtmux
from box.management.commands.run_box import start_sensor_in_tmux
from box.models import BoxSettings
from box.utils.dir_handling import build_tmp_dir_name
from data.models import TmuxStatus
from data.time_utils import sleep_until_interval_is_complete
"""
This command is concerned with the health of tmux sessions: state is monitored and periodic restarts are done.
Currently this concerns only the sensor; add any other handling here as necessary (e.g. if state becomes of
interest on the server, or if periodic restarts would improve stability).
"""
logger = logging.getLogger(__name__)
def restart_sensor(tmux_session):
"""Kill the tmux window running the sensor, delete all sensor files, and start fresh."""
logger.info("Restarting sensor for long-term health and sanitary reasons ...")
tmux_session.find_where({"window_name": "sensor"}).kill_window()
tmp_dir = build_tmp_dir_name()
for sensor_file in [
f for f in os.listdir(tmp_dir) if f.startswith(settings.SENSOR_FILE_PREFIX)
]:
os.remove(f"{tmp_dir}/{sensor_file}")
start_sensor_in_tmux(tmux_session, new_window=True)
def monitor_tmux_windows(tmux_session):
"""Monitor if tmux windows are doing fine. For now, only the sensor, can add others later."""
box_id = BoxSettings.objects.last().box_id
timezone = pytz.timezone(settings.TIME_ZONE)
status = True # start optimistic
tmux_window = tmux_session.find_where({"window_name": "sensor"})
    if tmux_window is None:
        status = False
        logger.info(
            'Cannot find the "sensor" tmux window. Assuming sensor is not running...'
        )
    else:
        tmux_pane = tmux_window.list_panes()[0]
        last_message = tmux_pane.cmd("capture-pane", "-p").stdout[-1]
        if last_message == "sleeping a bit...":
            status = False
            logger.info(
                "The sensor seems to be off (process is sleeping and will try again) ..."
            )
TmuxStatus.objects.update_or_create(
box_id=box_id,
sensor_status=status,
time_stamp=timezone.localize(datetime.now()),
)
class Command(BaseCommand):
def handle(self, *args, **kwargs):
tmux_server = libtmux.Server()
tmux_session = tmux_server.find_where(
{"session_name": settings.TMUX_SESSION_NAME}
)
restart_frequency = (
settings.PROCESS_RESTART_INTERVAL_IN_SECONDS
/ settings.STATUS_MONITORING_INTERVAL_IN_SECONDS
)
logger.info(
"I will restart processes after %d status check(s)..." % restart_frequency
)
monitoring_count = 0
while True:
start_time = time.time()
monitor_tmux_windows(tmux_session)
sleep_until_interval_is_complete(
start_time, settings.STATUS_MONITORING_INTERVAL_IN_SECONDS
)
monitoring_count += 1
if monitoring_count == restart_frequency:
restart_sensor(tmux_session)
monitoring_count = 0
print()
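# Invoked as a Django management command (the command name comes from this
# module's file name): python manage.py monitor_tmux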
| 33.175258 | 115 | 0.686451 | 416 | 3,218 | 5.074519 | 0.387019 | 0.046897 | 0.024159 | 0.016106 | 0.126954 | 0.072951 | 0.034107 | 0 | 0 | 0 | 0 | 0.002023 | 0.232132 | 3,218 | 96 | 116 | 33.520833 | 0.852287 | 0.058421 | 0 | 0.1 | 0 | 0 | 0.130769 | 0.008425 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042857 | false | 0 | 0.185714 | 0 | 0.242857 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
201ab30fb25e805b4259dacde1a4af4992404af1 | 8,244 | py | Python | pySPM/nanoscan.py | BBarbara-fr/pySPM | 6dfd59b0e873173c455b1085e091495cf775f852 | [
"Apache-2.0"
] | 39 | 2016-08-23T19:12:29.000Z | 2022-02-11T11:36:33.000Z | pySPM/nanoscan.py | BBarbara-fr/pySPM | 6dfd59b0e873173c455b1085e091495cf775f852 | [
"Apache-2.0"
] | 24 | 2018-04-26T12:05:00.000Z | 2022-02-27T12:36:51.000Z | pySPM/nanoscan.py | BBarbara-fr/pySPM | 6dfd59b0e873173c455b1085e091495cf775f852 | [
"Apache-2.0"
] | 40 | 2018-01-23T07:11:14.000Z | 2022-03-11T12:41:49.000Z | # -*- coding: utf-8 -*-
# Copyright 2018 Olivier Scholder <o.scholder@gmail.com>
import os
import base64
import xml.etree.ElementTree as ET
import struct
import numpy as np
import pySPM.SPM
from pySPM.SPM import SPM_image, funit
from .utils.misc import aliased, alias, deprecated
@deprecated("getCurve")
def get_curve(filename, channel='Normal Deflection', backward=False):
"""
    Retrieve data that are not stored as images.
    This is typically used for 1D channels, e.g. where the normal deflection is recorded while z is swept.
"""
tree = ET.parse(filename)
root = tree.getroot()
namespace = {'spm': 'http://www.nanoscan.ch/SPM'}
RAW = root.findall("spm:vector/spm:contents/spm:direction/spm:vector/"
"spm:contents/spm:name[spm:v='{direction}']/../spm:channel/spm:vector/"
"spm:contents/spm:name[spm:v='{channel}']/../spm:data/spm:v"
.format(direction=['forward', 'backward'][backward],
channel=channel), namespace)[0].text
start = float(root.findall("spm:vector/spm:contents/spm:axis/spm:vector/"
"spm:contents/spm:start/spm:vector/spm:v", namespace)[0].text)
stop = float(root.findall("spm:vector/spm:contents/spm:axis/spm:vector/"
"spm:contents/spm:stop/spm:vector/spm:v", namespace)[0].text)
unit = root.findall("spm:vector/spm:contents/spm:axis/spm:vector/spm:contents"
"/spm:unit/spm:v", namespace)[0].text
BIN = base64.b64decode(RAW)
N = len(BIN)
vals = np.array(struct.unpack("<"+str(N//4)+"f", BIN))
x = np.linspace(start, stop, len(vals))
return x, vals
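# Usage sketch (file and channel names are placeholders):
#   z, deflection = get_curve('spectroscopy.xml', channel='Normal Deflection')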
@aliased
class Nanoscan():
def __init__(self, filename=None):
if not os.path.exists(filename):
raise IOError('File "{0}" Not Found'.format(filename))
if filename[-4:] != '.xml':
raise TypeError("Nanoscan files should be xml files")
self.filename = filename
tree = ET.parse(filename)
self.root = tree.getroot()
if self.root.tag == "{http://www.nanoscan.ch/SPM}scan":
self.namespaces = {'spm': "http://www.nanoscan.ch/SPM"}
self.type = "Nanoscan"
self.fbPath = "spm:vector/spm:contents/spm:instrumental_parameters/spm:contents/spm:z_control/spm:contents"
self.pixel_size = [int(z) for z in self.__grab(
"spm:vector/spm:contents/spm:size/spm:contents//spm:v")]
uval = float(self.__grab(
".//spm:area//spm:contents/spm:size/spm:contents/spm:fast_axis/spm:v"))
udispu = self.__grab(
".//spm:area//spm:contents/spm:display_unit/spm:v")
udisps = float(self.__grab(
".//spm:area/spm:contents/spm:display_scale/spm:v"))
uname = self.__grab(".//spm:area/spm:contents/spm:unit/spm:v")
x = funit(uval*udisps, udispu)
uval = float(self.__grab(
".//spm:area//spm:contents/spm:size/spm:contents/spm:slow_axis/spm:v"))
y = funit(uval*udisps, udispu)
self.size = {
'unit': x['unit'],
'x': x['value'],
'y': y['value']}
try:
self.feedback = {'channel': self.__grab(
'{0}/spm:z_feedback_channel/spm:v'.format(self.fbPath))}
self.feedback['P'] = {
'value': float(self.__grab('{0}/spm:proportional_z_gain/spm:v'.format(self.fbPath))),
'unit': self.__grab('{0}/spm:proportional_z_gain_unit/spm:v'.format(self.fbPath))}
self.feedback['I'] = {
'value': float(self.__grab('{0}/spm:integral_z_time/spm:v'.format(self.fbPath))),
'unit': self.__grab('{0}/spm:integral_z_time_unit/spm:v'.format(self.fbPath))}
if self.feedback['channel'] == 'df':
self.feedback['channel'] = u'Δf'
            except Exception:  # feedback parameters may be absent from the file
                self.feedback = {}
self.scan_speed = {
z: {
'value': float(self.__grab("spm:vector//spm:direction/spm:vector/spm:contents/spm:name[spm:v='{dir}']/../spm:point_interval/spm:v".format(dir=z))) * self.pixel_size[0],
'unit': self.__grab("spm:vector//spm:direction/spm:vector/spm:contents/spm:name[spm:v='{dir}']/../spm:point_interval_unit/spm:v".format(dir=z))} for z in ['forward', 'backward']}
else:
raise TypeError(
"Unknown or wrong data type. Expecting a valid Nanoscan xml")
def list_channels(self):
"""
Printout the list of stored channels
"""
for d in ['forward','backward']:
print(d)
print("="*len(d))
for z in self.root.findall("spm:vector//spm:direction/spm:vector/spm:contents/spm:name[spm:v='{}']/../spm:channel//spm:contents/spm:name/spm:v".format(d), self.namespaces):
print(" - "+z.text)
print()
def get_channel(self, channel='Topography', backward=False, corr=None):
try:
RAW = self.__grab("spm:vector//spm:direction/spm:vector/spm:contents"
"/spm:name[spm:v='{direction}']/../spm:channel//spm:contents/spm:name[spm:v='{channel}']"
"/../spm:data/spm:v".format(direction=["forward", "backward"][backward], channel=channel))
        except Exception:
            raise KeyError('Channel {0} in {1} scan not found'.format(
                channel, ["forward", "backward"][backward]))
BIN = base64.b64decode(RAW)
        recorded_length = len(BIN) // 4  # four bytes per little-endian float sample
py = int(recorded_length/self.pixel_size[0])
recorded_size = {
'x': self.size['x'],
'y': self.size['y']*py/float(self.pixel_size[1]),
'unit': self.size['unit']}
image_array = np.array(struct.unpack("<%if" % (recorded_length), BIN)).reshape(
(py, self.pixel_size[0]))
return SPM_image(image_array, channel=channel, _type=self.type, real=recorded_size, corr=corr)
def __grab(self, path):
result = [z.text for z in self.root.findall(path, self.namespaces)]
if len(result) == 1:
result = result[0]
return result
@deprecated("arraySummary")
    def array_summary(self):
        res = [y.format(**self.__dict__) for y in
               ["{filename}", "{pixel_size[0]}×{pixel_size[1]}",
                "{size[x]}×{size[y]} {size[unit]}",
                "{scan_speed[forward][value]} {scan_speed[forward][unit]}",
                "{feedback[channel]}",
                "{feedback[P][value]:.2f} {feedback[P][unit]}",
                "{feedback[I][value]:.2f} {feedback[I][unit]}"]]
        return res
@alias("getSummary")
def get_summary(self):
x = funit(self.size['x'], self.size['unit'])
y = funit(self.size['y'], self.size['unit'])
P = funit(self.feedback['P'])
I = funit(self.feedback['I'])
return u"""Feedback: {feedback[channel]} : P:{P[value]}{P[unit]} : I:{I[value]}{I[unit]}
Size: {pixel_size[0]}×{pixel_size[1]} pixels = {x[value]:.3} {x[unit]}×{y[value]:.3} {y[unit]}
Scan Speed: {scanSpeed[value]}{scanSpeed[unit]}/line""".format(
x=x, y=y, P=P, I=I,
feedback=self.feedback, pixel_size=self.pixel_size, size=self.size,
scanSpeed=self.scan_speed['forward'])
@staticmethod
def show_dir_summary(path):
from pySPM.utils import htmlTable
res = [["Filename", "pixel size", "real size",
"scan_speed", "feedback", "P", "I"]]
for x in os.listdir(path):
try:
A = pySPM.Nanoscan(path+x)
res.append([y.format(f=os.path.basename(A.filename), **A.__dict__) for y in
["{f}", "{pixel_size[0]}×{pixel_size[1]}", "{size[x]}×{size[y]} {size[unit]}", "{scan_speed[forward][value]} {scan_speed[forward][unit]}",
"{feedback[channel]}", "{feedback[P][value]:.2f} {feedback[P][unit]}", "{feedback[I][value]:.2f} {feedback[I][unit]}"]])
except:
print("Cannot read image \""+x+"\" skipping it")
htmlTable(res, header=True)
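

# Illustrative usage of the Nanoscan class (assumption: "scan.xml" is a
# Nanoscan XML export supplied by the user, not part of this repository):
if __name__ == "__main__":
    scan = Nanoscan("scan.xml")
    scan.list_channels()
    topography = scan.get_channel("Topography")
    print(scan.get_summary())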
| 49.071429 | 198 | 0.563925 | 1,058 | 8,244 | 4.305293 | 0.189036 | 0.065203 | 0.079912 | 0.065862 | 0.402854 | 0.380461 | 0.333041 | 0.263886 | 0.238639 | 0.227223 | 0 | 0.008031 | 0.259947 | 8,244 | 167 | 199 | 49.365269 | 0.737584 | 0.032872 | 0 | 0.097222 | 0 | 0.076389 | 0.339639 | 0.229663 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.069444 | 0 | 0.166667 | 0.034722 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
201c3ff46ed39f9d31b9bd6af4988cd2d18f6c06 | 1,342 | py | Python | setup/util/logging.py | JackInTaiwan/ViDB | d658fd4f6a1ad2d7d36bb270fde2a373d3cc965d | [
"MIT"
] | 2 | 2021-05-29T06:57:24.000Z | 2021-06-15T09:13:38.000Z | setup/util/logging.py | JackInTaiwan/ViDB | d658fd4f6a1ad2d7d36bb270fde2a373d3cc965d | [
"MIT"
] | null | null | null | setup/util/logging.py | JackInTaiwan/ViDB | d658fd4f6a1ad2d7d36bb270fde2a373d3cc965d | [
"MIT"
] | null | null | null | import os
import logging
import logging.config
def logging_config(log_dir=None, log_file_path=None):
config = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "[%(asctime)s][%(name)s][%(funcName)s][%(levelname)s] %(message)s"
},
"simple": {
"format": "[%(asctime)s] %(message)s"
},
},
"handlers": {
"terminal": {
"level": "INFO",
"formatter": "standard",
"class": "logging.StreamHandler",
}
},
"loggers": {
},
"root": {
"handlers": ["terminal"],
"level": "INFO",
}
}
if log_dir or log_file_path:
log_file_path = log_file_path or os.path.join(log_dir, "output.log")
if not os.path.exists(os.path.dirname(log_file_path)):
os.makedirs(os.path.dirname(log_file_path))
config["handlers"]["file"] = {
"level": "INFO",
"formatter": "standard",
"class": "logging.FileHandler",
"filename": log_file_path,
"mode": "a+",
}
config["root"]["handlers"].append("file")
logging.config.dictConfig(config)
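
# Minimal usage sketch (the log directory below is an assumption chosen for
# illustration):
if __name__ == "__main__":
    logging_config(log_dir="/tmp/vidb-logs")
    logging.getLogger(__name__).info("logging configured for terminal and file")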
| 26.313725 | 92 | 0.469449 | 123 | 1,342 | 4.95935 | 0.390244 | 0.080328 | 0.12623 | 0.081967 | 0.257377 | 0.257377 | 0 | 0 | 0 | 0 | 0 | 0.001182 | 0.369598 | 1,342 | 50 | 93 | 26.84 | 0.719858 | 0 | 0 | 0.119048 | 0 | 0.02381 | 0.269948 | 0.072334 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.071429 | 0 | 0.095238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2020ae402acc11537dc961ac7e378c574b23fb5e | 6,794 | py | Python | chess_net_v2.py | michaelwolz/ChessML | 72d82804cac0554440ef39caf0f25eb399f4a34f | [
"MIT"
] | 7 | 2019-04-05T10:33:28.000Z | 2021-06-07T17:14:55.000Z | chess_net_v2.py | michaelwolz/ChessML | 72d82804cac0554440ef39caf0f25eb399f4a34f | [
"MIT"
] | null | null | null | chess_net_v2.py | michaelwolz/ChessML | 72d82804cac0554440ef39caf0f25eb399f4a34f | [
"MIT"
] | 2 | 2019-03-29T18:57:48.000Z | 2020-09-13T10:11:26.000Z | import torch
import torch.optim as optim
import torch.nn as nn
import torchvision
import time
import progressbar
import os
from torchvision import transforms, models
# Implementation based on resnet18
# Accuracy of 99% after 12 Epochs of training with 31.200 training images and 7.800 validation images
# Where to store the model
MODELPATH = "/content/drive/My Drive/ChessNetData/model/chess-net-v2-sgd.tar"
# Defining basic transform operations. Image size of 224x224 is required by underlying resnet
# The normalization function based on the ImageNet data which was used to train the resnet model
transform = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
# Loading the training and validation data
# Train Data
train_set = torchvision.datasets.ImageFolder(root="/content/data/augmented/train", transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=25, num_workers=2, shuffle=True, drop_last=True)
# Validation Data
val_set = torchvision.datasets.ImageFolder(root="/content/data/augmented/validation", transform=transform)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=25, num_workers=2, shuffle=True, drop_last=True)
# Defining classes: the first letter is the colour (b = black, w = white), the
# second is the piece (b = bishop, k = king, n = knight, p = pawn, q = queen,
# r = rook); "empty" marks an empty square.
classes = ("bb", "bk", "bn", "bp", "bq", "br", "empty", "wb", "wk", "wn", "wp", "wq", "wr")
def train(model, optimizer, criterion):
model.train()
running_loss = 0.0
with progressbar.ProgressBar(max_value=len(train_loader)) as bar:
for i, t_data in enumerate(train_loader):
data, target = t_data
# put data on the gpu if available
if torch.cuda.is_available():
data = data.cuda()
target = target.cuda()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
out = model(data)
loss = criterion(out, target)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
bar.update(i)
if i % 200 == 199:
print(" => Loss:", running_loss / 200)
running_loss = 0.0
def validate(model, epoch=0):
model.eval()
correct = 0
total = 0
class_correct = list(0. for i in range(len(classes)))
class_total = list(0. for i in range(len(classes)))
with torch.no_grad():
for data, target in val_loader:
# put data on the gpu if available
if torch.cuda.is_available():
data = data.cuda()
target = target.cuda()
out = model(data)
_, prediction = torch.max(out.data, 1)
total += target.size(0)
if torch.cuda.is_available():
correct += prediction.eq(target).sum().cpu().item()
else:
correct += prediction.eq(target).sum().item()
c = (prediction == target).squeeze()
for i in range(target.size(0)):
label = target[i]
class_correct[label] += c[i].item()
class_total[label] += 1
print("\nValidation")
print("###################################")
print("Epoch", epoch)
print("Accuracy: %.2f%%" % (100 * correct / total))
print("###################################\n")
for i in range(len(classes)):
try:
print('Accuracy of %5s : %2d%% [%2d/%2d]' %
(classes[i], 100 * class_correct[i] / class_total[i], class_correct[i], class_total[i]))
except ZeroDivisionError:
print('No Accuracy for %s' % classes[i])
return correct / total # Returning accuracy
def save_model(model, optimizer, epoch, best_acc):
# Saving a checkpoint of the training. This is essential for using the trained network and also to resume training
# if it stopped for some reason (e.g. limitations of Google Colab)
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'bestacc': best_acc,
}, MODELPATH)
print("\n------- Checkpoint saved -------\n")
def main():
resume_training = True # resuming training or starting a new one
model = models.resnet18(pretrained=True) # use pretrained version of resnet18
for param in model.parameters():
        param.requires_grad = False  # freeze the backbone so only the new head is trained
n_features = model.fc.in_features # get the number of features for the new last layer
    fc = nn.Sequential(
        nn.Linear(n_features, 320),
        nn.ReLU(),
        nn.Dropout(),
        nn.Linear(320, 13)  # must take the 320 features from the layer above; one output per class
    )
    model.fc = fc  # resnet18 exposes its final layer as `fc` (there is no `classifier` attribute)
# Activate cuda support if available
if torch.cuda.is_available():
print("### Activating cuda support! ###\n")
model = model.cuda()
# Defining the loss function
criterion = nn.CrossEntropyLoss()
# Defining the optimizer
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# Loading model for resuming training
starting_epoch = 0
best_acc = 0
best_epoch = 0
if resume_training:
if os.path.exists(MODELPATH):
state = torch.load(MODELPATH)
model.load_state_dict(state["model_state_dict"])
optimizer.load_state_dict(state["optimizer_state_dict"])
starting_epoch = state["epoch"]
best_acc = state["bestacc"]
best_epoch = state["epoch"]
print("=> Resuming training at epoch %d with best-accuracy of: %.2f%%" % (starting_epoch, 100 * best_acc))
else:
if os.path.exists(MODELPATH):
answer = input("This will overwrite your existing model! Do you want to continue? [y, n]")
if answer != 'y':
exit(0)
print("=> Starting first training of model")
# Start training
epochs = 20 # amount of epochs for training
start = time.time()
print("Start training for %s epochs on %s" % (epochs - starting_epoch, time.ctime()))
for epoch in range(starting_epoch, epochs):
train(model, optimizer, criterion)
acc = validate(model, epoch)
if acc > best_acc:
best_acc = acc
best_epoch = epoch
save_model(model, optimizer, epoch, acc)
end = time.time()
print("Training of the model done.")
print("Time spent:", end - start, "s")
print("Best-Accuracy: %.2f%% after epoch %d" % (100 * best_acc, best_epoch))
if __name__ == "__main__":
main()
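
# Inference sketch (illustrative only: "square.png" is a hypothetical crop of a
# single chessboard square, and the head must be rebuilt exactly as in main()
# before the checkpoint can be loaded):
#
#     from PIL import Image
#     model = models.resnet18()
#     model.fc = nn.Sequential(nn.Linear(model.fc.in_features, 320), nn.ReLU(),
#                              nn.Dropout(), nn.Linear(320, 13))
#     state = torch.load(MODELPATH, map_location="cpu")
#     model.load_state_dict(state["model_state_dict"])
#     model.eval()
#     batch = transform(Image.open("square.png")).unsqueeze(0)
#     print(classes[model(batch).argmax(dim=1).item()])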
| 34.140704 | 118 | 0.611422 | 861 | 6,794 | 4.727062 | 0.315912 | 0.013759 | 0.010811 | 0.012776 | 0.196806 | 0.126781 | 0.109828 | 0.10172 | 0.060934 | 0.060934 | 0 | 0.023986 | 0.263615 | 6,794 | 198 | 119 | 34.313131 | 0.789526 | 0.182661 | 0 | 0.120301 | 0 | 0.007519 | 0.140993 | 0.032621 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030075 | false | 0 | 0.06015 | 0 | 0.097744 | 0.120301 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2020c4305fe293aa2a86925dccc2971f9a3880bb | 961 | py | Python | Patent2Net/app/events/to_be_found_change.py | Patent2net/P2N-v3 | 19f5e7ebd993183bc3c1c6a9676302a71dc9277b | [
"CECILL-B"
] | 11 | 2019-08-05T11:49:11.000Z | 2022-01-28T15:36:12.000Z | Patent2Net/app/events/to_be_found_change.py | Patent2net/P2N-v3 | 19f5e7ebd993183bc3c1c6a9676302a71dc9277b | [
"CECILL-B"
] | 10 | 2019-07-11T11:26:28.000Z | 2022-02-27T13:47:26.000Z | Patent2Net/app/events/to_be_found_change.py | Patent2net/P2N-v3 | 19f5e7ebd993183bc3c1c6a9676302a71dc9277b | [
"CECILL-B"
] | 4 | 2019-04-02T07:11:04.000Z | 2022-02-21T12:26:14.000Z |
class ToBeFoundChange:
"""Event used when the number of resources available for a request has been retrieved and recorded"""
NAME = "TO_BE_FOUND_CHANGE"
def __init__(self, directory, need_spliter, amount):
self.directory = directory
self.need_spliter = need_spliter
self.amount = amount
def serialize(self):
return {
"name": self.NAME,
"data": {
"directory": self.directory,
"need_spliter": self.need_spliter,
"amount": self.amount
}
}
@staticmethod
def deserialize(serializedHook):
data = serializedHook["data"]
directory = data["directory"] if "directory" in data else None
need_spliter = data["need_spliter"] if "need_spliter" in data else None
amount = data["amount"] if "amount" in data else None
return ToBeFoundChange(directory, need_spliter, amount)
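

# Round-trip sketch: serialize an event and rebuild it from the payload (the
# directory name below is a hypothetical example):
if __name__ == "__main__":
    event = ToBeFoundChange("my-corpus", need_spliter=False, amount=1200)
    payload = event.serialize()
    restored = ToBeFoundChange.deserialize(payload)
    assert restored.amount == event.amount and restored.directory == event.directory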
| 32.033333 | 105 | 0.608741 | 103 | 961 | 5.524272 | 0.38835 | 0.173989 | 0.105448 | 0.073814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.30281 | 961 | 30 | 106 | 32.033333 | 0.849254 | 0.098855 | 0 | 0 | 0 | 0 | 0.12907 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0 | 0.045455 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
202123a817addffe802bc7038a57cc62474ba2d3 | 1,187 | py | Python | code/ch06-data-structs/friend_map.py | tamnguyenchi93/python-memory-management-course | e858b1a5e4f423fae5b431a7727a9eb953e8b6a0 | [
"MIT"
] | 30 | 2020-08-24T14:01:26.000Z | 2022-03-26T18:55:55.000Z | code/ch06-data-structs/friend_map.py | danielhao5/python-memory-management-course | 89015877d858488e018e07fad52eec7bf3acd394 | [
"MIT"
] | null | null | null | code/ch06-data-structs/friend_map.py | danielhao5/python-memory-management-course | 89015877d858488e018e07fad52eec7bf3acd394 | [
"MIT"
] | 17 | 2020-08-21T01:52:21.000Z | 2021-11-28T13:11:11.000Z | import weakref
from collections import defaultdict
from typing import List, Dict
from person import Person
__map: Dict[int, List] = defaultdict(list)
def add_friend(person: Person, friend: Person):
if not person or not friend:
return
if person.id == friend.id:
return
if is_friend(person, friend):
return
current_friends = __map[person.id]
current_friends.append(weakref.ref(friend))
def is_friend(person: Person, friend: Person) -> bool:
if not person or not friend:
return False
if person.id == friend.id:
return True
    friends: List[weakref.ref] = __map[person.id]
for ref in friends:
f: Person = ref()
if f and f.id == friend.id:
return True
return False
def get_friends(person: Person) -> List[Person]:
    friends: List[weakref.ref] = __map[person.id]
realized_friends = [p for ref in friends if (p := ref())]
return realized_friends
def erase_person(person: Person):
if person.id in __map:
del __map[person.id]
    for lst in __map.values():
        # Rebuild each list instead of calling remove() while iterating over it.
        lst[:] = [wr for wr in lst if not ((p := wr()) and p.id == person.id)]
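

# Usage sketch (assumption: Person exposes an `id` attribute, as the functions
# above require; the constructor call below is illustrative):
#
#     alice, bob = Person(id=1), Person(id=2)
#     add_friend(alice, bob)
#     assert is_friend(alice, bob)
#     erase_person(bob)
#     assert bob not in get_friends(alice)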
| 22.396226 | 61 | 0.628475 | 165 | 1,187 | 4.381818 | 0.242424 | 0.08852 | 0.060858 | 0.06639 | 0.340249 | 0.224066 | 0.077455 | 0 | 0 | 0 | 0 | 0 | 0.272957 | 1,187 | 52 | 62 | 22.826923 | 0.837775 | 0 | 0 | 0.361111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2021a506098259e1c1e707ac8752765f8b357bc4 | 3,450 | py | Python | buildScripts/Android_build.py | Zhanghuahua/50KEngine | 843bb1fbeebaa42029793a28336bac4870554984 | [
"Apache-2.0"
] | 4 | 2020-05-06T06:18:15.000Z | 2021-11-24T09:27:03.000Z | buildScripts/Android_build.py | Zhanghuahua/50KEngine | 843bb1fbeebaa42029793a28336bac4870554984 | [
"Apache-2.0"
] | null | null | null | buildScripts/Android_build.py | Zhanghuahua/50KEngine | 843bb1fbeebaa42029793a28336bac4870554984 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import shutil
ANDROID_NDK = os.getenv("NDK_HOME")
if ANDROID_NDK is None:
    print("no ndk found")
    sys.exit(1)  # the paths below cannot be assembled without an NDK
else:
    print("found ndk in " + ANDROID_NDK)
BUILD_TYPE="Release"
BUILD_SHARED_LIBS="OFF"
BUILD_HIDDEN_SYMBOL="ON"
BUILD_RTTI="ON"
BUILD_EXCEPTIONS="ON"
ANDROID_STL_32BIT="c++_shared"
ANDROID_STL_64BIT="c++_shared"
ANDROID_PLATFORM="android-19"
ANDROID_TOOLCHAIN_32BIT="clang"
ANDROID_TOOLCHAIN_64BIT="clang"
CMAKE_TOOLCHAIN_FILE = ANDROID_NDK + "/build/cmake/android.toolchain.cmake"
# CMAKE_TOOLCHAIN_FILE = os.path.join(ANDROID_NDK, "build", "cmake", "android.toolchain.cmake")
ALL_ARCHS=["arm64-v8a","armeabi-v7a"]
PROJECT_NAME="FiftyKEngine"
COMMON_FLAGS = " "
COMMON_FLAGS_RELEASE = " -O3"
if(BUILD_HIDDEN_SYMBOL !="OFF"):
COMMON_FLAGS= COMMON_FLAGS+" -fvisibility=hidden -fvisibility-inlines-hidden"
if(BUILD_RTTI!= "OFF"):
COMMON_FLAGS=COMMON_FLAGS+" -frtti"
if(BUILD_EXCEPTIONS != "OFF"):
    COMMON_FLAGS = COMMON_FLAGS + " -fexceptions"
BUILD_C_FLAGS = ""
BUILD_CXX_FLAGS = ""
BUILD_C_FLAGS = BUILD_C_FLAGS+ COMMON_FLAGS
BUILD_CXX_FLAGS = BUILD_CXX_FLAGS + COMMON_FLAGS + " -std=c++11"
for BUILD_ARCH in ALL_ARCHS:
    if(BUILD_ARCH == "armeabi-v7a"):
        # NEON is disabled by default on armv7, so it must be enabled explicitly
        ANDROID_ABI = "armeabi-v7a with NEON"
ANDROID_STL=ANDROID_STL_32BIT
ANDROID_TOOLCHAIN=ANDROID_TOOLCHAIN_32BIT
CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION = "gcc"
else:
ANDROID_ABI=BUILD_ARCH
ANDROID_STL=ANDROID_STL_64BIT
ANDROID_TOOLCHAIN=ANDROID_TOOLCHAIN_64BIT
CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION = "clang"
CURRENT_DIR = os.path.split(os.path.abspath(__file__))
CURRENT_DIR = CURRENT_DIR[0]
OUTPUT_DIR = os.path.join(CURRENT_DIR, "lib"+PROJECT_NAME, "android", BUILD_ARCH)
BUILD_DIR = os.path.join(CURRENT_DIR, "lib"+PROJECT_NAME+"Symbols", "android", BUILD_ARCH)
if(os.path.exists(BUILD_DIR)):
shutil.rmtree(BUILD_DIR)
os.makedirs(BUILD_DIR)
if(os.path.exists(OUTPUT_DIR)):
shutil.rmtree(OUTPUT_DIR)
os.makedirs(OUTPUT_DIR)
CMAKE_C_FLAGS = BUILD_C_FLAGS
CMAKE_CXX_FLAGS = BUILD_CXX_FLAGS
CMakeCommand = "cmake"
CMakeCommand += (" -DCMAKE_ANDROID_NDK_TOOLCHAIN_VERSION=" + CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION)
CMakeCommand += (" -DCMAKE_TOOLCHAIN_FILE=" + CMAKE_TOOLCHAIN_FILE)
CMakeCommand += (" -DANDROID_NDK=" + ANDROID_NDK)
CMakeCommand += (" -DANDROID_PLATFORM=" + ANDROID_PLATFORM)
CMakeCommand += (" -DANDROID_ABI=" + ANDROID_ABI)
CMakeCommand += (" -DCMAKE_INSTALL_PREFIX=/")
CMakeCommand += (" -DCMAKE_BUILD_TYPE=" + CMAKE_TOOLCHAIN_FILE)
CMakeCommand += (" -DANDROID_STL=" + ANDROID_STL)
CMakeCommand += (" -DCMAKE_C_FLAGS=" + CMAKE_C_FLAGS)
CMakeCommand += (" -DCMAKE_CXX_FLAGS=" + CMAKE_CXX_FLAGS)
CMakeCommand += (" -DBUILD_SHARED_LIBS=" + BUILD_SHARED_LIBS)
CMakeCommand += (" -DANDROID_TOOLCHAIN=" + ANDROID_TOOLCHAIN)
CMakeCommand += (" -DCMAKE_C_FLAGS_RELEASE=" + COMMON_FLAGS_RELEASE)
CMakeCommand += (" -DCMAKE_CXX_FLAGS_RELEASE=" + COMMON_FLAGS_RELEASE)
CMakeCommand += (" -DANDROID_ARM_NEON=" + "TRUE")
CMakeCommand += " " + CURRENT_DIR+"/.."
print(CMakeCommand)
bashCommand = "cd " + BUILD_DIR
bashCommand += "&& " + CMakeCommand
bashCommand += "&& make all -j8"
bashCommand += "&& make install/strip DESTDIR=" + OUTPUT_DIR
os.system(bashCommand)
# os.system("cd " + CURRENT_DIR);
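
# Invocation sketch (assumption: NDK_HOME points at an installed Android NDK):
#
#     NDK_HOME=/opt/android-ndk python Android_build.py
#
# Each arch in ALL_ARCHS gets its own build directory and a stripped install
# tree under libFiftyKEngine/android/<arch>.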
| 31.081081 | 101 | 0.713333 | 426 | 3,450 | 5.382629 | 0.223005 | 0.057567 | 0.041867 | 0.038378 | 0.268208 | 0.136066 | 0.099433 | 0.063672 | 0.032272 | 0 | 0 | 0.010327 | 0.157971 | 3,450 | 110 | 102 | 31.363636 | 0.779002 | 0.041159 | 0 | 0.050633 | 0 | 0 | 0.212534 | 0.060248 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.037975 | 0 | 0.037975 | 0.037975 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
20250ba40775118058acfaa6b23f44f28eae0339 | 5,143 | py | Python | run_project.py | bleotiu/Mosaics | 3ef0fcd9ea37de03c15bc8eac4dd02e80f0cb4f4 | [
"MIT"
] | null | null | null | run_project.py | bleotiu/Mosaics | 3ef0fcd9ea37de03c15bc8eac4dd02e80f0cb4f4 | [
"MIT"
] | null | null | null | run_project.py | bleotiu/Mosaics | 3ef0fcd9ea37de03c15bc8eac4dd02e80f0cb4f4 | [
"MIT"
] | null | null | null | """
MOSAIC PROJECT
"""
# The algorithm's parameters are defined in the Parameters class.
from parameters import *
from build_mosaic import *
import timeit
import numpy as np
import os
import cv2 as cv
dir_path = './../data/imaginiTest/'
filenames = [('./../data/imaginiTest/ferrari.jpeg', 'ferrari'),
('./../data/imaginiTest/adams.JPG', 'adams'),
('./../data/imaginiTest/liberty.jpg', 'liberty'),
('./../data/imaginiTest/obama.jpeg', 'obama'),
('./../data/imaginiTest/romania.jpeg', 'romania'),
('./../data/imaginiTest/tomJerry.jpeg', 'tomJerry')]
small_images_path = './../data/colectie/'
sizes = [25, 50, 75, 100]
layouts = ["caroiaj", "aleator"]
criteria = 'distantaCuloareMedie'
hexagons = [False, True]
neighbours = [False, True]
start_time = timeit.default_timer()
#(a)
for file in filenames:
for size in sizes:
file_name, name = file
params = Parameters(file_name)
params.small_images_dir = small_images_path
params.image_type = 'jpg'
params.num_pieces_horizontal = size
params.show_small_images = False
params.layout = 'caroiaj'
params.hexagon = False
params.different_neighbours = False
params.criterion = criteria
mosaic = build_mosaic(params)
cv.imwrite(name + '_' + size.__str__() + '_caroiaj.png', mosaic)
#(b)
size = 100
for file in filenames:
file_name, name = file
params = Parameters(file_name)
params.small_images_dir = small_images_path
params.image_type = 'jpg'
params.num_pieces_horizontal = size
params.show_small_images = False
params.layout = 'aleator'
params.hexagon = False
params.different_neighbours = False
params.criterion = criteria
mosaic = build_mosaic(params)
cv.imwrite(name + '_' + size.__str__() + '_random.png', mosaic)
#(c)
size = 100
for file in filenames:
file_name, name = file
params = Parameters(file_name)
params.small_images_dir = small_images_path
params.image_type = 'jpg'
params.num_pieces_horizontal = size
params.show_small_images = False
params.layout = 'caroiaj'
params.hexagon = False
params.different_neighbours = True
params.criterion = criteria
mosaic = build_mosaic(params)
cv.imwrite(name + '_' + size.__str__() + '_caroiaj_different_neighbours.png', mosaic)
#(d)
# cifar_names = [b'airplane', b'automobile', b'bird', b'cat', b'deer',
# b'dog', b'frog', b'horse', b'ship', b'truck']
filenames2 = [('./../data/imaginiNoi/troian.jpg', 'troian', b'horse'),
('./../data/imaginiNoi/pinguini.jpg', 'pinguini', b'bird'),
('./../data/imaginiNoi/dacia.jpg', 'dacia', b'automobile'),
('./../data/imaginiNoi/snoopdogg.jpg', 'snoopdogg', b'dog'),
('./../data/imaginiNoi/frog.jpg', 'frog', b'frog')]
cifar_dir_path = './../data/cifar-10-batches-py/'
cifar_path = './../data/cifar-10-batches-py/data_batch_1'
size = 100
for file in filenames2:
file_name, name, cifar_name = file
params = Parameters(file_name)
params.small_images_dir = cifar_dir_path
params.image_type = 'jpg'
params.num_pieces_horizontal = size
params.show_small_images = False
params.layout = 'caroiaj'
params.hexagon = False
params.different_neighbours = False
params.criterion = criteria
params.cifar = True
params.cifar_name = cifar_name
mosaic = build_mosaic(params)
# These lines are optional if you want to resize the mosaic
# so that the image won't occupy a tone of space
# if params.grayscale:
# H, W = mosaic.shape
# else:
# H, W, _ = mosaic.shape
# mosaic = cv.resize(mosaic, (H // 4, W // 4))
cv.imwrite(name + '_' + size.__str__() + '_cifar_' + cifar_name.decode('ascii') + '.png', mosaic)
#(e)
size = 100
for file in filenames:
file_name, name = file
params = Parameters(file_name)
params.small_images_dir = small_images_path
params.image_type = 'jpg'
params.num_pieces_horizontal = size
params.show_small_images = False
params.layout = 'caroiaj'
params.hexagon = True
params.different_neighbours = False
params.criterion = criteria
mosaic = build_mosaic(params)
cv.imwrite(name + '_' + size.__str__() + '_hexagoane.png', mosaic)
#(f)
size = 100
for file in filenames:
file_name, name = file
params = Parameters(file_name)
params.small_images_dir = small_images_path
params.image_type = 'jpg'
params.num_pieces_horizontal = size
params.show_small_images = False
params.layout = 'caroiaj'
params.hexagon = True
params.different_neighbours = True
params.criterion = criteria
mosaic = build_mosaic(params)
cv.imwrite(name + '_' + size.__str__() + '_hexagoane_different_neighbours.png', mosaic)
end_time = timeit.default_timer()
print('Entire Project running time: %f s.' % (end_time - start_time))
| 32.967949 | 102 | 0.637177 | 611 | 5,143 | 5.134206 | 0.220949 | 0.063118 | 0.02869 | 0.045904 | 0.577941 | 0.566465 | 0.551164 | 0.551164 | 0.551164 | 0.551164 | 0 | 0.009396 | 0.234299 | 5,143 | 155 | 103 | 33.180645 | 0.787202 | 0.087498 | 0 | 0.637931 | 0 | 0 | 0.181818 | 0.114634 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.051724 | 0 | 0.051724 | 0.008621 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
20255a880a1144e9646a6cba9442f38fbfcf88ba | 28,776 | py | Python | tests/test_customer.py | psantori/contacthub-sdk-python | 03b6dba72ac0a5c34775b409f28b501894cae080 | [
"Apache-2.0"
] | null | null | null | tests/test_customer.py | psantori/contacthub-sdk-python | 03b6dba72ac0a5c34775b409f28b501894cae080 | [
"Apache-2.0"
] | null | null | null | tests/test_customer.py | psantori/contacthub-sdk-python | 03b6dba72ac0a5c34775b409f28b501894cae080 | [
"Apache-2.0"
] | null | null | null | import json
import unittest
from datetime import datetime
import mock
from contacthub.lib.paginated_list import PaginatedList
from contacthub.lib.read_only_list import ReadOnlyList
from contacthub.models.customer import Customer
from contacthub.models.education import Education
from contacthub.models.properties import Properties
from contacthub.models.event import Event
from contacthub.models.job import Job
from contacthub.models.like import Like
from contacthub.models.subscription import Subscription
from contacthub.workspace import Workspace
from copy import deepcopy
from requests import HTTPError
from tests.utility import FakeHTTPResponse
class TestCustomer(unittest.TestCase):
@classmethod
@mock.patch('requests.get', return_value=FakeHTTPResponse())
def setUp(cls, mock_get):
w = Workspace(workspace_id="123", token="456")
cls.node = w.get_node("123")
cls.customers = cls.node.get_customers()
cls.headers_expected = {'Authorization': 'Bearer 456', 'Content-Type': 'application/json'}
cls.base_url_events = 'https://api.contactlab.it/hub/v1/workspaces/123/events'
cls.base_url_customer = 'https://api.contactlab.it/hub/v1/workspaces/123/customers'
@classmethod
def tearDown(cls):
pass
def test_customer_base(self):
for customer in self.customers:
assert type(customer.base) is Properties, type(customer.base)
def test_customer_tags(self):
tags = self.customers[0].tags
assert type(tags) is Properties, type(tags)
assert type(tags.auto) is ReadOnlyList, type(tags.auto)
assert type(tags.manual) is ReadOnlyList, type(tags.manual)
assert tags.auto[0] == 'auto', tags.auto[0]
assert tags.manual[0] == 'manual', tags.manual[0]
    def test_customer_tags_wrong_attr(self):
try:
self.customers[0].tags.attr
except AttributeError as e:
assert 'attr' in str(e), str(e)
def test_customer_tags_empty(self):
tags = self.customers[1].tags
assert type(tags) is Properties, type(tags)
assert type(tags.auto) is ReadOnlyList, type(tags.auto)
assert type(tags.manual) is ReadOnlyList, type(tags.manual)
assert len(tags.auto) == 0, len(tags.auto)
assert len(tags.manual) == 0, len(tags.manual)
def test_customer_contacts_other_contacts(self):
other_contact = self.customers[0].base.contacts.otherContacts[0]
assert type(other_contact) is Properties, type(other_contact)
assert other_contact.name == 'name', other_contact.name
assert other_contact.value == 'value', other_contact.value
def test_customer_contacts_mobile_devices(self):
mobile_device = self.customers[0].base.contacts.mobileDevices[0]
assert type(mobile_device) is Properties, type(mobile_device)
assert mobile_device.identifier == 'identifier', mobile_device.name
assert mobile_device.name == 'name', mobile_device.value
def test_customer_contacts(self):
contacts = self.customers[0].base.contacts
assert type(contacts) is Properties, type(contacts)
assert contacts.email == 'email@email.it', contacts.email
assert contacts.fax == 'fax', contacts.fax
assert contacts.mobilePhone == 'mobilePhone', contacts.mobilePhone
assert contacts.phone == 'phone', contacts.phone
assert type(contacts.otherContacts) is ReadOnlyList, type(contacts.otherContacts)
assert type(contacts.mobileDevices) is ReadOnlyList, type(contacts.mobileDevices)
def test_customer_contacts_other_contacts_empty(self):
other_contacts = self.customers[1].base.contacts.otherContacts
assert len(other_contacts) == 0, len(other_contacts)
def test_customer_contacts_mobile_devices_empty(self):
mobile_devices = self.customers[1].base.contacts.mobileDevices
assert len(mobile_devices) == 0, len(mobile_devices)
def test_customer_contacts_empty(self):
contacts = self.customers[1].base.contacts
assert type(contacts) is Properties, type(contacts)
assert contacts.fax is None, contacts.fax
assert contacts.mobilePhone is None, contacts.mobilePhone
assert contacts.phone is None, contacts.phone
assert type(contacts.otherContacts) is ReadOnlyList, type(contacts.otherContacts)
assert type(contacts.mobileDevices) is ReadOnlyList, type(contacts.mobileDevices)
def test_customer_credentials(self):
credentials = self.customers[0].base.credential
assert type(credentials) is Properties, type(credentials)
assert credentials.username == 'username', credentials.username
assert credentials.password == 'password', credentials.password
def test_customer_credentials_empty(self):
credentials = self.customers[1].base.credential
assert credentials is None, credentials
def test_customer_education(self):
educations = self.customers[0].base.educations
assert type(educations) is ReadOnlyList, type(educations)
education = educations[0]
assert type(education) is Education, type(education)
assert education.schoolType == Education.SCHOOL_TYPES.COLLEGE, education.schoolType
assert education.schoolName == 'schoolName', education.schoolName
assert education.schoolConcentration == 'schoolConcentration', education.schoolConcentration
assert education.startYear == 1994, education.startYear
assert education.endYear == 2000, education.endYear
assert education.isCurrent, education.isCurrent
def test_customer_unexsistant_attribute(self):
educations = self.customers[0].base.educations
assert type(educations) is ReadOnlyList, type(educations)
education = educations[0]
try:
attr = education.attr
except AttributeError as e:
assert 'attr' in str(e), str(e)
def test_customer_education_empty(self):
educations = self.customers[1].base.educations
assert type(educations) is ReadOnlyList, type(educations)
assert len(educations) == 0, len(educations)
def test_customer_subscriptions(self):
subscriptions = self.customers[0].base.subscriptions
assert type(subscriptions) is ReadOnlyList, type(subscriptions)
subscription = subscriptions[0]
assert type(subscription) is Subscription, type(subscription)
assert subscription.id == "01", subscription.id
assert subscription.name == "name", subscription.name
assert subscription.type == "type", subscription.type
assert subscription.subscribed, subscription.subscribed
# assert type(subscription.startDate) is datetime, type(subscription.startDate)
# assert type(subscription.endDate) is datetime, type(subscription.endDate)
assert subscription.subscriberId == "subscriberId", subscription.id
# assert type(subscription.registeredAt) is datetime, type(subscription.registeredAt)
# assert type(subscription.updatedAt) is datetime, type(subscription.updatedAt)
assert type(subscription.preferences) is ReadOnlyList, type(subscription.preferences)
def test_customer_subscriptions_preferences(self):
preferences = self.customers[0].base.subscriptions[0].preferences
assert type(preferences) is ReadOnlyList, type(preferences)
preference = preferences[0]
assert type(preference) is Properties, type(preference)
assert preference.key == "key", preference.key
assert preference.value == "value", preference.value
def test_customer_subscriptions_empty(self):
subscriptions = self.customers[1].base.subscriptions
assert type(subscriptions) is ReadOnlyList, type(subscriptions)
assert len(subscriptions) == 0, len(subscriptions)
def test_customer_jobs(self):
jobs = self.customers[0].base.jobs
assert type(jobs) is ReadOnlyList, type(jobs)
job = jobs[0]
assert type(job) is Job, type(job)
assert job.companyIndustry == 'companyIndustry', job.companyIndustry
assert job.companyName == 'companyName', job.companyName
assert job.jobTitle == 'jobTitle', job.jobTitle
assert job.isCurrent, job.isCurrent
def test_customer_like(self):
likes = self.customers[0].base.likes
assert type(likes) is ReadOnlyList, type(likes)
like = likes[0]
assert type(like) is Like, type(like)
def test_customer_jobs_empty(self):
jobs = self.customers[1].base.jobs
assert type(jobs) is ReadOnlyList, type(jobs)
assert len(jobs) == 0, len(jobs)
def test_customer_address(self):
address = self.customers[0].base.address
assert type(address) is Properties, type(address)
assert address.street == 'street', address.street
assert address.city == 'city', address.city
assert address.country == 'country', address.country
assert address.province == 'province', address.province
assert address.zip == 'zip', address.zip
assert type(address.geo) is Properties, type(address.geo)
def test_customer_address_geo(self):
geo = self.customers[0].base.address.geo
assert type(geo.lat) is int, type(geo.lat)
assert type(geo.lon) is int, type(geo.lon)
def test_customer_address_empty(self):
address = self.customers[1].base.address
assert address is None, address
def test_customer_social_profile(self):
social_profile = self.customers[0].base.socialProfile
assert social_profile.facebook == 'facebook', social_profile.facebook
assert social_profile.google == 'google', social_profile.google
assert social_profile.instagram == 'instagram', social_profile.instagram
assert social_profile.linkedin == 'linkedin', social_profile.linkedin
assert social_profile.qzone == 'qzone', social_profile.qzone
assert social_profile.twitter == 'twitter', social_profile.twitter
def test_customer_social_profile_empty(self):
social_profile = self.customers[1].base.socialProfile
assert social_profile is None, social_profile
def test_customer_unexistent_attr(self):
with self.assertRaises(AttributeError) as context:
attr = self.customers[0].attr
self.assertTrue('attr' in str(context.exception))
    def test_customer_set_attr(self):
self.customers[0].externalId = 3
assert self.customers[0].externalId == 3, self.customers[0].externalId
def test_customer_address_unexistent_attr(self):
with self.assertRaises(AttributeError) as context:
attr = self.customers[0].base.address.attr
self.assertTrue('attr' in str(context.exception))
def test_customer_contacts_unexistent_attr(self):
with self.assertRaises(AttributeError) as context:
attr = self.customers[0].base.contacts.attr
self.assertTrue('attr' in str(context.exception))
def test_customer_base_unexistent_attr(self):
with self.assertRaises(AttributeError) as context:
attr = self.customers[0].base.attr
self.assertTrue('attr' in str(context.exception))
def test_customer_subscription_unexistent_attr(self):
with self.assertRaises(AttributeError) as context:
attr = self.customers[0].base.subscriptions[0].attr
self.assertTrue('attr' in str(context.exception))
def test_customer_properties_unexistent_attr(self):
with self.assertRaises(AttributeError) as context:
p = Properties({'attributo': 1})
attr = p.attr
self.assertTrue('attr' in str(context.exception))
def test_customer_job_unexistent_attr(self):
with self.assertRaises(AttributeError) as context:
attr = self.customers[0].base.jobs[0].attr
self.assertTrue('attr' in str(context.exception))
def test_customer_like_unexistent_attr(self):
with self.assertRaises(AttributeError) as context:
attr = self.customers[0].base.likes[0].attr
self.assertTrue('attr' in str(context.exception))
@mock.patch('requests.get', return_value=FakeHTTPResponse(resp_path='tests/util/fake_event_response'))
def test_all_events(self, mock_get_event):
events = self.customers[0].get_events()
params_expected = {'customerId': self.customers[0].id}
mock_get_event.assert_called_with(self.base_url_events, params=params_expected, headers=self.headers_expected)
assert isinstance(events, PaginatedList), type(events)
assert events[0].type == Event.TYPES.ADDED_COMPARE, events[0].type
def test_all_events_new_customer(self):
try:
Customer(node=self.node).get_events()
except Exception as e:
assert 'events' in str(e), str(e)
def test_customer_create_extra(self):
c = Customer(node=self.node, extra='extra')
assert c.attributes['extra'] == 'extra', c.attributes['extra']
assert c.extra == 'extra', c.extra
@mock.patch('requests.delete', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_delete(self, mock_delete):
id = self.customers[0].id
self.customers[0].delete()
mock_delete.assert_called_with(self.base_url_customer + '/' + id, headers=self.headers_expected)
def test_delete_created_new_customer(self):
try:
Customer(node=self.node).delete()
except KeyError as e:
assert 'id' in str(e), str(e)
@mock.patch('requests.delete', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response',
status_code=401))
def test_delete_not_permitted(self, mock_delete):
try:
self.customers[0].delete()
except HTTPError as e:
assert 'Message' in str(e), str(e)
@mock.patch('requests.delete', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_delete_created(self, mock_delete):
Customer(id='01', node=self.node).delete()
mock_delete.assert_called_with(self.base_url_customer + '/01', headers=self.headers_expected)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.post')
def test_post_customer_creation_first_method(self, mock_post):
expected_body = {'base': {'contacts': {'email': 'email@email.email'}}, 'extra': 'extra',
'extended': {'prova': 'prova'}, 'tags': {'auto': ['auto'], 'manual': ['manual']}}
mock_post.return_value = json.loads(FakeHTTPResponse(resp_path='tests/util/fake_post_response').text)
c = Customer(node=self.node,
base=Properties(
contacts=Properties(email='email@email.email')
)
)
c.extra = 'extra'
c.extended.prova = 'prova'
c.tags.auto = ['auto']
c.tags.manual = ['manual']
c.post()
mock_post.assert_called_with(body=expected_body, force_update=False)
# @mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.post')
# def test_post_customer_creation_second_method(self, mock_post):
# expected_body = {'base': {'contacts': {'email': 'email@email.email'}}, 'extra': 'extra'}
# mock_post.return_value = json.loads(FakeHTTPResponse(resp_path='tests/util/fake_post_response').text)
# c = Customer(base=Properties(), node=self.node)
# c.base.contacts = Properties(email='email@email.email')
# c.extra = 'extra'
# posted = c.post()
# mock_post.assert_called_with(body=expected_body, force_update=False)
# assert isinstance(posted, Customer), type(posted)
# assert posted.base.contacts.email == c.base.contacts.email, posted.base.contacts.email
# assert posted.extra == c.extra, posted.extra
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.post')
def test_post_customer_creation_second_method(self, mock_post):
expected_body = {'base': {'contacts': {'email': 'email@email.email'}}, 'extra': 'extra', 'extended': {},
'tags': {'auto': [], 'manual': []}}
mock_post.return_value = json.loads(FakeHTTPResponse(resp_path='tests/util/fake_post_response').text)
c = Customer(node=self.node, base=Properties())
c.base.contacts = {'email': 'email@email.email'}
c.extra = 'extra'
c.post()
mock_post.assert_called_with(body=expected_body, force_update=False)
@mock.patch('requests.patch', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_patch(self, mock_patch):
self.customers[0].base.firstName = 'fn'
self.customers[0].patch()
body = {'base': {'firstName': 'fn'}}
mock_patch.assert_called_with(self.base_url_customer + '/' + self.customers[0].id,
headers=self.headers_expected, json=body)
@mock.patch('requests.put', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_put(self, mock_patch):
self.customers[0].base.firstName = 'fn'
self.customers[0].put()
body = deepcopy(self.customers[0].attributes)
body.pop('updatedAt')
body.pop('registeredAt')
mock_patch.assert_called_with(self.base_url_customer + '/' + self.customers[0].id,
headers=self.headers_expected, json=body)
@mock.patch('requests.patch', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_patch_entity(self, mock_patch):
self.customers[0].extended = Properties(a=1, prova=Properties(b=1))
self.customers[0].patch()
body = {'extended': {'prova': {'b': 1, 'oggetto': None, 'list': []}, 'a': 1}}
mock_patch.assert_called_with(self.base_url_customer + '/' + self.customers[0].id,
headers=self.headers_expected, json=body)
@mock.patch('requests.patch', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_patch_entity_extended_and_base(self, mock_patch):
self.customers[0].extended = Properties(a=1, prova=Properties(b=1))
self.customers[0].base.firstName = 'fn'
self.customers[0].patch()
body = {'extended': {'prova': {'b': 1, 'oggetto': None, 'list': []}, 'a': 1}, 'base': {'firstName': 'fn'}}
mock_patch.assert_called_with(self.base_url_customer + '/' + self.customers[0].id,
headers=self.headers_expected, json=body)
@mock.patch('requests.patch', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_patch_extended_entity_and_base_entity(self, mock_patch):
self.customers[0].extended = Properties(a=1, prova=Properties(b=1))
self.customers[0].base = Properties(contacts=Properties(email='email'))
self.customers[0].patch()
body = {'extended': {'prova': {'b': 1, 'oggetto': None, 'list': []}, 'a': 1},
'base': {
'pictureUrl': None, 'title': None, 'prefix': None, 'firstName': None, 'lastName': None,
'middleName': None, 'gender': None, 'dob': None, 'locale': None, 'timezone': None,
'contacts':
{'email': 'email', 'fax': None, 'mobilePhone': None, 'phone': None, 'otherContacts': [],
'mobileDevices': []},
'address': None, 'credential': None, 'educations': [], 'likes': [], 'socialProfile': None,
'jobs': [], 'subscriptions': []}}
mock_patch.assert_called_with(self.base_url_customer + '/' + self.customers[0].id,
headers=self.headers_expected, json=body)
@mock.patch('requests.patch', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_patch_entity_with_entity(self, mock_patch):
self.customers[0].extended = Properties(a=1, prova=Properties(b=1))
self.customers[0].base.contacts = Properties(email='email')
self.customers[0].patch()
body = {'extended': {'prova': {'b': 1, 'oggetto': None, 'list': []}, 'a': 1}, 'base':
{'contacts': {'email': 'email', 'fax': None, 'mobilePhone': None, 'phone': None, 'otherContacts': [],
'mobileDevices': []}}}
mock_patch.assert_called_with(self.base_url_customer + '/' + self.customers[0].id,
headers=self.headers_expected, json=body)
@mock.patch('requests.patch', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_patch_entity_with_rename(self, mock_patch):
self.customers[0].extended = Properties(a=1, prova=Properties(b=1))
self.customers[0].base.contacts = Properties(email1='email')
self.customers[0].patch()
body = {'extended': {'prova': {'b': 1, 'oggetto': None, 'list': []}, 'a': 1}, 'base':
{'contacts': {'email': None, 'email1': 'email', 'fax': None, 'mobilePhone': None, 'phone': None,
'otherContacts': [],
'mobileDevices': []}}}
mock_patch.assert_called_with(self.base_url_customer + '/' + self.customers[0].id,
headers=self.headers_expected, json=body)
@mock.patch('requests.patch', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_patch_entity_with_rename_dict(self, mock_patch):
self.customers[0].extended = Properties(a=1, prova=Properties(b=1))
self.customers[0].base.contacts = Properties(email1=Properties(a=1))
self.customers[0].patch()
body = {'extended': {'prova': {'b': 1, 'oggetto': None, 'list': []}, 'a': 1}, 'base':
{'contacts': {'email': None, 'email1': {'a': 1}, 'fax': None, 'mobilePhone': None, 'phone': None,
'otherContacts': [],
'mobileDevices': []}}}
mock_patch.assert_called_with(self.base_url_customer + '/' + self.customers[0].id,
headers=self.headers_expected, json=body)
@mock.patch('requests.patch', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_patch_entity_list(self, mock_patch):
self.customers[0].extended = Properties(a=1, prova=Properties(b=1))
self.customers[0].base.contacts.otherContacts = [Properties(email1=Properties(a=1))]
self.customers[0].patch()
body = {'extended': {'prova': {'b': 1, 'oggetto': None, 'list': []}, 'a': 1}, 'base':
{'contacts': {'otherContacts': [{'email1': {'a': 1}}]}}}
mock_patch.assert_called_with(self.base_url_customer + '/' + self.customers[0].id,
headers=self.headers_expected, json=body)
@mock.patch('requests.patch', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_patch_entity_new_list(self, mock_patch):
self.customers[0].base.contacts = Properties(email='email')
self.customers[0].base.contacts.otherContacts = [Properties(email1=Properties(a=1))]
self.customers[0].patch()
body = {'base': {
'contacts': {'email': 'email', 'fax': None, 'mobilePhone': None, 'phone': None, 'mobileDevices': [],
'otherContacts': [{'email1': {'a': 1}}]}}}
mock_patch.assert_called_with(self.base_url_customer + '/' + self.customers[0].id,
headers=self.headers_expected, json=body)
@mock.patch('requests.patch', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_patch_entity_new_list_with_entities(self, mock_patch):
self.customers[0].base.contacts = Properties(email='email')
self.customers[0].base.contacts.otherContacts = [Properties(email1=Properties(a=Properties(b=1)))]
self.customers[0].patch()
body = {'base': {
'contacts': {'email': 'email', 'fax': None, 'mobilePhone': None, 'phone': None, 'mobileDevices': [],
'otherContacts': [{'email1': {'a': {'b': 1}}}]}}}
mock_patch.assert_called_with(self.base_url_customer + '/' + self.customers[0].id,
headers=self.headers_expected, json=body)
@mock.patch('requests.patch', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_patch_all_extended(self, mock_patch):
self.customers[0].extended = Properties()
self.customers[0].patch()
body = {'extended': {'prova': None}}
mock_patch.assert_called_with(self.base_url_customer + '/' + self.customers[0].id,
headers=self.headers_expected, json=body)
@mock.patch('requests.patch', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_patch_all_base(self, mock_patch):
self.customers[0].base = Properties()
self.customers[0].patch()
body = {'base': {'pictureUrl': None, 'title': None, 'prefix': None, 'firstName': None, 'lastName': None,
'middleName': None, 'gender': None, 'dob': None, 'locale': None,
'timezone': None, 'contacts': None, 'address': None, 'credential': None, 'educations': [],
'likes': [], 'socialProfile': None, 'jobs': [], 'subscriptions': []}}
mock_patch.assert_called_with(self.base_url_customer + '/' + self.customers[0].id,
headers=self.headers_expected, json=body)
@mock.patch('requests.patch', return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response'))
def test_patch_elem_in_list(self, mock_patch):
self.customers[0].base.contacts.otherContacts[0].type = 'TYPE'
self.customers[0].patch()
body = {'base': {'contacts': {'otherContacts': [{'name': 'name', 'type': 'TYPE', 'value': 'value'},
{'name': 'Casa di piero', 'type': 'PHONE',
'value': '12343241'}]}}}
mock_patch.assert_called_with(self.base_url_customer + '/' + self.customers[0].id,
headers=self.headers_expected, json=body)
@mock.patch('requests.post',
return_value=FakeHTTPResponse(resp_path='tests/util/fake_conflict_response', status_code=409))
@mock.patch('requests.patch',
return_value=FakeHTTPResponse(resp_path='tests/util/fake_post_response', status_code=200))
def test_post_with_force_update(self, mock_patch, mock_post):
body = {'extra': 'extra', 'base': {'contacts': {'email': 'email@email.email'}}}
c = Customer.from_dict(node=self.node, attributes=body)
posted = c.post(force_update=True)
mock_patch.assert_called_with(self.base_url_customer + '/01', headers=self.headers_expected, json=body)
def test_create_customer_with_default_schema(self):
c = Customer(node=self.node, default_attributes={'prop': {'prop1': 'value1'}, 'prop2': 'value2'},
prop3=Properties(prop4='value4'))
internal = {'prop': {'prop1': 'value1'}, 'prop2': 'value2', 'prop3': {'prop4': 'value4'}}
assert c.attributes == internal, c.attributes
def test_from_dict_no_props(self):
c = Customer.from_dict(node=self.node)
assert c.attributes == {}, c.attributes
prop = {}
c = Customer.from_dict(node=self.node, attributes= prop)
assert c.attributes is prop, c.attributes
def test_customer_patch_new_prop(self):
c = Customer(node=self.node, default_attributes={'prop': {'prop1': 'value1'}, 'prop2': 'value2'},
prop3=Properties(prop4='value4'))
c.prop5 = Properties(prop6='value5')
assert c.mute == {'prop5': {'prop6': 'value5'}}, c.mute
@mock.patch('requests.put', return_value=FakeHTTPResponse())
def test_put_no_timezone(self, mock_put):
c = Customer(node=self.node, id='01')
c.base.timezone = None
c.put()
        params_expected = {'id': '01', 'base': {'contacts': {}, 'timezone': 'Europe/Rome'}, 'extended': {},
                           'tags': {'manual': [], 'auto': []}}
mock_put.assert_called_with(self.base_url_customer + '/01', headers=self.headers_expected, json=params_expected) | 53.387755 | 120 | 0.654678 | 3,334 | 28,776 | 5.485303 | 0.077684 | 0.065398 | 0.062008 | 0.03248 | 0.631616 | 0.595363 | 0.575678 | 0.556102 | 0.532098 | 0.502187 | 0 | 0.011136 | 0.213581 | 28,776 | 539 | 120 | 53.387755 | 0.796995 | 0.037427 | 0 | 0.334076 | 0 | 0 | 0.114294 | 0.027707 | 0 | 0 | 0 | 0 | 0.309577 | 1 | 0.144766 | false | 0.004454 | 0.037862 | 0 | 0.184855 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2027748f2846da6b6c70c5892c0dc16fc59d2b8e | 461 | py | Python | yi/core/dtypes.py | soloist-v/yi | d7c04fe6266441d2629ba35f69c9fc659a52b370 | [
"MIT"
] | null | null | null | yi/core/dtypes.py | soloist-v/yi | d7c04fe6266441d2629ba35f69c9fc659a52b370 | [
"MIT"
] | null | null | null | yi/core/dtypes.py | soloist-v/yi | d7c04fe6266441d2629ba35f69c9fc659a52b370 | [
"MIT"
] | null | null | null | import numpy as np
import ctypes as ct
char = ct.c_char
ubyte = ct.c_ubyte
bool = np.bool_     # np.bool (alias of builtin bool) was removed in NumPy 1.24
float = np.float64  # np.float (alias of builtin float) was removed in NumPy 1.24
float16 = np.float16
float32 = np.float32
float64 = np.float64
uint8 = np.uint8
byte = np.byte
uint = np.uint
uint16 = np.uint16
uint32 = np.uint32
uint64 = np.uint64
int = np.int_  # np.int (alias of builtin int) was removed in NumPy 1.24
int16 = np.int16
short = np.short
ushort = ct.c_ushort
int32 = np.int32
int64 = np.int64
long = np.int_  # np.long was removed in NumPy 1.24; np.int_ maps to the platform C long
ulong = ct.c_ulong
longlong = np.longlong
ulonglong = ct.c_ulonglong
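
# Usage sketch: these aliases can be passed wherever a dtype or ctypes type is
# expected, e.g.
#
#     buf = np.zeros(8, dtype=float32)  # NumPy side
#     raw = (ubyte * 8)()               # ctypes array of 8 unsigned bytes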
| 17.074074 | 26 | 0.73102 | 82 | 461 | 4.04878 | 0.365854 | 0.045181 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.099217 | 0.169197 | 461 | 26 | 27 | 17.730769 | 0.767624 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.08 | 0 | 0.08 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2029145c9d4ea9c9155795f998247b31586f13e7 | 3,769 | py | Python | reinforcement_learning/rl_tic_tac_toe_coach_customEnv/tic_tac_toe_game.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 5 | 2019-01-19T23:53:35.000Z | 2022-01-29T14:04:31.000Z | reinforcement_learning/rl_tic_tac_toe_coach_customEnv/tic_tac_toe_game.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 6 | 2020-01-28T23:08:49.000Z | 2022-02-10T00:27:19.000Z | reinforcement_learning/rl_tic_tac_toe_coach_customEnv/tic_tac_toe_game.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 8 | 2020-12-14T15:49:24.000Z | 2022-03-23T18:38:36.000Z | from ipywidgets import widgets, HBox, VBox, Layout
from IPython.display import display
from functools import partial
import numpy as np
class TicTacToeGame(object):
'''
    Tic-tac-toe game within a Jupyter Notebook.
    The opponent plays Xs and starts the game; the `agent` argument is assumed
    to be a predictor object from a SageMaker RL trained agent.
'''
def __init__(self, agent):
self.board = np.zeros((3, 3))
self.game_over = False
self.turn = 'X'
self.agent = agent
def start(self):
self.board = np.zeros((3, 3))
self.game_over = False
self.turn = 'X'
self.draw_board()
self.move_agent()
def mark_board(self):
Xs = np.argwhere(self.board == 1)
for X in Xs:
self.spaces[X[0] * 3 + X[1]].description = 'X'
Os = np.argwhere(self.board == -1)
for O in Os:
self.spaces[O[0] * 3 + O[1]].description = 'O'
def click_space(self, action, space):
row = action // 3
col = action % 3
if self.game_over:
return
if self.board[row, col] != 0:
self.text_box.value = 'Invalid'
return
if self.turn == 'O':
self.board[row, col] = -1
self.mark_board()
if check_win(self.board) == -1:
self.text_box.value = 'Os Win'
self.game_over = True
else:
self.turn = 'X'
self.text_box.value = 'Xs Turn'
self.move_agent()
def draw_board(self):
self.text_box = widgets.Text(value='Xs Turn', layout=Layout(width='100px', height='50px'))
self.spaces = []
for i in range(9):
space = widgets.Button(description='',
disabled=False,
button_style='',
tooltip='Click to make move',
icon='',
layout=Layout(width='75px', height='75px'))
self.spaces.append(space)
space.on_click(partial(self.click_space, i))
board = VBox([HBox([self.spaces[0], self.spaces[1], self.spaces[2]]),
HBox([self.spaces[3], self.spaces[4], self.spaces[5]]),
HBox([self.spaces[6], self.spaces[7], self.spaces[8]])])
display(VBox([board, self.text_box]))
return
def move_agent(self):
if self.game_over:
return
if self.turn == 'X':
# Take the first empty space with the highest preference from the agent
for action in np.argsort(-np.array(self.agent.predict(self.board.flatten())[1][0])):
row = action // 3
col = action % 3
if self.board[row, col] == 0:
self.board[action // 3, action % 3] = 1
break
self.mark_board()
if check_win(self.board) == 1:
self.text_box.value = 'Xs Win'
self.game_over = True
            elif (self.board != 0).all():
                self.text_box.value = 'Draw'
                self.game_over = True
else:
self.turn = 'O'
self.text_box.value = 'Os Turn'
def check_win(board):
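    # column sums, row sums, and the two diagonals: a line of three equal
    # marks sums to +3 (X wins) or -3 (O wins)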
v = board.sum(axis=0)
h = board.sum(axis=1)
dd = board[0, 0] + board[1, 1] + board[2, 2]
du = board[2, 0] + board[1, 1] + board[0, 2]
if max(v.max(), h.max()) == 3 or dd == 3 or du == 3:
return 1
elif min(v.min(), h.min()) == -3 or dd == -3 or du == -3:
return -1
else:
return 0
| 30.893443 | 98 | 0.477846 | 468 | 3,769 | 3.777778 | 0.260684 | 0.073529 | 0.049774 | 0.054299 | 0.288462 | 0.229638 | 0.20362 | 0.159502 | 0.130091 | 0.109729 | 0 | 0.030396 | 0.397718 | 3,769 | 121 | 99 | 31.14876 | 0.748458 | 0.061024 | 0 | 0.295455 | 0 | 0 | 0.024751 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079545 | false | 0 | 0.045455 | 0 | 0.215909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
202a5da8266f29b01ac5f81206b1cb497b57c352 | 2,714 | py | Python | src/evaluate.py | buswinka/DetectStereocillia | 7205680d9861cb50a447fe730696d2631f8256ba | [
"MIT"
] | null | null | null | src/evaluate.py | buswinka/DetectStereocillia | 7205680d9861cb50a447fe730696d2631f8256ba | [
"MIT"
] | null | null | null | src/evaluate.py | buswinka/DetectStereocillia | 7205680d9861cb50a447fe730696d2631f8256ba | [
"MIT"
] | 1 | 2022-03-20T03:05:20.000Z | 2022-03-20T03:05:20.000Z | from src.model import faster_rcnn_bundle, faster_rcnn_cilia, mask_rcnn, keypoint_rcnn
import torch
import src.utils
import torch.optim
import torchvision.ops as ops
import src.transforms as t
import torchvision.transforms.functional as TF
import numpy as np
import PIL
from typing import Dict, List, Tuple
import os.path
class evaluate:
def __init__(self):
if torch.cuda.is_available(): device = 'cuda:0'
else: device = 'cpu'
models_path = os.path.join(os.getcwd(), 'models')
mask_rcnn.load_state_dict(torch.load(os.path.join(models_path, 'mask_rcnn.mdl')))
mask_rcnn.eval().to(device)
        # the original hardcoded an absolute Dropbox path here; load from models_path like the other models
        faster_rcnn_cilia.load_state_dict(torch.load(os.path.join(models_path, 'faster_rcnn_cilia_missing.mdl')))
faster_rcnn_cilia.eval().to(device)
keypoint_rcnn.load_state_dict(torch.load(os.path.join(models_path, 'keypoint_rcnn.mdl')))
keypoint_rcnn.eval().to(device)
self.mask_rcnn = mask_rcnn
self.keypoint_rcnn = keypoint_rcnn
self.faster_rcnn_cilia = faster_rcnn_cilia
def __call__(self, eval_path: str) \
-> Tuple[np.ndarray, Dict[str, torch.Tensor], Dict[str, torch.Tensor], Dict[str, torch.Tensor]]:
"""
Evaluates all models on image specified by eval_path
:param eval_path:
:return: Tuple[np.ndarray, Dict[str, torch.Tensor], Dict[str, torch.Tensor], Dict[str, torch.Tensor]]
-np.ndarray
"""
if torch.cuda.is_available(): device = 'cuda:0'
else: device = 'cpu'
image = TF.to_tensor(PIL.Image.open(eval_path))
im_ = t.stack_image()(t.normalize()({'image': image}))['image']
image = torch.cat((image, image, image), dim=0)
larger_boi = src.utils.image(image.unsqueeze(0))
with torch.no_grad():
masks = self.mask_rcnn(image.unsqueeze(0).to(device))[0]
keypoints = self.keypoint_rcnn(image.unsqueeze(0).to(device))[0]
boxes = self.faster_rcnn_cilia(im_.unsqueeze(0).to(device))[0]
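        # non-maximum suppression prunes overlapping detections; the surviving
        # indices are then used to filter each output field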
index = ops.nms(masks['boxes'], masks['scores'], 0.5)
masks['masks'] = masks['masks'][index, :, :]
masks['scores'] = masks['scores'][index]
masks['labels'] = masks['labels'][index]
masks['boxes'] = masks['boxes'][index, :]
index = ops.nms(boxes['boxes'], boxes['scores'], 0.35)
boxes['scores'] = boxes['scores'][index]
boxes['labels'] = boxes['labels'][index]
boxes['boxes'] = boxes['boxes'][index, :]
larger_boi.add_partial_maks(x=0, y=0, model_output=masks, threshold=0.50)
return larger_boi.render_mat(), masks, keypoints, boxes
| 36.675676 | 161 | 0.648858 | 367 | 2,714 | 4.618529 | 0.27248 | 0.047198 | 0.061947 | 0.063717 | 0.246018 | 0.221829 | 0.221829 | 0.188791 | 0.188791 | 0.188791 | 0 | 0.009268 | 0.204864 | 2,714 | 73 | 162 | 37.178082 | 0.776182 | 0.069639 | 0 | 0.083333 | 0 | 0 | 0.108195 | 0.037545 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.229167 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
202b86ff46d75ae6771b1867dc89ef0af73924b6 | 4,763 | py | Python | src/solver.py | nataboll/ellipsoids | efd6b5d4bf221aa08a657f6265cb8a175289f979 | [
"MIT"
] | null | null | null | src/solver.py | nataboll/ellipsoids | efd6b5d4bf221aa08a657f6265cb8a175289f979 | [
"MIT"
] | null | null | null | src/solver.py | nataboll/ellipsoids | efd6b5d4bf221aa08a657f6265cb8a175289f979 | [
"MIT"
] | null | null | null | from src.data import Data
import numpy as np
from scipy.optimize import minimize
import matplotlib.pyplot as plt
# area of ellipse
# def f(x):
# return np.pi * (x[0] * x[3] - x[1] * x[2]) ** 2
def f(x):
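    # with M = [[x^2, x*z], [x*z, y^2 + z^2]] (see h below), the ellipse area is
    # pi / |det M|, and det M = x[0]^2 * (x[1]^2 + x[2]^2) - (x[0]*x[2])^2 = x[0]^2 * x[1]^2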
return np.pi * (1 / float(x[0] ** 2 * x[1] ** 2))
class Solver:
def __init__(self, data):
self.data = data
self.initial_guess = [1, 1, 1, data.center[0], data.center[1]]
# constraint function (x[0] == a, x[1] == b, x[2] == c, x[3] == d, x[4] == alpha, x[5] == beta)
# def h(self, x, number):
# det = x[0] * x[3] - x[1] * x[2]
# if det == 0:
# return - 1 # 1 is more than 0 so the constraint does not hold
# else:
# return -((1 / det ** 2) * ((x[3] * self.data.df.iloc[0, number] - x[1] * (self.data.df.iloc[1, number]
# - x[4] * (det ** 2))) ** 2
# + (x[0] * self.data.df.iloc[1, number] - x[2] * self.data.df.iloc[0, number]
# - x[5] * (det ** 2)) ** 2) - 1)
def h(self, x, number):
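        # a point p is inside the ellipse when ||M p - (alpha, beta)|| <= 1, where
        # M = [[x[0]^2, x[0]*x[2]], [x[0]*x[2], x[1]^2 + x[2]^2]] and (alpha, beta) = (x[3], x[4])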
return 1 - ((x[0] ** 2 * self.data.new_df.iloc[0, number]
+ x[0] * x[2] * self.data.new_df.iloc[1, number] - x[3]) ** 2
+ (x[0] * x[2] * self.data.new_df.iloc[0, number]
+ (x[1] ** 2 + x[2] ** 2) * self.data.new_df.iloc[1, number] - x[4]) ** 2)
# variables used for finding out whether to discard the point
point_cost = 10
square_cost = 1 # cost of one m^2 of area
square = 0.0 # ellipse area - target function value
x = 0.0 # elements of S
y = 0.0
z = 0.0
alpha = 0.0 # shift vector
beta = 0.0
vector = np.zeros(5)
data = Data() # Data object will be transferred here
initial_guess = np.zeros(5)
def set_fields(self, x, y, z, alpha, beta):
# self.a = a
# self.b = b
# self.c = c
# self.d = d
# self.alpha = alpha
# self.beta = beta
# self.vector = [a, b, c, d, alpha, beta]
self.x = x
self.y = y
self.z = z
self.alpha = alpha
self.beta = beta
self.vector = [x, y, z, alpha, beta]
def restrictions(self): # counting all restrictions and assembling together
cons = list() # list of dictionaries
h_list = list() # list of constraints - functions h_i
# number of restrictions == number of points left (columns in new_df)
for i in range(len(self.data.new_df.columns)):
h_list.append(lambda x: self.h(x, i))
cons.append({'type': 'ineq', 'fun': h_list[i]}) # appending each constraint as a dictionary
return cons
def optimize(self): # computing matrix S and vector (alpha, beta)^T
w = self.minimal_result()
self.set_fields(w[0], w[1], w[2], w[3], w[4])
current_square = f(w[0:3]) # latest calculated square
# print("\n" + "Starting square is " + str(current_square))
self.data.discard_point(False)
while True:
self.square = current_square
w = self.minimal_result()
current_square = f(w[0:3])
delta_square = self.square - current_square # area change
if delta_square * self.square_cost < self.point_cost:
break
self.set_fields(w[0], w[1], w[2], w[3], w[4])
self.data.discard_point(False)
# counting optimal values for points in new_df
def minimal_result(self):
result = minimize(f, self.initial_guess, constraints=self.restrictions())
return result.x
def display(self):
# Let ellipse be (x y)*Q*(x y)^T + L^T*(x y) + c
# set edges for of displayed field
edge = 40.0
x_min = -edge
x_max = edge
y_min = -edge
y_max = edge
axes = plt.gca()
axes.set_xlim([x_min, x_max])
axes.set_ylim([y_min, y_max])
x = np.linspace(-20.0, 20.0, 100)
y = np.linspace(-20.0, 20.0, 100)
xx, yy = np.meshgrid(x, y)
        # draw the ellipse: its boundary satisfies ||M p - (alpha, beta)||^2 = 1,
        # where M = [[x^2, x*z], [x*z, y^2 + z^2]] matches the constraint h() above
        ellipse = ((self.x ** 2 * xx + self.x * self.z * yy - self.alpha) ** 2
                   + (self.x * self.z * xx + (self.y ** 2 + self.z ** 2) * yy - self.beta) ** 2) - 1
        plt.contour(xx, yy, ellipse, [0])
# just draw points
for i in range(len(self.data.df.columns)):
plt.plot(self.data.df.iloc[0, i], self.data.df.iloc[1, i], 'bo')
plt.show()
| 35.281481 | 117 | 0.503674 | 720 | 4,763 | 3.266667 | 0.226389 | 0.054422 | 0.029762 | 0.035714 | 0.26148 | 0.204507 | 0.175595 | 0.103316 | 0.065901 | 0.019558 | 0 | 0.042949 | 0.344951 | 4,763 | 134 | 118 | 35.544776 | 0.710897 | 0.39324 | 0 | 0.108108 | 0 | 0 | 0.004574 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.054054 | 0.027027 | 0.378378 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
20319180ed84fbe3b13d01be4d5e3308273c3a54 | 1,218 | py | Python | tests/conftest.py | mzaglia/bdc-db | bce8ba164a336dd8f7638ed4c0a5c1a4ad80e3a5 | [
"MIT"
] | null | null | null | tests/conftest.py | mzaglia/bdc-db | bce8ba164a336dd8f7638ed4c0a5c1a4ad80e3a5 | [
"MIT"
] | null | null | null | tests/conftest.py | mzaglia/bdc-db | bce8ba164a336dd8f7638ed4c0a5c1a4ad80e3a5 | [
"MIT"
] | null | null | null | #
# This file is part of Brazil Data Cube Database module.
# Copyright (C) 2019-2020 INPE.
#
# Brazil Data Cube Database module is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Config test fixtures."""
from typing import List
import pytest
from flask import Flask
from bdc_db.cli import create_app
from bdc_db.fixtures.cli import load_fixtures
from bdc_db.models import Collection, Tile, db
@pytest.fixture
def app() -> Flask:
"""Create flask app and set app_context."""
_app = create_app()
with _app.app_context():
db.drop_all()
db.create_all()
        yield _app
db.close_all_sessions()
db.drop_all()
@pytest.fixture
def db_context(app: Flask):
"""Create database context to load fixtures."""
load_fixtures()
yield app
@pytest.fixture
def tiles(db_context) -> List[Tile]:
"""Retrieve all collections on database loaded from fixtures."""
return Tile.query().filter().all()
@pytest.fixture
def collections(db_context) -> List[Collection]:
"""Retrieve all collections on database loaded from fixtures."""
return Collection.query().filter().all()
| 22.981132 | 92 | 0.702791 | 172 | 1,218 | 4.866279 | 0.412791 | 0.062127 | 0.076464 | 0.052569 | 0.133811 | 0.133811 | 0.133811 | 0.133811 | 0.133811 | 0 | 0 | 0.008138 | 0.192939 | 1,218 | 52 | 93 | 23.423077 | 0.843337 | 0.383415 | 0 | 0.32 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16 | false | 0 | 0.24 | 0 | 0.48 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
203206409e2548bf6472b00d0f31ff2df6928c52 | 2,805 | py | Python | code/ner/evaluate.py | miquelcanalesteve/mtextos2122 | e400090575b9f23469cf5a071523b8dafec1c0cf | [
"CC-BY-4.0"
] | null | null | null | code/ner/evaluate.py | miquelcanalesteve/mtextos2122 | e400090575b9f23469cf5a071523b8dafec1c0cf | [
"CC-BY-4.0"
] | null | null | null | code/ner/evaluate.py | miquelcanalesteve/mtextos2122 | e400090575b9f23469cf5a071523b8dafec1c0cf | [
"CC-BY-4.0"
] | 5 | 2022-02-09T15:13:31.000Z | 2022-03-07T20:07:24.000Z | import argparse
import logging
import os
import numpy as np
import torch
import utils
import model.net as net
from model.data_loader import DataLoader
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default='data/small', help="Directory containing the dataset")
parser.add_argument('--model_dir', default='experiments/base_model', help="Directory containing params.json")
parser.add_argument('--restore_file', default='best', help="name of the file in --model_dir \
containing weights to load")
def evaluate(model, loss_fn, data_iterator, metrics, params, num_steps):
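    # Evaluate the model on num_steps batches and return the mean of each metric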
model.eval()
summ = []
for _ in range(num_steps):
data_batch, labels_batch = next(data_iterator)
output_batch = model(data_batch)
loss = loss_fn(output_batch, labels_batch)
output_batch = output_batch.data.cpu().numpy()
labels_batch = labels_batch.data.cpu().numpy()
summary_batch = {metric: metrics[metric](output_batch, labels_batch)
for metric in metrics}
summary_batch['loss'] = loss.item()
summ.append(summary_batch)
metrics_mean = {metric:np.mean([x[metric] for x in summ]) for metric in summ[0]}
metrics_string = " ; ".join("{}: {:05.3f}".format(k, v) for k, v in metrics_mean.items())
logging.info("- Eval metrics : " + metrics_string)
return metrics_mean
if __name__ == '__main__':
args = parser.parse_args()
json_path = os.path.join(args.model_dir, 'params.json')
assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
params = utils.Params(json_path)
    params.cuda = torch.cuda.is_available() # use GPU if available
torch.manual_seed(230)
if params.cuda: torch.cuda.manual_seed(230)
utils.set_logger(os.path.join(args.model_dir, 'evaluate.log'))
logging.info("Creating the dataset...")
data_loader = DataLoader(args.data_dir, params)
data = data_loader.load_data(['test'], args.data_dir)
test_data = data['test']
params.test_size = test_data['size']
test_data_iterator = data_loader.data_iterator(test_data, params)
logging.info("- done.")
model = net.Net(params).cuda() if params.cuda else net.Net(params)
loss_fn = net.loss_fn
metrics = net.metrics
logging.info("Starting evaluation")
utils.load_checkpoint(os.path.join(args.model_dir, args.restore_file + '.pth.tar'), model)
num_steps = (params.test_size + 1) // params.batch_size
test_metrics = evaluate(model, loss_fn, test_data_iterator, metrics, params, num_steps)
save_path = os.path.join(args.model_dir, "metrics_test_{}.json".format(args.restore_file))
utils.save_dict_to_json(test_metrics, save_path)
| 33 | 109 | 0.685918 | 387 | 2,805 | 4.736434 | 0.281654 | 0.026187 | 0.034915 | 0.030551 | 0.08838 | 0.08838 | 0.028369 | 0 | 0 | 0 | 0 | 0.004844 | 0.190374 | 2,805 | 84 | 110 | 33.392857 | 0.80229 | 0.00713 | 0 | 0 | 0 | 0 | 0.118218 | 0.007905 | 0 | 0 | 0 | 0 | 0.018182 | 1 | 0.018182 | false | 0 | 0.145455 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
20331abdaf8bf7d676a5d3d8fb00324bcc3c7cde | 1,124 | py | Python | cynergy/errors/ContainerException.py | LALAYANG/IOCynergy | 1821423680f741d8ca06bcc6a02c8b21156f9ba0 | [
"MIT"
] | 12 | 2017-12-14T02:02:25.000Z | 2019-07-31T10:42:23.000Z | cynergy/errors/ContainerException.py | LALAYANG/IOCynergy | 1821423680f741d8ca06bcc6a02c8b21156f9ba0 | [
"MIT"
] | 1 | 2019-09-10T07:11:27.000Z | 2019-10-22T20:18:19.000Z | cynergy/errors/ContainerException.py | LALAYANG/IOCynergy | 1821423680f741d8ca06bcc6a02c8b21156f9ba0 | [
"MIT"
] | 2 | 2021-08-15T08:56:13.000Z | 2021-11-05T17:06:15.000Z | from typing import Type
class ContainerException(Exception):
def __init__(self, cls: Type, message):
self.cls = cls
super(ContainerException, self).__init__(message)
class ClassNotFoundException(ContainerException):
def __init__(self, cls: Type):
super(ClassNotFoundException, self).__init__(cls,
'Could not find registered implementation for class: "{}"'.format(
cls.__name__))
class ConfigProviderRequiredException(ContainerException):
def __init__(self, cls: Type, argument_name):
self.cls = cls
self.argument = argument_name
super(ConfigProviderRequiredException, self).__init__(cls,
'The argument "{}" requires config provider for class '
'"{}" and you did not configure one'.format(argument_name,
cls.__name__))
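# Usage sketch (SomeService is a hypothetical class, assuming a failed container lookup):
#     raise ClassNotFoundException(SomeService)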
| 44.96 | 120 | 0.509786 | 84 | 1,124 | 6.404762 | 0.392857 | 0.065056 | 0.061338 | 0.078067 | 0.167286 | 0.133829 | 0 | 0 | 0 | 0 | 0 | 0 | 0.419039 | 1,124 | 24 | 121 | 46.833333 | 0.82389 | 0 | 0 | 0.222222 | 0 | 0 | 0.127224 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.055556 | 0 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2034a75a259e4f6a84df0a1bdb6f0f7a485dcf5a | 841 | py | Python | scripts/testcase_parser/kattis.py | metaflow/contests | 5e9ffcb72c3e7da54b5e0818b1afa59f5778ffa2 | [
"MIT"
] | 1 | 2019-05-12T23:41:00.000Z | 2019-05-12T23:41:00.000Z | scripts/testcase_parser/kattis.py | metaflow/contests | 5e9ffcb72c3e7da54b5e0818b1afa59f5778ffa2 | [
"MIT"
] | null | null | null | scripts/testcase_parser/kattis.py | metaflow/contests | 5e9ffcb72c3e7da54b5e0818b1afa59f5778ffa2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from bs4 import BeautifulSoup
from urllib.parse import urlsplit
import json
import sys
import utils
url = sys.argv[1]
path = sys.argv[2]
url = urlsplit(url)
host = url.netloc
if not host.endswith('kattis.com'):
exit(1)
name = url.path.rstrip('/').split('/')[-1] # /problems/skiresort
with open(path, 'r') as myfile:
info = json.loads(myfile.read().replace('\n', ''))
soup = BeautifulSoup(info['content'], 'lxml')
tables = soup.find_all('table', class_='sample')
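# each Kattis sample table has a header row and one data row whose two
# <td><pre> cells hold the sample input and the expected output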
cases = []
for t in tables:
row = t.find_all('tr')[1]
if not row:
continue
td = row.find('td')
test_case = []
test_case.append(td.find('pre').string)
td = row.find_all('td')[1]
test_case.append(td.find('pre').string)
cases.append(test_case)
if utils.save_cases(name, cases):
utils.open_problem(name)
| 23.361111 | 64 | 0.655172 | 129 | 841 | 4.193798 | 0.511628 | 0.05915 | 0.033272 | 0.05915 | 0.107209 | 0.107209 | 0.107209 | 0 | 0 | 0 | 0 | 0.01 | 0.167658 | 841 | 35 | 65 | 24.028571 | 0.762857 | 0.047562 | 0 | 0.068966 | 0 | 0 | 0.061327 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.172414 | 0 | 0.172414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
203526c026fed195000636faf1d1dd48a896aced | 18,756 | py | Python | src/libs/loss.py | dylanturpin/6-PACK | a8d94cea97ed0f459431f409792038abb14f02d4 | [
"MIT"
] | 1 | 2020-06-23T10:03:18.000Z | 2020-06-23T10:03:18.000Z | src/libs/loss.py | dylanturpin/6-PACK | a8d94cea97ed0f459431f409792038abb14f02d4 | [
"MIT"
] | null | null | null | src/libs/loss.py | dylanturpin/6-PACK | a8d94cea97ed0f459431f409792038abb14f02d4 | [
"MIT"
] | null | null | null | import pdb
from torch.nn.modules.loss import _Loss
from torch.autograd import Variable
import math
import torch
import time
import numpy as np
import torch.nn as nn
import random
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.distributions as tdist
import copy
import pymesh
import pyvista
from libs.sinkhorn import SinkhornOT
class Loss(_Loss):
def __init__(self, num_key, num_cate, loss_weights, loss_sep_type='euclidean', loss_surf_type='surface'):
super(Loss, self).__init__(True)
self.num_key = num_key
self.num_cate = num_cate
self.oneone = Variable(torch.ones(1)).cuda()
self.normal = tdist.Normal(torch.tensor([0.0]), torch.tensor([0.0005]))
self.pconf = torch.ones(num_key) / num_key
self.pconf = Variable(self.pconf).cuda()
self.sym_axis = Variable(torch.from_numpy(np.array([0, 1, 0]).astype(np.float32))).cuda().view(1, 3, 1)
self.threezero = Variable(torch.from_numpy(np.array([0, 0, 0]).astype(np.float32))).cuda()
self.zeros = torch.FloatTensor([0.0 for j in range(num_key-1) for i in range(num_key)]).cuda()
self.select1 = torch.tensor([i for j in range(num_key-1) for i in range(num_key)]).cuda()
self.select2 = torch.tensor([(i%num_key) for j in range(1, num_key) for i in range(j, j+num_key)]).cuda()
self.loss_att_weight = loss_weights['loss_att_weight']
self.Kp_dis_weight = loss_weights['Kp_dis_weight']
self.Kp_cent_dis_weight = loss_weights['Kp_cent_dis_weight']
self.loss_rot_weight = loss_weights['loss_rot_weight']
self.loss_surf_weight = loss_weights['loss_surf_weight']
self.loss_sep_weight = loss_weights['loss_sep_weight']
self.kp_to_mesh_dist_scale = loss_weights['kp_to_mesh_dist_scale']
self.loss_sep_type = loss_sep_type
self.loss_surf_type = loss_surf_type
self.sinkhorn_loss = SinkhornOT()
def estimate_rotation(self, pt0, pt1, sym_or_not):
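        # weighted orthogonal Procrustes (Kabsch): recover the rotation from the
        # SVD of the weighted covariance between the two centered keypoint sets,
        # with a det correction to avoid reflections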
pconf2 = self.pconf.view(1, self.num_key, 1)
cent0 = torch.sum(pt0 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()
cent1 = torch.sum(pt1 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()
diag_mat = torch.diag(self.pconf).unsqueeze(0)
x = (pt0 - cent0).transpose(2, 1).contiguous()
y = pt1 - cent1
pred_t = cent1 - cent0
cov = torch.bmm(torch.bmm(x, diag_mat), y).contiguous().squeeze(0)
        u, _, v = torch.svd(cov)
        # a commented-out GESVD fallback once set this flag to 1. when SVD failed;
        # define it on the normal path so the return below does not raise NameError
        trivial_solution = torch.tensor(0., device=cov.device)
u = u.transpose(1, 0).contiguous()
d = torch.det(torch.mm(v, u)).contiguous().view(1, 1, 1).contiguous()
u = u.transpose(1, 0).contiguous().unsqueeze(0)
ud = torch.cat((u[:, :, :-1], u[:, :, -1:] * d), dim=2)
v = v.transpose(1, 0).contiguous().unsqueeze(0)
pred_r = torch.bmm(ud, v).transpose(2, 1).contiguous()
if sym_or_not:
pred_r = torch.bmm(pred_r, self.sym_axis).contiguous().view(-1).contiguous()
return pred_r, trivial_solution
def estimate_pose(self, pt0, pt1):
pconf2 = self.pconf.view(1, self.num_key, 1)
cent0 = torch.sum(pt0 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()
cent1 = torch.sum(pt1 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()
diag_mat = torch.diag(self.pconf).unsqueeze(0)
x = (pt0 - cent0).transpose(2, 1).contiguous()
y = pt1 - cent1
pred_t = cent1 - cent0
cov = torch.bmm(torch.bmm(x, diag_mat), y).contiguous().squeeze(0)
u, _, v = torch.svd(cov)
u = u.transpose(1, 0).contiguous()
d = torch.det(torch.mm(v, u)).contiguous().view(1, 1, 1).contiguous()
u = u.transpose(1, 0).contiguous().unsqueeze(0)
ud = torch.cat((u[:, :, :-1], u[:, :, -1:] * d), dim=2)
v = v.transpose(1, 0).contiguous().unsqueeze(0)
pred_r = torch.bmm(ud, v).transpose(2, 1).contiguous()
return pred_r, pred_t[:, 0, :].view(1, 3)
def change_to_ver(self, Kp):
pconf2 = self.pconf.view(1, self.num_key, 1)
cent0 = torch.sum(Kp * pconf2, dim=1).view(-1).contiguous()
num_kp = self.num_key
ver_Kp_1 = Kp[:, :, 1].view(1, num_kp, 1).contiguous()
kk_1 = Kp[:, :, 0].view(1, num_kp, 1).contiguous()
kk_2 = Kp[:, :, 2].view(1, num_kp, 1).contiguous()
rad = torch.cat((kk_1, kk_2), dim=2).contiguous()
ver_Kp_2 = torch.norm(rad, dim=2).view(1, num_kp, 1).contiguous()
tmp_aim_0 = torch.cat((Kp[:, 1:, :], Kp[:, 0:1, :]), dim=1).contiguous()
aim_0_x = tmp_aim_0[:, :, 0].view(-1).contiguous()
aim_0_y = tmp_aim_0[:, :, 2].view(-1).contiguous()
aim_1_x = Kp[:, :, 0].view(-1).contiguous()
aim_1_y = Kp[:, :, 2].view(-1).contiguous()
angle = torch.atan2(aim_1_y, aim_1_x) - torch.atan2(aim_0_y, aim_0_x)
angle[angle < 0] += 2 * math.pi
ver_Kp_3 = angle.view(1, num_kp, 1).contiguous() * 0.01
ver_Kp = torch.cat((ver_Kp_1, ver_Kp_2, ver_Kp_3), dim=2).contiguous()
return ver_Kp, cent0
def forward(self, Kp_fr, Kp_to, anc_fr, anc_to, att_fr, att_to, r_fr, t_fr, r_to, t_to, mesh, faces, scale, cate, geodesic, curvature):
sym_or_not = False
num_kp = self.num_key
num_anc = len(anc_fr[0])
############ Attention Loss
gt_t_fr = t_fr.view(1, 1, 3).repeat(1, num_anc, 1)
min_fr = torch.min(torch.norm(anc_fr - gt_t_fr, dim=2).view(-1))
loss_att_fr = torch.sum(((torch.norm(anc_fr - gt_t_fr, dim=2).view(1, num_anc) - min_fr) * att_fr).contiguous().view(-1))
gt_t_to = t_to.view(1, 1, 3).repeat(1, num_anc, 1)
min_to = torch.min(torch.norm(anc_to - gt_t_to, dim=2).view(-1))
loss_att_to = torch.sum(((torch.norm(anc_to - gt_t_to, dim=2).view(1, num_anc) - min_to) * att_to).contiguous().view(-1))
loss_att = (loss_att_fr + loss_att_to).contiguous() / 2.0
############# Different View Loss
gt_Kp_fr = torch.bmm(Kp_fr - t_fr, r_fr).contiguous()
gt_Kp_to = torch.bmm(Kp_to - t_to, r_to).contiguous()
if sym_or_not:
ver_Kp_fr, cent_fr = self.change_to_ver(gt_Kp_fr)
ver_Kp_to, cent_to = self.change_to_ver(gt_Kp_to)
Kp_dis = torch.mean(torch.norm((ver_Kp_fr - ver_Kp_to), dim=2), dim=1)
Kp_cent_dis = (torch.norm(cent_fr - self.threezero) + torch.norm(cent_to - self.threezero)) / 2.0
else:
Kp_dis = torch.mean(torch.norm((gt_Kp_fr - gt_Kp_to), dim=2), dim=1)
cent_fr = torch.mean(gt_Kp_fr, dim=1).view(-1).contiguous()
cent_to = torch.mean(gt_Kp_to, dim=1).view(-1).contiguous()
Kp_cent_dis = (torch.norm(cent_fr - self.threezero) + torch.norm(cent_to - self.threezero)) / 2.0
############# Pose Error Loss
if self.loss_rot_weight > 0.:
rot_Kp_fr = (Kp_fr - t_fr).contiguous()
rot_Kp_to = (Kp_to - t_to).contiguous()
rot = torch.bmm(r_to, r_fr.transpose(2, 1))
if sym_or_not:
rot = torch.bmm(rot, self.sym_axis).view(-1)
                pred_r, trivial_svd_solution = self.estimate_rotation(rot_Kp_fr, rot_Kp_to, sym_or_not)
                loss_rot = (torch.acos(torch.sum(pred_r * rot) / (torch.norm(pred_r) * torch.norm(rot)))).contiguous()
else:
pred_r, trivial_svd_solution = self.estimate_rotation(rot_Kp_fr, rot_Kp_to, sym_or_not)
frob_sqr = torch.sum(((pred_r - rot) * (pred_r - rot)).view(-1)).contiguous()
frob = torch.sqrt(frob_sqr).unsqueeze(0).contiguous()
cc = torch.cat([self.oneone, frob / (2 * math.sqrt(2))]).contiguous()
loss_rot = 2.0 * torch.mean(torch.asin(torch.min(cc))).contiguous()
else:
loss_rot = torch.zeros(1).to(Kp_fr.device)
trivial_svd_solution = torch.zeros(1).to(Kp_fr.device)
############# Close To Surface Loss
if self.loss_surf_type == 'surface':
bs = 1
num_p = 1
num_point_mesh = self.num_key
full_mesh = pymesh.form_mesh(mesh.squeeze().cpu().numpy(), faces.squeeze().cpu().numpy())
sq_dist, face_indices, closest_points_fr = pymesh.distance_to_mesh(full_mesh, gt_Kp_fr.squeeze().detach().cpu().numpy())
closest_points_fr = torch.Tensor(closest_points_fr).to(gt_Kp_fr.device)
loss_surf_fr = torch.mean(torch.norm(closest_points_fr - gt_Kp_fr.squeeze(), dim=1))
#loss_surf_fr = torch.mean(torch.abs(closest_points_fr - gt_Kp_fr.squeeze())**2)
#
sq_dist, face_indices, closest_points_to = pymesh.distance_to_mesh(full_mesh, gt_Kp_to.squeeze().detach().cpu().numpy())
closest_points_to = torch.Tensor(closest_points_to).to(gt_Kp_to.device)
#loss_surf_to = torch.mean(torch.abs(closest_points_to - gt_Kp_to.squeeze())**2)
            loss_surf_to = torch.mean(torch.norm(closest_points_to - gt_Kp_to.squeeze(), dim=1))
loss_surf = (loss_surf_fr + loss_surf_to).contiguous() / 2.0
elif self.loss_surf_type == 'volume':
pymesh_mesh = pymesh.form_mesh(mesh.squeeze().cpu().numpy(), faces.squeeze().cpu().numpy())
pd_faces = pymesh_mesh.faces
threes = np.array([3]*pd_faces.shape[0])[:, None]
pd_faces = np.concatenate((threes, pd_faces), axis=1)
pd_points = pymesh_mesh.vertices
pyvista_mesh = pyvista.PolyData(pd_points, pd_faces)
# from
kp_grid = pyvista.PolyData(gt_Kp_fr.squeeze().cpu().detach().numpy())
kp_grid.compute_implicit_distance(pyvista_mesh,inplace=True)
implicit_distances_fr = kp_grid.get_array('implicit_distance')
implicit_distances_fr = torch.tensor(implicit_distances_fr).to(Kp_fr.device)
sq_dist, face_indices, closest_points_fr = pymesh.distance_to_mesh(pymesh_mesh, gt_Kp_fr.squeeze().detach().cpu().numpy())
closest_points_fr = torch.Tensor(closest_points_fr).to(gt_Kp_fr.device)
loss_surf_fr = torch.sum(torch.abs(closest_points_fr - gt_Kp_fr.squeeze()), dim=1)
loss_surf_fr[implicit_distances_fr < 0] = 0
loss_surf_fr = torch.mean(loss_surf_fr)
# to
kp_grid = pyvista.PolyData(gt_Kp_to.squeeze().cpu().detach().numpy())
kp_grid.compute_implicit_distance(pyvista_mesh,inplace=True)
implicit_distances_to = kp_grid.get_array('implicit_distance')
implicit_distances_to = torch.tensor(implicit_distances_to).to(Kp_to.device)
sq_dist, face_indices, closest_points_to = pymesh.distance_to_mesh(pymesh_mesh, gt_Kp_to.squeeze().detach().cpu().numpy())
closest_points_to = torch.Tensor(closest_points_to).to(gt_Kp_to.device)
loss_surf_to = torch.sum(torch.abs(closest_points_to - gt_Kp_to.squeeze()), dim=1)
loss_surf_to[implicit_distances_to < 0] = 0
loss_surf_to = torch.mean(loss_surf_to)
loss_surf = (loss_surf_fr + loss_surf_to).contiguous() / 2.0
############# Separate Loss
if self.loss_sep_type == 'euclidean':
scale = scale.view(-1)
max_rad = torch.norm(scale).item()
gt_Kp_fr_select1 = torch.index_select(gt_Kp_fr, 1, self.select1).contiguous()
gt_Kp_fr_select2 = torch.index_select(gt_Kp_fr, 1, self.select2).contiguous()
loss_sep_fr = torch.norm((gt_Kp_fr_select1 - gt_Kp_fr_select2), dim=2).view(-1).contiguous()
# zero separation loss for kps outside the mesh volume
#mask1 = implicit_distances_fr[self.select1] <= 0
#mask2 = implicit_distances_fr[self.select2] <= 0
#mask = (mask1 & mask2).float()
#loss_sep_fr = loss_sep_fr * mask
#implicit_distances_fr = implicit_distances_fr.float()
#loss_sep_fr = loss_sep_fr * torch.exp(-implicit_distances_fr[self.select1]) * torch.exp(-implicit_distances_fr[self.select2])
#thresh = geodesic.max() / (self.num_key/2)
thresh = geodesic.max() / 4.
loss_sep_fr = torch.max(self.zeros, thresh - loss_sep_fr).contiguous()
loss_sep_fr = torch.mean(loss_sep_fr).contiguous()
gt_Kp_to_select1 = torch.index_select(gt_Kp_to, 1, self.select1).contiguous()
gt_Kp_to_select2 = torch.index_select(gt_Kp_to, 1, self.select2).contiguous()
loss_sep_to = torch.norm((gt_Kp_to_select1 - gt_Kp_to_select2), dim=2).view(-1).contiguous()
# zero separation loss for kps outside the mesh volume
#mask1 = implicit_distances_to[self.select1] <= 0
#mask2 = implicit_distances_to[self.select2] <= 0
#mask = (mask1 & mask2).float()
#implicit_distances_to = implicit_distances_to.float()
#loss_sep_to = loss_sep_to * torch.exp(-implicit_distances_to[self.select1]) * torch.exp(-implicit_distances_to[self.select2])
#thresh = geodesic.max() / (self.num_key/2)
thresh = geodesic.max() / 4.
loss_sep_to = torch.max(self.zeros, thresh - loss_sep_to).contiguous()
loss_sep_to = torch.mean(loss_sep_to).contiguous()
loss_sep = (loss_sep_fr + loss_sep_to) / 2.0
elif self.loss_sep_type == 'curvature':
            geodesic = geodesic.squeeze()
            curvature = curvature.squeeze()
            mesh = mesh.squeeze()  # drop the batch dim, as the 'coverage' branch does
D = (geodesic + geodesic.t())/2
loss_sep = torch.tensor(0.)
kp_to_mesh_dist = torch.abs(mesh[:,None,:] - gt_Kp_fr)
kp_to_mesh_dist = kp_to_mesh_dist.sum(dim=2) # 500 by 8
kp_to_mesh_dist_min, _ = kp_to_mesh_dist.min(dim=0)
kp_to_mesh_dist_max, _ = kp_to_mesh_dist.max(dim=0)
kp_to_mesh_dist = (kp_to_mesh_dist - kp_to_mesh_dist_min[None,:])/(kp_to_mesh_dist_max[None,:]-kp_to_mesh_dist_min[None,:])
kp_to_mesh_dist *= 10
smax = torch.nn.functional.softmax(-kp_to_mesh_dist,dim=0)
smax = smax.transpose(1,0)
mu_sum = smax.sum(dim=0)/self.num_key
curvature = (curvature - curvature.min()) / (curvature.max() - curvature.min())
curv_score = (curvature[None,:] * D).sum(dim=1) * 0.1
            mu_curvature = torch.nn.functional.softmax(curv_score, dim=0)
sinkhorn_dist = self.sinkhorn_loss.apply(mu_sum[None,:],mu_curvature[None,:],D)
loss_sep = sinkhorn_dist.mean()
elif self.loss_sep_type == 'coverage':
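            # soft-assign each mesh vertex to its nearest keypoint, then push the
            # resulting per-keypoint mass toward a uniform distribution via Sinkhorn
            # OT, using the symmetrized geodesic matrix D as the ground cost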
geodesic = geodesic.squeeze()
mesh = mesh.squeeze()
D = (geodesic + geodesic.t())/2
kp_to_mesh_dist = torch.abs(mesh[:,None,:] - gt_Kp_fr)
kp_to_mesh_dist = kp_to_mesh_dist.sum(dim=2) # 500 by 8
kp_to_mesh_dist_min, _ = kp_to_mesh_dist.min(dim=0)
kp_to_mesh_dist_max, _ = kp_to_mesh_dist.max(dim=0)
kp_to_mesh_dist = (kp_to_mesh_dist - kp_to_mesh_dist_min[None,:])/(kp_to_mesh_dist_max[None,:]-kp_to_mesh_dist_min[None,:])
kp_to_mesh_dist *= self.kp_to_mesh_dist_scale
smax = torch.nn.functional.softmax(-kp_to_mesh_dist,dim=0)
smax = smax.transpose(1,0)
mu_sum = smax.sum(dim=0)/self.num_key
n = mu_sum.shape[0]
mu_uniform = torch.ones_like(mu_sum)*(1/n)
sinkhorn_dist = self.sinkhorn_loss.apply(mu_sum[None,:],mu_uniform[None,:],D,1e-3,100)
loss_sep = sinkhorn_dist.mean()
########### SUM UP
loss_att_scaled = self.loss_att_weight * loss_att
Kp_dis_scaled = self.Kp_dis_weight * Kp_dis
Kp_cent_dis_scaled = self.Kp_cent_dis_weight * Kp_cent_dis
loss_rot_scaled = self.loss_rot_weight * loss_rot
loss_surf_scaled = self.loss_surf_weight * loss_surf
loss_sep_scaled = self.loss_sep_weight * loss_sep
loss = loss_att_scaled + Kp_dis_scaled + Kp_cent_dis_scaled + loss_rot_scaled + loss_surf_scaled + loss_sep_scaled
score = (loss_att * 4.0 + Kp_dis * 3.0 + Kp_cent_dis + loss_rot * 0.2).item()
losses_dict = {
'loss': loss,
'loss_att': loss_att,
'Kp_dis': Kp_dis,
'Kp_cent_dis': Kp_cent_dis,
'loss_rot': loss_rot,
'trivial_svd_solution': trivial_svd_solution,
'loss_surf': loss_surf,
'loss_sep': loss_sep,
'loss_att_scaled': loss_att_scaled,
'Kp_dis_scaled': Kp_dis_scaled,
'Kp_cent_dis_scaled': Kp_cent_dis_scaled,
'loss_rot_scaled': loss_rot_scaled,
'loss_surf_scaled': loss_surf_scaled,
'loss_sep_scaled': loss_sep_scaled}
        print(cate.view(-1).item(), loss_att.item(), Kp_dis.item(), Kp_cent_dis.item(), loss_rot.item(), loss_surf.item(), loss_sep.item())
return loss, score, losses_dict
def ev(self, Kp_fr, Kp_to, att_to):
ori_Kp_fr = Kp_fr
ori_Kp_to = Kp_to
new_r, new_t = self.estimate_pose(Kp_fr, Kp_to)
Kp_to = torch.bmm((ori_Kp_to - new_t), new_r)
Kp_dis = torch.mean(torch.norm((Kp_fr - Kp_to), dim=2), dim=1)
new_t *= 1000.0
return ori_Kp_fr, new_r.detach().cpu().numpy()[0], new_t.detach().cpu().numpy()[0], Kp_dis.item(), att_to
def ev_zero(self, Kp_fr, att_fr):
pconf2 = self.pconf.view(1, self.num_key, 1)
new_t = torch.sum(Kp_fr * pconf2, dim=1).view(1, 3).contiguous()
kp_dis = torch.norm(new_t.view(-1))
new_t *= 1000.0
return new_t.detach().cpu().numpy()[0], att_fr, kp_dis.item()
def inf(self, Kp_fr, Kp_to):
ori_Kp_to = Kp_to
new_r, new_t = self.estimate_pose(Kp_fr, Kp_to)
Kp_to = torch.bmm((ori_Kp_to - new_t), new_r)
Kp_dis = torch.mean(torch.norm((Kp_fr - Kp_to), dim=2), dim=1)
new_t *= 1000.0
return new_r.detach().cpu().numpy()[0], new_t.detach().cpu().numpy()[0], Kp_dis.item()
def inf_zero(self, Kp_fr):
pconf2 = self.pconf.view(1, self.num_key, 1)
new_t = torch.sum(Kp_fr * pconf2, dim=1).view(1, 3).contiguous()
Kp_dis = torch.norm(new_t.view(-1))
new_t *= 1000.0
return new_t.detach().cpu().numpy()[0], Kp_dis.item()
| 44.870813 | 139 | 0.61447 | 2,871 | 18,756 | 3.698363 | 0.079763 | 0.026747 | 0.023357 | 0.035035 | 0.653701 | 0.556508 | 0.492748 | 0.4466 | 0.418911 | 0.407139 | 0 | 0.027354 | 0.241789 | 18,756 | 417 | 140 | 44.978417 | 0.719288 | 0.074163 | 0 | 0.320144 | 0 | 0 | 0.021326 | 0.001217 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032374 | false | 0 | 0.057554 | 0 | 0.122302 | 0.003597 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2035bc483e311f9599f980f47de2dacaa8317d2b | 1,789 | py | Python | setup.py | galaddirie/django-cassiopeia | e3e75e6c815cfc96e3b7ef5991aa1265221a2122 | [
"MIT"
] | null | null | null | setup.py | galaddirie/django-cassiopeia | e3e75e6c815cfc96e3b7ef5991aa1265221a2122 | [
"MIT"
] | null | null | null | setup.py | galaddirie/django-cassiopeia | e3e75e6c815cfc96e3b7ef5991aa1265221a2122 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
from setuptools import setup, find_packages
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
install_requires = [
"datapipelines>=1.0.7",
"merakicommons>=1.0.7",
"cassiopeia",
"Pillow",
"arrow",
"requests",
"Django>=3.0.1",
"wheel",
]
# Require python 3.6
if sys.version_info < (3, 6):
    sys.exit("'django-cassiopeia' requires Python >= 3.6!")
setup(
name="django-cassiopeia",
version="2.1.1",
author="Paaksing",
author_email="paaksingtech@gmail.com",
url="https://github.com/paaksing/django-cassiopeia",
description="Django Integration of the Riot Games Developer API Wrapper 'cassiopeia'",
long_description=long_description,
long_description_content_type='text/markdown',
keywords=["Django", "LoL", "League of Legends", "Riot Games", "API", "REST"],
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.6",
"Environment :: Web Environment",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Games/Entertainment",
"Topic :: Games/Entertainment :: Real Time Strategy",
"Topic :: Games/Entertainment :: Role-Playing",
"Topic :: Software Development :: Libraries :: Python Modules",
"Natural Language :: English",
"Framework :: Django :: 3.0",
],
license="MIT",
packages=find_packages(exclude=("tests",)),
zip_safe=True,
install_requires=install_requires,
include_package_data=True
)
| 30.844828 | 90 | 0.648966 | 206 | 1,789 | 5.524272 | 0.592233 | 0.052724 | 0.02109 | 0.052724 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016819 | 0.202348 | 1,789 | 57 | 91 | 31.385965 | 0.780659 | 0.0218 | 0 | 0 | 0 | 0 | 0.463959 | 0.012586 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2036a59a849efccd1eebf5b74851410e2d9a4249 | 710 | py | Python | Solver/Solver.py | MapleNSteel/mpcGenerationToolkit | e0b43e34247447cb8c591bb2f7d69834e4b7d982 | [
"MIT"
] | null | null | null | Solver/Solver.py | MapleNSteel/mpcGenerationToolkit | e0b43e34247447cb8c591bb2f7d69834e4b7d982 | [
"MIT"
] | null | null | null | Solver/Solver.py | MapleNSteel/mpcGenerationToolkit | e0b43e34247447cb8c591bb2f7d69834e4b7d982 | [
"MIT"
] | null | null | null | import time
import numpy as np
from cvxopt import matrix, solvers
from sympy import pprint
solvers.options['show_progress'] = False
solvers.options['maxiters'] = 1
def getSolution(code_gen, x_0, u_0, x_ref, u_ref, params):
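	# cvxopt's solvers.qp solves: minimize (1/2) x^T P x + q^T x
	# subject to G x <= h (inequalities) and A x = b (equalities)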
A = code_gen.A_mat(x_0[:,0:1], x_0, u_0, params)
b = code_gen.b_mat(x_0[:,0:1], x_0, u_0, params)
P = code_gen.P_mat(x_0, u_0, params)
q = code_gen.q_mat(x_0, u_0, x_ref, u_ref)
#pprint(A)
#pprint(b)
	if not code_gen.noineq:
G = code_gen.G_mat(x_0, u_0, params)
h = code_gen.h_mat(x_0, u_0, params)
return solvers.qp(matrix(P), matrix(q), matrix(G), matrix(h), matrix(A), matrix(b))
else:
return solvers.qp(matrix(P), matrix(q), None, None, matrix(A), matrix(b))
| 32.272727 | 85 | 0.690141 | 146 | 710 | 3.116438 | 0.273973 | 0.03956 | 0.046154 | 0.061538 | 0.347253 | 0.340659 | 0.254945 | 0.127473 | 0.074725 | 0.074725 | 0 | 0.034483 | 0.142254 | 710 | 21 | 86 | 33.809524 | 0.712644 | 0.025352 | 0 | 0 | 0 | 0 | 0.030435 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.235294 | 0 | 0.411765 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2037a567b77e305e0d1ccdea12da25c068d7248b | 3,460 | py | Python | fhir/resources/DSTU2/enrollmentrequest.py | cstoltze/fhir.resources | 52f99738935b7313089d89daf94d73ce7d167c9d | [
"BSD-3-Clause"
] | 144 | 2019-05-08T14:24:43.000Z | 2022-03-30T02:37:11.000Z | fhir/resources/DSTU2/enrollmentrequest.py | cstoltze/fhir.resources | 52f99738935b7313089d89daf94d73ce7d167c9d | [
"BSD-3-Clause"
] | 82 | 2019-05-13T17:43:13.000Z | 2022-03-30T16:45:17.000Z | fhir/resources/DSTU2/enrollmentrequest.py | cstoltze/fhir.resources | 52f99738935b7313089d89daf94d73ce7d167c9d | [
"BSD-3-Clause"
] | 48 | 2019-04-04T14:14:53.000Z | 2022-03-30T06:07:31.000Z | # -*- coding: utf-8 -*-
"""
Profile: https://www.hl7.org/fhir/DSTU2/enrollmentrequest.html
Release: DSTU2
Version: 1.0.2
Revision: 7202
"""
from typing import List as ListType
from pydantic import Field
from . import domainresource, fhirtypes
class EnrollmentRequest(domainresource.DomainResource):
"""Enroll in coverage.
This resource provides the insurance enrollment details to the insurer
regarding a specified coverage.
"""
resource_type = Field("EnrollmentRequest", const=True)
identifier: ListType[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="Business Identifier",
description="The Response business identifier.",
)
ruleset: fhirtypes.CodingType = Field(
None,
alias="ruleset",
title="Resource version",
description=(
"The version of the style of resource contents. This should be"
"mapped to the allowable profiles for this and supporting resources."
),
)
originalRuleset: fhirtypes.CodingType = Field(
None,
alias="originalRuleset",
title="Original version",
description=(
"The style (standard) and version of the original material "
"which was converted into this resource."
),
)
created: fhirtypes.DateTime = Field(
None,
alias="created",
title="Creation date",
description="The date when this resource was created.",
)
target: fhirtypes.ReferenceType = Field(
None,
alias="target",
title="Target",
description="The Insurer who is target of the request.",
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Organization"],
)
provider: fhirtypes.ReferenceType = Field(
None,
alias="provider",
title="Responsible practitioner",
description=(
"The practitioner who is responsible for the services rendered to the "
"patient."
),
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Practitioner"],
)
organization: fhirtypes.ReferenceType = Field(
None,
alias="organization",
title="Responsible organization",
description=(
"The organization which is responsible for the"
"services rendered to the patient."
),
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Organization"],
)
subject: fhirtypes.ReferenceType = Field(
...,
alias="subject",
title="The subject to be enrolled",
description="Patient Resource.",
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Patient"],
)
coverage: fhirtypes.ReferenceType = Field(
...,
alias="coverage",
title="Insurance information",
description="Reference to the program or plan identification, underwriter or payor.",
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Coverage"],
)
relationship: fhirtypes.CodingType = Field(
...,
alias="relationship",
title="Patient relationship to subscriber",
description="The relationship of the patient to the subscriber.",
)
| 30.350877 | 93 | 0.629191 | 342 | 3,460 | 6.333333 | 0.330409 | 0.051708 | 0.045245 | 0.050785 | 0.289474 | 0.209141 | 0.209141 | 0.209141 | 0.209141 | 0.209141 | 0 | 0.004393 | 0.276301 | 3,460 | 113 | 94 | 30.619469 | 0.860623 | 0.162717 | 0 | 0.243902 | 0 | 0 | 0.345778 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.036585 | 0 | 0.182927 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
203badb19861f80a63c8028ac6ffdd250300130d | 799 | py | Python | software/feature_space.py | aM3z/primality-testing | b9e8395ab346ffd46f5b014996da2207c50dcc0a | [
"MIT"
] | 1 | 2021-03-15T17:26:32.000Z | 2021-03-15T17:26:32.000Z | software/feature_space.py | aM3z/primality-testing | b9e8395ab346ffd46f5b014996da2207c50dcc0a | [
"MIT"
] | null | null | null | software/feature_space.py | aM3z/primality-testing | b9e8395ab346ffd46f5b014996da2207c50dcc0a | [
"MIT"
] | null | null | null | from base_b import convert
from random import randint
DATA_DIR = "../data/"
def get_prime(partition):
primes = list()
#filename = glob.glob(DATA_DIR + "primes" + str(partition))
filename = DATA_DIR + "primes" + str(partition)
with open(filename, "r") as f:
content = f.readlines()
line = None
while not line:
index = randint(4,len(content))
line = content[index].strip()
return line
#for line in content[4:]:
# line = line.strip()
# if line:
# primes += line.split()
#return primes
def get_space(partitions, base):
prime_strings = get_primes(partitions)
space = list()
for string in prime_strings:
base_b = convert(int(string), base)
space.append(base_b)
print(space)
| 21.594595 | 63 | 0.605757 | 101 | 799 | 4.683168 | 0.445545 | 0.031712 | 0.054968 | 0.067653 | 0.105708 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003448 | 0.274093 | 799 | 36 | 64 | 22.194444 | 0.812069 | 0.20025 | 0 | 0 | 0 | 0 | 0.023734 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.25 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
203de3b0e97c73fd699b6af21861e33253964a9d | 6,947 | py | Python | dataLoader.py | stephen-w-bailey/fast-n-deep-faces | 53173c6367dfa3a20d3193ad7a0e77ac1e898f02 | [
"BSD-3-Clause"
] | 40 | 2020-06-26T10:56:39.000Z | 2022-01-26T10:43:34.000Z | dataLoader.py | stephen-w-bailey/fast-n-deep-faces | 53173c6367dfa3a20d3193ad7a0e77ac1e898f02 | [
"BSD-3-Clause"
] | 7 | 2020-07-01T07:10:11.000Z | 2022-03-07T00:07:41.000Z | dataLoader.py | stephen-w-bailey/fast-n-deep-faces | 53173c6367dfa3a20d3193ad7a0e77ac1e898f02 | [
"BSD-3-Clause"
] | 13 | 2020-06-30T11:57:41.000Z | 2022-02-20T17:24:00.000Z | import matplotlib.pyplot as plt
import numpy as np
import pickle
import tensorflow as tf
import UVGenerator
class DataLoader():
def __init__(self,pg,sampleFile=None):
self.pg = pg
self.numV = np.sum(self.pg.active)
pg.setPose(pg.defaultPose)
self.neutral = pg.getVertices()[pg.active]
def generator(self):
cache = []
maxCache = 10000
while True:
if len(cache) < maxCache:
pose = self.pg.setRandomPose()
mesh = self.pg.getVertices()[self.pg.active]
cache.append((pose,mesh))
if len(cache) == maxCache:
print('cache full')
else:
idx = np.random.choice(len(cache))
pose,mesh = cache[idx]
yield dict(pose=pose,mesh=mesh)
def process(self,data):
pose,mesh = data['pose'],data['mesh']
pose.set_shape(self.pg.defaultPose.shape)
mesh.set_shape((self.numV,3))
return dict(pose=pose,mesh=mesh)
def createDataset(self,batchsize):
dataset = tf.data.Dataset.from_generator(self.generator,
dict(pose=tf.float32,
mesh=tf.float32))
dataset = dataset.map(self.process)
dataset = dataset.batch(batchsize)
dataset = dataset.prefetch(5)
iter = dataset.make_one_shot_iterator()
element = iter.get_next()
return element
class LinearDataLoader(DataLoader):
def __init__(self,pg,linearModel):
DataLoader.__init__(self,pg)
with open(linearModel,'rb') as file:
data = pickle.load(file)
self.x = data['weights'].astype('float32')
self.weights = data['ssdWeights'].astype('float32')
self.restBones = data['ssdRestBones'].astype('float32')
self.rest = data['ssdRest']
self.k = data['k']
def process(self,data):
data = DataLoader.process(self,data)
pose,mesh = data['pose'],data['mesh']
pose = tf.concat((pose,tf.ones((1,))),0)
bones = tf.reshape(tf.matmul(pose[np.newaxis],self.x),(self.k,4,3))
approx = []
for i in range(self.k):
R = bones[i,:3]
t = bones[i,3][np.newaxis]
tRest = self.restBones[i,3]
approx.append(tf.matmul(self.rest-tRest,R)+t)
approx = tf.stack(approx,0)
weights = self.weights.T[...,np.newaxis]
approx = tf.reduce_sum(weights*approx,0)
data = dict(pose=pose,mesh=mesh,linear=approx)
return data
class ImageDataLoader(DataLoader):
def __init__(self,pg,uvFile,linearModel=None,makeImages=False):
DataLoader.__init__(self,pg)
self.makeImages = makeImages
if linearModel is not None:
self.linearModel = LinearDataLoader(pg,linearModel)
else:
self.linearModel = None
with open(uvFile,'rb') as file:
data = pickle.load(file)
self.faces = data['originalFaces']
self.numV = np.max(self.faces)+1
self.uv = data['uv'][:self.numV].astype('float32')
self.uv = self.uv
self.vCharts = data['vCharts'][:self.numV]
if 'parameter_mask' in data:
self.mask = data['parameter_mask']
else:
self.mask = None
def process(self,data):
data = DataLoader.process(self,data)
mesh = data['mesh']
self.usedVerts = []
self.usedUVs = []
if self.linearModel is not None:
data = self.linearModel.process(data)
else:
data['linear'] = self.neutral
mesh = mesh - data['linear']
for i in range(np.max(self.vCharts)+1):
idx = np.arange(self.numV)[self.vCharts==i]
if len(idx) == 0:
data['image-'+str(i)] = 'empty'
continue
ref = self.faces.reshape(-1)
usedFaces = [True if v in idx else False for v in ref]
usedFaces = np.sum(np.asarray(usedFaces).reshape((-1,3)),-1) > 0
faceIdx = np.arange(len(self.faces))[usedFaces]
idx = np.arange(len(self.vCharts))[self.vCharts==i]
if len(idx) == 0:
raise ValueError('Chart index '+str(i)+' has no assigned verties')
meshPart = tf.gather(mesh,idx)
image,usedVerts = UVGenerator.mapMeshToImage(meshPart[np.newaxis],self.uv[idx],self.imageSize,self.imageSize)
if not self.makeImages:
image = tf.zeros((self.imageSize,self.imageSize,3))
self.usedUVs.append(self.uv[idx[usedVerts]])
self.usedVerts.append(idx[usedVerts])
image = image[0]
data['image-'+str(i)] = image
return data
def createDataset(self,batchsize,imageSize):
self.imageSize = imageSize
return DataLoader.createDataset(self,batchsize)
class AnimationLoader(ImageDataLoader):
def __init__(self,pg,animData,uvFile,linearModel=None,fixToRange=False):
ImageDataLoader.__init__(self,pg,uvFile,linearModel)
newAnim = animData.copy()
if fixToRange:
for i,node in enumerate(pg.nodes):
node = [n for n in node] # Copy the data so modification won't change the original
frac = 0.1*(node[3]-node[2])
node[2] += frac
node[3] -= frac
if np.any(newAnim[:,i]<node[2]):
print('Found '+str(np.sum(newAnim[:,i]<node[2]))+' values for '+str(node[:2])+' below '+str(node[2]))
if np.any(newAnim[:,i]>node[3]):
print('Found '+str(np.sum(newAnim[:,i]>node[3]))+' values for '+str(node[:2])+' above '+str(node[3]))
newAnim[:,i] = np.minimum(np.maximum(newAnim[:,i],node[2]),node[3])
diff = np.sum(np.square(newAnim-animData),1)
print('Clamped values in '+str(np.sum(diff>0))+' frames')
animData = newAnim
print('Checking if animation was correctly modified')
for i,node in enumerate(pg.nodes):
node = [n for n in node] # Copy the data so modification won't change the original
frac = 0.1*(node[3]-node[2])
node[2] += frac
node[3] -= frac
if np.any(animData[:,i]<node[2]):
print('Found '+str(np.sum(animData[:,i]<node[2]))+' values for '+str(node[:2])+' below '+str(node[2]))
if np.any(animData[:,i]>node[3]):
print('Found '+str(np.sum(animData[:,i]>node[3]))+' values for '+str(node[:2])+' above '+str(node[3]))
self.animData = animData
def generator(self):
for d in self.animData:
self.pg.setPose(d)
mesh = self.pg.getVertices()[self.pg.active]
yield dict(pose=d,mesh=mesh)
| 38.594444 | 122 | 0.554196 | 849 | 6,947 | 4.489988 | 0.208481 | 0.025184 | 0.018363 | 0.013641 | 0.290923 | 0.248951 | 0.229014 | 0.200682 | 0.151889 | 0.131165 | 0 | 0.014709 | 0.305168 | 6,947 | 179 | 123 | 38.810056 | 0.775016 | 0.015978 | 0 | 0.215686 | 0 | 0 | 0.056052 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071895 | false | 0 | 0.03268 | 0 | 0.163399 | 0.045752 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
203e0415618b04f5477d288195f92e0ca902d4e2 | 2,105 | py | Python | cop/email_smaple/mail.py | maksim-urbanovich/Python_Task3 | d0842c2caa39030ced55a342a709c9d0a2c8acce | [
"MIT"
] | null | null | null | cop/email_smaple/mail.py | maksim-urbanovich/Python_Task3 | d0842c2caa39030ced55a342a709c9d0a2c8acce | [
"MIT"
] | null | null | null | cop/email_smaple/mail.py | maksim-urbanovich/Python_Task3 | d0842c2caa39030ced55a342a709c9d0a2c8acce | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import smtplib
from smtplib import SMTPException
from email.mime.text import MIMEText
import csv
import codecs
import logging
from logging import StreamHandler
from logging.handlers import RotatingFileHandler
import sys
import time
LOGGER = 'logger'
user = "robot.artsiom.mishuta"
password = "robot1234"
# Logger initialization
logger = logging.getLogger(LOGGER)
logger.setLevel(logging.DEBUG)
# Dump handler initialization
console = StreamHandler(sys.stdout)
console.setLevel(logging.DEBUG)
logger.addHandler(console)
# File handler initialization if need
logfile = RotatingFileHandler("log1.txt", backupCount=10, maxBytes=13107200)
logfile.setLevel(logging.DEBUG)
log_format = '%(asctime)s - %(levelname)s - %(message)s'
log_datefmt = '%d-%m-%Y %H:%M:%S'
file_formatter = logging.Formatter(log_format, log_datefmt)
logfile.setFormatter(file_formatter)
logger.addHandler(logfile)
logfile.doRollover()
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login(user, password)
body = None
subject = None
with codecs.open('body.txt', 'rb') as myfile:
body=myfile.read()
with codecs.open('subject.txt', 'rb') as myfile:
subject=myfile.read()
with codecs.open('eggs.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
try:
msg = MIMEText(body.format(name=row[1]))
msg['Subject'] = subject
msg['From'] = '{0}@gmail.com'.format(user)
msg['To'] = row[0]
server.sendmail("{0}@gmail.com".format(user), ['{0}@gmail.com'.format(user) ,row[0]], msg.as_string())
logger.info("email: \n -------------------------------- \n {0} \n -------------------------------- \n send to {1}".format(msg.as_string(), msg['To'] ))
time.sleep(5)
except SMTPException as e:
logger.error("email: \n -------------------------------- \n {0}\n -------------------------------- \n was not send to {1}".format(msg.as_string(), msg['To'] ))
logger.error(e)
server.quit()
| 29.236111 | 172 | 0.636105 | 262 | 2,105 | 5.076336 | 0.396947 | 0.02406 | 0.045113 | 0.033835 | 0.137594 | 0.058647 | 0.043609 | 0.043609 | 0.043609 | 0 | 0 | 0.016968 | 0.160095 | 2,105 | 71 | 173 | 29.647887 | 0.735294 | 0.050831 | 0 | 0 | 0 | 0.019608 | 0.207831 | 0.074799 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.039216 | 0.196078 | 0 | 0.196078 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
20453193afb607d368c958b88cbf1cdb70f61f43 | 717 | py | Python | python/leetcode/280_wiggle_sort.py | yxun/Notebook | 680ae89a32d3f7d4fdcd541e66cea97e29efbd26 | [
"Apache-2.0"
] | 1 | 2021-10-04T13:26:32.000Z | 2021-10-04T13:26:32.000Z | python/leetcode/280_wiggle_sort.py | yxun/Notebook | 680ae89a32d3f7d4fdcd541e66cea97e29efbd26 | [
"Apache-2.0"
] | 3 | 2020-03-24T19:34:42.000Z | 2022-01-21T20:15:39.000Z | python/leetcode/280_wiggle_sort.py | yxun/Notebook | 680ae89a32d3f7d4fdcd541e66cea97e29efbd26 | [
"Apache-2.0"
] | 1 | 2021-04-01T20:56:50.000Z | 2021-04-01T20:56:50.000Z | #%%
"""
- Wiggle Sort
- https://leetcode.com/problems/wiggle-sort/
- Medium
Given an unsorted array nums, reorder it in-place such that nums[0] <= nums[1] >= nums[2] <= nums[3]....
Example:
Input: nums = [3,5,2,1,6,4]
Output: One possible answer is [3,5,1,6,2,4]
"""
#%%
##
class S1:
def wiggleSort(self, nums):
"""
:type nums: List[int]
:rtype: None, modify nums in-place
"""
if not nums: return
nums2 = sorted(nums)
l, r = 0, len(nums)-1
for i in range(len(nums)):
if i % 2 == 0:
nums[i] = nums2[l]
l += 1
else:
nums[i] = nums2[r]
r -= 1
return
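#%%
## Alternative one-pass sketch (not part of the original solution): greedily swap
## adjacent elements that violate the wiggle property. O(n) time, truly in-place.
class S2:
    def wiggleSort(self, nums):
        """
        :type nums: List[int]
        :rtype: None, modify nums in-place
        """
        for i in range(1, len(nums)):
            # odd i must not fall below its left neighbor; even i must not rise above it
            if (i % 2 == 1 and nums[i] < nums[i - 1]) or \
               (i % 2 == 0 and nums[i] > nums[i - 1]):
                nums[i], nums[i - 1] = nums[i - 1], nums[i]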
| 19.916667 | 104 | 0.474198 | 101 | 717 | 3.366337 | 0.544554 | 0.058824 | 0.058824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057018 | 0.364017 | 717 | 35 | 105 | 20.485714 | 0.688596 | 0.444909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2046a9865a03989fbda1a01607042dce01486fd9 | 4,843 | py | Python | code/old/pyTorch_baxter_feed_forward_simple2.py | NLesniak/DeepLearning | c1e18a5c0fdab9c6bf4412c142c7db9c15baeef3 | [
"MIT"
] | 11 | 2020-06-05T15:38:21.000Z | 2021-08-31T21:33:11.000Z | code/old/pyTorch_baxter_feed_forward_simple2.py | NLesniak/DeepLearning | c1e18a5c0fdab9c6bf4412c142c7db9c15baeef3 | [
"MIT"
] | 7 | 2019-10-23T15:30:51.000Z | 2019-12-09T22:49:42.000Z | code/old/pyTorch_baxter_feed_forward_simple2.py | NLesniak/DeepLearning | c1e18a5c0fdab9c6bf4412c142c7db9c15baeef3 | [
"MIT"
] | 10 | 2020-06-18T05:43:37.000Z | 2021-10-31T14:30:24.000Z | ## Add modules that are necessary
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sympy import *
import matplotlib.pyplot as plt
import operator
from IPython.core.display import display
import torch
from torch import nn
import torch.nn.functional as F
import torch.nn.init as init
import torch.utils.data as data_utils
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset, TensorDataset
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, confusion_matrix
init_printing(use_unicode=True)
##read in the data
shared = pd.read_table("data/baxter.0.03.subsample.shared")
shared.head()
meta = pd.read_table("data/metadata.tsv")
##check and visualize the data
meta.head()
shared.head()
## remove unnecessary columns from meta
meta = meta[['sample','dx']]
##rename the column name "Group" to match the "sample" in meta
shared = shared.rename(index=str, columns={"Group":"sample"})
##merge the 2 datasets on sample
data=pd.merge(meta,shared,on=['sample'])
##remove adenoma samples
data= data[data.dx.str.contains("adenoma") == False]
##drop all except OTU columns for x
x = data.drop(["sample", "dx", "numOtus", "label"], axis=1)
## Cancer =1 Normal =0
diagnosis = { "cancer":1, "normal":0}
##generate y which only has diagnosis as 0 and 1
y = data["dx"].replace(diagnosis)
##drop if NA elements
y.dropna()
x.dropna()
##split the data to generate training and test sets 50-50
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.5, random_state=82089)
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
scaler = StandardScaler()
transformed = scaler.fit_transform(x_test)
test_set = torch.from_numpy(transformed).float()
test_valid = torch.from_numpy(y_test.values).float()
class NeuralNet(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNet, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(hidden_size, num_classes)
def forward(self, x):
out = self.fc1(x)
out = self.relu(out)
out = self.fc2(out)
return out
# Hyper-parameters
input_size = 6920
hidden_size = 5
num_classes = 2
learning_rate = 0.0007
batch_size = 50
batch_no = len(x_train) // batch_size
import torch.optim as optim
model = NeuralNet(input_size, hidden_size, num_classes)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
#optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
from sklearn.utils import shuffle
def train(epochs):
for epoch in range(epochs):
if epoch % 5 == 0:
print('Epoch {}'.format(epoch+1))
x_train2, y_train2 = shuffle(x_train, y_train)
for i in range(batch_no):
start = i * batch_size
end = start + batch_size
inputs = Variable(torch.from_numpy(x_train2[start:end])).float()
labels = Variable(torch.from_numpy(y_train2.values[start:end])).long()
model.train()
optimizer.zero_grad()
y_pred = model(inputs)
loss = criterion(y_pred, labels)
print ("epoch #",epoch)
print ("loss: ", loss.item())
pred = torch.max(y_pred, 1)[1].eq(labels).sum()
print ("acc:(%) ", 100*pred/len(inputs))
loss.backward()
optimizer.step()
train(3)
p_train = model(torch.from_numpy(x_train).float())
p_train = torch.max(p_train,1)[1]
len(p_train)
p_train = p_train.data.numpy()
accuracy_score(y_train, p_train)
# Test the model
# In test phase, we don't need to compute gradients (for memory efficiency)
def test(epochs):
model.eval()
    input = Variable(torch.from_numpy(transformed)).float()  # scaled test features computed above
label = Variable(torch.from_numpy(y_test.values)).long()
for epoch in range(epochs):
with torch.no_grad():
y_pred = model(input)
loss = criterion(y_pred, label)
print ("epoch #",epoch)
print ("loss: ", loss.item())
pred = torch.max(y_pred, 1)[1].eq(label).sum()
print ("acc (%): ", 100*pred/len(input))
test(10)
pred = model(test_set)  # test_set already holds the scaled test features as a float tensor
pred = torch.max(pred,1)[1]
len(pred)
pred = pred.data.numpy()
accuracy_score(y_test, pred)
cm = confusion_matrix(y_test, pred)
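# Hedged reporting sketch for the confusion matrix computed above
# (row/column order follows sklearn's sorted labels: 0=normal, 1=cancer):
print("confusion matrix (rows=true, cols=predicted):")
print(cm)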
| 32.072848 | 92 | 0.694817 | 726 | 4,843 | 4.495868 | 0.287879 | 0.027574 | 0.034314 | 0.026961 | 0.27114 | 0.174632 | 0.110907 | 0.066789 | 0.066789 | 0.066789 | 0 | 0.016418 | 0.182531 | 4,843 | 150 | 93 | 32.286667 | 0.808032 | 0.137312 | 0 | 0.208696 | 0 | 0 | 0.04028 | 0.007959 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034783 | false | 0 | 0.243478 | 0 | 0.295652 | 0.069565 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2046beee5b0531f6aba2f13452eaf086c8cff9b9 | 890 | py | Python | tests/layout/test_waveguide.py | joamatab/zeropdk | feed134fc3243655f93cfd5b3bd5b65ea928bf48 | [
"MIT"
] | 17 | 2019-08-22T15:55:50.000Z | 2022-02-02T20:52:00.000Z | tests/layout/test_waveguide.py | joamatab/zeropdk | feed134fc3243655f93cfd5b3bd5b65ea928bf48 | [
"MIT"
] | 1 | 2020-09-29T00:43:38.000Z | 2020-10-27T07:15:01.000Z | tests/layout/test_waveguide.py | joamatab/zeropdk | feed134fc3243655f93cfd5b3bd5b65ea928bf48 | [
"MIT"
] | 3 | 2019-09-04T07:48:35.000Z | 2021-06-16T09:39:42.000Z | import numpy as np
import pytest
from ..context import zeropdk # noqa
from zeropdk.layout.waveguides import waveguide_dpolygon
from zeropdk.layout import insert_shape
import klayout.db as kdb
@pytest.fixture
def top_cell():
def _top_cell():
layout = kdb.Layout()
layout.dbu = 0.001
TOP = layout.create_cell("TOP")
return TOP, layout
return _top_cell
def test_waveguide(top_cell):
t = np.linspace(-1, 1, 100)
ex = kdb.DPoint(1, 0)
ey = kdb.DPoint(0, 1)
# list of points depicting a parabola
points_list = 100 * t * ex + 100 * t ** 2 * ey
dbu = 0.001
width = 1
wg = waveguide_dpolygon(points_list, width, dbu, smooth=True)
# write to test_waveguide.gds (we should see a parabola)
TOP, layout = top_cell()
layer = "1/0"
insert_shape(TOP, layer, wg)
TOP.write("tests/tmp/test_waveguide.gds")
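# Running only this test is straightforward (invocation illustrative; adjust
# the path to your checkout):
#   pytest tests/layout/test_waveguide.py::test_waveguide
# The resulting GDS in tests/tmp/ can then be inspected in KLayout.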
| 23.421053 | 65 | 0.65618 | 134 | 890 | 4.231343 | 0.410448 | 0.061728 | 0.059965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039941 | 0.240449 | 890 | 37 | 66 | 24.054054 | 0.798817 | 0.106742 | 0 | 0 | 0 | 0 | 0.042984 | 0.035398 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.230769 | 0 | 0.423077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2048324c1071d236d03d6f7a8c261d1007dcdeaf | 9,072 | py | Python | bin/Utils/CraftCache.py | C-EO/craft | a52ead76ce400cc745876bd679eba6f62da70ee5 | [
"BSD-2-Clause"
] | 55 | 2016-11-20T17:08:19.000Z | 2022-03-11T22:19:43.000Z | bin/Utils/CraftCache.py | C-EO/craft | a52ead76ce400cc745876bd679eba6f62da70ee5 | [
"BSD-2-Clause"
] | 17 | 2017-09-20T07:52:17.000Z | 2021-12-03T10:03:00.000Z | bin/Utils/CraftCache.py | C-EO/craft | a52ead76ce400cc745876bd679eba6f62da70ee5 | [
"BSD-2-Clause"
] | 29 | 2016-12-10T15:00:11.000Z | 2021-12-02T12:54:05.000Z | import atexit
import json
import os
import pickle
import re
import shutil
import subprocess
import tempfile
import time
import urllib.error
import urllib.request
import sys
from pathlib import Path
from CraftCore import CraftCore, AutoImport
from Blueprints.CraftVersion import CraftVersion
from CraftOS.osutils import OsUtils
from CraftStandardDirs import CraftStandardDirs
from Utils import GetFiles
class CraftCache(object):
RE_TYPE = re.Pattern if sys.version_info >= (3,7) else re._pattern_type
_version = 9
    _cacheLifetime = (60 * 60 * 24) * 1  # seconds per day times number of days (here: 1 day)
class NonPersistentCache(object):
def __init__(self):
self.applicationLocations = {}
def __init__(self):
self.version = CraftCache._version
self.cacheCreationTime = time.time()
self._outputCache = {}
self._helpCache = {}
self._versionCache = {}
self._nightlyVersions = {}
self._jsonCache = {}
# defined in blueprintSearch
self.availablePackages = None
# non persistent cache
self._nonPersistentCache = CraftCache.NonPersistentCache()
def __getstate__(self):
state = dict(self.__dict__)
del state["_nonPersistentCache"]
return state
def __setstate__(self, state):
self.__dict__ = state
self._nonPersistentCache = CraftCache.NonPersistentCache()
@staticmethod
def _loadInstance():
utilsCache = CraftCache()
if os.path.exists(CraftCache._cacheFile()):
with open(CraftCache._cacheFile(), "rb") as f:
try:
data = pickle.load(f)
except Exception as e:
CraftCore.log.warning(f"Cache corrupted: {e}")
return utilsCache
if data.version != CraftCache._version or (
time.time() - data.cacheCreationTime) > CraftCache._cacheLifetime:
CraftCore.log.debug("Clear cache")
else:
utilsCache = data
return utilsCache
@staticmethod
def _cacheFile():
return os.path.join(CraftStandardDirs.etcDir(), "cache.pickle")
@staticmethod
@atexit.register
def _save():
try:
if not os.path.isdir(os.path.dirname(CraftCache._cacheFile())):
return
if isinstance(CraftCore.cache, AutoImport):
return
with open(CraftCache._cacheFile(), "wb") as f:
pick = pickle.Pickler(f, protocol=pickle.HIGHEST_PROTOCOL)
pick.dump(CraftCore.cache)
except Exception as e:
CraftCore.log.warning(f"Failed to save cache {e}", exc_info=e, stack_info=True)
os.remove(CraftCache._cacheFile())
def clear(self):
CraftCore.log.debug("Clear utils cache")
CraftCore.cache = CraftCache()
def findApplication(self, app, path=None, forceCache:bool=False) -> str:
if app in self._nonPersistentCache.applicationLocations:
appLocation = self._nonPersistentCache.applicationLocations[app]
if os.path.exists(appLocation):
return appLocation
else:
self._helpCache.clear()
# don't look in the build dir etc
_cwd = os.getcwd()
os.chdir(CraftCore.standardDirs.craftRoot())
appLocation = shutil.which(str(app), path=path)
os.chdir(_cwd)
if appLocation:
if OsUtils.isWin():
# prettify command
path, ext = os.path.splitext(appLocation)
appLocation = path + ext.lower()
if forceCache or Path(CraftCore.standardDirs.craftRoot()) in Path(appLocation).parents:
CraftCore.log.debug(f"Adding {app} to app cache {appLocation}")
self._nonPersistentCache.applicationLocations[app] = appLocation
else:
CraftCore.log.debug(f"Craft was unable to locate: {app}, in {path}")
return None
return appLocation
def getCommandOutput(self, app:str, command:str, testName:str=None) -> (int, str):
if not testName:
testName = f"\"{app}\" {command}"
app = self.findApplication(app)
if not app:
return (-1, None)
if testName not in self._outputCache:
CraftCore.log.debug(f"\"{app}\" {command}")
# TODO: port away from shell=True
completeProcess = subprocess.run(f"\"{app}\" {command}",
shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True, errors="backslashreplace")
CraftCore.log.debug(f"{testName} Result: ExitedCode: {completeProcess.returncode} Output: {completeProcess.stdout}")
self._outputCache[testName] = (completeProcess.returncode, completeProcess.stdout)
return self._outputCache[testName]
# TODO: rename, cleanup
def checkCommandOutputFor(self, app, command, helpCommand="-h") -> str:
if not (app, command) in self._helpCache:
_, output = self.getCommandOutput(app, helpCommand)
if not output:
return False
if type(command) == str:
supports = command in output
else:
supports = command.match(output) is not None
self._helpCache[(app, command)] = supports
CraftCore.log.debug("%s %s %s" % (app, "supports" if supports else "does not support", command))
return self._helpCache[(app, command)]
def getVersion(self, app, pattern=None, versionCommand=None) -> CraftVersion:
app = self.findApplication(app)
if not app:
return None
if app in self._versionCache:
return self._versionCache[app]
if not pattern:
pattern = re.compile(r"(\d+\.\d+(?:\.\d+)?)")
if not versionCommand:
versionCommand = "--version"
if not isinstance(pattern, CraftCache.RE_TYPE):
raise Exception("getVersion can only handle a compiled regular expression as pattern")
_, output = self.getCommandOutput(app, versionCommand)
if not output:
return None
match = pattern.search(output)
if not match:
CraftCore.log.warning(f"Could not detect pattern: {pattern.pattern} in {output}")
return None
appVersion = CraftVersion(match.group(1))
self._versionCache[app] = appVersion
CraftCore.log.debug(f"getVersion: {app}[{appVersion}]")
return appVersion
def cacheJsonFromUrl(self, url, timeout=10) -> object:
CraftCore.log.debug(f"Fetch Json: {url}")
if not url in self._jsonCache:
if os.path.isfile(url):
with open(url, "rt", encoding="UTF-8") as jsonFile:
# don't cache local manifest
return json.loads(jsonFile.read())
else:
with tempfile.TemporaryDirectory() as tmp:
if not GetFiles.getFile(url, tmp, "manifest.json", quiet=True):
# TODO: provide the error code and only cache 404...
self._jsonCache[url] = {}
return {}
with open(os.path.join(tmp, "manifest.json"), "rt", encoding="UTF-8") as jsonFile:
data = jsonFile.read()
self._jsonCache[url] = json.loads(data)
CraftCore.log.debug(f"cacheJsonFromUrl: {url}\n{data}")
return self._jsonCache.get(url, {})
def getNightlyVersionsFromUrl(self, url, pattern, timeout=10) -> [str]:
"""
Returns a list of possible version number matching the regular expression in pattern.
:param url: The url to look for the nightly builds.
:param pattern: A regular expression to match the version.
:param timeout:
:return: A list of matching strings or [None]
"""
if url not in self._nightlyVersions:
if CraftCore.settings.getboolean("General", "WorkOffline"):
CraftCore.debug.step("Nightly builds unavailable for %s in offline mode." % url)
return []
try:
with urllib.request.urlopen(url, timeout=timeout) as fh:
data = str(fh.read(), "UTF-8")
vers = re.findall(pattern, data)
if not vers:
print(data)
raise Exception("Pattern %s does not match." % pattern)
out = list(set(vers))
self._nightlyVersions[url] = out
CraftCore.log.debug(f"Found nightlies for {url}: {out}")
return out
except Exception as e:
CraftCore.log.warning("Nightly builds unavailable for %s: %s" % (url, e))
return self._nightlyVersions.get(url, [])
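# Minimal usage sketch (assumes a configured Craft environment; only methods
# defined above are used):
#   cache = CraftCache._loadInstance()
#   gitPath = cache.findApplication("git")
#   gitVersion = cache.getVersion("git")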
| 40.681614 | 128 | 0.585097 | 932 | 9,072 | 5.611588 | 0.262876 | 0.034417 | 0.035755 | 0.027533 | 0.07782 | 0.045698 | 0.03652 | 0.029446 | 0 | 0 | 0 | 0.003548 | 0.316578 | 9,072 | 222 | 129 | 40.864865 | 0.84 | 0.054343 | 0 | 0.183784 | 0 | 0 | 0.094304 | 0.006107 | 0 | 0 | 0 | 0.004505 | 0 | 1 | 0.075676 | false | 0 | 0.102703 | 0.005405 | 0.335135 | 0.010811 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
204c706394af50cdc13dca585672f3febe695893 | 8,425 | py | Python | prune/visualize.py | PaulLerner/Plumcot | 7e5a6fed1bc3d099a0644f8baba77304b952ca57 | [
"MIT"
] | 1 | 2021-06-18T13:42:20.000Z | 2021-06-18T13:42:20.000Z | prune/visualize.py | PaulLerner/Plumcot | 7e5a6fed1bc3d099a0644f8baba77304b952ca57 | [
"MIT"
] | 2 | 2020-09-01T09:45:17.000Z | 2020-09-22T14:32:01.000Z | prune/visualize.py | PaulLerner/Prune | 7e5a6fed1bc3d099a0644f8baba77304b952ca57 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""Usage:
visualize.py gecko (<hypotheses_path>|<database.task.protocol>) <uri> [--map --tag_na --database.task.protocol=<database.task.protocol> --embeddings=<embeddings>]
visualize.py speakers (<hypotheses_path>|<database.task.protocol>) <uri>
    visualize.py update_distances <json_path> <uri> <database.task.protocol> [--embeddings=<embeddings>]
visualize.py stats <database.task.protocol> [--set=<set> --filter_unk --crop=<crop> --hist --verbose]
visualize.py -h | --help
gecko options:
<hypotheses_path> Path to the hypotheses (rttm file) you want to convert to gecko-json
<database.task.protocol> Experimental protocol (e.g. "Etape.SpeakerDiarization.TV")
<uri> Uri of the hypothesis you want to convert to gecko-json
--embeddings=<embeddings> Path to precomputed embeddings
--database.task.protocol=<d.t.p> Experimental protocol (e.g. "Etape.SpeakerDiarization.TV")
--map Map hypothesis label with reference
--tag_na Tag not annotated parts of the hypothesis as "#not_annotated#"
Only available if annotated is provided
stats options:
<database.task.protocol> Experimental protocol (e.g. "Etape.SpeakerDiarization.TV")
"""
import os
from pathlib import Path
import json
from docopt import docopt
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import seaborn as sns
sns.set_style("whitegrid", {'axes.grid': False})
import re
import numpy as np
from pyannote.core import Annotation, Segment
from pyannote.audio.features import Precomputed
from pyannote.database.util import load_rttm
from pyannote.database import get_protocol, get_annotated
from pyannote.metrics.diarization import DiarizationErrorRate
import pyannote.database
from Plumcot import Plumcot
import Plumcot as PC
from prune.convert import *
from prune.features import *
from prune.utils import TIMESTAMP
DATA_PATH = Path(PC.__file__).parent / "data"
def color_gen():
cm = get_cmap('Set1')
while True:
x = np.random.rand()
r, g, b, alpha = cm(x, bytes=True)
color = f'#{r:02x}{g:02x}{b:02x}'
yield color
def update_distances(args):
"""Loads user annotation from json path, converts it to pyannote `Annotation`
using regions timings.
From the annotation uri and precomputed embeddings, it computes the
in-cluster distances between every speech turns
Dumps the updated (with correct distances) JSON file to a timestamped file.
"""
json_path = Path(args['<json_path>'])
uri = args['<uri>']
with open(json_path, 'r') as file:
gecko_json = json.load(file)
hypothesis, _, _, _ = gecko_JSON_to_Annotation(gecko_json, uri, 'speaker')
colors = get_colors(uri)
    embeddings = args['--embeddings']
    precomputed = Precomputed(embeddings)
protocol = args['<database.task.protocol>']
protocol = get_protocol(protocol)
for reference in getattr(protocol, 'test')():
if reference['uri'] == uri:
features = precomputed(reference)
break
distances_per_speaker = get_distances_per_speaker(features, hypothesis)
gecko_json = annotation_to_GeckoJSON(hypothesis, distances_per_speaker, colors)
name = f"{json_path.stem}.{TIMESTAMP}.json"
updated_path = Path(json_path.parent, name)
with open(updated_path, 'w') as file:
json.dump(gecko_json, file)
print(f"succefully dumped {updated_path}")
def get_colors(uri):
db = Plumcot()
serie_uri = uri.split(".")[0]
if serie_uri not in db.get_protocols("Collection"):
# non PLUMCOT -> non-persistent colors for now
return {}
colors_dir = Path(DATA_PATH, serie_uri, 'colors')
colors_dir.mkdir(exist_ok=True)
colors_path = Path(colors_dir, f'{uri}.json')
if colors_path.exists():
with open(colors_path, "r") as file:
colors = json.load(file)
return colors
# else: extract from gecko_json or generate with matplotlib
fa = Path(DATA_PATH, serie_uri, 'forced-alignment')
# get manual annotation if exists else falls back to raw forced-alignment
annotation_json = Path(fa, f"{uri}.manual.json") if Path(fa,
f"{uri}.manual.json").exists() else Path(
fa, f"{uri}.json")
colors = {}
if annotation_json.exists():
# get colors
with open(annotation_json, 'r') as file:
annotation_json = json.load(file)
for monologue in annotation_json["monologues"]:
if not isinstance(monologue, dict):
continue
color = monologue["speaker"].get("color", next(color_gen()))
colors[monologue["speaker"]["id"]] = color
else: # no annotation -> falls back to character list
characters = db.get_characters(serie_uri)[uri]
colors = {character: next(color_gen()) for character in characters}
with open(colors_path, 'w') as file:
json.dump(colors, file)
return colors
def get_file(protocol, uri, embeddings=None):
for reference in protocol.files():
if reference['uri'] == uri:
if embeddings:
precomputed = Precomputed(embeddings)
features = precomputed(reference)
return reference, features
return reference
raise ValueError(f'{uri} is not in {protocol}')
def na():
while True:
yield "#not_annotated#"
def gecko(args):
hypotheses_path = args['<hypotheses_path>']
uri = args['<uri>']
colors = get_colors(uri)
distances = {}
if Path(hypotheses_path).exists():
hypotheses = load_rttm(hypotheses_path)
hypothesis = hypotheses[uri]
else: # protocol
protocol = get_protocol(args['<hypotheses_path>'])
reference = get_file(protocol, uri)
hypothesis = reference['annotation']
annotated = get_annotated(reference)
hypotheses_path = Path(hypotheses_path)
protocol = args['--database.task.protocol']
features = None
if protocol:
protocol = get_protocol(protocol)
embeddings = args['--embeddings']
reference, features = get_file(protocol, uri, embeddings=embeddings)
if args['--map']:
print(f"mapping {uri} with {protocol}")
diarizationErrorRate = DiarizationErrorRate()
annotated = get_annotated(reference)
optimal_mapping = diarizationErrorRate.optimal_mapping(
reference['annotation'], hypothesis, annotated)
hypothesis = hypothesis.rename_labels(mapping=optimal_mapping)
hypothesis = update_labels(hypothesis, distances) # tag unsure clusters
distances_per_speaker = get_distances_per_speaker(features,
hypothesis) if features else {}
if args['--tag_na']:
whole_file = Segment(0., annotated.segments_boundaries_[-1])
not_annotated = annotated.gaps(whole_file).to_annotation(na())
hypothesis = hypothesis.crop(annotated).update(not_annotated)
gecko_json = annotation_to_GeckoJSON(hypothesis, distances_per_speaker, colors)
if hypotheses_path.exists():
dir_path = hypotheses_path.parent
else:
dir_path = Path(".")
json_path = os.path.join(dir_path, f'{uri}.json')
with open(json_path, 'w') as file:
json.dump(gecko_json, file)
print(f"succefully dumped {json_path}")
def speakers(args):
hypotheses_path = args['<hypotheses_path>']
uri = args['<uri>']
if Path(hypotheses_path).exists():
hypotheses = load_rttm(hypotheses_path)
hypothesis = hypotheses[uri]
else: # protocol
distances = {}
protocol = get_protocol(args['<hypotheses_path>'])
reference = get_file(protocol, uri)
hypothesis = reference['annotation']
annotated = get_annotated(reference)
print(uri)
print(f"Number of speakers: {len(hypothesis.labels())}")
print(f"Chart:\n{hypothesis.chart()}")
if __name__ == '__main__':
args = docopt(__doc__)
if args['gecko']:
gecko(args)
if args['speakers']:
speakers(args)
if args['update_distances']:
update_distances(args)
if args['stats']:
from .stats import main as stats
stats(args)
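# Example invocation (file names and protocol are placeholders):
#   python visualize.py gecko hypotheses.rttm MyShow.Episode01 --map \
#       --database.task.protocol=Etape.SpeakerDiarization.TV --embeddings=emb/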
| 36.158798 | 165 | 0.651395 | 998 | 8,425 | 5.349699 | 0.217435 | 0.044578 | 0.041206 | 0.013486 | 0.275332 | 0.233752 | 0.208841 | 0.189923 | 0.189923 | 0.15059 | 0 | 0.001717 | 0.239644 | 8,425 | 232 | 166 | 36.314655 | 0.83172 | 0.227418 | 0 | 0.271605 | 0 | 0 | 0.104685 | 0.024277 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04321 | false | 0 | 0.12963 | 0 | 0.203704 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
204d1e3c250e8ba9939e2ae7da3513ab0b097992 | 16,003 | py | Python | hopper_controller/src/ros_abstraction/leg_controller.py | CreedyNZ/Hopper_ROS | 1e6354109f034a7d1d41a5b39ddcb632cfee64b2 | [
"MIT"
] | 36 | 2018-12-19T18:03:08.000Z | 2022-02-21T16:20:12.000Z | hopper_controller/src/ros_abstraction/leg_controller.py | CreedyNZ/Hopper_ROS | 1e6354109f034a7d1d41a5b39ddcb632cfee64b2 | [
"MIT"
] | null | null | null | hopper_controller/src/ros_abstraction/leg_controller.py | CreedyNZ/Hopper_ROS | 1e6354109f034a7d1d41a5b39ddcb632cfee64b2 | [
"MIT"
] | 7 | 2019-08-11T20:31:27.000Z | 2021-09-19T04:34:18.000Z | from __future__ import division
import rospy
import tf2_ros
from threading import Event
from Queue import Queue, Empty as QueueEmpty  # aliased: std_srvs.srv below also exports Empty
from hopper_controller.srv import MoveLegsToPosition, MoveCoreToPosition, MoveLegsUntilCollision, MoveLegsToRelativePosition, MoveBodyRelative, ReadCurrentLegPositions, ReadCurrentLegPositionsResponse
from std_srvs.srv import Empty, EmptyResponse
from visualization_msgs.msg import Marker
from hexapod.hexapod_ik_driver import LegPositions, Vector3, LegFlags
from hopper_feet_sensors.msg import FeetSensorData
from pyquaternion import Quaternion
class LegController(object):
def __init__(self, gait_engine):
super(LegController, self).__init__()
self.gait_engine = gait_engine
self.motion_queue = Queue()
self.tf_buffer = tf2_ros.Buffer()
self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
self.last_feet_msg = FeetSensorData()
self.marker_publisher = rospy.Publisher("leg_move_marker", Marker, queue_size=10)
# Subscribers
rospy.Subscriber("hopper/feet", FeetSensorData, self.on_feet_msg, queue_size=10)
rospy.Service('hopper/move_limbs_individual', MoveLegsToPosition, self.move_legs)
rospy.Service('hopper/move_body_core', MoveCoreToPosition, self.move_body)
rospy.Service('hopper/move_legs_until_collision', MoveLegsUntilCollision, self.move_until_hit)
rospy.Service('hopper/move_to_relaxed', Empty, self.move_to_relaxed)
rospy.Service('hopper/move_legs_to_relative_position', MoveLegsToRelativePosition, self.move_legs_relative)
rospy.Service('hopper/move_legs_to_relative_position_until_hit', MoveLegsToRelativePosition, self.move_legs_relative_until_hit)
rospy.Service('hopper/move_body_relative', MoveBodyRelative, self.move_body_relative)
rospy.Service('hopper/read_current_leg_positions', ReadCurrentLegPositions, self.read_current_leg_positions)
def on_feet_msg(self, feet_msg):
self.last_feet_msg = feet_msg
def move_legs(self, move_legs_cmd):
local_frame = "base_link"
command_frame = move_legs_cmd.header.frame_id
ros_transform = self.tf_buffer.lookup_transform(local_frame, command_frame, rospy.Time()).transform
frame_translation_ros, frame_rotation_ros = ros_transform.translation, ros_transform.rotation
frame_rotation = Quaternion(frame_rotation_ros.w, frame_rotation_ros.x, frame_rotation_ros.y, frame_rotation_ros.z)
frame_translation = Vector3.ros_vector3_to_overload_vector(frame_translation_ros)
move_legs_overloaded = LegPositions.ros_leg_positions_to_leg_positions(move_legs_cmd.leg_positions)
new_positions = LegPositions(
(move_legs_overloaded.left_front * frame_rotation + frame_translation) * 100.0
,(move_legs_overloaded.right_front * frame_rotation + frame_translation) * 100.0
,(move_legs_overloaded.left_middle * frame_rotation + frame_translation) * 100.0
,(move_legs_overloaded.right_middle * frame_rotation + frame_translation) * 100.0
,(move_legs_overloaded.left_rear * frame_rotation + frame_translation) * 100.0
,(move_legs_overloaded.right_rear * frame_rotation + frame_translation) * 100.0
)
current_positions = self.gait_engine.get_current_leg_positions()
desired_position = current_positions.update_from_other(new_positions, LegFlags(move_legs_cmd.selected_legs))
task_finished_event = Event()
self.motion_queue.put((task_finished_event, desired_position))
# debug marker
# self.display_marker(desired_position.left_front.x / 100, desired_position.left_front.y / 100, desired_position.left_front.z / 100)
task_finished_event.wait()
return True
def move_body(self, move_legs_cmd):
local_frame = "base_link"
command_frame = move_legs_cmd.header.frame_id
ros_transform = self.tf_buffer.lookup_transform(local_frame, command_frame, rospy.Time()).transform
frame_translation_ros, frame_rotation_ros = ros_transform.translation, ros_transform.rotation
frame_rotation = Quaternion(frame_rotation_ros.w, frame_rotation_ros.x, frame_rotation_ros.y, frame_rotation_ros.z)
frame_translation = Vector3.ros_vector3_to_overload_vector(frame_translation_ros)
move_vector_overload = (-Vector3.ros_vector3_to_overload_vector(move_legs_cmd.core_movement) * frame_rotation + frame_translation) * 100.0
current_positions = self.gait_engine.get_current_leg_positions()
new_positions = current_positions.transform(move_vector_overload, LegFlags(move_legs_cmd.used_legs))
task_finished_event = Event()
self.motion_queue.put((task_finished_event, new_positions))
task_finished_event.wait()
return True
def move_until_hit(self, move_legs_cmd):
local_frame = "base_link"
command_frame = move_legs_cmd.header.frame_id
ros_transform = self.tf_buffer.lookup_transform(local_frame, command_frame, rospy.Time()).transform
frame_translation_ros, frame_rotation_ros = ros_transform.translation, ros_transform.rotation
frame_rotation = Quaternion(frame_rotation_ros.w, frame_rotation_ros.x, frame_rotation_ros.y, frame_rotation_ros.z)
frame_translation = Vector3.ros_vector3_to_overload_vector(frame_translation_ros)
move_legs_overloaded = LegPositions.ros_leg_positions_to_leg_positions(move_legs_cmd.leg_positions)
new_positions = LegPositions(
(move_legs_overloaded.left_front * frame_rotation + frame_translation) * 100.0
,(move_legs_overloaded.right_front * frame_rotation + frame_translation) * 100.0
,(move_legs_overloaded.left_middle * frame_rotation + frame_translation) * 100.0
,(move_legs_overloaded.right_middle * frame_rotation + frame_translation) * 100.0
,(move_legs_overloaded.left_rear * frame_rotation + frame_translation) * 100.0
,(move_legs_overloaded.right_rear * frame_rotation + frame_translation) * 100.0
)
current_positions = self.gait_engine.get_current_leg_positions()
desired_position = current_positions.update_from_other(new_positions, LegFlags(move_legs_cmd.selected_legs))
# move check loop
move_done = False
move_dist = 0.5 # distance to move with each step in cm
colliding_legs = LegFlags.NONE
midstep_positions = current_positions.clone()
while not move_done:
still_moving = False
if not self.last_feet_msg.left_front:
still_moving = still_moving or midstep_positions.left_front.move_towards_at_speed(desired_position.left_front, move_dist)
else:
colliding_legs |= LegFlags.LEFT_FRONT
if not self.last_feet_msg.right_front:
still_moving = still_moving or midstep_positions.right_front.move_towards_at_speed(desired_position.right_front, move_dist)
else:
colliding_legs |= LegFlags.RIGHT_FRONT
if not self.last_feet_msg.left_middle:
still_moving = still_moving or midstep_positions.left_middle.move_towards_at_speed(desired_position.left_middle, move_dist)
else:
colliding_legs |= LegFlags.LEFT_MIDDLE
if not self.last_feet_msg.right_middle:
still_moving = still_moving or midstep_positions.right_middle.move_towards_at_speed(desired_position.right_middle, move_dist)
else:
colliding_legs |= LegFlags.RIGHT_MIDDLE
if not self.last_feet_msg.left_rear:
still_moving = still_moving or midstep_positions.left_rear.move_towards_at_speed(desired_position.left_rear, move_dist)
else:
colliding_legs |= LegFlags.LEFT_REAR
if not self.last_feet_msg.right_rear:
still_moving = still_moving or midstep_positions.right_rear.move_towards_at_speed(desired_position.right_rear, move_dist)
else:
colliding_legs |= LegFlags.RIGHT_REAR
if still_moving:
task_finished_event = Event()
self.motion_queue.put((task_finished_event, midstep_positions))
task_finished_event.wait()
else:
move_done = True
return int(colliding_legs), midstep_positions
def move_to_relaxed(self, srvs_request):
relaxed_pose = self.gait_engine.get_relaxed_pose()
task_finished_event = Event()
self.motion_queue.put((task_finished_event, relaxed_pose))
task_finished_event.wait()
return EmptyResponse()
def get_transform_for_link(self, from_frame_id, to_frame_id):
ros_transform = self.tf_buffer.lookup_transform(from_frame_id, to_frame_id, rospy.Time()).transform
frame_translation_ros, frame_rotation_ros = ros_transform.translation, ros_transform.rotation
frame_rotation = Quaternion(frame_rotation_ros.w, frame_rotation_ros.x, frame_rotation_ros.y, frame_rotation_ros.z)
frame_translation = Vector3.ros_vector3_to_overload_vector(frame_translation_ros)
return frame_translation, frame_rotation
def move_legs_relative(self, srvs_request):
current_positions = self.gait_engine.get_current_leg_positions() / 100.0 # convert to meters
target_positions = srvs_request.leg_positions
# for each leg
# left front
def position_for_foot(relative_vector, current_position):
relative_vector_overload = Vector3.ros_vector3_to_overload_vector(relative_vector)
return (current_position + relative_vector_overload) * 100.0
current_positions.left_front = position_for_foot(target_positions.left_front, current_positions.left_front)
current_positions.right_front = position_for_foot(target_positions.right_front, current_positions.right_front)
current_positions.left_middle = position_for_foot(target_positions.left_middle, current_positions.left_middle)
current_positions.right_middle = position_for_foot(target_positions.right_middle, current_positions.right_middle)
current_positions.left_rear = position_for_foot(target_positions.left_rear, current_positions.left_rear)
current_positions.right_rear = position_for_foot(target_positions.right_rear, current_positions.right_rear)
task_finished_event = Event()
self.motion_queue.put((task_finished_event, current_positions))
task_finished_event.wait()
return True
def move_legs_relative_until_hit(self, srvs_request):
current_positions = self.gait_engine.get_current_leg_positions()
desired_positions = current_positions.clone()
# for each leg
# left front
def position_for_foot(relative_vector, current_position):
relative_vector_overload = Vector3.ros_vector3_to_overload_vector(relative_vector) * 100.0
return (current_position + relative_vector_overload)
desired_positions.left_front = position_for_foot(srvs_request.leg_positions.left_front, desired_positions.left_front)
desired_positions.right_front = position_for_foot(srvs_request.leg_positions.right_front, desired_positions.right_front)
desired_positions.left_middle = position_for_foot(srvs_request.leg_positions.left_middle, desired_positions.left_middle)
desired_positions.right_middle = position_for_foot(srvs_request.leg_positions.right_middle, desired_positions.right_middle)
desired_positions.left_rear = position_for_foot(srvs_request.leg_positions.left_rear, desired_positions.left_rear)
desired_positions.right_rear = position_for_foot(srvs_request.leg_positions.right_rear, desired_positions.right_rear)
move_done = False
move_dist = 0.5 # distance to move with each step in cm
colliding_legs = LegFlags.NONE
midstep_positions = current_positions.clone()
while not move_done:
still_moving = False
if not self.last_feet_msg.left_front:
still_moving = still_moving or midstep_positions.left_front.move_towards_at_speed(desired_positions.left_front, move_dist)
else:
colliding_legs |= LegFlags.LEFT_FRONT
if not self.last_feet_msg.right_front:
still_moving = still_moving or midstep_positions.right_front.move_towards_at_speed(desired_positions.right_front, move_dist)
else:
colliding_legs |= LegFlags.RIGHT_FRONT
if not self.last_feet_msg.left_middle:
still_moving = still_moving or midstep_positions.left_middle.move_towards_at_speed(desired_positions.left_middle, move_dist)
else:
colliding_legs |= LegFlags.LEFT_MIDDLE
if not self.last_feet_msg.right_middle:
still_moving = still_moving or midstep_positions.right_middle.move_towards_at_speed(desired_positions.right_middle, move_dist)
else:
colliding_legs |= LegFlags.RIGHT_MIDDLE
if not self.last_feet_msg.left_rear:
still_moving = still_moving or midstep_positions.left_rear.move_towards_at_speed(desired_positions.left_rear, move_dist)
else:
colliding_legs |= LegFlags.LEFT_REAR
if not self.last_feet_msg.right_rear:
still_moving = still_moving or midstep_positions.right_rear.move_towards_at_speed(desired_positions.right_rear, move_dist)
else:
colliding_legs |= LegFlags.RIGHT_REAR
if still_moving:
task_finished_event = Event()
self.motion_queue.put((task_finished_event, midstep_positions))
task_finished_event.wait()
else:
move_done = True
return True
def move_body_relative(self, request):
current_positions = self.gait_engine.get_current_leg_positions()
corrected_rotation = Vector3.ros_vector3_to_overload_vector(request.rotation).rad_to_degree()
relative_vector_overload = Vector3.ros_vector3_to_overload_vector(request.translation) * 100.0
desired_position = (current_positions - relative_vector_overload).rotate(corrected_rotation)
task_finished_event = Event()
self.motion_queue.put((task_finished_event, desired_position))
task_finished_event.wait()
return True
def read_current_leg_positions(self, request):
current_positions = self.gait_engine.get_current_leg_positions() / 100.0
transform, rotation = self.get_transform_for_link(request.header.frame_id, "base_link")
current_positions = current_positions * rotation + transform
# response
response = ReadCurrentLegPositionsResponse()
response.leg_positions = current_positions
return response
def execute_motion(self):
try:
event, motion = self.motion_queue.get_nowait()
self.gait_engine.move_to_new_pose(motion, 22)
event.set()
        except QueueEmpty:
rospy.logerr("Motion queue was empty")
def is_motion_queued(self):
return not self.motion_queue.empty()
def display_marker(self, x, y, z):
marker = Marker()
marker.header.frame_id = "base_link"
marker.header.stamp = rospy.Time()
marker.type = Marker.SPHERE
marker.action = Marker.ADD
marker.pose.orientation.w = 1.
marker.pose.position.x = x
marker.pose.position.y = y
marker.pose.position.z = z
marker.scale.x = 0.1
marker.scale.y = 0.1
marker.scale.z = 0.1
marker.color.a = 1.0
marker.color.r = 0.0
marker.color.g = 1.0
marker.color.b = 0.0
marker.lifetime = rospy.Duration(0)
marker.frame_locked = True
self.marker_publisher.publish(marker)
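    # The six per-leg collision checks in move_until_hit and
    # move_legs_relative_until_hit are structurally identical; a hedged
    # refactor sketch (helper name and table are hypothetical, not used above):
    #
    #   LEGS = (("left_front", LegFlags.LEFT_FRONT),
    #           ("right_front", LegFlags.RIGHT_FRONT),
    #           ("left_middle", LegFlags.LEFT_MIDDLE),
    #           ("right_middle", LegFlags.RIGHT_MIDDLE),
    #           ("left_rear", LegFlags.LEFT_REAR),
    #           ("right_rear", LegFlags.RIGHT_REAR))
    #
    #   def _step_legs_towards(self, midstep, desired, move_dist):
    #       still_moving, colliding = False, LegFlags.NONE
    #       for name, flag in LEGS:
    #           if not getattr(self.last_feet_msg, name):
    #               moved = getattr(midstep, name).move_towards_at_speed(
    #                   getattr(desired, name), move_dist)
    #               still_moving = still_moving or moved
    #           else:
    #               colliding |= flag
    #       return still_moving, colliding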
| 57.981884 | 200 | 0.727551 | 1,992 | 16,003 | 5.421687 | 0.093876 | 0.045741 | 0.033056 | 0.019444 | 0.758241 | 0.68713 | 0.633611 | 0.616667 | 0.574259 | 0.556204 | 0 | 0.010118 | 0.203274 | 16,003 | 275 | 201 | 58.192727 | 0.836941 | 0.020121 | 0 | 0.512295 | 0 | 0 | 0.021573 | 0.015637 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065574 | false | 0 | 0.045082 | 0.004098 | 0.163934 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
204fbf91dd0345c4f0a3d33b337de07d901d56be | 1,004 | py | Python | featureExtractor/graph_features.py | MatrixBlake/AuthorProfilingAbuseDetection | 0abd109a23a52cfe4b8cfc65aac08eb9762705f6 | [
"MIT"
] | 10 | 2018-06-11T05:57:39.000Z | 2021-10-04T15:11:25.000Z | featureExtractor/graph_features.py | MatrixBlake/AuthorProfilingAbuseDetection | 0abd109a23a52cfe4b8cfc65aac08eb9762705f6 | [
"MIT"
] | 1 | 2020-12-06T13:05:35.000Z | 2021-02-10T08:01:13.000Z | featureExtractor/graph_features.py | MatrixBlake/AuthorProfilingAbuseDetection | 0abd109a23a52cfe4b8cfc65aac08eb9762705f6 | [
"MIT"
] | 7 | 2019-05-22T04:09:55.000Z | 2021-10-30T11:50:37.000Z | import numpy
import os
class GraphFeatures:
def __init__(self, CONFIG):
self.BASE = CONFIG['BASE']
self.EMBED_DIM = 200
self.authors = {}
with open(os.path.join(self.BASE, 'resources', 'authors.txt')) as authors:
for line in authors.readlines():
text_id, author_id = line.strip().split()
self.authors[text_id] = author_id
self.embeddings = {}
with open(os.path.join(self.BASE, 'resources', 'authors.emb')) as embeds:
for line in embeds.readlines():
tokens = line.strip().split()
author_id = tokens[0]
embed = [float(x) for x in tokens[1:]]
self.embeddings[author_id] = numpy.array(embed)
def extract(self, text_id):
author_id = self.authors.get(text_id, None)
if author_id is None:
return numpy.zeros(self.EMBED_DIM)
return self.embeddings.get(author_id, numpy.zeros(self.EMBED_DIM))
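# Hypothetical usage sketch (CONFIG shape taken from __init__ above):
#   extractor = GraphFeatures({'BASE': '/path/to/project'})
#   vector = extractor.extract('12345')  # numpy array; zeros if the author is unknown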
| 31.375 | 82 | 0.581673 | 127 | 1,004 | 4.456693 | 0.362205 | 0.09894 | 0.063604 | 0.074205 | 0.289753 | 0.14841 | 0.14841 | 0.14841 | 0.14841 | 0 | 0 | 0.007102 | 0.298805 | 1,004 | 31 | 83 | 32.387097 | 0.796875 | 0 | 0 | 0 | 0 | 0 | 0.043825 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.086957 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2050c46574beaa8d6730b8207eba67b732239e3d | 4,314 | py | Python | oase-root/libs/backyardlibs/monitoring_adapter/Grafana/Grafana_formatting.py | Masa-Yasuno/oase | 90f3cee73c0d9b3153808a4a72bd19984a4873f9 | [
"Apache-2.0"
] | 9 | 2020-03-25T07:51:47.000Z | 2022-02-07T00:07:28.000Z | oase-root/libs/backyardlibs/monitoring_adapter/Grafana/Grafana_formatting.py | Masa-Yasuno/oase | 90f3cee73c0d9b3153808a4a72bd19984a4873f9 | [
"Apache-2.0"
] | 1,164 | 2021-01-28T23:16:11.000Z | 2022-03-28T07:23:10.000Z | oase-root/libs/backyardlibs/monitoring_adapter/Grafana/Grafana_formatting.py | Masa-Yasuno/oase | 90f3cee73c0d9b3153808a4a72bd19984a4873f9 | [
"Apache-2.0"
] | 25 | 2020-03-17T06:48:30.000Z | 2022-02-15T15:13:44.000Z | # Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
[Overview]
  Grafana message formatting
[Args]
  HTTP request
[Return value]
  HTTP response
"""
import traceback
import datetime
import pytz
from datetime import datetime, timezone, timedelta
from django.conf import settings
from web_app.models.models import RuleType
from web_app.models.Grafana_monitoring_models import GrafanaMatchInfo
from libs.commonlibs import define as defs
from libs.commonlibs.oase_logger import OaseLogger
logger = OaseLogger.get_instance()
################################################
# Format the message
################################################
def message_formatting(grafana_message, rule_type_id, grafana_adapter_id):
"""
    [Method overview]
      Format the retrieved data for batch processing
"""
logger.logic_log('LOSI00001', 'grafana_message: %s, rule_type_id: %s, grafana_adapter_id: %s' % (len(grafana_message), rule_type_id, grafana_adapter_id))
result = True
form_data = {}
request_data_list = []
ruletypename = ''
try:
        # Make sure we actually have data to work with
        if len(grafana_message) <= 0 or rule_type_id is None or grafana_adapter_id is None:
            logger.system_log('LOSM30027', 'Grafana', len(grafana_message), rule_type_id, grafana_adapter_id)
            result = False
            raise Exception()
        # Get the rule type name
ruletypename = RuleType.objects.get(pk=rule_type_id, disuse_flag=str(defs.ENABLE)).rule_type_name
        # Loop over the results in grafana_message
for data_dic in grafana_message:
            # Skip remaining records once a data inconsistency has been found
            if not result:
continue
for i, d in enumerate(data_dic['evinfo']):
if isinstance(d, str):
d = d.replace('\n', '\\n')
data_dic['evinfo'][i] = d
request_data = { 'decisiontable' : ruletypename,
'requesttype' : '1',
'eventdatetime' : '',
'eventinfo' : data_dic['evinfo'],
}
            # Get eventdatetime: convert the epoch timestamp (lastchange) into the 2019/12/25 00:00:00 format
request_data['eventdatetime'] = datetime.fromtimestamp(
int(data_dic['evtime']),
pytz.timezone(getattr(settings, 'TIME_ZONE'))
).strftime("%Y/%m/%d %H:%M:%S")
request_data_list.append(request_data)
form_data['request'] = request_data_list
except RuleType.DoesNotExist as e:
result = False
logger.system_log('LOSM30012', rule_type_id)
logger.logic_log('LOSM00001', 'rule_type_id: %s, Traceback: %s' % (rule_type_id, traceback.format_exc()))
except Exception as e:
if result:
result = False
logger.system_log('LOSM30013')
logger.logic_log('LOSM00001', 'e: %s, Traceback: %s' % (e, traceback.format_exc()))
logger.logic_log('LOSI00002', 'result: %s' % (result))
return result, form_data
################################################
# Format into request data
################################################
def formatting_eventinfo(key_list, data_dic, eventinfo):
"""
    [Method overview]
      Format the data for a request
    [Args]
      key_list  : the configured Grafana keys
      data_dic  : the Grafana message that was retrieved
      eventinfo : event data for the request
    [Return value]
      True : formatting succeeded
      False: formatting failed
"""
hosts_list = []
    # Start formatting the request data
for grafana_key in key_list:
        # If the Grafana key is present, append its value to the list
        if grafana_key in data_dic and data_dic[grafana_key] is not None:
eventinfo.append(data_dic[grafana_key])
    # Stop building the data if the counts of key_list and eventinfo do not match
if len(key_list) != len(eventinfo):
logger.system_log('LOSM30014', len(eventinfo), len(key_list))
return False
return True
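# Illustrative call (values below are made up):
#   evinfo = []
#   formatting_eventinfo(['host', 'status'], {'host': 'web01', 'status': 'alerting'}, evinfo)
#   # -> evinfo == ['web01', 'alerting'], return value True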
| 28.95302 | 157 | 0.607557 | 473 | 4,314 | 5.348837 | 0.401691 | 0.031621 | 0.035573 | 0.026087 | 0.075099 | 0.054545 | 0.054545 | 0.054545 | 0.038735 | 0.038735 | 0 | 0.019988 | 0.257765 | 4,314 | 148 | 158 | 29.148649 | 0.770144 | 0.241076 | 0 | 0.05 | 0 | 0 | 0.108208 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.15 | 0 | 0.233333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
205195453564dfb12dd43dd630b3ac86bfdd8b65 | 6,077 | py | Python | flask_occam/mixins.py | bprinty/Flask-Occam | 4ceae0d810656c187c37eaf78ef323b7d06c6b51 | [
"MIT"
] | 2 | 2020-04-25T12:40:39.000Z | 2021-06-03T08:21:17.000Z | flask_occam/mixins.py | bprinty/Flask-Occam | 4ceae0d810656c187c37eaf78ef323b7d06c6b51 | [
"MIT"
] | 5 | 2019-10-25T04:40:32.000Z | 2021-05-26T15:07:40.000Z | flask_occam/mixins.py | bprinty/Flask-Occam | 4ceae0d810656c187c37eaf78ef323b7d06c6b51 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Database mixins
#
# ------------------------------------------------
# imports
# -------
from flask import current_app
# helpers
# -------
def current_db():
return current_app.extensions['sqlalchemy'].db
# mixins
# ------
class ModelMixin(object):
"""
Example database mixin to be used in extension.
"""
def __repr__(self):
display = self.name if hasattr(self, 'name') else self.id
return '<{}({})>'.format(self.__class__.__name__, display)
def json(self):
"""
Return dictionary with model properties. This
method should be overriden by models to account
for model-specific nuances in what to include
in return payloads.
"""
from sqlalchemy import inspect
result = {}
mapper = inspect(self.__class__)
for column in mapper.attrs:
result[column.key] = getattr(self, column.key)
return result
def commit(self):
"""
Commit change using session and return item.
"""
db = current_db()
db.session.commit()
return self
@classmethod
def get(cls, *args, **filters):
"""
Get single item using filter_by query.
"""
if len(args) == 1 and len(filters) == 0:
filters['id'] = args[0]
return cls.query.filter_by(**filters).first()
def update(self, *args, **kwargs):
"""
Update current item with specified data.
"""
# normalize inputs
if len(args) == 1 and isinstance(args[0], dict):
kwargs.update(args[0])
# use init method to include param parsing
obj = self.__class__(**kwargs)
# set params
for key in kwargs:
if hasattr(self, key):
setattr(self, key, getattr(obj, key))
del obj
db = current_db()
db.session.flush()
return self
@classmethod
def create(cls, *args, **kwargs):
"""
Create new record using specified arguments.
"""
self = cls(*args, **kwargs)
db = current_db()
db.session.add(self)
db.session.flush()
return self
def delete(self):
"""
Delete current model object.
"""
db = current_db()
db.session.delete(self)
db.session.flush()
return
@classmethod
def all(cls, limit=None, offset=0):
"""
Return all data with specified limit and offset
"""
return cls.find(limit=limit, offset=offset)
@classmethod
def find(cls, limit=None, offset=0, **filters):
"""
Search database with specified limit, offset,
and filter criteria.
"""
query = cls.query.filter_by(**filters).offset(offset)
if limit is not None:
query = query.limit(limit)
return query.all()
@classmethod
def count(cls):
"""
Return total number of items in database.
"""
return cls.query.count()
@classmethod
def upsert(cls, *args, **kwargs):
"""
Upsert specified data into database. If the data
doesn't exist in the database, it will be created,
otherwise, the record will be updated. This method
automatically detects unique keys by which to query
the database for existing records.
.. note:: The performance of this could be improved
by doing bulk operations for querying and
the create/update process.
"""
from sqlalchemy import inspect
# parse inputs
data, multiple = [], True
if len(kwargs):
data.append(kwargs)
multiple = False
elif len(args) == 1 and isinstance(args[0], (list, tuple)):
data = args[0]
else:
data = args
# gather unique columns for querying existing data
unique = []
mapper = inspect(cls)
for col in mapper.attrs:
if hasattr(col, 'columns'):
if col.columns[0].unique or col.columns[0].primary_key:
unique.append(col.key)
# query for data and create or update
result = []
for record in data:
# query using unique parameters
params = {k: record[k] for k in unique if k in record}
item = cls.get(**params) if len(params) else None
# update if item exists
if item is not None:
item.update(**record)
# create if it doesn't
else:
item = cls.create(**record)
result.append(item)
return result if multiple else result[0]
@classmethod
def load(cls, data, action=None):
"""
        Helper for loading data into the application via a config file. Using
        the following model definition as an example:
.. code-block:: python
class Item(db.Model):
__tablename__ = 'item'
# basic
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), nullable=False, unique=True, index=True)
archived = db.Column(db.Boolean, default=False)
        You can load data from the following config file:
.. code-block:: yaml
- name: item 1
archived: True
- name: item 2
archived: False
Into the application using:
.. code-block:: python
            # via model directly
            Item.load('config.yml')

            # via db
            db.load.item('config.yml')
Arguments:
data (str): File handle or path to config file.
action (callable): Function to call on each loaded item.
Takes single created item as input.
"""
db = current_db()
loader = getattr(db.load, cls.__table__.name)
return loader(data=data, action=action)
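# Hedged usage sketch (assumes a Flask app context with SQLAlchemy configured
# and the Item model from the load() docstring above):
#   item = Item.upsert(name='item 1', archived=True)
#   same = Item.get(name='item 1')
#   total = Item.count()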
| 27.008889 | 89 | 0.53974 | 684 | 6,077 | 4.73538 | 0.302632 | 0.030256 | 0.016981 | 0.016054 | 0.092004 | 0.016054 | 0.016054 | 0 | 0 | 0 | 0 | 0.005093 | 0.353793 | 6,077 | 224 | 90 | 27.129464 | 0.81971 | 0.363666 | 0 | 0.244444 | 0 | 0 | 0.009397 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.144444 | false | 0 | 0.033333 | 0.011111 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2053a68db0bd72f652dee47d009977bcd39210e3 | 2,577 | py | Python | ephios/core/signals.py | garinm90/ephios | 7d04d3287ae16ee332e31add1f25829b199f29a5 | [
"MIT"
] | null | null | null | ephios/core/signals.py | garinm90/ephios | 7d04d3287ae16ee332e31add1f25829b199f29a5 | [
"MIT"
] | null | null | null | ephios/core/signals.py | garinm90/ephios | 7d04d3287ae16ee332e31add1f25829b199f29a5 | [
"MIT"
] | null | null | null | from django.db.models.signals import post_save
from django.dispatch import receiver
from ephios.core import mail
from ephios.core.models import LocalParticipation
from ephios.core.plugins import PluginSignal
# PluginSignals are only send out to enabled plugins.
register_consequence_handlers = PluginSignal()
"""
This signal is sent out to get all known consequence handlers. Receivers should return a list of
subclasses of ``ephios.core.consequences.BaseConsequenceHandler``.
"""
register_signup_methods = PluginSignal()
"""
This signal is sent out to get all known signup methods. Receivers should return a list of
subclasses of ``ephios.core.signup.methods.BaseSignupMethod``.
"""
footer_link = PluginSignal()
"""
This signal is sent out to get links for the page footer. Receivers should return a dict
with keys being the link text and values being the URL to link to.
Receivers will receive a ``request`` keyword argument.
"""
administration_settings_section = PluginSignal()
"""
This signal is sent out to get sections for administration settings. Receivers should return a list of dicts
containing key-value-pairs for 'label', 'url' and a boolean flag 'active'.
Receivers will receive a ``request`` keyword argument.
"""
participant_from_request = PluginSignal()
"""
This signal is sent out to get a participant from a request with an unauthenticated user.
Return a subclass of AbstractParticipant or None if you cannot provide a participant.
The first non-None return-value will be used.
Receivers will receive a ``request`` keyword argument.
"""
event_forms = PluginSignal()
"""
This signal is sent out to get a list of form instances to show on the event create and update views.
You receive `event` and `request` keyword arguments you should use to create an instance of your form.
Subclass `BaseEventPluginForm` to customize the rendering behavior.
If all forms are valid, `save` will be called on your form.
"""
@receiver(
register_consequence_handlers,
dispatch_uid="ephios.core.signals.register_base_consequence_handlers",
)
def register_base_consequence_handlers(sender, **kwargs):
from ephios.core.consequences import (
QualificationConsequenceHandler,
WorkingHoursConsequenceHandler,
)
return [WorkingHoursConsequenceHandler, QualificationConsequenceHandler]
@receiver(
post_save,
sender=LocalParticipation,
dispatch_uid="ephios.core.signals.send_participation_state_changed_mail",
)
def send_participation_state_changed_mail(sender, instance, **kwargs):
mail.participation_state_changed(instance)
| 35.30137 | 108 | 0.788902 | 344 | 2,577 | 5.81686 | 0.357558 | 0.03998 | 0.065967 | 0.071964 | 0.306347 | 0.245377 | 0.231384 | 0.166917 | 0.130935 | 0.093953 | 0 | 0 | 0.141638 | 2,577 | 72 | 109 | 35.791667 | 0.904611 | 0.01979 | 0 | 0.071429 | 0 | 0 | 0.100181 | 0.100181 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.321429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
20556093e3cbd97c50d9535880c7159834951ef0 | 2,116 | py | Python | byceps/services/shop/order/actions/create_ticket_bundles.py | GyBraLAN/byceps | b53087849c10a531b66d08999116fa1bef312a7f | [
"BSD-3-Clause"
] | null | null | null | byceps/services/shop/order/actions/create_ticket_bundles.py | GyBraLAN/byceps | b53087849c10a531b66d08999116fa1bef312a7f | [
"BSD-3-Clause"
] | null | null | null | byceps/services/shop/order/actions/create_ticket_bundles.py | GyBraLAN/byceps | b53087849c10a531b66d08999116fa1bef312a7f | [
"BSD-3-Clause"
] | null | null | null | """
byceps.services.shop.order.actions.create_ticket_bundles
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from .....typing import UserID
from ....ticketing.dbmodels.ticket_bundle import TicketBundle
from ....ticketing import (
category_service as ticket_category_service,
ticket_bundle_service,
)
from ...article.transfer.models import ArticleNumber
from .. import log_service
from ..transfer.action import ActionParameters
from ..transfer.order import Order, OrderID
from ._ticketing import create_tickets_sold_event, send_tickets_sold_event
def create_ticket_bundles(
order: Order,
article_number: ArticleNumber,
bundle_quantity: int,
initiator_id: UserID,
parameters: ActionParameters,
) -> None:
"""Create ticket bundles."""
category_id = parameters['category_id']
ticket_quantity = parameters['ticket_quantity']
owned_by_id = order.placed_by_id
order_number = order.order_number
category = ticket_category_service.get_category(category_id)
for _ in range(bundle_quantity):
bundle = ticket_bundle_service.create_bundle(
category.party_id,
category.id,
ticket_quantity,
owned_by_id,
order_number=order_number,
used_by_id=owned_by_id,
)
_create_order_log_entry(order.id, bundle)
tickets_sold_event = create_tickets_sold_event(
order.id, initiator_id, category_id, owned_by_id, ticket_quantity
)
send_tickets_sold_event(tickets_sold_event)
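# Expected keys in `parameters` for create_ticket_bundles (taken from the
# lookups above): 'category_id' and 'ticket_quantity'.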
def _create_order_log_entry(
order_id: OrderID, ticket_bundle: TicketBundle
) -> None:
event_type = 'ticket-bundle-created'
data = {
'ticket_bundle_id': str(ticket_bundle.id),
'ticket_bundle_category_id': str(ticket_bundle.ticket_category_id),
'ticket_bundle_ticket_quantity': ticket_bundle.ticket_quantity,
'ticket_bundle_owner_id': str(ticket_bundle.owned_by_id),
}
log_service.create_entry(event_type, order_id, data)
| 29.388889 | 75 | 0.71172 | 252 | 2,116 | 5.579365 | 0.257937 | 0.110953 | 0.068279 | 0.036273 | 0.175676 | 0.122333 | 0 | 0 | 0 | 0 | 0 | 0.004611 | 0.180057 | 2,116 | 71 | 76 | 29.802817 | 0.805764 | 0.111059 | 0 | 0.041667 | 0 | 0 | 0.074411 | 0.051927 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.166667 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
205660960327a896bf31bd564783248e520146a9 | 1,108 | py | Python | dexter/admin/widgets.py | CodeForAfrica/mma-dexter | 10d7f0c51bb935399c708a432699e06418049a33 | [
"Apache-2.0"
] | 12 | 2015-06-14T05:50:39.000Z | 2021-09-12T17:03:47.000Z | dexter/admin/widgets.py | CodeForAfrica/mma-dexter | 10d7f0c51bb935399c708a432699e06418049a33 | [
"Apache-2.0"
] | 32 | 2019-07-25T06:17:31.000Z | 2019-08-05T02:41:42.000Z | dexter/admin/widgets.py | CodeForAfricaLabs/mma-dexter | 10d7f0c51bb935399c708a432699e06418049a33 | [
"Apache-2.0"
] | 9 | 2016-04-04T03:08:22.000Z | 2020-02-19T09:55:45.000Z | from cgi import escape
from wtforms import widgets
from wtforms.compat import text_type
from wtforms.widgets.core import html_params, HTMLString
class CheckboxSelectWidget(widgets.Select):
""" Select widget that is a list of checkboxes
"""
def __call__(self, field, **kwargs):
if 'id' in kwargs:
del kwargs['id']
class_ = kwargs.pop('class', '').replace('form-control', '')
kwargs['class'] = ''
kwargs['name'] = field.name
html = ['<div class="checkbox-list %s">' % class_]
for val, label, selected in field.iter_choices():
html.append(self.render_option(val, label, selected, **kwargs))
html.append('</div>')
return HTMLString(''.join(html))
@classmethod
def render_option(cls, value, label, selected, **kwargs):
options = dict(kwargs, value=value)
options['type'] = 'checkbox'
if selected:
options['checked'] = True
return HTMLString('<div class="checkbox"><label><input %s> %s</label></div>' % (html_params(**options), escape(text_type(label))))
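# Example (illustrative sketch, assuming a WTForms form class; the field
# name and choices are placeholders):
#
#     class MediaForm(Form):
#         topics = SelectMultipleField(
#             'Topics',
#             choices=[('news', 'News'), ('sport', 'Sport')],
#             widget=CheckboxSelectWidget(multiple=True),
#         )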
| 33.575758 | 138 | 0.620036 | 130 | 1,108 | 5.184615 | 0.453846 | 0.048961 | 0.047478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.231047 | 1,108 | 32 | 139 | 34.625 | 0.79108 | 0.037906 | 0 | 0 | 0 | 0 | 0.133776 | 0.028463 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.173913 | 0 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2059823110054469bda0c1cbc025a00aa2057e4b | 6,702 | py | Python | openspeech/lm/transformer_lm.py | CanYouImagine/openspeech | 095d78828a9caed0151727897f35534231947846 | [
"Apache-2.0",
"MIT"
] | 207 | 2021-07-22T02:04:47.000Z | 2022-03-31T07:24:12.000Z | openspeech/lm/transformer_lm.py | tqslj2/openspeech | 10307587f08615224df5a868fb5249c68c70b12d | [
"Apache-2.0",
"MIT"
] | 81 | 2021-07-21T16:52:22.000Z | 2022-03-31T14:56:54.000Z | openspeech/lm/transformer_lm.py | tqslj2/openspeech | 10307587f08615224df5a868fb5249c68c70b12d | [
"Apache-2.0",
"MIT"
] | 43 | 2021-07-21T16:33:27.000Z | 2022-03-23T09:43:49.000Z | # MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn as nn
from typing import Optional, Tuple
from openspeech.lm.openspeech_lm import OpenspeechLanguageModelBase
from openspeech.modules import (
TransformerEmbedding,
PositionalEncoding,
Linear,
PositionwiseFeedForward,
MultiHeadAttention,
get_attn_pad_mask,
get_attn_subsequent_mask,
)
class TransformerForLanguageModelLayer(nn.Module):
def __init__(
self,
d_model: int = 768,
num_attention_heads: int = 8,
d_ff: int = 2048,
dropout_p: float = 0.3,
) -> None:
super(TransformerForLanguageModelLayer, self).__init__()
self.attention_prenorm = nn.LayerNorm(d_model)
self.attention = MultiHeadAttention(d_model, num_attention_heads)
self.feed_forward_prenorm = nn.LayerNorm(d_model)
self.feed_forward = PositionwiseFeedForward(d_model=d_model, d_ff=d_ff, dropout_p=dropout_p)
def forward(
self,
inputs: torch.Tensor,
mask: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
residual = inputs
inputs = self.attention_prenorm(inputs)
outputs, _ = self.attention(inputs, inputs, inputs, mask)
outputs += residual
residual = outputs
outputs = self.feed_forward_prenorm(outputs)
outputs = self.feed_forward(outputs)
outputs += residual
return outputs
class TransformerForLanguageModel(OpenspeechLanguageModelBase):
"""
    Language Modelling is the core problem for a number of natural language processing tasks such as speech to text,
conversational system, and text summarization. A trained language model learns the likelihood of occurrence
of a word based on the previous sequence of words used in the text.
Args:
num_classes (int): number of classification
max_length (int): max decoding length (default: 128)
d_model (int): dimension of model (default: 768)
d_ff (int): dimension of feed forward network (default: 1536)
num_attention_heads (int): number of attention heads (default: 8)
pad_id (int, optional): index of the pad symbol (default: 0)
sos_id (int, optional): index of the start of sentence symbol (default: 1)
eos_id (int, optional): index of the end of sentence symbol (default: 2)
num_layers (int, optional): number of transformer layers (default: 2)
dropout_p (float, optional): dropout probability of decoders (default: 0.2)
    Inputs: inputs, input_lengths
inputs (torch.LongTensor): A input sequence passed to decoders. `IntTensor` of size ``(batch, seq_length)``
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
Returns:
* logits (torch.FloatTensor): Log probability of model predictions.
"""
def __init__(
self,
num_classes: int,
max_length: int = 128,
d_model: int = 768,
num_attention_heads: int = 8,
d_ff: int = 1536,
pad_id: int = 0,
sos_id: int = 1,
eos_id: int = 2,
num_layers: int = 2,
dropout_p: float = 0.3,
):
super(TransformerForLanguageModel, self).__init__()
self.d_model = d_model
self.num_classes = num_classes
self.num_layers = num_layers
self.max_length = max_length
self.eos_id = eos_id
self.sos_id = sos_id
self.pad_id = pad_id
self.embedding = TransformerEmbedding(num_classes, pad_id, d_model)
self.positional_encoding = PositionalEncoding(d_model)
self.input_dropout = nn.Dropout(p=dropout_p)
self.layers = nn.ModuleList([
TransformerForLanguageModelLayer(
d_model=d_model,
num_attention_heads=num_attention_heads,
d_ff=d_ff,
dropout_p=dropout_p,
) for _ in range(num_layers)
])
self.fc = nn.Sequential(
nn.LayerNorm(d_model),
Linear(d_model, d_model, bias=False),
nn.Tanh(),
Linear(d_model, num_classes, bias=False),
)
def forward_step(self, inputs, input_lengths):
pad_mask = get_attn_pad_mask(
inputs, input_lengths, inputs.size(1)
)
subsequent_mask = get_attn_subsequent_mask(inputs)
mask = torch.gt((pad_mask + subsequent_mask), 0)
outputs = self.embedding(inputs) + self.positional_encoding(inputs.size(1))
outputs = self.input_dropout(outputs)
for layer in self.layers:
outputs = layer(inputs=outputs, mask=mask)
step_outputs = self.fc(outputs).log_softmax(dim=-1)
return step_outputs
def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor) -> torch.Tensor:
"""
        Forward propagate `inputs` for training.
Args:
inputs (torch.LongTensor): A input sequence passed to decoders. `IntTensor` of size ``(batch, seq_length)``
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
Returns:
* logits (torch.FloatTensor): Log probability of model predictions.
"""
logits = list()
step_outputs = self.forward_step(inputs, input_lengths)
for di in range(step_outputs.size(1)):
step_output = step_outputs[:, di, :]
logits.append(step_output)
return torch.stack(logits, dim=1)
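# Example (illustrative sketch; the vocabulary size and batch shapes are
# placeholders, not values from the original project):
#
#     model = TransformerForLanguageModel(num_classes=1000)
#     inputs = torch.randint(0, 1000, (4, 32))             # (batch, seq_length)
#     input_lengths = torch.full((4,), 32, dtype=torch.long)
#     logits = model(inputs, input_lengths)                # (4, 32, 1000)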
| 38.965116 | 119 | 0.661594 | 837 | 6,702 | 5.130227 | 0.286738 | 0.025151 | 0.023754 | 0.011178 | 0.205636 | 0.162785 | 0.119236 | 0.119236 | 0.108989 | 0.108989 | 0 | 0.01087 | 0.258729 | 6,702 | 171 | 120 | 39.192982 | 0.853462 | 0.402716 | 0 | 0.132653 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05102 | false | 0 | 0.05102 | 0 | 0.153061 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2059a11952c6bb57e1ddde4398fa3d4afdd9f065 | 4,540 | py | Python | app.py | Mohitsharma44/uodashboard | 9654511817b06c6ce73f7cbcdd1415ba0a78004f | [
"MIT"
] | null | null | null | app.py | Mohitsharma44/uodashboard | 9654511817b06c6ce73f7cbcdd1415ba0a78004f | [
"MIT"
] | 1 | 2017-07-21T14:41:51.000Z | 2017-07-21T16:19:55.000Z | app.py | Mohitsharma44/uodashboard | 9654511817b06c6ce73f7cbcdd1415ba0a78004f | [
"MIT"
] | null | null | null | import os
import json
import time
import base64
from io import StringIO
from tornado import websocket, web, ioloop, gen, escape
# list of clients to push the data to
clients = []
class BaseHandler(web.RequestHandler):
def get_current_user(self):
return self.get_secure_cookie("user")
class AudubonHandler(BaseHandler):
"""
Class to handle the landing page
"""
@web.asynchronous
@web.authenticated
def get(self):
name = escape.xhtml_escape(self.current_user)
self.render("audubon.html",
title="UO Live",
cam1="D6",
cam2="D9")
class HadiveHandler(BaseHandler):
"""
Class to handle the Hadive project page
"""
@web.asynchronous
@web.authenticated
def get(self):
name = escape.xhtml_escape(self.current_user)
self.render("hadive.html",
title="HaDiVe")
class IndexHandler(web.RequestHandler):
"""
Class to handle index page
"""
def get(self):
self.render("index.html", title="index")
class LoginHandler(BaseHandler):
def get(self):
try:
if self.get_current_user():
self.redirect(self.get_argument('next', '/'))
return
error_msg = self.get_argument("error")
except:
error_msg = ""
self.render("login.html", errormessage=error_msg)
def post(self):
username = self.get_argument("username", "")
passwd = self.get_argument("password", "")
        # TODO: replace this hard-coded check with real authentication
if username == "mohit":
self.set_current_user(username)
self.redirect(self.request.headers.get('referer', '/'))
#self.redirect(self.get_argument("next", u"/"))
else:
error_msg = u"?error=" + escape.url_escape("Login incorrect")
self.redirect(u"/login" + error_msg)
def set_current_user(self, user):
if user:
self.set_secure_cookie("user", escape.json_encode(user))
else:
self.clear_cookie("user")
class LogoutHandler(BaseHandler):
"""
Class to handle logout
"""
def get(self):
self.clear_cookie("user")
self.redirect(self.get_argument("next", u"/"))
class RealtimeHandler(websocket.WebSocketHandler):
"""
Class to handle the sockets
"""
def check_origin(self, origin):
"""
Accept all cross-origin traffic
"""
return True
def open(self):
if self.get_secure_cookie("user"):
self.write_message("Socket opened")
if not self in clients:
clients.append(self)
else:
self.close(code=401, reason="Unauthorized")
return
def on_message(self, message):
print("Message Recieved: " + message)
def on_close(self):
if self in clients:
clients.remove(self)
print("Socket closed")
class ApiHandler(web.RequestHandler):
"""
Class to handle the data received
"""
def get(self):
pass
def post(self, *args):
# Got an image? Push it to the clients
#print(self.request.body)
self.file1 = self.request.files['file1'][0]
with open("test.png", "wb") as fh:
fh.write(self.file1.body)
self.orig_fname = self.file1['filename']
print("Got :"+str(self.orig_fname))
data = {"cam_name": str(self.request.headers['cam_name']),
"fname": str(self.orig_fname),
"updatetime": str(time.strftime("%c")),
"img": str(base64.b64encode(self.file1['body']))
}
for client in clients:
client.write_message(data)
# Send OK to the uploader and close
self.write("OK")
settings = {
'login_url': '/login',
'cookie_secret': 'L8LwECiNRxq2N0N2eGxx9MZlrpmuMEimlydNX/vt1LM=',
'template_path': 'templates/',
'compiled_template_cache': 'False',
'debug': True,
'static_path': os.path.join(os.path.dirname(__file__), "static")
}
app = web.Application(
[
(r'/login', LoginHandler),
(r'/logout', LogoutHandler),
(r'/realtime', RealtimeHandler),
(r'/upload', ApiHandler),
(r'/', IndexHandler),
(r'/projects/audubon', AudubonHandler),
(r'/projects/hadive', HadiveHandler),
],
**settings,
)
if __name__ == "__main__":
app.listen(8888)
ioloop.IOLoop.instance().start()
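# Example (illustrative): pushing a frame to connected dashboard clients
# with curl; the camera name and file name are placeholders:
#
#     curl -X POST http://localhost:8888/upload \
#          -H "cam_name: D6" \
#          -F "file1=@frame.png"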
| 27.515152 | 73 | 0.581278 | 501 | 4,540 | 5.141717 | 0.347305 | 0.024457 | 0.03028 | 0.024845 | 0.172748 | 0.110637 | 0.110637 | 0.070652 | 0.070652 | 0.070652 | 0 | 0.008964 | 0.287445 | 4,540 | 164 | 74 | 27.682927 | 0.787326 | 0.094053 | 0 | 0.166667 | 0 | 0 | 0.124937 | 0.016775 | 0 | 0 | 0 | 0.006098 | 0 | 1 | 0.122807 | false | 0.017544 | 0.052632 | 0.008772 | 0.280702 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6443f914d5d0b5b31f9340a5022b1d6bb4a641d5 | 11,233 | py | Python | orcoursetrion/tests/base.py | kboots-mit/orcoursetrion | 06f62a903077a1ddfe662e2b7214ba1f97933eb0 | [
"BSD-2-Clause"
] | 2 | 2015-06-13T16:14:26.000Z | 2015-10-27T17:19:12.000Z | orcoursetrion/tests/base.py | kboots-mit/orcoursetrion | 06f62a903077a1ddfe662e2b7214ba1f97933eb0 | [
"BSD-2-Clause"
] | 43 | 2015-02-16T17:06:14.000Z | 2018-09-07T15:33:14.000Z | orcoursetrion/tests/base.py | kboots-mit/orcoursetrion | 06f62a903077a1ddfe662e2b7214ba1f97933eb0 | [
"BSD-2-Clause"
] | 4 | 2015-05-27T19:43:59.000Z | 2022-03-17T19:01:11.000Z | # -*- coding: utf-8 -*-
"""
Test base class with commonly used methods and variables
"""
import json
import re
import unittest
import httpretty
class TestGithubBase(unittest.TestCase):
"""Test Github actions and backing library."""
OAUTH2_TOKEN = '12345'
ORG = 'NOT_REAL'
URL = 'http://localhost/'
TEST_COURSE = 'devops.001'
TEST_TERM = 'Spring_2999'
TEST_NEW_TERM = 'Spring_9999'
TEST_DESCRIPTION = 'foo'
TEST_PREFIX = 'testo'
TEST_REPO = '{0}-{1}-{2}'.format(
TEST_PREFIX, TEST_COURSE.replace('.', ''), TEST_TERM
)
TEST_RERUN_REPO = '{0}-{1}-{2}'.format(
TEST_PREFIX, TEST_COURSE.replace('.', ''), TEST_NEW_TERM
)
TEST_TEAM = 'Test-Deploy'
TEST_TEAM_ID = 1
TEST_TEAM_MEMBERS = ['archlight', 'bizarnage', 'chemistro', 'dreadnought']
TEST_STAGING_GR = 'http://gr/'
TEST_PRODUCTION_GR = 'http://prod-gr/'
def callback_repo_check(self, request, uri, headers, status_code=404):
"""Handle mocked API request for repo existence check."""
self.assertEqual(
request.headers['Authorization'],
'token {0}'.format(self.OAUTH2_TOKEN)
)
# Handle the new "rerun" repo differently
if self.TEST_RERUN_REPO in uri:
status_code = 404
return (status_code, headers, json.dumps({'message': 'testing'}))
def callback_repo_create(self, request, uri, headers, status_code=201):
"""Mock repo creation API call."""
# Disabling unused-argument because this is a callback with
# required method signature.
# pylint: disable=unused-argument
self.assertEqual(
request.headers['Authorization'],
'token {0}'.format(self.OAUTH2_TOKEN)
)
repo_dict = json.loads(request.body)
self.assertTrue(
repo_dict['name'] in [self.TEST_REPO, self.TEST_RERUN_REPO]
)
self.assertEqual(repo_dict['description'], self.TEST_DESCRIPTION)
self.assertEqual(repo_dict['private'], True)
return (status_code, headers, json.dumps({'html_url': 'testing'}))
def callback_team_list(
self, request, uri, headers, status_code=200, more=False
):
"""Mock team listing API call."""
# All arguments needed for tests
# pylint: disable=too-many-arguments
self.assertEqual(
request.headers['Authorization'],
'token {0}'.format(self.OAUTH2_TOKEN)
)
page1 = [
{
'id': 1,
'name': self.TEST_TEAM
},
{
'id': 1,
'name': self.TEST_REPO
}
]
page2 = [
{
'id': 3,
'name': 'Other Team'
},
]
current_page = request.querystring.get('page', [u'1'])
current_page = int(current_page[0])
if current_page == 2:
body = page2
else:
body = page1
if more and current_page == 1:
headers['Link'] = (
'<{uri}?page=2>; rel="next",'
'<{uri}?page=2>; rel="last"'
).format(uri=uri)
if status_code == 404:
return (status_code, headers, json.dumps({'error': 'error'}))
return (status_code, headers, json.dumps(body))
def callback_team_members(
self, request, uri, headers,
status_code=200, members=None
):
"""
Return team membership list
"""
# Disabling unused-argument because this is a callback with
# required method signature.
# pylint: disable=unused-argument,too-many-arguments
if members is None:
members = self.TEST_TEAM_MEMBERS
self.assertEqual(
request.headers['Authorization'],
'token {0}'.format(self.OAUTH2_TOKEN)
)
return (status_code, headers, json.dumps(
[dict(login=x) for x in members]
))
def callback_team_create(
self, request, uri, headers, status_code=201, read_only=True
):
"""
Create a new team as requested
"""
# Disabling unused-argument because this is a callback with
# required method signature.
# pylint: disable=unused-argument,too-many-arguments
self.assertEqual(
request.headers['Authorization'],
'token {0}'.format(self.OAUTH2_TOKEN)
)
json_body = json.loads(request.body)
for item in ['name', 'permission']:
self.assertTrue(item in json_body.keys())
if read_only:
self.assertEqual(json_body['permission'], 'pull')
else:
self.assertEqual(json_body['permission'], 'push')
return (status_code, headers, json.dumps({'id': 2}))
@staticmethod
def callback_team_membership(
request, uri, headers, success=True, action_list=None
):
"""Manage both add and delete of team membership.
``action_list`` is a list of tuples with (``username``,
``added (bool)``) to track state of membership since this will
get called multiple times in one library call.
"""
# pylint: disable=too-many-arguments
username = uri.rsplit('/', 1)[1]
if not success:
status_code = 500
if request.method == 'DELETE':
if success:
status_code = 204
action_list.append((username, False))
        if request.method == 'PUT':
            if success:
                status_code = 200
                action_list.append((username, True))
return (status_code, headers, '')
def callback_team_repo(self, request, uri, headers, status_code=204):
"""Mock adding a repo to a team API call."""
self.assertEqual(
request.headers['Authorization'],
'token {0}'.format(self.OAUTH2_TOKEN)
)
self.assertIsNotNone(re.match(
'{url}teams/[13]/repos/{org}/({repo}|{rerun_repo})'.format(
url=re.escape(self.URL),
org=self.ORG,
repo=re.escape(self.TEST_REPO),
rerun_repo=re.escape(self.TEST_RERUN_REPO)
),
uri
))
if status_code == 422:
return (status_code, headers, json.dumps({
"message": "Validation Failed",
}))
return (status_code, headers, '')
def register_repo_check(self, body):
"""Register repo check URL and method."""
httpretty.register_uri(
httpretty.GET,
re.compile(
'^{url}repos/{org}/({repo}|{repo_rerun})$'.format(
url=self.URL,
org=self.ORG,
repo=re.escape(self.TEST_REPO),
repo_rerun=re.escape(self.TEST_RERUN_REPO)
)
),
body=body
)
def register_repo_create(self, body):
"""Register url for repo create."""
httpretty.register_uri(
httpretty.POST,
'{url}orgs/{org}/repos'.format(
url=self.URL,
org=self.ORG,
),
body=body
)
def register_hook_create(self, body, status):
"""
Simple hook creation URL registration.
"""
test_url = '{url}repos/{org}/{repo}/hooks'.format(
url=self.URL,
org=self.ORG,
repo=self.TEST_REPO
)
# Register for hook endpoint
httpretty.register_uri(
httpretty.POST,
test_url,
body=body,
status=status
)
def register_hook_list(self, body=None, status=200):
"""
Simple hook list URL.
"""
if body is None:
body = json.dumps(
[{
'url': '{url}repos/{org}/{repo}/hooks/1'.format(
url=self.URL, org=self.ORG, repo=self.TEST_REPO
)
}]
)
test_url = '{url}repos/{org}/{repo}/hooks'.format(
url=self.URL,
org=self.ORG,
repo=self.TEST_REPO
)
# Register for hook endpoint
httpretty.register_uri(
httpretty.GET,
test_url,
body=body,
status=status
)
def register_hook_delete(self, status=204):
"""
Simple hook list URL.
"""
test_url = '{url}repos/{org}/{repo}/hooks/1'.format(
url=self.URL,
org=self.ORG,
repo=self.TEST_REPO
)
# Register for hook endpoint
httpretty.register_uri(
httpretty.DELETE,
test_url,
body='',
status=status
)
def register_team_list(self, body):
"""
Team listing API.
"""
httpretty.register_uri(
httpretty.GET,
'{url}orgs/{org}/teams'.format(
url=self.URL,
org=self.ORG,
),
body=body
)
def register_team_create(self, body):
"""
Create team URL/method
"""
httpretty.register_uri(
httpretty.POST,
'{url}orgs/{org}/teams'.format(
url=self.URL,
org=self.ORG,
),
body=body
)
def register_team_members(self, body):
"""
Team membership list API.
"""
httpretty.register_uri(
httpretty.GET,
re.compile(
r'^{url}teams/\d+/members$'.format(
url=re.escape(self.URL)
)
),
body=body
)
def register_team_membership(self, body):
"""
Register adding and removing team members.
"""
url_regex = re.compile(r'^{url}teams/\d+/memberships/\w+$'.format(
url=re.escape(self.URL),
))
httpretty.register_uri(
httpretty.PUT, url_regex, body=body
)
httpretty.register_uri(
httpretty.DELETE, url_regex, body=body
)
def register_team_repo_add(self, body):
"""
Register team repo addition.
"""
httpretty.register_uri(
httpretty.PUT,
re.compile(
r'^{url}teams/\d+/repos/{org}/({repo}|{rerun_repo})$'.format(
url=self.URL,
org=self.ORG,
repo=re.escape(self.TEST_REPO),
rerun_repo=re.escape(self.TEST_RERUN_REPO)
)
),
body=body
)
def register_create_file(self, status=201):
"""
File creation API
"""
httpretty.register_uri(
httpretty.PUT,
re.compile(
r'^{url}repos/{org}/{repo}/contents/.+$'.format(
url=re.escape(self.URL),
org=re.escape(self.ORG),
repo=re.escape(self.TEST_REPO),
)
),
status=status
)
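# Example (illustrative sketch of a subclass combining these helpers with
# httpretty; the exercised library call is a placeholder):
#
#     class TestRepoCreate(TestGithubBase):
#         @httpretty.activate
#         def test_create(self):
#             self.register_repo_check(self.callback_repo_check)
#             self.register_repo_create(self.callback_repo_create)
#             # ... call the code under test, then assert on the result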
| 30.691257 | 78 | 0.517404 | 1,171 | 11,233 | 4.823228 | 0.176772 | 0.037181 | 0.025496 | 0.061615 | 0.554533 | 0.489731 | 0.434667 | 0.38403 | 0.350921 | 0.304178 | 0 | 0.014266 | 0.363483 | 11,233 | 365 | 79 | 30.775342 | 0.775664 | 0.129796 | 0 | 0.411765 | 0 | 0 | 0.100477 | 0.044032 | 0 | 0 | 0 | 0 | 0.047794 | 1 | 0.066176 | false | 0 | 0.014706 | 0 | 0.172794 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6444fc949f84882592e7af529a265c5ac81da84e | 4,799 | py | Python | evm/scripts/bench_evm384.py | jnordberg/benchmarking | f212eeda06210e64a71e6180091a279342f6f215 | [
"Apache-2.0"
] | 21 | 2018-12-10T18:46:12.000Z | 2021-11-22T04:42:49.000Z | evm/scripts/bench_evm384.py | jnordberg/benchmarking | f212eeda06210e64a71e6180091a279342f6f215 | [
"Apache-2.0"
] | 94 | 2018-12-11T22:52:12.000Z | 2021-02-02T23:07:25.000Z | evm/scripts/bench_evm384.py | jnordberg/benchmarking | f212eeda06210e64a71e6180091a279342f6f215 | [
"Apache-2.0"
] | 7 | 2018-12-11T22:45:58.000Z | 2021-11-22T04:42:03.000Z | #!/usr/bin/env python3
import re
import subprocess
import nanodurationpy as durationpy
import csv
import time
import datetime
import os
import shutil
import shlex
import json
# output paths should be mounted docker volumes
RESULT_CSV_OUTPUT_PATH = "/evmraceresults"
RESULT_CSV_FILENAME = "evm_benchmarks_evmone384.csv"
EVMONE_BENCH_INFOS = [
{
"command": "/root/evmone-evm384-v1/build/bin/evmone-bench --benchmark_format=json --benchmark_color=false --benchmark_min_time=5 /root/evm384_f6m_mul/build/v1-f6m_mul_bench.bin 00 74229fc665e6c3f4401905c1a454ea57c8931739d05a074fd60400f19684d680a9e1305c25f13613dcc6cdd6e6e57d0800000000000000000000000000000000",
"bench_name": "evm384-synth-loop-v1"
},
{
"command": "/root/evmone-evm384-v2/build/bin/evmone-bench --benchmark_format=json --benchmark_color=false --benchmark_min_time=5 /root/evm384_f6m_mul/build/v2-f6m_mul_bench.bin 00 74229fc665e6c3f4401905c1a454ea57c8931739d05a074fd60400f19684d680a9e1305c25f13613dcc6cdd6e6e57d0800000000000000000000000000000000",
"bench_name": "evm384-synth-loop-v2"
},
{
"command": "/root/evmone-evm384-v2-unsafe/build/bin/evmone-bench --benchmark_format=json --benchmark_color=false --benchmark_min_time=5 /root/mem-check-disable-evm384_f6m_mul/build/v2-f6m_mul_bench.bin 00 74229fc665e6c3f4401905c1a454ea57c8931739d05a074fd60400f19684d680a9e1305c25f13613dcc6cdd6e6e57d0800000000000000000000000000000000",
"bench_name": "evm384-synth-loop-v3"
}
]
"""
root@472ab2fd1fc1:~/evm384_f6m_mul# /root/evmone-evm384-v2/build/bin/evmone-bench ~/evm384_f6m_mul/build/v2-f6m_mul_bench.bin "00" "74229fc665e6c3f4401905c1a454ea57c8931739d05a074fd60400f19684d680a9e1305c25f13613dcc6cdd6e6e57d0800000000000000000000000000000000"
Benchmarking evmone
2020-06-18 20:52:56
Running /root/evmone-evm384-v2/build/bin/evmone-bench
Run on (4 X 2294.68 MHz CPU s)
CPU Caches:
L1 Data 32K (x2)
L1 Instruction 32K (x2)
L2 Unified 256K (x2)
L3 Unified 51200K (x2)
-------------------------------------------------------------------------------------------------------
Benchmark Time CPU Iterations UserCounters...
-------------------------------------------------------------------------------------------------------
/root/evm384_f6m_mul/build/v2-f6m_mul_bench.bin 18156 us 18156 us 39 gas_rate=322.266M/s gas_used=5.85118M
"""
def do_evmone_bench(evmone_bench_cmd):
evmone_cmd = shlex.split(evmone_bench_cmd)
print("running evmone benchmark...\n{}".format(evmone_bench_cmd))
stdoutlines = []
with subprocess.Popen(evmone_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p:
for line in p.stdout: # b'\n'-separated lines
print(line, end='')
stdoutlines.append(line) # pass bytes as is
p.wait()
json_result = json.loads("".join(stdoutlines[2:]))
benchmarks = json_result['benchmarks']
benchmark_results = benchmarks[0]
gasused = int(benchmark_results['gas_used'])
total_time = str(benchmark_results['real_time']) + benchmark_results['time_unit']
time = durationpy.from_str(total_time)
return {'gas_used': gasused, 'time': time.total_seconds()}
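# The parsing above expects google-benchmark JSON shaped roughly like this
# (values illustrative; user counters appear as keys on each benchmark entry):
#
#     {
#       "benchmarks": [
#         {"real_time": 18156.0, "time_unit": "us",
#          "gas_used": 5851180.0, "gas_rate": 322266000.0}
#       ]
#     }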
def saveResults(precompile_benchmarks):
# move existing csv file to backup-datetime-folder
ts = time.time()
date_str = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
ts_folder_name = "backup-{}-{}".format(date_str, round(ts))
dest_backup_path = os.path.join(RESULT_CSV_OUTPUT_PATH, ts_folder_name)
result_file = "{}/{}".format(RESULT_CSV_OUTPUT_PATH, RESULT_CSV_FILENAME)
# back up existing result csv file
if os.path.isfile(result_file):
os.makedirs(dest_backup_path)
shutil.move(result_file, dest_backup_path)
print("existing {} moved to {}".format(RESULT_CSV_FILENAME, dest_backup_path))
with open(result_file, 'w', newline='') as bench_result_file:
fieldnames = ['engine', 'test_name', 'total_time', 'gas_used']
writer = csv.DictWriter(bench_result_file, fieldnames=fieldnames)
writer.writeheader()
for test_result in precompile_benchmarks:
writer.writerow({"engine": test_result['engine'], "test_name" : test_result['bench_name'], "gas_used" : test_result['gas_used'], "total_time" : test_result['time']})
def main():
    all_bench_results = []
    for evmone_bench_info in EVMONE_BENCH_INFOS:
        evmone_cmd = evmone_bench_info['command']
        bench_result = do_evmone_bench(evmone_cmd)
        bench_result['bench_name'] = evmone_bench_info['bench_name']
        bench_result['engine'] = "evmone384"
        all_bench_results.append(bench_result)
    saveResults(all_bench_results)
if __name__ == "__main__":
main()
| 43.234234 | 339 | 0.709731 | 571 | 4,799 | 5.698774 | 0.313485 | 0.050707 | 0.022127 | 0.029195 | 0.34327 | 0.333436 | 0.333436 | 0.333436 | 0.305163 | 0.305163 | 0 | 0.134238 | 0.140029 | 4,799 | 110 | 340 | 43.627273 | 0.654228 | 0.039175 | 0 | 0 | 0 | 0.043478 | 0.36495 | 0.249932 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.144928 | 0 | 0.202899 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64482cc0c9d8191912e52468d78f9daed36c3761 | 17,755 | py | Python | etsin_finder_search/catalog_record_converter.py | CSCfi/etsin-finder-search | c76888de65a3c32d98f78f863850a374606420c1 | [
"MIT"
] | null | null | null | etsin_finder_search/catalog_record_converter.py | CSCfi/etsin-finder-search | c76888de65a3c32d98f78f863850a374606420c1 | [
"MIT"
] | 9 | 2017-11-11T10:35:41.000Z | 2021-01-21T10:58:50.000Z | etsin_finder_search/catalog_record_converter.py | CSCfi/etsin-finder-search | c76888de65a3c32d98f78f863850a374606420c1 | [
"MIT"
] | 2 | 2018-03-06T08:19:48.000Z | 2019-03-20T06:55:16.000Z | # This file is part of the Etsin service
#
# Copyright 2017-2018 Ministry of Education and Culture, Finland
#
# :author: CSC - IT Center for Science Ltd., Espoo Finland <servicedesk@csc.fi>
# :license: MIT
from etsin_finder_search.reindexing_log import get_logger
from etsin_finder_search.utils import \
catalog_record_has_preferred_identifier, \
get_catalog_record_preferred_identifier, \
catalog_record_has_identifier, \
get_catalog_record_identifier, \
get_catalog_record_dataset_version_set, \
get_catalog_record_data_catalog_title, \
get_catalog_record_data_catalog_identifier
log = get_logger(__name__)
class CRConverter:
def convert_metax_cr_json_to_es_data_model(self, metax_cr_json):
es_dataset = {}
if metax_cr_json.get('research_dataset', False) and \
catalog_record_has_identifier(metax_cr_json) and \
catalog_record_has_preferred_identifier(metax_cr_json):
es_dataset['identifier'] = get_catalog_record_identifier(metax_cr_json)
es_dataset['preferred_identifier'] = get_catalog_record_preferred_identifier(metax_cr_json)
es_dataset['dataset_version_set'] = get_catalog_record_dataset_version_set(metax_cr_json)
es_dataset['data_catalog'] = get_catalog_record_data_catalog_title(metax_cr_json)
es_dataset['data_catalog_identifier'] = get_catalog_record_data_catalog_identifier(metax_cr_json)
m_rd = metax_cr_json['research_dataset']
if 'organization_name_fi' not in es_dataset:
es_dataset['organization_name_fi'] = []
if 'organization_name_en' not in es_dataset:
es_dataset['organization_name_en'] = []
if metax_cr_json.get('date_modified', False):
es_dataset['date_modified'] = metax_cr_json.get('date_modified')
else:
es_dataset['date_modified'] = metax_cr_json.get('date_created')
if m_rd.get('title', False):
es_dataset['title'] = m_rd.get('title')
if m_rd.get('description', False):
es_dataset['description'] = m_rd.get('description')
if m_rd.get('keyword', False):
es_dataset['keyword'] = m_rd.get('keyword')
if metax_cr_json.get('preservation_state', False):
es_dataset['preservation_state'] = metax_cr_json.get('preservation_state')
if metax_cr_json.get('preservation_identifier', False):
es_dataset['preservation_identifier'] = metax_cr_json.get('preservation_identifier')
if metax_cr_json.get('preservation_dataset_version', False):
es_dataset['preservation_dataset_version'] = metax_cr_json.get('preservation_dataset_version')
if metax_cr_json.get('preservation_dataset_origin_version', False):
es_dataset['preservation_dataset_origin_version'] = metax_cr_json.get('preservation_dataset_origin_version')
for m_other_identifier_item in m_rd.get('other_identifier', []):
if 'other_identifier' not in es_dataset:
es_dataset['other_identifier'] = []
es_other_identifier = {}
if m_other_identifier_item.get('notation'):
es_other_identifier['notation'] = m_other_identifier_item.get('notation')
if m_other_identifier_item.get('type', False):
es_other_identifier['type'] = {}
self._convert_metax_obj_containing_identifier_and_label_to_es_model(
m_other_identifier_item.get('type'), es_other_identifier['type'], 'pref_label')
es_dataset['other_identifier'].append(es_other_identifier)
if m_rd.get('access_rights', False):
if 'access_rights' not in es_dataset:
es_dataset['access_rights'] = {}
es_access_rights = es_dataset['access_rights']
CRConverter._add_descriptive_field_to_output_obj( m_rd.get('access_rights'), es_access_rights)
if m_rd.get('access_rights').get('license', False):
m_license = m_rd.get('access_rights').get('license')
self._convert_metax_obj_containing_identifier_and_label_to_es_model(m_license, es_access_rights,
'title', 'license')
if m_rd.get('access_rights').get('access_type', False):
es_dataset['access_rights']['access_type'] = {}
es_access_type = es_dataset['access_rights']['access_type']
m_type = m_rd.get('access_rights').get('access_type')
self._convert_metax_obj_containing_identifier_and_label_to_es_model(m_type, es_access_type,
'pref_label')
if m_rd.get('theme', False):
if 'theme' not in es_dataset:
es_dataset['theme'] = []
m_theme = m_rd.get('theme')
self._convert_metax_obj_containing_identifier_and_label_to_es_model(m_theme, es_dataset, 'pref_label',
'theme')
if m_rd.get('field_of_science', False):
if 'field_of_science' not in es_dataset:
es_dataset['field_of_science'] = []
m_field_of_science = m_rd.get('field_of_science')
self._convert_metax_obj_containing_identifier_and_label_to_es_model(m_field_of_science, es_dataset,
'pref_label', 'field_of_science')
for m_is_output_of_item in m_rd.get('infrastructure', []):
if 'infrastructure' not in es_dataset:
es_dataset['infrastructure'] = []
m_infrastructure = {}
self._convert_metax_obj_containing_identifier_and_label_to_es_model(m_is_output_of_item, m_infrastructure,
'pref_label')
es_dataset['infrastructure'].append(m_infrastructure)
for m_is_output_of_item in m_rd.get('is_output_of', []):
if m_is_output_of_item.get('has_funding_agency', []):
self._convert_metax_langstring_name_to_es_model(m_is_output_of_item.get('has_funding_agency'), es_dataset, 'organization_name')
if m_is_output_of_item.get('source_organization', []):
self._convert_metax_langstring_name_to_es_model(m_is_output_of_item.get('source_organization'), es_dataset, 'organization_name')
if m_rd.get('is_output_of', []):
if 'project_name_fi' not in es_dataset:
es_dataset['project_name_fi'] = []
if 'project_name_en' not in es_dataset:
es_dataset['project_name_en'] = []
self._convert_metax_langstring_name_to_es_model(m_rd.get('is_output_of'), es_dataset, 'project_name')
if 'is_output_of' not in es_dataset:
es_dataset['is_output_of'] = []
for project in m_rd.get('is_output_of'):
es_dataset['is_output_of'].append({'name': project['name']})
if 'file_type' not in es_dataset and (m_rd.get('files', False) or m_rd.get('remote_resources', False)):
es_dataset['file_type'] = []
for m_is_output_of_item in m_rd.get('files', []) + m_rd.get('remote_resources', []):
if 'file_type' in m_is_output_of_item:
m_file_type = {}
self._convert_metax_obj_containing_identifier_and_label_to_es_model(m_is_output_of_item['file_type'], m_file_type,
'pref_label')
es_dataset['file_type'].append(m_file_type)
if m_rd.get('contributor', False):
es_dataset['contributor'] = []
self._convert_metax_org_or_person_to_es_model(m_rd.get('contributor'), es_dataset, 'contributor')
self._convert_metax_langstring_name_to_es_model(m_rd.get('contributor'), es_dataset, 'organization_name')
if m_rd.get('publisher', False):
es_dataset['publisher'] = []
self._convert_metax_org_or_person_to_es_model(m_rd.get('publisher'), es_dataset, 'publisher')
self._convert_metax_langstring_name_to_es_model(m_rd.get('publisher'), es_dataset, 'organization_name')
if m_rd.get('curator', False):
es_dataset['curator'] = []
self._convert_metax_org_or_person_to_es_model(m_rd.get('curator'), es_dataset, 'curator')
self._convert_metax_langstring_name_to_es_model(m_rd.get('curator'), es_dataset, 'organization_name')
if m_rd.get('creator', False):
es_dataset['creator'] = []
self._convert_metax_org_or_person_to_es_model(m_rd.get('creator'), es_dataset, 'creator')
self._convert_metax_creator_name_to_es_model(m_rd.get('creator'), es_dataset, 'creator_name')
self._convert_metax_langstring_name_to_es_model(m_rd.get('creator'), es_dataset, 'organization_name')
if m_rd.get('rights_holder', False):
es_dataset['rights_holder'] = []
self._convert_metax_org_or_person_to_es_model(m_rd.get('rights_holder'), es_dataset, 'rights_holder')
self._convert_metax_langstring_name_to_es_model(m_rd.get('rights_holder'), es_dataset, 'organization_name')
return es_dataset
@staticmethod
def _convert_metax_obj_containing_identifier_and_label_to_es_model(m_input, es_output, m_input_label_field,
es_array_relation_name=''):
"""
If m_input is not array, set identifier and label directly on es_output.
If m_input is array, add a es_array_relation_name array relation to es_output, which will contain objects
having identifier and label each
:param m_input:
:param es_output:
:param m_input_label_field:
:param es_array_relation_name:
:return:
"""
if isinstance(m_input, list) and es_array_relation_name:
output = []
for obj in m_input:
m_input_label_is_array = isinstance(obj.get(m_input_label_field), list)
out_obj = {
'identifier': obj.get('identifier', ''),
m_input_label_field: obj.get(m_input_label_field, [] if m_input_label_is_array else {})
}
CRConverter._add_descriptive_field_to_output_obj(obj, out_obj)
output.append(out_obj)
es_output[es_array_relation_name] = output
elif isinstance(m_input, dict):
m_input_label_is_array = isinstance(m_input.get(m_input_label_field), list)
es_output['identifier'] = m_input.get('identifier', '')
es_output[m_input_label_field] = m_input.get(m_input_label_field, [] if m_input_label_is_array else {})
CRConverter._add_descriptive_field_to_output_obj(m_input, es_output)
@staticmethod
def _add_descriptive_field_to_output_obj(input_obj, output_obj):
if 'description' in input_obj:
output_obj['description'] = input_obj['description']
if 'definition' in input_obj:
output_obj['definition'] = input_obj['definition']
def _convert_metax_org_or_person_to_es_model(self, m_input, es_output, relation_name):
"""
:param m_input:
:param es_output:
:param relation_name:
:return:
"""
if isinstance(m_input, list):
output = []
for m_obj in m_input:
org_or_person = self._get_converted_single_org_or_person_es_model(m_obj)
if org_or_person is not None:
output.append(org_or_person)
else:
output = {}
if m_input:
org_or_person = self._get_converted_single_org_or_person_es_model(m_input)
if org_or_person is not None:
output = org_or_person
es_output[relation_name] = output
def _convert_metax_creator_name_to_es_model(self, m_input, es_output, relation_name):
"""
:param m_input:
:param es_output:
:param relation_name:
:return:
"""
output = []
if isinstance(m_input, list):
for m_obj in m_input:
name = self._get_converted_creator_name_es_model(m_obj)
if name is not None:
output.extend(name)
else:
if m_input:
name = self._get_converted_creator_name_es_model(m_input)
if name is not None:
output = name
es_output[relation_name] = output
def _convert_metax_langstring_name_to_es_model(self, m_input, es_output, relation_name_base):
"""
Converts an object with langstring name to two lists, one for Finnish and one for English name.
:param m_input:
:param es_output:
:param relation_name:
:return:
"""
output_fi = []
output_en = []
if isinstance(m_input, list):
for m_obj in m_input:
name_fi = self._get_converted_langstring_name_es_model(m_obj, 'fi')
name_en = self._get_converted_langstring_name_es_model(m_obj, 'en')
if name_fi is not None and name_en is not None:
output_fi.append(name_fi)
output_en.append(name_en)
if 'is_part_of' in m_obj:
self._convert_metax_langstring_name_to_es_model(m_obj['is_part_of'], es_output, relation_name_base)
if 'member_of' in m_obj:
self._convert_metax_langstring_name_to_es_model(m_obj['member_of'], es_output, relation_name_base)
else:
if m_input:
output_fi.append(self._get_converted_langstring_name_es_model(m_input, 'fi'))
output_en.append(self._get_converted_langstring_name_es_model(m_input, 'en'))
if 'is_part_of' in m_input:
self._convert_metax_langstring_name_to_es_model(m_input['is_part_of'], es_output, relation_name_base)
if 'member_of' in m_input:
self._convert_metax_langstring_name_to_es_model(m_input['member_of'], es_output, relation_name_base)
if output_fi is not None and output_en is not None:
es_output[relation_name_base + '_fi'].extend(output_fi)
es_output[relation_name_base + '_en'].extend(output_en)
def _get_converted_single_org_or_person_es_model(self, m_obj):
out_obj = self._get_es_person_or_org_common_data_obj_from_metax_agent_obj(m_obj)
if out_obj is None:
return None
agent_type = m_obj.get('@type')
if agent_type == 'Person' and m_obj.get('member_of', False):
org = self._get_es_person_or_org_common_data_obj_from_metax_agent_obj(m_obj.get('member_of'))
if org is not None:
out_obj.update({
'belongs_to_org': org
})
elif agent_type == 'Organization' and m_obj.get('is_part_of', False):
org = self._get_es_person_or_org_common_data_obj_from_metax_agent_obj(m_obj.get('is_part_of'))
if org is not None:
out_obj.update({
'belongs_to_org': org
})
return out_obj
def _get_converted_creator_name_es_model(self, m_obj):
person_or_org = self._get_es_person_or_org_common_data_obj_from_metax_agent_obj(m_obj)
if person_or_org is None:
return None
out_obj = list(person_or_org['name'].values())
return out_obj
def _get_converted_langstring_name_es_model(self, m_obj, lang):
if not isinstance(m_obj.get('name'), dict):
return None
if lang == 'fi':
preferred_order = ['fi', 'und', 'en']
elif lang == 'en':
preferred_order = ['en', 'und', 'fi']
else:
return None
for language in preferred_order:
try:
return m_obj['name'][language]
except KeyError:
continue
# If name is not available in preferred languages, choose any name
out_obj = list(m_obj['name'].values())[0]
return out_obj
@staticmethod
def _get_es_person_or_org_common_data_obj_from_metax_agent_obj(m_obj):
if not m_obj or 'name' not in m_obj or '@type' not in m_obj:
log.warning("Agent object does not have either name or @type")
return None
if m_obj['@type'] not in ['Agent', 'Person', 'Organization']:
log.warning("Agent object's @type is not one of allowed values")
return None
# Name should be langstring
name = m_obj['name']
if not isinstance(name, dict):
name = {'und': m_obj['name']}
ret_obj = {
'name': name,
'agent_type': m_obj['@type']
}
if m_obj.get('identifier', False):
ret_obj['identifier'] = m_obj['identifier']
return ret_obj
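# Example (illustrative; ``metax_cr_json`` is a catalog record dict fetched
# from the Metax API, and the indexing step is a placeholder):
#
#     converter = CRConverter()
#     es_doc = converter.convert_metax_cr_json_to_es_data_model(metax_cr_json)
#     if es_doc:
#         index_to_elasticsearch(es_doc)  # hypothetical indexing helper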
| 45.293367 | 148 | 0.610926 | 2,242 | 17,755 | 4.351026 | 0.077163 | 0.060892 | 0.025833 | 0.026653 | 0.667863 | 0.609534 | 0.470425 | 0.405843 | 0.318811 | 0.286212 | 0 | 0.000724 | 0.299859 | 17,755 | 391 | 149 | 45.409207 | 0.784008 | 0.050239 | 0 | 0.171004 | 0 | 0 | 0.143527 | 0.016903 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037175 | false | 0 | 0.007435 | 0 | 0.092937 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6448339d56590e86a2c7d522cbb83b6740c5c6f1 | 3,350 | py | Python | ndb/src/google/cloud/ndb/exceptions.py | juan-rael/google-cloud-python | 1d762a11ec8d76a7413aecdc4748699e662c4976 | [
"Apache-2.0"
] | null | null | null | ndb/src/google/cloud/ndb/exceptions.py | juan-rael/google-cloud-python | 1d762a11ec8d76a7413aecdc4748699e662c4976 | [
"Apache-2.0"
] | null | null | null | ndb/src/google/cloud/ndb/exceptions.py | juan-rael/google-cloud-python | 1d762a11ec8d76a7413aecdc4748699e662c4976 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes representing legacy Google App Engine exceptions.
Unless otherwise noted, these are meant to act as shims for the exception
types defined in the ``google.appengine.api.datastore_errors`` module in
legacy Google App Engine runtime.
"""
__all__ = [
    "Error",
    "ContextError",
    "BadValueError",
    "BadArgumentError",
    "BadQueryError",
    "Rollback",
    "BadFilterError",
    "NoLongerImplementedError",
]
class Error(Exception):
"""Base datastore error type."""
class ContextError(Error):
"""Indicates an NDB call being made without a context.
Raised whenever an NDB call is made outside of a context
established by :meth:`google.cloud.ndb.client.Client.context`.
"""
def __init__(self):
super(ContextError, self).__init__(
"No current context. NDB calls must be made in context "
"established by google.cloud.ndb.Client.context."
)
class BadValueError(Error):
"""Indicates a property value or filter value is invalid.
Raised by ``Entity.__setitem__()``, ``Query.__setitem__()``, ``Get()``,
and others.
"""
class BadArgumentError(Error):
"""Indicates an invalid argument was passed.
Raised by ``Query.Order()``, ``Iterator.Next()``, and others.
"""
class Rollback(Error):
"""Allows a transaction to be rolled back instead of committed.
Note that *any* exception raised by a transaction function will cause a
rollback. Hence, this exception type is purely for convenience.
"""
class BadQueryError(Error):
"""Raised by Query when a query or query string is invalid."""
class BadFilterError(Error):
"""Indicates a filter value is invalid.
Raised by ``Query.__setitem__()`` and ``Query.Run()`` when a filter string
is invalid.
"""
def __init__(self, filter):
self.filter = filter
message = "invalid filter: {}.".format(self.filter).encode("utf-8")
super(BadFilterError, self).__init__(message)
class NoLongerImplementedError(NotImplementedError):
"""Indicates a legacy function that is intentionally left unimplemented.
In the vast majority of cases, this should only be raised by classes,
functions, or methods that were only been used internally in legacy NDB and
are no longer necessary because of refactoring. Legacy NDB did a poor job
of distinguishing between internal and public API. Where we have determined
that something is probably not a part of the public API, we've removed it
in order to keep the supported API as clean as possible. It's possible that
in some cases we've guessed wrong. Get in touch with the NDB development
team if you think this is the case.
"""
def __init__(self):
super(NoLongerImplementedError, self).__init__("No longer implemented")
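# Example (illustrative; ``key`` and the transaction machinery that catches
# this exception are assumptions about the calling code):
#
#     def change_account(key, amount):
#         account = key.get()
#         if account is None:
#             raise Rollback()  # abort the transaction without an error
#         account.balance += amount
#         account.put()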
| 31.603774 | 79 | 0.707164 | 449 | 3,350 | 5.184855 | 0.461025 | 0.025773 | 0.014175 | 0.013746 | 0.024055 | 0.024055 | 0 | 0 | 0 | 0 | 0 | 0.003377 | 0.204478 | 3,350 | 105 | 80 | 31.904762 | 0.870169 | 0.668657 | 0 | 0.074074 | 0 | 0 | 0.224554 | 0.033578 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.407407 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6448a7050ade05bceafdf46782f6495e4df28414 | 5,074 | py | Python | sourcecode.py | hansalemaos/PDFImage2TXT | cb85c8d3496595128c2985f028dc001c525611c1 | [
"Apache-2.0",
"MIT"
] | 3 | 2021-12-08T22:46:39.000Z | 2022-02-22T02:51:00.000Z | sourcecode.py | hansalemaos/PDFImage2TXT | cb85c8d3496595128c2985f028dc001c525611c1 | [
"Apache-2.0",
"MIT"
] | null | null | null | sourcecode.py | hansalemaos/PDFImage2TXT | cb85c8d3496595128c2985f028dc001c525611c1 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-12-18T02:38:43.000Z | 2021-12-18T02:38:43.000Z | # coding: utf-8 -*-
import pathlib
from tkinter.filedialog import askopenfilename
from tkinter import Tk
import os
import re
import textwrap
import easyocr
import string
reader = easyocr.Reader(['de','en', 'pt'])
erlaubtezeichen = string.ascii_letters+string.digits
from pdf2jpg import pdf2jpg
wrapper = textwrap.TextWrapper(width=70)
def datei_auswaehlen(message='Please select Image or scanned PDF'):
content = ''
Tk().withdraw()
filetypes = [
('PDF Files', '.pdf'), ('Images', '.jpg .png .bmp .jpeg .tif .tiff')
]
datei = askopenfilename(title=message, filetypes=filetypes)
pathlibpfad = pathlib.Path(datei)
return pathlibpfad.suffix, str(pathlibpfad)
def getListOfFiles(dirName):
listOfFile = os.listdir(dirName)
allFiles = list()
for entry in listOfFile:
fullPath = os.path.join(dirName, entry)
if os.path.isdir(fullPath):
allFiles = allFiles + getListOfFiles(fullPath)
else:
allFiles.append(fullPath)
return allFiles
def pdf_auslesen(inputpath, outputpath, pages='ALL'):
result = pdf2jpg.convert_pdf2jpg(inputpath, outputpath, dpi=300, pages=pages)
return result
def bildverarbeiten(suffix, inputpath):
textdatei = inputpath.replace(suffix, '.txt')
ganzertext = []
result2 = reader.readtext(inputpath)
for resu in result2:
anhangen = resu[-2]
anhangen = anhangen.strip()
bindestrichamende = re.findall(r'\s*[-–]+\s*$', anhangen)
if not any(bindestrichamende):
anhangen = anhangen + ' '
if any(bindestrichamende):
anhangen = re.sub(r'\s*[-–]+\s*$', '', anhangen)
ganzertext.append(anhangen)
fertigertext = ''.join(ganzertext).strip()
var = '\n'.join(wrapper.wrap(fertigertext))
print(f'{var}\n\nsaved to: {textdatei}\n')
with open(textdatei, encoding='utf-8', mode='w') as f:
f.write(fertigertext)
def pdf_datei_verarbeiten(inputpath, pagenumbers):
outputpath = re.sub(r'\\[^\\]+$', '', inputpath)
pdf_auslesen(inputpath, outputpath, pages=pagenumbers)
outputdurchsuchen = outputpath + '\\' + inputpath.split('\\')[-1].replace('.pdf', '.pdf_dir')
nachumbenennung = outputdurchsuchen.split('\\')[-1]
neuerordnername = re.sub(fr'[^{erlaubtezeichen}]+', '_', nachumbenennung).strip('_')
neuerordnernameganz = outputpath + '\\' + neuerordnername
os.rename(outputdurchsuchen, neuerordnernameganz)
allekonvertiertendateien = getListOfFiles(neuerordnernameganz)
allekonvertiertendateien = [a for a in allekonvertiertendateien if any(re.findall(r'\.jpg$', a))]
neuedateinamen = []
for konvda in allekonvertiertendateien:
originaldateiname = konvda
ersterteil = re.sub(r'\\[^\\]+$', '', konvda)
konvda = re.findall(r'\\([^\\]+)$', konvda)[0]
konvda = re.sub(r'\.jpg$', '', konvda)
konvda = re.sub(fr'[^{erlaubtezeichen}]+', '_', konvda).strip('_')
konvda = ersterteil + '\\' + konvda + '.jpg'
os.rename(originaldateiname, konvda)
neuedateinamen.append(konvda)
fuerrtfdatei = ''
endatei = ''
for seitenzahl, einzelneseite in enumerate(neuedateinamen):
textdatei = re.sub(r'\.jpg\s*$', '.txt', einzelneseite)
result2 = reader.readtext(einzelneseite)
ganzertext = []
for resu in result2:
anhangen = resu[-2]
anhangen = anhangen.strip()
bindestrichamende = re.findall(r'\s*[-–]+\s*$', anhangen)
if not any(bindestrichamende):
anhangen = anhangen + ' '
if any(bindestrichamende):
anhangen = re.sub(r'\s*[-–]+\s*$', '', anhangen)
ganzertext.append(anhangen)
fertigertext = ''.join(ganzertext).strip()
var = '\n'.join(wrapper.wrap(fertigertext))
print(f'{var}\n\nsaved to: {textdatei}\n')
fuerrtfdatei = fuerrtfdatei + f'''Page {seitenzahl + 1}\n{fertigertext}\n\n-------------------------------------------------------------------------\n\n'''
endatei = textdatei
with open(textdatei, encoding='utf-8', mode='w') as f:
f.write(fertigertext)
    endatei = re.sub(r'(\\)(\d+_*)(.*)\.txt$', r'\g<1>complete_\g<3>.txt', endatei)
# print(endatei)
with open(endatei, mode='w', encoding='utf-8') as f:
f.write(fuerrtfdatei)
print(1000 * '\n')
print('Image / PDF to TXT written by Johannes Fischer www.queroestudaralemao.com.br')
print('Thanks to\nhttps://github.com/JaidedAI/EasyOCR\nhttps://github.com/pankajr141/pdf2jpg\n\nfor 99% of the work')
suffix, inputpath = datei_auswaehlen()
if suffix == '.pdf':
pagenumbers = str(input('Page numbers (separated by comma), "ALL" for whole document'))
pagenumbers_zahlen = re.findall(r'\d+', pagenumbers)
if any(pagenumbers_zahlen):
pagenumbers = ','.join(pagenumbers_zahlen).strip(',')
elif not any(pagenumbers_zahlen):
pagenumbers = 'ALL'
pdf_datei_verarbeiten(inputpath, pagenumbers)
elif suffix != '.pdf':
bildverarbeiten(suffix, inputpath)
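# Example (illustrative, non-interactive use of the main helper; the path is
# a placeholder): writes one .jpg and one .txt per page plus a combined
# "complete_<name>.txt" next to the converted images.
#
#     pdf_datei_verarbeiten(r'C:\scans\paper.pdf', pagenumbers='1,2')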
| 40.269841 | 163 | 0.631257 | 542 | 5,074 | 5.878229 | 0.309963 | 0.014124 | 0.013183 | 0.005022 | 0.263026 | 0.216573 | 0.216573 | 0.216573 | 0.216573 | 0.216573 | 0 | 0.008644 | 0.20201 | 5,074 | 125 | 164 | 40.592 | 0.777229 | 0.006307 | 0 | 0.27027 | 0 | 0.009009 | 0.15522 | 0.042477 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045045 | false | 0 | 0.081081 | 0 | 0.153153 | 0.045045 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64495a18dcefc4b3378d128ce25affb7b2d24c94 | 8,371 | py | Python | arspb/policies.py | jadenvc/ARS | f4082b3a71b1f8255e8d74ebfe4aa4bda668145f | [
"BSD-2-Clause"
] | null | null | null | arspb/policies.py | jadenvc/ARS | f4082b3a71b1f8255e8d74ebfe4aa4bda668145f | [
"BSD-2-Clause"
] | null | null | null | arspb/policies.py | jadenvc/ARS | f4082b3a71b1f8255e8d74ebfe4aa4bda668145f | [
"BSD-2-Clause"
] | null | null | null | # Lint as: python3
"""Policy class for computing action from weights and observation vector.

It is a modified policy class from third_party/py/ARS/code/policies.py.

Original authors: Horia Mania (hmania@berkeley.edu), Aurelia Guy, Benjamin Recht.
"""
import arspb.filter as ars_filter
import numpy as np
from six.moves import range
from arspb import env_utils
class Policy(object):
"""A policy class in reinforcement learning."""
def __init__(self, policy_params, update_filter=True):
"""Initializes the policy.
Args:
policy_params: The parameters of a policy, which includes dimensionality
of the observations, actions, the bounds of the actions, the parameters
of the internal observation filter and the weights of the policy.
update_filter: Whether to update the internal filters when the policy is
used. This filter is used to normalize different observations into a
similar range, which ease the learning.
"""
self.ob_dim = policy_params["ob_dim"]
self.ac_dim = policy_params["ac_dim"]
self.action_low = policy_params["action_lower_bound"]
self.action_high = policy_params["action_upper_bound"]
self.weights = np.empty(0)
# A filter for updating statistics of the observations and normalizing
# inputs to the policies
self.observation_filter = ars_filter.ars_filter(
policy_params["ob_filter"], shape=self.ob_dim)
self.update_filter = update_filter
def update_weights(self, new_weights):
self.weights[:] = new_weights[:]
return
def get_weights(self):
return self.weights
def get_observation_filter(self):
return self.observation_filter
def get_weights_plus_stats(self):
mu, std = self.observation_filter.get_stats()
aux = np.asarray([self.weights, mu, std])
return aux
def reset(self):
pass
def act(self, ob):
raise NotImplementedError
def copy(self):
raise NotImplementedError
class LinearPolicy(Policy):
"""Linear policy class that computes action as <w, ob>."""
def __init__(self, policy_params, update_filter=True):
"""Initializes the linear policy. See the base class for more details."""
Policy.__init__(self, policy_params, update_filter=update_filter)
if isinstance(self.ob_dim, dict):
self.ob_dim = sum(self.ob_dim.values())
self.weights = np.zeros(self.ac_dim * self.ob_dim, dtype=np.float64)
if "weights" in policy_params:
self.update_weights(policy_params["weights"])
mean = policy_params.get("observation_filter_mean", None)
std = policy_params.get("observation_filter_std", None)
if policy_params["ob_filter"]=="MeanStdFilter" and update_filter==False:
self.observation_filter.mean = mean
self.observation_filter.std = std
def act(self, ob):
"""Maps the observation to action.
Args:
ob: The observations in reinforcement learning.
Returns:
actions: The actions in reinforcement learning.
"""
ob = self.observation_filter(ob, update=self.update_filter)
if isinstance(ob, dict):
ob = env_utils.flatten_observations(ob)
matrix_weights = np.reshape(self.weights, (self.ac_dim, self.ob_dim))
normalized_actions = np.clip(np.dot(matrix_weights, ob), -1.0, 1.0)
actions = (
normalized_actions * (self.action_high - self.action_low) / 2.0 +
(self.action_low + self.action_high) / 2.0)
return actions
# Linear policy variant with an additive bias term.
class LinearPolicy2(Policy):
"""Linear policy class that computes action as <w, ob>+bias."""
def __init__(self, policy_params, update_filter=True):
"""Initializes the linear policy. See the base class for more details."""
Policy.__init__(self, policy_params, update_filter=update_filter)
if isinstance(self.ob_dim, dict):
self.ob_dim = sum(self.ob_dim.values())
self.weights = np.zeros(self.ac_dim * self.ob_dim+self.ac_dim, dtype=np.float64)
if "weights" in policy_params:
self.update_weights(policy_params["weights"])
mean = policy_params.get("observation_filter_mean", None)
std = policy_params.get("observation_filter_std", None)
if policy_params["ob_filter"]=="MeanStdFilter" and update_filter==False:
self.observation_filter.mean = mean
self.observation_filter.std = std
def act(self, ob):
"""Maps the observation to action.
Args:
ob: The observations in reinforcement learning.
Returns:
actions: The actions in reinforcement learning.
"""
ob = self.observation_filter(ob, update=self.update_filter)
if isinstance(ob, dict):
ob = env_utils.flatten_observations(ob)
num_weights = self.ac_dim*self.ob_dim
matrix_weights = np.reshape(self.weights[:num_weights], (self.ac_dim, self.ob_dim))
bias_weights = self.weights[num_weights:]
normalized_actions = np.clip(np.dot(matrix_weights, ob)+bias_weights, -1.0, 1.0)
actions = (
normalized_actions * (self.action_high - self.action_low) / 2.0 +
(self.action_low + self.action_high) / 2.0)
return actions
class FullyConnectedNeuralNetworkPolicy(Policy):
  """Feed-forward fully connected neural network policy."""

  def __init__(self, policy_params, update_filter=True):
    """Initializes the fully connected policy. See the base class for details."""
    Policy.__init__(self, policy_params, update_filter=update_filter)
    if isinstance(self.ob_dim, dict):
      self.ob_dim = sum(self.ob_dim.values())
    if "policy_network_size" in policy_params:
      self._hidden_layer_sizes = policy_params["policy_network_size"]
    else:
      self._hidden_layer_sizes = []
      layer_id = 0
      key = f"hidden_layer_size{layer_id}"
      while key in policy_params and policy_params[key] > 0:
        self._hidden_layer_sizes.append(policy_params[key])
        layer_id += 1
        key = f"hidden_layer_size{layer_id}"
    self._activation = policy_params.get("activation", "tanh")
    if self._activation == "tanh":
      self._activation = np.tanh
    elif self._activation == "clip":
      self._activation = lambda x: np.clip(x, -1.0, 1.0)
    self._layer_sizes = [self.ob_dim]
    self._layer_sizes.extend(self._hidden_layer_sizes)
    self._layer_sizes.append(self.ac_dim)
    self._layer_weight_start_idx = []
    self._layer_weight_end_idx = []
    num_weights = 0
    num_layers = len(self._layer_sizes)
    for ith_layer in range(num_layers - 1):
      self._layer_weight_start_idx.append(num_weights)
      num_weights += (
          self._layer_sizes[ith_layer] * self._layer_sizes[ith_layer + 1])
      self._layer_weight_end_idx.append(num_weights)
    self.weights = np.zeros(num_weights, dtype=np.float64)
    if "weights" in policy_params:
      self.update_weights(policy_params["weights"])
    mean = policy_params.get("observation_filter_mean", None)
    std = policy_params.get("observation_filter_std", None)
    n = policy_params.get("init_timesteps", None)  # currently unused
    if policy_params["ob_filter"] == "MeanStdFilter" and not update_filter:
      self.observation_filter.mean = mean
      self.observation_filter.std = std

  def act(self, ob):
    """Maps the observation to action.

    Args:
      ob: The observations in reinforcement learning.

    Returns:
      actions: The actions in reinforcement learning.
    """
    ob = self.observation_filter(ob, update=self.update_filter)
    if isinstance(ob, dict):
      ob = env_utils.flatten_observations(ob)
    ith_layer_result = ob
    num_layers = len(self._layer_sizes)
    for ith_layer in range(num_layers - 1):
      mat_weight = np.reshape(
          self.weights[self._layer_weight_start_idx[ith_layer]:
                       self._layer_weight_end_idx[ith_layer]],
          (self._layer_sizes[ith_layer + 1], self._layer_sizes[ith_layer]))
      ith_layer_result = np.dot(mat_weight, ith_layer_result)
      ith_layer_result = self._activation(ith_layer_result)
    normalized_actions = ith_layer_result
    actions = (
        normalized_actions * (self.action_high - self.action_low) / 2.0 +
        (self.action_low + self.action_high) / 2.0)
    return actions
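
# --- Illustrative sketch (not part of the original code) ---
# How the fully connected policy's flat weight vector is sliced per layer,
# using the same start/end index bookkeeping as __init__ above.
def _demo_layer_slices(layer_sizes=(4, 8, 2)):
  starts, ends, total = [], [], 0
  for n_in, n_out in zip(layer_sizes, layer_sizes[1:]):
    starts.append(total)
    total += n_in * n_out
    ends.append(total)
  # For (4, 8, 2): weights[0:32] holds the 8x4 matrix, weights[32:48] the 2x8.
  return list(zip(starts, ends)), total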
| 35.470339 | 87 | 0.699797 | 1,142 | 8,371 | 4.867776 | 0.155867 | 0.07987 | 0.027523 | 0.025184 | 0.604425 | 0.56467 | 0.554056 | 0.540205 | 0.530131 | 0.506206 | 0 | 0.006237 | 0.195556 | 8,371 | 235 | 88 | 35.621277 | 0.819275 | 0.196392 | 0 | 0.482014 | 0 | 0 | 0.071596 | 0.029481 | 0 | 0 | 0 | 0 | 0 | 1 | 0.100719 | false | 0.007194 | 0.028777 | 0.014388 | 0.208633 | 0.007194 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
644c0eea66543b024562d743d76998786e9627ec | 1,657 | py | Python | tests/operators/test_dbt_operator.py | jennybrown8/airflow-dbt | bd63ae8317770dfb490ae32548ce0e96834c7b3c | ["MIT"] | 2 | 2020-08-12T20:01:35.000Z | 2020-08-12T20:01:38.000Z | tests/operators/test_dbt_operator.py | jennybrown8/airflow-dbt | bd63ae8317770dfb490ae32548ce0e96834c7b3c | ["MIT"] | 1 | 2021-04-22T23:52:00.000Z | 2021-04-22T23:52:00.000Z | tests/operators/test_dbt_operator.py | jennybrown8/airflow-dbt | bd63ae8317770dfb490ae32548ce0e96834c7b3c | ["MIT"] | 1 | 2021-03-12T20:51:54.000Z | 2021-03-12T20:51:54.000Z |
import datetime
from unittest import TestCase, mock

from airflow import DAG, configuration
from airflow_dbt.hooks.dbt_hook import DbtCliHook
from airflow_dbt.operators.dbt_operator import (
    DbtSeedOperator,
    DbtSnapshotOperator,
    DbtRunOperator,
    DbtTestOperator
)


class TestDbtOperator(TestCase):
    def setUp(self):
        configuration.conf.load_test_config()
        args = {
            'owner': 'airflow',
            'start_date': datetime.datetime(2020, 2, 27)
        }
        self.dag = DAG('test_dag_id', default_args=args)

    @mock.patch.object(DbtCliHook, 'run_cli')
    def test_dbt_run(self, mock_run_cli):
        operator = DbtRunOperator(
            task_id='run',
            dag=self.dag
        )
        operator.execute(None)
        mock_run_cli.assert_called_once_with('run')

    @mock.patch.object(DbtCliHook, 'run_cli')
    def test_dbt_test(self, mock_run_cli):
        operator = DbtTestOperator(
            task_id='test',
            dag=self.dag
        )
        operator.execute(None)
        mock_run_cli.assert_called_once_with('test')

    @mock.patch.object(DbtCliHook, 'run_cli')
    def test_dbt_snapshot(self, mock_run_cli):
        operator = DbtSnapshotOperator(
            task_id='snapshot',
            dag=self.dag
        )
        operator.execute(None)
        mock_run_cli.assert_called_once_with('snapshot')

    @mock.patch.object(DbtCliHook, 'run_cli')
    def test_dbt_seed(self, mock_run_cli):
        operator = DbtSeedOperator(
            task_id='seed',
            dag=self.dag
        )
        operator.execute(None)
        mock_run_cli.assert_called_once_with('seed')
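
    # --- Illustrative sketch (not in the original suite) ---
    # The four tests above differ only in operator class and dbt command, so
    # they could be folded into one data-driven test; `subTest` usage here is
    # our own suggestion, not part of the upstream repository.
    @mock.patch.object(DbtCliHook, 'run_cli')
    def test_dbt_commands(self, mock_run_cli):
        cases = [
            (DbtRunOperator, 'run'),
            (DbtTestOperator, 'test'),
            (DbtSnapshotOperator, 'snapshot'),
            (DbtSeedOperator, 'seed'),
        ]
        for operator_cls, command in cases:
            with self.subTest(command=command):
                mock_run_cli.reset_mock()
                operator = operator_cls(task_id=command + '_all', dag=self.dag)
                operator.execute(None)
                mock_run_cli.assert_called_once_with(command)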
| 29.070175 | 56 | 0.640314 | 193 | 1,657 | 5.217617 | 0.259067 | 0.0715 | 0.079444 | 0.099305 | 0.484608 | 0.397219 | 0.397219 | 0.397219 | 0.397219 | 0.234359 | 0 | 0.005724 | 0.261919 | 1,657 | 56 | 57 | 29.589286 | 0.817661 | 0 | 0 | 0.24 | 0 | 0 | 0.059747 | 0 | 0 | 0 | 0 | 0 | 0.08 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.22 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
644d64204bad398a95c41f4ac02a80097e324fcd | 2,966 | py | Python | tasks/NamedEntityRecognition.py | cmathx/transformers-tutorial | 1d27100eda528c58952ff96935effa5fbf784422 | ["MIT"] | 1 | 2021-01-19T00:45:36.000Z | 2021-01-19T00:45:36.000Z | tasks/NamedEntityRecognition.py | cmathx/transformers-tutorial | 1d27100eda528c58952ff96935effa5fbf784422 | ["MIT"] | null | null | null | tasks/NamedEntityRecognition.py | cmathx/transformers-tutorial | 1d27100eda528c58952ff96935effa5fbf784422 | ["MIT"] | null | null | null |
from transformers import pipeline
nlp = pipeline("ner")

sequence = """Hugging Face Inc. is a company based in New York City. Its headquarters are in DUMBO, therefore very
close to the Manhattan Bridge which is visible from the window."""

print(nlp(sequence))
# [
#     {'word': 'Hu', 'score': 0.9995632767677307, 'entity': 'I-ORG'},
#     {'word': '##gging', 'score': 0.9915938973426819, 'entity': 'I-ORG'},
#     {'word': 'Face', 'score': 0.9982671737670898, 'entity': 'I-ORG'},
#     {'word': 'Inc', 'score': 0.9994403719902039, 'entity': 'I-ORG'},
#     {'word': 'New', 'score': 0.9994346499443054, 'entity': 'I-LOC'},
#     {'word': 'York', 'score': 0.9993270635604858, 'entity': 'I-LOC'},
#     {'word': 'City', 'score': 0.9993864893913269, 'entity': 'I-LOC'},
#     {'word': 'D', 'score': 0.9825621843338013, 'entity': 'I-LOC'},
#     {'word': '##UM', 'score': 0.936983048915863, 'entity': 'I-LOC'},
#     {'word': '##BO', 'score': 0.8987102508544922, 'entity': 'I-LOC'},
#     {'word': 'Manhattan', 'score': 0.9758241176605225, 'entity': 'I-LOC'},
#     {'word': 'Bridge', 'score': 0.990249514579773, 'entity': 'I-LOC'}
# ]

from transformers import AutoModelForTokenClassification, AutoTokenizer
import torch

model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

label_list = [
    "O",       # Outside of a named entity
    "B-MISC",  # Beginning of a miscellaneous entity right after another miscellaneous entity
    "I-MISC",  # Miscellaneous entity
    "B-PER",   # Beginning of a person's name right after another person's name
    "I-PER",   # Person's name
    "B-ORG",   # Beginning of an organisation right after another organisation
    "I-ORG",   # Organisation
    "B-LOC",   # Beginning of a location right after another location
    "I-LOC"    # Location
]
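
# --- Illustrative sketch (not part of the original tutorial) ---
# One way to fold the token-level tags printed further below into entity
# spans; the merging rules are our own simplification of BIO decoding.
def group_entities(tagged_tokens):
    spans, open_span = [], False
    for token, tag in tagged_tokens:
        if tag == "O" or token in ("[CLS]", "[SEP]"):
            open_span = False  # close the current span on a non-entity token
            continue
        entity = tag.split("-", 1)[-1]  # strip the B-/I- prefix
        piece = token[2:] if token.startswith("##") else token
        if open_span and spans[-1][0] == entity:
            sep = "" if token.startswith("##") else " "
            spans[-1] = (entity, spans[-1][1] + sep + piece)  # extend / glue word pieces
        else:
            spans.append((entity, piece))
        open_span = True
    return spans  # e.g. [('ORG', 'Hugging Face Inc'), ('LOC', 'New York City'), ...]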
sequence = "Hugging Face Inc. is a company based in New York City. Its headquarters are in DUMBO, therefore very" \
           "close to the Manhattan Bridge."
# NB: the implicit string concatenation above joins "very" and "close" into
# "veryclose"; the '##c', '##lose' word pieces in the output below come from that.

# Bit of a hack to get the tokens with the special tokens
tokens = tokenizer.tokenize(tokenizer.decode(tokenizer.encode(sequence)))
inputs = tokenizer.encode(sequence, return_tensors="pt")

outputs = model(inputs).logits
predictions = torch.argmax(outputs, dim=2)

print([(token, label_list[prediction]) for token, prediction in zip(tokens, predictions[0].numpy())])
# [('[CLS]', 'O'), ('Hu', 'I-ORG'), ('##gging', 'I-ORG'), ('Face', 'I-ORG'), ('Inc', 'I-ORG'), ('.', 'O'), ('is', 'O'), ('a', 'O'), ('company', 'O'), ('based', 'O'), ('in', 'O'), ('New', 'I-LOC'), ('York', 'I-LOC'), ('City', 'I-LOC'), ('.', 'O'), ('Its', 'O'), ('headquarters', 'O'), ('are', 'O'), ('in', 'O'), ('D', 'I-LOC'), ('##UM', 'I-LOC'), ('##BO', 'I-LOC'), (',', 'O'), ('therefore', 'O'), ('very', 'O'), ('##c', 'O'), ('##lose', 'O'), ('to', 'O'), ('the', 'O'), ('Manhattan', 'I-LOC'), ('Bridge', 'I-LOC'), ('.', 'O'), ('[SEP]', 'O')]
| 57.038462 | 541 | 0.608227 | 379 | 2,966 | 4.746702 | 0.313984 | 0.037799 | 0.044469 | 0.054475 | 0.125625 | 0.125625 | 0.125625 | 0.125625 | 0.125625 | 0.125625 | 0 | 0.082137 | 0.154417 | 2,966 | 52 | 541 | 57.038462 | 0.635167 | 0.589346 | 0 | 0 | 0 | 0.074074 | 0.35399 | 0.040747 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
644f0bed868677fe86feb9405b9bdbfa79b1d42d | 2,723 | py | Python | poet_distributed/niches/minigrid/minigrid.py | gdicker1/poet | 388a239d957e719eff1e774f5a8587496ca15474 | ["Apache-2.0"] | null | null | null | poet_distributed/niches/minigrid/minigrid.py | gdicker1/poet | 388a239d957e719eff1e774f5a8587496ca15474 | ["Apache-2.0"] | null | null | null | poet_distributed/niches/minigrid/minigrid.py | gdicker1/poet | 388a239d957e719eff1e774f5a8587496ca15474 | ["Apache-2.0"] | null | null | null |
from ..core import Niche
from .model import Model, simulate
from .env import minigridhard_custom, Env_config
from collections import OrderedDict

DEFAULT_ENV = Env_config(
    name='default_env',
    lava_prob=[0., 0.1],
    obstacle_lvl=[0., 1.],
    box_to_ball_prob=[0., 0.3],
    door_prob=[0., 0.3],
    wall_prob=[0., 0.3])


class MiniGridNiche(Niche):
    def __init__(self, env_configs, seed, init='random', stochastic=False):
        self.model = Model(minigridhard_custom)
        if not isinstance(env_configs, list):
            env_configs = [env_configs]
        self.env_configs = OrderedDict()
        for env in env_configs:
            self.env_configs[env.name] = env
        self.seed = seed
        self.stochastic = stochastic
        self.model.make_env(seed=seed, env_config=DEFAULT_ENV)
        self.init = init

    def __getstate__(self):
        return {"env_configs": self.env_configs,
                "seed": self.seed,
                "stochastic": self.stochastic,
                "init": self.init,
                }

    def __setstate__(self, state):
        self.model = Model(minigridhard_custom)
        self.env_configs = state["env_configs"]
        self.seed = state["seed"]
        self.stochastic = state["stochastic"]
        self.model.make_env(seed=self.seed, env_config=DEFAULT_ENV)
        self.init = state["init"]

    def add_env(self, env):
        env_name = env.name
        assert env_name not in self.env_configs.keys()
        self.env_configs[env_name] = env

    def delete_env(self, env_name):
        assert env_name in self.env_configs.keys()
        self.env_configs.pop(env_name)

    def initial_theta(self):
        if self.init == 'random':
            return self.model.get_random_model_params()
        elif self.init == 'zeros':
            import numpy as np
            return np.zeros(self.model.param_count)
        else:
            raise NotImplementedError(
                'Undefined initialization scheme `{}`'.format(self.init))

    def rollout(self, theta, random_state, eval=False):
        self.model.set_model_params(theta)
        total_returns = 0
        total_length = 0
        if self.stochastic:
            seed = random_state.randint(1000000)
        else:
            seed = self.seed
        for env_config in self.env_configs.values():
            returns, lengths = simulate(
                self.model, seed=seed, train_mode=not eval, num_episode=1, env_config_this_sim=env_config)
            total_returns += returns[0]
            total_length += lengths[0]
        return total_returns / len(self.env_configs), total_length
| 35.363636 | 106 | 0.614763 | 340 | 2,723 | 4.685294 | 0.258824 | 0.119272 | 0.11425 | 0.013183 | 0.24231 | 0.140615 | 0.081607 | 0.042687 | 0 | 0 | 0 | 0.013238 | 0.278737 | 2,723 | 76 | 107 | 35.828947 | 0.797862 | 0.022402 | 0 | 0.060606 | 0 | 0 | 0.045847 | 0 | 0 | 0 | 0 | 0 | 0.030303 | 1 | 0.106061 | false | 0 | 0.075758 | 0.015152 | 0.257576 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6451a5afeb68d14a7e51707380bfe62ee1b2d8cf | 697 | py | Python | data/addresses.py | katwyp/python_training | 0dc4b8ed146ff6322864039a9e77fbd5cd62ef85 | ["Apache-2.0"] | null | null | null | data/addresses.py | katwyp/python_training | 0dc4b8ed146ff6322864039a9e77fbd5cd62ef85 | ["Apache-2.0"] | null | null | null | data/addresses.py | katwyp/python_training | 0dc4b8ed146ff6322864039a9e77fbd5cd62ef85 | ["Apache-2.0"] | null | null | null |
from model.address import Address
testdata = [
    Address(firstname="", lastname="", address="", homephone="", mobile="", workphone="", secondaryphone="",
            email="", email2="", email3=""),
    Address(firstname="address1", lastname="address1", address="address1",
            homephone="address1", mobile="address1", workphone="address1", secondaryphone="address1",
            email="address1", email2="address1", email3="address1"),
    Address(firstname="address2", lastname="address2", address="address2",
            homephone="address2", mobile="address2", workphone="address2", secondaryphone="address2",
            email="address2", email2="address2", email3="address2"),
]
| 49.785714 | 108 | 0.652798 | 59 | 697 | 7.711864 | 0.271186 | 0.105495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.044444 | 0.160689 | 697 | 13 | 109 | 53.615385 | 0.733333 | 0 | 0 | 0 | 0 | 0 | 0.229555 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6454be3091761768943878624a7f7adabf1b301c | 7,176 | py | Python | converters/usfm2html_converter.py | unfoldingWord-dev/tx-job-handler | 5364ed079bbd5b6528eeb6d12f2ca5c696e84f4f | ["MIT"] | 1 | 2020-11-25T04:07:37.000Z | 2020-11-25T04:07:37.000Z | converters/usfm2html_converter.py | unfoldingWord-dev/tx-job-handler | 5364ed079bbd5b6528eeb6d12f2ca5c696e84f4f | ["MIT"] | 52 | 2018-10-25T05:49:30.000Z | 2022-03-16T22:31:57.000Z | converters/usfm2html_converter.py | unfoldingWord-dev/tx-job-handler | 5364ed079bbd5b6528eeb6d12f2ca5c696e84f4f | ["MIT"] | null | null | null |
import os
import tempfile
import string
from bs4 import BeautifulSoup
from shutil import copyfile

from rq_settings import prefix, debug_mode_flag
from app_settings.app_settings import AppSettings
from general_tools.file_utils import write_file, remove_tree, get_files
from converters.converter import Converter
from tx_usfm_tools.transform import UsfmTransform


class Usfm2HtmlConverter(Converter):

    def convert(self):
        AppSettings.logger.debug("Processing the Bible USFM files …")

        # Find the first directory that has usfm files.
        files = get_files(directory=self.files_dir, exclude=self.EXCLUDED_FILES)
        # convert_only_list = self.check_for_exclusive_convert()
        convert_only_list = []  # Not totally sure what the above line did

        current_dir = os.path.dirname(os.path.realpath(__file__))
        with open(os.path.join(current_dir, 'templates', 'template.html')) as template_file:
            # Simple HTML template which includes $title and $content fields
            template_html = string.Template(template_file.read())
            template_html = template_html.safe_substitute(lang=self.manifest_dict['dublin_core']['language']['identifier'])

        # Convert usfm files and copy across other files
        num_successful_books = num_failed_books = 0
        for filename in sorted(files):
            if filename.endswith('.usfm'):
                base_name = os.path.basename(filename)
                if convert_only_list and (base_name not in convert_only_list):  # see if this is a file we are to convert
                    continue

                # Convert the USFM file
                self.log.info(f"Converting Bible USFM file: {base_name} …")  # Logger also issues DEBUG msg

                # Copy just the single file to be converted into a single scratch folder
                scratch_dir = tempfile.mkdtemp(prefix='tX_convert_usfm_scratch_')
                delete_scratch_dir_flag = True  # Set to False for debugging this code
                copyfile(filename, os.path.join(scratch_dir, os.path.basename(filename)))
                filebase = os.path.splitext(os.path.basename(filename))[0]

                # Do the actual USFM -> HTML conversion
                warning_list = UsfmTransform.buildSingleHtml(scratch_dir, scratch_dir, filebase)
                if warning_list:
                    for warning_msg in warning_list:
                        self.log.warning(f"{filebase} - {warning_msg}")

                # This code seems to be cleaning up or adjusting the converted HTML file
                html_filename = filebase + '.html'
                with open(os.path.join(scratch_dir, html_filename), 'rt', encoding='utf-8') as html_file:
                    converted_html = html_file.read()
                converted_html_length = len(converted_html)
                if '</p></p></p>' in converted_html:
                    AppSettings.logger.debug(f"Usfm2HtmlConverter got multiple consecutive paragraph closures in converted {html_filename}")

                # Inject the converted body into the HTML template.
                template_soup = BeautifulSoup(template_html, 'html.parser')
                template_soup.head.title.string = self.repo_subject
                converted_soup = BeautifulSoup(converted_html, 'html.parser')
                content_div = template_soup.find('div', id='content')
                content_div.clear()
                if converted_soup and converted_soup.body:
                    content_div.append(converted_soup.body)
                    content_div.body.unwrap()
                    num_successful_books += 1
                else:
                    content_div.append("ERROR! NOT CONVERTED!")
                    self.log.warning(f"USFM parsing or conversion error for {base_name}")
                    AppSettings.logger.debug(f"Got converted html: {converted_html[:600]}{' …' if len(converted_html)>600 else ''}")
                    if not converted_soup:
                        AppSettings.logger.debug("No converted_soup")
                    elif not converted_soup.body:
                        AppSettings.logger.debug("No converted_soup.body")
                    num_failed_books += 1

                output_filepath = os.path.join(self.output_dir, html_filename)
                template_soup_string = str(template_soup)
                write_file(output_filepath, template_soup_string)
                template_soup_string_length = len(template_soup_string)
                if '</p></p></p>' in template_soup_string:
                    AppSettings.logger.warning(f"Usfm2HtmlConverter got multiple consecutive paragraph closures in {html_filename}")
                if template_soup_string_length < converted_html_length * 0.67:  # What is the 33% or so that's lost ???
                    AppSettings.logger.debug(f"### Usfm2HtmlConverter wrote souped-up html of length {template_soup_string_length:,} from {converted_html_length:,} = {template_soup_string_length*100.0/converted_html_length}%")
                    self.log.warning(f"Usfm2HtmlConverter possibly lost converted html for {html_filename}")
                    AppSettings.logger.info(f"Usfm2HtmlConverter {html_filename} was {converted_html_length:,} now {template_soup_string_length:,}")
                    write_file(os.path.join(scratch_dir, filebase + '.converted.html'), template_soup_string)
                    if prefix and debug_mode_flag:
                        delete_scratch_dir_flag = False

                if delete_scratch_dir_flag:
                    remove_tree(scratch_dir)
            else:
                # Directly copy over files that are not USFM files
                try:
                    output_filepath = os.path.join(self.output_dir, os.path.basename(filename))
                    if not os.path.exists(output_filepath):
                        copyfile(filename, output_filepath)
                except Exception:
                    pass

        if num_failed_books and not num_successful_books:
            self.log.error("Conversion of all books failed!")
        self.log.info(f"Finished processing {num_successful_books} Bible USFM files.")
        return True
    # end of convert()
# end of Usfm2HtmlConverter class
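
# --- Illustrative sketch (not part of the converter) ---
# The core templating move used in convert() above, in isolation: drop a
# converted document's <body> into a template's content <div>.  This assumes
# `converted_html` contains an explicit <body> tag ('html.parser' does not
# synthesize one for bare fragments).
def _demo_inject_body(template_html, converted_html):
    from bs4 import BeautifulSoup
    template_soup = BeautifulSoup(template_html, 'html.parser')
    converted_soup = BeautifulSoup(converted_html, 'html.parser')
    content_div = template_soup.find('div', id='content')
    content_div.clear()
    content_div.append(converted_soup.body)
    content_div.body.unwrap()  # splice the children in place of the <body> wrapper
    return str(template_soup)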
| 61.862069 | 226 | 0.636148 | 845 | 7,176 | 5.20355 | 0.246154 | 0.073914 | 0.045031 | 0.047078 | 0.264499 | 0.178758 | 0.178758 | 0.140096 | 0.075051 | 0.051399 | 0 | 0.008312 | 0.279125 | 7,176 | 115 | 227 | 62.4 | 0.839358 | 0.206522 | 0 | 0.02381 | 0 | 0.011905 | 0.187456 | 0.046466 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011905 | false | 0.011905 | 0.119048 | 0 | 0.154762 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6456d6157ce3260e6b735493caaee1a68c6e74f6 | 11,764 | py | Python | xen-4.6.0/tools/python/xen/migration/legacy.py | StanPlatinum/VMI-as-a-Service | 5828a9c73815ad7e043428e7e56dc0715aaa60a1 | ["MIT"] | 3 | 2019-08-31T19:58:24.000Z | 2020-10-02T06:50:22.000Z | xen-4.6.0/tools/python/xen/migration/legacy.py | StanPlatinum/VMI-as-a-Service | 5828a9c73815ad7e043428e7e56dc0715aaa60a1 | ["MIT"] | 1 | 2020-10-16T19:13:49.000Z | 2020-10-16T19:13:49.000Z | xen-4.6.0/tools/python/xen/migration/legacy.py | StanPlatinum/ROP-detection-inside-VMs | 7b39298dd0791711cbd78fd0730b819b755cc995 | ["MIT"] | 1 | 2021-06-06T21:10:21.000Z | 2021-06-06T21:10:21.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Legacy migration stream information.

Documentation and record structures for legacy migration, for both libxc
and libxl.
"""

"""
Libxc:

SAVE/RESTORE/MIGRATE PROTOCOL
=============================

The general form of a stream of chunks is a header followed by a
body consisting of a variable number of chunks (terminated by a
chunk with type 0) followed by a trailer.

For a rolling/checkpoint (e.g. remus) migration then the body and
trailer phases can be repeated until an external event
(e.g. failure) causes the process to terminate and commit to the
most recent complete checkpoint.

HEADER
------

unsigned long     : p2m_size

extended-info (PV-only, optional):

  If first unsigned long == ~0UL then extended info is present,
  otherwise unsigned long is part of p2m. Note that p2m_size above
  does not include the length of the extended info.

  extended-info:

    unsigned long : signature == ~0UL
    uint32_t      : number of bytes remaining in extended-info

    1 or more extended-info blocks of form:
      char[4]     : block identifier
      uint32_t    : block data size
      bytes       : block data

    defined extended-info blocks:
      "vcpu"      : VCPU context info containing vcpu_guest_context_t.
                    The precise variant of the context structure
                    (e.g. 32 vs 64 bit) is distinguished by
                    the block size.
      "extv"      : Presence indicates use of extended VCPU context in
                    tail, data size is 0.

p2m (PV-only):

  consists of p2m_size bytes comprising an array of xen_pfn_t sized entries.

BODY PHASE - Format A (for live migration or Remus without compression)
----------

A series of chunks with a common header:
  int             : chunk type

If the chunk type is +ve then chunk contains guest memory data, and the
type contains the number of pages in the batch:

  unsigned long[] : PFN array, length == number of pages in batch
                    Each entry consists of XEN_DOMCTL_PFINFO_*
                    in bits 31-28 and the PFN number in bits 27-0.
  page data       : PAGE_SIZE bytes for each page marked present in PFN
                    array

If the chunk type is -ve then chunk consists of one of a number of
metadata types. See definitions of XC_SAVE_ID_* below.

If chunk type is 0 then body phase is complete.

BODY PHASE - Format B (for Remus with compression)
----------

A series of chunks with a common header:
  int             : chunk type

If the chunk type is +ve then chunk contains array of PFNs corresponding
to guest memory and type contains the number of PFNs in the batch:

  unsigned long[] : PFN array, length == number of pages in batch
                    Each entry consists of XEN_DOMCTL_PFINFO_*
                    in bits 31-28 and the PFN number in bits 27-0.

If the chunk type is -ve then chunk consists of one of a number of
metadata types. See definitions of XC_SAVE_ID_* below.

If the chunk type is -ve and equals XC_SAVE_ID_COMPRESSED_DATA, then the
chunk consists of compressed page data, in the following format:

  unsigned long   : Size of the compressed chunk to follow
  compressed data : variable length data of size indicated above.
                    This chunk consists of compressed page data.
                    The number of pages in one chunk depends on
                    the amount of space available in the sender's
                    output buffer.

Format of compressed data:

  compressed_data = <deltas>*
  delta           = <marker, run*>
  marker          = (RUNFLAG|SKIPFLAG) bitwise-or RUNLEN [1 byte marker]
  RUNFLAG         = 0
  SKIPFLAG        = 1 << 7
  RUNLEN          = 7-bit unsigned value indicating number of WORDS in the run
  run             = string of bytes of length sizeof(WORD) * RUNLEN

  If marker contains RUNFLAG, then RUNLEN * sizeof(WORD) bytes of data following
  the marker is copied into the target page at the appropriate offset indicated by
  the offset_ptr

  If marker contains SKIPFLAG, then the offset_ptr is advanced
  by RUNLEN * sizeof(WORD).

If chunk type is 0 then body phase is complete.

There can be one or more chunks with type XC_SAVE_ID_COMPRESSED_DATA,
containing compressed pages. The compressed chunks are collated to form
one single compressed chunk for the entire iteration. The number of pages
present in this final compressed chunk will be equal to the total number
of valid PFNs specified by the +ve chunks.

At the sender side, compressed pages are inserted into the output stream
in the same order as they would have been if compression logic was absent.

Until last iteration, the BODY is sent in Format A, to maintain live
migration compatibility with receivers of older Xen versions.

At the last iteration, if Remus compression was enabled, the sender sends
a trigger, XC_SAVE_ID_ENABLE_COMPRESSION to tell the receiver to parse the
BODY in Format B from the next iteration onwards.

An example sequence of chunks received in Format B:

  +16                         +ve chunk
  unsigned long[16]           PFN array
  +100                        +ve chunk
  unsigned long[100]          PFN array
  +50                         +ve chunk
  unsigned long[50]           PFN array
  XC_SAVE_ID_COMPRESSED_DATA  TAG
  N                           Length of compressed data
  N bytes of DATA             Decompresses to 166 pages
  XC_SAVE_ID_*                other xc save chunks
  0                           END BODY TAG

Corner case with checkpoint compression:

At sender side, after pausing the domain, dirty pages are usually
copied out to a temporary buffer. After the domain is resumed,
compression is done and the compressed chunk(s) are sent, followed by
other XC_SAVE_ID_* chunks.

If the temporary buffer gets full while scanning for dirty pages,
the sender stops buffering of dirty pages, compresses the temporary
buffer and sends the compressed data with XC_SAVE_ID_COMPRESSED_DATA.
The sender then resumes the buffering of dirty pages and continues
scanning for the dirty pages.

For e.g., assume that the temporary buffer can hold 4096 pages and
there are 5000 dirty pages. The following is the sequence of chunks
that the receiver will see:

  +1024                       +ve chunk
  unsigned long[1024]         PFN array
  +1024                       +ve chunk
  unsigned long[1024]         PFN array
  +1024                       +ve chunk
  unsigned long[1024]         PFN array
  +1024                       +ve chunk
  unsigned long[1024]         PFN array
  XC_SAVE_ID_COMPRESSED_DATA  TAG
  N                           Length of compressed data
  N bytes of DATA             Decompresses to 4096 pages

  +4                          +ve chunk
  unsigned long[4]            PFN array
  XC_SAVE_ID_COMPRESSED_DATA  TAG
  M                           Length of compressed data
  M bytes of DATA             Decompresses to 4 pages

  XC_SAVE_ID_*                other xc save chunks
  0                           END BODY TAG

In other words, XC_SAVE_ID_COMPRESSED_DATA can be interleaved with
+ve chunks arbitrarily. But at the receiver end, the following condition
always holds true until the end of BODY PHASE:

  num(PFN entries +ve chunks) >= num(pages received in compressed form)

TAIL PHASE
----------

Content differs for PV and HVM guests.

HVM TAIL:

  "Magic" pages:
    uint64_t      : I/O req PFN
    uint64_t      : Buffered I/O req PFN
    uint64_t      : Store PFN

  Xen HVM Context:
    uint32_t      : Length of context in bytes
    bytes         : Context data

  Qemu context:
    char[21]      : Signature:
      "QemuDeviceModelRecord" : Read Qemu save data until EOF
      "DeviceModelRecord0002" : uint32_t length field followed by that many
                                bytes of Qemu save data
      "RemusDeviceModelState" : Currently the same as "DeviceModelRecord0002".

PV TAIL:

  Unmapped PFN list : list of all the PFNs that were not in map at the close
    unsigned int    : Number of unmapped pages
    unsigned long[] : PFNs of unmapped pages

  VCPU context data : A series of VCPU records, one per present VCPU
                      Maximum and present map supplied in XC_SAVE_ID_VCPUINFO
    bytes           : VCPU context structure. Size is determined by size
                      provided in extended-info header
    bytes[128]      : Extended VCPU context (present IFF "extv" block
                      present in extended-info header)

  Shared Info Page  : 4096 bytes of shared info page
"""
CHUNK_end = 0
CHUNK_enable_verify_mode = -1
CHUNK_vcpu_info = -2
CHUNK_hvm_ident_pt = -3
CHUNK_hvm_vm86_tss = -4
CHUNK_tmem = -5
CHUNK_tmem_extra = -6
CHUNK_tsc_info = -7
CHUNK_hvm_console_pfn = -8
CHUNK_last_checkpoint = -9
CHUNK_hvm_acpi_ioports_location = -10
CHUNK_hvm_viridian = -11
CHUNK_compressed_data = -12
CHUNK_enable_compression = -13
CHUNK_hvm_generation_id_addr = -14
CHUNK_hvm_paging_ring_pfn = -15
CHUNK_hvm_monitor_ring_pfn = -16
CHUNK_hvm_sharing_ring_pfn = -17
CHUNK_toolstack = -18
CHUNK_hvm_ioreq_server_pfn = -19
CHUNK_hvm_nr_ioreq_server_pages = -20
chunk_type_to_str = {
    CHUNK_end                       : "end",
    CHUNK_enable_verify_mode        : "enable_verify_mode",
    CHUNK_vcpu_info                 : "vcpu_info",
    CHUNK_hvm_ident_pt              : "hvm_ident_pt",
    CHUNK_hvm_vm86_tss              : "hvm_vm86_tss",
    CHUNK_tmem                      : "tmem",
    CHUNK_tmem_extra                : "tmem_extra",
    CHUNK_tsc_info                  : "tsc_info",
    CHUNK_hvm_console_pfn           : "hvm_console_pfn",
    CHUNK_last_checkpoint           : "last_checkpoint",
    CHUNK_hvm_acpi_ioports_location : "hvm_acpi_ioports_location",
    CHUNK_hvm_viridian              : "hvm_viridian",
    CHUNK_compressed_data           : "compressed_data",
    CHUNK_enable_compression        : "enable_compression",
    CHUNK_hvm_generation_id_addr    : "hvm_generation_id_addr",
    CHUNK_hvm_paging_ring_pfn       : "hvm_paging_ring_pfn",
    CHUNK_hvm_monitor_ring_pfn      : "hvm_monitor_ring_pfn",
    CHUNK_hvm_sharing_ring_pfn      : "hvm_sharing_ring_pfn",
    CHUNK_toolstack                 : "toolstack",
    CHUNK_hvm_ioreq_server_pfn      : "hvm_ioreq_server_pfn",
    CHUNK_hvm_nr_ioreq_server_pages : "hvm_nr_ioreq_server_pages",
}
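
# --- Illustrative sketch (not part of the original module) ---
# How a reader might classify a chunk-type word from the stream: positive
# types are page batches (the value is the page count), negative types are
# the metadata chunks named above, and 0 ends the body phase.
def _demo_classify_chunk(chunk_type):
    if chunk_type == 0:
        return "end-of-body"
    if chunk_type > 0:
        return "page batch of %d pages" % chunk_type
    return chunk_type_to_str.get(chunk_type, "unknown chunk %d" % chunk_type)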
# Up to 1024 pages (4MB) at a time
MAX_BATCH = 1024
# Maximum #VCPUs currently supported for save/restore
MAX_VCPU_ID = 4095
"""
Libxl:
Legacy "toolstack" record layout:
Version 1:
uint32_t version
QEMU physmap data:
uint32_t count
libxl__physmap_info * count
The problem is that libxl__physmap_info was declared as:
struct libxl__physmap_info {
uint64_t phys_offset;
uint64_t start_addr;
uint64_t size;
uint32_t namelen;
char name[];
};
Which has 4 bytes of padding at the end in a 64bit build, thus not the
same between 32 and 64bit builds.
Because of the pointer arithmatic used to construct the record, the 'name' was
shifted up to start at the padding, leaving the erronious 4 bytes at the end
of the name string, after the NUL terminator.
Instead, the information described here has been changed to fit in a new
EMULATOR_XENSTORE_DATA record made of NUL terminated strings.
"""
| 37.227848 | 82 | 0.650714 | 1,638 | 11,764 | 4.518926 | 0.250916 | 0.023777 | 0.015131 | 0.020535 | 0.235612 | 0.1709 | 0.148744 | 0.148744 | 0.144285 | 0.144285 | 0 | 0.025101 | 0.302363 | 11,764 | 315 | 83 | 37.346032 | 0.876812 | 0.021081 | 0 | 0 | 0 | 0 | 0.151338 | 0.035037 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
645788a314377b0b95dd2d2eb4ba83dae9fbf763 | 2,102 | py | Python | aoc2018/dec10/solve.py | robfalck/AoC2018 | 9cc6a94d11d70ea11df4999df2fdf955cc5c155a | ["Apache-2.0"] | null | null | null | aoc2018/dec10/solve.py | robfalck/AoC2018 | 9cc6a94d11d70ea11df4999df2fdf955cc5c155a | ["Apache-2.0"] | null | null | null | aoc2018/dec10/solve.py | robfalck/AoC2018 | 9cc6a94d11d70ea11df4999df2fdf955cc5c155a | ["Apache-2.0"] | null | null | null |
from __future__ import print_function, division, absolute_import
import re

import numpy as np

re_posvel = re.compile(r'position=<(.+),(.+)> velocity=<(.+),(.+)>')

np.set_printoptions(linewidth=1024, edgeitems=1000)


def parse(s):
    """
    Parse out the positions and velocities.
    """
    match = (re_posvel.search(s))
    px, py, vx, vy = [int(g) for g in match.groups()]
    return px, py, vx, vy


def print_message(pos):
    """
    Print the message by first populating a numpy.array of the pixels.
    Then transpose it to fix the x-y axes.
    Then loop through each row and col, printing each point in the array if appropriate.
    """
    min_x = np.min(pos[:, 0])
    min_y = np.min(pos[:, 1])
    dx = int(np.max(pos[:, 0]) - min_x)
    dy = int(np.max(pos[:, 1]) - min_y)
    sky = np.zeros((dx + 1, dy + 1), dtype=int)
    for x, y in pos:
        sky[x - min_x, y - min_y] = 1
    sky = sky.T
    r, c = sky.shape
    for i in range(r):
        for j in range(c):
            char = '#' if sky[i, j] == 1 else ' '
            print(char, end='')
        print()


def solve(data):
    # Parse out the data.
    pos = np.zeros((len(data), 2), dtype=int)
    vel = np.zeros((len(data), 2), dtype=int)
    for i, line in enumerate(data):
        px_i, py_i, vx_i, vy_i = parse(line)
        pos[i, :] = px_i, py_i
        vel[i, :] = vx_i, vy_i

    # When the variance in the y position of the points is minimized, we assume the message
    # is ready. This isn't foolproof but it's probably good enough for our purposes.
    var_prev = 1E23
    for tick in range(1000000):
        var = np.var(pos[:, 1])
        if var > var_prev:
            print('at {0} ticks'.format(tick - 1))
            pos -= vel
            print_message(pos.copy())
            break
        pos += vel
        var_prev = var
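
# --- Illustrative sketch (not in the original solution) ---
# Why the y-variance heuristic works: the points converge, the spread shrinks
# to a minimum at the message tick, then grows again.  Toy two-point example:
def _demo_variance_dip(ticks=5):
    pos = np.array([-10.0, 10.0])   # y positions only
    vel = np.array([2.0, -2.0])     # closing velocities
    return [float(np.var(pos + t * vel)) for t in range(ticks)]
    # -> [100.0, 64.0, 36.0, 16.0, 4.0]: falling monotonically until the dip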

if __name__ == '__main__':
    with open('test_input.txt', 'r') as f:
        lines = [s.strip() for s in f.readlines()]
    solve(data=lines)
    print()

    with open('input.txt', 'r') as f:
        lines = [s.strip() for s in f.readlines()]
    solve(data=lines)
| 26.275 | 91 | 0.5647 | 335 | 2,102 | 3.432836 | 0.391045 | 0.005217 | 0.01913 | 0.013913 | 0.144348 | 0.132174 | 0.132174 | 0.092174 | 0.092174 | 0.092174 | 0 | 0.020722 | 0.288297 | 2,102 | 79 | 92 | 26.607595 | 0.747995 | 0.198382 | 0 | 0.122449 | 0 | 0 | 0.053528 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061224 | false | 0 | 0.061224 | 0 | 0.142857 | 0.163265 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
645a955508585ae81c1238e16367225f63eb2ee3 | 9,638 | py | Python | fun/regexp.py | jburgy/blog | 60ecb19069916fe7718f0f90c946d2d6af836d3e | ["Apache-2.0"] | 1 | 2019-05-06T12:43:33.000Z | 2019-05-06T12:43:33.000Z | fun/regexp.py | jburgy/blog | 60ecb19069916fe7718f0f90c946d2d6af836d3e | ["Apache-2.0"] | null | null | null | fun/regexp.py | jburgy/blog | 60ecb19069916fe7718f0f90c946d2d6af836d3e | ["Apache-2.0"] | 1 | 2021-03-14T16:50:51.000Z | 2021-03-14T16:50:51.000Z |
# -*-coding:utf8;-*-
""" `Regular Expression Search Algorithm`_
After playing with `Thompson's construction`_ in `C`_ and `x86`_, I decided to take
another look in python many years later. Tokenization and conversion to postfix are
lifted almost verbatim from the C implementation apart for the fact they rely on
python generators which are a very natural way to express single pass algorithms.
My original intent was to generate python bytecode. Unfortunately, Ken's original
paper relies on an `indirect branch`_ which python bytecode doesn't have. So I created
two implementations instead. The first one compiles the regular expression to an
implicit virtual machine. The virtual machine only understands 3 instructions which are
encoded by type since python lists are heterogeneous.
str
is an immediate matching instruction. The next character in the stream is compared
to the value of the instruction
tuple of ints
represent ε transitions. Ints correspond to offsets in the instruction list which
are added to the current search space. An offset past the end of the instruction
list means the search completed successfully
This implementation is very clever. The compiler maintains a stack of dangling offsets
and only mutates at most the last two. On the flip side, it's not exactly intuitive
unless you're a compiler expert.
The second implementation compiles the regular expression to a direct graph just like
python's standard `graphlib`_ module. Scanning a string with this graph is more
pythonic than the first implementation by relying on :code:`__getitem__`.
`Epsilon transitions`_ are represented by :code:`""` keys in (sub-)dictionaries that
have them. The docstring for :code:`Graph` illustrates this with examples.
Both implementations use the same 2 lists that Ken's NNODE and CNODE "functional
routines" manipulate. :code:`Instructions.__call__` and :code:`Graph.__call__` call
them simply :code:`c` for current and :code:`n` for next. :code:`c` will be on average
slightly larger in the first implementation since every :code:`|` appends to it. The
second implementation uses dictionaries to match the current character against all
possible branches at once. That difference is best understood by looking at how each
implementation compiles :code:`"a|b|c"`
.. _Regular Expression Search Algorithm: http://www.oilshell.org/archive/Thompson-1968.pdf # noqa E501
.. _Thompson's construction: https://en.wikipedia.org/wiki/Thompson%27s_construction
.. _C: https://swtch.com/~rsc/regexp/regexp-bytecode.c.txt
.. _x86: https://swtch.com/~rsc/regexp/regexp-x86.c.txt
.. _indirect branch: https://en.wikipedia.org/wiki/Indirect_branch
.. _graphlib: https://docs.python.org/3/library/graphlib.html
.. _Epsilon transitions: https://en.wikipedia.org/wiki/Epsilon_transition
"""
from enum import IntEnum
from typing import Iterable, Union

Token = IntEnum("Token", "LPAREN RPAREN ALTERN CONCAT KLEENE")
TokenOrChar = Union[str, Token]
TokenOrCharOrNone = Union[None, str, Token]

recognize = {
    "(": Token.LPAREN,
    ")": Token.RPAREN,
    "*": Token.KLEENE,
    "|": Token.ALTERN,
}.get

parens = {
    Token.LPAREN: 1,
    Token.RPAREN: -1,
}


def tokenize(regexp: str) -> Iterable[TokenOrChar]:
    concat, escape, nparen = False, False, 0
    for char in regexp:
        token = recognize(char)
        if escape:
            if not token:
                yield "\\"
            yield char
            escape = False
        elif char == "\\":
            escape = True  # lookahead
        elif token:
            if concat and token is Token.LPAREN:
                yield Token.CONCAT
            yield token
            nparen += parens.get(token, 0)
            concat = token not in {Token.LPAREN, Token.ALTERN}
        else:
            if concat:
                yield Token.CONCAT
            yield char
            concat = True
    if nparen:
        raise ValueError("unbalanced parentheses")


def postfix(tokens: Iterable[TokenOrChar]) -> Iterable[TokenOrCharOrNone]:
    stack = [Token.LPAREN]
    for token in tokens:
        if token is Token.LPAREN:
            stack.append(token)
        elif isinstance(token, Token):
            while token < stack[-1]:
                yield stack.pop()
            if token is Token.RPAREN:
                stack.pop()
            else:
                stack.append(token)
        else:
            yield token
    yield from reversed(stack[1:])
    if Token.KLEENE in stack:
        yield None
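
# --- Illustrative sketch (not in the original module) ---
# What the two passes above produce for a small pattern; CONCAT is the
# implicit operator tokenize() inserts between adjacent atoms.
def _demo_postfix(pattern="a(b|c)*d"):
    return [getattr(t, "name", t) for t in postfix(tokenize(pattern))]
    # -> ['a', 'b', 'c', 'ALTERN', 'KLEENE', 'd', 'CONCAT', 'CONCAT']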
class Instructions(list):
    """
    >>> Instructions('a')
    [(1,), 'a']
    >>> Instructions('a*')
    [(2,), 'a', (1, 3)]
    >>> Instructions('ab')
    [(1,), 'a', (3,), 'b']
    >>> Instructions('a|b')
    [(5,), 'a', (6,), 'b', (6,), (3, 1)]
    >>> Instructions('a|b|c')
    [(9,), 'a', (10,), 'b', (8,), 'c', (8,), (5, 3), (10,), (7, 1)]
    >>> Instructions('a*b')
    [(2,), 'a', (1, 3), 'b']
    >>> Instructions('a*|b')
    [(5,), 'a', (6,), 'b', (6,), (1, 3, 2)]
    >>> Instructions('a*b*')
    [(2,), 'a', (4,), 'b', (1, 3, 5)]
    >>> Instructions('a*|b')
    [(5,), 'a', (6,), 'b', (6,), (1, 3, 2)]
    >>> Instructions('a(b|c)*d')
    [(1,), 'a', (8,), 'b', (8,), 'c', (8,), (5, 3), (7, 9), 'd']
    >>> Instructions('(a|b)(c|d)')
    [(5,), 'a', (6,), 'b', (6,), (3, 1), (11,), 'c', (12,), 'd', (12,), (9, 7)]
    """

    def __init__(self, regexp: str):
        stack: list = []
        for token in postfix(tokenize(regexp)):
            pc = len(self)
            if token == Token.CONCAT:
                stack.pop()
            elif token == Token.KLEENE:
                last = stack[-1]
                self.append(self[last])
                self[last] = (pc,)
            elif token == Token.ALTERN:
                last = stack.pop()
                prev = stack[-1]
                self.append((pc + 2,))
                self.append(self[last] + self[prev])
                self[prev] = (pc + 1,)
                self[last] = (pc + 2,)
            elif token is None:
                self[-1] += (pc,)
            elif pc and isinstance(self[-1], tuple) and len(self[-1]) == 1:
                stack.append(pc - 1)
                self[-1] += (pc,)
                self.append(token)
            else:
                stack.append(pc)
                self.append((pc + 1,))
                self.append(token)

    def __call__(self, string: str) -> bool:
        c = [0]  # Let's start at the very beginning
        m = len(self)
        for char in string:
            n = []
            for p in c:
                try:
                    op = self[p]  # str to match or jump targets
                except IndexError:
                    return True
                if isinstance(op, tuple):
                    c.extend(op)
                elif char == op:
                    n.append(p + 1)
            if m in n:
                return True
            # end of current targets
            c = n
        return False
class Graph(dict):
    """
    >>> Graph('a')
    {'a': None}
    >>> Graph('a*')  # ellipsis means cycle
    {'a': {...}, '': None}
    >>> Graph('abcd')
    {'a': {'b': {'c': {'d': None}}}}
    >>> Graph('a|b|c|d')
    {'a': None, 'b': None, 'c': None, 'd': None}
    >>> Graph('ab*')
    {'a': {'b': {...}, '': None}}
    >>> Graph('a*b')
    {'a': {...}, '': {'b': None}}
    >>> Graph('a*b*')
    {'a': {...}, '': {'b': {...}, '': None}}
    >>> Graph('a(b|c)')
    {'a': {'b': None, 'c': None}}
    >>> Graph('(a|b)c')
    {'a': {'c': None}, 'b': {'c': None}}
    >>> Graph('a(b|c)*d')
    {'a': {'b': {...}, 'c': {...}, '': {'d': None}}}
    >>> Graph('(a|b)(c|d)')
    {'a': {'c': None, 'd': None}, 'b': {'c': None, 'd': None}}
    """

    @staticmethod
    def append(next, node) -> None:
        for n, p in next:
            n[p] = node

    def __new__(cls, regexp: str):
        # maintain a stack of tuples whose head represents the
        # recently compiled regular expression fragment and rest
        # are "exits"
        stack: list = []
        head = super().__new__(cls)
        rest: tuple[tuple["Graph", str], ...] = (
            (head, ""),
        )  # empty regex matches everything
        for token in postfix(tokenize(regexp)):
            if token is Token.CONCAT:
                node, next = stack.pop()
                cls.append(next, head)
                head = node
            elif token is Token.KLEENE:
                cls.append(rest, head)
                rest = ((head, ""),)
            elif token is Token.ALTERN:
                node, next = stack.pop()
                head.update(node)
                rest = tuple((head if n is node else n, p) for n, p in next) + rest
            elif isinstance(token, str):  # help mypy
                stack.append((head, rest))
                head = super().__new__(cls)
                rest = ((head, token),)
        cls.append(rest, None)
        return head

    def __init__(self, regexp: str):
        super().__init__(self)

    def __call__(self, string: str) -> bool:
        c = [self]
        for ch in string:
            n = []
            for p in c:
                if p is None:
                    return True
                try:
                    c.append(p[""])  # ε
                except KeyError:
                    pass
                try:
                    n.append(p[ch])
                except KeyError:
                    pass
            if None in n:
                return True
            c = n
        return False
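
# --- Illustrative sketch (not in the original module) ---
# The two engines accept the same language; a quick cross-check on a few
# arbitrary example strings.
def _demo_engines_agree(pattern="a(b|c)*d", strings=("ad", "abcbd", "ab", "xd")):
    inst, graph = Instructions(pattern), Graph(pattern)
    return [(s, inst(s), graph(s)) for s in strings]
    # -> [('ad', True, True), ('abcbd', True, True), ('ab', False, False), ('xd', False, False)]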
if __name__ == "__main__":
# print(Graph("a*b")("aaaaaaaaaaaaaaaaaaaaaaaaaab"))
print(Graph("a(b|c)*d"))
| 34.92029 | 103 | 0.534862 | 1,201 | 9,638 | 4.234804 | 0.270608 | 0.009438 | 0.007078 | 0.006292 | 0.145301 | 0.082776 | 0.053873 | 0.033425 | 0.033425 | 0.033425 | 0 | 0.015469 | 0.315833 | 9,638 | 275 | 104 | 35.047273 | 0.755839 | 0.462855 | 0 | 0.318182 | 0 | 0 | 0.018347 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051948 | false | 0.012987 | 0.012987 | 0 | 0.123377 | 0.006494 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
645ad370c4b50b933af7d6c182f859d5bc0ad82b | 2,785 | py | Python | python/v1/buyers/publisher_profiles/get_publisher_profiles.py | googleads/authorized-buyers-marketplace-api-samples | 73ae731785a19aa418fe5561831605b7c209c651 | ["Apache-2.0"] | null | null | null | python/v1/buyers/publisher_profiles/get_publisher_profiles.py | googleads/authorized-buyers-marketplace-api-samples | 73ae731785a19aa418fe5561831605b7c209c651 | ["Apache-2.0"] | null | null | null | python/v1/buyers/publisher_profiles/get_publisher_profiles.py | googleads/authorized-buyers-marketplace-api-samples | 73ae731785a19aa418fe5561831605b7c209c651 | ["Apache-2.0"] | 1 | 2022-01-09T18:06:59.000Z | 2022-01-09T18:06:59.000Z |
#!/usr/bin/python
#
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gets a single publisher profile for the given account and profile IDs."""
import argparse
import os
import pprint
import sys

sys.path.insert(0, os.path.abspath('../../..'))

from googleapiclient.errors import HttpError

import util

_PUB_PROFILE_NAME_TEMPLATE = 'buyers/%s/publisherProfiles/%s'

DEFAULT_BUYER_RESOURCE_ID = 'ENTER_BUYER_RESOURCE_ID_HERE'
DEFAULT_PUB_PROFILE_RESOURCE_ID = 'ENTER_PUB_PROFILE_RESOURCE_ID_HERE'


def main(marketplace, args):
  account_id = args.account_id
  publisher_profile_id = args.publisher_profile_id

  print(f'Get publisher profile "{publisher_profile_id}" for account '
        f'"{account_id}":')
  try:
    # Construct and execute the request.
    response = marketplace.buyers().publisherProfiles().get(
        name=_PUB_PROFILE_NAME_TEMPLATE % (
            account_id, publisher_profile_id)).execute()
  except HttpError as e:
    print(e)
    sys.exit(1)

  pprint.pprint(response)


if __name__ == '__main__':
  try:
    service = util.get_service(version='v1')
  except IOError as ex:
    print(f'Unable to create marketplace service - {ex}')
    print('Did you specify the key file in util.py?')
    sys.exit(1)

  parser = argparse.ArgumentParser(
      description=('Get a publisher profile for the given buyer account ID '
                   'and publisher profile ID.'))
  # Required fields.
  parser.add_argument(
      '-a', '--account_id', default=DEFAULT_BUYER_RESOURCE_ID,
      help=('The resource ID of the buyers resource under which the '
            'publisherProfiles resource is being accessed. This will be used '
            'to construct the name used as a path parameter for the '
            'publisherProfiles.get request.'))
  parser.add_argument(
      '-p', '--publisher_profile_id', default=DEFAULT_PUB_PROFILE_RESOURCE_ID,
      help=('The resource ID of the buyers.publisherProfiles resource that '
            'is being accessed. This will be used to construct the name used '
            'as a path parameter for the publisherProfiles.get request.'))

  main(service, parser.parse_args())
| 34.382716 | 80 | 0.694434 | 370 | 2,785 | 5.07027 | 0.389189 | 0.076759 | 0.057569 | 0.031983 | 0.228145 | 0.147122 | 0.147122 | 0.147122 | 0.147122 | 0.10661 | 0 | 0.00551 | 0.217953 | 2,785 | 80 | 81 | 34.8125 | 0.855831 | 0.254937 | 0 | 0.133333 | 0 | 0 | 0.376522 | 0.099367 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.133333 | 0 | 0.155556 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
645d11df00b3707ac1991bf8fa1726c7105c09fb | 152 | py | Python | codeforces/anirudhak47/616/E.py | anirudhakulkarni/codes | d7a907951033b57314dfc0b837123aaa5c25a39a | ["MIT"] | 3 | 2020-07-09T16:15:42.000Z | 2020-07-17T13:19:42.000Z | codeforces/anirudhak47/616/E.py | anirudhakulkarni/codes | d7a907951033b57314dfc0b837123aaa5c25a39a | ["MIT"] | null | null | null | codeforces/anirudhak47/616/E.py | anirudhakulkarni/codes | d7a907951033b57314dfc0b837123aaa5c25a39a | ["MIT"] | 1 | 2020-07-17T13:19:48.000Z | 2020-07-17T13:19:48.000Z |
n, m = map(int, input().split())
mod = 10**9 + 7
total = (n % mod) * (m % mod)  # renamed from `sum`, which shadowed the builtin
res = 0
# Subtract sum of floor(n/j)*j; only j <= min(n, m) contributes, since
# floor(n/j) = 0 for j > n and j > m falls outside the summation range.
j = 1
while j <= min(n, m):
    res += (n // j) * j
    j += 1
print((total - res) % mod)
| 13.818182 | 29 | 0.493421 | 36 | 152 | 2.083333 | 0.5 | 0.053333 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.067227 | 0.217105 | 152 | 11 | 30 | 13.818182 | 0.563025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
646093a875532db734c0ea4b2ff24388f33c4f32 | 2,633 | py | Python | Paper_Specific_Versions/2019_DTI/Code/subjects_lists/lists_stats_dMRI.py | adamwild/AD-ML | e4ac0b7d312ab482b9b52bb3f5c6745cc06431e9 | ["MIT"] | null | null | null | Paper_Specific_Versions/2019_DTI/Code/subjects_lists/lists_stats_dMRI.py | adamwild/AD-ML | e4ac0b7d312ab482b9b52bb3f5c6745cc06431e9 | ["MIT"] | null | null | null | Paper_Specific_Versions/2019_DTI/Code/subjects_lists/lists_stats_dMRI.py | adamwild/AD-ML | e4ac0b7d312ab482b9b52bb3f5c6745cc06431e9 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = ["Junhao Wen", "Simona Bottani", "Jorge Samper-Gonzalez"]
__copyright__ = "Copyright 2016-2018 The Aramis Lab Team"
__credits__ = ["Junhao Wen"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__status__ = "Development"

import numpy as np
import pandas as pd
import os


def statistics_cn_ad_mci_M00(dwi_bids, output_dir):
    '''
    This is a function to calculate the demographic information for the chosen population

    :param dwi_bids: BIDS directory for dwi
    :param output_dir: where files with lists have been saved from the previous step
    :return: it prints all the statistics (the subjects are the same as in the subjects_list chosen)
    '''
    diagnosis = ['AD', 'CN', 'MCI', 'pMCI', 'sMCI']  # , 'CN-', 'CN+', 'MCI-', 'MCI+', 'pMCI+', 'pMCI-', 'sMCI+', 'sMCI-'
    participants_tsv = pd.io.parsers.read_csv(os.path.join(dwi_bids, 'participants.tsv'), sep='\t')

    for label in diagnosis:
        path_diagnosis = pd.io.parsers.read_csv(os.path.join(output_dir, label + '_ADNI' + '.tsv'),
                                                sep='\t').participant_id
        sex = []
        age = []
        mmse = []
        cdr = []
        for sub in path_diagnosis.values:
            ses = pd.io.parsers.read_csv(os.path.join(dwi_bids, sub, sub + '_sessions.tsv'), sep='\t')
            sex.append(participants_tsv[participants_tsv.participant_id == sub].sex.item())
            age.append(ses[ses.session_id == 'ses-M00'].age.item())
            mmse.append(ses[ses.session_id == 'ses-M00'].MMS.item())
            cdr.append(ses[ses.session_id == 'ses-M00'].cdr_global.item())

        age_m = np.mean(np.asarray(age))
        age_u = np.std(np.asarray(age))
        mmse_m = np.mean(np.asarray(mmse))
        mmse_u = np.std(np.asarray(mmse))
        N_women = len([x for x in range(len(age)) if sex[x] == 'F'])
        N_men = len([x for x in range(len(age)) if sex[x] == 'M'])

        print('**** ' + label + ' *****')
        print('Group of len : ' + str(len(age)))
        print('N male = ' + str(N_men) + ' N female = ' + str(N_women))
        print('AGE = ' + str(age_m) + ' +/- ' + str(age_u) + ' range ' + str(np.min(np.asarray(age))) + ' / ' +
              str(np.max(np.asarray(age))))
        print('MMSE = ' + str(mmse_m) + ' +/- ' + str(mmse_u) + ' range ' + str(np.min(np.asarray(mmse))) + ' / ' +
              str(np.max(np.asarray(mmse))))
        print('CDR: ' + str(cdr.count(0)) + '(0); ' + str(cdr.count(0.5)) + '(0.5); ' +
              str(cdr.count(1)) + '(1); ' + str(cdr.count(2)) + '(2); ')
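
# --- Illustrative sketch (not part of the original script) ---
# The same per-group demographics collected into a tidy DataFrame instead of
# printed; the column names here are our own choice, not project conventions.
def _demo_stats_frame(groups):
    """`groups` maps a diagnosis label to dicts of lists, e.g.
    {'AD': {'age': [...], 'mmse': [...]}}."""
    rows = []
    for label, vals in groups.items():
        age = np.asarray(vals['age'])
        mmse = np.asarray(vals['mmse'])
        rows.append({'group': label, 'n': len(age),
                     'age_mean': age.mean(), 'age_sd': age.std(),
                     'mmse_mean': mmse.mean(), 'mmse_sd': mmse.std()})
    return pd.DataFrame(rows)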
| 46.192982 | 126 | 0.567793 | 372 | 2,633 | 3.846774 | 0.360215 | 0.050314 | 0.033543 | 0.031447 | 0.262055 | 0.194969 | 0.194969 | 0.106219 | 0.086653 | 0.086653 | 0 | 0.015144 | 0.247626 | 2,633 | 56 | 127 | 47.017857 | 0.707219 | 0.159134 | 0 | 0 | 0 | 0 | 0.152015 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.073171 | 0 | 0.097561 | 0.146341 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6462624a7ae5dc8c88954bd7349a57466f638697 | 2,102 | py | Python | tests/diffraction/test_diffraction_r_vortex.py | VasilyevEvgeny/self-focusing_3D | c90b4d78d2d72365566f8a49b325bd48127b1e44 | ["MIT"] | null | null | null | tests/diffraction/test_diffraction_r_vortex.py | VasilyevEvgeny/self-focusing_3D | c90b4d78d2d72365566f8a49b325bd48127b1e44 | ["MIT"] | null | null | null | tests/diffraction/test_diffraction_r_vortex.py | VasilyevEvgeny/self-focusing_3D | c90b4d78d2d72365566f8a49b325bd48127b1e44 | ["MIT"] | null | null | null |
from numpy.random import randint
from core import BeamR, Propagator, SweepDiffractionExecutorR, BeamVisualizer, xlsx_to_df
from tests.diffraction.test_diffraction import TestDiffraction

NAME = 'diffraction_r_vortex'


class TestDiffractionRVortex(TestDiffraction):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self._add_prefix(NAME)

        self.__M = randint(1, 4)
        self.__m = self.__M
        self._p = 1.0

        self._eps = 0.02
        self._png_name = NAME

        self._horizontal_line = 1 / 2

    def process(self):
        beam = BeamR(medium=self._medium.info,
                     M=self.__M,
                     m=self.__m,
                     p_0_to_p_vortex=self._p_0_to_p_vortex,
                     lmbda=self._lmbda,
                     r_0=self._radius,
                     n_r=512)
        visualizer = BeamVisualizer(beam=beam,
                                    maximum_intensity='local',
                                    normalize_intensity_to=beam.i_0,
                                    plot_type='volume')
        propagator = Propagator(args=self._args,
                                beam=beam,
                                diffraction=SweepDiffractionExecutorR(beam=beam),
                                n_z=self._n_z,
                                dz_0=beam.z_diff / self._n_z,
                                const_dz=True,
                                print_current_state_every=0,
                                plot_beam_every=0,
                                visualizer=visualizer)
        propagator.propagate()

        return propagator.logger.track_filename, propagator.manager.results_dir, propagator.beam.z_diff

    def test_diffraction_r_vortex(self):
        track_filename, path_to_save_plot, z_diff = self.process()

        df = xlsx_to_df(track_filename, normalize_z_to=1)
        df['i_max / i_0'] /= df['i_max / i_0'][0]

        self._add_analytics_to_df(df)
        self._check(df)

        if self._flag_plot:
            self._plot(df, path_to_save_plot, z_diff)
| 33.365079 | 103 | 0.540913 | 232 | 2,102 | 4.487069 | 0.353448 | 0.024015 | 0.017291 | 0.009606 | 0.073007 | 0.036503 | 0 | 0 | 0 | 0 | 0 | 0.017557 | 0.376784 | 2,102 | 62 | 104 | 33.903226 | 0.777099 | 0 | 0 | 0 | 0 | 0 | 0.025214 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.177778 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6465d405858eb834ed1583ad68a1aa90c937d20f | 506 | py | Python | NGASpider/nga_topic_wordcloud.py | liamcraft118/PythonLearning | 65949b7972afb13231f4f8d842a1b561a02072fb | ["MIT"] | null | null | null | NGASpider/nga_topic_wordcloud.py | liamcraft118/PythonLearning | 65949b7972afb13231f4f8d842a1b561a02072fb | ["MIT"] | null | null | null | NGASpider/nga_topic_wordcloud.py | liamcraft118/PythonLearning | 65949b7972afb13231f4f8d842a1b561a02072fb | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from DB import DB
from wordcloud import WordCloud
import jieba
import matplotlib.pyplot as plt

if __name__ == '__main__':
    db = DB()
    db.connect()
    result = db.readAllTitle()
    titles = ''
    for title in result:
        titles += title[0]
    # Segment the Chinese titles with jieba, then build the word cloud.
    words = " ".join(jieba.cut(titles))
    wordcloud = WordCloud(font_path="simsun.ttf").generate(words)
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis("off")
    plt.show()
| 20.24 | 65 | 0.63834 | 62 | 506 | 5.064516 | 0.629032 | 0.095541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005076 | 0.221344 | 506 | 24 | 66 | 21.083333 | 0.791878 | 0.090909 | 0 | 0 | 0 | 0 | 0.065646 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6465e979caebd8c2c3c49bbbfea16b8f79b275ca | 6,611 | py | Python | granola/main.py | metergroup/GRANOLA | 24abcf9e6429d81047cda24a4343af5563d4e353 | ["Apache-2.0"] | 3 | 2022-02-13T02:38:28.000Z | 2022-03-22T16:59:15.000Z | granola/main.py | metergroup/GRANOLA | 24abcf9e6429d81047cda24a4343af5563d4e353 | ["Apache-2.0"] | 27 | 2022-02-13T18:22:48.000Z | 2022-03-31T18:18:41.000Z | granola/main.py | metergroup/GRANOLA | 24abcf9e6429d81047cda24a4343af5563d4e353 | ["Apache-2.0"] | null | null | null |
"""
This module is deprecated and will be removed in version 1.0. Please use :mod:`~granola.breakfast_cereal` instead
"""
from granola.breakfast_cereal import Cereal
from granola.utils import deprecation
class MockSerial(Cereal):
"""
Deprecated version of :class:`~granola.breakfast_cereal.Cereal`. Will be removed in 1.0
Switch to using :class:`~granola.breakfast_cereal.Cereal` and the new interface.
"""
def __init__(self, config_key, config_path="config.json", command_readers=None, hooks=None):
deprecation("MockSerial is deprecated. Please use granola.breakfast_cereal.Cereal instead.", "1.0")
command_readers = command_readers or []
hooks = hooks or []
config = self._load_json_config(config_key=config_key, config_path=config_path)
config = self._check_and_normalize_config_deprecation(config)
config = self._check_and_normalize_command_readers_deprecations(config, command_readers)
config = self._check_and_normalize_hook_deprecations(config, hooks)
super(MockSerial, self).__init__(data_path_root=config_path, **config)
def _check_and_normalize_config_deprecation(self, config):
command_readers = config.setdefault("command_readers", {})
if "canned_queries" in config:
self._check_and_normalize_canned_queries_deprecation(config, command_readers)
if "getters_and_setters" in config:
self._check_and_normalize_getters_and_setters_deprecation(config, command_readers)
return config
def _check_and_normalize_getters_and_setters_deprecation(self, config, command_readers):
        deprecation(
            "Specifying the GettersAndSetters command reader through the outermost config key"
            " 'getters_and_setters' is deprecated. Please use the 'command_readers' section instead."
            " See https://granola.readthedocs.io/en/latest/config/config.html for more details.",
            "1.0",
        )
# Check for old form of variable substitution pre jinja
getters_and_setters = config["getters_and_setters"]
start_not_in = "variable_start_string" not in getters_and_setters
end_not_in = "variable_end_string" not in getters_and_setters
if start_not_in and end_not_in:
deprecation(
"'GettersAndSetters' variable declaration follows old format"
"\nSwitch to using ``Cereal``, which defaults to traditional jinja2 formatting ({{ var }}),"
"\nor specify explicitly your variable_start_string and variable_end_string inside"
" getters and setters (ex: 'variable_start_string': '`')",
"1.0",
)
# specify getters and setters variable start and end as the old way
getters_and_setters["variable_start_string"] = "`"
getters_and_setters["variable_end_string"] = "`"
        # swap getters_and_setters for "command_readers": {"GettersAndSetters": ...}
command_readers["GettersAndSetters"] = config.pop("getters_and_setters")
getters = command_readers["GettersAndSetters"].get("getters", [])
for getter in getters:
if "getter" in getter:
                deprecation(
                    "Using the 'getter' key inside"
                    " config['getters_and_setters']['getters']"
                    " is deprecated and will be removed in a future release."
                    "\nSwitch to using the key 'cmd' instead.",
                    "1.0",
                )
# swap getters key for cmd
getter["cmd"] = getter.pop("getter")
setters = command_readers["GettersAndSetters"].get("setters", [])
for setter in setters:
if "setter" in setter:
                deprecation(
                    "Using the 'setter' key inside"
                    " config['getters_and_setters']['setters']"
                    " is deprecated and will be removed in a future release."
                    "\nSwitch to using the key 'cmd' instead.",
                    "1.0",
                )
# swap setters key for cmd
setter["cmd"] = setter.pop("setter")
def _check_and_normalize_canned_queries_deprecation(self, config, command_readers):
        deprecation(
            "Specifying the CannedQueries command reader through the outermost config key"
            " 'canned_queries' is deprecated. Please use the 'command_readers' section instead."
            " See https://granola.readthedocs.io/en/latest/config/config.html for more details.",
            "1.0",
        )
# swap canned_queries for "command_readers": {"CannedQueries": ...}
command_readers["CannedQueries"] = config.pop("canned_queries")
cr = command_readers["CannedQueries"]
if "files" in cr:
deprecation("canned_queries key 'files' has been deprecated. Use the key 'data' instead.", "1.0")
# swap file key for data
cr["data"] = cr.pop("files")
data = cr.get("data", [])
if isinstance(data, dict):
deprecation(
"canned_queries['data'] as a dictionary has been deprecated."
"\nEither use a list of files or list of dictionaries of cmds and responses instead"
"See configuration section of documentation.",
"1.0",
)
            # keep only the values, discarding the dictionary keys
            new_data = list(data.values())
cr["data"] = new_data
def _check_and_normalize_command_readers_deprecations(self, config, command_readers):
config_readers = config["command_readers"]
for command_reader in command_readers:
str_not_in = command_reader not in config_readers
obj_not_in = getattr(command_reader, "__name__", "") not in config_readers
cls_not_in = command_reader.__class__.__name__ not in config_readers
if str_not_in and obj_not_in and cls_not_in:
config["command_readers"][command_reader] = {}
return config
def _check_and_normalize_hook_deprecations(self, config, hooks):
config_hooks = config.setdefault("hooks", {})
for hook in hooks:
str_not_in = hook not in config_hooks
obj_not_in = getattr(hook, "__name__", "") not in config_hooks
cls_not_in = hook.__class__.__name__ not in config_hooks
if str_not_in and obj_not_in and cls_not_in:
config["hooks"][hook] = {}
return config
| 48.255474 | 114 | 0.638179 | 766 | 6,611 | 5.219321 | 0.185379 | 0.030015 | 0.063782 | 0.022511 | 0.433467 | 0.311906 | 0.224862 | 0.148074 | 0.148074 | 0.117559 | 0 | 0.003958 | 0.273786 | 6,611 | 136 | 115 | 48.610294 | 0.828786 | 0.101951 | 0 | 0.19802 | 0 | 0.019802 | 0.314586 | 0.040657 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059406 | false | 0 | 0.019802 | 0 | 0.118812 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
646d72d43695b19e055ac78f71a7b68738a02570 | 6,314 | py | Python | train.py | yhgon/cmtf | 7a3ffc3a59a7c546a00d3b73be58f7d1c2f1f0cf | [
"MIT"
] | null | null | null | train.py | yhgon/cmtf | 7a3ffc3a59a7c546a00d3b73be58f7d1c2f1f0cf | [
"MIT"
] | null | null | null | train.py | yhgon/cmtf | 7a3ffc3a59a7c546a00d3b73be58f7d1c2f1f0cf | [
"MIT"
] | null | null | null | from cmtf import CompressiveTransformer
from cmtf_ar_wrapper import AutoregressiveWrapper
import argparse
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
def parse_args(parser):
"""
Parse commandline arguments.
"""
build_model = parser.add_argument_group('model setup')
build_model.add_argument( '--num_tokens', type=int, default=256, help='')
build_model.add_argument( '--dim', type=int, default=512, help='')
build_model.add_argument( '--depth', type=int, default= 8, help='')
build_model.add_argument( '--heads', type=int, default= 8, help='')
build_model.add_argument( '--seq_len', type=int, default=512, help='')
build_model.add_argument( '--mem_len', type=int, default=512, help='')
build_model.add_argument( '--cmem_len', type=int, default=128, help='')
build_model.add_argument( '--num_mem_layers', type=int, default= 3, help='')
training = parser.add_argument_group('training setup')
training.add_argument( '--validate_every', type=int, default=100, help='')
training.add_argument( '--generate_every', type=int, default=200, help='')
training.add_argument( '--prime_length', type=int, default= 512, help='')
training.add_argument( '--generate_length', type=int, default=1024, help='')
optimization = parser.add_argument_group('optimization setup')
optimization.add_argument( '--optimizer', type=str, default='adam', help='Optimization algorithm')
    optimization.add_argument( '--learning_rate', type=float, default=1e-4, help='learning rate')
optimization.add_argument( '--num_batches', type=int, default=100000, help='max iteration')
optimization.add_argument( '--batch_size', type=int, default=16, help='batch size')
optimization.add_argument( '--max_batch_size', type=int, default=4, help='gradient accumulation')
dataset = parser.add_argument_group('dataset parameters')
dataset.add_argument('--zip_filename', type=str, default='/content/enwik8.gz', help='Path to training filelist')
dataset.add_argument('--num_segments', type=int, default = 4, help='num_segments')
return parser
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# prepare enwik8 data
def prepare_dataset(zip_filename):
with gzip.open(zip_filename) as file:
        # np.fromstring is deprecated for binary input; np.frombuffer is the supported equivalent
        X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8)
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
return data_train, data_val
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len, segments):
super().__init__()
self.data = data
self.seq_len = seq_len
self.segments = segments
self.total_len = seq_len * segments
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.total_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.total_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.total_len
# training
def run(args):
# build model
print("prepare model")
model = CompressiveTransformer(
num_tokens = args.num_tokens,
dim = args.dim,
depth = args.depth,
seq_len = args.seq_len,
mem_len = args.mem_len,
cmem_len = args.cmem_len,
heads = args.heads,
        # the top args.num_mem_layers layers are given memory
        memory_layers = [*range(args.depth - args.num_mem_layers + 1, args.depth + 1)]
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare dataset
print("prepare dataset")
data_train, data_val = prepare_dataset(args.zip_filename)
train_dataset = TextSamplerDataset(data_train, args.seq_len , args.num_segments)
val_dataset = TextSamplerDataset(data_val, args.seq_len , args.num_segments)
train_loader = cycle(DataLoader(train_dataset, batch_size = args.batch_size))
val_loader = cycle(DataLoader(val_dataset, batch_size = args.batch_size))
# optimizer
    # named "optimizer" so it does not shadow the torch.optim module imported above
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
print("start loop")
for i in tqdm.tqdm(range(args.num_batches), mininterval=10., desc='training'):
model.train()
grad_accum_every = args.batch_size / args.max_batch_size
for mlm_loss, aux_loss, is_last in model(next(train_loader), max_batch_size = args.max_batch_size, return_loss = True):
loss = mlm_loss + aux_loss
(loss / grad_accum_every).backward()
print(f' {i:d} training loss: {mlm_loss.item():.4f} | aux_loss: {aux_loss.item():.4f}')
if is_last:
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
                optimizer.step()
                optimizer.zero_grad()
if i % args.validate_every == 0:
model.eval()
with torch.no_grad():
for loss, aux_loss, _ in model(next(val_loader), return_loss = True):
print(f'validation loss: {loss.item():.4f}')
if i % args.generate_every == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
inp = inp[:args.prime_length]
prime = decode_tokens(inp)
            print(f'{prime}\n\n{"*" * 100}')
sample = model.generate(inp, args.generate_length)
output_str = decode_tokens(sample)
print(output_str)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Compressive Transformer Training',
                                     allow_abbrev=False)
parser = parse_args(parser)
args, _ = parser.parse_known_args()
print(args)
### additional configuration from config file
#with open(args.config) as f:
# config = ConfigWrapper(**json.load(f))
run( args)
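# Example invocations (illustrative values; the flag names come from parse_args above):
#   python train.py --zip_filename /content/enwik8.gz --batch_size 16 --max_batch_size 4
#   python train.py --depth 8 --seq_len 512 --mem_len 512 --cmem_len 128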
| 34.883978 | 127 | 0.63478 | 792 | 6,314 | 4.830808 | 0.243687 | 0.066127 | 0.062206 | 0.04391 | 0.168845 | 0.109775 | 0.068479 | 0.05541 | 0.05541 | 0.023523 | 0 | 0.016062 | 0.240735 | 6,314 | 180 | 128 | 35.077778 | 0.782019 | 0.03611 | 0 | 0.017094 | 0 | 0.008547 | 0.104507 | 0.006934 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.094017 | 0.025641 | 0.230769 | 0.068376 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
646eb195f8628e5692003244f139891bc4d4d41e | 1,094 | py | Python | scripts/template_file.py | Aracthor/cpp-maker | 6e5f2fb553ecfb849704629f37c57f801fdd072f | [
"MIT"
] | null | null | null | scripts/template_file.py | Aracthor/cpp-maker | 6e5f2fb553ecfb849704629f37c57f801fdd072f | [
"MIT"
] | null | null | null | scripts/template_file.py | Aracthor/cpp-maker | 6e5f2fb553ecfb849704629f37c57f801fdd072f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
## template_file.py for cpp-maker in /home/aracthor/programs/projects/cpp-maker
##
## Made by Aracthor
##
## Started on Mon Sep 7 12:03:26 2015 Aracthor
## Last Update Wed Sep 9 10:27:36 2015 Aracthor
##
from files import File
class TemplateFile(File):
def __init__(self, path, project):
File.__init__(self, path, project)
def generateData(self, configs, definition):
self.writeNamespacesEntry(configs.namespaces)
for member in definition.getters:
if member != definition.getters[0]:
self.writeEmptyLine()
self.writeLine(member.return_type)
if member.return_type == "bool":
self.writeLine(configs.class_name + "::is" + member.name.title() + "() const")
else:
self.writeLine(configs.class_name + "::get" + member.name.title() + "() const")
self.writeLine("{")
self.writeLine(self.indentation + "return m_" + member.name + ";")
self.writeLine("}")
self.writeNamespacesExit(configs.namespaces)
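    # Illustrative output of generateData for a getter named "size" with
    # return_type "int" (shape inferred from the writeLine calls above):
    #
    #   int
    #   SomeClass::getSize() const
    #   {
    #       return m_size;
    #   }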
| 34.1875 | 95 | 0.617916 | 124 | 1,094 | 5.33871 | 0.524194 | 0.117825 | 0.077039 | 0.057402 | 0.087613 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02934 | 0.252285 | 1,094 | 31 | 96 | 35.290323 | 0.779951 | 0.187386 | 0 | 0 | 0 | 0 | 0.046911 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.055556 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64709e39f1c3abfd144073c9040d15f4940e15fc | 592 | py | Python | tests/test_gym_registers.py | domingoesteban/robolearn_envs | 1e10f315abcbb034e613b3b5a7a48662a839c81b | [
"BSD-3-Clause"
] | 2 | 2020-08-20T15:46:55.000Z | 2022-02-16T13:45:59.000Z | tests/test_gym_registers.py | domingoesteban/robolearn_envs | 1e10f315abcbb034e613b3b5a7a48662a839c81b | [
"BSD-3-Clause"
] | null | null | null | tests/test_gym_registers.py | domingoesteban/robolearn_envs | 1e10f315abcbb034e613b3b5a7a48662a839c81b | [
"BSD-3-Clause"
] | 1 | 2020-10-03T11:28:15.000Z | 2020-10-03T11:28:15.000Z | import gym
import numpy as np
from context import robolearn_envs
all_envs = gym.envs.registry.all()
robolearn_env_ids = [env_spec.id for env_spec in all_envs
if env_spec.id.startswith('RoboLearn-')]
for env_id in robolearn_env_ids:
print('-'*15)
print("Environment: %s" % env_id)
env = gym.make(env_id)
obs = env.reset()
print("\t Reset: OK")
    for t in range(50):
        obs, reward, done, info = \
            env.step(np.zeros(np.prod(env.action_space.shape)))
        print('\t Step %d: OK' % t)
env.close()
print("\t Close: OK")
| 23.68 | 63 | 0.613176 | 92 | 592 | 3.793478 | 0.445652 | 0.060172 | 0.08596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009009 | 0.25 | 592 | 24 | 64 | 24.666667 | 0.777027 | 0 | 0 | 0 | 0 | 0 | 0.108108 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.277778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6470d5944c522fadb85e8b00e6d96b7ccc8e434e | 7,468 | py | Python | xor_nn/genetic_algorithm.py | sgalella/GeneticAlgorithm-XOR | 2e7de2e2fc6dfd8bb27d9e09c7be86f75c6db87c | [
"MIT"
] | null | null | null | xor_nn/genetic_algorithm.py | sgalella/GeneticAlgorithm-XOR | 2e7de2e2fc6dfd8bb27d9e09c7be86f75c6db87c | [
"MIT"
] | null | null | null | xor_nn/genetic_algorithm.py | sgalella/GeneticAlgorithm-XOR | 2e7de2e2fc6dfd8bb27d9e09c7be86f75c6db87c | [
"MIT"
] | null | null | null | import numpy as np
from tqdm import tqdm
from . import mutation, recombination, selection
class GeneticAlgorithm:
"""
    Genetic algorithm for evolving the weights of an XOR neural network.
"""
def __init__(self, lower_bound=-5, upper_bound=5, alpha=0.5, num_iterations=1000, population_size=100, offspring_size=20, mutation_rate=0.2,
mutation_type=mutation.uniform, recombination_type=recombination.arithmetic, selection_type=selection.genitor):
"""
Initializes the algorithm.
"""
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.alpha = alpha
self.num_iterations = num_iterations
self.population_size = population_size
self.offspring_size = offspring_size
self.mutation_rate = mutation_rate
self.mutation_type = mutation_type
self.recombination_type = recombination_type
self.selection_type = selection_type
assert self.offspring_size < self.population_size, "Population size has to be greater than the number of selected individuals"
    def __repr__(self):
        """
        Visualizes algorithm parameters when printing.
        """
        return (f"Iterations: {self.num_iterations}\n"
                f"Population size: {self.population_size}\n"
                f"Offspring size: {self.offspring_size}\n"
                f"Mutation rate: {self.mutation_rate}\n")
def random_initial_population(self):
"""
Generates random population of individuals.
Returns:
            population (np.array): Population containing the different individuals.
"""
# Initialize population
population = (self.upper_bound - self.lower_bound) * np.random.random((self.population_size, 9)) + self.lower_bound
return population
def sigmoid(self, x):
"""
Performs the sigmoid activation function on x.
Args:
x (np.array): Weighted value of neurons at a given layer.
Returns:
            Sigmoid activation function applied to x.
"""
return 1 / (1 + np.exp(-x))
def forward_pass(self, x, individual):
"""
Performs the forward pass of the network.
Args:
x (np.array): Input to the neural network.
individual (np.array): Values of the weights of the network.
Returns:
y (np.array): Value of the output layer.
"""
w11, w12, w21, w22, wy1, wy2, b1, b2, by = individual
x1, x2 = x
h1 = self.sigmoid(w11 * x1 + w12 * x2 + b1)
h2 = self.sigmoid(w21 * x1 + w22 * x2 + b2)
y = self.sigmoid(wy1 * h1 + wy2 * h2 + by)
return y
def compute_fitness(self, population):
"""
        Computes the fitness of each individual as exp(-error), where error is the
        summed squared deviation of the network's outputs from the XOR truth table.
Args:
            population (np.array): Population containing the different individuals.
Returns:
fitness_population (np.array): Fitness of the population.
"""
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
outputs = np.array([0, 1, 1, 0])
fitness_population = np.zeros([len(population), 1])
for idx, individual in enumerate(population):
fitness = 0
for x, output in zip(inputs, outputs):
fitness += (output - self.forward_pass(x, individual)) ** 2
fitness_population[idx] = np.exp(-fitness)
return fitness_population.flatten()
def generate_next_population(self, population, mutation, recombination, selection):
"""
Generates the population for the next iteration.
Args:
            population (np.array): Population containing the different individuals.
Returns:
next_population, fitness_population (tuple): Returns tuple containing the next population and its fitness
"""
# Initialize new offspring
offspring = (self.upper_bound - self.lower_bound) * np.random.random((self.offspring_size, 9)) + self.lower_bound
# Recombinate best individuals
for individual in range(0, self.offspring_size, 2):
idx_parent1, idx_parent2 = np.random.choice(self.population_size, size=2, replace=False)
new_individual1, new_individual2 = recombination(population[idx_parent1], population[idx_parent2], self.alpha)
offspring[individual] = new_individual1
offspring[individual + 1] = new_individual2
# Add mutation
for idx in range(len(population)):
if np.random.random() < self.mutation_rate:
individual_mutated = mutation(population[idx], self.upper_bound, self.lower_bound)
population[idx] = individual_mutated
# Group populations
temporal_population = np.vstack((population, offspring))
fitness_population = self.compute_fitness(temporal_population)
# Select next generation with probability fitness / total_fitness
survivors = selection(fitness_population)
survivors = survivors[:self.population_size]
return (temporal_population[survivors], fitness_population[survivors])
def run(self):
"""
Runs the algorithm.
Returns:
            (tuple): solutions, max_fitness, mean_fitness, diversity_genotype and
                diversity_phenotype collected across the iterations.
"""
# Initialize first population
population = self.random_initial_population()
# Initialize fitness variables
mean_fitness = []
max_fitness = []
diversity_genotype = []
diversity_phenotype = []
# Initialize best_fitness
best_fitness_all = 0
# Iterate through generations
for iteration in tqdm(range(self.num_iterations), ncols=75):
population, fitness = self.generate_next_population(population, self.mutation_type, self.recombination_type,
self.selection_type)
# Save statistics iteration
best_fitness_iteration = np.max(fitness)
mean_fitness_iteration = np.mean(fitness)
diversity_genotype_iteration = np.unique(population, axis=0).shape[0]
diversity_phenotype_iteration = np.unique(fitness).shape[0]
max_fitness.append(best_fitness_iteration)
mean_fitness.append(mean_fitness_iteration)
diversity_genotype.append(diversity_genotype_iteration)
diversity_phenotype.append(diversity_phenotype_iteration)
# Keep best individuals
if best_fitness_iteration > best_fitness_all:
solutions = []
for best_individual in population[np.where(fitness == best_fitness_iteration)]:
if not any((best_individual == individual).all() for individual in solutions):
solutions.append(best_individual)
best_fitness_all = best_fitness_iteration
elif best_fitness_iteration == best_fitness_all:
for best_individual in population[np.where(fitness == best_fitness_iteration)]:
if not any((best_individual == individual).all() for individual in solutions):
solutions.append(best_individual)
return (np.asarray(solutions), max_fitness, mean_fitness, diversity_genotype, diversity_phenotype)
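# Minimal usage sketch (illustrative; relies on the defaults defined above):
#
#   ga = GeneticAlgorithm(num_iterations=500, population_size=100)
#   solutions, max_fit, mean_fit, div_geno, div_pheno = ga.run()
#   best = solutions[0]  # 9 weights: w11, w12, w21, w22, wy1, wy2, b1, b2, by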
| 40.150538 | 144 | 0.635913 | 819 | 7,468 | 5.616606 | 0.223443 | 0.028696 | 0.021304 | 0.017609 | 0.239565 | 0.14913 | 0.128261 | 0.128261 | 0.115652 | 0.115652 | 0 | 0.016024 | 0.281334 | 7,468 | 185 | 145 | 40.367568 | 0.841066 | 0.21639 | 0 | 0.067416 | 0 | 0 | 0.04038 | 0.016444 | 0 | 0 | 0 | 0 | 0.011236 | 1 | 0.089888 | false | 0.022472 | 0.033708 | 0 | 0.213483 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
647564ba63394f150f2633cb8c189e18207d6ab2 | 2,185 | py | Python | ps_tree/views.py | ITCase/ps_tree | a2152feb53d5f041b43203ee8ccaae65cb9d13e4 | [
"MIT"
] | null | null | null | ps_tree/views.py | ITCase/ps_tree | a2152feb53d5f041b43203ee8ccaae65cb9d13e4 | [
"MIT"
] | 12 | 2015-06-15T11:50:48.000Z | 2015-07-07T09:03:37.000Z | ps_tree/views.py | ITCase/ps_tree | a2152feb53d5f041b43203ee8ccaae65cb9d13e4 | [
"MIT"
] | null | null | null | import transaction
from pyramid.httpexceptions import HTTPInternalServerError
from pyramid.view import view_config
from pyramid_sacrud.security import (PYRAMID_SACRUD_DELETE,
PYRAMID_SACRUD_UPDATE)
from sacrud.common import pk_to_list
from . import CONFIG_MODELS, PS_TREE_GET_TREE, PS_TREE_PAGE_MOVE
def get_model(settings, tablename):
for model in settings[CONFIG_MODELS]:
if model.__tablename__ == tablename:
return model
return None
@view_config(
route_name=PS_TREE_GET_TREE,
permission=PS_TREE_GET_TREE,
renderer='json'
)
def get_tree(request):
def fields(node):
        node_list_of_pk = pk_to_list(node, True)
url_delete = request.route_url(
PYRAMID_SACRUD_DELETE,
table=node.__tablename__,
pk=pk_to_list(node))
url_update = request.route_url(
PYRAMID_SACRUD_UPDATE,
table=node.__tablename__,
pk=pk_to_list(node))
return {
'url_delete': url_delete,
'url_update': url_update,
'list_of_pk': node_list_of_pk,
}
table = get_model(request.registry.settings,
request.matchdict['tablename'])
return table.get_tree(request.dbsession, json=True, json_fields=fields)
@view_config(
route_name=PS_TREE_PAGE_MOVE,
permission=PS_TREE_PAGE_MOVE,
renderer='json'
)
def page_move(request):
method = request.matchdict['method']
node_id = request.matchdict['node_id']
target_id = request.matchdict['target_id']
tablename = request.matchdict['tablename']
table = get_model(request.registry.settings, tablename)
pk = table.get_pk_column()
page = request.dbsession.query(table).filter(pk == node_id).one()
if method == 'inside':
page.move_inside(target_id)
elif method == 'after':
page.move_after(target_id)
elif method == 'before':
page.move_before(target_id)
else:
raise HTTPInternalServerError("Unavailable method {}".format(method))
try:
request.dbsession.commit()
except AssertionError:
transaction.commit()
return ''
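# Illustrative matchdict for page_move (example values, not from the source):
#   {'tablename': 'pages', 'method': 'inside', 'node_id': '4', 'target_id': '2'}
# would move the node with primary key 4 inside the node with primary key 2.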
| 29.931507 | 77 | 0.667277 | 262 | 2,185 | 5.221374 | 0.248092 | 0.040936 | 0.023392 | 0.028509 | 0.187135 | 0.135965 | 0.046784 | 0.046784 | 0 | 0 | 0 | 0 | 0.240275 | 2,185 | 72 | 78 | 30.347222 | 0.824096 | 0 | 0 | 0.129032 | 0 | 0 | 0.053089 | 0 | 0 | 0 | 0 | 0 | 0.016129 | 1 | 0.064516 | false | 0 | 0.096774 | 0 | 0.241935 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
647674bcd02180e2d4d59bf2cb132ab346717ae5 | 1,068 | py | Python | Contests/Facebook Hacker Cup 2018/Qualification/Tourist/tourist.py | PK-100/Competitive_Programming | d0863feaaa99462b2999e85dcf115f7a6c08bb8d | [
"MIT"
] | 70 | 2018-06-25T21:20:15.000Z | 2022-03-24T03:55:17.000Z | Contests/Facebook Hacker Cup 2018/Qualification/Tourist/tourist.py | An3sha/Competitive_Programming | ee7eadf51939a360d0b004d787ebabda583e92f0 | [
"MIT"
] | 4 | 2018-09-04T13:12:20.000Z | 2021-06-20T08:29:12.000Z | Contests/Facebook Hacker Cup 2018/Qualification/Tourist/tourist.py | An3sha/Competitive_Programming | ee7eadf51939a360d0b004d787ebabda583e92f0 | [
"MIT"
] | 24 | 2018-12-26T05:15:32.000Z | 2022-01-23T23:04:54.000Z | '''
Author: Amitrajit Bose
Problem: Facebook HackerCup Qualification Round
'''
with open("Output.txt", "w") as text_file:
testcase=int(input())
    for case in range(testcase):
        # each test case is processed inside this loop
n,k,v=[int(x) for x in input().strip().split()]
citynames=[]
#citydict={}
out=[]
for i in range(n):
x=str(input().strip())
citynames.append(x)
#citydict[x]=0
tot=k*(v-1)
d1=tot//n
d2=tot%n
cnt=0
for i in range(n):
#citydict[citynames[i]]+=d1
if(i<d2):
#citydict[citynames[i]]+=1
cnt+=1
#cnt = number of cities that are more visited
morevisited=cnt
lessvisited=n-cnt
if(k<=lessvisited):
for i in range(n-lessvisited,n-lessvisited+k):
out.append(citynames[i])
else:
moretovisit=k-lessvisited
for i in range(moretovisit):
out.append(citynames[i])
for i in range(n-lessvisited,n):
out.append(citynames[i])
print("Case #{0}: ".format(str(_+1)),end="",file=text_file)
print(*out,file=text_file)
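# Worked trace (illustrative): n=3 cities, k=1, v=3 ->
# tot = k*(v-1) = 2, d2 = tot % n = 2, so the first two cities are "more visited"
# and lessvisited = 1; since k <= lessvisited, only the single less-visited city
# (index n-lessvisited = 2) is printed.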
| 22.723404 | 61 | 0.647004 | 165 | 1,068 | 4.157576 | 0.381818 | 0.061224 | 0.043732 | 0.080175 | 0.155977 | 0.120991 | 0.069971 | 0 | 0 | 0 | 0 | 0.012514 | 0.176966 | 1,068 | 47 | 62 | 22.723404 | 0.767918 | 0.273408 | 0 | 0.172414 | 0 | 0 | 0.028909 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
647716fe60d32877837b9b44ced048b662a0498e | 1,635 | py | Python | src/cli.py | jconradhanson/BEAT | 47a828c486e674323782c11b78be63aae003c45d | [
"MIT"
] | null | null | null | src/cli.py | jconradhanson/BEAT | 47a828c486e674323782c11b78be63aae003c45d | [
"MIT"
] | null | null | null | src/cli.py | jconradhanson/BEAT | 47a828c486e674323782c11b78be63aae003c45d | [
"MIT"
] | null | null | null | import logging
import argparse
from beat import beat
from definitions import path_log
# LOGGING CONFIGURATION
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p'
)
logging.root.addHandler(logging.FileHandler(path_log, mode='w', encoding='UTF-8'))
logging.getLogger("easyprocess").setLevel(logging.WARNING)
# COMMAND LINE ARGUMENT PARSER
parser = argparse.ArgumentParser()
# POSITIONAL ARGS
parser.add_argument('subject', type=str,
help='the subject you want to search')
parser.add_argument('state_code', type=str,
help='the two letter state abbreviation for where you want to search the subject')
# OPTIONAL ARGS
parser.add_argument('-c', '--city', type=str,
help='the city you want to begin the search at (cities are searched alphabetically)')
args = parser.parse_args()
subject = args.subject.strip()
state_code = args.state_code.strip().upper()
# VALIDATE ARG VALUES & RUN BEAT
if len(state_code) != 2:
    logging.error('State Code is invalid. Must be two letters.')
elif not isinstance(state_code, str):
logging.error('State Code is invalid. Must be a string.')
elif not isinstance(subject, str):
logging.error('Subject is invalid. Must be a string.')
else:
if args.city:
city = args.city.strip()
if not isinstance(city, str):
logging.error('City is invalid. Must be a string.')
else:
beat(subject=subject, state_code=state_code, start_city=city)
else:
beat(subject=subject, state_code=state_code)
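# Example invocation (illustrative values):
#   python cli.py "coffee shops" WA --city Seattle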
| 35.543478 | 105 | 0.687462 | 227 | 1,635 | 4.876652 | 0.405286 | 0.097561 | 0.046974 | 0.054201 | 0.186992 | 0.186992 | 0.180668 | 0.137308 | 0 | 0 | 0 | 0.001511 | 0.190214 | 1,635 | 45 | 106 | 36.333333 | 0.834592 | 0.06789 | 0 | 0.081081 | 0 | 0 | 0.277339 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.108108 | 0 | 0.108108 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6477ef9e1d25e92daab6a18794994b24710c4290 | 5,678 | py | Python | tests/payments/four/request_apm_payments_four_integration_test.py | riaz-bordie-cko/checkout-sdk-python | d9bc073306c1a98544c326be693ed722576ea895 | [
"MIT"
] | null | null | null | tests/payments/four/request_apm_payments_four_integration_test.py | riaz-bordie-cko/checkout-sdk-python | d9bc073306c1a98544c326be693ed722576ea895 | [
"MIT"
] | null | null | null | tests/payments/four/request_apm_payments_four_integration_test.py | riaz-bordie-cko/checkout-sdk-python | d9bc073306c1a98544c326be693ed722576ea895 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import os
import pytest
import checkout_sdk
from checkout_sdk.common.common import Address, CustomerRequest, Phone
from checkout_sdk.common.common_four import Product
from checkout_sdk.common.enums import Currency, Country
from checkout_sdk.payments.payment_apm_four import RequestIdealSource, RequestTamaraSource
from checkout_sdk.payments.payments import ProcessingSettings
from checkout_sdk.payments.payments_apm import RequestSofortSource
from checkout_sdk.payments.payments_four import PaymentRequest
from tests.checkout_test_utils import assert_response, SUCCESS_URL, FAILURE_URL, retriable
def test_should_request_ideal_payment(four_api):
request_source = RequestIdealSource()
request_source.bic = 'INGBNL2A'
request_source.description = 'ORD50234E89'
request_source.language = 'nl'
payment_request = PaymentRequest()
payment_request.source = request_source
payment_request.amount = 1000
payment_request.currency = Currency.EUR
payment_request.capture = True
payment_request.success_url = SUCCESS_URL
payment_request.failure_url = FAILURE_URL
payment_response = retriable(callback=four_api.payments.request_payment,
payment_request=payment_request)
assert_response(payment_response,
'http_response',
'id',
'status',
'_links',
'_links.self',
'_links.redirect')
payment_details = retriable(callback=four_api.payments.get_payment_details,
payment_id=payment_response.id)
assert_response(payment_details,
'http_response',
'id',
'requested_on',
'source',
'amount',
'currency',
'payment_type',
'status')
def test_should_request_sofort_payment(four_api):
payment_request = PaymentRequest()
payment_request.source = RequestSofortSource()
payment_request.amount = 100
payment_request.currency = Currency.EUR
payment_request.capture = True
payment_request.success_url = SUCCESS_URL
payment_request.failure_url = FAILURE_URL
payment_response = retriable(callback=four_api.payments.request_payment,
payment_request=payment_request)
assert_response(payment_response,
'http_response',
'id',
'status',
'_links',
'_links.self',
'_links.redirect')
payment_details = retriable(callback=four_api.payments.get_payment_details,
payment_id=payment_response.id)
assert_response(payment_details,
'http_response',
'id',
'requested_on',
'source',
'amount',
'currency',
'payment_type',
'status')
@pytest.mark.skip(reason='preview')
def test_should_request_tamara_payment():
address = Address()
address.address_line1 = 'Cecilia Chapman'
address.address_line2 = '711-2880 Nulla St.'
address.city = 'Mankato'
address.state = 'Mississippi'
address.zip = '96522'
address.country = Country.SA
payment_request_source = RequestTamaraSource()
payment_request_source.billing_address = address
processing_settings = ProcessingSettings()
processing_settings.aft = True
processing_settings.tax_amount = 500
processing_settings.shipping_amount = 1000
phone = Phone()
phone.number = '113 496 0000'
phone.country_code = '+966'
customer_request = CustomerRequest()
customer_request.name = 'Cecilia Chapman'
customer_request.email = 'c.chapman@example.com'
customer_request.phone = phone
product = Product()
product.name = 'Item name'
product.quantity = 3
product.unit_price = 100
product.total_amount = 100
product.tax_amount = 19
product.discount_amount = 2
product.reference = 'some description about item'
product.image_url = 'https://some_s3bucket.com'
product.url = 'https://some.website.com/item'
product.sku = '123687000111'
payment_request = PaymentRequest()
payment_request.source = payment_request_source
payment_request.amount = 10000
payment_request.currency = Currency.SAR
payment_request.capture = True
payment_request.success_url = SUCCESS_URL
payment_request.failure_url = FAILURE_URL
payment_request.processing = processing_settings
payment_request.processing_channel_id = 'pc_zs5fqhybzc2e3jmq3efvybybpq'
payment_request.customer = customer_request
payment_request.reference = 'ORD-5023-4E89'
payment_request.items = [product]
preview_api = checkout_sdk.OAuthSdk() \
.client_credentials(client_id=os.environ.get('CHECKOUT_FOUR_PREVIEW_OAUTH_CLIENT_ID'),
client_secret=os.environ.get('CHECKOUT_FOUR_PREVIEW_OAUTH_CLIENT_SECRET')) \
.build()
payment_response = retriable(callback=preview_api.payments.request_payment,
payment_request=payment_request)
assert_response(payment_response,
'id',
'reference',
'status',
'_links',
'customer',
'customer.id',
'customer.name',
'customer.email',
'customer.phone')
| 36.397436 | 104 | 0.646178 | 555 | 5,678 | 6.29009 | 0.248649 | 0.140361 | 0.030077 | 0.026353 | 0.449728 | 0.386709 | 0.34546 | 0.34546 | 0.321398 | 0.321398 | 0 | 0.021453 | 0.277563 | 5,678 | 155 | 105 | 36.632258 | 0.829595 | 0 | 0 | 0.443609 | 0 | 0 | 0.119232 | 0.022543 | 0 | 0 | 0 | 0 | 0.045113 | 1 | 0.022556 | false | 0 | 0.090226 | 0 | 0.112782 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6479a772a0aa85678bea2bbb58a2808af7469318 | 4,524 | py | Python | sandbox/lib/jumpscale/Jumpscale/clients/ssh/SSHClientFactory.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | null | null | null | sandbox/lib/jumpscale/Jumpscale/clients/ssh/SSHClientFactory.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | null | null | null | sandbox/lib/jumpscale/Jumpscale/clients/ssh/SSHClientFactory.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | null | null | null | from Jumpscale import j
from .SSHClient import SSHClient
from .SSHClientParamiko import SSHClientParamiko
from .SSHClientBase import SSHClientBase
class SSHClientFactory(j.baseclasses.object_config_collection_testtools):
__jslocation__ = "j.clients.ssh"
_CHILDCLASS = SSHClientBase
_SCHEMATEXT = _CHILDCLASS._SCHEMATEXT
def _init(self, **kwargs):
self._clients = {}
self._SSHClientBaseClass = SSHClientBase
def _childclass_selector(self, jsxobject):
"""
gives a creator of a factory the ability to change the type of child to be returned
:return:
"""
if jsxobject.client_type == "pssh":
return SSHClient
elif j.core.platformtype.myplatform.platform_is_osx:
# return SSHClientParamiko
return SSHClient
else:
return SSHClientParamiko
def test(self):
"""
kosmos 'j.clients.ssh.test()'
"""
wg = self.master.wireguard_server
# wg.executor.file_write("/var/test", "test")
# r = wg.executor.file_read("/var/test")
# assert r == "test"
#
# wg.executor.file_write("/var/test", b"test")
# r = wg.executor.file_read("/var/test")
# assert r == "test" or r == b"test"
# wg.config["test"] = ["bb"]
# wg.save()
wg.server_start()
j.shell()
# TODO:*1 create docker make sure default key is used in the docker
# d = j.sal.docker.create(name='test', ports='22:8022', vols='', volsro='', stdout=True, base='phusion/baseimage',
# nameserver=['8.8.8.8'], replace=True, cpu=None, mem=0,
# myinit=True, sharecode=True)
# TODO: then connect to the just created docker and do some more tests
# addr = "104.248.87.200"
# port = 22
#
# # make sure we enforce pssh
# cl = j.clients.ssh.get(name="remote1", addr=addr, port=port, client_type="pssh")
cl = j.clients.digitalocean.get_testvm_sshclient(delete=False)
ex = cl.executor
cl.reset()
assert ex.state == {}
assert cl._connected == None
assert ex.env_on_system_msgpack == b""
assert ex.config_msgpack == b""
rc, out, err = ex.execute("ls /")
assert rc == 0
assert err == ""
assert out.endswith("\n")
ex.state_set("bla")
assert ex.state == {"bla": None}
assert ex.state_exists("bla")
assert ex.state_exists("blabla") == False
assert ex.state_get("bla") == None
ex.state_reset()
assert ex.state_exists("bla") == False
assert ex.state == {}
ex.state_set("bla", 1)
assert ex.state == {"bla": 1}
e = ex.env_on_system
assert e["HOME"] == "/root"
ex.file_write("/tmp/1", "a")
assert ex.file_read("/tmp/1").strip() == "a"
ftp = cl.sftp
stat = cl.sftp_stat("/tmp/1")
statdir = cl.sftp_stat("/tmp")
assert stat.filesize == 1
assert ex.path_isdir("/tmp")
assert ex.path_isfile("/tmp") == False
assert ex.path_isfile("/tmp/1")
path = ex.download("/tmp/1", "/tmp/something.txt")
path = ex.upload("/tmp/something.txt", "/tmp/2")
assert ex.file_read("/tmp/2").strip() == "a"
assert j.sal.fs.readFile("/tmp/something.txt").strip() == "a"
j.sal.fs.remove("/tmp/something.txt")
j.sal.fs.createDir("/tmp/8888")
j.sal.fs.createDir("/tmp/8888/5")
j.sal.fs.writeFile("/tmp/8888/1.txt", "a")
j.sal.fs.writeFile("/tmp/8888/2.txt", "a")
j.sal.fs.writeFile("/tmp/8888/5/3.txt", "a")
path = ex.upload("/tmp/8888")
r = ex.find("/tmp/8888")
assert r == ["/tmp/8888/1.txt", "/tmp/8888/2.txt", "/tmp/8888/5", "/tmp/8888/5/3.txt"]
ex.download("/tmp/8888", "/tmp/8889")
r2 = j.sal.fs.listFilesAndDirsInDir("/tmp/8889")
r2.sort()
assert r2 == ["/tmp/8889/1.txt", "/tmp/8889/2.txt", "/tmp/8889/5", "/tmp/8889/5/3.txt"]
cl.executor.delete("/tmp/8888")
r2 = ex.find("/tmp/8888")
assert r2 == []
j.sal.fs.remove("/tmp/8888")
j.sal.fs.remove("/tmp/8889")
cl.reset()
assert ex.state == {}
assert cl._connected == None
assert ex.env_on_system_msgpack == b""
assert ex.config_msgpack == b""
self._log_info("TEST FOR SSHCLIENT IS OK")
| 31.2 | 122 | 0.556366 | 585 | 4,524 | 4.205128 | 0.309402 | 0.058537 | 0.02439 | 0.021951 | 0.268293 | 0.17561 | 0.127642 | 0.127642 | 0.106504 | 0.106504 | 0 | 0.044739 | 0.283599 | 4,524 | 144 | 123 | 31.416667 | 0.714286 | 0.20336 | 0 | 0.156627 | 0 | 0 | 0.136016 | 0 | 0 | 0 | 0 | 0.006944 | 0.349398 | 1 | 0.036145 | false | 0 | 0.048193 | 0 | 0.168675 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
647c690b82071beffb86046caafc6c207204c7e0 | 4,316 | py | Python | taxcalc_arp/taxcalc/validation/taxsim27/prepare_taxcalc_input.py | hdoupe/Tax-Cruncher-ARP | c8c960c085d0883915f99ac2ea4630c928af4c16 | [
"MIT"
] | 1 | 2021-10-01T14:22:32.000Z | 2021-10-01T14:22:32.000Z | taxcalc_arp/taxcalc/validation/taxsim27/prepare_taxcalc_input.py | hdoupe/Tax-Cruncher-ARP | c8c960c085d0883915f99ac2ea4630c928af4c16 | [
"MIT"
] | 1 | 2021-03-16T13:57:36.000Z | 2021-03-16T13:57:36.000Z | taxcalc_arp/taxcalc/validation/taxsim27/prepare_taxcalc_input.py | hdoupe/Tax-Cruncher-ARP | c8c960c085d0883915f99ac2ea4630c928af4c16 | [
"MIT"
] | 1 | 2021-03-16T13:44:38.000Z | 2021-03-16T13:44:38.000Z | """
Translates TAXSIM-27 input file to Tax-Calculator tc input file.
"""
# CODING-STYLE CHECKS:
# pycodestyle prepare_taxcalc_input.py
# pylint --disable=locally-disabled prepare_taxcalc_input.py
import argparse
import os
import sys
import numpy as np
import pandas as pd
def main():
"""
High-level logic.
"""
# parse command-line arguments:
    usage_str = 'python prepare_taxcalc_input.py INPUT OUTPUT [--help]'
parser = argparse.ArgumentParser(
prog='',
usage=usage_str,
description=('Translates TAXSIM-27 input file into a Tax-Calculator '
'CSV-formatted tc input file. '
'Any pre-existing OUTPUT file contents are overwritten. '
'For details on Internet TAXSIM version 27 INPUT '
'format, go to '
'https://users.nber.org/~taxsim/taxsim27/'))
parser.add_argument('INPUT', nargs='?', default='',
help=('INPUT is name of file that contains '
'TAXSIM-27 input.'))
parser.add_argument('OUTPUT', nargs='?', default='',
help=('OUTPUT is name of file that will contain '
'CSV-formatted Tax-Calculator tc input.'))
args = parser.parse_args()
# check INPUT filename
if args.INPUT == '':
sys.stderr.write('ERROR: must specify INPUT file name\n')
sys.stderr.write('USAGE: {}\n'.format(usage_str))
return 1
if not os.path.isfile(args.INPUT):
emsg = 'INPUT file named {} does not exist'.format(args.INPUT)
sys.stderr.write('ERROR: {}\n'.format(emsg))
return 1
# check OUTPUT filename
if args.OUTPUT == '':
sys.stderr.write('ERROR: must specify OUTPUT file name\n')
sys.stderr.write('USAGE: {}\n'.format(usage_str))
return 1
if os.path.isfile(args.OUTPUT):
os.remove(args.OUTPUT)
# read TAXSIM-27 INPUT file into a pandas DataFrame
ivar = pd.read_csv(args.INPUT, delim_whitespace=True,
header=None, names=range(1, 28))
# translate INPUT variables into OUTPUT variables
invar = translate(ivar)
# write OUTPUT file containing Tax-Calculator input variables
invar.to_csv(args.OUTPUT, index=False)
# return no-error exit code
return 0
# end of main function code
def translate(ivar):
"""
Translate TAXSIM-27 input variables into Tax-Calculator input variables.
Both ivar and returned invar are pandas DataFrame objects.
"""
assert isinstance(ivar, pd.DataFrame)
invar = pd.DataFrame()
invar['RECID'] = ivar.loc[:, 1]
invar['FLPDYR'] = ivar.loc[:, 2]
# no Tax-Calculator use of TAXSIM variable 3, state code
mstat = ivar.loc[:, 4]
assert np.all(np.logical_or(mstat == 1, mstat == 2))
invar['age_head'] = ivar.loc[:, 5]
invar['age_spouse'] = ivar.loc[:, 6]
num_deps = ivar.loc[:, 7]
mars = np.where(mstat == 1, np.where(num_deps > 0, 4, 1), 2)
assert np.all(np.logical_or(mars == 1,
np.logical_or(mars == 2, mars == 4)))
invar['MARS'] = mars
invar['f2441'] = ivar.loc[:, 8]
invar['n24'] = ivar.loc[:, 9]
num_eitc_qualified_kids = ivar.loc[:, 10]
invar['EIC'] = np.minimum(num_eitc_qualified_kids, 3)
num_taxpayers = np.where(mars == 2, 2, 1)
invar['XTOT'] = num_taxpayers + num_deps
invar['e00200p'] = ivar.loc[:, 11]
invar['e00200s'] = ivar.loc[:, 12]
invar['e00200'] = invar['e00200p'] + invar['e00200s']
invar['e00650'] = ivar.loc[:, 13]
invar['e00600'] = invar['e00650']
invar['e00300'] = ivar.loc[:, 14]
invar['p22250'] = ivar.loc[:, 15]
invar['p23250'] = ivar.loc[:, 16]
invar['e02000'] = ivar.loc[:, 17]
invar['e00800'] = ivar.loc[:, 18]
invar['e01700'] = ivar.loc[:, 19]
invar['e01500'] = invar['e01700']
invar['e02400'] = ivar.loc[:, 20]
invar['e02300'] = ivar.loc[:, 21]
# no Tax-Calculator use of TAXSIM variable 22, non-taxable transfers
# no Tax-Calculator use of TAXSIM variable 23, rent paid
invar['e18500'] = ivar.loc[:, 24]
invar['e18400'] = ivar.loc[:, 25]
invar['e32800'] = ivar.loc[:, 26]
invar['e19200'] = ivar.loc[:, 27]
return invar
if __name__ == '__main__':
sys.exit(main())
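# Example invocation (illustrative file names, matching the usage string above):
#   python prepare_taxcalc_input.py taxsim27.in taxcalc_input.csv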
| 37.530435 | 78 | 0.599398 | 567 | 4,316 | 4.492063 | 0.356261 | 0.06596 | 0.02552 | 0.020024 | 0.180605 | 0.153514 | 0.080879 | 0.040832 | 0.040832 | 0.040832 | 0 | 0.060888 | 0.254171 | 4,316 | 114 | 79 | 37.859649 | 0.730351 | 0.181186 | 0 | 0.060241 | 0 | 0 | 0.220178 | 0 | 0 | 0 | 0 | 0 | 0.036145 | 1 | 0.024096 | false | 0 | 0.060241 | 0 | 0.144578 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
647cfe0af9da296040c33beb3819d628a1608217 | 1,510 | py | Python | tests/integration_tests/resources/plugins/mock-rest-plugin/setup.py | TS-at-WS/cloudify-manager | 3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc | [
"Apache-2.0"
] | 124 | 2015-01-22T22:28:37.000Z | 2022-02-26T23:12:06.000Z | tests/integration_tests/resources/plugins/mock-rest-plugin/setup.py | TS-at-WS/cloudify-manager | 3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc | [
"Apache-2.0"
] | 345 | 2015-01-08T15:49:40.000Z | 2022-03-29T08:33:00.000Z | tests/integration_tests/resources/plugins/mock-rest-plugin/setup.py | TS-at-WS/cloudify-manager | 3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc | [
"Apache-2.0"
] | 77 | 2015-01-07T14:04:35.000Z | 2022-03-07T22:46:00.000Z | # ***************************************************************************
# * Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# ***************************************************************************/
from setuptools import setup
from setuptools.command.install import install
class InstallCommand(install):
user_options = install.user_options + [
('do-not-fail', None, 'for testing')]
boolean_options = install.boolean_options + ['do-not-fail']
def initialize_options(self):
install.initialize_options(self)
self.do_not_fail = None
def finalize_options(self):
install.finalize_options(self)
if not self.do_not_fail:
raise RuntimeError('No one asked me not to fail, so I did')
setup(
name='mock-rest-plugin',
version='4.2',
packages=['mock_rest_plugin'],
cmdclass={
'install': InstallCommand,
}
)
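# Installation requires opting out of the deliberate failure, e.g. (illustrative):
#   python setup.py install --do-not-fail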
| 33.555556 | 79 | 0.619868 | 181 | 1,510 | 5.093923 | 0.574586 | 0.065076 | 0.039046 | 0.034707 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00821 | 0.193377 | 1,510 | 44 | 80 | 34.318182 | 0.748768 | 0.515894 | 0 | 0 | 0 | 0 | 0.156643 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.095238 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
647d10de27c79bcb57ed12505d2e4cdc3b96770f | 914 | py | Python | python/privatelink-rds/stacks/vpc_stack.py | aarondodd/aws-cdk-examples | 7ac382fe99656df19a4f96159c11e0aa546acaf4 | [
"Apache-2.0"
] | null | null | null | python/privatelink-rds/stacks/vpc_stack.py | aarondodd/aws-cdk-examples | 7ac382fe99656df19a4f96159c11e0aa546acaf4 | [
"Apache-2.0"
] | null | null | null | python/privatelink-rds/stacks/vpc_stack.py | aarondodd/aws-cdk-examples | 7ac382fe99656df19a4f96159c11e0aa546acaf4 | [
"Apache-2.0"
] | 1 | 2022-01-31T03:13:37.000Z | 2022-01-31T03:13:37.000Z | from aws_cdk import (
core,
aws_ec2 as ec2
)
class VpcStack(core.Stack):
def __init__(self, scope: core.Construct, id: str,
vpc_cidr,
**kwargs
) -> None:
super().__init__(scope, id, **kwargs)
        # SubnetType.ISOLATED is used because no Internet traffic should be possible in this demo
self.vpc = ec2.Vpc(self, "VPC",
max_azs = 2,
cidr = vpc_cidr,
subnet_configuration = [
ec2.SubnetConfiguration(
subnet_type = ec2.SubnetType.ISOLATED,
name = "PrivateIngress",
cidr_mask = 28
), ec2.SubnetConfiguration(
subnet_type = ec2.SubnetType.ISOLATED,
name = "DB",
cidr_mask = 28
)
],
)
core.CfnOutput(self, "Output",
value = self.vpc.vpc_id) | 29.483871 | 91 | 0.503282 | 91 | 914 | 4.846154 | 0.549451 | 0.122449 | 0.126984 | 0.145125 | 0.258503 | 0.258503 | 0.258503 | 0.258503 | 0 | 0 | 0 | 0.022018 | 0.40372 | 914 | 31 | 92 | 29.483871 | 0.787156 | 0.088621 | 0 | 0.148148 | 0 | 0 | 0.030048 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.037037 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
647ed57ed963f5240abc63291aaba067a117c593 | 2,266 | py | Python | lifesaver/bot/config.py | slice/discord.py-lifesaver | 00c38112a512efd964cbbf0533096eff0a29f79f | [
"MIT"
] | 12 | 2017-12-21T03:44:52.000Z | 2021-02-05T02:09:13.000Z | lifesaver/bot/config.py | slice/lifesaver | 00c38112a512efd964cbbf0533096eff0a29f79f | [
"MIT"
] | 9 | 2017-12-21T01:56:07.000Z | 2020-12-31T00:01:20.000Z | lifesaver/bot/config.py | slice/lifesaver | 00c38112a512efd964cbbf0533096eff0a29f79f | [
"MIT"
] | 2 | 2017-12-21T01:52:07.000Z | 2019-12-17T01:51:50.000Z | # encoding: utf-8
__all__ = ["BotConfig", "BotLoggingConfig"]
from typing import Any, Dict, List, Optional, Union
from lifesaver.config import Config
YES_EMOJI = "\N{WHITE HEAVY CHECK MARK}"
NO_EMOJI = "\N{CROSS MARK}"
OK_EMOJI = "\N{OK HAND SIGN}"
DEFAULT_EMOJIS = {
"generic": {"yes": YES_EMOJI, "no": NO_EMOJI, "ok": OK_EMOJI,},
}
class BotLoggingConfig(Config):
#: The logging level to use.
level: str = "INFO"
#: The file to output to.
file: str = "bot.log"
#: The logging format.
format: str = "[{asctime}] [{levelname}] {name}: {message}"
#: The time logging format.
time_format: str = "%Y-%m-%d %H:%M:%S"
class BotConfig(Config):
#: The token of the bot.
token: str
#: The custom bot class to instantiate when using the CLI module.
#:
#: It is formatted as an import path and class separated by a colon, like::
#:
#: coolbot.bot:CustomBotClass
bot_class: Optional[str] = None
#: The custom config class to use when using the CLI.
config_class: Optional[str] = None
#: The logging config to use when using the CLI. See :class:`BotLoggingConfig`.
logging: BotLoggingConfig
#: The path to load extensions from.
extensions_path: str = "./exts"
#: The path for cog-specific configuration files.
cog_config_path: str = "./config"
#: Ignores bots when processing commands.
ignore_bots: bool = True
#: The command prefix to use. Can be a string or a list of strings.
command_prefix: Union[List[str], str] = "!"
#: The intent flag used when connecting to the gateway.
intents: Union[List[str], str] = "default"
#: The bot's description. Shown in the help command.
description: str = "A Discord bot."
#: A tribool describing how the bot should decide to DM help messages.
#: See :attr:`discord.ext.commands.DefaultHelpCommand.dm_help`.
dm_help: Optional[bool] = None
#: Determines whether mentions work as a prefix.
command_prefix_include_mentions: bool = True
#: Enables the hot reloader.
hot_reload: bool = False
#: The global bot emoji table.
emojis: Dict[str, Any] = DEFAULT_EMOJIS
#: PostgreSQL access credentials.
postgres: Optional[Dict[str, Any]] = None
| 27.634146 | 83 | 0.660194 | 309 | 2,266 | 4.757282 | 0.443366 | 0.013605 | 0.02449 | 0.030612 | 0.058503 | 0.027211 | 0 | 0 | 0 | 0 | 0 | 0.00057 | 0.225508 | 2,266 | 81 | 84 | 27.975309 | 0.837037 | 0.440424 | 0 | 0 | 0 | 0 | 0.162641 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.766667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64808ef89b84bb0c8f0cd239793909c476cae64b | 1,162 | py | Python | function_zoo.py | RamiroFuentes/Procesos-de-separacion-II | d68873f8ee3e9eb081f1040f9510335746b1a0b4 | [
"MIT"
] | null | null | null | function_zoo.py | RamiroFuentes/Procesos-de-separacion-II | d68873f8ee3e9eb081f1040f9510335746b1a0b4 | [
"MIT"
] | null | null | null | function_zoo.py | RamiroFuentes/Procesos-de-separacion-II | d68873f8ee3e9eb081f1040f9510335746b1a0b4 | [
"MIT"
] | null | null | null | # Librerias
import numpy as np
from numpy import poly1d,polyfit
import matplotlib.pyplot as plt
from sympy import Symbol
import pandas as pd
# For printing output in LaTeX format
from sympy.interactive import printing
printing.init_printing(use_latex=True)
def Rachford_Rice_4(z,k,Li,Ls,p):
psi = np.arange(Li,Ls+p,p)
f_psi = []
    for value in psi:
        # sum the four Rachford-Rice terms z_i*(1-K_i)/(1+psi*(K_i-1))
        f_psi.append(sum(z[i] * (1 - k[i]) / (1 + value * (k[i] - 1)) for i in range(4)))
return psi,f_psi
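# For reference, the function above samples the (sign-flipped) Rachford-Rice
# flash residual, where z_i are feed mole fractions and K_i equilibrium ratios:
#   f(psi) = sum_i z_i * (1 - K_i) / (1 + psi * (K_i - 1)),  root at f(psi) = 0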
def find_root_interval(given):
    # return the indices bracketing the first negative-to-non-negative sign change
    for i, value in enumerate(given):
        if value >= 0:
            return i - 1, i
    raise ValueError("no sign change found in the sampled values")
def interp(x_1,x_2,y_1,y_2,y_n):
x_n = x_1 -((x_1-x_2)*(y_1-y_n)/(y_1-y_2))
return x_n , y_n
def interp_y(x_1,x_2,y_1,y_2,x_n):
y_n = y_1 + (y_2-y_1)*((x_n-x_1)/(x_2-x_1))
return x_n , y_n
def fracc(z_i, Psi_c, k_i):
    # liquid (x_i) and vapor (y_i) mole fractions at the converged vapor fraction Psi_c
    x_i = []
    y_i = []
    for i, z in enumerate(z_i):
        x_i.append(z / (1 + Psi_c * (k_i[i] - 1)))
        y_i.append(k_i[i] * x_i[i])
return x_i,y_i | 24.208333 | 168 | 0.557659 | 252 | 1,162 | 2.337302 | 0.238095 | 0.020374 | 0.025467 | 0.027165 | 0.120543 | 0.101868 | 0.039049 | 0.027165 | 0 | 0 | 0 | 0.067285 | 0.258176 | 1,162 | 48 | 169 | 24.208333 | 0.616009 | 0.035284 | 0 | 0.162162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0.162162 | 0 | 0.432432 | 0.054054 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6480abc07da7997c5213c7de64b16b108725ed90 | 840 | py | Python | Python/lc_523_continuous_subarray_sum.py | cmattey/leetcode_problems | fe57e668db23f7c480835c0a10f363d718fbaefd | [
"MIT"
] | 6 | 2019-07-01T22:03:25.000Z | 2020-04-06T15:17:46.000Z | Python/lc_523_continuous_subarray_sum.py | cmattey/leetcode_problems | fe57e668db23f7c480835c0a10f363d718fbaefd | [
"MIT"
] | null | null | null | Python/lc_523_continuous_subarray_sum.py | cmattey/leetcode_problems | fe57e668db23f7c480835c0a10f363d718fbaefd | [
"MIT"
] | 1 | 2020-04-01T22:31:41.000Z | 2020-04-01T22:31:41.000Z | # Time: O(n)
# Space: O(min(n, k)), since the map stores at most one index per distinct remainder mod k
class Solution:
def checkSubarraySum(self, nums: List[int], k: int) -> bool:
"""
Lots of edge cases to take care of with 0.
        if a % k == b % k, then (b - a) % k == 0
        e.g. if a % k == 3 and b % k == 3, then (a - 3) % k == 0 and (b - 3) % k == 0,
        hence ((b - 3) - (a - 3)) % k == (b - a) % k == 0
"""
imap = {}
run_sum = 0
imap[0] = -1
for index, num in enumerate(nums):
            run_sum += num
if k!=0:
run_sum = run_sum%k
if run_sum in imap:
if index-imap[run_sum]>1:
return True
            else:
                # record only the first index seen for each remainder; keeping the
                # earliest index means any later repeat spans the widest possible window
                imap[run_sum] = index
return False
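# Worked example (illustrative): nums = [23, 2, 4, 6, 7], k = 6
# running sums mod 6: 5, 1, 5 -> remainder 5 repeats at indices 0 and 2,
# and 2 - 0 > 1, so the subarray [2, 4] sums to a multiple of 6 -> True.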
| 30 | 122 | 0.492857 | 137 | 840 | 2.963504 | 0.437956 | 0.118227 | 0.022167 | 0.029557 | 0.036946 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032075 | 0.369048 | 840 | 27 | 123 | 31.111111 | 0.733962 | 0.39881 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
648156a0f59f1f2c08d7464a8925a30d4b6a2f12 | 1,180 | py | Python | api/routers/gifs.py | janaSunrise/AIO-API-discord-bots | e280959f7023bb9d745c843e823b980ccf627438 | [
"Apache-2.0"
] | 2 | 2021-02-13T08:08:23.000Z | 2021-05-09T13:10:54.000Z | api/routers/gifs.py | janaSunrise/AIO-API-discord-bots | e280959f7023bb9d745c843e823b980ccf627438 | [
"Apache-2.0"
] | null | null | null | api/routers/gifs.py | janaSunrise/AIO-API-discord-bots | e280959f7023bb9d745c843e823b980ccf627438 | [
"Apache-2.0"
] | 1 | 2021-04-17T19:44:52.000Z | 2021-04-17T19:44:52.000Z | from fastapi import APIRouter, Request
from api.core import log_error
router = APIRouter(
prefix="/gifs",
tags=["GIF generation endpoints"],
responses={
404: {"description": "Not found"},
},
)
# -- Router paths --
@router.get("/wink")
@log_error()
async def wink(request: Request) -> dict:
"""Get a random wink gif."""
http_client = request.app.state.http_client
async with http_client.session.get("https://some-random-api.ml/animu/wink") as resp:
json = await resp.json()
return {"url": json["link"]}
@router.get("/pat")
@log_error()
async def pat(request: Request) -> dict:
"""Get a random pat gif."""
http_client = request.app.state.http_client
async with http_client.session.get("https://some-random-api.ml/animu/pat") as resp:
json = await resp.json()
return {"url": json["link"]}
@router.get("/hug")
@log_error()
async def hug(request: Request) -> dict:
"""Get a random hug gif."""
http_client = request.app.state.http_client
async with http_client.session.get("https://some-random-api.ml/animu/hug") as resp:
json = await resp.json()
return {"url": json["link"]}
| 24.081633 | 88 | 0.64322 | 164 | 1,180 | 4.54878 | 0.292683 | 0.120643 | 0.052279 | 0.064343 | 0.66756 | 0.66756 | 0.55496 | 0.55496 | 0.55496 | 0.55496 | 0 | 0.003132 | 0.188136 | 1,180 | 48 | 89 | 24.583333 | 0.775574 | 0.015254 | 0 | 0.4 | 0 | 0 | 0.178108 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6483fef3d85844e7f3c1dc2eb3bf6958fc1d4faa | 565 | py | Python | feature_extract_pc/demo.py | zzc-1998/NR-3DQA | 4773b598a92f707892fabafd7061b758dfb37508 | [
"MIT"
] | 5 | 2021-08-04T08:24:28.000Z | 2022-02-16T15:30:18.000Z | feature_extract_pc/demo.py | zzc-1998/NR-QA-for-point-cloud | 4773b598a92f707892fabafd7061b758dfb37508 | [
"MIT"
] | null | null | null | feature_extract_pc/demo.py | zzc-1998/NR-QA-for-point-cloud | 4773b598a92f707892fabafd7061b758dfb37508 | [
"MIT"
] | null | null | null | from feature_extract import get_feature_vector
import time

# Demo: extract the no-reference quality feature vector for one point cloud.
objpath = "models/hhi_5.ply"
start = time.time()
features = get_feature_vector(objpath)
end = time.time()
time_cost = end - start

# Show the features: 8 feature domains x 11 statistical parameters per domain.
cnt = 0
for feature_domain in ['l','a','b','curvature','anisotropy','linearity','planarity','sphericity']:
    for param in ["mean","std","entropy","ggd1","ggd2","aggd1","aggd2","aggd3","aggd4","gamma1","gamma2"]:
        print(feature_domain + "_" + param + ": " + str(features[cnt]))
        cnt += 1

print("Cost " + str(time_cost) + " sec.")
| 29.736842 | 106 | 0.661947 | 77 | 565 | 4.714286 | 0.623377 | 0.066116 | 0.088154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022727 | 0.143363 | 565 | 18 | 107 | 31.388889 | 0.727273 | 0.037168 | 0 | 0 | 0 | 0 | 0.245387 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64850fc8936473f12079768631779f5e9426371c | 784 | py | Python | docs/pyxamples/glyphs.py | JuliaPackageMirrors/Bokeh.jl | 47ec974a2759f81069e5143d74c22876e60f8ec2 | [
"MIT",
"BSD-3-Clause"
] | 31 | 2015-02-23T03:21:48.000Z | 2017-05-13T13:10:22.000Z | docs/pyxamples/glyphs.py | JuliaPackageMirrors/Bokeh.jl | 47ec974a2759f81069e5143d74c22876e60f8ec2 | [
"MIT",
"BSD-3-Clause"
] | 35 | 2015-01-03T16:38:01.000Z | 2017-05-19T19:55:14.000Z | docs/pyxamples/glyphs.py | JuliaPackageMirrors/Bokeh.jl | 47ec974a2759f81069e5143d74c22876e60f8ec2 | [
"MIT",
"BSD-3-Clause"
] | 13 | 2015-01-06T12:38:32.000Z | 2017-05-16T05:54:38.000Z | import numpy as np
from bokeh.plotting import figure, output_file, show

N = 100
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)

output_file("glyphs.html", title="glyphs.py example")

TOOLS = "pan,wheel_zoom,box_zoom,reset,save,box_select"

p2 = figure(title="Another Legend Example", tools=TOOLS)
p2.circle(x, y, legend="sin(x)")
p2.line(x, y, legend="sin(x)")
p2.line(x, 2*y, legend="2*sin(x)",
        line_dash=[4, 4], line_color="orange", line_width=2)
p2.square(x, 3*y, legend="3*sin(x)",
          fill_color=None, line_color="green")
p2.line(x, 3*y, legend="3*sin(x)", line_color="green")
show(p2)
from bokeh.document import Document
from bokeh.protocol import serialize_json
doc = Document()
doc.add(p2)
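
# Porting note (assumption about newer Bokeh releases): the `legend` keyword
# used above was renamed `legend_label`, and a Document is serialized with
# doc.to_json_string() instead of serialize_json(doc.dump()).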
json_doc = serialize_json(doc.dump(), indent=2)
with open('glyphs.json', 'w') as f:
    f.write(json_doc) | 23.757576 | 56 | 0.690051 | 142 | 784 | 3.71831 | 0.415493 | 0.045455 | 0.039773 | 0.041667 | 0.225379 | 0.225379 | 0.225379 | 0.225379 | 0.155303 | 0.155303 | 0 | 0.033333 | 0.119898 | 784 | 33 | 57 | 23.757576 | 0.731884 | 0 | 0 | 0.086957 | 0 | 0 | 0.202548 | 0.057325 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.173913 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64855ec485719a4209443f6e47f00e7cc576af85 | 2,103 | py | Python | unit_tests/test_main.py | cloverleaf/npm | 4845bc7e483291f2a2a847c242613bae2c6a11d2 | [
"MIT"
] | null | null | null | unit_tests/test_main.py | cloverleaf/npm | 4845bc7e483291f2a2a847c242613bae2c6a11d2 | [
"MIT"
] | 4 | 2021-01-29T11:17:53.000Z | 2021-09-04T16:01:28.000Z | unit_tests/test_main.py | cloverleaf/npm | 4845bc7e483291f2a2a847c242613bae2c6a11d2 | [
"MIT"
] | 1 | 2021-11-23T09:59:36.000Z | 2021-11-23T09:59:36.000Z | import pytest
import json
import subprocess

defaultMinLength = 4
defaultMaxLength = 512

sites = {}
with open("../data/sites.json", 'r') as json_file:
    sites = json.load(json_file)
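
# A minimal helper sketch that both tests below could share, assuming the same
# ../index CLI contract; `run_process` is a hypothetical name, not part of the
# original suite:
def run_process(site, password, preset, length):
    cmd = "node -e \"console.log(require('../index').process('{}', '{}', {}, {}))\"".format(
        site, password, "true" if preset else "false", length)
    return subprocess.check_output(cmd, shell=True).decode("utf-8")[:-1]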

def test_lengths():
    for length in range(defaultMinLength, defaultMaxLength):
        batcmd = "node -e \"console.log(require('../index').process('a', 'a', false, {}))\"".format(length)
        response = subprocess.check_output(batcmd, shell=True).decode("utf-8")[:-1]
        # print(response, len(response), length)
        assert len(response) == length

def test_requirements_lengths():  # pytest only collects names starting with test_
    for site in sites:
        if "requirements" in sites[site]:
            assert sites[site]["minLength"] > len(sites[site]["requirements"])

def test_all_presets():
    results = {}
    with open("results.json", 'r') as json_file:
        results = json.load(json_file)
    with open("configs.json", 'r') as json_file:
        configs = json.load(json_file)

    for config in configs:
        for site in sites:
            siteCMD = site.replace("\'", "\\'").replace(" ", "\\ ")
            password = configs[config]["password"]
            length = configs[config]["length"]
            batcmd = "node -e \"console.log(require('../index').process('{}', '{}', true, {}))\"".format(siteCMD, password, length)
            response = subprocess.check_output(batcmd, shell=True).decode("utf-8")[:-1]

            if config not in results:
                print("Adding {} config".format(config))
                results[config] = {}

            if site in results[config]:
                assert response == results[config][site]["result"], "Preset \"{}\" not functioning correctly.".format(site)
            else:
                print("Adding {} to config {}".format(site, config))
                results[config][site] = {"result": response}

    with open("results.json", 'w', encoding='utf-8') as json_file:
        json.dump(results, json_file, ensure_ascii=False, indent=4)
| 31.38806 | 136 | 0.555873 | 225 | 2,103 | 5.128889 | 0.32 | 0.055459 | 0.034662 | 0.028596 | 0.214038 | 0.175043 | 0.175043 | 0.175043 | 0.105719 | 0.105719 | 0 | 0.006689 | 0.289111 | 2,103 | 66 | 137 | 31.863636 | 0.765217 | 0.018069 | 0 | 0.1 | 0 | 0 | 0.127191 | 0 | 0 | 0 | 0 | 0 | 0.075 | 1 | 0.075 | false | 0.05 | 0.075 | 0 | 0.15 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |