hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e517fa480acd67dfee5f3aaa95a82cf7997e2c8a | 6,551 | py | Python | layers/modules/precision_loss.py | laycoding/ssd.pytorch | 6b9263d9d59e348398335dc91d59af658f2e8d35 | [
"MIT"
] | null | null | null | layers/modules/precision_loss.py | laycoding/ssd.pytorch | 6b9263d9d59e348398335dc91d59af658f2e8d35 | [
"MIT"
] | null | null | null | layers/modules/precision_loss.py | laycoding/ssd.pytorch | 6b9263d9d59e348398335dc91d59af658f2e8d35 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from data import coco as cfg
from ..box_utils import match, log_sum_exp, decode, nms
class PrecisionLoss(nn.Module):
"""SSD Weighted Loss Function
Compute Targets:
1) Produce Confidence Target Indices by matching ground truth boxes
with (default) 'priorboxes' that have jaccard index > threshold parameter
(default threshold: 0.5).
2) Produce localization target by 'encoding' variance into offsets of ground
truth boxes and their matched 'priorboxes'.
        3) Hard negative mining to filter the excessive number of negative examples
           that come with using a large number of default bounding boxes.
           (default negative:positive ratio 3:1)
Objective Loss:
L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
        where Lconf is the cross-entropy loss and Lloc is the SmoothL1 loss,
        weighted by α, which is set to 1 by cross-validation.
Args:
c: class confidences,
l: predicted boxes,
g: ground truth boxes
N: number of matched default boxes
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
"""
def __init__(self, num_classes, overlap_thresh, prior_for_matching,
bkg_label, top_k, encode_target, nms_thresh, conf_thresh,
use_gpu=True):
super(PrecisionLoss, self).__init__()
self.use_gpu = use_gpu
self.num_classes = num_classes
self.threshold = overlap_thresh
self.background_label = bkg_label
self.encode_target = encode_target
self.use_prior_for_matching = prior_for_matching
self.variance = cfg['variance']
self.top_k = top_k
        if nms_thresh <= 0:
            raise ValueError('nms_threshold must be positive.')
self.nms_thresh = nms_thresh
self.softmax = nn.Softmax(dim=-1)
self.conf_thresh = conf_thresh
def forward(self, predictions, targets):
"""Multibox Loss
Args:
predictions (tuple): A tuple containing loc preds, conf preds,
and prior boxes from SSD net.
conf shape: torch.size(batch_size,num_priors,num_classes)
loc shape: torch.size(batch_size,num_priors,4)
priors shape: torch.size(num_priors,4)
targets (tensor): Ground truth boxes and labels for a batch,
shape: [batch_size,num_objs,5] (last idx is the label).
"""
loc_data, conf_data, priors = predictions
# torch.save(loc_data, 'inter/loc_data.pt')
# torch.save(conf_data, 'inter/conf_data.pt')
# torch.save(priors, 'inter/priors.pt')
# torch.save(targets, 'inter/targets.pt')
num = loc_data.size(0)
priors = priors[:loc_data.size(1), :]
        # priors truncated above to the number of prior boxes actually predicted (loc_data dim 1)
num_priors = (priors.size(0))
# prior_data = priors.view(1, num_priors, 4)
# print(prior_data.size())
num_classes = self.num_classes
# match priors (default boxes) and ground truth boxes
loc_t = torch.Tensor(num, num_priors, 4)
# [num, num_priors, 4]
conf_t = torch.LongTensor(num, num_priors)
# [num_priors] top class label for each prior
for idx in range(num):
truths = targets[idx][:, :-1].data
labels = targets[idx][:, -1].data
defaults = priors.data
match(self.threshold, truths, defaults, self.variance, labels,
loc_t, conf_t, idx)
if self.use_gpu:
loc_t = loc_t.cuda()
conf_t = conf_t.cuda()
# wrap targets
loc_t = Variable(loc_t, requires_grad=False)
conf_t = Variable(conf_t, requires_grad=False)
conf_preds = self.softmax(conf_data.view(num, num_priors,
self.num_classes))
# print(conf_preds.max()) 0.98
conf_preds_trans = conf_preds.transpose(2,1)
# [num, num_classes, num_priors]
conf_p = torch.zeros(num, num_priors, num_classes).cuda()
# [num, num_priors, num_classes]
loc_p = torch.zeros(num, num_priors, 4).cuda()
# Decode predictions into bboxes
for i in range(num):
decoded_boxes = decode(loc_data[i], priors, self.variance)
# For each class, perform nms
conf_scores = conf_preds_trans[i].clone()
for cl in range(1, self.num_classes):
c_mask = conf_scores[cl].gt(self.conf_thresh)
scores = conf_scores[cl][c_mask]
if scores.size(0) == 0:
continue
                # filter out low-confidence predictions
l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)
boxes = Variable(decoded_boxes[l_mask].view(-1, 4), requires_grad=False)
                # idx of highest scoring and non-overlapping boxes per class
                # boxes [num_priors (after filtering), 4], location preds for i'th image
ids, count = nms(boxes, scores, self.nms_thresh, self.top_k)
conf_p[i, c_mask, cl] = conf_preds[i, c_mask, cl] # [num, num_priors, num_classes]
loc_p[i, l_mask[:,0].nonzero()[ids][:count]] = loc_data[i, l_mask[:,0].nonzero()[ids][:count]] # [num, num_priors, 4]
# check each result if match the ground truth
effect_conf = conf_p.sum(2) != 0
effect_conf_idx = effect_conf.unsqueeze(2).expand_as(conf_p)
effect_loc_idx = effect_conf.unsqueeze(2).expand_as(loc_t)
        # effect_conf is a [num, num_priors] boolean mask; only a few thousand of millions of entries are True
# torch.save(conf_preds, 'inter/conf_preds.pt')
# torch.save(effect_conf, 'inter/effect_conf.pt')
# torch.save(effect_loc, 'inter/effect_loc.pt')
# torch.save(conf_p, 'inter/conf_p.pt')
# torch.save(conf_t, 'inter/conf_t.pt')
loss_c = F.cross_entropy(conf_p[effect_conf_idx].view(-1, num_classes), conf_t[effect_conf].view(-1), size_average=False)
loss_l = F.smooth_l1_loss(loc_p[effect_loc_idx], loc_t[effect_loc_idx], size_average=False)
# conf_p [num*num_p, num_classes] conf_t [num*num_p, 1(label)]
N = effect_conf_idx.data.sum()
loss_l /= N.float()
loss_c /= N.float()
return loss_l, loss_c
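A minimal sketch of the objective described in the class docstring above, evaluated on toy tensors. Shapes, values and the reduction choice are illustrative only; this is not the repository's API:

import torch
import torch.nn.functional as F

N, num_classes = 8, 21                        # hypothetical: 8 matched priors, 21 classes
conf = torch.randn(N, num_classes)            # class confidences c
labels = torch.randint(0, num_classes, (N,))  # matched prior labels
loc_pred = torch.randn(N, 4)                  # predicted boxes l
loc_target = torch.randn(N, 4)                # encoded ground truth g

alpha = 1.0                                   # weighting from the docstring
l_conf = F.cross_entropy(conf, labels, reduction='sum')
l_loc = F.smooth_l1_loss(loc_pred, loc_target, reduction='sum')
loss = (l_conf + alpha * l_loc) / N           # L(x,c,l,g) = (Lconf + α·Lloc) / N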
| 47.471014 | 133 | 0.615479 | 909 | 6,551 | 4.226623 | 0.247525 | 0.042166 | 0.031234 | 0.024727 | 0.112441 | 0.089276 | 0.077564 | 0.019781 | 0.019781 | 0 | 0 | 0.012343 | 0.282705 | 6,551 | 137 | 134 | 47.817518 | 0.805278 | 0.412914 | 0 | 0 | 0 | 0 | 0.011778 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.085714 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e51b72feb934b5d22f70cc68b5686a57177d55f4 | 18,195 | py | Python | umbrella-sampling_1D_reweight_bs.py | cbatton/umbrella_sampling_pymbar | 9133c0079afe916da5f11051052828b67288efb3 | [
"MIT"
] | null | null | null | umbrella-sampling_1D_reweight_bs.py | cbatton/umbrella_sampling_pymbar | 9133c0079afe916da5f11051052828b67288efb3 | [
"MIT"
] | null | null | null | umbrella-sampling_1D_reweight_bs.py | cbatton/umbrella_sampling_pymbar | 9133c0079afe916da5f11051052828b67288efb3 | [
"MIT"
] | null | null | null | # Example illustrating the application of MBAR to compute a 1D PMF from an umbrella sampling simulation.
#
# The data represents an umbrella sampling simulation for the magnetization of the Ising model
# Adapted from one of the pymbar example scripts for 1D PMFs
import numpy as np # numerical array library
import pymbar # multistate Bennett acceptance ratio
import os
from pymbar import timeseries # timeseries analysis
from pymbar.utils import logsumexp
from glob import glob
from matplotlib.ticker import AutoMinorLocator
from scipy.optimize import brentq
import scipy.signal as signal
from scipy.signal import savgol_filter
kB = 1.0 # Boltzmann constant
# Parameters
temperature = 3.0 # assume a single temperature -- can be overridden with data from param file
N_max = 50000 # maximum number of snapshots/simulation
N_max_ref = 50000 # maximum number of snapshots/simulation
folders_top = glob("*/") # top-level directories, one per temperature
folders_1 = []
curdir = os.getcwd()
for i in range(len(folders_top)):
os.chdir(curdir+'/'+folders_top[i])
folders_bottom = glob("*/")
for j in range(len(folders_bottom)):
os.chdir(curdir+'/'+folders_top[i]+'/'+folders_bottom[j])
folders_1.append(os.getcwd())
os.chdir(curdir)
K = len(folders_1)
T_k = np.ones(K,float)*temperature # initial temperatures are all equal
beta = 1.0 / (kB * temperature) # inverse temperature of simulations
mag_min = -1580 # min for magnetization
mag_max = 1580 # max for magnetization
mag_nbins = 395 # number of bins for magnetization
# Need to delete ext terms
# Allocate storage for simulation data
N_max = 50000
N_k = np.zeros([K], np.int32) # N_k[k] is the number of snapshots from umbrella simulation k
K_k = np.zeros([K], np.float64) # K_1_k[k] is the spring constant 1 for umbrella simulation k
mu_k = np.zeros([K], np.float64) # mu_k[k] is the chemical potential for umbrella simulation k
mag0_k = np.zeros([K], np.float64) # mag0_k[k] is the spring center location for umbrella simulation k
mag_kn = np.zeros([K,N_max], np.float64) # mag_kn[k,n] is the magnetization for snapshot n from umbrella simulation k
u_kn = np.zeros([K,N_max], np.float64) # u_kn[k,n] is the reduced potential energy without umbrella restraints of snapshot n of umbrella simulation k
g_k = np.zeros([K],np.float32);
# Read in umbrella spring constants and centers.
# Go through directories and read
umbrella_index = 0
for i in range(K):
infile = open(folders_1[i]+'/param')
for line in infile:
line_strip = line.strip()
if line_strip.startswith('harmon'):
print(line_strip)
line_split = line_strip.split()[1]
K_k[i] = float(line_split)
if line_strip.startswith('window'):
print(line_strip)
line_split = line_strip.split()[1]
mag0_k[i] = float(line_split)
if line_strip.startswith('T'):
print(line_strip)
line_split = line_strip.split()[1]
T_k[i] = float(line_split)
if line_strip.startswith('h_external'):
print(line_strip)
line_split = line_strip.split()[1]
mu_k[i] = float(line_split)
beta_k = 1.0/(kB*T_k) # beta factor for the different temperatures
print(beta_k)
print(mu_k)
if (np.min(T_k) == np.max(T_k)):
DifferentTemperatures = False # if all the temperatures are the same, then we don't have to read in energies.
# Read the simulation data
for i in range(K):
k = i
string_base = folders_1[i]
# Read magnetization data.
filename_mag = string_base+'/mbar_data.txt'
print("Reading %s..." % filename_mag)
infile = open(filename_mag, 'r')
lines = infile.readlines()
infile.close()
# Parse data.
n = 0
for line in lines:
tokens = line.split()
mag = float(tokens[2]) # Magnetization
u_kn[k,n] = float(tokens[1]) - float(tokens[0]) + mu_k[k]*mag # reduced potential energy without umbrella restraint and external field
mag_kn[k,n] = mag
n += 1
N_k[k] = n
# Compute correlation times for potential energy and magnetization
# timeseries. If the temperatures differ, use energies to determine samples; otherwise, magnetization
g_k[k] = timeseries.statisticalInefficiency(mag_kn[k,0:N_k[k]])
print("Correlation time for set %5d is %10.3f" % (k,g_k[k]))
indices = timeseries.subsampleCorrelatedData(mag_kn[k,0:N_k[k]], g=g_k[k])
# Subsample data.
N_k[k] = len(indices)
u_kn[k,0:N_k[k]] = u_kn[k,indices]
mag_kn[k,0:N_k[k]] = mag_kn[k,indices]
N_max = np.max(N_k) # shorten the array size
# At this point, start diverting from the usual path and allow a method that allows us to perform blocking/bootstrapping analysis
mag_n = mag_kn[0,0:N_k[0]] # mag_n[k] is the magnetization from some simulation snapshot
u_n = u_kn[0,0:N_k[0]] # u_n[k] is the potential energy from some snapshot that has mag value mag_n[k]
# Now append values
allN = N_k.sum()
for k in range(1,K):
mag_n = np.append(mag_n, mag_kn[k,0:N_k[k]])
u_n = np.append(u_n, u_kn[k,0:N_k[k]])
# Bootstrap time
N_bs = 20 # number of bootstrap samples
N_bs_start = 0 # index to start with outputs
np.random.seed(0)
# Cache the first MBAR solution to warm-start later bootstrap iterations
mbar_ref = []
mbar_count = 0
for N_ in range(N_bs_start,N_bs_start+N_bs):
print("Iteration %d" % (N_))
f_bs = open('mbar_'+str(N_)+'.txt', 'w')
print("Iteration %d" % (N_), file=f_bs)
# Select random samples
g_reduction = 50
N_red = np.random.randint(allN, size=allN//g_reduction)
N_red = np.sort(N_red)
N_k_red = np.zeros([K], np.int32)
N_cumsum = np.cumsum(N_k)
N_cumsum = np.hstack((np.array([0]), N_cumsum))
# Determine N_k_red by binning
for i in range(K):
N_bin = (N_cumsum[i] <= N_red[:]) & (N_red[:] < N_cumsum[i+1])
N_k_red[i] = N_bin.sum()
u_n_red = u_n[N_red]
mag_n_red = mag_n[N_red]
u_kn_red = np.zeros((K, allN//g_reduction))
for k in range(K):
# Compute from umbrella center k
dmag = mag_n_red[:] - mag0_k[k]
# Compute energy of samples with respect to umbrella potential k
u_kn_red[k,:] = beta_k[k]*(u_n_red[:] + (K_k[k]/2.0) * (dmag/1575.0)**2 - mu_k[k]*mag_n_red[:])
# Construct magnetization bins
print("Binning data...", file=f_bs)
delta_mag = (mag_max - mag_min) / float(mag_nbins)
# compute bin centers
bin_center_i_mag = np.zeros([mag_nbins], np.float64)
for i in range(mag_nbins):
bin_center_i_mag[i] = mag_min + delta_mag/2 + delta_mag * i
# Bin data
bin_n = np.zeros([allN//g_reduction], np.int64)+mag_nbins+10
nbins = 0
bin_counts = list()
bin_centers = list() # bin_centers[i] is a tuple that gives the center of bin i
for j in range(mag_nbins):
# Determine which configurations lie in this bin
in_bin = (bin_center_i_mag[j]-delta_mag/2 <= mag_n_red[:]) & (mag_n_red[:] < bin_center_i_mag[j]+delta_mag/2)
# Count number of configurations in this bin
bin_count = in_bin.sum()
if (bin_count > 0):
# store bin
bin_centers.append(bin_center_i_mag[j])
bin_counts.append( bin_count )
# assign these conformations to the bin index
bin_n[np.where(in_bin)[0]] = nbins
# increment number of bins
nbins += 1
# Get total number of things that were binned
bin_counts_np = np.array(bin_counts)
bin_count_total = bin_counts_np.sum()
bin_count_ideal = allN
# Make array with total combinations of bin_center_i_mag and bin_center_i_mag
bin_center_possible = np.zeros((mag_nbins,1))
bin_center_empty = np.zeros((mag_nbins,1))
for i in range(mag_nbins):
bin_center_possible[i] = bin_center_i_mag[i]
# Determine empty bins
for i in range(nbins):
for k in range(mag_nbins):
if((bin_centers[i] == bin_center_i_mag[k])):
bin_center_empty[k] = 1
print("%d bins were populated:" % nbins, file=f_bs)
for i in range(nbins):
print("bin %5d (%6.5f) %12d conformations" % (i, bin_centers[i], bin_counts[i]), file=f_bs)
print("%d empty bins" % (mag_nbins-nbins), file=f_bs)
for j in range(mag_nbins):
if(bin_center_empty[j] == 0):
print("bin (%6.5f)" % (bin_center_possible[j]), file=f_bs)
print("%d / %d data used" % (bin_count_total, bin_count_ideal), file=f_bs)
# Initialize MBAR.
print("Running MBAR...", file=f_bs)
if(mbar_count == 0):
mbar = pymbar.MBAR(u_kn_red, N_k_red, verbose = True, relative_tolerance=1e-10)
mbar_ref = mbar.f_k
mbar_count = mbar_count+1
else:
mbar = pymbar.MBAR(u_kn_red, N_k_red, verbose = True, relative_tolerance=1e-10, initial_f_k=mbar_ref)
print('At reweighting step', file=f_bs)
# Now have weights, time to have some fun reweighting
u_n_red_original = u_n_red.copy()
T_targets_low = np.linspace(2.0,3.0,26)
T_targets_high = np.linspace(3.025, 3.7, 28)
T_targets = np.hstack((T_targets_low, T_targets_high))
low_comp_storage = np.zeros(T_targets.shape)
high_comp_storage = np.zeros(T_targets.shape)
mu_1_storage = np.zeros(T_targets.shape)
mu_2_storage = np.zeros(T_targets.shape)
mu_storage = np.zeros(T_targets.shape)
# Compute PMF in unbiased potential (in units of kT) at kT = 1
(f_i, df_i) = mbar.computePMF(u_n_red, bin_n, nbins)
# Show free energy and uncertainty of each occupied bin relative to lowest free energy
print("1D PMF", file=f_bs)
print("", file=f_bs)
print("%8s %6s %8s %10s %10s" % ('bin', 'mass', 'N', 'f', 'df'), file=f_bs)
for i in range(nbins):
print('%8d %10.8e %8d %10.10e %10.10e' % (i, bin_centers[i], bin_counts[i], f_i[i], df_i[i]), file=f_bs)
# Write out PMF to file
f_ = open('free_energy_'+str(mag_nbins)+'_original_'+str(N_)+'.txt', 'w')
print("PMF (in units of kT)", file=f_)
print("%8s %6s %8s %10s %10s" % ('bin', 'mass', 'N', 'f', 'df'), file=f_)
for i in range(nbins):
print('%8d %10.8g %8d %16.16e %16.16e' % (i, bin_centers[i], bin_counts[i], f_i[i], df_i[i]), file=f_)
f_.close()
for j in range(len(T_targets)):
print("Reweighting at temperature "+str(T_targets[j]), file=f_bs)
# reweight to temperature of interest
u_n_red = u_n_red_original.copy()
beta_reweight = 1.0/(kB*T_targets[j]) # beta factor for the different temperatures
u_n_red = beta_reweight*u_n_red
# Compute PMF in unbiased potential (in units of kT) at kT = 1
(f_i_base, df_i_base) = mbar.computePMF(u_n_red, bin_n, nbins)
mu_low = -1.0
mu_high = 1.0
        # Now have mu_low and mu_high; use a bracketed root finder to locate the mu at which
        # f_i(comp_low) \approx f_i(comp_high)
        # scipy's brentq root finder is used for this
# Have to define a function that we want to operate on
def free_diff_comp(mu, f_i_base, bin_centers, beta_reweight):
f_i = f_i_base - beta_reweight*mu*bin_centers
mid_comp = int(3.0*nbins/4.0)
f_i_low_comp = f_i[0:mid_comp].min()
f_i_high_comp = f_i[mid_comp:nbins].min()
return f_i_high_comp-f_i_low_comp
print("", file=f_bs)
print("Finding mu_eq_1", file=f_bs)
# Find minimum
mu_eq_1 = brentq(free_diff_comp, a=mu_low, b=mu_high, args=(f_i_base, np.array(bin_centers), beta_reweight))
mu_1_storage[j] = mu_eq_1
print("mu_eq_1 %17.17e"%(mu_eq_1), file=f_bs)
print("", file=f_bs)
# Now output results
# Reweight to mu_eq
f_i = f_i_base.copy()
f_i = f_i - beta_reweight*mu_eq_1*np.array(bin_centers)
f_i -= f_i.min()
# Show free energy and uncertainty of each occupied bin relative to lowest free energy
print("1D PMF with mu_eq_1", file=f_bs)
print("", file=f_bs)
print("%8s %6s %8s %10s" % ('bin', 'mass', 'N', 'f'), file=f_bs)
for i in range(nbins):
print('%8d %10.8g %8d %10.8e' % (i, bin_centers[i], bin_counts[i], f_i[i]), file=f_bs)
f_ = open('mu_eq_1_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
print("%17.17e"%(mu_eq_1), file=f_)
f_.close()
# Write out PMF to file
f_ = open('pmf_eq_1_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
print("PMF with mu_eq_1 (in units of kT)", file=f_)
print("%8s %6s %8s %10s" % ('bin', 'mass', 'N', 'f'), file=f_)
for i in range(nbins):
print('%8d %10.8g %8d %16.16e' % (i, bin_centers[i], bin_counts[i], f_i[i]), file=f_)
f_.close()
# Write out probability to file
p_i=np.exp(-f_i-logsumexp(-f_i))
f_ = open('p_i_eq_1_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
print("PMF with mu_eq_1 (in units of kT)", file=f_)
print("%8s %6s %8s %10s" % ('bin', 'mass', 'N', 'p'), file=f_)
for i in range(nbins):
print('%8d %10.8g %8d %16.16e' % (i, bin_centers[i], bin_counts[i], p_i[i]), file=f_)
f_.close()
# Now do it such that areas under peaks are the same
def free_diff_comp_area(mu, f_i_base, nbins, bin_centers, beta_reweight):
f_i = f_i_base - beta_reweight*mu*bin_centers
p_i=np.exp(-f_i-logsumexp(-f_i))
# Determine mid_comp
# Filter f_i to determine where to divide peak
f_i_filter = savgol_filter(f_i, window_length=41, polyorder=3)
f_i_filter_2 = savgol_filter(f_i_filter, window_length=41, polyorder=3)
rel_max = signal.argrelmax(f_i_filter_2, order=10)
# print rel_max
npeak = nbins//2
if(len(rel_max[0]) == 0):
npeak = nbins//2
else:
npeak = signal.argrelmax(f_i_filter_2, order=10)[0].max()
# As bin size is equal for now, can just do naive sum as equivalent to
# midpoint rule barring a constant factor
low_area = np.trapz(p_i[0:npeak], x = bin_centers[0:npeak])
high_area = np.trapz(p_i[npeak:nbins], x = bin_centers[npeak:nbins])
return high_area-low_area
print("", file=f_bs)
print("Finding mu_eq_2", file=f_bs)
# Find minimum
mu_eq_2 = brentq(free_diff_comp_area, a=mu_eq_1-0.05, b=mu_high+0.05, args=(f_i_base, nbins, np.array(bin_centers), beta_reweight))
mu_2_storage[j] = mu_eq_2
print("mu_eq_2 %17.17e"%(mu_eq_2), file=f_bs)
print("", file=f_bs)
# Now output results
# Reweight to mu_eq
f_i = f_i_base.copy()
f_i = f_i - beta_reweight*mu_eq_2*np.array(bin_centers)
f_i -= f_i.min()
# Show free energy and uncertainty of each occupied bin relative to lowest free energy
print("1D PMF with mu_eq_2", file=f_bs)
print("", file=f_bs)
print("%8s %6s %8s %10s %10s" % ('bin', 'mass', 'N', 'f', 'df'), file=f_bs)
for i in range(nbins):
print('%8d %10.8g %8d %10.8e %10.8e' % (i, bin_centers[i], bin_counts[i], f_i[i], df_i[i]), file=f_bs)
f_ = open('mu_eq_2_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
print("%17.17e"%(mu_eq_2), file=f_)
f_.close()
# Write out PMF to file
f_ = open('pmf_eq_2_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
print("PMF with mu_eq_2 (in units of kT)", file=f_)
print("%8s %6s %8s %10s %10s" % ('bin', 'mass', 'N', 'f', 'df'), file=f_)
for i in range(nbins):
print('%8d %10.8g %8d %16.16e %16.16e' % (i, bin_centers[i], bin_counts[i], f_i[i], df_i[i]), file=f_)
f_.close()
# Get compositions
p_i=np.exp(-f_i-logsumexp(-f_i))
f_ = open('p_i_eq_2_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
print("PMF with mu_eq_1 (in units of kT)", file=f_)
print("%8s %6s %8s %10s" % ('bin', 'mass', 'N', 'p'), file=f_)
for i in range(nbins):
print('%8d %10.8g %8d %16.16e' % (i, bin_centers[i], bin_counts[i], p_i[i]), file=f_)
f_.close()
# Determine mid_comp
f_i_filter = savgol_filter(f_i, window_length=41, polyorder=3)
f_i_filter_2 = savgol_filter(f_i_filter, window_length=41, polyorder=3)
rel_max = signal.argrelmax(f_i_filter_2, order=10)
npeak = nbins//2
if(len(rel_max[0]) == 0):
npeak = nbins//2
print('Weird divergence at %8d' % (j), file=f_bs)
else:
npeak = signal.argrelmax(f_i_filter_2, order=10)[0].max()
bin_centers_np = np.array(bin_centers)
p_i_mass = bin_centers_np*p_i
mass_avg = p_i_mass.sum()
        bin_closest = np.abs(bin_centers_np - mass_avg)
        print("mass_avg %17.17e"%(mass_avg))
        # index of the bin closest to the mean magnetization (computed for reference;
        # the filtered free-energy peak is used as the dividing bin instead)
        mid_comp = np.argmin(bin_closest)
        mid_comp = npeak
        # Average magnetization of each basin
low_comp = p_i_mass[0:mid_comp].sum()/p_i[0:mid_comp].sum()
high_comp = p_i_mass[mid_comp:nbins].sum()/p_i[mid_comp:nbins].sum()
print(low_comp, high_comp, T_targets[j])
low_comp_storage[j] = low_comp/1575.0
high_comp_storage[j] = high_comp/1575.0
f_ = open('composition_reweight_'+str(N_)+'.txt', 'w')
print("%10s %10s %10s" % ('T', 'phi_low', 'phi_high'), file=f_)
for i in range(len(T_targets)):
print('%16.16e %16.16e %16.16e' % (T_targets[i], low_comp_storage[i], high_comp_storage[i]), file=f_)
f_.close()
f_ = open('mu_reweight'+str(N_)+'.txt', 'w')
print("%10s %10s %10s" % ('T', 'mu_peaks', 'mu_area'), file=f_)
for i in range(len(T_targets)):
print('%16.16e %16.16e %16.16e' % (T_targets[i], mu_1_storage[i], mu_2_storage[i]), file=f_)
f_.close()
f_bs.close()
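A self-contained sketch of the equal-basin-depth reweighting idea applied above: tilt the PMF by the field term, f_i(mu) = f_i - beta*mu*m_i, and solve with brentq for the mu at which the two basin minima match. The double-well PMF here is synthetic:

import numpy as np
from scipy.optimize import brentq

beta = 1.0 / 3.0                      # hypothetical: kT = 3, as at the reference temperature
m = np.linspace(-1.0, 1.0, 101)       # toy magnetization bin centers
f0 = 5.0 * (m**2 - 1.0)**2 + 0.3 * m  # synthetic, slightly tilted double-well PMF (in kT)

def basin_gap(mu):
    # difference between the two basin minima after applying the field tilt
    f = f0 - beta * mu * m
    mid = len(m) // 2
    return f[mid:].min() - f[:mid].min()

mu_eq = brentq(basin_gap, -1.0, 1.0)  # field at which the basin depths coincide
print(mu_eq)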
| 42.912736 | 149 | 0.62061 | 3,040 | 18,195 | 3.468092 | 0.129605 | 0.026558 | 0.020582 | 0.01878 | 0.447975 | 0.399886 | 0.355971 | 0.319738 | 0.288058 | 0.257043 | 0 | 0.032523 | 0.242924 | 18,195 | 423 | 150 | 43.014184 | 0.732849 | 0.216928 | 0 | 0.277228 | 0 | 0 | 0.0947 | 0.001484 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006601 | false | 0 | 0.033003 | 0 | 0.046205 | 0.211221 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e51e71629e6870db5d4127796afc6d44a91db669 | 1,511 | py | Python | module1-introduction-to-sql/321_assignment_notes.py | Edudeiko/DS-Unit-3-Sprint-2-SQL-and-Databases | e164db12684286e50a9e585da475ca34692c55d7 | [
"MIT"
] | null | null | null | module1-introduction-to-sql/321_assignment_notes.py | Edudeiko/DS-Unit-3-Sprint-2-SQL-and-Databases | e164db12684286e50a9e585da475ca34692c55d7 | [
"MIT"
] | null | null | null | module1-introduction-to-sql/321_assignment_notes.py | Edudeiko/DS-Unit-3-Sprint-2-SQL-and-Databases | e164db12684286e50a9e585da475ca34692c55d7 | [
"MIT"
] | null | null | null |
import os
import pandas as pd
import sqlite3
CSV_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "data", "buddymove_holidayiq.csv")
DB_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "data", "buddymove_holidayiq.db")
connection = sqlite3.connect(DB_FILEPATH)
table_name = "reviews2"
df = pd.read_csv(CSV_FILEPATH)
# assigns a column label "id" for the index column
df.index.rename("id", inplace=True)
df.index += 1 # starts ids at 1 instead of 0
print(df.head())
df.to_sql(table_name, con=connection)
cursor = connection.cursor()
cursor.execute(f"SELECT count(distinct id) as review_count FROM {table_name};")
results = cursor.fetchone()
print(results, "RECORDS")
# Other approach
# conn = sqlite3.connect("buddymove_holidayiq.sqlite3")
# data.to_sql('review', conn, if_exists = 'replace')
# curs = conn.cursor()
# query = "SELECT * FROM review"
# results = curs.execute(query).fetchall()
# print("There are", len(results), "rows")
# ----------------------------------------
# (Stretch) What are the average number of reviews for each category?
conn = sqlite3.connect("buddymove_holidayiq.sqlite3")
curs = conn.cursor()
categories = ['Sports', 'Religious', 'Nature', 'Theatre', 'Shopping', 'Picnic']
query = "SELECT * FROM review"
length = len(curs.execute(query).fetchall())
for item in categories:
query = f"SELECT SUM({item}) FROM review"
results = curs.execute(query).fetchall()
print(f'Average number of reviews for {item} column:', round(results[0][0]/length))
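For the stretch question, SQLite can also compute the averages directly with AVG(); a minimal alternative sketch, assuming the review table built above:

import sqlite3

conn = sqlite3.connect("buddymove_holidayiq.sqlite3")
curs = conn.cursor()
for col in ['Sports', 'Religious', 'Nature', 'Theatre', 'Shopping', 'Picnic']:
    avg, = curs.execute(f"SELECT AVG({col}) FROM review").fetchone()
    print(f'Average number of reviews for {col} column:', round(avg))
conn.close()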
| 30.836735 | 95 | 0.698213 | 206 | 1,511 | 5.004854 | 0.432039 | 0.023278 | 0.046557 | 0.069835 | 0.331717 | 0.28322 | 0.199806 | 0.199806 | 0.110572 | 0.110572 | 0 | 0.009063 | 0.123759 | 1,511 | 48 | 96 | 31.479167 | 0.769637 | 0.291198 | 0 | 0 | 0 | 0 | 0.280718 | 0.068053 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.12 | 0 | 0.12 | 0.12 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e51e8a5a943efe4c5fabe22a092353ca252b4062 | 971 | py | Python | eyesore/decision_graph/compacting/_1_similar_actions_compacter.py | twizmwazin/hacrs | 3c9386b0fa5f5ea6b93b2bc8b3c4eed6abceec6a | [
"BSD-2-Clause"
] | 2 | 2019-11-07T02:55:40.000Z | 2021-12-30T01:37:43.000Z | eyesore/decision_graph/compacting/_1_similar_actions_compacter.py | twizmwazin/hacrs | 3c9386b0fa5f5ea6b93b2bc8b3c4eed6abceec6a | [
"BSD-2-Clause"
] | null | null | null | eyesore/decision_graph/compacting/_1_similar_actions_compacter.py | twizmwazin/hacrs | 3c9386b0fa5f5ea6b93b2bc8b3c4eed6abceec6a | [
"BSD-2-Clause"
] | 2 | 2019-09-27T12:01:50.000Z | 2019-10-09T21:39:52.000Z | from .. import ActionsNode
from ..visitor import Visitor
class SimilarActionsCompacter(Visitor):
def _visit_actions_node(self, node, replacements):
"""
:param node:
:type node: ActionsNode
:return:
"""
compact_successors = replacements[node.successor]
        assert len(compact_successors) < 2, "The {} visitor returned more than one successor for an ActionsNode, this is " \
                                            "not allowed. Got: {}".format(self, compact_successors)
compact_successor = compact_successors[0]
if isinstance(compact_successor, ActionsNode) and compact_successor.get_action_type() == node.get_action_type():
node.actions_info = node.actions_info + compact_successor.actions_info
node.successor = compact_successor.successor
else:
node.successor = compact_successor
return [node]
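A standalone illustration of the merge rule implemented above: consecutive nodes with the same action type collapse into one, concatenating their actions_info. The Node class here is a stand-in, not the repository's ActionsNode:

class Node:
    def __init__(self, action_type, actions_info, successor=None):
        self.action_type = action_type
        self.actions_info = actions_info
        self.successor = successor

def compact(node):
    # compact the tail first, then try to absorb the (compacted) successor
    if node is None:
        return None
    node.successor = compact(node.successor)
    nxt = node.successor
    if nxt is not None and nxt.action_type == node.action_type:
        node.actions_info = node.actions_info + nxt.actions_info
        node.successor = nxt.successor
    return node

head = compact(Node('move', [1], Node('move', [2], Node('click', [3]))))
assert head.actions_info == [1, 2]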
| 37.346154 | 122 | 0.642636 | 99 | 971 | 6.090909 | 0.464646 | 0.159204 | 0.043118 | 0.056385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002837 | 0.273944 | 971 | 25 | 123 | 38.84 | 0.852482 | 0.07518 | 0 | 0 | 0 | 0 | 0.109813 | 0 | 0 | 0 | 0 | 0 | 0.071429 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e520edd0d04c2e5662e9df5187c8127d43b40f48 | 7,416 | py | Python | boost-hic.py | CellFateNucOrg/Boost-HiC | 637299b0ba41f6511015a6249efb150cf5991804 | [
"MIT"
] | null | null | null | boost-hic.py | CellFateNucOrg/Boost-HiC | 637299b0ba41f6511015a6249efb150cf5991804 | [
"MIT"
] | null | null | null | boost-hic.py | CellFateNucOrg/Boost-HiC | 637299b0ba41f6511015a6249efb150cf5991804 | [
"MIT"
] | null | null | null | #!/usr/bin/python3 -u
import argparse
import logging
import os
import h5py
import numpy as np
import pandas as pd
import sys
# my own toolkit
import HiCutils
import convert
import utils
DEFAULT_OUTPUT_FOLDER = './boosted/'
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("").setLevel(logging.INFO)
logger = logging.getLogger('Boost-HiC')
p = argparse.ArgumentParser()
p.add_argument("operation", default="boost", choices=["boost", "sample"],
help="Operation to be executed")
p.add_argument("-m", "--matrixfilename", required=True,
help="contact map stored in tab separated file as : "
"bin_i / bin_j / counts_ij Only no zero values are stored. Contact map are symmetric. "
"Alternatively, you can provide a cooler format file (.cool), in this case no --bedfilename is needed.")
p.add_argument("-b", "--bedfilename", help="bed file of genomic coordinate of each bin")
p.add_argument("-c", "--chromosomes", nargs='+', help="Which chromosomes to boost, otherwise all chromosomes")
p.add_argument("-o", "--output_prefix", default=None,
help="Prefix for output files, including the output folder. "
f"If not given, it will be in subfolder '{DEFAULT_OUTPUT_FOLDER}' plus basename of the input matrixfilename "
"without its file extension.")
p.add_argument("-f", "--format", default="cool", choices=["cool", "hdf5"], help="output file format")
p.add_argument("-g", "--genome_assembly", default="ce11", help="genome assembly as metadata for .cool file")
p.add_argument("-k", "--keep_filtered_bins", action='store_true',
help="Whether to keep filtered out bins, otherwise they will be removed from the result matrix. "
"Not used yet.")
p.add_argument("-a", "--alpha", default=0.24, type=float,
help="AFTER a lot of test : 0.24 is always a good and safe compromise, you must use this value")
args = p.parse_args(sys.argv[1:])
# input file
Operation = args.operation
bedfilename = args.bedfilename
matrixfilename = args.matrixfilename
chromosomes = args.chromosomes
format = args.format
keep_filtered_bins = args.keep_filtered_bins
genome_assembly = args.genome_assembly
alpha = args.alpha
if args.output_prefix:
output_prefix = args.output_prefix
else:
if not os.path.exists(DEFAULT_OUTPUT_FOLDER):
os.mkdir(DEFAULT_OUTPUT_FOLDER)
output_prefix = DEFAULT_OUTPUT_FOLDER + os.path.splitext(os.path.basename(matrixfilename))[0]
# alternative in the same folder of the input matrix
# output_prefix = os.path.splitext(matrixfilename)[0]
###
def BoostHiC(amat):
    normmat = HiCutils.SCN(np.copy(amat))
    ff_normmat = HiCutils.fastFloyd(1 / np.power(np.copy(normmat), alpha))  # contacts -> distances, then all shortest paths
    FFmat = np.power(ff_normmat, -1 / alpha)  # shortest-path distances back to contacts
    boostedmat = HiCutils.adjustPdS(normmat, FFmat)
return boostedmat
def Sample(amat, repositoryout):
percentofsample = [0.1, 1., 10.]
for j in percentofsample:
logger.info(f"Value of sample: {j}")
chrmat_s = np.copy(amat)
chrmat = HiCutils.downsample_basic(chrmat_s, j)
fh5 = h5py.File(repositoryout + "inputmat_sampleat_" + str(j) + "_percent.hdf5", "w")
fh5['data'] = chrmat
fh5.close()
# ## CODE EXECUTION ## #
# load the data
logger.info("LOADING MATRIX")
if matrixfilename.endswith('.cool'):
D, total, resolution, D_cooler = convert.loadabsdatafile_cool(matrixfilename)
else:
D, total, resolution = convert.loadabsdatafile(bedfilename)
D_cooler = None
print(*D.items(), sep='\n')
print(f'Total bins:{total} resolution:{resolution}')
bins_boosted = pd.DataFrame(columns=['chrom', 'start', 'end'])
pixels_boosted = pd.DataFrame(columns=['bin1_id', 'bin2_id', 'count'])
chroms = chromosomes if chromosomes else D.keys()
bin_offs = 0
for chrom in chroms:
repositoryout = f'{output_prefix}_{chrom}_'
if D_cooler:
basemat = D_cooler.matrix(balance=False).fetch(chrom)
else:
beginfend = D[chrom][0]
endfend = D[chrom][1]
logger.info(f"Chromosome {chrom} data fend : {beginfend},{endfend}")
basemat = convert.loadmatrixselected(matrixfilename, beginfend, endfend)
# matrix filtering
logger.info("FILTERING")
bins_num = basemat.shape[0]
pos_out = HiCutils.get_outliers(basemat)
utils.savematrixasfilelist3(pos_out, repositoryout + "filteredbin.txt")
basematfilter = basemat[np.ix_(~pos_out, ~pos_out)]
basematfilter = np.copy(basematfilter)
# basematfilter=basematfilter[0:1000,0:1000]
logger.info(f'len(basemat):{len(basemat)}, len(basematfilter):{len(basematfilter)}')
if format is None or format == "hdf5":
fh5 = h5py.File(repositoryout + "inputmat.hdf5", "w")
fh5['data'] = basemat
fh5.close()
if format is None or format == "cool":
convert.hic_to_cool(basemat, chrom, resolution, repositoryout + "inputmat.cool",
genome_assembly=genome_assembly)
if format is None or format == "hdf5":
fh5 = h5py.File(repositoryout + "inputmat_filtered.hdf5", "w")
fh5['data'] = basematfilter
fh5.close()
if format is None or format == "cool":
convert.hic_to_cool(basematfilter, chrom, resolution, repositoryout + "inputmat_filtered.cool",
genome_assembly=genome_assembly)
if Operation == "boost":
logger.info("Boost Hic")
boosted = BoostHiC(basematfilter)
# save
if format is None or format == "hdf5":
fh5 = h5py.File(repositoryout + "boostedmat.hdf5", "w")
fh5['data'] = boosted
fh5.close()
if format is None or format == "cool":
filtered_bins = pos_out if keep_filtered_bins else None
chrom_bins, chrom_pixels = convert.get_bins_pixels(boosted, chrom, resolution,
bin_offs=bin_offs, bins_num=bins_num,
filtered_bins=filtered_bins)
# save as cool
cool_file = f"{repositoryout}boosted.cool"
convert.create_cool(chrom_bins, chrom_pixels, resolution, cool_file, genome_assembly=genome_assembly)
# collecting all boosted chromosomes in one
bins_boosted = pd.concat([bins_boosted, chrom_bins])
pixels_boosted = pd.concat([pixels_boosted, chrom_pixels])
bin_offs += bins_num
elif Operation == "sample":
logger.info("SAMPLING")
Sample(basematfilter, repositoryout)
if Operation == "boost" and format is None or format == "cool": # combined file support only for .cool
repositoryout = output_prefix + (f'_{"_".join(chromosomes)}_' if chromosomes else '_')
cool_file = f"{repositoryout}boosted{'_kfb' if keep_filtered_bins else ''}.cool"
convert.create_cool(bins_boosted, pixels_boosted, resolution, cool_file, genome_assembly=genome_assembly)
cmd = f'cooler balance --cis-only --force {cool_file}'
logger.info(f'CALL: {cmd}')
os.system(cmd)
resolutions = [5000, 10000, 20000, 50000, 100000, 200000, 500000, 1000000]
resolutions_str = ','.join([str(r) for r in resolutions])
cmd = f'cooler zoomify -r "{resolutions_str}" {cool_file}'
logger.info(f'CALL: {cmd}')
os.system(cmd)
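A compact sketch of the transform at BoostHiC's core, on a toy matrix: raise contacts to -alpha to get distances, take all-pairs shortest paths, then map back to contacts. Plain Floyd-Warshall stands in for HiCutils.fastFloyd, which is assumed to be an optimized equivalent:

import numpy as np

def floyd(d):
    # classic Floyd-Warshall, vectorized over one axis per pivot
    d = d.copy()
    for k in range(d.shape[0]):
        d = np.minimum(d, d[:, k][:, None] + d[k, :][None, :])
    return d

contacts = np.array([[1.00, 0.50, 0.01],
                     [0.50, 1.00, 0.50],
                     [0.01, 0.50, 1.00]])  # toy symmetric contact map
alpha = 0.24
dist = floyd(1.0 / contacts**alpha)        # contacts -> distances, then shortest paths
boosted = dist**(-1.0 / alpha)             # shortest-path distances back to contacts
print(boosted)                             # the weak 0.01 entry is lifted via the indirect path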
| 42.136364 | 129 | 0.663026 | 934 | 7,416 | 5.130621 | 0.284797 | 0.035058 | 0.022538 | 0.020451 | 0.14399 | 0.116027 | 0.097245 | 0.078047 | 0.078047 | 0.070952 | 0 | 0.018051 | 0.215615 | 7,416 | 175 | 130 | 42.377143 | 0.805742 | 0.050432 | 0 | 0.137681 | 0 | 0.014493 | 0.242023 | 0.040598 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014493 | false | 0 | 0.072464 | 0 | 0.094203 | 0.014493 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e52143225da56c9f67ad6d80e159ab308dbbde12 | 5,560 | py | Python | tests/test_agents.py | fkamrani/adversarial-policies | 53e129c2083f6557ddc18dbb39e4e633a2d7ab9b | [
"MIT"
] | 211 | 2019-02-22T08:07:25.000Z | 2022-03-14T10:44:20.000Z | tests/test_agents.py | fkamrani/adversarial-policies | 53e129c2083f6557ddc18dbb39e4e633a2d7ab9b | [
"MIT"
] | 51 | 2019-02-08T01:39:49.000Z | 2022-02-15T21:21:46.000Z | tests/test_agents.py | fkamrani/adversarial-policies | 53e129c2083f6557ddc18dbb39e4e633a2d7ab9b | [
"MIT"
] | 41 | 2019-04-23T05:01:49.000Z | 2022-03-16T06:51:19.000Z | import gym
from ilqr import iLQR
import numpy as np
import pytest
from aprl.agents.monte_carlo import (
MonteCarloParallel,
MonteCarloSingle,
MujocoResettableWrapper,
receding_horizon,
)
from aprl.agents.mujoco_lqr import (
MujocoFiniteDiffCost,
MujocoFiniteDiffDynamicsBasic,
MujocoFiniteDiffDynamicsPerformance,
)
dynamics_list = [MujocoFiniteDiffDynamicsBasic, MujocoFiniteDiffDynamicsPerformance]
@pytest.mark.parametrize("dynamics_cls", dynamics_list)
def test_lqr_mujoco(dynamics_cls):
"""Smoke test for MujcooFiniteDiff{Dynamics,Cost}.
Jupyter notebook experiments/mujoco_control.ipynb has quantitative results
attained; for efficiency, we only run for a few iterations here."""
env = gym.make("Reacher-v2").unwrapped
env.seed(42)
env.reset()
dynamics = dynamics_cls(env)
cost = MujocoFiniteDiffCost(env)
N = 10
ilqr = iLQR(dynamics, cost, N)
x0 = dynamics.get_state()
us_init = np.array([env.action_space.sample() for _ in range(N)])
xs, us = ilqr.fit(x0, us_init, n_iterations=3)
assert x0.shape == xs[0].shape
assert xs.shape[0] == N + 1
assert us.shape == (N, 2)
assert env.action_space.contains(us[0])
def rollout(env, actions):
obs, rews, dones, infos = [], [], [], []
for a in actions:
ob, rew, done, info = env.step(a)
obs.append(ob)
rews.append(rew)
dones.append(done)
infos.append(info)
obs = np.array(obs)
rews = np.array(rews)
dones = np.array(dones)
return obs, rews, dones, infos
def make_mujoco_env(env_name, seed):
env = gym.make(env_name)
env = MujocoResettableWrapper(env.unwrapped)
env.seed(seed)
env.reset()
return env
MONTE_CARLO_ENVS = ["Reacher-v2", "HalfCheetah-v2", "Hopper-v2"]
@pytest.mark.parametrize("env_name", MONTE_CARLO_ENVS)
def test_mujoco_reset_env(env_name, horizon=10, seed=42):
env = make_mujoco_env(env_name, seed)
state = env.get_state()
actions = [env.action_space.sample() for _ in range(horizon)]
first_obs, first_rews, first_dones, _first_infos = rollout(env, actions)
env.set_state(state)
second_obs, second_rews, second_dones, _second_infos = rollout(env, actions)
np.testing.assert_almost_equal(second_obs, first_obs, decimal=5)
np.testing.assert_almost_equal(second_rews, first_rews, decimal=5)
assert (first_dones == second_dones).all()
def check_monte_carlo(
kind, score_thresholds, total_horizon, planning_horizon, trajectories, seed=42
):
def f(env_name):
# Setup
env = make_mujoco_env(env_name, seed)
if kind == "single":
mc = MonteCarloSingle(env, planning_horizon, trajectories)
elif kind == "parallel":
env_fns = [lambda: make_mujoco_env(env_name, seed) for _ in range(2)]
mc = MonteCarloParallel(env_fns, planning_horizon, trajectories)
else: # pragma: no cover
raise ValueError("Unrecognized kind '{}'".format(kind))
mc.seed(seed)
# Check for side-effects
state = env.get_state()
_ = mc.best_action(state)
assert (env.get_state() == state).all(), "Monte Carlo search has side effects"
# One receding horizon rollout of Monte Carlo search
total_rew = 0
prev_done = False
for i, (a, ob, rew, done, info) in enumerate(receding_horizon(mc, env)):
assert not prev_done, "should terminate if env returns done"
prev_done = done
assert env.action_space.contains(a)
assert env.observation_space.contains(ob)
total_rew += rew
if i >= total_horizon:
break
assert i == total_horizon or done
# Check it does better than random sequences
random_rews = []
for i in range(10):
env.action_space.np_random.seed(seed + i)
action_seq = [env.action_space.sample() for _ in range(total_horizon)]
env.set_state(state)
_, rews, _, _ = rollout(env, action_seq)
random_rew = sum(rews)
random_rews.append(random_rew)
assert total_rew >= random_rew, "random sequence {}".format(i)
print(
f"Random actions on {env_name} for {total_horizon} obtains "
f"mean {np.mean(random_rews)} s.d. {np.std(random_rews)}"
)
# Check against pre-defined score threshold
assert total_rew >= score_thresholds[env_name]
# Cleanup
if kind == "parallel":
mc.close()
with pytest.raises(BrokenPipeError):
mc.best_action(state)
return f
MC_SINGLE_THRESHOLDS = {
"Reacher-v2": -11, # tested -9.5, random -17.25 s.d. 1.5
"HalfCheetah-v2": 19, # tested 21.6, random -4.2 s.d. 3.7
"Hopper-v2": 29, # tested 31.1, random 15.2 s.d. 5.9
}
MC_PARALLEL_THRESHOLDS = {
"Reacher-v2": -17, # tested at -15.3; random -25.8 s.d. 1.8
"HalfCheetah-v2": 33, # tested at 35.5; random -6.0 s.d. 7.1
"Hopper-v2": 52, # tested at 54.7; random 21.1 s.d. 13.2
}
_test_mc_single = check_monte_carlo(
"single", MC_SINGLE_THRESHOLDS, total_horizon=20, planning_horizon=10, trajectories=100
)
_test_mc_parallel = check_monte_carlo(
"parallel", MC_PARALLEL_THRESHOLDS, total_horizon=30, planning_horizon=15, trajectories=200
)
test_mc_single = pytest.mark.parametrize("env_name", MONTE_CARLO_ENVS)(_test_mc_single)
test_mc_parallel = pytest.mark.parametrize("env_name", MONTE_CARLO_ENVS)(_test_mc_parallel)
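A distilled sketch of the save/restore determinism pattern that test_mujoco_reset_env exercises, for any wrapper exposing get_state()/set_state() as above:

def assert_deterministic(env, horizon=5, tol=1e-5):
    # replay the same actions from a saved state and require matching observations
    state = env.get_state()
    actions = [env.action_space.sample() for _ in range(horizon)]
    first = [env.step(a)[0] for a in actions]
    env.set_state(state)
    second = [env.step(a)[0] for a in actions]
    for a, b in zip(first, second):
        assert (abs(a - b) < tol).all()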
| 34.534161 | 95 | 0.658453 | 747 | 5,560 | 4.697456 | 0.262383 | 0.023938 | 0.023938 | 0.018239 | 0.128242 | 0.112283 | 0.080365 | 0.039327 | 0.027358 | 0.027358 | 0 | 0.02524 | 0.230396 | 5,560 | 160 | 96 | 34.75 | 0.794812 | 0.107194 | 0 | 0.063492 | 0 | 0 | 0.081612 | 0.008708 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.047619 | false | 0 | 0.047619 | 0 | 0.119048 | 0.007937 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e52d87ade902887855b10cfb23d4264c5e93b1d3 | 7,190 | py | Python | scripts/msh_process.py | mou3adb/spread_the_particle | 6cc666fded62f07380ed1e3ed52969c436295906 | [
"MIT"
] | 4 | 2020-08-18T18:33:05.000Z | 2021-05-18T23:55:56.000Z | scripts/msh_process.py | mou3adb/spread_the_particle | 6cc666fded62f07380ed1e3ed52969c436295906 | [
"MIT"
] | null | null | null | scripts/msh_process.py | mou3adb/spread_the_particle | 6cc666fded62f07380ed1e3ed52969c436295906 | [
"MIT"
] | 2 | 2021-03-03T18:57:06.000Z | 2021-05-18T20:43:44.000Z | """
Gmsh format 2.2
"""
import numpy as np
from flow import Flow
from element import Element
from element_search import find_neighbors
from text.text_flow import write_flow
from text.text_elements import write_elements
from text.text_geometries import write_geometries
#==============================================================================
def intIt(l):
return np.array([int(e) for e in l])
def floatIt(l):
return np.array([float(e) for e in l])
def extract_msh(path_msh):
f = open(path_msh, 'r')
nodes_X, nodes_Y = [], []
elements = []
line = f.readline()
# ...
# $Nodes\n
# n_nodes
# ...
while line != '$Nodes\n':
line = f.readline()
line = f.readline()
n_nodes = int(line.strip())
for i in range(n_nodes):
# line = id x y z
line = f.readline()
coord = floatIt(line.strip().split())
nodes_X.append(coord[1])
nodes_Y.append(coord[2])
# ...
# $Elements\n
# n_elements
# ...
while line != '$Elements\n':
line = f.readline()
line = f.readline()
n_elements = int(line.strip())
count = 0
for i in range(n_elements):
# element_id element_type ... ... nodes_id
line = f.readline()
coord = intIt(line.strip().split())
element_type = coord[1]
if element_type == 9: # 6-node second order triangle
count += 1
e = Element(count)
e.nodes = np.array(coord[-6:])
elements.append(e)
# if element_type == 1: # 2-node line
# e.element_type = 1
# e.nodes = coord[-2:]
#
# elif element_type == 2: # 3-node triangle
# e.element_type = 2
# e.nodes = coord[-3:]
#
# elif element_type == 3: # 4-node quadrangle
# e.element_type = 3
# e.nodes = coord[-4:]
#
# elif element_type == 8: # 3-node second order line
# e.element_type = 8
# e.nodes = coord[-3:]
#
# elif element_type == 9: # 6-node second order triangle
# e.element_type = 9
# e.nodes = coord[-6:]
#
# elif element_type == 10: # 9-node second order quadrangle
# e.element_type = 10
# e.nodes = coord[-9:]
#
# elif element_type == 15: # 1-node point
# e.element_type = 15
# e.nodes = coord[-1:]
#
# elements.append(e)
f.close()
return np.array(nodes_X), np.array(nodes_Y), np.array(elements)
def generate_poiseuille(path_msh, parent_folder):
single_nodes_X, single_nodes_Y, elements = extract_msh(path_msh)
d = np.max(single_nodes_Y) - np.min(single_nodes_Y)
y_middle = np.min(single_nodes_Y) + d/2
n_nodes = len(single_nodes_X)
mu = 1e-3
p = 2*mu*single_nodes_X
U = d**2/4 - (single_nodes_Y - y_middle)**2
V = np.zeros(n_nodes)
nodes_X, nodes_Y = np.array([]), np.array([])
Us, Vs, ps = np.array([]), np.array([]), np.array([])
Nt = 101
times = np.linspace(0, 1, Nt)
for t in times:
nodes_X = np.vstack([nodes_X, single_nodes_X]) if nodes_X.size else single_nodes_X
nodes_Y = np.vstack([nodes_Y, single_nodes_Y]) if nodes_Y.size else single_nodes_Y
Us = np.vstack([Us, U]) if Us.size else U
Vs = np.vstack([Vs, V]) if Vs.size else V
ps = np.vstack([ps, p]) if ps.size else p
Re, Ur = 1e-3*1*d/mu, np.inf # Reynolds number and reduced velocity are not
# defined in the Hagen-Poiseuille problem
flow = Flow()
flow.Re, flow.Ur = Re, Ur
flow.times = times
flow.nodes_X, flow.nodes_Y = nodes_X, nodes_Y
flow.Us, flow.Vs, flow.ps = Us, Vs, ps
write_flow(flow, parent_folder + 'flows/poiseuille')
find_neighbors(elements)
write_elements(elements, parent_folder + 'elements/poiseuille')
write_geometries(np.array([]), parent_folder + 'geometries/poiseuille')
def generate_periodic(path_msh, parent_folder):
single_nodes_X, single_nodes_Y, elements = extract_msh(path_msh)
d = np.max(single_nodes_Y) - np.min(single_nodes_Y)
Nt = 101
times = np.linspace(0, 1, Nt)
period = 0.25
w = 2*np.pi/period
# U = U0*cos(wt) with U0 = 1
# Navier-Stokes, uniform:
# rho dU/dt + 0 = - dp/dx with rho = 1
# dp/dx = rhoU0*w*sin(wt)
    # p = p0 + rho*U0*w*sin(w*t)*x with p0 = 0
nodes_X, nodes_Y = np.array([]), np.array([])
Us, Vs, ps = np.array([]), np.array([]), np.array([])
for t in times:
nodes_X = np.vstack([nodes_X, single_nodes_X]) if nodes_X.size else single_nodes_X
nodes_Y = np.vstack([nodes_Y, single_nodes_Y]) if nodes_Y.size else single_nodes_Y
        U = 0*single_nodes_X + np.cos(w*t)
        V = 0*single_nodes_X
        p = single_nodes_X * w*np.sin(w*t)  # p = rho*U0*w*sin(w*t)*x, per the derivation above
Us = np.vstack([Us, U]) if Us.size else U
Vs = np.vstack([Vs, V]) if Vs.size else V
ps = np.vstack([ps, p]) if ps.size else p
Re, Ur = 1*1*d/1e-6, np.inf
flow = Flow()
flow.Re, flow.Ur = Re, Ur
flow.times = times
flow.nodes_X, flow.nodes_Y = nodes_X, nodes_Y
flow.Us, flow.Vs, flow.ps = Us, Vs, ps
write_flow(flow, parent_folder + 'flows/periodic')
find_neighbors(elements)
write_elements(elements, parent_folder + 'elements/periodic')
write_geometries(np.array([]), parent_folder + 'geometries/periodic')
def generate_inviscid(path_msh, parent_folder):
single_nodes_X, single_nodes_Y, elements = extract_msh(path_msh)
rs = np.sqrt(single_nodes_X**2 + single_nodes_Y**2)
thetas = np.arctan2(single_nodes_Y, single_nodes_X)
Ur, Utheta, p = [], [], []
for r, theta in zip(rs, thetas):
if r == 0:
Ur.append(0)
Utheta.append(0)
p.append(0)
else:
Ur.append((1 - (0.5/r)**2)*np.cos(theta))
Utheta.append((1 + (0.5/r)**2)*np.sin(theta))
p.append(2*(0.5/r)**2 * np.cos(2*theta) - (0.5/r)**4)
Ur = np.array(Ur)
Utheta = np.array(Utheta)
p = np.array(p)
    # Utheta above carries the opposite sign to the usual convention, hence the '+'
    U = Ur*np.cos(thetas) + Utheta*np.sin(thetas)
V = Ur*np.sin(thetas) - Utheta*np.cos(thetas)
nodes_X, nodes_Y = np.array([]), np.array([])
Us, Vs, ps = np.array([]), np.array([]), np.array([])
Nt = 101
times = np.linspace(0, 1, Nt)
for t in times:
nodes_X = np.vstack([nodes_X, single_nodes_X]) if nodes_X.size else single_nodes_X
nodes_Y = np.vstack([nodes_Y, single_nodes_Y]) if nodes_Y.size else single_nodes_Y
Us = np.vstack([Us, U]) if Us.size else U
Vs = np.vstack([Vs, V]) if Vs.size else V
ps = np.vstack([ps, p]) if ps.size else p
Re, Ur = 1e+6, 0.
flow = Flow()
flow.Re, flow.Ur = Re, Ur
flow.times = times
flow.nodes_X, flow.nodes_Y = nodes_X, nodes_Y
flow.Us, flow.Vs, flow.ps = Us, Vs, ps
write_flow(flow, parent_folder + 'flows/potential')
find_neighbors(elements)
write_elements(elements, parent_folder + 'elements/potential')
write_geometries(np.array([[5,407,404,408,405,409,406,410,6,414,411,415,412,416,413,417]]),
parent_folder + 'geometries/potential')
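For reference, a minimal hand-written fragment of the Gmsh 2.2 ASCII sections that extract_msh() walks ($MeshFormat omitted; coordinates illustrative). It encodes one 6-node second-order triangle, the only element type the parser keeps:

sample_msh = """$Nodes
6
1 0.0 0.0 0.0
2 1.0 0.0 0.0
3 0.0 1.0 0.0
4 0.5 0.0 0.0
5 0.5 0.5 0.0
6 0.0 0.5 0.0
$EndNodes
$Elements
1
1 9 2 0 1 1 2 3 4 5 6
$EndElements
"""
# node line:    id x y z
# element line: id type n_tags tag1 tag2 node_ids...  (type 9 = 6-node second-order triangle)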
| 28.085938 | 95 | 0.577886 | 1,111 | 7,190 | 3.576958 | 0.139514 | 0.057373 | 0.051334 | 0.030196 | 0.526422 | 0.509562 | 0.501007 | 0.4615 | 0.423251 | 0.376447 | 0 | 0.030044 | 0.268567 | 7,190 | 255 | 96 | 28.196078 | 0.725613 | 0.180807 | 0 | 0.434783 | 0 | 0 | 0.030724 | 0.003605 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.050725 | 0.014493 | 0.115942 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5318d57c5b94068601a78c0c8bed490f74a1be5 | 1,394 | py | Python | custard/tests/settings.py | kunitoki/django-custard | 3cf3aa5acf84de2f653e96469e2f9c42813df50a | [
"MIT"
] | 6 | 2015-06-15T07:40:26.000Z | 2016-06-27T08:01:34.000Z | custard/tests/settings.py | kunitoki/django-custard | 3cf3aa5acf84de2f653e96469e2f9c42813df50a | [
"MIT"
] | 3 | 2015-03-11T22:43:01.000Z | 2015-06-07T21:50:36.000Z | custard/tests/settings.py | kunitoki/django-custard | 3cf3aa5acf84de2f653e96469e2f9c42813df50a | [
"MIT"
] | 6 | 2015-03-11T22:19:57.000Z | 2021-03-10T15:40:52.000Z | # Django settings for testproject project.
import os
DIRNAME = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = True
ADMINS = ()
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DIRNAME, 'db.sqlite3'),
'TEST_NAME': os.path.join(DIRNAME, 'test_db.sqlite3'),
}
}
TIME_ZONE = 'Europe/Rome'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = ''
MEDIA_URL = ''
SECRET_KEY = 'vaO4Y<g#YRWG8;Md8noiLp>.w(w~q_b=|1`?9<x>0KxA%UB!63'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'custard.tests.urls'
TEMPLATE_DIRS = ()
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'custard',
'custard.tests',
)
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
STATIC_URL = '/static/'
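A minimal sketch of pointing a standalone script at these settings before touching models (module path assumed from ROOT_URLCONF above):

import os
import django

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'custard.tests.settings')
django.setup()  # load apps so the test project is usable outside manage.py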
| 22.483871 | 65 | 0.705165 | 161 | 1,394 | 5.944099 | 0.546584 | 0.122257 | 0.020899 | 0.029258 | 0.043887 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013502 | 0.149928 | 1,394 | 61 | 66 | 22.852459 | 0.794093 | 0.028694 | 0 | 0 | 0 | 0.021277 | 0.504811 | 0.377498 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.021277 | 0 | 0.021277 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5320220c6e0bdc466d30a55af8b5a6073894184 | 2,753 | py | Python | home/utils.py | ryankicks/collection-pipeline | 2f4b6f154baba90aad39d490fd1dc170ba7ae4e4 | [
"MIT"
] | null | null | null | home/utils.py | ryankicks/collection-pipeline | 2f4b6f154baba90aad39d490fd1dc170ba7ae4e4 | [
"MIT"
] | null | null | null | home/utils.py | ryankicks/collection-pipeline | 2f4b6f154baba90aad39d490fd1dc170ba7ae4e4 | [
"MIT"
] | null | null | null | from inspect import stack
import logging
from time import mktime
import pytz
from datetime import *
from calendar import timegm
# from django.http import HttpResponse, HttpResponseRedirect, HttpResponseRedirectBase
from django.conf import settings
from django.utils import timezone
from social.apps.django_app.default.models import UserSocialAuth
import twitter
from twitter import *
EPOCH = 1970
_EPOCH_ORD = date(EPOCH, 1, 1).toordinal()
class Tz:
    # Assumes a naive datetime; if date_format is given, `naive` is a string to parse first.
@staticmethod
def convert_to_utc(naive, date_format=None, user_tz=None):
        if date_format:
            naive = datetime.strptime(naive, date_format)
# if not specified, default to user context
if not user_tz:
user_tz = timezone.get_current_timezone()
local_dt = user_tz.localize(naive, is_dst=None)
utc_dt = local_dt.astimezone(pytz.utc)
return utc_dt
@staticmethod
def convert_to_local(dt, user_tz=None):
# if not specified, default to user context
if not user_tz:
user_tz = timezone.get_current_timezone()
local_dt = dt.astimezone(user_tz)
return local_dt
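# A minimal usage sketch for Tz (illustrative only; the timezone name and
# date format below are assumptions, not taken from this project):
#   eastern = pytz.timezone("America/New_York")
#   utc = Tz.convert_to_utc("2021-07-21 10:00", date_format="%Y-%m-%d %H:%M", user_tz=eastern)
#   local = Tz.convert_to_local(utc, user_tz=eastern)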
LOGGER = logging.getLogger(__name__)  # module-level logger used by the Logger wrapper below
class Logger():
    @staticmethod
    def info(msg):
        LOGGER.info(msg)
    @staticmethod
    def exception(msg):
        LOGGER.exception(msg)
class Twitter:
@staticmethod
def get_twitter(user):
from django.conf import settings
consumer_key = settings.SOCIAL_AUTH_TWITTER_KEY
consumer_secret = settings.SOCIAL_AUTH_TWITTER_SECRET
access_token_key = settings.TWITTER_ACCESS_TOKEN
access_token_secret = settings.TWITTER_ACCESS_TOKEN_SECRET
usa = UserSocialAuth.objects.get(user=user, provider='twitter')
if usa:
access_token = usa.extra_data['access_token']
if access_token:
access_token_key = access_token['oauth_token']
access_token_secret = access_token['oauth_token_secret']
if not access_token_key or not access_token_secret:
raise Exception('No user for twitter API call')
api = twitter.Api(
base_url='https://api.twitter.com/1.1',
consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token_key=access_token_key,
access_token_secret=access_token_secret)
return api
@staticmethod
def get_access_tokens(user):
usa = UserSocialAuth.objects.get(user=user, provider='twitter')
access_token = usa.extra_data['access_token']
return access_token
| 28.677083 | 89 | 0.661097 | 336 | 2,753 | 5.166667 | 0.285714 | 0.126728 | 0.058756 | 0.034562 | 0.288594 | 0.193548 | 0.193548 | 0.154378 | 0.096774 | 0.096774 | 0 | 0.004 | 0.27352 | 2,753 | 95 | 90 | 28.978947 | 0.864 | 0.091537 | 0 | 0.25 | 0 | 0 | 0.048878 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.1875 | 0 | 0.390625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e533e0071445be02c8ad1c3692ec2dd70b4b8806 | 2,193 | py | Python | test/python/transpiler/test_preset_passmanagers.py | chowington/qiskit-terra | a782c64c736fedd6a541bb45dbf89737a52b7c39 | [
"Apache-2.0"
] | null | null | null | test/python/transpiler/test_preset_passmanagers.py | chowington/qiskit-terra | a782c64c736fedd6a541bb45dbf89737a52b7c39 | [
"Apache-2.0"
] | null | null | null | test/python/transpiler/test_preset_passmanagers.py | chowington/qiskit-terra | a782c64c736fedd6a541bb45dbf89737a52b7c39 | [
"Apache-2.0"
] | 1 | 2019-06-13T08:07:26.000Z | 2019-06-13T08:07:26.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests preset pass manager functionalities"""
from qiskit.test import QiskitTestCase
from qiskit.compiler import transpile
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit.test.mock import FakeTenerife, FakeMelbourne, FakeRueschlikon, FakeTokyo
class TestPresetPassManager(QiskitTestCase):
"""Test preset passmanagers work as expected."""
def test_no_coupling_map(self):
"""Test that coupling_map can be None"""
q = QuantumRegister(2, name='q')
test = QuantumCircuit(q)
test.cz(q[0], q[1])
for level in [0, 1, 2, 3]:
with self.subTest(level=level):
test2 = transpile(test, basis_gates=['u1', 'u2', 'u3', 'cx'],
optimization_level=level)
self.assertIsInstance(test2, QuantumCircuit)
class TestFakeBackendTranspiling(QiskitTestCase):
"""Test transpiling on mock backends work properly"""
def setUp(self):
q = QuantumRegister(2)
c = ClassicalRegister(2)
self._circuit = QuantumCircuit(q, c)
self._circuit.h(q[0])
self._circuit.cx(q[0], q[1])
self._circuit.measure(q, c)
def test_optimization_level(self):
"""Test several backends with all optimization levels"""
for backend in [FakeTenerife(), FakeMelbourne(), FakeRueschlikon(), FakeTokyo()]:
for optimization_level in range(4):
result = transpile(
[self._circuit],
backend=backend,
optimization_level=optimization_level
)
self.assertIsInstance(result, QuantumCircuit)
| 35.95082 | 89 | 0.653443 | 260 | 2,193 | 5.45 | 0.488462 | 0.059986 | 0.014114 | 0.06916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018856 | 0.250342 | 2,193 | 60 | 90 | 36.55 | 0.843066 | 0.320109 | 0 | 0 | 0 | 0 | 0.00619 | 0 | 0 | 0 | 0 | 0 | 0.064516 | 1 | 0.096774 | false | 0.032258 | 0.129032 | 0 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5342b8791c68216bf30896c7274b41364db27db | 4,291 | py | Python | scripts/sptk/visualize_spectrogram.py | funcwj/kaldi_enhan | 50e4da07c4e7fce7439da9be2b0bb1a0079491c3 | [
"Apache-2.0"
] | 35 | 2018-04-02T06:09:26.000Z | 2019-02-19T08:27:10.000Z | scripts/sptk/visualize_spectrogram.py | funcwj/kaldi_enhan | 50e4da07c4e7fce7439da9be2b0bb1a0079491c3 | [
"Apache-2.0"
] | 3 | 2018-11-08T10:21:34.000Z | 2019-01-24T02:49:47.000Z | scripts/sptk/visualize_spectrogram.py | funcwj/kaldi_enhan | 50e4da07c4e7fce7439da9be2b0bb1a0079491c3 | [
"Apache-2.0"
] | 17 | 2018-03-08T06:59:31.000Z | 2019-02-19T08:27:41.000Z | #!/usr/bin/env python
# coding=utf-8
# wujian@2020
import argparse
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from libs.data_handler import SpectrogramReader
from libs.opts import StftParser
from libs.utils import get_logger
default_font = "Times New Roman"
default_font_size = 10
default_dpi = 200
default_fmt = "jpg"
logger = get_logger(__name__)
def save_figure(key, mat, dest, cmap="jet", hop=256, sr=16000, title=""):
"""
Save figure to disk
"""
def sub_plot(ax, mat, num_frames, num_bins, xticks=True, title=""):
ax.imshow(np.transpose(mat),
origin="lower",
cmap=cmap,
aspect="auto",
interpolation="none")
if xticks:
xp = np.linspace(0, num_frames - 1, 5)
ax.set_xticks(xp)
ax.set_xticklabels([f"{t:.2f}" for t in (xp * hop / sr)],
fontproperties=default_font)
ax.set_xlabel("Time (s)", fontdict={"family": default_font})
else:
ax.set_xticks([])
yp = np.linspace(0, num_bins - 1, 6)
fs = np.linspace(0, sr / 2, 6) / 1000
ax.set_yticks(yp)
ax.set_yticklabels([f"{t:.1f}" for t in fs],
fontproperties=default_font)
ax.set_ylabel("Frequency (kHz)", fontdict={"family": default_font})
if title:
ax.set_title(title, fontdict={"family": default_font})
logger.info(f"Plot TF-mask of utterance {key} to {dest}.{default_fmt}...")
if mat.ndim == 3:
N, T, F = mat.shape
else:
T, F = mat.shape
N = 1
fig, ax = plt.subplots(nrows=N)
if N != 1:
ts = title.split(";")
for i in range(N):
if len(ts) == N:
sub_plot(ax[i], mat[i], T, F, xticks=i == N - 1, title=ts[i])
else:
sub_plot(ax[i], mat[i], T, F, xticks=i == N - 1)
else:
sub_plot(ax, mat, T, F, title=title)
fig.savefig(f"{dest}.{default_fmt}", dpi=default_dpi, format=default_fmt)
plt.close(fig)
def run(args):
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)
stft_kwargs = {
"frame_len": args.frame_len,
"frame_hop": args.frame_hop,
"round_power_of_two": args.round_power_of_two,
"window": args.window,
"center":
        args.center  # False to be comparable with Kaldi
}
reader = SpectrogramReader(args.wav_scp,
**stft_kwargs,
apply_abs=True,
apply_log=True,
transpose=True)
for key, mat in reader:
if mat.ndim == 3 and args.index >= 0:
mat = mat[args.index]
save_figure(key,
mat,
cache_dir / key.replace(".", "-"),
cmap=args.cmap,
hop=args.frame_hop,
sr=args.sr,
title=args.title)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Command to visualize audio spectrogram.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=[StftParser.parser])
parser.add_argument("wav_scp", type=str, help="Read specifier of audio")
parser.add_argument("--sr",
type=int,
default=16000,
help="Sample frequency (Hz)")
parser.add_argument("--cache-dir",
type=str,
default="spectrogram",
help="Directory to dump spectrograms")
parser.add_argument("--cmap",
choices=["binary", "jet", "hot"],
default="jet",
help="Colormap used when save figures")
parser.add_argument("--index",
type=int,
default=-1,
help="Channel index to plot, -1 means all")
parser.add_argument("--title",
type=str,
default="",
help="Title of the pictures")
args = parser.parse_args()
run(args)
| 33.523438 | 78 | 0.519226 | 507 | 4,291 | 4.242604 | 0.372781 | 0.018596 | 0.04742 | 0.034868 | 0.051139 | 0.023245 | 0.023245 | 0.023245 | 0.023245 | 0.023245 | 0 | 0.017078 | 0.358658 | 4,291 | 127 | 79 | 33.787402 | 0.764535 | 0.022372 | 0 | 0.093458 | 0 | 0 | 0.118295 | 0.005508 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028037 | false | 0 | 0.065421 | 0 | 0.093458 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e535e6dd0129597b74bb4ecff114f37663cccbbf | 9,246 | py | Python | pyocni/adapters/httpResponse_Formater.py | MarouenMechtri/CNG-Manager | 9535b721e7b832d72fd7bba6d2a29e76a0d4bdb7 | [
"Apache-2.0"
] | 1 | 2015-02-28T21:26:07.000Z | 2015-02-28T21:26:07.000Z | pyocni/adapters/httpResponse_Formater.py | MarouenMechtri/CNG-Manager | 9535b721e7b832d72fd7bba6d2a29e76a0d4bdb7 | [
"Apache-2.0"
] | null | null | null | pyocni/adapters/httpResponse_Formater.py | MarouenMechtri/CNG-Manager | 9535b721e7b832d72fd7bba6d2a29e76a0d4bdb7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2010-2012 Institut Mines-Telecom
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jun 21, 2012
@author: Bilel Msekni
@contact: bilel.msekni@telecom-sudparis.eu
@author: Houssem Medhioub
@contact: houssem.medhioub@it-sudparis.eu
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
try:
import simplejson as json
except ImportError:
import json
import pyocni.adapters.cnv_toHTTP as extractor
from webob import Response
class To_HTTP_Text_Plain():
"""
Converts Response data from application/occi+json object to HTTP text/plain descriptions
"""
def format_to_text_plain_categories(self, var):
"""
Format JSON categories into HTTP text/plain categories
Args:
@param var: JSON categories
"""
resp = ""
if var.has_key('kinds'):
items = var['kinds']
for item in items:
resp += "Category :" + cnv_JSON_category(item, "kind") + "\n"
if var.has_key('mixins'):
items = var['mixins']
for item in items:
resp += "Category :" + cnv_JSON_category(item, "mixin") + "\n"
if var.has_key('actions'):
items = var['actions']
for item in items:
resp += "Category :" + cnv_JSON_category(item, "action") + "\n"
return resp
def format_to_text_plain_entities(self, var):
"""
Convert a JSON resource description into a text/plain resource description
Args:
@param var: JSON resource description
"""
response = ""
if var.has_key('resources'):
items = var['resources']
for item in items:
cat, link, att = cnv_JSON_Resource(item)
for c in cat:
response += "Category: " + c + "\n"
for l in link:
response += "Link: " + l + "\n"
for a in att:
response += "X-OCCI-Attribute: " + a + "\n"
response = response[:-1] + ",\n"
response = response[:-2]
if var.has_key('links'):
items = var['links']
response += ",\n"
for item in items:
cat, link, att = cnv_JSON_Resource(item)
for c in cat:
response += "Category: " + c + "\n"
for l in link:
response += "Link: " + l + "\n"
for a in att:
response += "X-OCCI-Attribute: " + a + "\n"
response = response[:-1] + ",\n"
response = response[:-2]
return response
def format_to_text_plain_locations(self, var):
"""
Converts JSON locations into HTTP locations
Args:
var: JSON locations
"""
locs = ""
for item in var:
locs += "Location: " + item + "\n"
return locs
def format_to_text_plain_x_locations(self, var):
"""
        Converts JSON locations into HTTP X-OCCI-Location headers
Args:
var: JSON locations
"""
locs = ""
for item in var:
locs += "X-OCCI-Location: " + item + "\n"
return locs
class To_HTTP_Text_OCCI():
"""
Converts Response data from application/occi+json object to HTTP text/occi descriptions
"""
def format_to_text_occi_categories(self, var):
"""
Format JSON categories into HTTP text/plain categories
Args:
@param var: JSON categories
"""
resp = Response()
resp.headers.clear()
value = ""
if var.has_key('kinds'):
items = var['kinds']
for item in items:
value = cnv_JSON_category(item, "kind") + ",\n"
resp.headers.add('Category', value[:-2])
if var.has_key('mixins'):
items = var['mixins']
for item in items:
value = cnv_JSON_category(item, "mixin") + ",\n"
resp.headers.add('Category', value[:-2])
if var.has_key('actions'):
items = var['actions']
for item in items:
value = cnv_JSON_category(item, "action") + ",\n"
resp.headers.add('Category', value[:-2])
return resp.headers
def format_to_text_occi_entities(self, var):
"""
Convert a JSON resource description into a text/occi resource description
Args:
@param var: JSON resource description
"""
response = Response()
response.headers.clear()
if var.has_key('resources'):
items = var['resources']
for item in items:
cat, link, att = cnv_JSON_Resource(item)
for c in cat:
response.headers.add("Category", c)
for l in link:
response.headers.add("Link", l)
for a in att:
response.headers.add("X-OCCI-Attribute", a)
if var.has_key('links'):
items = var['links']
for item in items:
cat, link, att = cnv_JSON_Resource(item)
for c in cat:
response.headers.add("Category", c)
for l in link:
response.headers.add("Link", l)
for a in att:
response.headers.add("X-OCCI-Attribute", a)
return response.headers
def format_to_text_occi_locations(self, var):
"""
Converts JSON locations into HTTP locations
Args:
var: JSON locations
"""
locs = ""
resp = Response()
resp.headers.clear()
for item in var:
locs += item + ","
resp.headers.add("Location", locs[:-1])
return resp.headers
def format_to_text_x_occi_locations(self, var):
"""
        Converts JSON locations into HTTP X-OCCI-Location headers
Args:
var: JSON locations
"""
locs = ""
resp = Response()
resp.headers.clear()
for item in var:
locs += item + ","
resp.headers.add("X-OCCI-Location", locs[:-1])
return resp.headers
class To_HTTP_Text_URI_List():
"""
Converts Response data from application/occi+json object to HTTP text/uri descriptions
"""
def __init__(self):
pass
def check_for_uri_locations(self, var):
"""
Checks for the existence of path URIs in a JSON location object
Args:
@param var: JSON location object
"""
resp = ""
for item in var:
resp += item + "\n"
return resp, True
def cnv_JSON_category(category, type):
"""
Converts a json category into a HTTP category
Args:
@param category: JSON category
@param type: Category type = (kind || mixin || action)
"""
http_cat = extractor.extract_term_from_category(category) + ';'
http_cat += "scheme=\"" + extractor.extract_scheme_from_category(category) + "\";"
http_cat += "class=\"" + type + "\";"
title = extractor.extract_title_from_category(category)
if title is not None:
http_cat += "title=\"" + title + "\";"
rel = extractor.extract_related_from_category(category)
if rel is not None:
http_cat += "rel=\"" + rel + "\";"
attributes = extractor.extract_attributes_from_category(category)
if attributes is not None:
http_cat += "attributes=\"" + attributes + "\";"
actions = extractor.extract_actions_from_category(category)
if actions is not None:
http_cat += "actions=\"" + actions + "\";"
location = extractor.extract_location_from_category(category)
if location is not None:
http_cat += "location=\"" + location + "\";"
return http_cat
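# For orientation, a sketch of the header value this function renders; the
# category contents here are hypothetical, not taken from this codebase:
#   cnv_JSON_category(category, "kind") -> something like
#   'compute;scheme="http://schemas.ogf.org/occi/infrastructure#";class="kind";title="Compute";'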
def cnv_JSON_Resource(json_object):
"""
Converts a JSON Resource into a HTTP Resource
"""
res_cat = list()
res_links = list()
res_cat.append(extractor.extract_kind_from_entity(json_object))
items = extractor.extract_mixin_from_entity(json_object)
if items is not None:
res_cat.extend(items)
var = extractor.extract_attributes_from_entity(json_object)
if var is not None:
res_att = var
else:
res_att = list()
items = extractor.extract_internal_link_from_entity(json_object)
if items is not None:
res_links.extend(items)
items = extractor.extract_actions_from_entity(json_object)
if items is not None:
res_links.extend(items)
return res_cat, res_links, res_att
| 29.259494 | 92 | 0.561865 | 1,075 | 9,246 | 4.693953 | 0.156279 | 0.020809 | 0.026754 | 0.021799 | 0.608205 | 0.530321 | 0.510107 | 0.491677 | 0.480182 | 0.44134 | 0 | 0.004698 | 0.33236 | 9,246 | 315 | 93 | 29.352381 | 0.812733 | 0.228856 | 0 | 0.559524 | 0 | 0 | 0.093403 | 0.007196 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0.005952 | 0.029762 | 0 | 0.184524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5377b6da443630c0d016aff8eb0fc9c6d8663e8 | 3,358 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/community/hashi_vault/tests/unit/plugins/module_utils/authentication/test_auth_approle.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/hashi_vault/tests/unit/plugins/module_utils/authentication/test_auth_approle.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/hashi_vault/tests/unit/plugins/module_utils/authentication/test_auth_approle.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021 Brian Scholer (@briantist)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible_collections.community.hashi_vault.tests.unit.compat import mock
from ansible_collections.community.hashi_vault.plugins.module_utils._auth_method_approle import (
HashiVaultAuthMethodApprole,
)
from ansible_collections.community.hashi_vault.plugins.module_utils._hashi_vault_common import (
HashiVaultAuthMethodBase,
HashiVaultValueError,
)
@pytest.fixture
def option_dict():
return {
'auth_method': 'approle',
'secret_id': None,
'role_id': None,
'mount_point': None,
}
@pytest.fixture
def secret_id():
return 'opaque'
@pytest.fixture
def role_id():
return 'fake-role'
@pytest.fixture
def auth_approle(adapter, warner):
return HashiVaultAuthMethodApprole(adapter, warner)
@pytest.fixture
def approle_login_response(fixture_loader):
return fixture_loader('approle_login_response.json')
class TestAuthApprole(object):
def test_auth_approle_is_auth_method_base(self, auth_approle):
assert isinstance(auth_approle, HashiVaultAuthMethodApprole)
assert issubclass(HashiVaultAuthMethodApprole, HashiVaultAuthMethodBase)
def test_auth_approle_validate_direct(self, auth_approle, adapter, role_id):
adapter.set_option('role_id', role_id)
auth_approle.validate()
@pytest.mark.parametrize('opt_patch', [
{},
{'secret_id': 'secret_id-only'},
])
def test_auth_approle_validate_xfailures(self, auth_approle, adapter, opt_patch):
adapter.set_options(**opt_patch)
with pytest.raises(HashiVaultValueError, match=r'Authentication method approle requires options .*? to be set, but these are missing:'):
auth_approle.validate()
@pytest.mark.parametrize('use_token', [True, False], ids=lambda x: 'use_token=%s' % x)
@pytest.mark.parametrize('mount_point', [None, 'other'], ids=lambda x: 'mount_point=%s' % x)
def test_auth_approle_authenticate(self, auth_approle, client, adapter, secret_id, role_id, mount_point, use_token, approle_login_response):
adapter.set_option('secret_id', secret_id)
adapter.set_option('role_id', role_id)
adapter.set_option('mount_point', mount_point)
expected_login_params = {
'secret_id': secret_id,
'role_id': role_id,
'use_token': use_token,
}
if mount_point:
expected_login_params['mount_point'] = mount_point
def _set_client_token(*args, **kwargs):
if kwargs['use_token']:
client.token = approle_login_response['auth']['client_token']
return approle_login_response
with mock.patch.object(client.auth.approle, 'login', side_effect=_set_client_token) as approle_login:
response = auth_approle.authenticate(client, use_token=use_token)
approle_login.assert_called_once_with(**expected_login_params)
assert response['auth']['client_token'] == approle_login_response['auth']['client_token']
assert (client.token == approle_login_response['auth']['client_token']) is use_token
| 33.919192 | 144 | 0.712627 | 405 | 3,358 | 5.575309 | 0.311111 | 0.068202 | 0.070859 | 0.031887 | 0.251107 | 0.193534 | 0.139947 | 0.139947 | 0.052259 | 0 | 0 | 0.00327 | 0.180465 | 3,358 | 98 | 145 | 34.265306 | 0.817224 | 0.047052 | 0 | 0.132353 | 0 | 0 | 0.125782 | 0.008448 | 0 | 0 | 0 | 0 | 0.073529 | 1 | 0.147059 | false | 0 | 0.073529 | 0.073529 | 0.323529 | 0.014706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e53b8de7cefb3da0c77b80958c4124a1178847f8 | 905 | py | Python | ST_DM/KDD2022-DuMapper/DME/arch/utils/ll_2_mc.py | zhangyimi/Research | 866f91d9774a38d205d6e9a3b1ee6293748261b3 | [
"Apache-2.0"
] | 1 | 2022-03-18T08:32:37.000Z | 2022-03-18T08:32:37.000Z | ST_DM/KDD2022-DuMapper/DME/arch/utils/ll_2_mc.py | green9989/Research | 94519a72e7936c77f62a31709634b72c09aabf74 | [
"Apache-2.0"
] | null | null | null | ST_DM/KDD2022-DuMapper/DME/arch/utils/ll_2_mc.py | green9989/Research | 94519a72e7936c77f62a31709634b72c09aabf74 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
"""
Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved
File: ll_2_mc.py
func: converts between Mercator and longitude/latitude coordinates
Author: yuwei09(yuwei09@baidu.com)
Date: 2021/07/21
"""
import math
SCALE_S = 20037508.34
def lonLat2Mercator(x, y):
"""Convert longitude/latitude to Mercator coordinate"""
mx = x * SCALE_S / 180.
my = math.log(math.tan((90. + y) * math.pi / 360.)) / (math.pi / 180.)
    my = my * SCALE_S / 180.  # scale the projected latitude, not the raw input
return mx, my
def Mercator2LonLat(x, y):
"""Convert Mercotor point to longitude/latitude cooridinat"""
lx = x / SCALE_S * 180.
ly = y / SCALE_S * 180.
ly = 180 / math.pi * (2 * math.atan(math.exp(ly * math.pi / 180.)) - math.pi / 2)
return lx, ly
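# Round-trip sanity check (an illustration; the coordinates are arbitrary):
#   mx, my = lonLat2Mercator(116.404, 39.915)
#   lon, lat = Mercator2LonLat(mx, my)
#   assert abs(lon - 116.404) < 1e-6 and abs(lat - 39.915) < 1e-6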
if __name__ == '__main__':
x, y = 12962922.3800, 4832335.0200
lx, ly = Mercator2LonLat(x, y)
print(lx, ly)
# lx, ly = bd09mc_to_bd09ll(x, y)
# print(lx, ly)
| 23.205128 | 85 | 0.612155 | 141 | 905 | 3.808511 | 0.510638 | 0.055866 | 0.067039 | 0.037244 | 0.040968 | 0 | 0 | 0 | 0 | 0 | 0 | 0.123741 | 0.232044 | 905 | 38 | 86 | 23.815789 | 0.648921 | 0.362431 | 0 | 0 | 0 | 0 | 0.014388 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.0625 | 0 | 0.3125 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5435081eee984010a042f8f54a44d659b9e9dc8 | 1,978 | py | Python | pypret/io/tests/test_io.py | liam-clink/pypret | c84e954efc12137c6b5ade4fae920d60a15d4875 | [
"MIT"
] | 36 | 2019-03-16T18:38:10.000Z | 2022-02-15T14:25:30.000Z | pypret/io/tests/test_io.py | liam-clink/pypret | c84e954efc12137c6b5ade4fae920d60a15d4875 | [
"MIT"
] | 1 | 2019-06-24T21:32:14.000Z | 2019-07-03T12:46:28.000Z | pypret/io/tests/test_io.py | liam-clink/pypret | c84e954efc12137c6b5ade4fae920d60a15d4875 | [
"MIT"
] | 12 | 2019-07-23T22:03:55.000Z | 2022-01-06T08:50:52.000Z | """ This module tests the io subpackage implementation.
Author: Nils Geib, nils.geib@uni-jena.de
"""
import numpy as np
from pypret import io
from pprint import pformat
from os import remove
class IO1(io.IO):
x = 1
def squared(self):
return self.x * self.x
def __repr__(self):
return "IO1(x={0})".format(self.x)
class Grid(io.IO):
_io_store = ['N', 'dx', 'x0']
def __init__(self, N, dx, x0=0.0):
# This is _not_ called upon loading from storage
self.N = N
self.dx = dx
self.x0 = x0
self._post_init()
def _post_init(self):
# this is called upon loading from storage
# calculate the grids
n = np.arange(self.N)
self.x = self.x0 + n * self.dx
def __repr__(self):
return "TestIO1(N={0}, dx={1}, x0={2})".format(
self.N, self.dx, self.x0)
def test_io():
# test flat arrays
_assert_io(np.arange(5))
_assert_io(np.arange(5, dtype=np.complex128))
# test nested structures of various types
_assert_io([{'a': 1.0, 'b': np.uint16(1)}, np.random.rand(10),
True, None, "hello", 1231241512354134123412353124, b"bytes"])
_assert_io([[[1]], [[[[1], 2], 3], 4], 5])
# Test custom objects
_assert_io(IO1())
_assert_io(Grid(128, 0.23, x0=-2.3))
def _assert_io(x):
""" This is slightly hacky: we use pprint to recursively print the objects
and compare the resulting strings to make sure they are the same. This
only works as pprint sorts the dictionary entries by their keys before
printing.
This requires custom objects to implement __repr__.
"""
io.save(x, "test.hdf5")
x2 = io.load("test.hdf5")
remove("test.hdf5")
s1 = pformat(x)
s2 = pformat(x2)
if s1 != s2:
print(s1)
print(s2)
assert False
if __name__ == "__main__":
test_io()
| 26.026316 | 79 | 0.577351 | 285 | 1,978 | 3.838596 | 0.417544 | 0.051188 | 0.019196 | 0.031079 | 0.082267 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060345 | 0.296259 | 1,978 | 75 | 80 | 26.373333 | 0.725575 | 0.280586 | 0 | 0.046512 | 0 | 0 | 0.070336 | 0 | 0 | 0 | 0 | 0 | 0.186047 | 1 | 0.162791 | false | 0 | 0.093023 | 0.069767 | 0.418605 | 0.069767 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e543a1470c327269bc5d0fa3125cb8ab3fe77488 | 14,682 | py | Python | src/main/python/main.py | wong-justin/quick-bible | 035db43eca2c811792e32b123fa81f679ac5f168 | [
"MIT"
] | null | null | null | src/main/python/main.py | wong-justin/quick-bible | 035db43eca2c811792e32b123fa81f679ac5f168 | [
"MIT"
] | null | null | null | src/main/python/main.py | wong-justin/quick-bible | 035db43eca2c811792e32b123fa81f679ac5f168 | [
"MIT"
] | null | null | null |
from utils import *
from shared import *
from updating import MyAppContext
from threading import Thread
import re
import sys
import os
class BooksPage(Page, FilterableList):
'''Lists books from Gen->Rev and connects to next chapters page.
First page of application.'''
def __init__(self):
Page.__init__(self)
FilterableList.__init__(self)
self.set_items(BOOK_NAMES)
# self.set_items([c for c in 'abcdefghijklmnopqrstuvwxyz']) # for testing
self.itemActivated.connect(self.on_book_selected)
def on_book_selected(self, book_item):
# book_item is QtListItem
book = book_item.text()
# show content
if has_chapters(book):
# go to chapter screen
self.nav.to(ChaptersPage, state=get_num_chapters(book))
else:
# skip to verses screen
self.nav.to(VersesPage, state=data.bible[book]) # or get_bible_content(data.curr_scripture.inc(bok))
# widget cleanup
self.nav.set_title(data.curr_scripture.inc(book, inplace=True))
self.searchbox.deactivate()
self.show_all() # reset any searches when naving back
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
            QApplication.exit(2)  # RESTART_EXIT_CODE
if ctrl_f_event(event):
self.nav.to(SearchResultsPage, state=lambda: iter_verses_in_whole_bible())
self.searchbox.deactivate()
else:
FilterableList.keyPressEvent(self, event) # this is 0th page; don't need nav back
class ChaptersPage(Page, FilterableList):
'''List of chapter numbers 1->n for given book and connects to next verses page.'''
def __init__(self):
Page.__init__(self)
FilterableList.__init__(self)
self.itemActivated.connect(self.on_chapter_selected)
def load_state(self, state):
num_chapters = state
self.set_items(range(1, num_chapters+1))
def on_chapter_selected(self, chapter_item):
chapter = chapter_item.text()
data.curr_scripture.inc(chapter, inplace=True)
# show the content
verses = get_bible_content(data.curr_scripture)
self.nav.to(VersesPage, state=verses)
# widget cleanup
self.nav.set_title(str(data.curr_scripture))
self.searchbox.deactivate()
self.show_all() # reset any searches when naving back
def keyPressEvent(self, event):
if not self.search_is_active() and event.key() == Qt.Key_Backspace:
self.nav.back()
self.nav.set_title(data.curr_scripture.dec(inplace=True))
elif ctrl_f_event(event):
# book_scripture = data.curr_scripture
self.nav.to(SearchResultsPage, state=lambda: iter_verses_in_book(data.curr_scripture))
self.searchbox.deactivate()
else:
FilterableList.keyPressEvent(self, event)
class VersesPage(Page, QTextEdit, Filterable):
'''Formats dict of verses {num: text} into text display.
Filterable by verse num, isolating and highlighting text.'''
def __init__(self):
Page.__init__(self)
QTextEdit.__init__(self)
Filterable.__init__(self)
# style
self.setReadOnly(True)
set_font_size(self, 11)
def load_state(self, state):
# state = dict of verses in chapter
self.verses = state
self.show_all()
def show_all(self):
# render
html = format_to_html(self.verses)
self.set_html(html)
def set_html(self, html):
# wrapping textEdit.setHtml to keep scroll position
scroll_pos = self.verticalScrollBar().value()
self.setHtml(html) # this resets scroll
self.verticalScrollBar().setValue(scroll_pos)
def filter_items(self, pattern):
# highlight verse, given number
# make sure the verse is there
if pattern not in self.verses.keys():
self.show_all()
return
n = int(pattern)
verse = self.verses[str(n)]
# divide text around verse
pre_verses = dict_where_keys(self.verses, lambda k: int(k) < n)
main_verse = {n: verse}
post_verses = dict_where_keys(self.verses, lambda k: int(k) > n)
pre, main, post = (format_to_html(vs) for vs in (pre_verses, main_verse, post_verses))
html = (
OPACITY_TEMPLATE.format(pre) +
f' {main} ' +
OPACITY_TEMPLATE.format(post)
)
self.set_html(html)
# find verse position in text widget
plain_verse = to_plaintext(main)
plain_start = self.toPlainText().index(plain_verse)
c = self.textCursor()
c.setPosition(plain_start)
self.setTextCursor(c)
# scroll to verse position
rect = self.cursorRect()
top = rect.top()
vbar = self.verticalScrollBar()
vbar.setValue(vbar.value() + top) # top of verse is top of screen
if not vbar.value() == vbar.maximum(): # avoid edge case of last verse: it stays maximum scroll, else hiding last line
vbar.triggerAction(QAbstractSlider.SliderSingleStepSub) # but in general content looks nicer when not pinned to top
def change_highlighted_scripture(self, diff):
pattern = self.searchbox.text()
# allow new highlight from beginning or end
if pattern == '':
last_verse = list(self.verses.keys())[-1]
n = (1 if diff == 1 else last_verse)
# else make sure a verse is already selected
elif pattern not in self.verses.keys():
return
# make sure new verse within bounds
else:
n = int(pattern) + diff
if str(n) not in self.verses.keys():
return
# update searchbox, which triggers new highlight filter and updates user
self.searchbox.activate(str(n))
def keyPressEvent(self, event):
keypress = event.key()
# nav back when backspacing without searchbox
if not self.search_is_active() and keypress == Qt.Key_Backspace:
self.nav.back()
self.nav.set_title(data.curr_scripture.dec(inplace=True))
self.verticalScrollBar().setValue(0) # scroll back to top
elif event.modifiers() == Qt.ControlModifier:
# scripture up/down
if keypress in (Qt.Key_Down, Qt.Key_Up):
diff = (1 if keypress == Qt.Key_Down else -1)
self.change_highlighted_scripture(diff)
# search this chapter
elif keypress == Qt.Key_F:
self.nav.to(SearchResultsPage, state=lambda: scriptures_with_verses(data.curr_scripture, self.verses))
self.searchbox.deactivate()
self.verticalScrollBar().setValue(0) # scroll back to top
# scroll
elif keypress in (Qt.Key_Down, Qt.Key_Up):
QTextEdit.keyPressEvent(self, event)
# keypress goes to searchbox
else:
Filterable.keyPressEvent(self, event)
class SearchResultDelegate(QStyledItemDelegate):
# custom list item rendering,
# mainly just to format a title and subtitle while looking like default list widget item
def paint(self, painter, option, index):
# turns item text into title and subtitle.
# imitates standard list widget item style on select.
# title bolded, subtitle beneath.
# maybe custom eliding for ellipsis on both left and right, focused around match?
# or at least on right, with match surely in view starting from left
painter.save()
item = index.data(Qt.DisplayRole) # default item data is at role 0
# custom data was passed into this item, no longer usual type str
title = str(item['scripture']) + '\n'
subtitle = '\n' + item['text']
given_rect = option.rect # from size hint
states = option.state # bitwise OR of QStyle.State_ flags
if states & QStyle.State_Selected:
palette = QApplication.palette()
painter.setPen(palette.color(QPalette.HighlightedText))
painter.fillRect(given_rect, palette.color(QPalette.Highlight))
# text inset by small margin
text_rect = given_rect.adjusted(2, 2, -2, -2)
# draw title text
em_font = QFont(option.font) # copy
em_font.setWeight(QFont.Bold)
painter.setFont(em_font)
painter.drawText(text_rect, option.displayAlignment, title)
# draw subtitle text
painter.setFont(option.font) # back to default font
# painter.translate(3, 0) # slight indent under title might look nice
elided_subtitle = QFontMetrics(QFont(option.font)).elidedText(subtitle, Qt.ElideRight, text_rect.width())#, Qt.TextShowMnemonic)
# elided_subtitle = painter.fontMetrics().elidedText(subtitle, Qt.ElideRight, text_rect.width())#, Qt.TextShowMnemonic)
painter.drawText(text_rect, option.displayAlignment, elided_subtitle)
painter.restore()
def sizeHint(self, option, index):
# fit to width, creating ellipsis on long text with no need for horiz scroll
# default height seems to have been n*line_height of str in option.data(Qt.DisplayRole)
s = QSize()
font_metrics = QFontMetrics(option.font)
line_height = font_metrics.height()
extra = 4 # produces more comfortable line spacing; 'elbow room'
s.setHeight(2*line_height + extra) # 1 line for title, subtitle each
s.setWidth(0) # don't allow horiz scroll when there's wide items
return s
class SearchResultsPage(Page, FilterableList):
'''Searches given verses by regex from searchbox and shows matches in list.'''
def __init__(self):
self.default_placeholder_msg = 'search regex:'
Page.__init__(self)
FilterableList.__init__(self, placeholder=self.default_placeholder_msg)
self.setItemDelegate(SearchResultDelegate(self)) # custom rendering of list item
# self.itemActivated.connect(self.on_result_item_selected)
# dummy searchbox serves as visual prompt on empty screen
# gives better communication to user
self.fake_searchbox = SearchBox(None)
add_grid_child(self, self.fake_searchbox, Qt.AlignRight | Qt.AlignBottom, grid=self.layout())
self.fake_searchbox.show()
# to decrease stalling when doing a large search?
# self._thread = None
# batches aren't working/helping, maybe because it's a listwidget instead of listview
# QListView.setLayoutMode(self, QListView.Batched)
# self.setBatchSize(5)
# self.setUniformItemSizes(True) # don't think it's helping
# maybe implement a list view instead of a list widget?
def load_state(self, state):
# state = callable that produces iter of verses in desired scope
self.verses_iter_factory = state
scope = str(data.curr_scripture)
self.nav.set_title('Search ' + scope)
self.show_all() # trigger empty search display
def show_all(self):
# called when searchbox is empty, which means
# show placeholder and extra searchbox prompt for user.
self.clear()
self.fake_searchbox.show()
self.placeholder.setText(self.default_placeholder_msg)
def show_items(self, items):
# replaced by custom filter_items, so override and do nothing
return
# def on_result_item_selected(self, item):
# # callback for list widget selection
# d = item.data(Qt.DisplayRole)
# self.nav.to(SearchedVersePage, state=d['location'])
def filter_items(self, search_text):
# show matches of search in a list
self.fake_searchbox.hide() # could be showing if this is first char of search
self.placeholder.setText(self.default_placeholder_msg) # could be diff if last search was error
try:
re.compile(search_text)
except re.error:
self.placeholder.setText('invalid regex')
self.clear()
return
self.clear()
# items = []
for scripture, verse_text in self.verses_iter_factory():
match = re.search(search_text, verse_text)
if match is not None:
                item = QListWidgetItem()  # parent deferred; items added via addItem below
item.setData(Qt.DisplayRole, {
'scripture': scripture,
'text': verse_text.replace('\n', ' '),
})
# items.append(item)
self.addItem(item)
# for i in items:
# self.addItem(i)
# print(self.item(100).data(0))
# when finished iter and no matches
if self.is_empty():
self.placeholder.setText('no results')
else:
self.placeholder.setText('')
def is_empty(self):
# return QListWidget.count(self) == 0 # works if you used addItem
return self.itemAt(0, 0) is None # works with just making ListItem(self), not having called addItem
def keyPressEvent(self, event):
empty_search = not self.search_is_active() or self.searchbox.text() == ''
if empty_search and event.key() == Qt.Key_Backspace:
self.nav.back()
self.nav.set_title(str(data.curr_scripture))
# self.clear()
else:
FilterableList.keyPressEvent(self, event)
class Main(QWidget):
# outer window shown; wraps child and restores settings from last session
def __init__(self, child):
super().__init__()
layout = MarginGrid()
layout.addWidget(child, 0, 0)
self.setLayout(layout)
child.setParent(self)
self.settings = QSettings(str(RESOURCE_DIR / 'settings.ini'), QSettings.IniFormat) # I can specify the location
# self.settings = QSettings('FastBible', 'FastBible') # saved in some OS specific location
default = bytes('', encoding='utf-8')
geometry = self.settings.value('geometry', default)
self.restoreGeometry(geometry)
def closeEvent(self, event):
geometry = self.saveGeometry()
self.settings.setValue('geometry', geometry)
super().closeEvent(event)
# --- run
if __name__ == '__main__':
appctxt = MyAppContext()
set_theme(appctxt.app)
init_data()
main = Main(PageManager(BooksPage, ChaptersPage, VersesPage, SearchResultsPage))
main.show()
main.setWindowTitle('Bible')
# exit_code = appctxt.app.exec_()
# sys.exit(exit_code)
appctxt.app.run()
| 36.431762 | 136 | 0.63697 | 1,783 | 14,682 | 5.102075 | 0.261357 | 0.012312 | 0.022425 | 0.016159 | 0.217874 | 0.196548 | 0.141365 | 0.122348 | 0.092888 | 0.062218 | 0 | 0.003364 | 0.271216 | 14,682 | 402 | 137 | 36.522388 | 0.846822 | 0.288721 | 0 | 0.266094 | 0 | 0 | 0.012614 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107296 | false | 0 | 0.030043 | 0.008584 | 0.193133 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e54612725dff063fe507222226db0fb8875e240a | 4,100 | py | Python | synapse/tools/cryo/cat.py | vertexmc/synapse | bd1f8ab1abcbaac20dc9afb9ad385cf831278ada | [
"Apache-2.0"
] | null | null | null | synapse/tools/cryo/cat.py | vertexmc/synapse | bd1f8ab1abcbaac20dc9afb9ad385cf831278ada | [
"Apache-2.0"
] | 4 | 2017-10-03T21:50:40.000Z | 2017-11-20T15:49:38.000Z | synapse/tools/cryo/cat.py | vertexmc/synapse | bd1f8ab1abcbaac20dc9afb9ad385cf831278ada | [
"Apache-2.0"
] | null | null | null | import sys
import json
import pprint
import argparse
import logging
import synapse.common as s_common
import synapse.cryotank as s_cryotank
import synapse.lib.cell as s_cell
import synapse.lib.output as s_output
import synapse.lib.msgpack as s_msgpack
logger = logging.getLogger(__name__)
def _except_wrap(it, error_str_func):
    ''' Wrap an iterator, adding a bit of context to any exception message '''
item_no = 0
while True:
item_no += 1
try:
yield next(it)
except StopIteration:
return
except Exception as e:
extra_context = error_str_func(item_no)
e.args = (extra_context + ': ' + str(e.args[0]), ) + e.args[1:]
raise
def main(argv, outp=s_output.stdout):
pars = argparse.ArgumentParser(prog='cryo.cat', description='display data items from a cryo cell')
pars.add_argument('cryocell', help='The cell descriptor and cryo tank path (cell://<host:port>/<name>).')
pars.add_argument('--list', default=False, action='store_true', help='List tanks in the remote cell and return')
pars.add_argument('--offset', default=0, type=int, help='Begin at offset index')
pars.add_argument('--size', default=10, type=int, help='How many items to display')
pars.add_argument('--timeout', default=10, type=int, help='The network timeout setting')
pars.add_argument('--authfile', help='Path to your auth file for the remote cell')
group = pars.add_mutually_exclusive_group()
group.add_argument('--jsonl', action='store_true', help='Input/Output items in jsonl format')
group.add_argument('--msgpack', action='store_true', help='Input/Output items in msgpack format')
pars.add_argument('--verbose', '-v', default=False, action='store_true', help='Verbose output')
pars.add_argument('--ingest', '-i', default=False, action='store_true',
help='Reverses direction: feeds cryotank from stdin in msgpack or jsonl format')
pars.add_argument('--omit-offset', default=False, action='store_true',
help="Don't output offsets of objects. This is recommended to be used when jsonl/msgpack"
" output is used.")
opts = pars.parse_args(argv)
if opts.verbose:
logger.setLevel(logging.INFO)
if not opts.authfile:
logger.error('Currently requires --authfile until neuron protocol is supported')
return 1
if opts.ingest and not opts.jsonl and not opts.msgpack:
logger.error('Must specify exactly one of --jsonl or --msgpack if --ingest is specified')
return 1
authpath = s_common.genpath(opts.authfile)
auth = s_msgpack.loadfile(authpath)
netw, path = opts.cryocell[7:].split('/', 1)
host, portstr = netw.split(':')
addr = (host, int(portstr))
logger.info('connecting to: %r', addr)
cuser = s_cell.CellUser(auth)
with cuser.open(addr, timeout=opts.timeout) as sess:
cryo = s_cryotank.CryoClient(sess)
if opts.list:
for name, info in cryo.list(timeout=opts.timeout):
outp.printf('%s: %r' % (name, info))
return 0
if opts.ingest:
if opts.msgpack:
fd = sys.stdin.buffer
item_it = _except_wrap(s_msgpack.iterfd(fd), lambda x: 'Error parsing item %d' % x)
else:
fd = sys.stdin
item_it = _except_wrap((json.loads(s) for s in fd), lambda x: ('Failure parsing line %d of input' % x))
cryo.puts(path, item_it)
else:
for item in cryo.slice(path, opts.offset, opts.size, opts.timeout):
i = item[1] if opts.omit_offset else item
if opts.jsonl:
outp.printf(json.dumps(i, sort_keys=True))
elif opts.msgpack:
sys.stdout.write(s_msgpack.en(i))
else:
outp.printf(pprint.pformat(i))
return 0
if __name__ == '__main__': # pragma: no cover
logging.basicConfig()
sys.exit(main(sys.argv[1:]))
| 38.679245 | 119 | 0.628537 | 557 | 4,100 | 4.509874 | 0.332136 | 0.048169 | 0.053742 | 0.045382 | 0.094745 | 0.078822 | 0.029459 | 0.029459 | 0 | 0 | 0 | 0.005552 | 0.253171 | 4,100 | 105 | 120 | 39.047619 | 0.814827 | 0.020976 | 0 | 0.084337 | 0 | 0 | 0.224857 | 0.006988 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024096 | false | 0 | 0.120482 | 0 | 0.204819 | 0.048193 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e546ebdb04dff83307e0ea85b193a4c434f9cc11 | 3,905 | py | Python | bdd100k/eval/lane_test.py | bdd100k/bdd100k | c8b54044038d2a03dcb10dcc6d9aef361639ffec | [
"BSD-3-Clause"
] | 193 | 2020-09-22T09:48:17.000Z | 2022-03-31T20:49:24.000Z | bdd100k/eval/lane_test.py | bdd100k/bdd100k | c8b54044038d2a03dcb10dcc6d9aef361639ffec | [
"BSD-3-Clause"
] | 60 | 2020-09-28T15:44:40.000Z | 2022-03-31T07:58:58.000Z | bdd100k/eval/lane_test.py | bdd100k/bdd100k | c8b54044038d2a03dcb10dcc6d9aef361639ffec | [
"BSD-3-Clause"
] | 41 | 2020-09-27T02:52:20.000Z | 2022-02-21T03:33:39.000Z | """Test cases for lane.py."""
import os
import unittest
import numpy as np
from ..common.utils import list_files
from .lane import (
eval_lane_per_threshold,
evaluate_lane_marking,
get_foreground,
get_lane_class,
sub_task_funcs,
)
class TestGetLaneClass(unittest.TestCase):
"""Test cases for the lane specific channel extraction."""
def test_partialled_classes(self) -> None:
"""Check the function that partial get_lane_class."""
for num in range(255):
byte = np.array(num, dtype=np.uint8)
if num & 8:
self.assertTrue(get_lane_class(byte, 1, 3, 1))
else:
self.assertTrue(get_lane_class(byte, 0, 3, 1))
self.assertTrue(get_foreground(byte))
if num & (1 << 5):
self.assertTrue(sub_task_funcs["direction"](byte, 1))
else:
self.assertTrue(sub_task_funcs["direction"](byte, 0))
if num & (1 << 4):
self.assertTrue(sub_task_funcs["style"](byte, 1))
else:
self.assertTrue(sub_task_funcs["style"](byte, 0))
class TestEvalLanePerThreshold(unittest.TestCase):
"""Test cases for the per image per threshold lane marking evaluation."""
def test_two_parallel_lines(self) -> None:
"""Check the correctness of the function in general cases."""
a = np.zeros((10, 10), dtype=bool)
b = np.zeros((10, 10), dtype=bool)
a[3, 3:7] = True
b[7, 3:7] = True
for radius in [1, 2, 3]:
self.assertAlmostEqual(eval_lane_per_threshold(a, b, radius), 0.0)
for radius in [4, 5, 6]:
self.assertAlmostEqual(eval_lane_per_threshold(a, b, radius), 1.0)
def test_two_vertical_lines(self) -> None:
"""Check the correctness of the function in general cases."""
a = np.zeros((10, 10), dtype=bool)
b = np.zeros((10, 10), dtype=bool)
a[3, 3:6] = True
b[5:8, 7] = True
self.assertAlmostEqual(eval_lane_per_threshold(a, b, 2), 0.0)
self.assertAlmostEqual(eval_lane_per_threshold(a, b, 3), 1 / 3)
self.assertAlmostEqual(eval_lane_per_threshold(a, b, 4), 2 / 3)
self.assertAlmostEqual(eval_lane_per_threshold(a, b, 5), 1.0)
class TestEvaluateLaneMarking(unittest.TestCase):
"""Test cases for the evaluate_lane_marking function."""
def test_mock_cases(self) -> None:
"""Check the peformance of the mock case."""
cur_dir = os.path.dirname(os.path.abspath(__file__))
gt_dir = "{}/testcases/lane/gts".format(cur_dir)
res_dir = "{}/testcases/lane/res".format(cur_dir)
result = evaluate_lane_marking(
list_files(gt_dir, ".png", with_prefix=True),
list_files(res_dir, ".png", with_prefix=True),
nproc=1,
)
data_frame = result.pd_frame()
data_arr = data_frame.to_numpy()
gt_data_arr = np.array(
[
[70.53328267, 80.9831119, 100.0],
[100.0, 100.0, 100.0],
[70.53328267, 80.9831119, 100.0],
[100.0, 100.0, 100.0],
[99.82147748, 100.0, 100.0],
[100.0, 100.0, 100.0],
[100.0, 100.0, 100.0],
[75.33066961, 79.34917317, 100.0],
[71.02916505, 86.25984707, 100.0],
[100.0, 100.0, 100.0],
[96.43828133, 100.0, 100.0],
[94.79621737, 100.0, 100.0],
[85.26664133, 90.49155595, 100.0],
[85.26664133, 90.49155595, 100.0],
[92.17697636, 95.70112753, 100.0],
[87.57008634, 92.22807981, 100.0],
]
)
data_arr = data_frame.to_numpy()
self.assertTrue(np.isclose(data_arr, gt_data_arr).all())
if __name__ == "__main__":
unittest.main()
| 35.5 | 78 | 0.568246 | 522 | 3,905 | 4.070881 | 0.270115 | 0.054588 | 0.059294 | 0.067765 | 0.494118 | 0.475294 | 0.381647 | 0.340235 | 0.234824 | 0.188235 | 0 | 0.136248 | 0.297055 | 3,905 | 109 | 79 | 35.825688 | 0.637887 | 0.100896 | 0 | 0.216867 | 0 | 0 | 0.024784 | 0.012104 | 0 | 0 | 0 | 0 | 0.168675 | 1 | 0.048193 | false | 0 | 0.060241 | 0 | 0.144578 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e548c969786feae43c81dceea46b23eaaf132846 | 2,991 | py | Python | user_program/old/firmware_tester.py | dekuNukem/USB4VC | 66c4f0b4a4acd7cec6654ea0dd4da026edf5d24c | [
"MIT"
] | 78 | 2022-02-07T16:48:11.000Z | 2022-03-31T12:25:35.000Z | user_program/old/firmware_tester.py | dekuNukem/USB4VC | 66c4f0b4a4acd7cec6654ea0dd4da026edf5d24c | [
"MIT"
] | 1 | 2022-02-26T20:16:08.000Z | 2022-02-26T20:24:04.000Z | user_program/old/firmware_tester.py | dekuNukem/USB4VC | 66c4f0b4a4acd7cec6654ea0dd4da026edf5d24c | [
"MIT"
] | 1 | 2022-02-24T03:34:15.000Z | 2022-02-24T03:34:15.000Z | import os
import sys
import time
import spidev
import RPi.GPIO as GPIO
PBOARD_RESET_PIN = 25
PBOARD_BOOT0_PIN = 12
SLAVE_REQ_PIN = 16
GPIO.setmode(GPIO.BCM)
GPIO.setup(PBOARD_RESET_PIN, GPIO.IN)
GPIO.setup(PBOARD_BOOT0_PIN, GPIO.IN)
GPIO.setup(SLAVE_REQ_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
is_dfu = False
def enter_dfu():
# RESET LOW: Enter reset
GPIO.setup(PBOARD_RESET_PIN, GPIO.OUT)
GPIO.output(PBOARD_RESET_PIN, GPIO.LOW)
time.sleep(0.05)
# BOOT0 HIGH: Boot into DFU mode
GPIO.setup(PBOARD_BOOT0_PIN, GPIO.OUT)
GPIO.output(PBOARD_BOOT0_PIN, GPIO.HIGH)
time.sleep(0.05)
# Release RESET, BOOT0 still HIGH, STM32 now in DFU mode
GPIO.setup(PBOARD_RESET_PIN, GPIO.IN)
time.sleep(1)
def exit_dfu():
# Release BOOT0
GPIO.setup(PBOARD_BOOT0_PIN, GPIO.IN)
# Activate RESET
GPIO.setup(PBOARD_RESET_PIN, GPIO.OUT)
GPIO.output(PBOARD_RESET_PIN, GPIO.LOW)
time.sleep(0.05)
# Release RESET, BOOT0 is LOW, STM32 boots in normal mode
GPIO.setup(PBOARD_RESET_PIN, GPIO.IN)
time.sleep(0.2)
def flash_firmware(fw_path):
for x in range(5):
print(f"----------------- {fw_path.split('/')[-1]} -----------------")
enter_dfu()
if is_dfu:
exit_code = os.system(f'sudo dfu-util --device ,0483:df11 -a 0 -D {fw_path}') >> 8
else:
exit_code = os.system(f'sudo stm32flash -w {fw_path} -a 0x3b /dev/i2c-1') >> 8
exit_dfu()
if exit_code != 0:
for x in range(5):
print("!!!!!!!!!!!!!!!!! TEST FLASH FAILED !!!!!!!!!!!!!!!!!")
exit()
if len(sys.argv) < 3:
    print(__file__ + ' payload_fw test_fw')
exit()
os.system("clear")
pcard_spi = spidev.SpiDev(0, 0)
pcard_spi.max_speed_hz = 2000000
payload_fw_path = sys.argv[1]
test_fw_path = sys.argv[2]
if '.dfu' in payload_fw_path.lower() or '.dfu' in test_fw_path.lower():
is_dfu = True
flash_firmware(test_fw_path)
req_result = []
for x in range(10):
req_result.append(GPIO.input(SLAVE_REQ_PIN))
time.sleep(0.1)
print(req_result)
if 0 not in req_result or 1 not in req_result or req_result.count(0) <= 3 or req_result.count(1) <= 3:
for x in range(5):
print("!!!!!!!!!!!!!!!!! SLAVE REQ ERROR !!!!!!!!!!!!!!!!!")
exit()
while 1:
if len(input("Press enter to continue\n")) == 0:
        break
flash_firmware(payload_fw_path)
SPI_MOSI_MAGIC = 0xde
SPI_MOSI_MSG_TYPE_INFO_REQUEST = 1
nop_spi_msg_template = [SPI_MOSI_MAGIC] + [0]*31
info_request_spi_msg_template = [SPI_MOSI_MAGIC, 0, SPI_MOSI_MSG_TYPE_INFO_REQUEST] + [0]*29
this_msg = list(info_request_spi_msg_template)
pcard_spi.xfer(this_msg)
time.sleep(0.1)
response = pcard_spi.xfer(list(nop_spi_msg_template))
time.sleep(0.1)
print(response)
if response[0] != 205:
for x in range(5):
print("!!!!!!!!!!!!!!!!! WRONG RESPONSE !!!!!!!!!!!!!!!!!")
else:
print("----------------- OK OK OK OK OK OK -----------------")
print("----------------- OK OK OK OK OK OK -----------------") | 27.694444 | 102 | 0.638248 | 482 | 2,991 | 3.721992 | 0.261411 | 0.046823 | 0.06243 | 0.070234 | 0.438127 | 0.371795 | 0.256968 | 0.133779 | 0.133779 | 0.133779 | 0 | 0.037866 | 0.17887 | 2,991 | 108 | 103 | 27.694444 | 0.69259 | 0.064527 | 0 | 0.317073 | 0 | 0 | 0.170129 | 0.008596 | 0 | 0 | 0.002865 | 0 | 0 | 1 | 0.036585 | false | 0 | 0.060976 | 0 | 0.097561 | 0.109756 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e54bb2a421e7b64f44e6913ef7732630a953e801 | 8,394 | py | Python | dataset/text.py | scfrank/deep-generative-lm | 70067fcda82aa035bba805ce6c2709097166a7a4 | [
"MIT"
] | null | null | null | dataset/text.py | scfrank/deep-generative-lm | 70067fcda82aa035bba805ce6c2709097166a7a4 | [
"MIT"
] | null | null | null | dataset/text.py | scfrank/deep-generative-lm | 70067fcda82aa035bba805ce6c2709097166a7a4 | [
"MIT"
] | null | null | null | """
Text datatset iterators, as an extension of the PyTorch Dataset class.
class SimpleTextData(): reads a text file line by line up to a specified sequence length.
class SimpleTextDataSplit(): extends SimpleTextData() by splitting the data in train and val sets.
class TextDataPadded(): extends SimpleTextData() by padding the text up to the specified sequence length.
"""
import os.path as osp
import sys
import numpy as np
import torch
from torch.utils.data import Dataset
# We include the path of the toplevel package in the system path so we can always use absolute imports within the package.
toplevel_path = osp.abspath(osp.join(osp.dirname(__file__), ".."))
if toplevel_path not in sys.path:
sys.path.insert(1, toplevel_path)
from util.error import InvalidLengthError # noqa: E402
__author__ = "Tom Pelsmaeker"
__copyright__ = "Copyright 2020"
class SimpleTextData(Dataset):
"""Dataset of text that reads the first N tokens from each line in the given textfile as data.
Args:
file(str): name of the file containing the text data already converted to indices.
seq_len(int): maximum length of sequences. Longer sequences will be cut at this length.
"""
def __init__(self, file, seq_len):
if seq_len == 0:
self._seq_len = len(max(open(file, "r"), key=len).split())
else:
self._seq_len = seq_len
self._data = [
line.split()[: self._seq_len] for line in open(file, "r") if line != "\n"
]
self._data_len = len(self._data)
def __len__(self):
return self._data_len
    def __getitem__(self, idx):
        # tokens are read from file as strings; cast to int for tensor creation
        return torch.LongTensor([int(t) for t in self._data[idx]])
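# Minimal usage sketch (the file name is a placeholder, not from this repo):
#   ds = SimpleTextData("train.ids", seq_len=50)
#   tokens = ds[0]  # LongTensor holding at most 50 token indices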
class TextDataSplit(SimpleTextData):
"""Dataset of text that allows a train/validation split from a single file. Extends SimpleTextData().
Args:
file(str): name of the file containing the text data already converted to indices.
seq_len(int): maximum length of sequences. Longer sequences will be cut at this length.
train(bool): True when training, False when testing.
"""
    def __init__(self, file, seq_len, train):
        super().__init__(file, seq_len)
        # 90/10 train/validation split over the lines of the file
        split = int(self._data_len * 0.9)
        if train:
            self._data = self._data[:split]
        else:
            self._data = self._data[split:]
        self._data_len = len(self._data)
class TextDataUnPadded(SimpleTextData):
"""
Dataset of text that prepares sequences for padding, but does not pad them yet. Extends SimpleTextData().
Args:
file(str): name of the file containing the text data already converted to indices.
seq_len(int): maximum length of sequences. shorter sequences will be padded to this length.
pad_token(int): token that is appended to sentences shorter than seq_len.
"""
def __init__(self, file, seq_len, pad_token):
super().__init__(file, seq_len)
# This class also provides reversed sequences that are needed in certain generative model training
self._reverse_data = [
line.split()[: self._seq_len][::-1]
for line in open(file, "r")
if line != "\n"
]
self._pad_token = pad_token
def __getitem__(self, idx):
return self._data[idx], self._reverse_data[idx], self._pad_token
class TextDataPadded(TextDataUnPadded):
"""
Dataset of text that pads sequences up to the specified sequence length. Extends TextDataUnPadded().
Args:
file(str): name of the file containing the text data already converted to indices.
seq_len(int): maximum length of sequences. shorter sequences will be padded to this length.
pad_token(int): token that is appended to sentences shorter than seq_len.
"""
def __init__(self, file, seq_len, pad_token):
super().__init__(file, seq_len, pad_token)
self._seq_lens = []
for line in self._data:
self._seq_lens.append(len(line))
if len(line) < self._seq_len:
line.extend([pad_token] * (self._seq_len - len(line)))
for reverse_line in self._reverse_data:
if len(reverse_line) < self._seq_len:
reverse_line.extend([pad_token] * (self._seq_len - len(reverse_line)))
self._seq_lens = torch.LongTensor(self._seq_lens)
self._data = torch.from_numpy(np.array(self._data, dtype=np.int64))
self._reverse_data = torch.from_numpy(
np.array(self._reverse_data, dtype=np.int64)
)
self._mask = 1.0 - (self._data == pad_token).float()
def __getitem__(self, idx):
return (
self._data[idx],
self._seq_lens[idx],
self._mask[idx],
self._reverse_data[idx],
)
def sort_collate(batch):
"""Custom collate_fn for DataLoaders, sorts data based on sequence lengths.
Note that it is assumed that the variable on which to sort will be in the second position of the input tuples.
Args:
batch(list of tuples): a batch of data provided by a DataLoader given a Dataset, i.e a list of length batch_size
of tuples, where each tuple contains the variables of the DataSet at a single index.
Returns:
list of tensors: the batch of data, with a tensor of length batch_size per variable in the DataSet,
sorted according to the second variable which is assumed to be length information. The list contains
[data, lengths, ...].
Raises:
InvalidLengthError: if the input has less than two variables per index.
"""
if len(batch[0]) < 2:
raise InvalidLengthError(
"Batch needs to contain at least data (batch[0]) and lengths (batch[1])."
)
# Unpack batch from list of tuples [(x_i, y_i, ...), ...] to list of tensors [x, y, ...]
batch = [torch.stack([b[i] for b in batch]) for i in range(len(batch[0]))]
# Get lengths from second tensor in batch and sort all batch data based on those lengths
_, indices = torch.sort(batch[1], descending=True)
batch = [data[indices] for data in batch]
return batch
def sort_pad_collate(batch):
"""Custom collate_fn for DataLoaders, pads data and sorts based on sequence lengths.
This collate function works together with the TextDataUnPadded Dataset, that provides a batch of data in the correct
format for this function to pad and sort.
Args:
        batch(list of tuples): a batch of data provided by a DataLoader given a Dataset, i.e. a list of length batch_size
of tuples, where each tuple contains the variables of the DataSet at a single index. Each tuple must contain
(data_i, reversed_data_i, pad_token).
Returns:
list of tensors: the batch of data, with a tensor of length batch_size per variable in the DataSet,
sorted according to the second variable which is assumed to be length information. The list contains:
[data, lengths, mask, reversed data].
Raises:
InvalidLengthError: if the input does not have three variables per index.
"""
if len(batch[0]) != 3:
raise InvalidLengthError(
"Batch needs to contain data (batch[0]), reverse_data (batch[1]) and pad_token (batch[2])."
)
# Unpack batch from list of tuples [(x_i, y_i, ...), ...] to list of lists [x, y, ...]
batch = [[b[i] for b in batch] for i in range(len(batch[0]))]
# Pad tensors
x_len = torch.tensor([len(line) for line in batch[0]])
max_len = x_len.max().item()
pad_token = batch[2][0]
for line in batch[0]:
if len(line) < max_len:
line.extend([pad_token] * (max_len - len(line)))
for line in batch[1]:
if len(line) < max_len:
line.extend([pad_token] * (max_len - len(line)))
# Store data tensors in correct format and order
batch[0] = torch.from_numpy(np.array(batch[0], dtype=np.int64))
batch.append(torch.from_numpy(np.array(batch[1], dtype=np.int64)))
# Store length and mask in correct format and order
batch[1] = x_len
batch[2] = 1.0 - (batch[0] == pad_token).float()
# Get lengths from second tensor in batch and sort all batch data based on those lengths
_, indices = torch.sort(batch[1], descending=True)
batch = [data[indices] for data in batch]
return batch
| 38.861111 | 122 | 0.65928 | 1,220 | 8,394 | 4.382787 | 0.186066 | 0.025809 | 0.014962 | 0.012717 | 0.594352 | 0.535814 | 0.465682 | 0.429026 | 0.41743 | 0.403217 | 0 | 0.008381 | 0.246605 | 8,394 | 215 | 123 | 39.04186 | 0.837128 | 0.481296 | 0 | 0.212121 | 0 | 0.010101 | 0.047827 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.10101 | false | 0 | 0.060606 | 0.040404 | 0.262626 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e54c9a30192e6b4af7abb9251e624d83d9672e92 | 2,938 | py | Python | object_detection/pytorch/demo/webcam.py | lamyiowce/training | da4c959b5a7b65091b850872cdd4014d768c087c | [
"Apache-2.0"
] | 567 | 2018-09-13T05:07:49.000Z | 2020-11-23T11:52:11.000Z | object_detection/pytorch/demo/webcam.py | lamyiowce/training | da4c959b5a7b65091b850872cdd4014d768c087c | [
"Apache-2.0"
] | 222 | 2018-09-14T10:15:39.000Z | 2020-11-20T22:21:09.000Z | object_detection/pytorch/demo/webcam.py | ltechkorea/mlperf-training | 498b945dd914573bdbf7a871eaeebd9388b60b76 | [
"Apache-2.0"
] | 279 | 2018-09-16T12:40:29.000Z | 2020-11-17T14:22:52.000Z | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import argparse
import cv2
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
import time
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Webcam Demo")
parser.add_argument(
"--config-file",
default="../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.7,
help="Minimum score for the prediction to be shown",
)
parser.add_argument(
"--min-image-size",
type=int,
default=224,
help="Smallest size of the image to feed to the model. "
"Model was trained with 800, which gives best results",
)
parser.add_argument(
"--show-mask-heatmaps",
dest="show_mask_heatmaps",
help="Show a heatmap probability for the top masks-per-dim masks",
action="store_true",
)
parser.add_argument(
"--masks-per-dim",
type=int,
default=2,
help="Number of heatmaps per dimension to show",
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
# load config from file and command-line arguments
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
# prepare object that handles inference plus adds predictions on top of image
coco_demo = COCODemo(
cfg,
confidence_threshold=args.confidence_threshold,
show_mask_heatmaps=args.show_mask_heatmaps,
masks_per_dim=args.masks_per_dim,
min_image_size=args.min_image_size,
)
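    # Open the default webcam (device index 0) and run detection until Esc (keycode 27) is pressed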
cam = cv2.VideoCapture(0)
while True:
start_time = time.time()
        ret_val, img = cam.read()
        if not ret_val:
            # Stop if the camera did not return a frame
            break
        composite = coco_demo.run_on_opencv_image(img)
print("Time: {:.2f} s / img".format(time.time() - start_time))
cv2.imshow("COCO detections", composite)
if cv2.waitKey(1) == 27:
break # esc to quit
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| 31.255319 | 88 | 0.663036 | 387 | 2,938 | 4.896641 | 0.509044 | 0.031662 | 0.053826 | 0.016887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014831 | 0.242682 | 2,938 | 93 | 89 | 31.591398 | 0.836854 | 0.26855 | 0 | 0.119403 | 0 | 0 | 0.266542 | 0.036133 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014925 | false | 0 | 0.074627 | 0 | 0.089552 | 0.014925 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e55136ee5d85881c01e65dc049c23752a163d827 | 8,328 | py | Python | changes/api/build_details.py | bowlofstew/changes | ebd393520e0fdb07c240a8d4e8747281b6186e28 | [
"Apache-2.0"
] | null | null | null | changes/api/build_details.py | bowlofstew/changes | ebd393520e0fdb07c240a8d4e8747281b6186e28 | [
"Apache-2.0"
] | null | null | null | changes/api/build_details.py | bowlofstew/changes | ebd393520e0fdb07c240a8d4e8747281b6186e28 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from collections import defaultdict
from flask_restful.reqparse import RequestParser
from itertools import groupby
from sqlalchemy.orm import contains_eager, joinedload, subqueryload_all
from uuid import UUID
from changes.api.base import APIView
from changes.api.serializer.models.testcase import TestCaseWithOriginCrumbler
from changes.config import db
from changes.constants import Result, Status
from changes.models import (
Build, BuildPriority, Source, Event, FailureReason, Job, TestCase,
BuildSeen, User
)
from changes.utils.originfinder import find_failure_origins
def find_changed_tests(current_build, previous_build, limit=25):
current_job_ids = [j.id.hex for j in current_build.jobs]
previous_job_ids = [j.id.hex for j in previous_build.jobs]
if not (current_job_ids and previous_job_ids):
return []
current_job_clause = ', '.join(
':c_job_id_%s' % i for i in range(len(current_job_ids))
)
previous_job_clause = ', '.join(
':p_job_id_%s' % i for i in range(len(previous_job_ids))
)
params = {}
for idx, job_id in enumerate(current_job_ids):
params['c_job_id_%s' % idx] = job_id
for idx, job_id in enumerate(previous_job_ids):
params['p_job_id_%s' % idx] = job_id
# find all tests that have appeared in one job but not the other
    # we have to build this query up manually as sqlalchemy doesn't support
# the FULL OUTER JOIN clause
query = """
SELECT c.id AS c_id,
p.id AS p_id
FROM (
SELECT label_sha, id
FROM test
WHERE job_id IN (%(current_job_clause)s)
) as c
FULL OUTER JOIN (
SELECT label_sha, id
FROM test
WHERE job_id IN (%(previous_job_clause)s)
) as p
ON c.label_sha = p.label_sha
WHERE (c.id IS NULL OR p.id IS NULL)
""" % {
'current_job_clause': current_job_clause,
'previous_job_clause': previous_job_clause
}
total = db.session.query(
'count'
).from_statement(
'SELECT COUNT(*) FROM (%s) as a' % (query,)
).params(**params).scalar()
if not total:
return {
'total': 0,
'changes': [],
}
results = db.session.query(
'c_id', 'p_id'
).from_statement(
'%s LIMIT %d' % (query, limit)
).params(**params)
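    # Collect test ids from both sides of the outer join so all rows can be fetched in one query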
all_test_ids = set()
for c_id, p_id in results:
if c_id:
all_test_ids.add(c_id)
else:
all_test_ids.add(p_id)
test_map = dict(
(t.id, t) for t in TestCase.query.filter(
TestCase.id.in_(all_test_ids),
).options(
joinedload('job', innerjoin=True),
)
)
diff = []
for c_id, p_id in results:
if p_id:
diff.append(('-', test_map[UUID(p_id)]))
else:
diff.append(('+', test_map[UUID(c_id)]))
return {
'total': total,
'changes': sorted(diff, key=lambda x: (x[1].package, x[1].name)),
}
def get_failure_reasons(build):
from changes.buildfailures import registry
rows = FailureReason.query.filter(
FailureReason.build_id == build.id,
)
failure_reasons = []
for row in rows:
failure_reasons.append({
'id': row.reason,
'reason': registry[row.reason].get_html_label(build),
'step_id': row.step_id,
'job_id': row.job_id,
'data': dict(row.data or {}),
})
return failure_reasons
def get_parents_last_builds(build):
    # A patch has only one parent, while a revision can have more.
    if build.source.patch:
        parents = [build.source.patch.parent_revision_sha]
    elif build.source.revision:
        parents = build.source.revision.parents
    else:
        # Neither a patch nor a revision: nothing to look up
        parents = []
    if parents:
parent_builds = list(Build.query.filter(
Build.project == build.project,
Build.status == Status.finished,
Build.id != build.id,
Source.patch_id == None, # NOQA
).join(
Source, Build.source_id == Source.id,
).options(
contains_eager('source').joinedload('revision'),
).filter(
Source.revision_sha.in_(parents)
).order_by(Build.date_created.desc()))
if parent_builds:
# This returns a list with the last build of each revision.
return [
list(builds)[0]
for sha, builds in groupby(
parent_builds,
lambda rev: rev.source.revision_sha
)
]
return []
class BuildDetailsAPIView(APIView):
post_parser = RequestParser()
post_parser.add_argument('priority', choices=BuildPriority._member_names_)
def get(self, build_id):
build = Build.query.options(
joinedload('project', innerjoin=True),
joinedload('author'),
joinedload('source').joinedload('revision'),
subqueryload_all('stats'),
).get(build_id)
if build is None:
return '', 404
try:
most_recent_run = Build.query.filter(
Build.project == build.project,
Build.date_created < build.date_created,
Build.status == Status.finished,
Build.id != build.id,
Source.patch_id == None, # NOQA
).join(
Source, Build.source_id == Source.id,
).options(
contains_eager('source').joinedload('revision'),
joinedload('author'),
).order_by(Build.date_created.desc())[0]
except IndexError:
most_recent_run = None
jobs = list(Job.query.filter(
Job.build_id == build.id,
))
# identify failures
test_failures = TestCase.query.options(
joinedload('job', innerjoin=True),
).filter(
TestCase.job_id.in_([j.id for j in jobs]),
TestCase.result == Result.failed,
).order_by(TestCase.name.asc())
num_test_failures = test_failures.count()
test_failures = test_failures[:25]
failures_by_job = defaultdict(list)
for failure in test_failures:
failures_by_job[failure.job].append(failure)
failure_origins = find_failure_origins(
build, test_failures)
for test_failure in test_failures:
test_failure.origin = failure_origins.get(test_failure)
# identify added/removed tests
if most_recent_run and build.status == Status.finished:
changed_tests = find_changed_tests(build, most_recent_run)
else:
changed_tests = []
seen_by = list(User.query.join(
BuildSeen, BuildSeen.user_id == User.id,
).filter(
BuildSeen.build_id == build.id,
))
extended_serializers = {
TestCase: TestCaseWithOriginCrumbler(),
}
event_list = list(Event.query.filter(
Event.item_id == build.id,
).order_by(Event.date_created.desc()))
context = self.serialize(build)
context.update({
'jobs': jobs,
'seenBy': seen_by,
'events': event_list,
'failures': get_failure_reasons(build),
'testFailures': {
'total': num_test_failures,
'tests': self.serialize(test_failures, extended_serializers),
},
'testChanges': self.serialize(changed_tests, extended_serializers),
'parents': self.serialize(get_parents_last_builds(build)),
})
return self.respond(context)
def post(self, build_id):
build = Build.query.options(
joinedload('project', innerjoin=True),
joinedload('author'),
joinedload('source').joinedload('revision'),
).get(build_id)
if build is None:
return '', 404
args = self.post_parser.parse_args()
if args.priority is not None:
build.priority = BuildPriority[args.priority]
db.session.add(build)
context = self.serialize(build)
return self.respond(context, serialize=False)
| 31.074627 | 79 | 0.587416 | 975 | 8,328 | 4.809231 | 0.213333 | 0.022393 | 0.017914 | 0.014929 | 0.259117 | 0.204948 | 0.178076 | 0.178076 | 0.143741 | 0.121135 | 0 | 0.002604 | 0.308237 | 8,328 | 267 | 80 | 31.191011 | 0.811317 | 0.040226 | 0 | 0.266055 | 0 | 0 | 0.099587 | 0.006138 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022936 | false | 0 | 0.059633 | 0 | 0.137615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e55245ae534826a12c1057928c01b0d967155c85 | 633 | py | Python | test/test_one_or_greater.py | kant/stream-daemon | 729bc576b74dcd9f1e2021a2433d176d33c413c9 | [
"MIT"
] | 2 | 2016-06-06T22:50:21.000Z | 2018-01-17T16:14:05.000Z | test/test_one_or_greater.py | kant/stream-daemon | 729bc576b74dcd9f1e2021a2433d176d33c413c9 | [
"MIT"
] | null | null | null | test/test_one_or_greater.py | kant/stream-daemon | 729bc576b74dcd9f1e2021a2433d176d33c413c9 | [
"MIT"
] | 1 | 2018-08-27T19:57:03.000Z | 2018-08-27T19:57:03.000Z | import unittest
from Monitor import five_or_greater
class MockProject(object):
def __init__(self, message_count, keyword_counts):
self.message_count = message_count
self.keyword_counts = keyword_counts
class TestOneOrGreater(unittest.TestCase):
def test_some_above_some_below(self):
total = 1000
sample_dataset = {
"keep1" : 1000,
"keep2" : 800,
"not1" : 5,
"keep3" : 100,
"not2" : 1,
}
project = MockProject(total, sample_dataset)
        self.assertEqual(five_or_greater(project), ["keep1", "keep2", "keep3"])
e55aa2f041ab96556aa76a0a7df9e2eb922247e5 | 1,570 | py | Python | row.py | txt/se4dm | c38c742039eaa7a15730eb655c4eed067c8a5409 | [
"Unlicense"
] | null | null | null | row.py | txt/se4dm | c38c742039eaa7a15730eb655c4eed067c8a5409 | [
"Unlicense"
] | 9 | 2015-10-30T12:46:53.000Z | 2015-11-25T03:27:49.000Z | row.py | txt/se4dm | c38c742039eaa7a15730eb655c4eed067c8a5409 | [
"Unlicense"
] | 2 | 2018-06-22T15:23:44.000Z | 2020-11-05T01:47:54.000Z | from __future__ import print_function, division
import sys
sys.dont_write_bytecode = True
"""
# Rows
"""
from lib import *
class Row:
n = -1
def __init__(i,t):
Row.n = i.n = Row.n + 1
i.t, i.dists = t,{}
def dist(j,k):
if j.n == k.n : return 0
if j.n > k.n : return k.dist(j)
key = (j.n, k.n)
    if key not in j.dists :
      j.dists[key] = dist(j.t,j,k)
return j.dists[key]
  def furthest(j,lst=None,best=-1,better=gt):
    lst = lst or j.t.rows
    out = j
    for k in lst:
      tmp = j.dist(k)
      if tmp and better(tmp,best):
        out,best = k,tmp
    return out # the row itself, not the distance; closest() relies on this too
def closest(j,lst=None):
return j.furthest(lst,best=1e32,better=lt)
  def knn(i,k=1,lst=None):
    lst = lst or i.t.rows
    out = {}
    for r1 in lst:
      all = [(dist(i.t,r1,r2),r2) for r2 in lst if r2 is not r1]
      out[r1] = sorted(all, key=lambda pair: pair[0])[:k]
    return out
def dist(t,j,k):
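  # j and k are rows whose cells are assumed to be indexable by column position (xs[col.pos]);
  # "?" marks a missing value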
def colxy(cols,xs,ys):
for col in cols:
x = xs[col.pos]
y = ys[col.pos]
if x == "?" and y=="?": continue
yield col,x,y
def far(col,x,y):
y = col.norm(y)
x = 0 if y > 0.5 else 1
return x,y
#---------
n = all = 0
  for col,x,y in colxy(t.indep.syms,j,k):
    if x == "?" or y == "?":
      n += 1
      all += 1
    else:
      inc = 0 if x == y else 1
      n += 1
      all += inc
for col,x,y in colxy(t.indep.nums,j,k):
if x == "?" : x,y = far(col,x,y)
elif y == "?" : y,x = far(col,y,x)
else : x,y = col.norm(x), col.norm(y)
n += 1
all += (x-y)**2
return all**0.5 / n**0.5
| 22.112676 | 50 | 0.499363 | 302 | 1,570 | 2.559603 | 0.235099 | 0.023286 | 0.020699 | 0.015524 | 0.093144 | 0.072445 | 0 | 0 | 0 | 0 | 0 | 0.028999 | 0.319108 | 1,570 | 70 | 51 | 22.428571 | 0.694107 | 0.005732 | 0 | 0.081967 | 0 | 0 | 0.003886 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131148 | false | 0 | 0.04918 | 0.016393 | 0.311475 | 0.016393 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e55cd024add940dff887d317c65342a61070e10c | 306 | py | Python | hyperparams.py | nce3xin/spam | 908421d5cf2dd103e2a7044bf1c8586aaf5f2ada | [
"MIT"
] | 1 | 2019-03-13T10:49:25.000Z | 2019-03-13T10:49:25.000Z | hyperparams.py | nce3xin/spam | 908421d5cf2dd103e2a7044bf1c8586aaf5f2ada | [
"MIT"
] | null | null | null | hyperparams.py | nce3xin/spam | 908421d5cf2dd103e2a7044bf1c8586aaf5f2ada | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 9 11:17:24 2018
@author: nce3xin
"""
seed_num=1
learning_rate=1e-3
#epochs=109
#epochs=90
epochs=20
batch_size=16
log_interval=1
no_cuda=False
MODEL='LSTM'
cnn_out_dims=25
CNN_mapping=False
normalization=False
standard_scale=False
min_max_scaler=False | 11.769231 | 35 | 0.754902 | 54 | 306 | 4.074074 | 0.851852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.104089 | 0.120915 | 306 | 26 | 36 | 11.769231 | 0.713755 | 0.310458 | 0 | 0 | 0 | 0 | 0.019802 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e560f9f2e17600df62f3bea76144c341a81a3cc7 | 7,581 | py | Python | source/02_ssd_large/lib/model.py | toshi-k/kaggle-3d-object-detection-for-autonomous-vehicles | af2e0db16281fb997a9bd5149c478095128a627e | [
"MIT"
] | 24 | 2019-11-28T05:54:58.000Z | 2021-06-14T07:38:30.000Z | source/03_ssd_small/lib/model.py | toshi-k/kaggle-3d-object-detection-for-autonomous-vehicles | af2e0db16281fb997a9bd5149c478095128a627e | [
"MIT"
] | null | null | null | source/03_ssd_small/lib/model.py | toshi-k/kaggle-3d-object-detection-for-autonomous-vehicles | af2e0db16281fb997a9bd5149c478095128a627e | [
"MIT"
] | 5 | 2019-12-06T05:59:32.000Z | 2021-09-16T13:30:29.000Z | import math
from pathlib import Path
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import models
from PIL import Image
from lib.default_box import dbox_params
from lib.visualize import Visualizer
from common import numpy2pil
def set_batch_norm_eval(model):
bn_count = 0
bn_training = 0
for module in model.modules():
if isinstance(module, torch.nn.modules.batchnorm.BatchNorm2d):
if module.training:
bn_training += 1
module.eval()
bn_count += 1
module.weight.requires_grad = False
module.bias.requires_grad = False
print('{} BN modules are set to eval'.format(bn_count))
class Model(nn.Module):
def __init__(self):
super().__init__()
self.num_classes = 10
        self.output_channel = self.num_classes + 7  # 10 class scores + 7 box parameters
resnet34 = models.resnet34(pretrained=True)
self.resnet34_main = nn.Sequential(
resnet34.conv1,
resnet34.bn1,
resnet34.relu,
resnet34.maxpool,
resnet34.layer1,
resnet34.layer2,
resnet34.layer3
)
self.conv_ex1 = resnet34.layer4
self.conv_ex2 = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=1, padding=0, stride=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=2),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True)
)
self.conv_up2 = nn.Sequential(
nn.ConvTranspose2d(512, 256, kernel_size=3, padding=1, stride=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(256, 512, kernel_size=2, padding=0, stride=2),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True)
)
# self.conv_ex3 = nn.Sequential(nn.Conv2d(512, 128, kernel_size=1, padding=0, stride=1),
# nn.ReLU(inplace=True),
# nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=2),
# nn.ReLU(inplace=True)
# )
        # self.ex0_intermediate = nn.Conv2d(256, 4 * self.output_channel, kernel_size=3, padding=1, stride=1)
        self.ex1_intermediate = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, padding=1, stride=1),
            nn.Softplus(),
            nn.Conv2d(512, 4 * self.output_channel, kernel_size=1, padding=0, stride=1)
        )
        # self.ex2_intermediate = nn.Conv2d(512, 4 * self.output_channel, kernel_size=3, padding=1, stride=1)
        # self.ex3_intermediate = nn.Conv2d(256, 32, kernel_size=3, padding=1, stride=1)
@staticmethod
def header(h, img_size):
batch_size = len(h)
step = img_size / h.shape[-1]
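        # Centers of the default boxes on the feature-map grid, in input-image pixel coordinates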
points = np.arange(step / 2 - 0.5, img_size, step, dtype=np.float32)
assignment, x, y, length, width, z, height, rotate = torch.split(
h, [10, 1, 1, 1, 1, 1, 1, 1], dim=2)
x_points = np.tile(points.reshape(1, 1, 1, h.shape[-1], 1), (batch_size, len(dbox_params), 1, 1, h.shape[-1]))
y_points = np.tile(points.reshape(1, 1, 1, 1, h.shape[-1]), (batch_size, len(dbox_params), 1, h.shape[-1], 1))
rotate_vars = dbox_params['rotate_vars'].values
rotate_vars = np.tile(rotate_vars.reshape(1, len(rotate_vars), 1, 1, 1),
(batch_size, 1, 1, h.shape[-1], h.shape[-1]))
length_shifts = dbox_params['length_shifts'].values
length_shifts = np.tile(length_shifts.reshape(1, len(length_shifts), 1, 1, 1),
(batch_size, 1, 1, h.shape[-1], h.shape[-1]))
width_shifts = dbox_params['width_shifts'].values
width_shifts = np.tile(width_shifts.reshape(1, len(width_shifts), 1, 1, 1),
(batch_size, 1, 1, h.shape[-1], h.shape[-1]))
height_shifts = dbox_params['height_shifts'].values
height_shifts = np.tile(height_shifts.reshape(1, len(height_shifts), 1, 1, 1),
(batch_size, 1, 1, h.shape[-1], h.shape[-1]))
assignment = torch.softmax(assignment, dim=2) # [batch_size, dbox, channel, x, y]
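        # Decode the raw network outputs into absolute box parameters around each default box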
x_abs = torch.tanh(x) * step + torch.from_numpy(x_points).cuda()
y_abs = torch.tanh(y) * step + torch.from_numpy(y_points).cuda()
z_abs = z + 1010.0
length_abs = torch.exp(length * 0.1 + math.log2(step) / 1.5) * torch.from_numpy(length_shifts).cuda() + 1
width_abs = torch.exp(width * 0.1 + math.log2(step) / 1.5) * torch.from_numpy(width_shifts).cuda() + 1
height_abs = torch.exp(height * 0.1 + math.log2(step) / 1.5) * torch.from_numpy(height_shifts).cuda() + 1
rotate_abs = torch.atan(rotate) + torch.from_numpy(rotate_vars).cuda()
return torch.cat([assignment, x_abs, y_abs, length_abs, width_abs, z_abs, height_abs, rotate_abs], dim=2)
def forward_main(self, x):
list_output = list()
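        # ResNet-34 backbone up to layer3; layer4 and an extra stride-2 conv go down, a transposed conv comes back up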
main_out = self.resnet34_main.forward(x)
ex1_down = F.relu(self.conv_ex1(main_out))
ex2_down = self.conv_ex2(ex1_down)
ex1_up = self.conv_up2(ex2_down)
ex1_out = torch.cat([ex1_down, ex1_up], 1)
ex1_branch = self.ex1_intermediate(ex1_out) # 24x24
list_output.append(ex1_branch)
return list_output
def forward(self, x):
list_output = list()
list_main = self.forward_main(x)
for out in list_main:
size = out.shape[-1]
            h = self.header(out.reshape(-1, 4, self.output_channel, size, size), img_size=x.shape[-1])
            list_output.append(h.reshape(-1, 4 * self.output_channel, size, size))
return list_output
def build_model():
model = Model()
model.cuda()
return model
if __name__ == '__main__':
dir_debug = Path('_debug')
dir_debug.mkdir(exist_ok=True)
model = build_model()
print(model)
viz = Visualizer('colors.json')
# 768 x 768
in_arr1 = np.zeros((2, 3, 768, 768), dtype=np.float32)
in_tensor1 = torch.from_numpy(in_arr1)
out_vars1 = model.forward(in_tensor1.cuda())
[print(out_var.shape) for out_var in out_vars1]
out_var_numpy1 = [tensor.cpu().data.numpy() for tensor in out_vars1]
out_var_numpy_batch1 = [[tensor[b, :, :, :] for tensor in out_var_numpy1] for b in range(2)]
img = viz.draw_predicted_boxes(out_var_numpy_batch1[0], dbox_params, img_size=in_arr1.shape[-1])
numpy2pil(img).save(dir_debug / 'sample_1-0.png')
img = viz.draw_predicted_boxes(out_var_numpy_batch1[1], dbox_params, img_size=in_arr1.shape[-1])
numpy2pil(img).save(dir_debug / 'sample_1-1.png')
# 1024 x 1024
in_arr2 = np.zeros((2, 3, 1024, 1024), dtype=np.float32)
in_tensor2 = torch.from_numpy(in_arr2)
out_vars2 = model.forward(in_tensor2.cuda())
[print(out_var.shape) for out_var in out_vars2]
out_var_numpy2 = [tensor.cpu().data.numpy() for tensor in out_vars2]
out_var_numpy_batch2 = [[tensor[b, :, :, :] for tensor in out_var_numpy2] for b in range(2)]
img = viz.draw_predicted_boxes(out_var_numpy_batch2[0], dbox_params, img_size=in_arr2.shape[-1])
numpy2pil(img).save(dir_debug / 'sample_2-0.png')
img = viz.draw_predicted_boxes(out_var_numpy_batch2[1], dbox_params, img_size=in_arr2.shape[-1])
numpy2pil(img).save(dir_debug / 'sample_2-1.png')
| 34.616438 | 118 | 0.61298 | 1,092 | 7,581 | 4.044872 | 0.165751 | 0.012225 | 0.020602 | 0.021734 | 0.396423 | 0.35952 | 0.347295 | 0.313335 | 0.238397 | 0.221417 | 0 | 0.065341 | 0.25709 | 7,581 | 218 | 119 | 34.775229 | 0.718928 | 0.086136 | 0 | 0.115108 | 0 | 0 | 0.022993 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043165 | false | 0 | 0.086331 | 0 | 0.165468 | 0.028777 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5616dbad551125f0ff82bbdd7078f807585a1f9 | 2,560 | py | Python | tests/datastructures_tests/physical_data_tests.py | czbiohub/reconstruct-order | e729ae3871aea0a5ec2d42744a9448c7f0a93037 | [
"Unlicense"
] | 6 | 2019-10-30T23:00:01.000Z | 2021-03-02T19:09:07.000Z | tests/datastructures_tests/physical_data_tests.py | czbiohub/ReconstructOrder | e729ae3871aea0a5ec2d42744a9448c7f0a93037 | [
"Unlicense"
] | 14 | 2019-07-08T22:51:29.000Z | 2019-07-13T15:44:01.000Z | tests/datastructures_tests/physical_data_tests.py | mehta-lab/reconstruct-order | e729ae3871aea0a5ec2d42744a9448c7f0a93037 | [
"Unlicense"
] | 2 | 2020-05-02T23:28:36.000Z | 2020-07-16T23:46:46.000Z | import numpy as np
import pytest, os
from numpy.testing import assert_array_equal
from ReconstructOrder.datastructures.physical_data import PhysicalData
def test_basic_constructor_nparray():
"""
test assignment using numpy arrays
"""
phys = PhysicalData()
phys.I_trans = np.ones((512, 512))
phys.polarization = 2 * np.ones((512, 512))
phys.retard = 3 * np.ones((512, 512))
phys.depolarization = 4 * np.ones((512, 512))
phys.azimuth = 5 * np.ones((512, 512))
phys.azimuth_degree = 6 * np.ones((512, 512))
phys.azimuth_vector = 7 * np.ones((512, 512))
assert_array_equal(phys.I_trans, np.ones((512, 512)))
assert_array_equal(phys.polarization, 2*np.ones((512, 512)))
assert_array_equal(phys.retard, 3*np.ones((512, 512)))
assert_array_equal(phys.depolarization, 4*np.ones((512, 512)))
assert_array_equal(phys.azimuth, 5*np.ones((512, 512)))
assert_array_equal(phys.azimuth_degree, 6*np.ones((512, 512)))
assert_array_equal(phys.azimuth_vector, 7*np.ones((512, 512)))
def test_basic_constructor_memap(setup_temp_data):
"""
test assignment using memory mapped files
"""
mm = setup_temp_data
phys = PhysicalData()
phys.I_trans = mm
phys.polarization = 2 * mm
phys.retard = 3 * mm
phys.depolarization = 4 * mm
phys.azimuth = 5 * mm
phys.azimuth_degree = 6 * mm
phys.azimuth_vector = 7 * mm
assert_array_equal(phys.I_trans, mm)
assert_array_equal(phys.polarization, 2*mm)
assert_array_equal(phys.retard, 3*mm)
assert_array_equal(phys.depolarization, 4*mm)
assert_array_equal(phys.azimuth, 5*mm)
assert_array_equal(phys.azimuth_degree, 6*mm)
assert_array_equal(phys.azimuth_vector, 7*mm)
def test_instances():
"""
test instance attributes
"""
phs1 = PhysicalData()
phs2 = PhysicalData()
    with pytest.raises(AssertionError):
        assert phs1 == phs2
    with pytest.raises(AssertionError):
        phs1.retard = 1
        phs2.retard = 2
        assert phs1.retard == phs2.retard
def test_private_access(setup_physical_data):
"""
test that private attributes are not accessible
"""
phys = setup_physical_data
    # pytest.raises only verifies up to the first raising statement, so check each attribute separately
    with pytest.raises(AttributeError):
        print(phys.__I_trans)
    with pytest.raises(AttributeError):
        print(phys.__retard)
# ==== Attribute assignment ==========
def test_assignment(setup_physical_data):
"""
test exception handling of improper assignment
"""
phys = setup_physical_data
with pytest.raises(TypeError):
phys.incorrect_attribute = 1 | 28.444444 | 70 | 0.679688 | 344 | 2,560 | 4.851744 | 0.209302 | 0.098862 | 0.143799 | 0.100659 | 0.525464 | 0.47154 | 0.37867 | 0.191132 | 0.070102 | 0 | 0 | 0.058077 | 0.199609 | 2,560 | 90 | 71 | 28.444444 | 0.756467 | 0.091406 | 0 | 0.109091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.345455 | 1 | 0.090909 | false | 0 | 0.072727 | 0 | 0.163636 | 0.036364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5620e85ec34ab2ff5817e8825c91c57685d44ba | 6,349 | py | Python | 2019/14.py | IsaacG/Advent-of-Code | 1e970c6a4abc4a2025f7c70323e70aee64d0bc21 | [
"MIT"
] | 3 | 2020-12-19T09:01:03.000Z | 2021-12-16T13:05:03.000Z | 2019/14.py | IsaacG/Advent-of-Code | 1e970c6a4abc4a2025f7c70323e70aee64d0bc21 | [
"MIT"
] | null | null | null | 2019/14.py | IsaacG/Advent-of-Code | 1e970c6a4abc4a2025f7c70323e70aee64d0bc21 | [
"MIT"
] | null | null | null | #!/bin/python
"""Day 14: Space Stoichiometry.
Handle chemical reactions, converting ORE to FUEL.
"""
import collections
import math
import typer
from typing import Dict, List, Set, Tuple
import data
from lib import aoc
SAMPLE = data.D14
TRILLION = int(1e12)
class Reaction:
"""Wrapper around a single reaction."""
def __init__(self, product: Tuple[int, str], reactants: List[Tuple[int, str]]):
self._reactants = reactants
self.product_amt, self.product = product
self.reactants = {r[1] for r in self._reactants}
def needed(self, count: int) -> Tuple[List[Tuple[int, str]], int]:
"""Calculate much of of each reactant is needed to make `count` product.
Returns the reactants needed and the amount of product produced.
"""
factor = math.ceil(count / self.product_amt)
return [(factor * c, e) for c, e in self._reactants], factor * self.product_amt
class Day14(aoc.Challenge):
TESTS = (
aoc.TestCase(inputs=SAMPLE[0], part=1, want=165),
aoc.TestCase(inputs=SAMPLE[1], part=1, want=13312),
aoc.TestCase(inputs=SAMPLE[2], part=1, want=180697),
aoc.TestCase(inputs=SAMPLE[3], part=1, want=2210736),
aoc.TestCase(inputs=SAMPLE[1], part=2, want=82892753),
aoc.TestCase(inputs=SAMPLE[2], part=2, want=5586022),
aoc.TestCase(inputs=SAMPLE[3], part=2, want=460664),
)
def part1(self, reactions: Dict[str, Reaction]) -> int:
"""Calculate how much ore is needed for 1 unit of fuel."""
return self.ore_per_fuel(reactions, 1)
def part2(self, reactions: Dict[str, Reaction]) -> int:
"""Determine how much fuel can be made with 1e12 ore.
Use the `ore_per_fuel()` function to binary search from 0 to 2e12 / ore_per_fuel(1).
"""
low, high = 1, 2 * TRILLION // self.ore_per_fuel(reactions, 1)
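        # Ore cost grows monotonically with fuel, so binary search finds the largest affordable amount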
while (high - low) > 1:
mid = (low + high) // 2
ore = self.ore_per_fuel(reactions, mid)
if ore == TRILLION:
# Unlikely to occur but it doesn't hurt to be safe.
return mid
elif ore > TRILLION:
high = mid
else:
low = mid
return low
def part2_via_reactions(self, reactions: Dict[str, Reaction]) -> int:
"""Solve part2 by actually running reactions until we run out of ore."""
# Track inventory of products as we run reactions and have leftovers.
inventory = {product: 0 for product in reactions}
inventory['ORE'] = TRILLION
def react(product: str, amount: int, inv: Dict[str, int]) -> bool:
"""Run a reaction to produce `amount` of `product` using mutatable inventory `inv`.
Returns a bool indicating if we can actually pull off the reaction. On False, `inv`
is a bit trashed.
"""
def _react(product, amount):
"""Closure on `inv` to avoid passing it around."""
# If we do not have enough ore and are trying to produce some, this reaction fails.
if product == 'ORE':
return False
needs, gets = reactions[product].needed(amount)
# Produce all the needed reactants to run the reaction.
# Some reactants might use up others to be formed, hence the loop.
while any(inv[reactant] < uses for uses, reactant in needs):
for uses, reactant in needs:
if inv[reactant] >= uses:
continue
# We need more of this reactant. Try to produce it. Mutates `inv`.
short = uses - inv[reactant]
if not _react(reactant, short):
return False
# Mutate `inv` and run the reaction. Use up reactants, produce product.
for uses, reactant in needs:
inv[reactant] -= uses
inv[product] += gets
return True
return _react(product, amount)
# Try to produce fuel in large quantities at first.
# Reduce reaction size as they fail.
volume = TRILLION // self.part1(reactions)
while True:
# Since failed reactions mutate the inventory, first see if they will work
# on a copy. Then actually update the inventory.
if react('FUEL', volume, inventory.copy()):
react('FUEL', volume, inventory)
else:
# Failed to produce 1 fuel. We are at the end.
if volume == 1:
return inventory['FUEL']
volume = volume // 2 or 1
def ore_per_fuel(self, reactions: Dict[str, Reaction], fuel: int) -> int:
"""Calculate how much ore is required to produce `fuel` units of fuel."""
_dependencies = {'ORE': set()} # type: Dict[str, Set[str]]
def dependencies(product: str) -> Set[str]:
"""Compute *all* reactants (recursively) involved in producing `product`."""
# Cache results for dynamic programming.
if product not in _dependencies:
# Collect all reactants ... recursively.
deps = set(reactions[product].reactants)
for reactant in list(deps):
deps.update(dependencies(reactant))
_dependencies[product] = deps
return _dependencies[product]
# Iteratively resolve all products to the reactants needed to produce them.
# Stop when we get down to just ore.
want = collections.defaultdict(int)
want['FUEL'] = fuel
while list(want.keys()) != ['ORE']:
# Find all products which are not also reactants of other products.
# If a product is also a reactant, we may need more of it so it cannot yet be solved.
products = {r for r in want.keys() if not any(r in dependencies(other) for other in want)}
for product in products:
# Add all the required reactants to the want list and remove the product.
for amount, reactant in reactions[product].needed(want[product])[0]:
want[reactant] += amount
del want[product]
return want['ORE']
def parse_input(self, puzzle_input: str) -> Dict[str, Reaction]:
"""Build a dictionary of material produced to Reaction."""
reactions = {} # type: Dict[str, Reaction]
def to_tuple(pair: str) -> Tuple[int, str]:
a, b = pair.split()
return (int(a), b)
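        # Each input line looks like "7 A, 1 B => 1 C": reactants left of '=>', product on the right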
for line in puzzle_input.split('\n'):
reactants, product = line.split('=>')
reaction = Reaction(
to_tuple(product),
[to_tuple(p) for p in reactants.split(', ')],
)
reactions[reaction.product] = reaction
return reactions
if __name__ == '__main__':
typer.run(Day14().run)
# vim:ts=2:sw=2:expandtab
| 35.668539 | 96 | 0.642778 | 877 | 6,349 | 4.602052 | 0.279361 | 0.013875 | 0.029485 | 0.039891 | 0.1167 | 0.087711 | 0 | 0 | 0 | 0 | 0 | 0.020088 | 0.247283 | 6,349 | 177 | 97 | 35.870057 | 0.82444 | 0.335486 | 0 | 0.059406 | 0 | 0 | 0.010968 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108911 | false | 0 | 0.059406 | 0 | 0.326733 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5636f16a4be081479c9bb8479ea7b652ed01784 | 530 | py | Python | src/pynauty/tests/test_autgrp.py | sammorley-short/pynauty-1 | 852ee738174179c242913ff2afa8b47715d0947b | [
"Apache-2.0"
] | 16 | 2021-02-05T10:15:57.000Z | 2022-03-07T21:51:09.000Z | src/pynauty/tests/test_autgrp.py | sammorley-short/pynauty-1 | 852ee738174179c242913ff2afa8b47715d0947b | [
"Apache-2.0"
] | 20 | 2021-01-31T11:48:56.000Z | 2022-01-25T15:16:05.000Z | src/pynauty/tests/test_autgrp.py | sammorley-short/pynauty-1 | 852ee738174179c242913ff2afa8b47715d0947b | [
"Apache-2.0"
] | 6 | 2021-02-18T11:55:17.000Z | 2021-08-21T03:24:58.000Z | #!/usr/bin/env python
import sys
from pynauty import autgrp, Version
import pytest
# List of graphs for testing
#
# Structure:
# [[name, Graph, numorbit, grpsize, generators]]
#
# numorbit, grpsize, generators were calculated by dreadnaut
#
def test_autgrp(graph):
gname, g, numorbit, grpsize, gens = graph
print(Version())
print('%-17s ...' % gname, end=' ')
sys.stdout.flush()
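    # autgrp returns (generators, grpsize1, grpsize2, orbits, numorbits)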
generators, order, o2, orbits, orbit_no = autgrp(g)
assert generators == gens and orbit_no == numorbit and order == grpsize
| 24.090909 | 75 | 0.683019 | 68 | 530 | 5.279412 | 0.632353 | 0.125348 | 0.139276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006993 | 0.190566 | 530 | 21 | 76 | 25.238095 | 0.829837 | 0.309434 | 0 | 0 | 0 | 0 | 0.027933 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.4 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e565c470b23648889679a52fd97863eab35ec86e | 3,496 | py | Python | Game/images.py | mrElnekave/Hallow-Valley | 6c3ba0dc3932839941a00362da0212850b2b20a6 | [
"MIT"
] | null | null | null | Game/images.py | mrElnekave/Hallow-Valley | 6c3ba0dc3932839941a00362da0212850b2b20a6 | [
"MIT"
] | null | null | null | Game/images.py | mrElnekave/Hallow-Valley | 6c3ba0dc3932839941a00362da0212850b2b20a6 | [
"MIT"
] | null | null | null | import pygame, constants, copy
# pygame.init()
pygame.display.set_mode(constants.default_size)
current_path = constants.current_path + "Pixel Images\\"
def load_img(path, colorkey=(255,255,255)):
img = pygame.image.load(current_path + path).convert()
img.set_colorkey(colorkey)
return img
def create_path(path:str):
"""
:param path:path is the relative path from the pixel images folder
:return: the relative path from roots of project
"""
return current_path + path
def darken_except(pic, pos):
dark_picture = obscure(pic, (0,0,0), 200)
pygame.draw.circle(dark_picture, (255, 255, 255), pos, 20)
dark_picture.set_colorkey((255,255,255))
    pic.blit(dark_picture, (0, 0))
def switch_base():
global menu_base
if menu_base == menu_base_dark:
menu_base = menu_base_clear
else:
menu_base = menu_base_dark
def obscure(pic, color, alpha):
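    # Build a translucent overlay surface the same size as pic; callers blit it on top to dim the scene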
overlay = pygame.Surface(pic.get_size())
overlay.fill(color)
overlay.set_alpha(alpha)
return overlay
# intro
small_bolt = load_img("small_bolt.png", (0, 0, 0))
medium_bolt = load_img("medium_bolt.png", (0, 0, 0))
large_bolt = load_img("large_bolt.png", (0, 0, 0))
clearCloud = pygame.image.load(create_path("Clear Clouds.png"))
stormCloud = pygame.image.load(create_path("Storm Clouds.png"))
mountain_range_height = 200
menu_base = pygame.transform.scale(load_img("main_menu.png"), constants.size)
mountain_1 = load_img("Title Screen Mountain.png", (0, 0, 0))
mountain_2 = load_img("Title Screen Mountain 2.png", (0, 0, 0))
mountain_3 = load_img("Title Screen Mountain 3.png", (0, 0, 0))
pygame.draw.rect(menu_base, (139, 195, 74), pygame.Rect((0,mountain_range_height + mountain_1.get_height() - 20), menu_base.get_size()))
menu_base.blit(mountain_1, (-20, mountain_range_height))
menu_base.blit(mountain_2, (200, mountain_range_height))
menu_base.blit(mountain_3, (120, mountain_range_height))
menu_base_clear = copy.copy(menu_base)
menu_base = menu_base_clear
menu_base_clear.blit(pygame.transform.scale(clearCloud, (60,20)), (15,20))
menu_base_clear.blit(pygame.transform.scale(clearCloud, (70,30)), (70,40))
menu_base_clear.blit(clearCloud, (120,0))
menu_base_clear.blit(pygame.transform.scale(clearCloud, (79,30)), (250,30))
menu_base_clear.blit(clearCloud, (275,0))
menu_base_dark = copy.copy(menu_base)
dark_picture = obscure(menu_base_dark, (0,0,0), 200)
# drawing on all the lightnings
menu_base_dark.blit(dark_picture, (0, 0))
menu_base_dark.blit(pygame.transform.scale(stormCloud, (60,20)), (15,20))
menu_base_dark.blit(pygame.transform.scale(stormCloud, (70,30)), (70,40))
menu_base_dark.blit(stormCloud, (120,0))
menu_base_dark.blit(pygame.transform.scale(stormCloud, (79,30)), (250,30))
menu_base_dark.blit(stormCloud, (275,0))
menu_base_dark.blit(small_bolt, (40, 40))
menu_base_dark.blit(small_bolt, (200, 50))
menu_base_dark.blit(medium_bolt, (100, 70))
menu_base_dark.blit(medium_bolt, (350, 10))
menu_base_dark.blit(medium_bolt, (150, 20))
menu_base_dark.blit(medium_bolt, (300, 60))
# map and notifs
demo_map = pygame.image.load(create_path("Demo Map.png")).convert()
demo_map = pygame.transform.scale(demo_map,(360,360))
demo_mask = demo_map.copy()
demo_mask.fill((0, 0, 0))
simple_map = pygame.image.load(create_path("Simple Map.png")).convert() # 150 by 150
lava = pygame.image.load(create_path("Lava.png"))
poison = pygame.image.load(create_path("Poison Lake.png"))
cactus = pygame.image.load(create_path("Cactus1.png"))
| 36.041237 | 136 | 0.735984 | 564 | 3,496 | 4.324468 | 0.207447 | 0.121361 | 0.083641 | 0.078721 | 0.472325 | 0.266913 | 0.147191 | 0.115211 | 0.03854 | 0 | 0 | 0.066065 | 0.112414 | 3,496 | 96 | 137 | 36.416667 | 0.719948 | 0.05492 | 0 | 0.028986 | 0 | 0 | 0.073476 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072464 | false | 0.014493 | 0.014493 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e566f80f2af89ff3cebc3584e47b99d358ead339 | 481 | py | Python | example/get_concurrent.py | sojin-project/scrape-academy | 5a18f5b497a6b3b85049ec1a4451b6a333e84353 | [
"MIT"
] | null | null | null | example/get_concurrent.py | sojin-project/scrape-academy | 5a18f5b497a6b3b85049ec1a4451b6a333e84353 | [
"MIT"
] | null | null | null | example/get_concurrent.py | sojin-project/scrape-academy | 5a18f5b497a6b3b85049ec1a4451b6a333e84353 | [
"MIT"
] | null | null | null | # type: ignore
import asyncio
from scrapeacademy import context, run
async def get_concurrent(url):
# Get a same page 10 times simultaneously
tasks = [context.get(url) for _ in range(10)]
n = 1
while tasks:
done, tasks = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
for result in done:
print(f"done #{n}", result.result()[:10])
n += 1
print("done")
run(get_concurrent("https://www.python.jp/"))
| 20.913043 | 84 | 0.632017 | 66 | 481 | 4.530303 | 0.606061 | 0.086957 | 0.026756 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022099 | 0.247401 | 481 | 22 | 85 | 21.863636 | 0.803867 | 0.108108 | 0 | 0 | 0 | 0 | 0.08216 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e569a5dbd4c731524441ab30a896814a5ca98109 | 22,303 | py | Python | cogs/device.py | quiprr/AutoTSS | 8d78db17ed5a7f6200955689bfb7580b7eba7183 | [
"MIT"
] | null | null | null | cogs/device.py | quiprr/AutoTSS | 8d78db17ed5a7f6200955689bfb7580b7eba7183 | [
"MIT"
] | null | null | null | cogs/device.py | quiprr/AutoTSS | 8d78db17ed5a7f6200955689bfb7580b7eba7183 | [
"MIT"
] | null | null | null | from aioify import aioify
from discord.ext import commands
import aiofiles
import aiohttp
import aiosqlite
import asyncio
import discord
import json
import shutil
class Device(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.shutil = aioify(shutil, name='shutil')
self.utils = self.bot.get_cog('Utils')
@commands.group(name='device', invoke_without_command=True)
@commands.guild_only()
async def device_cmd(self, ctx: commands.Context) -> None:
prefix = await self.utils.get_prefix(ctx.guild.id)
embed = discord.Embed(title='Device Commands')
embed.add_field(name='Add a device', value=f'`{prefix}device add`', inline=False)
embed.add_field(name='Remove a device', value=f'`{prefix}device remove`', inline=False)
embed.add_field(name='List your devices', value=f'`{prefix}device list`', inline=False)
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await ctx.send(embed=embed)
@device_cmd.command(name='add')
@commands.guild_only()
@commands.max_concurrency(1, per=commands.BucketType.user)
async def add_device(self, ctx: commands.Context) -> None:
prefix = await self.utils.get_prefix(ctx.guild.id)
timeout_embed = discord.Embed(title='Add Device', description='No response given in 1 minute, cancelling.')
cancelled_embed = discord.Embed(title='Add Device', description='Cancelled.')
for embed in (timeout_embed, cancelled_embed):
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
max_devices = 10 #TODO: Export this option to a separate config file
async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT devices from autotss WHERE user = ?', (ctx.author.id,)) as cursor:
try:
devices = json.loads((await cursor.fetchone())[0])
except TypeError:
devices = list()
await db.execute('INSERT INTO autotss(user, devices, enabled) VALUES(?,?,?)', (ctx.author.id, json.dumps(devices), True))
await db.commit()
        if len(devices) >= max_devices and not await ctx.bot.is_owner(ctx.author): # Error out if adding another device would exceed 'max_devices', unless you're the owner of the bot
embed = discord.Embed(title='Error', description=f'You cannot add over {max_devices} devices to AutoTSS.')
await ctx.send(embed=embed)
return
device = dict()
async with aiohttp.ClientSession() as session:
for x in range(4): # Loop that gets all of the required information to save blobs with from the user
descriptions = (
'Enter a name for your device',
"Enter your device's identifier (e.g. `iPhone6,1`)",
"Enter your device's ECID (hex)",
"Enter your device's Board Config (e.g. `n51ap`). \
This value ends in `ap`, and can be found with [System Info](https://arx8x.github.io/depictions/systeminfo.html) \
under the `Platform` section, or by running `gssc | grep HWModelStr` in a terminal on your iOS device."
)
embed = discord.Embed(title='Add Device', description='\n'.join((descriptions[x], 'Type `cancel` to cancel.')))
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
if x == 0:
message = await ctx.send(embed=embed)
else:
await message.edit(embed=embed)
# Wait for a response from the user, and error out if the user takes over 1 minute to respond
try:
response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
if x == 0:
answer = response.content # Don't make the device's name lowercase
else:
answer = response.content.lower()
except asyncio.exceptions.TimeoutError:
await message.edit(embed=timeout_embed)
return
# Delete the message
try:
await response.delete()
except discord.errors.NotFound:
pass
if answer.lower() == 'cancel' or answer.startswith(prefix):
await message.edit(embed=cancelled_embed)
return
# Make sure given information is valid
if x == 0:
device['name'] = answer
name_check = await self.utils.check_name(device['name'], ctx.author.id)
if name_check != True:
embed = discord.Embed(title='Error', description = f"Device name `{device['name']}` is not valid.")
if name_check == 0:
embed.description += " A device's name must be between 4 and 20 characters."
elif name_check == -1:
embed.description += " You cannot use a device's name more than once."
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
return
elif x == 1:
device['identifier'] = 'P'.join(answer.split('p'))
if await self.utils.check_identifier(session, device['identifier']) is False:
embed = discord.Embed(title='Error', description=f"Device Identifier `{device['identifier']}` is not valid.")
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
return
elif x == 2:
if answer.startswith('0x'):
device['ecid'] = answer[2:]
else:
device['ecid'] = answer
ecid_check = await self.utils.check_ecid(device['ecid'], ctx.author.id)
if ecid_check != True:
embed = discord.Embed(title='Error', description=f"Device ECID `{device['ecid']}` is not valid.")
embed.set_footer(text=f'{ctx.author.display_name} | This message will be censored in 5 seconds to protect your ECID(s).', icon_url=ctx.author.avatar_url_as(static_format='png'))
if ecid_check == -1:
embed.description += ' This ECID has already been added to AutoTSS.'
await message.edit(embed=embed)
embed.description = embed.description.replace(f"`{device['ecid']}` ", '')
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await asyncio.sleep(5)
await message.edit(embed=embed)
return
else:
device['boardconfig'] = answer
if await self.utils.check_boardconfig(session, device['identifier'], device['boardconfig']) is False:
embed = discord.Embed(title='Error', description=f"Device boardconfig `{device['boardconfig']}` is not valid.")
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
return
cpid = await self.utils.get_cpid(session, device['identifier'], device['boardconfig'])
generator_description = [
'Would you like to save blobs with a custom generator?',
'*If being ran on A12+ devices, you **will** need to provide a matching apnonce for SHSH blobs to be saved correctly.*',
'Guide for jailbroken A12+ devices: [Click here](https://ios.cfw.guide/tss-web#getting-generator-and-apnonce-jailbroken-a12-only)',
'Guide for nonjailbroken A12+ devices: [Click here](https://ios.cfw.guide/tss-computer#get-your-device-specific-apnonce-and-generator)',
'This value is hexadecimal, 16 characters long, and begins with `0x`.'
]
embed = discord.Embed(title='Add Device', description='\n'.join(generator_description)) # Ask the user if they'd like to save blobs with a custom generator
embed.add_field(name='Options', value='Type **yes** to add a custom generator, **cancel** to cancel adding this device, or anything else to skip.', inline=False)
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
try:
response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
answer = response.content.lower()
except asyncio.exceptions.TimeoutError:
await message.edit(embed=timeout_embed)
return
try:
await response.delete()
except discord.errors.NotFound:
pass
if answer == 'yes':
embed = discord.Embed(title='Add Device', description='Please enter the custom generator you wish to save blobs with.\nType `cancel` to cancel.')
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
try:
response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
answer = response.content.lower()
except asyncio.exceptions.TimeoutError:
await message.edit(embed=timeout_embed)
return
try:
await response.delete()
except discord.errors.NotFound:
pass
if answer == 'cancel' or answer.startswith(prefix):
await message.edit(embed=cancelled_embed)
return
else:
device['generator'] = answer
if await self.utils.check_generator(device['generator']) is False:
embed = discord.Embed(title='Error', description=f"Device Generator `{device['generator']}` is not valid.")
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
return
elif answer == 'cancel' or answer.startswith(prefix):
await message.edit(embed=cancelled_embed)
return
else:
device['generator'] = None
apnonce_description = [
'Would you like to save blobs with a custom apnonce?',
]
if device['generator'] is not None:
apnonce_description.append(f"This custom apnonce MUST match with your custom generator `{device['generator']}`, or else your SHSH blobs **will be invalid**.")
if cpid >= 32800:
if len(apnonce_description) == 2:
a12_apnonce_desc = 'This also MUST be done for your device, or else your SHSH blobs **will be invalid**. More info \
[here](https://www.reddit.com/r/jailbreak/comments/f5wm6l/tutorial_repost_easiest_way_to_save_a12_blobs/).'
else:
a12_apnonce_desc = 'This MUST be done for your device, or else your SHSH blobs **will be invalid**. More info \
[here](https://www.reddit.com/r/jailbreak/comments/f5wm6l/tutorial_repost_easiest_way_to_save_a12_blobs/).'
apnonce_description.append(a12_apnonce_desc)
apnonce_description.append('NOTE: This is **NOT** the same as your **generator**, which is hex, begins with `0x`, and is 16 characters long.')
embed = discord.Embed(title='Add Device', description='\n'.join(apnonce_description)) # Ask the user if they'd like to save blobs with a custom ApNonce
embed.add_field(name='Options', value='Type **yes** to add a custom apnonce, **cancel** to cancel adding this device, or anything else to skip.', inline=False)
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
try:
response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
answer = response.content.lower()
except asyncio.exceptions.TimeoutError:
await message.edit(embed=timeout_embed)
return
try:
await response.delete()
except discord.errors.NotFound:
pass
if answer == 'yes':
embed = discord.Embed(title='Add Device', description='Please enter the custom apnonce you wish to save blobs with.\nType `cancel` to cancel.')
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
try:
response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
answer = response.content.lower()
except asyncio.exceptions.TimeoutError:
await message.edit(embed=timeout_embed)
return
try:
await response.delete()
except discord.errors.NotFound:
pass
if answer == 'cancel' or answer.startswith(prefix):
await message.edit(embed=cancelled_embed)
return
else:
device['apnonce'] = answer
if await self.utils.check_apnonce(cpid, device['apnonce']) is False:
embed = discord.Embed(title='Error', description=f"Device ApNonce `{device['apnonce']}` is not valid.")
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
return
elif answer == 'cancel' or answer.startswith(prefix):
await message.edit(embed=cancelled_embed)
return
else:
device['apnonce'] = None
if 32800 <= cpid < 35072 and device['apnonce'] is None: # If A12+ and no apnonce was specified
embed = discord.Embed(title='Add Device')
apnonce_warning = (
'You are attempting to add an A12+ device while choosing to not specify a custom apnonce.',
'This will save **non-working SHSH blobs**.',
'Are you sure you want to do this?'
)
embed.add_field(name='Warning', value='\n'.join(apnonce_warning), inline=False)
embed.add_field(name='Options', value='Type **yes** to go back and add a custom apnonce, **cancel** to cancel adding this device, or anything else to skip.', inline=False)
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
try:
response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
answer = response.content.lower()
except asyncio.exceptions.TimeoutError:
await message.edit(embed=timeout_embed)
return
try:
await response.delete()
except discord.errors.NotFound:
pass
if answer == 'yes':
embed = discord.Embed(title='Add Device', description='Please enter the custom apnonce you wish to save blobs with.\nType `cancel` to cancel.')
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
try:
response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
answer = response.content.lower()
except asyncio.exceptions.TimeoutError:
await message.edit(embed=timeout_embed)
return
try:
await response.delete()
except discord.errors.NotFound:
pass
if answer == 'cancel' or answer.startswith(prefix):
await message.edit(embed=cancelled_embed)
return
else:
device['apnonce'] = answer
                    if await self.utils.check_apnonce(cpid, device['apnonce']) is False:
embed = discord.Embed(title='Error', description=f"Device ApNonce `{device['apnonce']}` is not valid.")
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
return
elif answer == 'cancel' or answer.startswith(prefix):
await message.edit(embed=cancelled_embed)
return
else:
device['apnonce'] = None
device['saved_blobs'] = list()
# Add device information into the database
devices.append(device)
async with aiosqlite.connect('Data/autotss.db') as db:
await db.execute('UPDATE autotss SET devices = ? WHERE user = ?', (json.dumps(devices), ctx.author.id))
await db.commit()
embed = discord.Embed(title='Add Device', description=f"Device `{device['name']}` added successfully!")
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
await self.utils.update_device_count()
@device_cmd.command(name='remove')
@commands.guild_only()
@commands.max_concurrency(1, per=commands.BucketType.user)
async def remove_device(self, ctx: commands.Context) -> None:
prefix = await self.utils.get_prefix(ctx.guild.id)
cancelled_embed = discord.Embed(title='Remove Device', description='Cancelled.')
invalid_embed = discord.Embed(title='Error', description='Invalid input given.')
timeout_embed = discord.Embed(title='Remove Device', description='No response given in 1 minute, cancelling.')
for x in (cancelled_embed, invalid_embed, timeout_embed):
x.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT devices from autotss WHERE user = ?', (ctx.author.id,)) as cursor:
try:
devices = json.loads((await cursor.fetchone())[0])
except TypeError:
devices = list()
if len(devices) == 0:
embed = discord.Embed(title='Error', description='You have no devices added to AutoTSS.')
await ctx.send(embed=embed)
return
embed = discord.Embed(title='Remove Device', description="Choose the number of the device you'd like to remove.\nType `cancel` to cancel.")
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
for x in range(len(devices)):
device_info = [
f"Name: `{devices[x]['name']}`",
f"Device Identifier: `{devices[x]['identifier']}`",
f"Boardconfig: `{devices[x]['boardconfig']}`"
]
if devices[x]['apnonce'] is not None:
device_info.append(f"Custom ApNonce: `{devices[x]['apnonce']}`")
embed.add_field(name=x + 1, value='\n'.join(device_info), inline=False)
message = await ctx.send(embed=embed)
try:
response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
answer = response.content.lower()
except asyncio.exceptions.TimeoutError:
await message.edit(embed=timeout_embed)
return
try:
await response.delete()
        except discord.errors.NotFound:
            pass
if answer == 'cancel' or answer.startswith(prefix):
await message.edit(embed=cancelled_embed)
return
try:
num = int(answer) - 1
        except ValueError:
await message.edit(embed=invalid_embed)
return
if num not in range(len(devices)):
await message.edit(embed=invalid_embed)
return
embed = discord.Embed(title='Remove Device', description=f"Are you **absolutely sure** you want to delete `{devices[num]['name']}`?")
embed.add_field(name='Options', value='Type **yes** to delete your device & blobs from AutoTSS, or anything else to cancel.', inline=False)
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
try:
response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
answer = response.content.lower()
except asyncio.exceptions.TimeoutError:
await message.edit(embed=timeout_embed)
return
try:
await response.delete()
except discord.errors.NotFound:
pass
if answer == 'yes':
embed = discord.Embed(title='Remove Device', description='Removing device...')
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
async with aiofiles.tempfile.TemporaryDirectory() as tmpdir:
url = await self.utils.backup_blobs(tmpdir, devices[num]['ecid'])
if url is None:
embed = discord.Embed(title='Remove Device', description=f"Device `{devices[num]['name']}` removed.")
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
else:
await self.shutil.rmtree(f"Data/Blobs/{devices[num]['ecid']}")
embed = discord.Embed(title='Remove Device')
embed.description = f"Blobs from `{devices[num]['name']}`: [Click here]({url})"
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
try:
await ctx.author.send(embed=embed)
embed.description = f"Device `{devices[num]['name']}` removed."
await message.edit(embed=embed)
                    except discord.errors.HTTPException:  # e.g. the user's DMs are closed
embed.description = f"Device `{devices[num]['name']}` removed.\nBlobs from `{devices[num]['name']}`: [Click here]({url})"
embed.set_footer(
text=f'{ctx.author.display_name} | This message will automatically be deleted in 15 seconds to protect your ECID(s).',
icon_url=ctx.author.avatar_url_as(static_format='png')
)
await message.edit(embed=embed)
await asyncio.sleep(15)
await ctx.message.delete()
await message.delete()
devices.pop(num)
async with aiosqlite.connect('Data/autotss.db') as db:
await db.execute('UPDATE autotss SET devices = ? WHERE user = ?', (json.dumps(devices), ctx.author.id))
await db.commit()
        try:
            await message.edit(embed=embed)
        except discord.errors.NotFound:
            pass  # the status message may already have been deleted above to protect the ECID
await self.utils.update_device_count()
else:
await message.edit(embed=cancelled_embed)
@device_cmd.command(name='list')
@commands.guild_only()
async def list_devices(self, ctx: commands.Context) -> None:
async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT devices from autotss WHERE user = ?', (ctx.author.id,)) as cursor:
try:
devices = json.loads((await cursor.fetchone())[0])
except TypeError:
devices = list()
if len(devices) == 0:
embed = discord.Embed(title='Error', description='You have no devices added to AutoTSS.')
await ctx.send(embed=embed)
return
embed = discord.Embed(title=f"{ctx.author.display_name}'s Devices")
for device in devices:
device_info = [
f"Device Identifier: `{device['identifier']}`",
f"ECID: ||`{device['ecid']}`||",
f"Boardconfig: `{device['boardconfig']}`"
]
if device['generator'] is not None:
device_info.append(f"Custom generator: `{device['generator']}`")
if device['apnonce'] is not None:
device_info.append(f"Custom ApNonce: `{device['apnonce']}`")
embed.add_field(name=f"`{device['name']}`", value='\n'.join(device_info), inline=False)
embed.set_footer(text=f'{ctx.author.display_name} | This message will be censored in 10 seconds to protect your ECID(s).', icon_url=ctx.author.avatar_url_as(static_format='png'))
message = await ctx.send(embed=embed)
await asyncio.sleep(10)
for x in range(len(embed.fields)):
field_values = [value for value in embed.fields[x].value.split('\n') if 'ECID' not in value]
embed.set_field_at(index=x, name=embed.fields[x].name, value='\n'.join(field_values), inline=False)
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await message.edit(embed=embed)
def setup(bot):
bot.add_cog(Device(bot))
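# discord.py loads this extension via bot.load_extension(), which calls setup().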
| 40.922936 | 183 | 0.706407 | 3,143 | 22,303 | 4.916322 | 0.106586 | 0.043101 | 0.044525 | 0.058439 | 0.736992 | 0.696026 | 0.666192 | 0.639658 | 0.622314 | 0.591639 | 0 | 0.005824 | 0.160875 | 22,303 | 544 | 184 | 40.998162 | 0.819824 | 0.027978 | 0 | 0.602353 | 0 | 0.042353 | 0.227478 | 0.02949 | 0 | 0 | 0 | 0.001838 | 0 | 1 | 0.004706 | false | 0.021176 | 0.021176 | 0 | 0.096471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e56a51d213bf7149b8e6be6f0bd4f017978c2a3f | 1,365 | py | Python | fraktal_kocha_obiektowy.py | dkosztowniak/krzywaKocha | 1ece53d0fda51565eedd7e5427a82e72e019a21d | [
"MIT"
] | null | null | null | fraktal_kocha_obiektowy.py | dkosztowniak/krzywaKocha | 1ece53d0fda51565eedd7e5427a82e72e019a21d | [
"MIT"
] | null | null | null | fraktal_kocha_obiektowy.py | dkosztowniak/krzywaKocha | 1ece53d0fda51565eedd7e5427a82e72e019a21d | [
"MIT"
] | null | null | null | import turtle
class fraktalKocha(turtle.Turtle):
def __init__(self):
super().__init__(shape='classic', visible=False)
def krzywaKocha(self, d, n):
self.pendown()
if n == 0:
self.forward(d)
else:
self.krzywaKocha(d/3, n-1)
self.left(60)
self.krzywaKocha(d/3, n-1)
self.right(120)
self.krzywaKocha(d/3, n-1)
self.left(60)
self.krzywaKocha(d/3, n-1)
self.penup()
def platekKocha(self, d, n):
for i in range(3):
self.krzywaKocha(d, n)
self.right(120)
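# Geometry note (added for illustration): krzywaKocha(d, n) draws 4**n segments
# of length d / 3**n, so each side's total length grows as d * (4/3)**n.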
kolory = ('#ffbd20', '#20bd20', '#ff3c00', '#f000ff', '#004aff')
xPlatek = (-400, -400, 200, 200, -100)
yPlatek = (-50, 250, 250, -50, 150)
f = fraktalKocha()
turtle.title('Krzywa Kocha')
f.home()
f.speed(0) # 0..10 - 0 is fastest
f.penup()
f.pensize(2)
f.clear()
for n in range(5):
    # Legend
f.pencolor(kolory[n])
f.goto(-450+(turtle.window_width()//5)*n, -380)
f.write('n = ', True, align="left", font=("Arial", 12, "normal"))
f.write(n, True, align="left", font=("Arial", 12, "normal"))
f.goto(-480, -350)
for n in range(5):
f.pencolor(kolory[n])
f.krzywaKocha(turtle.window_width()//5, n)
for n in range(5):
f.pencolor(kolory[n])
f.goto(xPlatek[n], yPlatek[n])
f.platekKocha(200, n) | 24.375 | 69 | 0.556044 | 199 | 1,365 | 3.763819 | 0.371859 | 0.100134 | 0.106809 | 0.090788 | 0.416555 | 0.3498 | 0.316422 | 0.316422 | 0.316422 | 0.316422 | 0 | 0.091535 | 0.255678 | 1,365 | 56 | 70 | 24.375 | 0.645669 | 0.021245 | 0 | 0.318182 | 0 | 0 | 0.065967 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0 | 0.022727 | 0 | 0.113636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e56e6882dba09fa5e87e1ace9bbb92be2582bd23 | 7,228 | py | Python | adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/lib/adafruit_ble_radio.py | jacoblb64/pico_rgb_keypad_hid | 3251ca6a98ef86d9f98c54f639c4d61810601a0b | [
"MIT"
] | 47 | 2021-02-15T23:02:36.000Z | 2022-03-04T21:30:03.000Z | adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/lib/adafruit_ble_radio.py | jacoblb64/pico_rgb_keypad_hid | 3251ca6a98ef86d9f98c54f639c4d61810601a0b | [
"MIT"
] | 7 | 2021-02-19T20:00:08.000Z | 2022-01-14T10:51:12.000Z | adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/lib/adafruit_ble_radio.py | jacoblb64/pico_rgb_keypad_hid | 3251ca6a98ef86d9f98c54f639c4d61810601a0b | [
"MIT"
] | 14 | 2021-02-20T17:40:56.000Z | 2022-01-01T19:53:38.000Z | # SPDX-FileCopyrightText: 2019 Nicholas H. Tollervey for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_ble_radio`
================================================================================
Simple byte and string based inter-device communication via BLE.
* Author(s): Nicholas H.Tollervey for Adafruit Industries
**Hardware:**
* Adafruit Feather nRF52840 Express <https://www.adafruit.com/product/4062>
* Adafruit Circuit Playground Bluefruit <https://www.adafruit.com/product/4333>
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import time
import struct
from micropython import const
from adafruit_ble import BLERadio
from adafruit_ble.advertising import Advertisement, LazyObjectField
from adafruit_ble.advertising.standard import ManufacturerData
__version__ = "0.3.3"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BLE_Radio.git"
#: Maximum length of a message (in bytes).
MAX_LENGTH = 248
#: Amount of time to advertise a message (in seconds).
AD_DURATION = 0.5
_MANUFACTURING_DATA_ADT = const(0xFF)
_ADAFRUIT_COMPANY_ID = const(0x0822)
_RADIO_DATA_ID = const(0x0001) # TODO: check this isn't already taken.
class _RadioAdvertisement(Advertisement):
"""Broadcast arbitrary bytes as a radio message."""
match_prefixes = (struct.pack("<BH", 0xFF, _ADAFRUIT_COMPANY_ID),)
manufacturer_data = LazyObjectField(
ManufacturerData,
"manufacturer_data",
advertising_data_type=_MANUFACTURING_DATA_ADT,
company_id=_ADAFRUIT_COMPANY_ID,
key_encoding="<H",
)
@classmethod
def matches(cls, entry):
"""Checks for ID matches"""
if len(entry.advertisement_bytes) < 6:
return False
# Check the key position within the manufacturer data. We already know
# prefix matches so we don't need to check it twice.
return (
struct.unpack_from("<H", entry.advertisement_bytes, 5)[0] == _RADIO_DATA_ID
)
@property
def msg(self):
"""Raw radio data"""
if _RADIO_DATA_ID not in self.manufacturer_data.data:
return b""
return self.manufacturer_data.data[_RADIO_DATA_ID]
@msg.setter
def msg(self, value):
self.manufacturer_data.data[_RADIO_DATA_ID] = value
class Radio:
"""
Represents a connection through which one can send or receive strings
and bytes. The radio can be tuned to a specific channel upon initialisation
or via the `configure` method.
"""
def __init__(self, **args):
"""
Takes the same configuration arguments as the `configure` method.
"""
# For BLE related operations.
self.ble = BLERadio()
# The uid for outgoing message. Incremented by one on each send, up to
# 255 when it's reset to 0.
self.uid = 0
# Contains timestamped message metadata to mitigate report of
# receiving of duplicate messages within AD_DURATION time frame.
self.msg_pool = set()
# Handle user related configuration.
self.configure(**args)
def configure(self, channel=42):
"""
Set configuration values for the radio.
:param int channel: The channel (0-255) the radio is listening /
broadcasting on.
"""
if -1 < channel < 256:
self._channel = channel
else:
raise ValueError("Channel must be in range 0-255")
def send(self, message):
"""
Send a message string on the channel to which the radio is
broadcasting.
:param str message: The message string to broadcast.
"""
return self.send_bytes(message.encode("utf-8"))
def send_bytes(self, message):
"""
Send bytes on the channel to which the radio is broadcasting.
:param bytes message: The bytes to broadcast.
"""
# Ensure length of message.
if len(message) > MAX_LENGTH:
raise ValueError("Message too long (max length = {})".format(MAX_LENGTH))
advertisement = _RadioAdvertisement()
# Concatenate the bytes that make up the advertised message.
advertisement.msg = struct.pack("<BB", self._channel, self.uid) + message
self.uid = (self.uid + 1) % 255
# Advertise (block) for AD_DURATION period of time.
self.ble.start_advertising(advertisement)
time.sleep(AD_DURATION)
self.ble.stop_advertising()
def receive(self):
"""
Returns a message received on the channel on which the radio is
listening.
:return: A string representation of the received message, or else None.
"""
msg = self.receive_full()
if msg:
return msg[0].decode("utf-8").replace("\x00", "")
return None
def receive_full(self):
"""
Returns a tuple containing three values representing a message received
on the channel on which the radio is listening. If no message was
received then `None` is returned.
The three values in the tuple represent:
* the bytes received.
* the RSSI (signal strength: 0 = max, -255 = min).
* a microsecond timestamp: the value returned by time.monotonic() when
the message was received.
:return: A tuple representation of the received message, or else None.
"""
try:
for entry in self.ble.start_scan(
_RadioAdvertisement, minimum_rssi=-255, timeout=1, extended=True
):
# Extract channel and unique message ID bytes.
chan, uid = struct.unpack("<BB", entry.msg[:2])
if chan == self._channel:
now = time.monotonic()
addr = entry.address.address_bytes
# Ensure this message isn't a duplicate. Message metadata
# is a tuple of (now, chan, uid, addr), to (mostly)
# uniquely identify a specific message in a certain time
# window.
expired_metadata = set()
duplicate = False
for msg_metadata in self.msg_pool:
if msg_metadata[0] < now - AD_DURATION:
# Ignore expired entries and mark for removal.
expired_metadata.add(msg_metadata)
elif (chan, uid, addr) == msg_metadata[1:]:
# Ignore matched messages to avoid duplication.
duplicate = True
# Remove expired entries.
self.msg_pool = self.msg_pool - expired_metadata
if not duplicate:
# Add new message's metadata to the msg_pool and
# return it as a result.
self.msg_pool.add((now, chan, uid, addr))
msg = entry.msg[2:]
return (msg, entry.rssi, now)
finally:
self.ble.stop_scan()
return None
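# Usage sketch (illustrative; assumes two supported BLE boards running this code):
#   radio = Radio(channel=7)
#   radio.send("hello")          # broadcasts for AD_DURATION seconds
#   print(radio.receive_full())  # -> (message_bytes, rssi, timestamp) or None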
| 35.258537 | 87 | 0.606253 | 844 | 7,228 | 5.07346 | 0.325829 | 0.013078 | 0.012844 | 0.014012 | 0.113965 | 0.101822 | 0.083606 | 0.067258 | 0.046707 | 0.046707 | 0 | 0.01587 | 0.302573 | 7,228 | 204 | 88 | 35.431373 | 0.833565 | 0.417128 | 0 | 0.022727 | 0 | 0 | 0.045595 | 0 | 0 | 0 | 0.005152 | 0.004902 | 0 | 1 | 0.102273 | false | 0 | 0.068182 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e56f6d2a048e2089110b635b9fc2860c2724c363 | 13,775 | py | Python | layers/MultiWaveletCorrelation.py | MAZiqing/FEDformer | 7914d39df829494a8172afb9676982c3789d491d | [
"MIT"
] | 7 | 2022-02-20T13:03:25.000Z | 2022-03-30T09:27:38.000Z | layers/MultiWaveletCorrelation.py | MAZiqing/FEDformer | 7914d39df829494a8172afb9676982c3789d491d | [
"MIT"
] | null | null | null | layers/MultiWaveletCorrelation.py | MAZiqing/FEDformer | 7914d39df829494a8172afb9676982c3789d491d | [
"MIT"
] | 4 | 2022-03-05T09:09:28.000Z | 2022-03-21T08:46:23.000Z | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from typing import List, Tuple
import math
from functools import partial
from einops import rearrange, reduce, repeat
from torch import nn, einsum, diagonal
from math import log2, ceil
import pdb
from utils.masking import LocalMask
from layers.utils import get_filter
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class MultiWaveletTransform(nn.Module):
"""
1D multiwavelet block.
"""
def __init__(self, ich=1, k=8, alpha=16, c=128,
nCZ=1, L=0, base='legendre', attention_dropout=0.1):
super(MultiWaveletTransform, self).__init__()
print('base', base)
self.k = k
self.c = c
self.L = L
self.nCZ = nCZ
self.Lk0 = nn.Linear(ich, c * k)
self.Lk1 = nn.Linear(c * k, ich)
self.ich = ich
self.MWT_CZ = nn.ModuleList(MWT_CZ1d(k, alpha, L, c, base) for i in range(nCZ))
def forward(self, queries, keys, values, attn_mask):
B, L, H, E = queries.shape
_, S, _, D = values.shape
if L > S:
zeros = torch.zeros_like(queries[:, :(L - S), :]).float()
values = torch.cat([values, zeros], dim=1)
keys = torch.cat([keys, zeros], dim=1)
else:
values = values[:, :L, :, :]
keys = keys[:, :L, :, :]
values = values.view(B, L, -1)
V = self.Lk0(values).view(B, L, self.c, -1)
for i in range(self.nCZ):
V = self.MWT_CZ[i](V)
if i < self.nCZ - 1:
V = F.relu(V)
V = self.Lk1(V.view(B, L, -1))
V = V.view(B, L, -1, D)
return (V.contiguous(), None)
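# Shape sketch (illustrative values, assuming ich = H * E):
#   block = MultiWaveletTransform(ich=16, k=4, c=32)
#   q = k = v = torch.randn(2, 64, 8, 2)   # (B, L, H, E)
#   out, _ = block(q, k, v, None)          # out: (2, 64, 8, 2)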
class MultiWaveletCross(nn.Module):
"""
1D Multiwavelet Cross Attention layer.
"""
def __init__(self, in_channels, out_channels, seq_len_q, seq_len_kv, modes, c=64,
k=8, ich=512,
L=0,
base='legendre',
mode_select_method='random',
initializer=None, activation='tanh',
**kwargs):
super(MultiWaveletCross, self).__init__()
print('base', base)
self.c = c
self.k = k
self.L = L
H0, H1, G0, G1, PHI0, PHI1 = get_filter(base, k)
H0r = H0 @ PHI0
G0r = G0 @ PHI0
H1r = H1 @ PHI1
G1r = G1 @ PHI1
H0r[np.abs(H0r) < 1e-8] = 0
H1r[np.abs(H1r) < 1e-8] = 0
G0r[np.abs(G0r) < 1e-8] = 0
G1r[np.abs(G1r) < 1e-8] = 0
self.max_item = 3
self.attn1 = FourierCrossAttentionW(in_channels=in_channels, out_channels=out_channels, seq_len_q=seq_len_q,
seq_len_kv=seq_len_kv, modes=modes, activation=activation,
mode_select_method=mode_select_method)
self.attn2 = FourierCrossAttentionW(in_channels=in_channels, out_channels=out_channels, seq_len_q=seq_len_q,
seq_len_kv=seq_len_kv, modes=modes, activation=activation,
mode_select_method=mode_select_method)
self.attn3 = FourierCrossAttentionW(in_channels=in_channels, out_channels=out_channels, seq_len_q=seq_len_q,
seq_len_kv=seq_len_kv, modes=modes, activation=activation,
mode_select_method=mode_select_method)
self.attn4 = FourierCrossAttentionW(in_channels=in_channels, out_channels=out_channels, seq_len_q=seq_len_q,
seq_len_kv=seq_len_kv, modes=modes, activation=activation,
mode_select_method=mode_select_method)
self.T0 = nn.Linear(k, k)
self.register_buffer('ec_s', torch.Tensor(
np.concatenate((H0.T, H1.T), axis=0)))
self.register_buffer('ec_d', torch.Tensor(
np.concatenate((G0.T, G1.T), axis=0)))
self.register_buffer('rc_e', torch.Tensor(
np.concatenate((H0r, G0r), axis=0)))
self.register_buffer('rc_o', torch.Tensor(
np.concatenate((H1r, G1r), axis=0)))
self.Lk = nn.Linear(ich, c * k)
self.Lq = nn.Linear(ich, c * k)
self.Lv = nn.Linear(ich, c * k)
self.out = nn.Linear(c * k, ich)
self.modes1 = modes
def forward(self, q, k, v, mask=None):
B, N, H, E = q.shape # (B, N, H, E) torch.Size([3, 768, 8, 2])
_, S, _, _ = k.shape # (B, S, H, E) torch.Size([3, 96, 8, 2])
q = q.view(q.shape[0], q.shape[1], -1)
k = k.view(k.shape[0], k.shape[1], -1)
v = v.view(v.shape[0], v.shape[1], -1)
q = self.Lq(q)
q = q.view(q.shape[0], q.shape[1], self.c, self.k)
k = self.Lk(k)
k = k.view(k.shape[0], k.shape[1], self.c, self.k)
v = self.Lv(v)
v = v.view(v.shape[0], v.shape[1], self.c, self.k)
if N > S:
zeros = torch.zeros_like(q[:, :(N - S), :]).float()
v = torch.cat([v, zeros], dim=1)
k = torch.cat([k, zeros], dim=1)
else:
v = v[:, :N, :, :]
k = k[:, :N, :, :]
ns = math.floor(np.log2(N))
nl = pow(2, math.ceil(np.log2(N)))
extra_q = q[:, 0:nl - N, :, :]
extra_k = k[:, 0:nl - N, :, :]
extra_v = v[:, 0:nl - N, :, :]
q = torch.cat([q, extra_q], 1)
k = torch.cat([k, extra_k], 1)
v = torch.cat([v, extra_v], 1)
Ud_q = torch.jit.annotate(List[Tuple[Tensor]], [])
Ud_k = torch.jit.annotate(List[Tuple[Tensor]], [])
Ud_v = torch.jit.annotate(List[Tuple[Tensor]], [])
Us_q = torch.jit.annotate(List[Tensor], [])
Us_k = torch.jit.annotate(List[Tensor], [])
Us_v = torch.jit.annotate(List[Tensor], [])
Ud = torch.jit.annotate(List[Tensor], [])
Us = torch.jit.annotate(List[Tensor], [])
# decompose
for i in range(ns - self.L):
# print('q shape',q.shape)
d, q = self.wavelet_transform(q)
Ud_q += [tuple([d, q])]
Us_q += [d]
for i in range(ns - self.L):
d, k = self.wavelet_transform(k)
Ud_k += [tuple([d, k])]
Us_k += [d]
for i in range(ns - self.L):
d, v = self.wavelet_transform(v)
Ud_v += [tuple([d, v])]
Us_v += [d]
for i in range(ns - self.L):
dk, sk = Ud_k[i], Us_k[i]
dq, sq = Ud_q[i], Us_q[i]
dv, sv = Ud_v[i], Us_v[i]
Ud += [self.attn1(dq[0], dk[0], dv[0], mask)[0] + self.attn2(dq[1], dk[1], dv[1], mask)[0]]
Us += [self.attn3(sq, sk, sv, mask)[0]]
v = self.attn4(q, k, v, mask)[0]
# reconstruct
for i in range(ns - 1 - self.L, -1, -1):
v = v + Us[i]
v = torch.cat((v, Ud[i]), -1)
v = self.evenOdd(v)
v = self.out(v[:, :N, :, :].contiguous().view(B, N, -1))
return (v.contiguous(), None)
def wavelet_transform(self, x):
xa = torch.cat([x[:, ::2, :, :],
x[:, 1::2, :, :],
], -1)
d = torch.matmul(xa, self.ec_d)
s = torch.matmul(xa, self.ec_s)
return d, s
def evenOdd(self, x):
B, N, c, ich = x.shape # (B, N, c, k)
assert ich == 2 * self.k
x_e = torch.matmul(x, self.rc_e)
x_o = torch.matmul(x, self.rc_o)
x = torch.zeros(B, N * 2, c, self.k,
device=x.device)
x[..., ::2, :, :] = x_e
x[..., 1::2, :, :] = x_o
return x
class FourierCrossAttentionW(nn.Module):
def __init__(self, in_channels, out_channels, seq_len_q, seq_len_kv, modes=16, activation='tanh',
mode_select_method='random'):
super(FourierCrossAttentionW, self).__init__()
        print('cross fourier correlation used!')
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes
self.activation = activation
def forward(self, q, k, v, mask):
B, L, E, H = q.shape
xq = q.permute(0, 3, 2, 1) # size = [B, H, E, L] torch.Size([3, 8, 64, 512])
xk = k.permute(0, 3, 2, 1)
xv = v.permute(0, 3, 2, 1)
self.index_q = list(range(0, min(int(L // 2), self.modes1)))
self.index_k_v = list(range(0, min(int(xv.shape[3] // 2), self.modes1)))
# Compute Fourier coefficients
xq_ft_ = torch.zeros(B, H, E, len(self.index_q), device=xq.device, dtype=torch.cfloat)
xq_ft = torch.fft.rfft(xq, dim=-1)
for i, j in enumerate(self.index_q):
xq_ft_[:, :, :, i] = xq_ft[:, :, :, j]
xk_ft_ = torch.zeros(B, H, E, len(self.index_k_v), device=xq.device, dtype=torch.cfloat)
xk_ft = torch.fft.rfft(xk, dim=-1)
for i, j in enumerate(self.index_k_v):
xk_ft_[:, :, :, i] = xk_ft[:, :, :, j]
xqk_ft = (torch.einsum("bhex,bhey->bhxy", xq_ft_, xk_ft_))
if self.activation == 'tanh':
xqk_ft = xqk_ft.tanh()
elif self.activation == 'softmax':
xqk_ft = torch.softmax(abs(xqk_ft), dim=-1)
xqk_ft = torch.complex(xqk_ft, torch.zeros_like(xqk_ft))
else:
            raise Exception('{} activation function is not implemented'.format(self.activation))
xqkv_ft = torch.einsum("bhxy,bhey->bhex", xqk_ft, xk_ft_)
xqkvw = xqkv_ft
out_ft = torch.zeros(B, H, E, L // 2 + 1, device=xq.device, dtype=torch.cfloat)
for i, j in enumerate(self.index_q):
out_ft[:, :, :, j] = xqkvw[:, :, :, i]
out = torch.fft.irfft(out_ft / self.in_channels / self.out_channels, n=xq.size(-1)).permute(0, 3, 2, 1)
# size = [B, L, H, E]
return (out, None)
class sparseKernelFT1d(nn.Module):
def __init__(self,
k, alpha, c=1,
nl=1,
initializer=None,
**kwargs):
super(sparseKernelFT1d, self).__init__()
self.modes1 = alpha
self.scale = (1 / (c * k * c * k))
self.weights1 = nn.Parameter(self.scale * torch.rand(c * k, c * k, self.modes1, dtype=torch.cfloat))
self.weights1.requires_grad = True
self.k = k
def compl_mul1d(self, x, weights):
# (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x)
return torch.einsum("bix,iox->box", x, weights)
def forward(self, x):
B, N, c, k = x.shape # (B, N, c, k)
x = x.view(B, N, -1)
x = x.permute(0, 2, 1)
x_fft = torch.fft.rfft(x)
# Multiply relevant Fourier modes
l = min(self.modes1, N // 2 + 1)
# l = N//2+1
out_ft = torch.zeros(B, c * k, N // 2 + 1, device=x.device, dtype=torch.cfloat)
out_ft[:, :, :l] = self.compl_mul1d(x_fft[:, :, :l], self.weights1[:, :, :l])
x = torch.fft.irfft(out_ft, n=N)
x = x.permute(0, 2, 1).view(B, N, c, k)
return x
# ##
class MWT_CZ1d(nn.Module):
def __init__(self,
k=3, alpha=64,
L=0, c=1,
base='legendre',
initializer=None,
**kwargs):
super(MWT_CZ1d, self).__init__()
self.k = k
self.L = L
H0, H1, G0, G1, PHI0, PHI1 = get_filter(base, k)
H0r = H0 @ PHI0
G0r = G0 @ PHI0
H1r = H1 @ PHI1
G1r = G1 @ PHI1
H0r[np.abs(H0r) < 1e-8] = 0
H1r[np.abs(H1r) < 1e-8] = 0
G0r[np.abs(G0r) < 1e-8] = 0
G1r[np.abs(G1r) < 1e-8] = 0
self.max_item = 3
self.A = sparseKernelFT1d(k, alpha, c)
self.B = sparseKernelFT1d(k, alpha, c)
self.C = sparseKernelFT1d(k, alpha, c)
self.T0 = nn.Linear(k, k)
self.register_buffer('ec_s', torch.Tensor(
np.concatenate((H0.T, H1.T), axis=0)))
self.register_buffer('ec_d', torch.Tensor(
np.concatenate((G0.T, G1.T), axis=0)))
self.register_buffer('rc_e', torch.Tensor(
np.concatenate((H0r, G0r), axis=0)))
self.register_buffer('rc_o', torch.Tensor(
np.concatenate((H1r, G1r), axis=0)))
def forward(self, x):
B, N, c, k = x.shape # (B, N, k)
ns = math.floor(np.log2(N))
nl = pow(2, math.ceil(np.log2(N)))
extra_x = x[:, 0:nl - N, :, :]
x = torch.cat([x, extra_x], 1)
Ud = torch.jit.annotate(List[Tensor], [])
Us = torch.jit.annotate(List[Tensor], [])
# decompose
for i in range(ns - self.L):
# print('x shape',x.shape)
d, x = self.wavelet_transform(x)
Ud += [self.A(d) + self.B(x)]
Us += [self.C(d)]
x = self.T0(x) # coarsest scale transform
# reconstruct
for i in range(ns - 1 - self.L, -1, -1):
x = x + Us[i]
x = torch.cat((x, Ud[i]), -1)
x = self.evenOdd(x)
x = x[:, :N, :, :]
return x
def wavelet_transform(self, x):
xa = torch.cat([x[:, ::2, :, :],
x[:, 1::2, :, :],
], -1)
d = torch.matmul(xa, self.ec_d)
s = torch.matmul(xa, self.ec_s)
return d, s
def evenOdd(self, x):
B, N, c, ich = x.shape # (B, N, c, k)
assert ich == 2 * self.k
x_e = torch.matmul(x, self.rc_e)
x_o = torch.matmul(x, self.rc_o)
x = torch.zeros(B, N * 2, c, self.k,
device=x.device)
x[..., ::2, :, :] = x_e
x[..., 1::2, :, :] = x_o
return x | 36.345646 | 116 | 0.503739 | 2,008 | 13,775 | 3.323705 | 0.118028 | 0.01798 | 0.031316 | 0.014984 | 0.550195 | 0.488762 | 0.422835 | 0.401858 | 0.394966 | 0.35421 | 0 | 0.03286 | 0.335027 | 13,775 | 379 | 117 | 36.345646 | 0.695742 | 0.039637 | 0 | 0.419672 | 0 | 0 | 0.016315 | 0 | 0 | 0 | 0 | 0 | 0.006557 | 1 | 0.04918 | false | 0 | 0.045902 | 0.003279 | 0.144262 | 0.009836 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e56f87f0384ecbf57f48e8eac0641bcfd48082b7 | 5,636 | py | Python | perrec/cbr.py | Tbabm/PerRec | 1f711d70df8354156b37857719db0559876be08c | [
"MIT"
] | 3 | 2019-07-24T12:03:24.000Z | 2019-08-28T14:42:51.000Z | perrec/cbr.py | Tbabm/PerRec | 1f711d70df8354156b37857719db0559876be08c | [
"MIT"
] | null | null | null | perrec/cbr.py | Tbabm/PerRec | 1f711d70df8354156b37857719db0559876be08c | [
"MIT"
] | null | null | null | # encoding=utf-8
import os
import fire
import numpy as np
from scipy.sparse.csr import csr_matrix
from sklearn.base import BaseEstimator
from sklearn.model_selection import cross_validate
from sklearn.preprocessing import normalize
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from .common.similarities import SIM_FUNCTIONS
from .common.dataset import prepare_shuffled_dataset
from .common.scorers import map_scorer, trr_scorer, nr_scorer
from .executor import BaseExecutor
SCORING = {
'MAP': map_scorer,
'TRR': trr_scorer,
'NR': nr_scorer
}
def do_nothing_tokenizer(tokens):
return tokens
class PerRecCBR(BaseEstimator):
"""CBR component for recommending permission lists
Input: A list of used apis.
Output: The ranked permission list of the app.
"""
def __init__(self, sim_func="cosine"):
if callable(sim_func):
self.sim_func = sim_func
else:
self.sim_func = SIM_FUNCTIONS.get(sim_func, None)
if not self.sim_func:
raise ValueError("Error sim_func" + str(sim_func))
@staticmethod
def build_perm_docs(perm_vectors, api_vectors):
"""Build permission profiles
Args:
perm_vectors (Matrix): app perm vectors
api_vectors (Matrix): app api vectors
perm_list (List): list of permissions
"""
perm_docs = []
# for each column of permission vectors (e.g., each permission)
for col in perm_vectors.T:
# find the apps which require this permissions
if isinstance(col, csr_matrix):
col = col.toarray().reshape(-1, )
apps = np.where(col == 1)
# find the api vectors of such apps
cur_api_vectors = api_vectors[apps].toarray()
# construct permission doc
cur_perm_doc = cur_api_vectors.sum(axis=0)
perm_docs.append(cur_perm_doc)
return np.array(perm_docs)
def fit(self, X, y):
"""Build the profiles for training permissions
Args:
X (List(List(API))): The api lists of the training apps.
y (List(List(Perm))): The permission lists of all apps
Returns:
self object: return self
"""
# Steps:
# 1. build permission doc
# 2. calculate the tfidf vector for each permission doc as the profiles of permissions
# 3. build API CountVectorizer
self.api_vectorizer_ = CountVectorizer(binary=True, tokenizer=do_nothing_tokenizer,
preprocessor=None, lowercase=False)
self.train_api_vectors_ = self.api_vectorizer_.fit_transform(X)
self.perm_vectorizer_ = CountVectorizer(binary=True, tokenizer=do_nothing_tokenizer,
preprocessor=None, lowercase=False)
self.train_perm_vectors_ = self.perm_vectorizer_.fit_transform(y)
self.perm_list_ = self.perm_vectorizer_.get_feature_names()
# build permission doc
self.perm_docs_ = self.build_perm_docs(self.train_perm_vectors_, self.train_api_vectors_)
# idf = log(total_num / num) + 1
self.tfidf_transformer_ = TfidfTransformer(norm="l1", use_idf=True, smooth_idf=False)
tfidf_matrix = self.tfidf_transformer_.fit_transform(self.perm_docs_)
        self.perm_profiles_ = normalize(tfidf_matrix, norm='l2', axis=1)
        return self
def transform(self, X, *fit_params):
"""Recommend permissions for new apps
Args:
X (List(List(API))): A list of apps for testing.
Returns:
Perms (List(List(Permission))): The ranked permission lists recommended for input apps
"""
# ranked the permissions
# construct app profiles (api vectors)
test_api_vectors = self.api_vectorizer_.transform(X)
# calculate the similarities between API vector and permission profiles
# test_num * perm_num
similarities = self.sim_func(test_api_vectors, self.perm_profiles_)
perm_scores = normalize(similarities, norm="l1", axis=1)
# for fusion
self.perm_scores_ = perm_scores
sorted_perm_index = np.argsort(-1.0 * perm_scores, 1)
# each row: perm_i, perm_j, per_k (sorted)
return np.take(self.perm_list_, sorted_perm_index)
def predict(self, X):
return self.transform(X)
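# Usage sketch (toy data, for illustration only):
#   est = PerRecCBR(sim_func='cosine')
#   est.fit([['api.a', 'api.b'], ['api.b', 'api.c']],
#           [['CAMERA'], ['INTERNET']])
#   est.predict([['api.b']])   # -> ranked permission names per input app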
class CBR(BaseExecutor):
def __init__(self, dataset, scoring, **kwargs):
super().__init__("CBR", dataset, scoring)
self.sim_func = kwargs.get("sim_func", "cosine")
self.smooth_idf = kwargs.get("smooth_idf", True)
def get_result_file(self, data_dir):
file_name = "_".join([self.name, self.sim_func, str(self.smooth_idf)])
return os.path.join(data_dir, file_name + ".json")
def construct_estimator(self):
return PerRecCBR(sim_func=self.sim_func)
def run(self):
api_lists = self.dataset.extract_api_lists()
perm_lists = self.dataset.extract_perm_lists()
estimator = self.construct_estimator()
scores = cross_validate(estimator, api_lists, perm_lists, scoring=self.scoring, cv=10,
n_jobs=-1, verbose=1, return_train_score=False)
return scores
def main(sim_func="cosine"):
dataset = prepare_shuffled_dataset()
scoring = SCORING
executor = CBR(dataset, scoring, sim_func=sim_func)
scores = executor.run()
print(scores['test_MAP'].mean())
if __name__ == "__main__":
fire.Fire({
'main': main
})
| 36.836601 | 98 | 0.649929 | 703 | 5,636 | 4.958748 | 0.271693 | 0.036145 | 0.025244 | 0.008032 | 0.104131 | 0.057946 | 0.057946 | 0.057946 | 0.057946 | 0.057946 | 0 | 0.004802 | 0.261001 | 5,636 | 152 | 99 | 37.078947 | 0.832173 | 0.225515 | 0 | 0.023256 | 0 | 0 | 0.022228 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127907 | false | 0 | 0.139535 | 0.034884 | 0.372093 | 0.011628 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e570191797fda76257f543ceca066c63d6087a58 | 1,410 | py | Python | examples/tournament/tournament.py | gavento/orco | 07e90bf87246f4577c8e3653b34474a69cc5338e | [
"MIT"
] | null | null | null | examples/tournament/tournament.py | gavento/orco | 07e90bf87246f4577c8e3653b34474a69cc5338e | [
"MIT"
] | null | null | null | examples/tournament/tournament.py | gavento/orco | 07e90bf87246f4577c8e3653b34474a69cc5338e | [
"MIT"
] | null | null | null | import itertools
import random
import orco
# Function that trains "players"
@orco.builder()
def train_player(config):
# We will simulate trained players by a dictionary with a "strength" key
return {"strength": random.randint(0, 10)}
# Build function for "games"
@orco.builder()
def play_game(config):
player1 = train_player(config["player1"])
player2 = train_player(config["player2"])
yield
    # Simulate a game between the two players: each throws a random
    # number in the range [0, 2 * strength]; the difference of the
    # two throws is the game's result.
r1 = random.randint(0, player1.value["strength"] * 2)
r2 = random.randint(0, player2.value["strength"] * 2)
return r1 - r2
# Build function for a tournament, return score for each player
@orco.builder()
def play_tournament(config):
# For evaluating a tournament, we need to know the results of games between
# each pair of its players.
games = [
play_game({"player1": p1, "player2": p2})
for (p1, p2) in itertools.product(config["players"], config["players"])
]
yield
score = {}
for game in games:
player1 = game.config["player1"]
player2 = game.config["player2"]
score.setdefault(player1, 0)
score.setdefault(player2, 0)
score[player1] += game.value
score[player2] -= game.value
return score
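# Illustrative invocation (hypothetical player configs): ORCO computes each
# train_player and play_game entry once per distinct config and reuses the
# stored results, e.g.
#   play_tournament({"players": [{"seed": 1}, {"seed": 2}, {"seed": 3}]})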
orco.run_cli()
| 26.603774 | 79 | 0.664539 | 188 | 1,410 | 4.946809 | 0.388298 | 0.035484 | 0.045161 | 0.03871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030247 | 0.226241 | 1,410 | 52 | 80 | 27.115385 | 0.822181 | 0.313475 | 0 | 0.16129 | 0 | 0 | 0.083507 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.096774 | 0.032258 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e57024f83b49dd4e2de2007ee984d24ae347e3b1 | 2,546 | py | Python | scripts/injection_ROI_visualization.py | karimi-ali/brainrender | 04be6a05fdfdd22424c4c499f4563943436faf6f | [
"BSD-3-Clause"
] | null | null | null | scripts/injection_ROI_visualization.py | karimi-ali/brainrender | 04be6a05fdfdd22424c4c499f4563943436faf6f | [
"BSD-3-Clause"
] | null | null | null | scripts/injection_ROI_visualization.py | karimi-ali/brainrender | 04be6a05fdfdd22424c4c499f4563943436faf6f | [
"BSD-3-Clause"
] | null | null | null | import os
from pathlib import Path
import brainrender
from brainrender import Scene, actor, Animation
from rich import color, print
from myterial import orange
from vedo import Volume, io, load, show
import numpy as np
import pandas as pd
import util
# path names and roi names
paths = util.get_paths()
roi_names = util.roi_names()
print(f"[{orange}]Running example: {Path(__file__).name}")
# Create a brainrender scene
scene = Scene(title="Injection ROIs", atlas_name='allen_mouse_10um')
# injection site meshes
mesh_names = [os.path.join(paths['data'], 'meshes', f'{roi}.obj') for roi in roi_names]
meshes = [load(cur_name) for cur_name in mesh_names]
# overlapping atlas rois
csv_names_atlas = [os.path.join(paths['data'], 'csv_acronyms', f'{roi}.csv') for roi in roi_names]
csv_atlas_acronym = [pd.read_csv(name) for name in csv_names_atlas]
colors = ['#6DB546', '#C30017', '#9D9D9C']
alpha_rois = 0.6
for cur_idx, cur_mesh in enumerate(meshes):
# Create the injection site actors
cur_actor = actor.Actor(cur_mesh,
name=roi_names[cur_idx],
color=colors[cur_idx],
alpha=alpha_rois)
scene.add(cur_actor)
scene.add_silhouette(cur_actor)
# Overlapping atlas
cur_overlapping_acronyms = list(csv_atlas_acronym[cur_idx]["acronym_keepSingleChild"])
# scene.add_brain_region(*cur_overlapping_acronyms,
# alpha=0.2,
# color=colors[cur_idx],
# hemisphere='right')
# Render and save screen shots
screen_shot_dir = os.path.join(paths['data'], 'screen_shots_no_region')
os.makedirs(screen_shot_dir, exist_ok = True)
camera_names = list(brainrender.camera.cameras.keys())
zoom_vals = [2.0, 0.8, 1.0, 1.0, 1.0, 1.0]
for idx, c in enumerate(camera_names):
scene.render(camera=c, zoom=zoom_vals[idx], interactive=False)
scene.screenshot(name=os.path.join(screen_shot_dir, f'{c}_alpha_{alpha_rois}.png'))
# Animation
animate_flag = True
if animate_flag:
anim = Animation(scene, screen_shot_dir, "ROI_inj_animation",size="6480x4200")
# Specify camera position and zoom at some key frames
# each key frame defines the scene's state after n seconds have passed
anim.add_keyframe(0, camera="top", zoom=0.3)
anim.add_keyframe(5, camera="sagittal", zoom=1.0)
anim.add_keyframe(9, camera="frontal", zoom=1.0)
anim.add_keyframe(
10,
camera="frontal",
)
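# Timing sketch (illustrative): with keyframes at t = 0, 5, 9 and 10 s and
# duration=10, the camera interpolates top -> sagittal -> frontal over the video.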
# Make videos
anim.make_video(duration=10, fps=10) | 36.371429 | 98 | 0.688138 | 376 | 2,546 | 4.460106 | 0.364362 | 0.028623 | 0.023852 | 0.026834 | 0.082886 | 0.029815 | 0 | 0 | 0 | 0 | 0 | 0.025892 | 0.195994 | 2,546 | 70 | 99 | 36.371429 | 0.793356 | 0.193637 | 0 | 0 | 0 | 0 | 0.131992 | 0.045142 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.217391 | 0 | 0.217391 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e57240c96406c5bb3b032eec708032b091e297a7 | 7,664 | py | Python | scripts/wk/exe.py | 2Shirt/WizardK | 82a2e7f85c80a52f892c1553e7a45ec0174e7bc6 | [
"MIT"
] | null | null | null | scripts/wk/exe.py | 2Shirt/WizardK | 82a2e7f85c80a52f892c1553e7a45ec0174e7bc6 | [
"MIT"
] | 178 | 2017-11-17T19:14:31.000Z | 2021-12-15T07:43:29.000Z | scripts/wk/exe.py | 2Shirt/WizardK | 82a2e7f85c80a52f892c1553e7a45ec0174e7bc6 | [
"MIT"
] | 1 | 2017-11-17T19:32:36.000Z | 2017-11-17T19:32:36.000Z | """WizardKit: Execution functions"""
#vim: sts=2 sw=2 ts=2
import json
import logging
import os
import re
import subprocess
import time
from threading import Thread
from queue import Queue, Empty
import psutil
# STATIC VARIABLES
LOG = logging.getLogger(__name__)
# Classes
class NonBlockingStreamReader():
"""Class to allow non-blocking reads from a stream."""
# pylint: disable=too-few-public-methods
# Credits:
## https://gist.github.com/EyalAr/7915597
## https://stackoverflow.com/a/4896288
def __init__(self, stream):
self.stream = stream
self.queue = Queue()
def populate_queue(stream, queue):
"""Collect lines from stream and put them in queue."""
while not stream.closed:
try:
line = stream.read(1)
except ValueError:
# Assuming the stream was closed
line = None
if line:
queue.put(line)
self.thread = start_thread(
populate_queue,
args=(self.stream, self.queue),
)
def stop(self):
"""Stop reading from input stream."""
self.stream.close()
def read(self, timeout=None):
"""Read from queue if possible, returns item from queue."""
try:
return self.queue.get(block=timeout is not None, timeout=timeout)
except Empty:
return None
def save_to_file(self, proc, out_path):
"""Continuously save output to file while proc is running."""
LOG.debug('Saving process %s output to %s', proc, out_path)
while proc.poll() is None:
out = b''
out_bytes = b''
while out is not None:
out = self.read(0.1)
if out:
out_bytes += out
with open(out_path, 'a', encoding='utf-8') as _f:
_f.write(out_bytes.decode('utf-8', errors='ignore'))
# Close stream to prevent 100% CPU usage
self.stream.close()
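# Usage sketch (illustrative, not part of WizardKit itself):
#   proc = popen_program(['ping', '-c', '3', 'localhost'], pipe=True)
#   reader = NonBlockingStreamReader(proc.stdout)
#   chunk = reader.read(timeout=0.5)  # one unit of output from the queue, or None
#   reader.stop()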
# Functions
def build_cmd_kwargs(cmd, minimized=False, pipe=True, shell=False, **kwargs):
"""Build kwargs for use by subprocess functions, returns dict.
Specifically subprocess.run() and subprocess.Popen().
NOTE: If no encoding specified then UTF-8 will be used.
"""
LOG.debug(
'cmd: %s, minimized: %s, pipe: %s, shell: %s, kwargs: %s',
cmd, minimized, pipe, shell, kwargs,
)
cmd_kwargs = {
'args': cmd,
'shell': shell,
}
# Strip sudo if appropriate
if cmd[0] == 'sudo':
if os.name == 'posix' and os.geteuid() == 0: # pylint: disable=no-member
cmd.pop(0)
# Add additional kwargs if applicable
for key in 'check cwd encoding errors stderr stdin stdout'.split():
if key in kwargs:
cmd_kwargs[key] = kwargs[key]
# Default to UTF-8 encoding
if not ('encoding' in cmd_kwargs or 'errors' in cmd_kwargs):
cmd_kwargs['encoding'] = 'utf-8'
cmd_kwargs['errors'] = 'ignore'
# Start minimized
if minimized:
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = 6
cmd_kwargs['startupinfo'] = startupinfo
# Pipe output
if pipe:
cmd_kwargs['stderr'] = subprocess.PIPE
cmd_kwargs['stdout'] = subprocess.PIPE
# Done
LOG.debug('cmd_kwargs: %s', cmd_kwargs)
return cmd_kwargs
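# Example result (illustrative): build_cmd_kwargs(['ls', '-l']) returns
#   {'args': ['ls', '-l'], 'shell': False, 'encoding': 'utf-8',
#    'errors': 'ignore', 'stderr': subprocess.PIPE, 'stdout': subprocess.PIPE}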
def get_json_from_command(cmd, check=True, encoding='utf-8', errors='ignore'):
"""Capture JSON content from cmd output, returns dict.
If the data can't be decoded then either an exception is raised
or an empty dict is returned depending on errors.
"""
LOG.debug('Loading JSON data from cmd: %s', cmd)
json_data = {}
try:
proc = run_program(cmd, check=check, encoding=encoding, errors=errors)
json_data = json.loads(proc.stdout)
except (subprocess.CalledProcessError, json.decoder.JSONDecodeError):
if errors != 'ignore':
raise
return json_data
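# Example (illustrative; assumes lsblk is installed):
#   get_json_from_command(['lsblk', '--json'])  # -> parsed dict, or {} on bad JSON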
def get_procs(name, exact=True, try_again=True):
"""Get process object(s) based on name, returns list of proc objects."""
LOG.debug('name: %s, exact: %s', name, exact)
processes = []
regex = f'^{name}$' if exact else name
# Iterate over all processes
for proc in psutil.process_iter():
if re.search(regex, proc.name(), re.IGNORECASE):
processes.append(proc)
# Try again?
if not processes and try_again:
time.sleep(1)
processes = get_procs(name, exact, try_again=False)
# Done
return processes
def kill_procs(name, exact=True, force=False, timeout=30):
"""Kill all processes matching name (case-insensitively).
NOTE: Under Posix systems this will send SIGINT to allow processes
to gracefully exit.
If force is True then it will wait until timeout specified and then
send SIGKILL to any processes still alive.
"""
LOG.debug(
'name: %s, exact: %s, force: %s, timeout: %s',
name, exact, force, timeout,
)
target_procs = get_procs(name, exact=exact)
for proc in target_procs:
proc.terminate()
  # Force kill if necessary
if force:
results = psutil.wait_procs(target_procs, timeout=timeout)
for proc in results[1]: # Alive processes
proc.kill()
def popen_program(cmd, minimized=False, pipe=False, shell=False, **kwargs):
"""Run program and return a subprocess.Popen object."""
LOG.debug(
'cmd: %s, minimized: %s, pipe: %s, shell: %s',
cmd, minimized, pipe, shell,
)
LOG.debug('kwargs: %s', kwargs)
cmd_kwargs = build_cmd_kwargs(
cmd,
minimized=minimized,
pipe=pipe,
shell=shell,
**kwargs)
try:
# pylint: disable=consider-using-with
proc = subprocess.Popen(**cmd_kwargs)
except FileNotFoundError:
LOG.error('Command not found: %s', cmd)
raise
LOG.debug('proc: %s', proc)
# Done
return proc
def run_program(cmd, check=True, pipe=True, shell=False, **kwargs):
# pylint: disable=subprocess-run-check
"""Run program and return a subprocess.CompletedProcess object."""
LOG.debug(
'cmd: %s, check: %s, pipe: %s, shell: %s',
cmd, check, pipe, shell,
)
LOG.debug('kwargs: %s', kwargs)
cmd_kwargs = build_cmd_kwargs(
cmd,
check=check,
pipe=pipe,
shell=shell,
**kwargs)
try:
proc = subprocess.run(**cmd_kwargs)
except FileNotFoundError:
LOG.error('Command not found: %s', cmd)
raise
LOG.debug('proc: %s', proc)
# Done
return proc
def start_thread(function, args=None, daemon=True):
"""Run function as thread in background, returns Thread object."""
LOG.debug(
'Starting background thread for function: %s, args: %s, daemon: %s',
function, args, daemon,
)
args = args if args else []
thread = Thread(target=function, args=args, daemon=daemon)
thread.start()
return thread
def stop_process(proc, graceful=True):
"""Stop process.
NOTES: proc should be a subprocess.Popen obj.
If graceful is True then a SIGTERM is sent before SIGKILL.
"""
# Graceful exit
if graceful:
if os.name == 'posix' and os.geteuid() != 0: # pylint: disable=no-member
run_program(['sudo', 'kill', str(proc.pid)], check=False)
else:
proc.terminate()
time.sleep(2)
# Force exit
if os.name == 'posix' and os.geteuid() != 0: # pylint: disable=no-member
run_program(['sudo', 'kill', '-9', str(proc.pid)], check=False)
else:
proc.kill()
def wait_for_procs(name, exact=True, timeout=None):
"""Wait for all process matching name."""
LOG.debug('name: %s, exact: %s, timeout: %s', name, exact, timeout)
target_procs = get_procs(name, exact=exact)
procs = psutil.wait_procs(target_procs, timeout=timeout)
# Raise exception if necessary
if procs[1]: # Alive processes
raise psutil.TimeoutExpired(name=name, seconds=timeout)
if __name__ == '__main__':
print("This file is not meant to be called directly.")
| 26.797203 | 78 | 0.662578 | 1,059 | 7,664 | 4.716714 | 0.245515 | 0.034234 | 0.016817 | 0.013614 | 0.226026 | 0.191391 | 0.153554 | 0.126326 | 0.11031 | 0.11031 | 0 | 0.006956 | 0.212161 | 7,664 | 285 | 79 | 26.891228 | 0.820305 | 0.255219 | 0 | 0.261628 | 0 | 0.005814 | 0.125022 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081395 | false | 0 | 0.052326 | 0 | 0.186047 | 0.005814 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5738670ee63aa457dda6f798c0a759c27cefdc5 | 7,815 | py | Python | src/libnbnotify/plugins/bus.py | webnull/nbnotify | 54f7d0db0656053680466537aeba35f348147830 | [
"Python-2.0",
"OLDAP-2.7"
] | 1 | 2015-12-03T06:41:23.000Z | 2015-12-03T06:41:23.000Z | src/libnbnotify/plugins/bus.py | webnull/nbnotify | 54f7d0db0656053680466537aeba35f348147830 | [
"Python-2.0",
"OLDAP-2.7"
] | 2 | 2019-03-02T08:02:34.000Z | 2019-03-02T08:02:47.000Z | src/libnbnotify/plugins/bus.py | webnull/nbnotify | 54f7d0db0656053680466537aeba35f348147830 | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | #-*- coding: utf-8 -*-
import libnbnotify
import socket
import ssl
import json
import asyncore
import re
import sys
from threading import Thread
import string
import random
import os
import BaseHTTPServer, SimpleHTTPServer
PluginInfo = {'Requirements' : { 'OS' : 'All'}, 'API': 2, 'Authors': 'webnull', 'domain': '', 'type': 'extension', 'isPlugin': False, 'Description': 'Remote control throught sockets'}
app = ""
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
class SocketInterface(SimpleHTTPServer.SimpleHTTPRequestHandler):
""" Very simple socket interface """
def log_message(self, format, *args):
return False
def ping(self, data=''):
return "pong";
def getConfigAndEntries(self, data=''):
""" Returns all configuration variables and links """
return [self.app.configGetSection('links'), self.app.Config.Config]
def getAllEntries(self, data=''):
""" Returns all links from database """
return self.app.configGetSection('links')
def notifyNewData(self, data):
""" Create new notification from data """
content = data['data']
title = data['title']
icon = data['icon']
pageID = data['pageid']
self.app.notifyNewData(content, title, icon, pageID)
def configSetKey(self, data):
""" Set configuration key """
Section = data['section']
Option = data['option']
Value = data['value']
return self.app.configSetKey(Section, Option, Value)
def saveConfiguration(self, data=''):
""" Force save configuration to file """
return self.app.saveConfiguration()
def configGetSection(self, data):
""" Returns section as dictionary
Args:
Section - name of section of ini file ([section] header)
Returns:
Dictionary - on success
False - on false
"""
return self.app.configGetSection(data)
def configGetKey(self, data):
""" Returns value of Section->Value configuration variable
Args:
Section - name of section of ini file ([section] header)
Key - variable name
Returns:
False - when section or key does not exists
False - when value of variable is "false" or "False" or just False
string value - value of variable
"""
Section = data['section']
Key = data['key']
return self.app.configGetKey(Section, Key)
def addPage(self, link):
""" Add page to database, return True if added sucessfuly """
return self.app.addPage(link)
def setType(self, data):
""" Set specified extension to handle specified link
Return md5 hash of link on success
"""
Link = data['link']
Type = data['type']
return self.app.setType(Link, Type)
def removePage(self, pageID):
""" Remove page with specified pageID """
return self.app.removePage(pageID)
def loadCommentsFromDB(self, data=''):
""" Reload comments cache from SQLite database """
return self.app.loadCommentsFromDB()
def configCheckChanges(self, data=''):
""" Reload configuration if changed """
return self.app.configCheckChanges()
def togglePlugin(self, data):
""" Activate or deactivate plugin
Plugin - name of plugin
Toggle - True or False
"""
Plugin = data['name']
Toggle = data['toggle']
if Toggle == True:
return self.app.togglePlugin(Plugin, 'activate')
return self.app.togglePlugin(Plugin, 'deactivate')
def do_POST(self):
contentLen = int(self.headers.getheader('content-length'))
postBody = self.rfile.read(contentLen)
# response
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(self.handle_read(postBody))
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write("Hello world.")
def handle_read(self, data):
global app
self.app = app
if data:
if data == "ping":
return "pong"
            try:
                if not data:
                    # defensive guard; normally unreachable because of the check above
                    return "Error: Cannot parse HTTP request, empty request"
text = json.loads(data)
if text['function'] == "handle_read" or text['function'] == "__init__" or text['function'] == "httpRequestParser":
return "Error: Function not avaliable"
                if hasattr(self, text['function']):
                    # NOTE: dispatches via exec on client-supplied input; callers are
                    # trusted because the server binds to 127.0.0.1 by default.
                    exec("r = str(self."+text['function']+"(text['data']))")
else:
r = "Error: Function not found"
self.app.Logging.output("Socket::GET="+str(text['function'])+"&addr="+str(self.client_address[0]), "debug", False)
# send response
return json.dumps({'response': r})
except Exception as e:
self.app.Logging.output("SubgetSocketInterface: Cannot parse json data, is the client bugged? "+str(e), "warning", True)
return "Error: "+str(e)
class SocketServer:
""" Very simple connections listener """
host = "127.0.0.1"
port = 9954
def __init__(self, host, port):
self.host = host
self.port = port
def serve(self):
httpd = BaseHTTPServer.HTTPServer((self.host, self.port), SocketInterface)
httpd.serve_forever()
class PluginMain(libnbnotify.Plugin):
name = "bus"
host = "127.0.0.1"
port = 9954
bus = ""
def _pluginInit(self):
#self.initSSL()
global app
app = self.app
self.host = str(self.app.Config.getKey("bus_socket", "host", "127.0.0.1"))
if self.app.Config.getKey("bus_socket", "port") == False:
self.app.Config.setKey("bus_socket", "port", 9954)
else:
try:
self.port = int(self.app.Config.getKey("bus_socket", "port"))
except ValueError:
self.port = 9954
self.app.Config.setKey("bus_socket", "port", 9954)
if self.app.cli == False:
self.startServer()
return True
else:
return False
#def initSSL(self):
# path = os.path.expanduser("~/.nbnotify/ssl")
# create ssl directory
# if not os.path.isdir(path):
# os.mkdir(path)
# if not os.path.isfile(path+"/private.pem"):
# passwd = id_generator(size=32)
# self.app.Logging.output("Cannot find SSL cert, creating new one...", "debug", True)
# os.system("openssl genrsa -out "+path+"/private.pem 1024")
# os.system("openssl rsa -in "+path+"/private.pem -pubout > "+path+"/public.pem")
def startServer(self):
try:
self.app.Logging.output("Socket server is running on "+str(self.host)+":"+str(self.port), "debug", False)
self.bus = SocketServer(self.host, self.port)
self.thread = Thread(target=self.bus.serve)
self.thread.setDaemon(True)
self.thread.start()
except Exception as e:
self.app.Logging.output("Only one instance of nbnotify is allowed, "+str(e), "debug", False)
sys.exit(0)
| 29.490566 | 183 | 0.572105 | 863 | 7,815 | 5.144844 | 0.290846 | 0.044144 | 0.038063 | 0.022523 | 0.179279 | 0.136036 | 0.12973 | 0.09009 | 0.056757 | 0.056757 | 0 | 0.010277 | 0.302751 | 7,815 | 264 | 184 | 29.602273 | 0.804551 | 0.209853 | 0 | 0.191176 | 0 | 0 | 0.134382 | 0.003761 | 0 | 0 | 0 | 0 | 0 | 1 | 0.169118 | false | 0 | 0.088235 | 0.022059 | 0.492647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e574327e0656040a8425a4febb6293a932d17cd0 | 3,738 | py | Python | main.py | Sujay-Paul/Clara-Music-Bot-Telegram | deb4623185e2b6d09f55e65c4e738c49e22ee1dc | [
"MIT"
] | 1 | 2022-01-11T16:43:57.000Z | 2022-01-11T16:43:57.000Z | main.py | Sujay-Paul/Clara-Music-Bot-Telegram | deb4623185e2b6d09f55e65c4e738c49e22ee1dc | [
"MIT"
] | 1 | 2021-10-01T17:01:48.000Z | 2021-10-01T17:01:48.000Z | main.py | Sujay-Paul/Clara-Music-Bot-Telegram | deb4623185e2b6d09f55e65c4e738c49e22ee1dc | [
"MIT"
] | 1 | 2021-10-01T16:59:49.000Z | 2021-10-01T16:59:49.000Z | from pyrogram import Client, filters
from pyrogram.types import (
InlineKeyboardButton,
InlineKeyboardMarkup
)
import youtube_dl
from youtube_search import YoutubeSearch
import requests
import json
import os
import time
with open('./config.json', 'r') as config:
data = json.load(config)
bot_token = data['token']
api_id = data['api_id']
api_hash = data['api_hash']
bot = Client(
'Clara',
bot_token = bot_token,
api_id = api_id,
api_hash = api_hash
)
# Convert hh:mm:ss to seconds
def time_to_seconds(time):
stringt = str(time)
return sum(int(x) * 60 ** i for i, x in enumerate(reversed(stringt.split(':'))))
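# e.g. time_to_seconds('1:02:03') -> 3723 (illustrative check)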
@bot.on_message(filters.command(['start']))
def start(client, message):
help_text = f'👋 Hello @{message.from_user.username}\n I\'m Clara, developed by Shambo, I can download songs from YouTube. Type /a song name\n e.g - `/a tokyo drift`'
message.reply_text(
text=help_text,
quote=False,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton('Github', url='https://github.com/typhonshambo'),
]
]
)
)
@bot.on_message(filters.command(['a']))
def a(client, message):
    query = ' '.join(message.command[1:])
print(query)
m = message.reply('🔎 Searching the song...')
ydl_opts = {"format": "bestaudio[ext=m4a]"}
try:
results = []
count = 0
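        # retry the search a few times; YoutubeSearch occasionally returns an empty list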
while len(results) == 0 and count < 6:
            if count > 0:
                time.sleep(1)
results = YoutubeSearch(query, max_results=1).to_dict()
count += 1
try:
link = f"https://youtube.com{results[0]['url_suffix']}"
# print(results)
title = results[0]["title"]
thumbnail = results[0]["thumbnails"][0]
duration = results[0]["duration"]
            ## Duration limit in seconds; change 1800 (30 minutes) to your preferred cap and update the message below
if time_to_seconds(duration) >= 1800: # duration limit
m.edit("Exceeded video duration limit : 30 mins")
return
views = results[0]["views"]
thumb_name = f'thumb{message.message_id}.jpg'
thumb = requests.get(thumbnail, allow_redirects=True)
            with open(thumb_name, 'wb') as f:
                f.write(thumb.content)
except Exception as e:
print(e)
m.edit('Found nothing. Try changing the spelling a little.')
return
except Exception as e:
m.edit(
"✖️ Found Nothing. Sorry.\n\nTry another keywork or maybe spell it properly."
)
print(str(e))
return
m.edit("⏬ Downloading.")
try:
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(link, download=False)
audio_file = ydl.prepare_filename(info_dict)
ydl.process_info(info_dict)
rep = f'🎧 **Title**: [{title[:35]}]({link})\n⏳ **Duration**: `{duration}`\n👁🗨 **Views**: `{views}`'
        dur = time_to_seconds(duration)
message.reply_audio(audio_file, caption=rep, parse_mode='md',quote=False, title=title, duration=dur, thumb=thumb_name)
m.delete()
except Exception as e:
m.edit('❌ Error')
print(e)
try:
os.remove(audio_file)
os.remove(thumb_name)
except Exception as e:
print(e)
bot.run()
| 31.948718 | 169 | 0.581594 | 480 | 3,738 | 4.44375 | 0.391667 | 0.022504 | 0.03188 | 0.033755 | 0.108767 | 0.084388 | 0.040319 | 0.040319 | 0.040319 | 0 | 0 | 0.014667 | 0.288657 | 3,738 | 116 | 170 | 32.224138 | 0.783377 | 0.069288 | 0 | 0.142857 | 0 | 0 | 0.15985 | 0.024482 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030612 | false | 0 | 0.071429 | 0 | 0.142857 | 0.05102 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e575ae4d7ce1c8acba084bc5319b860ff622e79d | 12,371 | py | Python | main.py | dzinghan/Bouncing-Ball-Simulation | b476af0df40cdd76a9d1256d95de1393748e9edc | [
"MIT"
] | null | null | null | main.py | dzinghan/Bouncing-Ball-Simulation | b476af0df40cdd76a9d1256d95de1393748e9edc | [
"MIT"
] | null | null | null | main.py | dzinghan/Bouncing-Ball-Simulation | b476af0df40cdd76a9d1256d95de1393748e9edc | [
"MIT"
] | null | null | null | '''
Bouncing Ball Simulation
This is an implementation of a bouncing ball simulation using mainly the Tkinter library in Python.
It includes physics and mechanics concepts such as gravity, air resistance, and collisions.
Before the start of the simulation, the program prompts the user to enter a value for gravity and
air density. If you do not want to enter a value, please click Cancel or the window's exit button
and the default value will be applied (9.8 m/s^2 for gravity and 1.225 kg/m^3 for air density).
If a vacuum setting is preferred, please enter 0 in both windows.
by Jing Han Sun
Updated September 21, 2020
'''
import tkinter as tk
from tkinter import simpledialog
import random
import math
import sys
class Visual(tk.Tk):
    '''This is the main class that will run the simulation'''
#define width and height for window
HEIGHT = 500
WIDTH = 500
#define a list of colors for the balls
colors = ['#FF4325', '#E72020', #red
'#FF9333', #orange
'#FEFA5F', #yellow
'#89F45E', '#9DFFA7', '#278A2A', #green
'#6A8EFF', '#A8E5F9', '#1FFBF8', '#3253F4', '#2A438B', #blue
'#67419E', '#C280FF', '#E12FE1', '#F1BFFC', #purple
'#FCBFE9', '#FC22A0' #pink
]
def __init__(self, argv):
super().__init__()
#create canvas
self.canvas = tk.Canvas(self, width = self.WIDTH, height = self.HEIGHT, bg = 'white')
self.canvas.pack()
self.update()
#window title
self.title('Bouncing Balls')
#add label
self.label = tk.Label(self, text = 'Welcome!')
self.label.pack()
#add quit button
        self.button = tk.Button(self, text = "Quit", fg = 'red', command = self.quit)
self.button.configure(width = 10, activebackground = "#33B5E5", relief = tk.FLAT)
#self.button_window = self.canvas.create_window(10, 10, anchor = tk.NW , window = self.button)
self.button.pack()
self.update()
#create dictionary to store info about circles (radius, dir_x, dir_y)
self.circles_id = {}
# ask the user to enter a value for gravity
gravity = simpledialog.askfloat("Input", "Please enter a value for gravity (e.g.: 9.8)")
if gravity is None:
            # use Earth's surface gravity if no value is entered
gravity = 9.8
air_density = simpledialog.askfloat("Input", "Please enter a value for air density (e.g.: 1.225)")
if air_density is None:
            # use sea-level air density if no value is entered
air_density = 1.225
for i in range(6):
#set up a random radius
radius = random.randint(20, 30)
#set up a random initial center for each circle
cx = random.randint(radius + 10, self.WIDTH - radius - 10)
cy = random.randint(radius + 10, self.HEIGHT - radius - 10)
            #set up a random initial velocity for each circle
dir_x = random.randint(-10, 10)
dir_y = random.randint(-10, 10)
#create the circle
ids = self.canvas.create_oval(cx - radius, cy - radius,
cx + radius, cy + radius,
fill = random.choice(self.colors), outline = 'black')
#fill each list for each ball's characteristics
#circles_id = {ids: [radius, dir_x, dir_y]}
self.circles_id[ids] = [radius, dir_x, dir_y]
#boolean that returns true if 2 balls overlap
self.overlaps = False
#actual animation
while True:
self.move_circles()
#if it hits a wall
self.bounce()
self.collision()
self.gravity(gravity)
self.air_resistance(air_density)
def center(self, circle):
'''Get the center coordinates of a given ball'''
x0, y0, x1, y1 = self.canvas.coords(circle)
x = (x0 + x1) / 2
y = (y0 + y1) / 2
return x, y
def distance(self, circle1, circle2):
'''Get the distance between the center of 2 given balls'''
x1, y1 = self.center(circle1)
x2, y2 = self.center(circle2)
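        # e.g. centers at (0, 0) and (3, 4) give a distance of 5.0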
return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
def theta(self, x, y):
'''Get the angle in radians (between 0 and 2pi) of a ball's movement using its x and y directions'''
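        # e.g. theta(1, 1) == pi/4 and theta(-1, 0) == pi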
#first and fourth quadrant
if x > 0:
if y > 0:
return math.atan(y / x)
else:
return math.atan(y / x) + 2 * math.pi
#second and third quadrant
elif x < 0:
return math.atan(y / x) + math.pi
# x = 0 is undefined for arctan
else:
if y > 0:
return math.pi/2
else:
return 3 * math.pi/2
def overlap(self):
'''Return True if 2 balls overlap in the canvas'''
for circle1 in self.circles_id:
for circle2 in self.circles_id:
if circle1 != circle2 and \
self.distance(circle1, circle2) <= \
(self.circles_id.get(circle1)[0] + self.circles_id.get(circle2)[0]):
self.overlaps = True
return self.overlaps
def move_circles(self):
'''Movement of the balls in the frame using the generated direction for each ball'''
for i in self.circles_id:
dir_x = self.circles_id.get(i)[1]
dir_y = self.circles_id.get(i)[2]
self.canvas.move(i, dir_x, dir_y)
self.canvas.update()
def bounce(self):
'''When a ball hits one of the 4 borders of the window, it bounces off according to their initial hit angle'''
# x and y directions for a given ball
for i in self.circles_id:
dir_x = self.circles_id.get(i)[1]
dir_y = self.circles_id.get(i)[2]
#retrieve the initial coordinates of the ball
x0, y0, x1, y1 = self.canvas.coords(i)
#if it hits the left or right wall, reverse the x direction
if x0 <= 10 or x1 >= self.WIDTH - 10:
dir_x = -dir_x
# update the x direction in the direction list to continue moving
self.circles_id.get(i)[1] = dir_x
self.canvas.move(i, dir_x, dir_y)
self.canvas.update()
#if it hits the top or bottom wall, reverse the y direction
if y0 <= 10 or y1 >= self.HEIGHT - 10:
dir_y = -dir_y
#update the y direction in the direction list to continue moving
self.circles_id.get(i)[2] = dir_y
self.canvas.move(i, dir_x, dir_y)
self.canvas.update()
def collision(self):
'''Check for collisions between 2 balls in the canvas. When 2 balls collide, they will bounce away as an elastic
collision while conserving their momentum within the system involved'''
for circle1 in self.circles_id:
for circle2 in self.circles_id:
#check if the distance between 2 distinct balls is smaller than the sum of their radius
#if yes, it means collision
#give a bit of space for collision to avoid bug when overlapping
if -12 < self.distance(circle1, circle2) - \
(self.circles_id.get(circle1)[0] + self.circles_id.get(circle2)[0]) <= 0\
and circle1 != circle2:
#define initial x and y directions
x1 = self.circles_id.get(circle1)[1]
y1 = self.circles_id.get(circle1)[2]
x2 = self.circles_id.get(circle2)[1]
y2 = self.circles_id.get(circle2)[2]
                    #assume each ball's mass is proportional to its radius squared (its area, up to a factor of pi)
m1 = (self.circles_id.get(circle1)[0]) ** 2
m2 = (self.circles_id.get(circle2)[0]) ** 2
#define initial speeds using the x and y directions
v1 = math.sqrt(x1 ** 2 + y1 ** 2)
v2 = math.sqrt(x2 ** 2 + y2 ** 2)
#define initial movement angles
theta1 = self.theta(x1, y1)
theta2 = self.theta(x2, y2)
#define the contact angle of the balls right before collision
phi = theta2 - theta1
# pi = pf (conservation of momentum)
#calculate the final x and y velocities after the collision
#source for the formula: https://en.wikipedia.org/wiki/Elastic_collision
x1 = ((v1 * math.cos(theta1 - phi) * (m1 - m2)) + 2 * m2 * v2 * math.cos(theta2 - phi)) \
* (math.cos(phi) / (m1 + m2)) + v1 * math.sin(theta1 - phi) * math.cos(phi + math.pi/2)
y1 = ((v1 * math.cos(theta1 - phi) * (m1 - m2)) + 2 * m2 * v2 * math.cos(theta2 - phi)) \
* (math.sin(phi) / (m1 + m2)) + v1 * math.sin(theta1 - phi) * math.sin(phi + math.pi/2)
x2 = ((v2 * math.cos(theta2 - phi) * (m2 - m1)) + 2 * m1 * v1 * math.cos(theta1 - phi)) \
* (math.cos(phi) / (m1 + m2)) + v2 * math.sin(theta2 - phi) * math.cos(phi + math.pi/2)
y2 = ((v2 * math.cos(theta2 - phi) * (m2 - m1)) + 2 * m1 * v1 * math.cos(theta1 - phi)) \
* (math.sin(phi) / (m1 + m2)) + v2 * math.sin(theta2 - phi) * math.sin(phi + math.pi/2)
#update the circles dictionary to make them continue moving after the collision
self.circles_id.get(circle1)[1] = x1
self.circles_id.get(circle1)[2] = y1
self.circles_id.get(circle2)[1] = x2
self.circles_id.get(circle2)[2] = y2
self.canvas.move(circle1, x1, y1)
self.canvas.move(circle2, x2, y2)
self.canvas.update()
#avoid pushing the ball out of the canvas when the collision happens near the canvas border
self.bounce()
def gravity(self, a):
'''Adds some gravity to the balls which attracts them to the ground'''
for i in self.circles_id:
vy = self.circles_id.get(i)[2]
#kinematic equation: (vf = vi + a * t) to apply the acceleration to the velocity
vy = vy + a / 5
#update the y velocity after applying gravity
self.circles_id.get(i)[2] = vy
# avoid pushing the ball out of the canvas when the collision happens near the canvas border
self.bounce()
def air_resistance(self, air_density):
        '''Applies air resistance (drag) to the balls, opposing their motion and slowing them down'''
for i in self.circles_id:
vx = self.circles_id.get(i)[1]
vy = self.circles_id.get(i)[2]
m = (self.circles_id.get(i)[0]) ** 2 / 1000
            cd = 1.05 #drag coefficient of a cube (a sphere would be closer to 0.47)
area = (self.circles_id.get(i)[0] / 1000) ** 2 * math.pi
            #calculate the drag force, which opposes the direction of motion
            #source for the formula: https://www.softschools.com/formulas/physics/air_resistance_formula/85/
            fx = -(air_density * cd * area * vx * abs(vx)) / 2
            fy = -(air_density * cd * area * vy * abs(vy)) / 2
#calculate the acceleration
ax = fx / m
ay = fy / m
# kinematic equation: (vf = vi + a * t) to apply the acceleration to the velocity
vx = vx + ax / 5
vy = vy + ay / 5
            # update the velocities after applying drag
self.circles_id.get(i)[1] = vx
self.circles_id.get(i)[2] = vy
# avoid pushing the ball out of the canvas when the collision happens near the canvas border
self.bounce()
    def drag(self):
        #bind a callable (not the result of a call) so Tk invokes it on drag
        self.canvas.bind('<B1-Motion>', lambda event: self.move_circles())
if __name__ == '__main__':
Visual(sys.argv[1:]).mainloop()
| 38.780564 | 120 | 0.547167 | 1,660 | 12,371 | 4.019277 | 0.219277 | 0.052608 | 0.074041 | 0.067146 | 0.384742 | 0.346673 | 0.293615 | 0.279227 | 0.235911 | 0.218525 | 0 | 0.045098 | 0.351144 | 12,371 | 318 | 121 | 38.902516 | 0.786097 | 0.325681 | 0 | 0.209877 | 0 | 0 | 0.035906 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067901 | false | 0 | 0.030864 | 0 | 0.17284 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e575dbc2852de3f51f4fc99d4e2297f4d5034e48 | 1,935 | py | Python | examples/validation/core/06_vuetify_components.py | Kitware/trame | 41c4d62e7a6f5dba41fd9305b314c87fa8ed7b6f | [
"Apache-2.0"
] | 42 | 2021-09-24T22:10:32.000Z | 2022-03-30T19:39:25.000Z | examples/validation/core/06_vuetify_components.py | Kitware/trame | 41c4d62e7a6f5dba41fd9305b314c87fa8ed7b6f | [
"Apache-2.0"
] | 31 | 2021-10-01T21:19:56.000Z | 2022-03-04T00:14:28.000Z | examples/validation/core/06_vuetify_components.py | Kitware/trame | 41c4d62e7a6f5dba41fd9305b314c87fa8ed7b6f | [
"Apache-2.0"
] | 7 | 2021-11-17T16:12:06.000Z | 2022-03-26T21:08:40.000Z | from trame.app import get_server
from trame.widgets import vtk, trame, vuetify
from trame.ui.vuetify import SinglePageLayout
# -----------------------------------------------------------------------------
# Trame setup
# -----------------------------------------------------------------------------
server = get_server()
state, ctrl = server.state, server.controller
def reset_resolution():
state.resolution = 6
# -----------------------------------------------------------------------------
# UI setup
# -----------------------------------------------------------------------------
layout = SinglePageLayout(server)
with layout:
# Validate client life cycle
trame.LifeCycleMonitor(events=("['created']",))
layout.icon.click = ctrl.reset_camera
layout.title.set_text("Cone")
layout.toolbar.dense = True
# Toolbar
with layout.toolbar as toolbar:
vuetify.VSpacer()
vuetify.VSlider(
hide_details=True,
v_model=("resolution", 6),
max=60,
min=3,
step=1,
style="max-width: 300px;",
)
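        # v_model=("resolution", 6) binds the slider to the shared
        # "resolution" state variable, initialized server-side to 6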
vuetify.VSwitch(
hide_details=True,
v_model=("$vuetify.theme.dark",),
)
with vuetify.VBtn(icon=True, click=reset_resolution):
vuetify.VIcon("mdi-undo")
with layout.content:
with vuetify.VContainer(fluid=True, classes="pa-0 fill-height"):
with vtk.VtkView() as view:
ctrl.reset_camera = view.reset_camera
with vtk.VtkGeometryRepresentation():
vtk.VtkAlgorithm(
vtkClass="vtkConeSource", state=("{ resolution }",)
)
# -----------------------------------------------------------------------------
# start server
# -----------------------------------------------------------------------------
if __name__ == "__main__":
server.start()
| 29.769231 | 79 | 0.447028 | 155 | 1,935 | 5.451613 | 0.509677 | 0.031953 | 0.035503 | 0.03787 | 0.049704 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006702 | 0.228941 | 1,935 | 64 | 80 | 30.234375 | 0.559651 | 0.277003 | 0 | 0.051282 | 0 | 0 | 0.086518 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0 | 0.076923 | 0 | 0.102564 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5770a81b9b82cc0a7f14946858d355e97381b6c | 752 | py | Python | migrations/versions/4d998c6ec630_nobadges.py | Togohogo1/tag-dh | e6903a87b8e491d84d3dcee02912238e6a3cabbe | [
"MIT"
] | 4 | 2020-05-05T01:36:54.000Z | 2021-03-13T21:05:47.000Z | migrations/versions/4d998c6ec630_nobadges.py | Togohogo1/tag-dh | e6903a87b8e491d84d3dcee02912238e6a3cabbe | [
"MIT"
] | 1 | 2020-05-23T05:48:18.000Z | 2020-05-23T05:48:18.000Z | migrations/versions/4d998c6ec630_nobadges.py | Togohogo1/tag-dh | e6903a87b8e491d84d3dcee02912238e6a3cabbe | [
"MIT"
] | 1 | 2020-05-23T05:41:24.000Z | 2020-05-23T05:41:24.000Z | """nobadges
Revision ID: 4d998c6ec630
Revises: 7950a35f5dbd
Create Date: 2020-05-04 11:55:22.475532
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4d998c6ec630'
down_revision = '7950a35f5dbd'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
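    # batch mode recreates the table, which SQLite needs for DROP COLUMN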
with op.batch_alter_table('account') as batch_op:
batch_op.drop_column('badges')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('account') as batch_op:
batch_op.add_column(sa.Column('badges', sa.TEXT(), nullable=True))
# ### end Alembic commands ###
| 24.258065 | 74 | 0.695479 | 97 | 752 | 5.257732 | 0.536082 | 0.054902 | 0.082353 | 0.090196 | 0.337255 | 0.337255 | 0.337255 | 0.337255 | 0.337255 | 0.337255 | 0 | 0.081037 | 0.179521 | 752 | 30 | 75 | 25.066667 | 0.745543 | 0.385638 | 0 | 0.166667 | 0 | 0 | 0.117647 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e579720eabdbde95c5e282c52355449ab7cbf297 | 325 | bzl | Python | library.bzl | tintor/mono | 396edd39e45f536cac91b1fa6524f019244e4549 | [
"Apache-2.0"
] | 1 | 2020-09-27T05:07:20.000Z | 2020-09-27T05:07:20.000Z | library.bzl | tintor/mono | 396edd39e45f536cac91b1fa6524f019244e4549 | [
"Apache-2.0"
] | null | null | null | library.bzl | tintor/mono | 396edd39e45f536cac91b1fa6524f019244e4549 | [
"Apache-2.0"
] | null | null | null | def library(name, hdrs=[], srcs=[], deps=[], test_deps=[]):
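    # convenience macro: declares a cc_library plus a matching Catch-based cc_test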
native.cc_library(
name = name,
hdrs = [name + ".h"] + hdrs,
srcs = srcs,
deps = deps,
)
native.cc_test(
name = name + "_test",
srcs = [name + "_test.cc"],
deps = test_deps + [":" + name, "//:catch"],
args = ["-d=yes"],
)
| 21.666667 | 59 | 0.489231 | 39 | 325 | 3.923077 | 0.358974 | 0.143791 | 0.156863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.276923 | 325 | 14 | 60 | 23.214286 | 0.651064 | 0 | 0 | 0 | 0 | 0 | 0.092308 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e58014cfc6228afdc63430dc2ea3095af62a76a8 | 760 | py | Python | Dataset/Leetcode/test/53/155.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/test/53/155.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/test/53/155.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution(object):
def XXX(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def maxSub(arr,lo,hi):
if lo == hi:return arr[lo]
mid = (lo+hi) // 2
            # max subarray entirely within the left half
left = maxSub(arr,lo,mid)
            # max subarray entirely within the right half
right = maxSub(arr,mid+1,hi)
            # max subarray crossing the midpoint
leftMid,rightMid = float("-inf"),float("-inf")
tempL,tempR = 0,0
for i in range(mid,lo-1,-1):
tempL += arr[i]
leftMid = max(leftMid,tempL)
for i in range(mid+1,hi+1):
tempR += arr[i]
rightMid = max(rightMid,tempR)
return max(left,right,leftMid+rightMid)
return maxSub(nums,0,len(nums)-1)
| 28.148148 | 56 | 0.455263 | 95 | 760 | 3.642105 | 0.4 | 0.078035 | 0.063584 | 0.063584 | 0.080925 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022124 | 0.405263 | 760 | 26 | 57 | 29.230769 | 0.743363 | 0.061842 | 0 | 0 | 0 | 0 | 0.011799 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e58484894c5f43692fe7341a176dcb84807da1d8 | 467 | py | Python | asab/proactor/__init__.py | TeskaLabs/asab | f28894b62bad192d8d30df01a8ad1b842ee2a2fb | [
"BSD-3-Clause"
] | 23 | 2018-03-07T18:58:13.000Z | 2022-03-29T17:11:47.000Z | asab/proactor/__init__.py | TeskaLabs/asab | f28894b62bad192d8d30df01a8ad1b842ee2a2fb | [
"BSD-3-Clause"
] | 87 | 2018-04-04T19:44:13.000Z | 2022-03-31T11:18:00.000Z | asab/proactor/__init__.py | TeskaLabs/asab | f28894b62bad192d8d30df01a8ad1b842ee2a2fb | [
"BSD-3-Clause"
] | 10 | 2018-04-30T16:40:25.000Z | 2022-03-09T10:55:24.000Z | import logging
import asab
from .service import ProactorService
#
L = logging.getLogger(__name__)
#
asab.Config.add_defaults(
{
'asab:proactor': {
'max_workers': '0',
'default_executor': True,
}
}
)
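# a 'max_workers' of '0' presumably lets the thread pool use its default size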
class Module(asab.Module):
'''
Proactor pattern based on loop.run_in_executor()
https://en.wikipedia.org/wiki/Proactor_pattern
'''
def __init__(self, app):
super().__init__(app)
self.service = ProactorService(app, "asab.ProactorService")
| 14.59375 | 61 | 0.704497 | 56 | 467 | 5.553571 | 0.660714 | 0.096463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002532 | 0.154176 | 467 | 31 | 62 | 15.064516 | 0.78481 | 0.205567 | 0 | 0 | 0 | 0 | 0.170391 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.1875 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5851888f1c433217aef830313dc07ac613ce867 | 13,266 | py | Python | mywebsite/shop/views.py | Zadigo/ecommerce_template | a4572c3faeaeb9cd399351c0fd1f19a4ef94de27 | [
"MIT"
] | 16 | 2020-07-01T03:42:40.000Z | 2022-02-21T21:02:27.000Z | mywebsite/shop/views.py | Zadigo/ecommerce_template | a4572c3faeaeb9cd399351c0fd1f19a4ef94de27 | [
"MIT"
] | 14 | 2020-11-19T18:55:28.000Z | 2022-02-01T22:08:23.000Z | mywebsite/shop/views.py | Zadigo/ecommerce_template | a4572c3faeaeb9cd399351c0fd1f19a4ef94de27 | [
"MIT"
] | 7 | 2020-06-30T23:55:36.000Z | 2021-11-12T00:06:40.000Z | """
Conversion Tunnel
------
checkout > shipment > payment > success
Payment process
-------
1. On submitting the form, an AJAX request is
done using Stripe in order to get the token
2. An intermediate view is used afterwards to
process the payment on the backend side
3. If the payment was successful, a redirect is
done to the SuccessView
"""
import json
import random
from cart import models as cart_models
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core import cache, paginator
from django.db import transaction
from django.db.models.aggregates import Avg
from django.http.response import Http404, HttpResponseForbidden, JsonResponse
from django.shortcuts import get_object_or_404, redirect, render, reverse
from django.utils.decorators import method_decorator
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
from django.views.decorators.cache import cache_page, never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.views.generic import DetailView, ListView, TemplateView, View
from shop import models, serializers, sizes, tasks, utilities
def create_vue_products(queryset):
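    """Serialize products into plain dicts the Vue frontend can consume"""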
items = []
for product in queryset:
images = product.images
variant = product.variant
base = {
'id': product.id,
'reference': product.reference,
'url': product.get_absolute_url(),
'collection': {
'name': product.collection.name
},
'name': product.name,
'price': str(product.get_price()),
'main_image': product.get_main_image_url,
'images': list(images.values('id', 'name', 'url', 'web_url', 'variant', 'main_image')),
'variant': list(variant.values('id', 'name', 'verbose_name', 'in_stock', 'active')),
'in_stock': product.in_stock,
'our_favorite': product.our_favorite,
'is_discounted': product.is_discounted,
'price_pre_tax': str(product.price_pre_tax),
'discounted_price': str(product.discounted_price),
'slug': product.slug
}
items.append(base)
return items
@method_decorator(cache_page(60 * 30), name='dispatch')
class IndexView(View):
"""Base view for the website's shop"""
def get(self, request, *args, **kwargs):
return render(request, 'pages/shop.html')
@method_decorator(cache_page(60 * 15), name='dispatch')
class ShopGenderView(View):
"""Base view for discovering the website's shop
by category e.g. gender
"""
def get(self, request, *args, **kwargs):
context = {}
gender = kwargs.get('gender')
collections = models.Collection.objects.filter(
gender=gender.title()
)
if collections.exists():
context = {'collections': collections[:3]}
return render(request, 'pages/shop_gender.html', context)
class ProductsView(ListView):
"""Main product's page"""
model = models.Collection
template_name = 'pages/collections.html'
context_object_name = 'products'
paginate_by = 12
ordering = '-created_on'
def get_queryset(self, **kwargs):
view_name = self.kwargs.get('collection')
try:
collection = self.model.objects.get(
view_name__exact=view_name
)
except:
raise Http404("La collection n'existe pas")
else:
queryset = collection.product_set.filter(
active=True, private=False
)
category = self.request.GET.get('category', None)
if category is None:
return queryset
authorized_categories = ['all', 'promos', 'favorites']
if category in authorized_categories:
if category == 'all':
return queryset
elif category == 'promos':
return queryset.filter(discounted=True)
elif category == 'favorites':
return queryset.filter(our_favorite=True)
else:
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
products = self.get_queryset(**kwargs)
        # Mark which pagination number is active depending on the current page
        context['current_active_page'] = self.request.GET.get('page', 1)
        klass = super().get_paginator(products, self.paginate_by)
        # Serialized-product caching was removed: when switching to another
        # category, the previously cached products lingered and caused
        # stale results.
        # create_vue_products includes each product's url, main_image url
        # and related images for the Vue frontend.
        vue_products = create_vue_products(klass.object_list)
        context['vue_products'] = json.dumps(vue_products)
collection = self.model.objects.get(
view_name__exact=self.kwargs.get('collection'),
gender=self.kwargs.get('gender').title()
)
context['collection'] = collection
return context
@method_decorator(cache_page(60 * 15), name='dispatch')
class ProductView(DetailView):
"""View the details of a given product"""
model = models.Product
template_name = 'pages/product.html'
context_object_name = 'product'
def post(self, request, **kwargs):
data = {'state': False}
product = super().get_object()
# TODO: Add a method function that prevent
# triggering the rest of the method with
# any kinds of post requests
cart = cart_models.Cart.cart_manager.add_to_cart(request, product)
if cart:
data.update({'state': True})
else:
messages.error(
request,
"Une erreur s'est produite - ADD-CA",
extra_tags='alert-danger'
)
return JsonResponse(data=data)
def get_queryset(self, **kwargs):
queryset = self.model.objects.all()
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
product = super().get_object()
serialized_product = serializers.ProductSerializer(instance=product)
context['vue_product'] = serialized_product.data
suggested_products = self.model.objects\
.prefetch_related('images') \
.filter(active=True).exclude(id=product.id)[:3]
context['more'] = suggested_products
context['has_liked'] = False
if self.request.user.is_authenticated:
likes = models.Like.objects.filter(
product=product, user=self.request.user
)
if likes.exists():
context.update({'has_liked': True})
reviews = product.review_set.all()
context['reviews'] = reviews
context['reviews_avg'] = reviews.aggregate(Avg('rating'))
return context
@method_decorator(never_cache, name='dispatch')
class PreviewProductView(LoginRequiredMixin, DetailView):
"""
This is a custom view for previewing a product
in the semi-original context of the main product page
"""
model = models.Product
queryset = models.Product.objects.all()
template_name = 'pages/preview.html'
context_object_name = 'product'
http_method_names = ['get']
def get(self, request, *args, **kwargs):
content = super().get(request, *args, **kwargs)
if not request.user.is_admin:
return HttpResponseForbidden('You are not authorized on this page')
return content
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
product = super().get_object()
serialized_product = serializers.ProductSerializer(instance=product)
context['vue_product'] = serialized_product.data
return context
@method_decorator(cache_page(60 * 30), name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class PrivateProductView(DetailView):
"""
    This is a special custom view for viewing a product in a
    non-classified manner, one that does not appear in the
    urls of the main site -- this can be perfect for testing
    a product from a marketing perspective
"""
model = models.Product
queryset = models.Product.product_manager.private_products()
template_name = 'pages/product.html'
context_object_name = 'product'
def post(self, request, **kwargs):
product = super().get_object()
# TODO: Add a method function that prevent
# triggering the rest of the method with
# any kinds of post requests
cart = cart_models.Cart.cart_manager.add_to_cart(request, product)
if cart:
return JsonResponse(data={'success': 'success'})
else:
return JsonResponse(data={'failed': 'missing parameters'}, status=400)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
product = super().get_object()
serialized_product = serializers.ProductSerializer(instance=product)
context['vue_product'] = serialized_product.data
return context
class SearchView(ListView):
"""Main page for displaying product searches"""
model = models.Product
template_name = 'pages/search.html'
context_object_name = 'products'
paginate_by = 10
def get_queryset(self, **kwargs):
searched_item = self.request.GET.get('q')
if searched_item is None:
return []
return self.model.product_manager.search_product(searched_item)
def get_context_data(self, **kwargs):
products = self.get_queryset(**kwargs)
context = super().get_context_data(**kwargs)
klass = super().get_paginator(self.get_queryset(**kwargs), self.paginate_by)
serialized_products = serializers.ProductSerializer(instance=klass.object_list, many=True)
context['vue_products'] = serialized_products.data
# TODO
collections = ['tops', 'pantalons']
random_collection = random.choice(collections)
collection = models.Collection.objects.get(view_name=random_collection)
proposed_products = collection.product_set.all()[:4]
context['proposed_products'] = proposed_products
return context
@method_decorator(cache_page(60 * 60), name='dispatch')
class SizeGuideView(TemplateView):
"""View for providing the customer with information
on sizes etc."""
template_name = 'pages/size_guide.html'
@require_POST
@transaction.atomic
def add_like(request, **kwargs):
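    """Record a like on a product for an authenticated user"""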
data = {'state': False}
product = get_object_or_404(models.Product, id=kwargs['pk'])
if request.user.is_authenticated:
likes = product.like_set.filter(user=request.user)
if likes.exists():
return JsonResponse(data=data)
        product.like_set.create(user=request.user)
        data.update({'state': True})
else:
redirect_url = f"{reverse('accounts:login')}?next={product.get_absolute_url()}"
data.update({'redirect_url': redirect_url})
return JsonResponse(data=data)
@require_POST
def size_calculator(request, **kwargs):
"""Calcultes from customer's measurements
the correct size for him/her"""
bust = request.POST.get('bust')
chest = request.POST.get('chest')
    if bust is None or chest is None:
return JsonResponse(data={'state': False})
bust = int(bust)
chest = int(chest)
calculator = sizes.BraCalculator(bust, chest)
data = {
'state': True,
'result': calculator.get_full_bra_size,
'size': calculator.size,
'cup': calculator.cup
}
return JsonResponse(data=data)
@require_POST
@transaction.atomic
def add_review(request, **kwargs):
data = {
'state': False,
'message': "L'avis n'a pas pu être créé"
}
score = request.POST.get('score')
text = request.POST.get('text')
if request.user.is_authenticated:
product = get_object_or_404(models.Product, id=kwargs.get('pk'))
review = product.review_set.create(
user=request.user,
text=text,
rating=score
)
data.update({
'state': True,
'message': "Votre avis a été créé"
})
return JsonResponse(data=data)
| 34.546875 | 99 | 0.635761 | 1,509 | 13,266 | 5.45063 | 0.224652 | 0.017021 | 0.017021 | 0.01459 | 0.349787 | 0.304681 | 0.230881 | 0.210456 | 0.190517 | 0.169362 | 0 | 0.005388 | 0.25848 | 13,266 | 383 | 100 | 34.637076 | 0.830741 | 0.144957 | 0 | 0.332046 | 0 | 0 | 0.095558 | 0.011284 | 0 | 0 | 0 | 0.005222 | 0 | 1 | 0.065637 | false | 0 | 0.069498 | 0.003861 | 0.351351 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e585e2842a0d58243451c36eb2f3bb53a795288e | 245 | py | Python | docker/dataset/stock_analysis/run.py | jojees/operations | bb1a242efbbf56c9afbe4b9e4b5aa14218720e2b | [
"MIT"
] | null | null | null | docker/dataset/stock_analysis/run.py | jojees/operations | bb1a242efbbf56c9afbe4b9e4b5aa14218720e2b | [
"MIT"
] | 2 | 2019-09-22T11:24:19.000Z | 2019-09-22T11:38:49.000Z | docker/dataset/stock_analysis/run.py | jojees/operations | bb1a242efbbf56c9afbe4b9e4b5aa14218720e2b | [
"MIT"
] | null | null | null | """Application entry point."""
from webapp import init_app
app = init_app()
# Using a development configuration
app.config.from_object('config.DevConfig')
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=False) | 22.272727 | 42 | 0.722449 | 36 | 245 | 4.611111 | 0.666667 | 0.036145 | 0.036145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018605 | 0.122449 | 245 | 11 | 43 | 22.272727 | 0.753488 | 0.314286 | 0 | 0 | 0 | 0 | 0.191358 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e585fd72caa5f846d1fe6076b952b19c250c6439 | 19,169 | py | Python | utils/yolo_utils.py | Dishoungh/martrec | 74d0cffa3c046509017e1fd121a474ee5b50a194 | [
"MIT"
] | null | null | null | utils/yolo_utils.py | Dishoungh/martrec | 74d0cffa3c046509017e1fd121a474ee5b50a194 | [
"MIT"
] | 1 | 2021-01-28T16:57:41.000Z | 2021-01-28T18:13:34.000Z | utils/yolo_utils.py | Dishoungh/martrec | 74d0cffa3c046509017e1fd121a474ee5b50a194 | [
"MIT"
] | null | null | null | import numpy as np
import cv2 as cv
import time
import os
import sys
import multiprocessing
def init(labelfile, config, weights):
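    """Load the class labels, per-class colors, the Darknet model, and its output layer names"""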
# Get the labels
labels = open(labelfile).read().strip().split('\n')
# Initializing colors to represent each label uniquely
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')
# Load the weights and configuration to form the pretrained YOLOv3 model
net = cv.dnn.readNetFromDarknet(config, weights)
# Get the output layer names of the model
layer_names = net.getLayerNames()
layer_names = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
return labels, colors, net, layer_names
def parse_input_path(input_path):
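    """Collect the image and video filenames found directly under input_path"""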
data = []
# Get everything from input path
for path in os.listdir(input_path):
        if ('.png' in path) or ('.jpg' in path) or ('.jpeg' in path) or ('.mp4' in path) or ('.avi' in path):
data.append(path)
return data
def start_yolo_process(args):
fileslist = parse_input_path(args.input_path)
processes = []
tag = []
# Parse through Input Data Folder
for idx, file in enumerate(fileslist):
pid = 0
while ((pid < multiprocessing.cpu_count()) and (idx < len(fileslist))):
if ((idx + pid) < len(fileslist)):
# Create Processes
try:
tFile = fileslist[idx+pid]
in_path = str(args.input_path + tFile)
processed_path = str(args.processed_folder + tFile)
arguments = (tFile,
in_path,
processed_path,
tFile[:tFile.find('.')],
args.labels,
args.config,
args.weights,
args.output_path,
args.delay_time,
args.save_video,
args.option,
args.video_output_path,
args.confidence,
args.threshold,
pid,
False,
None)
process = multiprocessing.Process(target=yolo_process, args=arguments)
if in_path not in tag:
processes.append(process)
tag.append(in_path)
except Exception as err:
print("[ERROR] {e}".format(e=err))
pid += 1
# Execute Processes
for process in processes:
try:
process.start()
except Exception as err:
print("[ERROR] {e}".format(e=err))
for process in processes:
try:
process.join()
except Exception as err:
print("[ERROR] {e}".format(e=err))
processes.clear()
def yolo_process(file, file_path, done_path, output_name, labels, config, weights, save_path, delay_time, save_video, option, video_output_path, confidence, threshold, process_id, gui, gui_obj):
image_path = None
video_path = None
if ('.png' in file_path) or ('.jpg' in file_path) or ('.jpeg' in file_path):
image_path = file_path
if ('.mp4' in file_path) or ('.avi' in file_path):
video_path = file_path
# Initialize labels, colors, and pretrain model
try:
labels, colors, net, layer_names = init(labels,
config,
weights)
except Exception as err:
print("[ERROR] {e}".format(e=err))
# If both image and video files are given then raise error
    if image_path is None and video_path is None:
        print('[WARNING] Neither an image path nor a video path was provided. Starting inference on webcam...')
# Do inference with given image
if image_path:
print('[INFO] Starting image processing of {ip}...'.format(ip=str(image_path)))
if not os.path.exists(image_path):
print("[ERROR] Image path does not exist. Exiting...")
sys.exit()
# Read the image
try:
img = cv.imread(image_path)
height, width = img.shape[:2]
except:
raise Exception('[ERROR] Image cannot be loaded!\n'
'Please check the path provided!')
        else:
img, _, _, _, _, _, _, _, _ = infer_image(net, layer_names, height, width, img, colors, labels, confidence, threshold)
save_image(img, output_name, save_path)
os.rename(file_path, done_path)
elif video_path:
print('[INFO] Starting video processing of {vp}...'.format(vp=str(video_path)))
if output_name is None:
print("[ERROR] No output name specified. Exiting...")
sys.exit()
if not os.path.exists(video_path):
print("[ERROR] Video path does not exist. Exiting...")
sys.exit()
# Read the video
try:
vid = cv.VideoCapture(video_path)
boxHeight, boxWidth = 0, 0
height, width = None, None
writer = None
except:
raise Exception('[ERROR] Video cannot be loaded!\n'
'Please check the path provided!')
        else:
timings = np.array([])
# Will attempt to count the number of frames in the video,
# This is dependent on the OpenCV version
try:
total = int(vid.get(cv.CAP_PROP_FRAME_COUNT))
except:
try:
total = int(vid.get(cv.CV_CAP_PROP_FRAME_COUNT))
except:
print("[WARNING] Have to count frames manually. This might take a while...")
total = count_frames_manual(vid)
print("[SUCCESS] Count complete...")
delay = delay_time
num_images = 0
# Scan each frame in video
while True:
grabbed, raw_frame = vid.read()
try:
labeled_frame = raw_frame.copy()
except:
labeled_frame = None
# Checking if the complete video is read
if not grabbed:
break
if width is None or height is None:
height, width = labeled_frame.shape[:2]
if writer is None and save_video is True:
# Initialize the video writer
fourcc = cv.VideoWriter_fourcc(*"MJPG")
writer = cv.VideoWriter(video_output_path, fourcc, 30,
(labeled_frame.shape[1], labeled_frame.shape[0]), True)
# Time frame inference and show progress
start = time.time()
if delay <= 0 and labeled_frame is not None:
labeled_frame, _, _, classids, _, xPos, yPos, boxWidth, boxHeight = infer_image(net,
layer_names,
height,
width,
labeled_frame,
colors,
labels,
confidence,
threshold)
try:
obj = labels[classids[0]]
except:
obj = None
# Descriptions of a typical freight truck
if (((obj == 'truck') and (boxWidth >= (boxHeight * 1.5))
and (boxHeight >= 0.4 * height)
and (boxWidth >= 0.7 * width))):
# Extract Timestamp from Video (TODO: Explore with this: https://www.geeksforgeeks.org/text-detection-and-extraction-using-opencv-and-ocr/)
try:
modified_name = output_name + ('_{time}'.format(time=str(int(vid.get(cv.CAP_PROP_POS_MSEC)))))
# print(modified_name)
except:
# print("[ERROR] Failed to get timestamp of video")
modified_name = output_name + '_?'
#report_image_attributes(modified_name, xPos, boxWidth, boxHeight, width, height)
if (option == 0) or (option == 2): # Save raw image
save_image(raw_frame, modified_name, save_path, True)
num_images += 1
if (option == 1) or (option == 2): # Save labeled image
save_image(labeled_frame, modified_name, save_path, False)
num_images += 1
if (option == 3): # Save Collage
try:
collage_name = str(modified_name + "_collage.png")
primary = raw_frame
# Capture secondary frame (10 frames over)
for i in range(10):
_, secondary = vid.read()
# Put two images vertically on a collage
save_image(np.vstack([primary, secondary]), collage_name, save_path, True)
num_images += 1
except Exception as err:
print("[ERROR] {e}".format(e=err))
delay = delay_time
delay -= 1
if save_video is True:
writer.write(labeled_frame)
end = time.time()
timings = np.append(timings, (end - start))
show_progress_bar(timings.size, total, num_images, np.average(timings), output_name, process_id)
# Return progress bar value
if gui is True:
gui_obj.bar['value'] = (timings.size / total) * 100
gui_obj.bar.update_idletasks()
# End process
print("\n[INFO] Cleaning up...")
if writer is not None:
writer.release()
vid.release()
os.rename(file_path, done_path)
else:
# Infer real-time on webcam
count = 0
vid = cv.VideoCapture(0)
while True:
_, frame = vid.read()
height, width = frame.shape[:2]
if count == 0:
frame, boxes, confidences, classids, index, _, _, _, _ = infer_image(net,
layer_names,
height,
width,
frame,
colors,
labels,
confidence,
threshold)
count += 1
else:
frame, boxes, confidences, classids, index, _, _, _, _ = infer_image(net,
layer_names,
height,
width,
frame,
colors,
labels,
confidence,
threshold,
boxes,
confidences,
classids,
index,
infer=False)
count = (count + 1) % 6
cv.imshow('webcam', frame)
if cv.waitKey(1) & 0xFF == ord('q'):
break
vid.release()
cv.destroyAllWindows()
print("[SUCCESS] Image Processing Complete...")
def report_image_attributes(modified_name, xPos, boxWidth, boxHeight, width, height):
print("Name: {n}".format(n=modified_name))
print("X Position: {x}".format(x=xPos))
print("BoxWidth: {bw}".format(bw=boxWidth))
print("BoxHeight: {bh}".format(bh=boxHeight))
print("Image Width: {iw}".format(iw=width))
print("Image Height: {ih}\n\n".format(ih=height))
def save_image(img, output_name, save_path, raw=False):
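    """Write img to save_path under a unique, numbered filename"""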
num = 1
while True:
if raw is True:
filename = '{s}{o}_{n}_raw.png'.format(s=save_path, o=output_name, n=num)
else:
filename = '{s}{o}_{n}_labeled.png'.format(s=save_path, o=output_name, n=num)
if os.path.isfile(filename):
num += 1
else:
cv.imwrite(filename, img)
break
def draw_labels_and_boxes(img, boxes, confidences, classids, idxs, colors, labels):
# If there are any detections
x, y, w, h = 0, 0, 0, 0
if len(idxs) > 0:
for i in idxs.flatten():
# Get the bounding box coordinates
x, y = boxes[i][0], boxes[i][1]
w, h = boxes[i][2], boxes[i][3]
# Get the unique color for this class
color = [int(c) for c in colors[classids[i]]]
# Draw the bounding box rectangle and label on the image
cv.rectangle(img, (x, y), (x + w, y + h), color, 2)
text = "{}: {:4f}".format(labels[classids[i]], confidences[i])
cv.putText(img, text, (x, y - 5), cv.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
return img, x, y, w, h
def generate_boxes_confidences_classids(outs, height, width, tconf):
boxes = []
confidences = []
classids = []
for out in outs:
for detection in out:
# Get the scores, class ID, and the confidence of the prediction
scores = detection[5:]
classid = np.argmax(scores)
confidence = scores[classid]
# Consider only the predictions that are above a certain confidence level
if confidence > tconf:
box = detection[0:4] * np.array([width, height, width, height])
centerX, centerY, bwidth, bheight = box.astype('int')
# Using the center x, y coordinates to derive the top
# and the left corner of the bounding box
x = int(centerX - (bwidth / 2))
y = int(centerY - (bheight / 2))
# Append to list
boxes.append([x, y, int(bwidth), int(bheight)])
confidences.append(float(confidence))
classids.append(classid)
return boxes, confidences, classids
def infer_image(net, layer_names, height, width, img, colors, labels, confidence, threshold,
boxes=None, confidences=None, classids=None, idxs=None, infer=True):
if infer:
# Constructing a blob from the input image
blob = cv.dnn.blobFromImage(img, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
# Perform a forward pass of the YOLO object detector
net.setInput(blob)
# Getting the outputs from the output layers
outs = net.forward(layer_names)
# Generate the boxes, confidences, and classIDs
boxes, confidences, classids = generate_boxes_confidences_classids(outs, height, width, confidence)
# Apply Non-Maxima Suppression to suppress overlapping bounding boxes
idxs = cv.dnn.NMSBoxes(boxes, confidences, confidence, threshold)
if boxes is None or confidences is None or idxs is None or classids is None:
raise Exception('[ERROR] Required variables are set to None before drawing boxes on images.')
# Draw labels and boxes on the image
img, x, y, w, h = draw_labels_and_boxes(img, boxes, confidences, classids, idxs, colors, labels)
return img, boxes, confidences, classids, idxs, x, y, w, h
def show_progress_bar(count, total, num_images, diff, name, pid, status=''):
bar_length = 40
filled_length = int(round(bar_length * count / float(total)))
percentage = round(100.0 * count / float(total), 1)
bar = '=' * filled_length + '-' * (bar_length - filled_length)
sec_left = diff * (total - count)
sys.stdout.write("%s[%s] %s%s (%s) %s ...%s\r\n" % ('{p}:'.format(p=name),
str(bar),
str(percentage),
'%',
time.strftime('%Hh, %Mm, %Ss', time.gmtime(sec_left)),
'[{i}]'.format(i=num_images), status))
#sys.stdout.flush()
def count_frames_manual(video):
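    """Count frames by reading the video to the end (slow fallback)"""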
total = 0
while True:
grabbed, frame = video.read()
if not grabbed:
break
total += 1
video.release()
return total
| 42.787946 | 194 | 0.440816 | 1,807 | 19,169 | 4.559491 | 0.210293 | 0.025246 | 0.032043 | 0.012137 | 0.220658 | 0.181818 | 0.154995 | 0.125865 | 0.125865 | 0.116883 | 0 | 0.009754 | 0.475873 | 19,169 | 447 | 195 | 42.883669 | 0.810292 | 0.098284 | 0 | 0.271566 | 0 | 0 | 0.05883 | 0.001276 | 0 | 0 | 0.000232 | 0.002237 | 0 | 1 | 0.035144 | false | 0 | 0.019169 | 0 | 0.073482 | 0.067093 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5882538b7bda731c247750333cac651bac62825 | 775 | py | Python | python_pyxel/pyxel1.py | Perceu/tiktok | c3da4d0a6300867737c1574a552100bdf5eed10f | [
"MIT"
] | null | null | null | python_pyxel/pyxel1.py | Perceu/tiktok | c3da4d0a6300867737c1574a552100bdf5eed10f | [
"MIT"
] | null | null | null | python_pyxel/pyxel1.py | Perceu/tiktok | c3da4d0a6300867737c1574a552100bdf5eed10f | [
"MIT"
] | null | null | null | import pyxel
from random import randint
class App:
def __init__(self):
width, height = 720, 1280
pyxel.init(width, height)
self.raio = 10
self.color = 1
self.position_x = int(width/2)
self.position_y = int(height/2)
pyxel.run(self.update, self.draw)
def update(self):
if pyxel.btnp(pyxel.KEY_Q):
pyxel.quit()
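        # grow the radius each frame (wrapping via modulo); when it wraps back
        # small, respawn at a random position; the color cycles every frame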
self.raio = (self.raio + 10) % pyxel.width
if self.raio <= 10:
self.position_x = randint(200,500)
self.position_y = randint(200,1000)
self.color = (self.color + 1) % 15
def draw(self):
pyxel.cls(0)
pyxel.circb(self.position_x, self.position_y, self.raio, self.color)
App() | 23.484848 | 76 | 0.575484 | 106 | 775 | 4.103774 | 0.367925 | 0.165517 | 0.068966 | 0.064368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.061682 | 0.309677 | 775 | 33 | 77 | 23.484848 | 0.751402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.291667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5893301e5d8b6496d1b088428d496d181d86dbc | 2,386 | py | Python | get_apis.py | Pemacope/Assessment_3 | 37591a8c2245b0d64dcb1b75326a7a82de45480f | [
"Unlicense"
] | null | null | null | get_apis.py | Pemacope/Assessment_3 | 37591a8c2245b0d64dcb1b75326a7a82de45480f | [
"Unlicense"
] | null | null | null | get_apis.py | Pemacope/Assessment_3 | 37591a8c2245b0d64dcb1b75326a7a82de45480f | [
"Unlicense"
] | null | null | null | from uk_covid19 import Cov19API
import geocoder
import logging
import requests
import json
logging.basicConfig(filename = "sys.log", encoding = 'utf-8')
#get_location function
def get_location():
"""This function gets the location of the user"""
current_location_data = geocoder.ip('me')
return current_location_data.city
#get news function
def get_news() -> None:
"""Getting data from news api"""
#Data request from the api
base_url = "https://newsapi.org/v2/top-headlines?"
with open('config.json', 'r') as config_file:
temp = json.load(config_file)
api_key = temp["keys"]["news_key"]
country = "gb"
complete_url = base_url + "country=" + country + "&apiKey=" + api_key
response = requests.get(complete_url, timeout = 10)
    if response.status_code >= 400:
logging.info('News request failed')
#store news in file
with open('news.json', 'w') as news_file:
json.dump(response.json(), news_file)
#get weather function
def get_weather() -> None:
"""Getting data from weather API"""
base_url = "http://api.openweathermap.org/data/2.5/weather?"
with open('config.json', 'r') as config_file:
temp = json.load(config_file)
api_key = temp["keys"]["weather_key"]
city_name = get_location()
complete_url = base_url + "appid=" + api_key + "&q=" + city_name
response = requests.get(complete_url, timeout = 10)
if response.status_code >= 400:
logging.info('Weather request failed')
#store weather data in file
with open('weather.json', 'w') as weather_file:
json.dump(response.json(), weather_file)
#get uk covid numbers
def get_covid() -> None:
"""Getting data from uk covid api"""
city_name = get_location()
local_only = [
'areaName={}'.format(city_name)
]
data = {
"date": "date",
"areaName": "areaName",
"newCasesByPublishDate": "newCasesByPublishDate"
}
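    # Cov19API returns only the fields named in 'structure' for the filtered area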
api = Cov19API(filters = local_only, structure = data)
covid_data = api.get_json()
#store covid data in file
with open('public_health_england.json', 'w') as covid_file:
json.dump(covid_data, covid_file)
| 28.404762 | 74 | 0.602263 | 291 | 2,386 | 4.769759 | 0.316151 | 0.028818 | 0.030259 | 0.041066 | 0.262248 | 0.201729 | 0.201729 | 0.201729 | 0.201729 | 0.201729 | 0 | 0.011608 | 0.277871 | 2,386 | 83 | 75 | 28.746988 | 0.793964 | 0.126991 | 0 | 0.170213 | 0 | 0 | 0.174442 | 0.034483 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0.106383 | 0 | 0.212766 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e58ccf5e4c1a6de4e6e01e0244878e63b72d84c5 | 5,522 | py | Python | benchmark/how_lineage_benchmark.py | ZhuofanXie/DataTracer | 718f58ca87f297e7541c910a53ca8dde8ed7b66e | [
"MIT"
] | null | null | null | benchmark/how_lineage_benchmark.py | ZhuofanXie/DataTracer | 718f58ca87f297e7541c910a53ca8dde8ed7b66e | [
"MIT"
] | null | null | null | benchmark/how_lineage_benchmark.py | ZhuofanXie/DataTracer | 718f58ca87f297e7541c910a53ca8dde8ed7b66e | [
"MIT"
] | null | null | null | import time
from time import time
import dask
import pandas as pd
from dask.diagnostics import ProgressBar
import datatracer
def transform_single_column(tables, column_info):
aggregation = column_info['aggregation']
column_name = column_info['source_col']['col_name']
fk = column_info['row_map']
if aggregation:
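        # NOTE: aggregation/transformation strings are eval'd as Python, so
        # they must come from a trusted source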
transformer = eval(aggregation)
return transformer(tables, fk, column_name)
else:
return tables[column_info['source_col']['table_name']][column_name].fillna(0.0).values
def produce_target_column(tables, map_info):
transformation = map_info['transformation']
if transformation:
transformed_columns = []
for col_info in map_info['lineage_columns']:
transformed_columns.append(transform_single_column(tables, col_info))
transformer = eval(transformation)
return transformer(transformed_columns)
else:
return None
def approx_equal(num, target, add_margin, multi_margin):
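    # e.g. approx_equal(1.0000001, 1.0, 1e-8, 1e-6) -> True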
if target >= 0:
return (num <= target * (1 + multi_margin) + add_margin) and (num >=
target * (1 - multi_margin) - add_margin)
else:
return (num <= target * (1 - multi_margin) + add_margin) and (num >=
target * (1 + multi_margin) - add_margin)
def approx_equal_arrays(num, target, add_margin, multi_margin):
for n, t in zip(num, target):
if not approx_equal(n, t, add_margin, multi_margin):
return False
return True
@dask.delayed
def evaluate_single_lineage(constraint, tracer, tables):
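    """Evaluate one lineage constraint: compare predicted source columns to
    ground truth and, on a match, reproduce the target column"""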
field = constraint["fields_under_consideration"][0]
related_fields = constraint["related_fields"]
y_true = set()
for related_field in related_fields:
y_true.add((related_field["table"], related_field["field"]))
try:
start = time()
ret_dict = tracer.solve(tables, target_table=field["table"], target_field=field["field"])
y_pred = {(col['source_col']['table_name'], col['source_col']['col_name'])
for col in ret_dict['lineage_columns']}
end = time()
except BaseException:
return {
"table": field["table"],
"field": field["field"],
"precision": 0,
"inference_time": 0,
"status": "ERROR",
}
if len(y_pred) == len(y_true) and \
len(y_true.intersection(y_pred)) == len(y_pred):
predicted_target = produce_target_column(tables, ret_dict)
target_column = tables[field["table"]][field["field"]].fillna(0.0).values
if approx_equal_arrays(predicted_target, target_column, 1e-8, 1e-8):
precision = 1
else:
precision = 0
else:
precision = 0
return {
"table": field["table"],
"field": field["field"],
"precision": precision,
"inference_time": end - start,
"status": "OK",
}
@dask.delayed
def how_lineage(solver, target, datasets):
"""Benchmark the how lineage solver on the target dataset.
Args:
solver: The name of the how lineage pipeline.
target: The name of the target dataset.
        datasets: A dictionary mapping dataset names to (metadata, tables) tuples.
Returns:
A list of dictionaries mapping metric names to values for each deived column.
"""
datasets = datasets.copy()
metadata, tables = datasets.pop(target)
if not metadata.data.get("constraints"):
return {} # Skip dataset, no constraints found.
tracer = datatracer.DataTracer(solver)
tracer.fit(datasets)
list_of_metrics = []
for constraint in metadata.data["constraints"]:
list_of_metrics.append(evaluate_single_lineage(constraint, tracer, tables))
list_of_metrics = dask.compute(list_of_metrics)[0]
return list_of_metrics
def benchmark_how_lineage(data_dir, dataset_name=None, solver="datatracer.how_lineage.basic"):
"""Benchmark the how lineage solver.
This uses leave-one-out validation and evaluates the performance of the
solver on the specified datasets.
Args:
data_dir: The directory containing the datasets.
dataset_name: The target dataset to test on. If none is provided, will test on all available datasets by default.
solver: The name of the column map pipeline.
Returns:
        A DataFrame containing the benchmark results.
"""
datasets = datatracer.load_datasets(data_dir)
dataset_names = list(datasets.keys())
if dataset_name is not None:
if dataset_name in dataset_names:
dataset_names = [dataset_name]
else:
return None
datasets = dask.delayed(datasets)
dataset_to_metrics = {}
for dataset_name in dataset_names:
dataset_to_metrics[dataset_name] = how_lineage(
solver=solver, target=dataset_name, datasets=datasets)
rows = []
with ProgressBar():
results = dask.compute(dataset_to_metrics)[0]
for dataset_name, list_of_metrics in results.items():
for metrics in list_of_metrics:
metrics["dataset"] = dataset_name
rows.append(metrics)
df = pd.DataFrame(rows)
dataset_col = df.pop('dataset')
table_col = df.pop('table')
field_col = df.pop('field')
df.insert(0, 'field', field_col)
df.insert(0, 'table', table_col)
df.insert(0, 'dataset', dataset_col)
return df
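# Hedged usage sketch (illustrative paths and names; assumes ``data_dir`` is
# laid out the way datatracer.load_datasets expects):
#     results = benchmark_how_lineage('datasets/', dataset_name='some_dataset')
#     print(results[['dataset', 'table', 'field', 'precision']])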
| 33.877301 | 121 | 0.641434 | 672 | 5,522 | 5.06994 | 0.227679 | 0.032286 | 0.02671 | 0.017611 | 0.154975 | 0.127972 | 0.066921 | 0.066921 | 0.040505 | 0.040505 | 0 | 0.005854 | 0.257515 | 5,522 | 162 | 122 | 34.08642 | 0.825122 | 0.143064 | 0 | 0.155172 | 0 | 0 | 0.086788 | 0.0116 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060345 | false | 0 | 0.051724 | 0 | 0.232759 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e58f4679796a04818a079751cf89d2d05f6670ee | 2,132 | py | Python | admin.py | dikshith/allcode | b5563f9d9f1839c50396a2d4de70aac5bceb318f | [
"MIT"
] | null | null | null | admin.py | dikshith/allcode | b5563f9d9f1839c50396a2d4de70aac5bceb318f | [
"MIT"
] | null | null | null | admin.py | dikshith/allcode | b5563f9d9f1839c50396a2d4de70aac5bceb318f | [
"MIT"
] | null | null | null | # save this as app.py
from __main__ import app, ALLOWED_EXTENSIONS, UPLOAD_FOLDER
from flask import request, jsonify, abort, render_template, flash, redirect, url_for
from werkzeug.utils import secure_filename
import os
import io
import csv
from models import *
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def transform(text_file_contents):
return text_file_contents.replace("=", ",")
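# Example: transform('key=value') -> 'key,value', i.e. "key=value" lines
# become comma-separated pairs.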
@app.route("/admin", methods=["GET", "POST"])
def admin():
if request.method == "POST":
table = request.form.get("table")
if 'csv' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['csv']
# If the user does not select a file, the browser submits an
# empty file without a filename.
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
# import pdb; pdb.set_trace()
            flash('File uploaded successfully!')
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# Read csv file
csv_input = csv.DictReader(open(os.path.join(app.config['UPLOAD_FOLDER'], filename)))
rel_version = release_version(6)
db.session.add(rel_version)
db.session.commit()
rel_id = rel_version.id
for row in csv_input:
print(row)
performance = performance_results(rel_id, row['Label'], int(row['# Samples']), int(row['Average']), int(row['Median']), int(row['90% Line']), int(row['95% Line']), int(row['99% Line']), int(row['Min']), int(row['Max']), float(row['Error %']), float(row['Throughput']), float(row['Received KB/sec']), float(row['Sent KB/sec']))
db.session.add(performance)
db.session.commit()
return redirect(request.url)
    return render_template("admin/admin.html")
| 37.403509 | 344 | 0.615385 | 271 | 2,132 | 4.730627 | 0.409594 | 0.037442 | 0.049142 | 0.056162 | 0.060842 | 0.060842 | 0.060842 | 0.060842 | 0 | 0 | 0 | 0.007477 | 0.247186 | 2,132 | 56 | 345 | 38.071429 | 0.791277 | 0.070826 | 0 | 0.128205 | 0 | 0 | 0.115949 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.179487 | 0.051282 | 0.410256 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e58f86674bdf77a8b5f16bc946bf19c653623803 | 2,057 | py | Python | BSSN_SF/BSSN_ID_function_string.py | kazewong/nrpytutorial | cc511325f37f01284b2b83584beb2a452556b3fb | [
"BSD-2-Clause"
] | null | null | null | BSSN_SF/BSSN_ID_function_string.py | kazewong/nrpytutorial | cc511325f37f01284b2b83584beb2a452556b3fb | [
"BSD-2-Clause"
] | null | null | null | BSSN_SF/BSSN_ID_function_string.py | kazewong/nrpytutorial | cc511325f37f01284b2b83584beb2a452556b3fb | [
"BSD-2-Clause"
] | null | null | null | # This module sets up an initial data function meant to
# be called in a pointwise manner at all gridpoints.
# Author: Zachariah B. Etienne
#         zachetie **at** gmail **dot** com
from outputC import *
def BSSN_ID_function_string(cf,hDD,lambdaU,aDD,trK,alpha,vetU,betU):
returnstring = "void BSSN_ID(REAL xx0,REAL xx1,REAL xx2,REAL Cartxyz0,REAL Cartxyz1,REAL Cartxyz2,\n"
returnstring += "\tREAL *hDD00,REAL *hDD01,REAL *hDD02,REAL *hDD11,REAL *hDD12,REAL *hDD22,\n"
returnstring += "\tREAL *aDD00,REAL *aDD01,REAL *aDD02,REAL *aDD11,REAL *aDD12,REAL *aDD22,\n"
returnstring += "\tREAL *trK,\n"
returnstring += "\tREAL *lambdaU0,REAL *lambdaU1,REAL *lambdaU2,\n"
returnstring += "\tREAL *vetU0,REAL *vetU1,REAL *vetU2,\n"
returnstring += "\tREAL *betU0,REAL *betU1,REAL *betU2,\n"
returnstring += "\tREAL *alpha,REAL *cf) {\n"
returnstring += outputC([hDD[0][0], hDD[0][1], hDD[0][2], hDD[1][1], hDD[1][2], hDD[2][2],
aDD[0][0], aDD[0][1], aDD[0][2], aDD[1][1], aDD[1][2], aDD[2][2],
trK,
lambdaU[0], lambdaU[1], lambdaU[2],
vetU[0], vetU[1], vetU[2],
betU[0], betU[1], betU[2],
alpha, cf],
["*hDD00", "*hDD01", "*hDD02", "*hDD11", "*hDD12", "*hDD22",
"*aDD00", "*aDD01", "*aDD02", "*aDD11", "*aDD12", "*aDD22",
"*trK",
"*lambdaU0", "*lambdaU1", "*lambdaU2",
"*vetU0", "*vetU1", "*vetU2",
"*betU0", "*betU1", "*betU2",
"*alpha", "*cf"], filename="returnstring",
params="preindent=1,CSE_enable=True,outCverbose=False", # outCverbose=False to prevent
# enormous output files.
prestring="", poststring="")
returnstring += "}\n"
return returnstring
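# Hedged usage sketch (illustrative; assumes the inputs are SymPy expressions
# produced by a BSSN initial-data module, and the output file name below is
# hypothetical):
#     ID_string = BSSN_ID_function_string(cf, hDD, lambdaU, aDD, trK, alpha, vetU, betU)
#     with open("BSSN_ID.h", "w") as outfile:
#         outfile.write(ID_string)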
| 55.594595 | 115 | 0.500243 | 229 | 2,057 | 4.471616 | 0.401747 | 0.101563 | 0.123047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076812 | 0.32912 | 2,057 | 36 | 116 | 57.138889 | 0.665217 | 0.109869 | 0 | 0 | 0 | 0.107143 | 0.336623 | 0.024671 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.035714 | 0 | 0.107143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5919a61e375fbb9499ebd4f58ee76df900f51b5 | 8,163 | py | Python | CollectData/instagram_downloader_public.py | erik1110/face-transformation | e9afab85340522c8e19d73b08cedced187d8ada0 | [
"MIT"
] | 1 | 2020-10-04T07:39:50.000Z | 2020-10-04T07:39:50.000Z | CollectData/instagram_downloader_public.py | erik1110/face-transformation | e9afab85340522c8e19d73b08cedced187d8ada0 | [
"MIT"
] | null | null | null | CollectData/instagram_downloader_public.py | erik1110/face-transformation | e9afab85340522c8e19d73b08cedced187d8ada0 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# coding:utf-8
"""
Instagram Downloader
"""
import os
import logging
import time
import requests
from datetime import datetime
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup as bs
class MyApp(object):
"""
define the GUI interface
"""
def __init__(self):
self.set_log()
self.root = tk.Tk()
self.root.title("Instgram Downloader")
self.root.geometry('500x250')
self.canvas = tk.Canvas(self.root, height=400, width=700)
self.canvas.pack(side='top')
self.setup_ui()
def set_log(self):
if not os.path.exists('./screenshot'):
os.mkdir('./screenshot')
if not os.path.exists('./log'):
os.mkdir('./log')
log_name = 'log/RPA_%Y%m%d_%H%M%S.log'
logging.basicConfig(level=logging.INFO,
filename=datetime.now().strftime(log_name),
filemode='w',
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
self.logger = logging.getLogger(log_name)
def setup_ui(self):
"""
setup UI interface
"""
        self.label_save_file = tk.Label(self.root, text='Save folder:')
        self.label_pattern = tk.Label(self.root, text="Mode:")
        self.label_id = tk.Label(self.root, text="id or tag:")
        self.label_limit = tk.Label(self.root, text='Image limit:')
self.input_save_file = tk.Entry(self.root, width=30)
self.input_pattern = ttk.Combobox(self.root, values=["id", "tag"])
self.input_pattern.current(0)
self.input_limit = tk.Entry(self.root, width=30)
self.input_id = tk.Entry(self.root, width=30)
self.input_tag = tk.Entry(self.root, width=30)
self.login_button = tk.Button(self.root, command=self.run, text="Run", width=10, foreground = "black")
self.quit_button = tk.Button(self.root, command=self.quit, text="Quit", width=10, foreground = "black")
def gui_arrang(self):
"""
setup position of UI
"""
self.label_save_file.place(x=60, y=30)
self.label_pattern.place(x=60, y=70)
self.label_id.place(x=60, y=110)
self.label_limit.place(x=60, y=150)
self.input_save_file.place(x=130, y=30)
self.input_pattern.place(x=130, y=70)
self.input_id.place(x=130, y=110)
self.input_limit.place(x=130, y=150)
self.login_button.place(x=130, y=190)
self.quit_button.place(x=270, y=190)
def check(self):
"""
check the input of gui interface
return:
True
False
"""
# check your input
self.save_file = self.input_save_file.get()
self.pattern = self.input_pattern.get()
self.id = self.input_id.get()
if len(self.save_file) == 0 or len(self.pattern) == 0 or \
len(self.id)==0 or len(self.input_limit.get())==0:
            messagebox.showinfo(title='System Alert', message='Fields must not be empty!')
            self.logger.info('A required field was left empty!')
return False
try:
self.limit = int(self.input_limit.get())
except:
            messagebox.showinfo(title='System Alert', message='The limit must be an integer!')
            self.logger.info('The limit must be an integer!')
return False
# check your save file
if not self.pattern in ['id','tag']:
            messagebox.showinfo(title='System Alert', message='Invalid mode, must be "id" or "tag"')
self.logger.warning('The pattern is wrong!')
return False
# check your save file
if self.save_file in ['log','screenshot']:
            messagebox.showinfo(title='System Alert', message='That folder name is reserved!')
self.logger.warning('The file name is wrong!')
return False
if not os.path.exists(f'./{self.save_file}'):
os.mkdir(f'./{self.save_file}')
            messagebox.showinfo(title='System Alert', message=f'Created the folder {self.save_file}')
self.logger.info(f'Make dir:{self.save_file}')
return True
def download(self):
"""
download instagram photo
"""
# get driver
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.maximize_window()
# create url
if self.pattern=='id':
user_id = self.id
elif self.pattern=='tag':
user_id = f'explore/tags/{self.id}/'
origin_url = 'https://www.instagram.com/' + user_id
driver.get(origin_url)
time.sleep(3)
SCROLL_PAUSE_TIME = 3
images_unique=[]
# Get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# Scroll down to bottom
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait
time.sleep(1)
# show more if exists
try:
                # The button label below is the Chinese Instagram UI text for
                # "Show more posts from {user_id}"
                button_name = f'顯示更多 {user_id} 的貼文'
show_more = driver.find_element_by_xpath(f"//*[contains(text(),'{button_name}')]")
show_more.click()
except:
pass
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# Calculate new scroll height and compare with last scroll height
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
driver.execute_script("window.scrollTo(document.body.scrollHeight,0);")
break
# This means that there is still photos to scrap
last_height = new_height
time.sleep(1)
# Retrive the html
html_to_parse = str(driver.page_source)
html = bs(html_to_parse,"html5lib")
# Get the image's url
images_url = html.findAll("img", {"class": "FFVAD"})
# Check if they are unique
in_first = set(images_unique)
in_second = set(images_url)
in_second_but_not_in_first = in_second - in_first
result = images_unique + list(in_second_but_not_in_first)
images_unique = result
# if the images greater than the limit, break
if len(images_unique)>self.limit:
break
num_images = len(images_unique)
        self.logger.info(f'Fetched {num_images} images')
#Close the webdriver
driver.close()
for i, _ in enumerate(images_unique):
try:
# Save each image.jpg file
name=f"./{self.save_file}/{self.id}"+str(i)+".jpg"
with open(name, 'wb') as handler:
img_data = requests.get(images_unique[i].get("src")).content
handler.write(img_data)
except:
                self.logger.warning('Could not save: {}'.format(images_unique[i]))
def run(self):
"""
when you click the button of run, it'll execute
"""
start_time = datetime.now()
if self.check():
self.download()
            messagebox.showinfo(title='System Alert', message='Execution finished!')
else:
            self.logger.warning('Input check failed!')
end_time = datetime.now()
execution_time = (end_time-start_time).seconds
        self.logger.info('Total execution time: {}s'.format(execution_time))
        messagebox.showinfo(title='System Alert', message=f'Execution time: {execution_time}s')
def quit(self):
"""
when you click the button of quit, it'll execute
"""
self.root.destroy()
def main():
"""
main function for MyApp
"""
# initial
app = MyApp()
# arrage gui
app.gui_arrang()
# run tkinter
tk.mainloop()
if __name__ == '__main__':
main() | 32.521912 | 111 | 0.565356 | 1,003 | 8,163 | 4.465603 | 0.267198 | 0.028578 | 0.021433 | 0.045323 | 0.231525 | 0.192677 | 0.145791 | 0.062291 | 0.016968 | 0 | 0 | 0.016074 | 0.3141 | 8,163 | 251 | 112 | 32.521912 | 0.78389 | 0.097513 | 0 | 0.104575 | 0 | 0.006536 | 0.125822 | 0.043807 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0.006536 | 0.071895 | 0 | 0.169935 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e59820d95011ac7776cfe69371f262bf5dfa9d62 | 3,475 | py | Python | src/preview_item.py | viraelin/yame | 2cd2bfa6143c3578ecede602dd1c05236122c1cf | [
"MIT"
] | null | null | null | src/preview_item.py | viraelin/yame | 2cd2bfa6143c3578ecede602dd1c05236122c1cf | [
"MIT"
] | null | null | null | src/preview_item.py | viraelin/yame | 2cd2bfa6143c3578ecede602dd1c05236122c1cf | [
"MIT"
] | null | null | null | # Copyright (C) 2022 viraelin
# License: MIT
from PyQt6.QtCore import *
from PyQt6.QtWidgets import *
from PyQt6.QtGui import *
import system
from layer_type_menu import LayerType
class PreviewTile(QGraphicsRectItem):
# def __init__(self, item: QStandardItem) -> None:
def __init__(self) -> None:
super().__init__()
# todo: use actual tile data/color
# index = item.index()
# color_str = index.siblingAtColumn(3).data(Qt.ItemDataRole.DisplayRole)
color_str = "#080808"
self.color = QColor(color_str)
# alpha = 0.9
# self.color.setAlphaF(alpha)
x = 0
y = 0
size = system.cell_size
rect = QRectF(x, y, size, size)
self.setRect(rect)
self.setZValue(400)
def snap(self, pos: QPointF) -> None:
pos = system.get_snap_pos(pos)
self.setX(pos.x())
self.setY(pos.y())
def boundingRect(self) -> QRectF:
pad = 4
return self.rect().adjusted(-pad, -pad, pad, pad)
def paint(self, painter: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget) -> None:
pen = QPen()
pen.setStyle(Qt.PenStyle.SolidLine)
pen.setCapStyle(Qt.PenCapStyle.SquareCap)
pen.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
pen.setColor(self.color)
brush = QBrush()
brush.setStyle(Qt.BrushStyle.NoBrush)
width = 2
hwidth = 1
rect = self.rect().adjusted(-hwidth, -hwidth, hwidth, hwidth)
pen.setWidth(width)
painter.setPen(pen)
painter.setBrush(brush)
painter.drawRect(rect)
class PreviewEntity(QGraphicsRectItem):
def __init__(self, item: QStandardItem) -> None:
super().__init__()
index = item.index()
width = index.siblingAtColumn(1).data(Qt.ItemDataRole.DisplayRole)
height = index.siblingAtColumn(2).data(Qt.ItemDataRole.DisplayRole)
color_str = index.siblingAtColumn(3).data(Qt.ItemDataRole.DisplayRole)
self.color = QColor(color_str)
alpha = 0.1
self.color.setAlphaF(alpha)
origin_name = index.siblingAtColumn(4).data(Qt.ItemDataRole.DisplayRole)
offset = system.OriginPoint[origin_name].value
self.offset = offset
x = 0
y = 0
rect = QRectF(x, y, width, height)
self.setRect(rect)
self.setZValue(400)
def snap(self, pos: QPointF) -> None:
# todo: this is copied from snapping GraphicsItem
offset = self.offset
width = self.rect().width()
height = self.rect().height()
ox = int(offset.x() * width)
oy = int(offset.y() * height)
offset = QPointF(ox, oy)
pos -= offset
pos = system.get_snap_pos(pos)
self.setX(pos.x())
self.setY(pos.y())
def paint(self, painter: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget) -> None:
pen = QPen()
pen.setStyle(Qt.PenStyle.SolidLine)
pen.setCapStyle(Qt.PenCapStyle.SquareCap)
pen.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
pen.setColor(self.color)
brush = QBrush()
brush.setColor(self.color)
brush.setStyle(Qt.BrushStyle.SolidPattern)
width = 4
hwidth = width / 2
rect = self.rect().adjusted(hwidth, hwidth, -hwidth, -hwidth)
pen.setWidth(width)
painter.setPen(pen)
painter.setBrush(brush)
painter.drawRect(rect)
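# Hedged usage sketch (illustrative; assumes a QGraphicsScene is available):
#     tile = PreviewTile()
#     tile.snap(QPointF(37.0, 91.0))  # snaps onto the nearest system grid cell
#     scene.addItem(tile)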
| 29.201681 | 98 | 0.61295 | 396 | 3,475 | 5.292929 | 0.285354 | 0.030057 | 0.042939 | 0.069179 | 0.566794 | 0.566794 | 0.549141 | 0.474714 | 0.474714 | 0.41937 | 0 | 0.014578 | 0.26964 | 3,475 | 118 | 99 | 29.449153 | 0.811269 | 0.086906 | 0 | 0.506024 | 0 | 0 | 0.002213 | 0 | 0 | 0 | 0 | 0.008475 | 0 | 1 | 0.084337 | false | 0 | 0.060241 | 0 | 0.180723 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e59e35b470fb03223bde0bdc8a8066d61bc5a26b | 1,522 | py | Python | snippets/python/pipe_functional_programming/pipe_example.py | jerabaul29/config_scripts_snippets | c192f50c7cf90088862fd1f4d5678e0936cc375c | [
"MIT"
] | null | null | null | snippets/python/pipe_functional_programming/pipe_example.py | jerabaul29/config_scripts_snippets | c192f50c7cf90088862fd1f4d5678e0936cc375c | [
"MIT"
] | 6 | 2021-10-12T12:27:27.000Z | 2022-03-11T19:45:35.000Z | snippets/python/pipe_functional_programming/pipe_example.py | jerabaul29/config_scripts_snippets | c192f50c7cf90088862fd1f4d5678e0936cc375c | [
"MIT"
] | null | null | null | from pipe import Pipe
from pipe import select as pmap
from pipe import where as filter
from pipe import take
import functools
from icecream import ic
ic.configureOutput(prefix="", outputFunction=print)
"""
For my part, I like to stick to the usual functional programming terminology:
take
map
filter
reduce
"""
# add a reduce value
@Pipe
def preduce(iterable, function):
return functools.reduce(function, iterable)
def dummy_func(x):
print(f"processing at value {x}")
return x
print("----- test using a range() as input -----")
res_with_range = (range(100) | pmap(dummy_func)
| filter(lambda x: x % 2 == 0)
| take(2) )
print("*** what is the resulting object ***")
ic(res_with_range)
print("*** what happens when we force evaluation ***")
ic(list(res_with_range))
"""
This prints:
----- test using a range() as input -----
*** what is the resulting object ***
res_with_range: <generator object take at 0x7f60bd506d60>
*** what happens when we force evaluation ***
processing at value 0
processing at value 1
processing at value 2
processing at value 3
processing at value 4
list(res_with_range): [0, 2]
"""
print()
print("----- test using a range() as input but outputing a value not iterator -----")
res_with_reduce = (range(100) | pmap(dummy_func)
| filter(lambda x: x % 3 == 1)
| take(2)
| preduce(lambda x, y: x + y))
ic(res_with_reduce)
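"""
Expected output, worked through by hand: dummy_func is evaluated lazily for
0..4; the first two values with x % 3 == 1 are 1 and 4, so the reduce yields
res_with_reduce: 5
"""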
| 21.742857 | 85 | 0.631406 | 212 | 1,522 | 4.45283 | 0.358491 | 0.051907 | 0.108051 | 0.047669 | 0.273305 | 0.222458 | 0.131356 | 0.074153 | 0.074153 | 0 | 0 | 0.024561 | 0.250986 | 1,522 | 69 | 86 | 22.057971 | 0.803509 | 0.011827 | 0 | 0 | 0 | 0 | 0.210476 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0.035714 | 0.357143 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5a27f53e59e6bdb43b79f5ce55cc60189760583 | 5,790 | py | Python | lcrequest.py | nigelboid/lc-investor | 65f5de5c7a4c082fa3e090e4479a78d7432edfdb | [
"MIT"
] | 2 | 2015-10-13T02:54:49.000Z | 2015-11-12T21:59:34.000Z | lcrequest.py | nigelboid/lc-investor | 65f5de5c7a4c082fa3e090e4479a78d7432edfdb | [
"MIT"
] | null | null | null | lcrequest.py | nigelboid/lc-investor | 65f5de5c7a4c082fa3e090e4479a78d7432edfdb | [
"MIT"
] | null | null | null | #
# Import all necessary libraries
#
import requests
#
# Define some global constants
#
VERSION= '1.0.0'
# API request building blocks
API_VERSION= 'v1'
REQUEST_ROOT= 'https://api.lendingclub.com/api/investor/{}/'.format(API_VERSION)
REQUEST_LOANS= 'loans/listing?showAll=true'
REQUEST_ACCOUNTS= 'accounts/{}/'
REQUEST_SUMMARY= 'summary'
REQUEST_NOTES= 'detailednotes'
REQUEST_PORTFOLIOS= 'portfolios'
REQUEST_WITHDRAWAL= 'funds/withdraw'
REQUEST_HEADER= 'Authorization'
REQUEST_ORDERS= 'orders'
KEY_AID= 'aid'
KEY_LOAN_ID= 'loanId'
KEY_REQUESTED_AMOUNT= 'requestedAmount'
KEY_ORDERS= 'orders'
KEY_PORTFOLIO_NAME= 'portfolioName'
KEY_PORTFOLIO_DESCRIPTION= 'portfolioDescription'
KEY_PORTFOLIO_ID= 'portfolioId'
KEY_ERRORS= 'errors'
KEY_LOANS= 'loans'
KEY_NOTES= 'myNotes'
KEY_PORTFOLIOS= 'myPortfolios'
KEY_AMOUNT= 'amount'
# API request result codes
STATUS_CODE_OK= 200
#
# Define our Lending Club API class
#
class LCRequest:
# Constructor
def __init__(self, arguments):
self.token= arguments.token
self.id= arguments.id
self.debug= arguments.debug
self.requestHeader= {REQUEST_HEADER: self.token}
self.requestLoans= REQUEST_ROOT + REQUEST_LOANS
self.requestAccounts= REQUEST_ROOT + REQUEST_ACCOUNTS.format(self.id)
# Obtain available cash amount
def get_account_summary(self):
request= self.requestAccounts + REQUEST_SUMMARY
result= requests.get(request, headers=self.requestHeader)
if result.status_code == STATUS_CODE_OK:
return result.json()
else:
if self.debug:
raise Exception('Could not obtain account summary (status code {})'.format(result.status_code), self, request, self.requestHeader)
else:
raise Exception('Could not obtain account summary (status code {})'.format(result.status_code))
# Obtain all available notes ("In Funding")
def get_available_notes(self):
request= self.requestLoans
result= requests.get(request, headers=self.requestHeader)
if result.status_code == STATUS_CODE_OK:
if KEY_LOANS in result.json():
return result.json()[KEY_LOANS]
else:
if self.debug:
raise Exception('Received an empty response for available loans (result object {})'.format(result.json()), self, request, self.requestHeader)
else:
raise Exception('Received an empty response for available loans')
else:
if self.debug:
raise Exception('Could not obtain a list of available loans (status code {})'.format(result.status_code), self, request, self.requestHeader)
else:
raise Exception('Could not obtain a list of available loans (status code {})'.format(result.status_code))
# Obtain a list of all notes owned
def get_owned_notes(self):
request= self.requestAccounts + REQUEST_NOTES
result= requests.get(request, headers=self.requestHeader)
if result.status_code == STATUS_CODE_OK:
return result.json()[KEY_NOTES]
else:
if self.debug:
raise Exception('Could not obtain a list of owned notes (status code {})'.format(result.status_code), self, request, self.requestHeader)
else:
raise Exception('Could not obtain a list of owned notes (status code {})'.format(result.status_code))
# Obtain a list of all portfolios owned
def get_owned_portfolios(self):
request= self.requestAccounts + REQUEST_PORTFOLIOS
result= requests.get(request, headers=self.requestHeader)
if result.status_code == STATUS_CODE_OK:
return result.json()[KEY_PORTFOLIOS]
else:
if self.debug:
raise Exception('Could not obtain a list of owned portfolios (status code {})'.format(result.status_code), self, request, self.requestHeader)
else:
raise Exception('Could not obtain a list of owned portfolios (status code {})'.format(result.status_code))
# Create named portfolio
def create_portfolio(self, name, description):
request= self.requestAccounts + REQUEST_PORTFOLIOS
payload= {KEY_AID:self.id, KEY_PORTFOLIO_NAME:name, KEY_PORTFOLIO_DESCRIPTION:description}
result= requests.post(request, json=payload, headers=self.requestHeader)
if result.status_code == STATUS_CODE_OK:
return result.json()
else:
if self.debug:
raise Exception('Could not create the portfolio named "{}" with description "{}" (status code {})'.format(name, description, result.status_code), self, request, self.requestHeader, result.json()[KEY_ERRORS])
else:
raise Exception('Could not create the portfolio named "{}" with description "{}" (status code {})'.format(name, description, result.status_code)[KEY_ERRORS])
# Submit buy order
def submit_order(self, notes):
request= self.requestAccounts + REQUEST_ORDERS
payload= {KEY_AID:self.id, KEY_ORDERS:notes}
result= requests.post(request, json=payload, headers=self.requestHeader)
if result.status_code == STATUS_CODE_OK:
return result.json()
else:
if self.debug:
raise Exception('Order failed (status code {})'.format(result.status_code), self, request, self.requestHeader, result.json())
else:
raise Exception('Order failed (status code {})'.format(result.status_code))
# Submit withdrawal request
def submit_withdrawal(self, amount):
request= self.requestAccounts + REQUEST_WITHDRAWAL
payload= {KEY_AID:self.id, KEY_AMOUNT:amount}
result= requests.post(request, json=payload, headers=self.requestHeader)
if result.status_code == STATUS_CODE_OK:
return result.json()
else:
if self.debug:
raise Exception('Order failed (status code {})'.format(result.status_code), self, request, self.requestHeader, result.json())
else:
raise Exception('Order failed (status code {})'.format(result.status_code))
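# Hedged usage sketch (illustrative; ``arguments`` would typically be an
# argparse namespace carrying ``token``, ``id`` and ``debug`` attributes):
#     client = LCRequest(arguments)
#     summary = client.get_account_summary()
#     notes = client.get_owned_notes()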
| 35.304878 | 215 | 0.721934 | 726 | 5,790 | 5.61157 | 0.15427 | 0.105547 | 0.082474 | 0.064801 | 0.623711 | 0.582474 | 0.566274 | 0.552037 | 0.552037 | 0.5162 | 0 | 0.00146 | 0.171848 | 5,790 | 163 | 216 | 35.521472 | 0.848175 | 0.063212 | 0 | 0.428571 | 0 | 0 | 0.202628 | 0.004811 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.008929 | 0 | 0.151786 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5a3174fd3725503784105163c310d47e1598ce0 | 8,317 | py | Python | exegis/analysis.py | gruel/AphorismToTEI | 6d33a353c4b4f159af62e061618ff03a1f09fb7f | [
"BSD-3-Clause"
] | null | null | null | exegis/analysis.py | gruel/AphorismToTEI | 6d33a353c4b4f159af62e061618ff03a1f09fb7f | [
"BSD-3-Clause"
] | null | null | null | exegis/analysis.py | gruel/AphorismToTEI | 6d33a353c4b4f159af62e061618ff03a1f09fb7f | [
"BSD-3-Clause"
] | null | null | null | """Module which contains the function to analyse aphorism and commentaries line
There are two functions which are treating the references ``[W1 W2]``
and the footnotes *XXX*.
The ``references`` function has to be used before the ``footnotes``.
:Authors: Jonathan Boyle, Nicolas Gruel <nicolas.gruel@manchester.ac.uk>
:Copyright: IT Services, The University of Manchester
"""
try:
from .baseclass import logger, XML_OSS, XML_N_OFFSET
except ImportError:
from baseclass import logger, XML_OSS, XML_N_OFFSET
# Define an Exception
class AnalysisException(Exception):
"""Class for exception
"""
pass
def references(line):
"""
This helper function searches a line of text for witness references
with the form ``[WW LL]`` and returns a string containing the original
text with each witness reference replaced with XML with the form
``<locus target="WW">LL</locus>``.
``\\n`` characters are added at the start and end of each XML insertion
so each instance of XML is on its own line.
It is intended this function is called by function main()
for each line of text from the main body of the text document before
processing footnote references using the _footnotes() function.
Parameters
----------
line : str
contains the line with the aphorism or the commentary to analyse.
Raises
------
AnalysisException
if references does not follow the convention ``[W1 W2]``.
e.g. will raise an exception if:
- ``[W1W2]`` : missing space between the two witnesses
- ``[W1 W2`` : missing ``]``
"""
# Create a string to contain the return value
result = ''
if not line:
return
while True:
# Try to partition this line at the first '[' character
text_before, sep, text_after = line.partition('[')
# Note: if sep is zero there are no more witnesses to add
# Add text_before to the result string
if text_before != '':
result += text_before
# If there is a witness to add start a new line
if sep != '':
result += '\n'
# If sep has zero length we can stop because there are no more
# witness _references
if sep == '':
break
# Try to split text_after at the first ']' character
reference, sep, line = text_after.partition(']')
# If this partition failed then something went wrong,
# so throw an error
if sep == '':
error = 'Unable to partition string {} at "]" ' \
'when looking for a reference'.format(line)
logger.error(error)
raise AnalysisException
# Partition the reference into witness and location (these are
# separated by the ' ' character)
witness, sep, page = reference.partition(' ')
# If this partition failed there is an error
if sep == '':
error = ('Unable to partition reference [{}] '
'because missing space probably'.format(reference))
logger.error(error)
raise AnalysisException
# Add the witness and location XML to the result string
result += '<locus target="' + witness.strip() + \
'">' + page.strip() + '</locus>'
# If text has zero length we can stop
if line == '':
break
else:
# There is more text to process so start a new line
result += '\n'
return result
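# Illustrative example (the witness and page values are hypothetical):
#     references('text [W1 12r] more text')
# returns the original text with the reference replaced by
#     <locus target="W1">12r</locus>
# placed on its own line.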
def footnotes(string_to_process, next_footnote):
"""
This helper function takes a single string containing text and
processes any embedded footnote symbols (describing additions,
omissions, correxi, conieci and standard textual variations)
to generate XML. It also deals with any XML generated using
function _references().
The output is two lists of XML, one for the main text, the other
for the apparatus.
Parameters
----------
string_to_process: str
This string contains the text to be processed. This should contain
a single line from the text file being processed, e.g. a title,
aphorism or commentary. This string may already contain XML
generated using the _references() function i.e. XML
identifying witnesses with each <locus> XML on a new line.
next_footnote: int
        The number of the next footnote to look for.
Returns
-------
1. A Python list containing XML for the main text.
2. A Python list containing XML for the critical apparatus.
3. The number of the next footnote to be processed when this function
complete.
It is intended this function is called by main() on each line
of text from the main document body.
Raises
------
AnalysisException
if footnote in commentary can not be defined.
"""
# Create lists to contain the XML
xml_main = []
try:
while True:
# Use string partition to try to split this text at
# the next footnote symbol
footnote_symbol = '*' + str(next_footnote) + '*'
text_before_symbol, sep, string_to_process = \
string_to_process.partition(footnote_symbol)
# If the partition failed sep will have zero length and the next
# footnote is not in this line, hence we can stop
# processing and return
if sep == '':
# Add text_before_symbol to the XML and stop processing
for next_line in text_before_symbol.splitlines():
xml_main.append(XML_OSS * XML_N_OFFSET +
next_line.strip())
break
# We know sep has non-zero length and we are dealing with
# a footnote.
# Now use string partition to try to split text_before_symbol
# at a '#' character.
next_text_for_xml, sep, base_text = \
text_before_symbol.partition('#')
# If the above partition failed the footnote refers
# to a single word
if sep == '':
# Use rpartition to partition at the LAST space in the
# string before the footnote symbol
next_text_for_xml, sep, base_text = \
text_before_symbol.rpartition(' ')
# Check we succeeded in partitioning the text before the footnote
# at '#' or ' '. If we didn't there's an error.
if sep == '':
error = ('Unable to partition text before footnote symbol '
'{}'.format(footnote_symbol))
logger.error(error)
error = ('Probably missing a space or the "#" character '
'to determine the word(s) to apply the footnote')
logger.error(error)
raise AnalysisException
# Add the next_text_for_xml to xml_main
for next_line in next_text_for_xml.splitlines():
xml_main.append(XML_OSS * XML_N_OFFSET + next_line.strip())
# Create an anchor for the app (as advised)
xml_main.append(XML_OSS * XML_N_OFFSET +
'<anchor xml:id="begin_fn' +
str(next_footnote) + '"/>')
# Create XML for this textural variation for xml_main
# Add next_string to the xml_main and XML from a witness reference
for next_line in base_text.splitlines():
xml_main.append(XML_OSS * (XML_N_OFFSET+2) + next_line)
# End the anchor reference
xml_main.append(XML_OSS * XML_N_OFFSET +
'<anchor xml:id="end_fn' +
str(next_footnote) + '"/>')
# Increment the footnote number
next_footnote += 1
# Test to see if there is any more text to process
if string_to_process == '':
break
except (AttributeError, AnalysisException):
error = 'Cannot analyse aphorism or commentary ' \
'{}'.format(string_to_process)
logger.error(error)
raise AnalysisException
return xml_main, next_footnote
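# Illustrative example (hypothetical input; footnote 1 applies to "word2"):
#     xml_main, next_fn = footnotes('word1 word2*1* rest', 1)
# xml_main then wraps "word2" between <anchor xml:id="begin_fn1"/> and
# <anchor xml:id="end_fn1"/>, and next_fn is 2.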
| 35.391489 | 79 | 0.597451 | 1,032 | 8,317 | 4.718023 | 0.236434 | 0.024646 | 0.012939 | 0.014377 | 0.204354 | 0.176422 | 0.167386 | 0.114397 | 0.079482 | 0.054631 | 0 | 0.002332 | 0.329686 | 8,317 | 234 | 80 | 35.542735 | 0.871031 | 0.504028 | 0 | 0.395062 | 0 | 0 | 0.103842 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024691 | false | 0.012346 | 0.037037 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5a7b8b0481012f5ade5147dd6e2ed6513934354 | 1,695 | py | Python | update_damage_sheet.py | faith-grins/RS-RS-DamageRankings | 667387bb8971ea57d8ff669efb62ea7c2ef61f8e | [
"Apache-2.0"
] | null | null | null | update_damage_sheet.py | faith-grins/RS-RS-DamageRankings | 667387bb8971ea57d8ff669efb62ea7c2ef61f8e | [
"Apache-2.0"
] | null | null | null | update_damage_sheet.py | faith-grins/RS-RS-DamageRankings | 667387bb8971ea57d8ff669efb62ea7c2ef61f8e | [
"Apache-2.0"
] | null | null | null | import gspread
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
CLIENT_SECRET_FILE = '.secrets/PythonSheetsApiSecret.json'
CREDENTIALS_TOKEN = '.secrets/token.json'
# The ID and range of a sample spreadsheet.
SPREADSHEET_ID = '1oc5TC_nGzLXk4sP3zhlyFeYt526cxXXVeDtvMDFWbno'
VALUE_RENDER_OPTION = 'FORMULA'
VALUE_INPUT_OPTION = 'RAW'
stats_starting_row = 4
style_stats_sheet = 'StyleStats'
style_stats_range = 'B4:T'
style_final_str_column = 'StyleStats!M4:M'
style_final_end_column = 'StyleStats!N4:N'
style_final_dex_column = 'StyleStats!O4:O'
style_final_agi_column = 'StyleStats!P4:P'
style_final_int_column = 'StyleStats!Q4:Q'
style_final_wil_column = 'StyleStats!R4:R'
style_final_lov_column = 'StyleStats!S4:S'
style_final_cha_column = 'StyleStats!T4:T'
class Character:
rows = []
name = ''
def login():
return gspread.oauth(credentials_filename=CLIENT_SECRET_FILE, authorized_user_filename=CREDENTIALS_TOKEN)
def get_styles(auth):
style_sheet = auth.open_by_key(SPREADSHEET_ID)
    styles = style_sheet.worksheet(style_stats_sheet).get(style_stats_range, value_render_option=VALUE_RENDER_OPTION)
characters = {}
for i, s in enumerate(styles):
if s[0] not in characters:
characters[s[0]] = [i + stats_starting_row]
else:
characters[s[0]].append(i + stats_starting_row)
return characters
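# Hedged shape sketch (names are illustrative): get_styles returns a dict
# mapping each value in column B to the sheet rows it occupies, e.g.
#     {'Albert': [4, 5], 'Gray': [6]}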
def update_sheet(auth, characters):
style_sheet = auth.open_by_key(SPREADSHEET_ID)
    style_data_sheet = style_sheet.worksheet(style_stats_sheet)
style_data_sheet.update('A1', 'Testing')
if __name__ == '__main__':
update_sheet(login(), '')
| 30.818182 | 118 | 0.756342 | 231 | 1,695 | 5.186147 | 0.445887 | 0.066778 | 0.042571 | 0.03005 | 0.118531 | 0.118531 | 0.0601 | 0.0601 | 0 | 0 | 0 | 0.014384 | 0.138643 | 1,695 | 54 | 119 | 31.388889 | 0.806164 | 0.056637 | 0 | 0.051282 | 0 | 0 | 0.18985 | 0.049499 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.025641 | 0.025641 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5a938e5e2ec369977b37c0a78e456d48e469534 | 350 | py | Python | Intermediate/json-data.py | abhishek8075374519/python-for-beginners | a3c0334751001c6468819af7c8ae7ec0993a48c3 | [
"MIT"
] | null | null | null | Intermediate/json-data.py | abhishek8075374519/python-for-beginners | a3c0334751001c6468819af7c8ae7ec0993a48c3 | [
"MIT"
] | null | null | null | Intermediate/json-data.py | abhishek8075374519/python-for-beginners | a3c0334751001c6468819af7c8ae7ec0993a48c3 | [
"MIT"
] | null | null | null | import json as j
# CONVERTING TO JSON
data = {
"Name": "John Doe",
"Age": "22"
}
y = j.dumps(data)
print(y)
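# prints: {"Name": "John Doe", "Age": "22"}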
# A LIST IS CONVERTED INTO JSON EQUIVALENT ARRAY
data = [1, 2, 3, 4, 5]
i = j.dumps(data)
print(i)
# READING FROM JSON
x = '{ "name":"John", "age":30, "city":"New York"}'
y = j.loads(x)
print(y)
print(y["age"])
| 16.666667 | 52 | 0.554286 | 59 | 350 | 3.288136 | 0.610169 | 0.092784 | 0.103093 | 0.154639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034351 | 0.251429 | 350 | 20 | 53 | 17.5 | 0.706107 | 0.237143 | 0 | 0.142857 | 0 | 0 | 0.26749 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5ac46d95f62c31e2cdcf0a830026f35e4bd572a | 620 | py | Python | code/python/echomesh/output/Registry.py | rec/echomesh | be668971a687b141660fd2e5635d2fd598992a01 | [
"MIT"
] | 30 | 2015-02-18T14:07:00.000Z | 2021-12-11T15:19:01.000Z | code/python/echomesh/output/Registry.py | rec/echomesh | be668971a687b141660fd2e5635d2fd598992a01 | [
"MIT"
] | 16 | 2015-01-01T23:17:24.000Z | 2015-04-18T23:49:27.000Z | code/python/echomesh/output/Registry.py | rec/echomesh | be668971a687b141660fd2e5635d2fd598992a01 | [
"MIT"
] | 31 | 2015-03-11T20:04:07.000Z | 2020-11-02T13:56:59.000Z | from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.util.registry.Module import register
from echomesh.output.OutputCache import OutputCache
REGISTRY = register(
__name__,
'Bidirectional',
'Offset',
'Output',
'Map',
'Spi',
'Test',
'Visualizer',
)
OUTPUT_CACHE = OutputCache()
def make_output(data):
if isinstance(data, dict):
return REGISTRY.make_from_description(data, default_type='output')
else:
return OUTPUT_CACHE.get_output(data)
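# Hedged usage sketch (illustrative; the description keys depend on the
# registered output class):
#     out = make_output({'type': 'test'})    # built from a description dict
#     out = make_output('some_output_name')  # anything else hits the cache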
def pause_outputs():
from echomesh.output.Output import pause_outputs
pause_outputs()
| 22.142857 | 82 | 0.73871 | 72 | 620 | 6.069444 | 0.527778 | 0.08238 | 0.08238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.162903 | 620 | 27 | 83 | 22.962963 | 0.842004 | 0 | 0 | 0 | 0 | 0 | 0.082258 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.363636 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5aecff027dc26da16498680b52ebce4340235d7 | 12,433 | py | Python | migrations/versions/bdeeeacbec4d_initial_schema.py | Innopoints/backend | 723565ba3f63914a7dab03346696d89e28060d64 | [
"MIT"
] | 1 | 2020-11-30T17:41:36.000Z | 2020-11-30T17:41:36.000Z | migrations/versions/bdeeeacbec4d_initial_schema.py | Innopoints/backend | 723565ba3f63914a7dab03346696d89e28060d64 | [
"MIT"
] | 34 | 2020-04-18T19:31:27.000Z | 2021-03-19T13:56:56.000Z | migrations/versions/bdeeeacbec4d_initial_schema.py | Innopoints/backend | 723565ba3f63914a7dab03346696d89e28060d64 | [
"MIT"
] | null | null | null | """Initial schema
Revision ID: bdeeeacbec4d
Revises:
Create Date: 2020-04-11 11:20:18.814141
"""
import json
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'bdeeeacbec4d'
down_revision = None
branch_labels = None
depends_on = None
DEFAULT_NOTIFICATIONS = {
'innostore': 'off',
'volunteering': 'off',
'project_creation': 'off',
'administration': 'off',
'service': 'email',
}
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('accounts',
sa.Column('full_name', sa.String(length=256), nullable=False),
sa.Column('group', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=128), nullable=False),
sa.Column('telegram_username', sa.String(length=32), nullable=True),
sa.Column('is_admin', sa.Boolean(), nullable=False),
sa.Column('notification_settings', postgresql.JSONB(astext_type=sa.Text()), nullable=False, server_default=json.dumps(DEFAULT_NOTIFICATIONS)),
sa.PrimaryKeyConstraint('email')
)
op.create_table('colors',
sa.Column('value', sa.String(length=6), nullable=False),
sa.PrimaryKeyConstraint('value')
)
op.create_table('competences',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('products',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=False),
sa.Column('type', sa.String(length=128), nullable=True),
sa.Column('description', sa.String(length=1024), nullable=False),
sa.Column('price', sa.Integer(), nullable=False),
sa.Column('addition_time', sa.DateTime(timezone=True), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', 'type', name='unique product')
)
op.create_table('sizes',
sa.Column('value', sa.String(length=3), nullable=False),
sa.PrimaryKeyConstraint('value')
)
op.create_table('notifications',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('recipient_email', sa.String(length=128), nullable=False),
sa.Column('is_read', sa.Boolean(), nullable=False),
sa.Column('payload', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column('timestamp', sa.DateTime(timezone=True), nullable=False),
sa.Column('type', sa.Enum('purchase_status_changed', 'new_arrivals', 'claim_innopoints', 'application_status_changed', 'service', 'manual_transaction', 'project_review_status_changed', 'all_feedback_in', 'added_as_moderator', 'out_of_stock', 'new_purchase', 'project_review_requested', name='notificationtype'), nullable=False),
sa.ForeignKeyConstraint(['recipient_email'], ['accounts.email'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('static_files',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('mimetype', sa.String(length=255), nullable=False),
sa.Column('owner_email', sa.String(length=128), nullable=False),
sa.ForeignKeyConstraint(['owner_email'], ['accounts.email'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
op.create_table('varieties',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('product_id', sa.Integer(), nullable=False),
sa.Column('size', sa.String(length=3), nullable=True),
sa.Column('color', sa.String(length=6), nullable=True),
sa.ForeignKeyConstraint(['color'], ['colors.value'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['product_id'], ['products.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['size'], ['sizes.value'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
op.create_table('product_images',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('variety_id', sa.Integer(), nullable=False),
sa.Column('image_id', sa.Integer(), nullable=False),
sa.Column('order', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['image_id'], ['static_files.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['variety_id'], ['varieties.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('variety_id', 'order', deferrable='True', initially='DEFERRED', name='unique order indices')
)
op.create_table('projects',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=False),
sa.Column('image_id', sa.Integer(), nullable=True),
sa.Column('creation_time', sa.DateTime(timezone=True), nullable=False),
sa.Column('organizer', sa.String(length=128), nullable=True),
sa.Column('creator_email', sa.String(length=128), nullable=False),
sa.Column('admin_feedback', sa.String(length=1024), nullable=True),
sa.Column('review_status', sa.Enum('pending', 'approved', 'rejected', name='reviewstatus'), nullable=True),
sa.Column('lifetime_stage', sa.Enum('draft', 'ongoing', 'finalizing', 'finished', name='lifetimestage'), nullable=False),
sa.ForeignKeyConstraint(['creator_email'], ['accounts.email'], ),
sa.ForeignKeyConstraint(['image_id'], ['static_files.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('stock_changes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('amount', sa.Integer(), nullable=False),
sa.Column('time', sa.DateTime(timezone=True), nullable=False),
sa.Column('status', sa.Enum('carried_out', 'pending', 'ready_for_pickup', 'rejected', name='stockchangestatus'), nullable=False),
sa.Column('account_email', sa.String(length=128), nullable=False),
sa.Column('variety_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['account_email'], ['accounts.email'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['variety_id'], ['varieties.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
op.create_table('activities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=True),
sa.Column('description', sa.String(length=1024), nullable=True),
sa.Column('start_date', sa.DateTime(timezone=True), nullable=True),
sa.Column('end_date', sa.DateTime(timezone=True), nullable=True),
sa.Column('project_id', sa.Integer(), nullable=False),
sa.Column('working_hours', sa.Integer(), nullable=False),
sa.Column('reward_rate', sa.Integer(), nullable=False),
sa.Column('fixed_reward', sa.Boolean(), nullable=False),
sa.Column('people_required', sa.Integer(), nullable=False),
sa.Column('telegram_required', sa.Boolean(), nullable=False),
sa.Column('application_deadline', sa.DateTime(timezone=True), nullable=True),
sa.Column('feedback_questions', sa.ARRAY(sa.String(length=1024)), nullable=False),
sa.Column('internal', sa.Boolean(), nullable=False, server_default='False'),
sa.CheckConstraint('(fixed_reward AND working_hours = 1) OR (NOT fixed_reward AND reward_rate = 70)', name='reward policy'),
sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', 'project_id', name='name is unique inside a project')
)
op.create_table('project_files',
sa.Column('project_id', sa.Integer(), nullable=False),
sa.Column('file_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['file_id'], ['static_files.id'], ),
sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ),
sa.PrimaryKeyConstraint('project_id', 'file_id')
)
op.create_table('project_moderation',
sa.Column('project_id', sa.Integer(), nullable=False),
sa.Column('account_email', sa.String(length=128), nullable=False),
sa.ForeignKeyConstraint(['account_email'], ['accounts.email'], onupdate='CASCADE', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('project_id', 'account_email')
)
op.create_table('activity_competence',
sa.Column('activity_id', sa.Integer(), nullable=False),
sa.Column('competence_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['activity_id'], ['activities.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['competence_id'], ['competences.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('activity_id', 'competence_id')
)
op.create_table('applications',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('applicant_email', sa.String(length=128), nullable=False),
sa.Column('activity_id', sa.Integer(), nullable=False),
sa.Column('comment', sa.String(length=1024), nullable=True),
sa.Column('application_time', sa.DateTime(timezone=True), nullable=False),
sa.Column('telegram_username', sa.String(length=32), nullable=True),
sa.Column('status', sa.Enum('approved', 'pending', 'rejected', name='applicationstatus'), nullable=False),
sa.Column('actual_hours', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['activity_id'], ['activities.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['applicant_email'], ['accounts.email'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('applicant_email', 'activity_id', name='only one application')
)
op.create_table('feedback',
sa.Column('application_id', sa.Integer(), nullable=False),
sa.Column('time', sa.DateTime(timezone=True), nullable=False),
sa.Column('answers', sa.ARRAY(sa.String(length=1024)), nullable=False),
sa.ForeignKeyConstraint(['application_id'], ['applications.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('application_id'),
sa.UniqueConstraint('application_id')
)
op.create_table('reports',
sa.Column('application_id', sa.Integer(), nullable=False),
sa.Column('reporter_email', sa.String(length=128), nullable=False),
sa.Column('time', sa.DateTime(timezone=True), nullable=False),
sa.Column('rating', sa.Integer(), nullable=False),
sa.Column('content', sa.String(length=1024), nullable=True),
sa.ForeignKeyConstraint(['application_id'], ['applications.id'], ),
sa.ForeignKeyConstraint(['reporter_email'], ['accounts.email'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('application_id', 'reporter_email')
)
op.create_table('feedback_competence',
sa.Column('feedback_id', sa.Integer(), nullable=False),
sa.Column('competence_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['competence_id'], ['competences.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['feedback_id'], ['feedback.application_id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('feedback_id', 'competence_id')
)
op.create_table('transactions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('account_email', sa.String(length=128), nullable=False),
sa.Column('change', sa.Integer(), nullable=False),
sa.Column('stock_change_id', sa.Integer(), nullable=True),
sa.Column('feedback_id', sa.Integer(), nullable=True),
sa.CheckConstraint('(stock_change_id IS NULL) OR (feedback_id IS NULL)', name='not(feedback and stock_change)'),
sa.ForeignKeyConstraint(['account_email'], ['accounts.email'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['feedback_id'], ['feedback.application_id'], ondelete='SET NULL'),
sa.ForeignKeyConstraint(['stock_change_id'], ['stock_changes.id'], ondelete='SET NULL'),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('transactions')
op.drop_table('feedback_competence')
op.drop_table('reports')
op.drop_table('feedback')
op.drop_table('applications')
op.drop_table('activity_competence')
op.drop_table('project_moderation')
op.drop_table('project_files')
op.drop_table('activities')
op.drop_table('stock_changes')
op.drop_table('projects')
op.drop_table('product_images')
op.drop_table('varieties')
op.drop_table('static_files')
op.drop_table('notifications')
op.drop_table('sizes')
op.drop_table('products')
op.drop_table('competences')
op.drop_table('colors')
op.drop_table('accounts')
# ### end Alembic commands ###
| 50.54065 | 332 | 0.696292 | 1,490 | 12,433 | 5.681208 | 0.14094 | 0.086001 | 0.122268 | 0.133963 | 0.632605 | 0.606734 | 0.524631 | 0.47218 | 0.382044 | 0.270644 | 0 | 0.010424 | 0.120405 | 12,433 | 245 | 333 | 50.746939 | 0.763625 | 0.022762 | 0 | 0.257778 | 0 | 0 | 0.259969 | 0.013952 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008889 | false | 0 | 0.017778 | 0 | 0.026667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5b00d7e128695512faf3dee89d09b98e4ae7a89 | 636 | py | Python | build/lib/DateTimeTools/DayNotoDate.py | pshustov/DateTimeTools | e542fd3f0e3c5290faad09b7cf8a2751132d4dd3 | [
"MIT"
] | null | null | null | build/lib/DateTimeTools/DayNotoDate.py | pshustov/DateTimeTools | e542fd3f0e3c5290faad09b7cf8a2751132d4dd3 | [
"MIT"
] | null | null | null | build/lib/DateTimeTools/DayNotoDate.py | pshustov/DateTimeTools | e542fd3f0e3c5290faad09b7cf8a2751132d4dd3 | [
"MIT"
] | null | null | null | import numpy as np
from ._CFunctions import _CDayNotoDate
from ._CTConv import _CTConv
def DayNotoDate(Year,Doy):
'''
Converts year and day numbers to a date of the format yyyymmdd.
Inputs
======
Year : int32
Array or scalar of years
Doy : int32
Array or scalar of day numbers
Returns
=======
Date : int
Array or scalar of dates
'''
#convert the inputs into the exact dtypes required for C++
_n = _CTConv(np.size(Doy),'c_int')
_Year = _CTConv(Year,'c_int_ptr')
_Doy = _CTConv(Doy,'c_int_ptr')
_Date = np.zeros(_n,dtype='int32')
#call the C++ function
_CDayNotoDate(_n,_Year,_Doy,_Date)
return _Date
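# Illustrative example: day 32 of 2020 is February 1st, so
#     DayNotoDate(2020, 32)
# should return 20200201 (as a length-1 int32 array for scalar input).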
| 19.272727 | 64 | 0.698113 | 98 | 636 | 4.295918 | 0.479592 | 0.049881 | 0.092637 | 0.106888 | 0.095012 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011696 | 0.193396 | 636 | 32 | 65 | 19.875 | 0.808967 | 0.522013 | 0 | 0 | 0 | 0 | 0.088889 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5b241b8fb352b546133a594008c04d79660503c | 4,996 | py | Python | BigDataArchitecture/application_pod/application.py | Brebeck-Jan/AlphaBigDataTech | df7b63056e7067e366e72193ec8260dbc59b53bb | [
"MIT"
] | null | null | null | BigDataArchitecture/application_pod/application.py | Brebeck-Jan/AlphaBigDataTech | df7b63056e7067e366e72193ec8260dbc59b53bb | [
"MIT"
] | null | null | null | BigDataArchitecture/application_pod/application.py | Brebeck-Jan/AlphaBigDataTech | df7b63056e7067e366e72193ec8260dbc59b53bb | [
"MIT"
] | null | null | null | ##########################################################################################
##########################################################################################
# BigData - Application #
##########################################################################################
##########################################################################################
##########################################################################################
# import libraries #
##########################################################################################
import findspark
findspark.init()
from pyspark.sql import SparkSession
import happybase
from nltk.corpus import stopwords
import nltk
import pandas as pd
import pymongo
import sys
nltk.download("stopwords")
import time
##########################################################################################
# init spark #
##########################################################################################
spark=SparkSession.builder\
.master("local[*]")\
.appName("application")\
.getOrCreate()
sc=spark.sparkContext
##########################################################################################
# prerequisites #
##########################################################################################
# delete umlauts
def umlauts(word):
tempVar = word
tempVar = tempVar.replace('ä', 'ae')
tempVar = tempVar.replace('ö', 'oe')
tempVar = tempVar.replace('ü', 'ue')
tempVar = tempVar.replace('Ä', 'Ae')
tempVar = tempVar.replace('Ö', 'Oe')
tempVar = tempVar.replace('Ü', 'Ue')
tempVar = tempVar.replace('ß', 'ss')
return tempVar
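# Example: umlauts('Grüße') -> 'Gruesse'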
# exclude punctuation
def lower_clean_str(x):
punc='!"#$%&\'()*+,./:;<=>?@[\\]^_`{|}~-„“'
lowercased_str = x.lower()
for ch in punc:
lowercased_str = lowercased_str.replace(ch, ' ')
return lowercased_str
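# Example: lower_clean_str('Hello, World!') -> 'hello  world ' (lowercased,
# with every punctuation character replaced by a space)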
##########################################################################################
# Application #
##########################################################################################
def application(news):
# create Pipelined RDD
df = sc.parallelize(news)
# remove punktuation and transform to lowercase
df = df.map(lower_clean_str)
#split sentences into list of words
df = df.flatMap(lambda satir: satir.split(" "))
# exclude whitespaces
df = df.filter(lambda x:x!='')
# count how many times each word occurs
count = df.map(lambda word:(word,1))
countRBK = count.reduceByKey(lambda x,y:(x+y)).sortByKey()
# rank words
countRBK = countRBK.map(lambda x:(x[1],x[0]))
countRBK = countRBK.sortByKey(False)
# get german stopwords and change their umlauts
stops =stopwords.words('german')
german_stopwords = []
for word in stops:
german_stopwords.append(umlauts(word))
# delete stopwords
countRBK = countRBK.filter(lambda x: x[1] not in german_stopwords)
# write result into pandas dataframe and export
    top_words = countRBK.take(5)  # collect the top five once instead of on every iteration
    export = pd.DataFrame({'trend-word': [w[1] for w in top_words]})
return export
##########################################################################################
# attaching database #
##########################################################################################
def data_from_datalake():
connection = happybase.Connection(host='lake-connection', port=9090, autoconnect=True)
table = connection.table('crawled_articles')
news = []
for k, data in table.scan():
news.append(data[b'data:title'].decode('utf-8'))
connection.close()
return news
##########################################################################################
# Run Application with Data #
##########################################################################################
def write_mongo(result):
# Create a MongoDB client
print(result)
# client = pymongo.MongoClient('mongodb://mongo-container:27017')
client = pymongo.MongoClient('mongodb://mongo-connection:27017')
# client = pymongo.MongoClient('mongodb://mongo-0.mongo-service')
# Specify the database to be used
db = client.news
    # Specify the collection to be used
collection = db.newscollection
dao_object = {"cat":"all","titles":[]}
# Insert a single document
for i in range(len(result)):
dao_object["titles"].append(result.iloc[i,0])
collection.update_one({"cat":"all"},{"$set": dao_object},upsert=True)
# Close the connection
client.close()
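# The upsert above keeps a single document per category; with the default
# "all" category the stored shape is (illustrative):
#   {"cat": "all", "titles": ["trend1", ..., "trend5"]}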
# run whole application
write_mongo(application(data_from_datalake()))
# sleep so that the pod only gets rebuilt after completion
time.sleep(500) | 37.007407 | 90 | 0.45036 | 423 | 4,996 | 5.264775 | 0.420804 | 0.044005 | 0.066008 | 0.04176 | 0.127077 | 0.110912 | 0.074091 | 0.074091 | 0.074091 | 0.074091 | 0 | 0.006508 | 0.169536 | 4,996 | 135 | 91 | 37.007407 | 0.530007 | 0.232186 | 0 | 0 | 0 | 0 | 0.08044 | 0.013548 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.128571 | 0 | 0.257143 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5bc642c6a81db7a4fc50c2b2445c823d9de303f | 12,795 | py | Python | alterations.py | AlexysAlves/Simulacao_de_trafego | 8193b8a47d284c1b84f2903d286d222f3984bbf1 | [
"MIT"
] | null | null | null | alterations.py | AlexysAlves/Simulacao_de_trafego | 8193b8a47d284c1b84f2903d286d222f3984bbf1 | [
"MIT"
] | null | null | null | alterations.py | AlexysAlves/Simulacao_de_trafego | 8193b8a47d284c1b84f2903d286d222f3984bbf1 | [
"MIT"
] | null | null | null | import random
import time
import threading
import pygame
import sys
# Default values of signal timers
defaultGreen = {0: 10, 1: 10, 2: 10, 3: 10}
defaultRed = 150
defaultYellow = 5
signals = []
noOfSignals = 4
currentGreen = 0 # Indicates which signal is green currently
nextGreen = (currentGreen + 1) % noOfSignals # Indicates which signal will turn green next
currentYellow = 0 # Indicates whether yellow signal is on or off
speeds = {'car': 2.25, 'bus': 1.8, 'truck': 1.8, 'bike': 2.5} # average speeds of vehicles
# Coordinates of vehicles' start
x = {'right': [0, 0, 0], 'down': [755, 727, 697], 'left': [1400, 1400, 1400], 'up': [602, 627, 657]}
y = {'right': [348, 370, 398], 'down': [0, 0, 0], 'left': [498, 466, 436], 'up': [800, 800, 800]}
vehicles = {'right': {0: [], 1: [], 2: [], 'crossed': 0}, 'down': {0: [], 1: [], 2: [], 'crossed': 0},
'left': {0: [], 1: [], 2: [], 'crossed': 0}, 'up': {0: [], 1: [], 2: [], 'crossed': 0}}
vehicleTypes = {0: 'car', 1: 'bus', 2: 'truck', 3: 'bike'}
directionNumbers = {0: 'right', 1: 'down', 2: 'left', 3: 'up'}
# Coordinates of signal image, timer, and vehicle count
signalCoods = [(530, 230), (810, 230), (810, 570), (530, 570)]
signalTimerCoods = [(530, 210), (810, 210), (810, 550), (530, 550)]
# Coordinates of stop lines
stopLines = {'right': 590, 'down': 330, 'left': 800, 'up': 535}
defaultStop = {'right': 580, 'down': 320, 'left': 810, 'up': 545}
# stops = {'right': [580,580,580], 'down': [320,320,320], 'left': [810,810,810], 'up': [545,545,545]}
# Gap between vehicles
stoppingGap = 15 # stopping gap
movingGap = 15 # moving gap
pygame.init()
simulation = pygame.sprite.Group()
class TrafficSignal:
def __init__(self, red, yellow, green):
self.red = red
self.yellow = yellow
self.green = green
self.signalText = ""
class Vehicle(pygame.sprite.Sprite):
def __init__(self, lane, vehicleClass, direction_number, direction):
pygame.sprite.Sprite.__init__(self)
self.lane = lane
self.vehicleClass = vehicleClass
self.speed = speeds[vehicleClass]
self.direction_number = direction_number
self.direction = direction
self.x = x[direction][lane]
self.y = y[direction][lane]
self.crossed = 0
vehicles[direction][lane].append(self)
self.index = len(vehicles[direction][lane]) - 1
path = "images/" + direction + "/" + vehicleClass + ".png"
self.image = pygame.image.load(path)
if (len(vehicles[direction][lane]) > 1 and vehicles[direction][lane][
                self.index - 1].crossed == 0):  # more than one vehicle in the lane and the vehicle ahead has not crossed the stop line yet
if (direction == 'right'):
self.stop = vehicles[direction][lane][self.index - 1].stop - vehicles[direction][lane][
self.index - 1].image.get_rect().width - stoppingGap # setting stop coordinate as: stop coordinate of next vehicle - width of next vehicle - gap
elif (direction == 'left'):
self.stop = vehicles[direction][lane][self.index - 1].stop + vehicles[direction][lane][
self.index - 1].image.get_rect().width + stoppingGap
elif (direction == 'down'):
self.stop = vehicles[direction][lane][self.index - 1].stop - vehicles[direction][lane][
self.index - 1].image.get_rect().height - stoppingGap
elif (direction == 'up'):
self.stop = vehicles[direction][lane][self.index - 1].stop + vehicles[direction][lane][
self.index - 1].image.get_rect().height + stoppingGap
else:
self.stop = defaultStop[direction]
# Set new starting and stopping coordinate
if (direction == 'right'):
temp = self.image.get_rect().width + stoppingGap
x[direction][lane] -= temp
elif (direction == 'left'):
temp = self.image.get_rect().width + stoppingGap
x[direction][lane] += temp
elif (direction == 'down'):
temp = self.image.get_rect().height + stoppingGap
y[direction][lane] -= temp
elif (direction == 'up'):
temp = self.image.get_rect().height + stoppingGap
y[direction][lane] += temp
simulation.add(self)
def render(self, screen):
screen.blit(self.image, (self.x, self.y))
def move(self):
if (self.direction == 'right'):
if (self.crossed == 0 and self.x + self.image.get_rect().width > stopLines[
self.direction]): # if the image has crossed stop line now
self.crossed = 1
if ((self.x + self.image.get_rect().width <= self.stop or self.crossed == 1 or (
currentGreen == 0 and currentYellow == 0)) and (
self.index == 0 or self.x + self.image.get_rect().width < (
vehicles[self.direction][self.lane][self.index - 1].x - movingGap))):
                # (if the image has not reached its stop coordinate or has crossed the stop line or has a green signal) and (it is either the first vehicle in that lane or it has enough gap to the next vehicle in that lane)
self.x += self.speed # move the vehicle
elif (self.direction == 'down'):
if (self.crossed == 0 and self.y + self.image.get_rect().height > stopLines[self.direction]):
self.crossed = 1
if ((self.y + self.image.get_rect().height <= self.stop or self.crossed == 1 or (
currentGreen == 1 and currentYellow == 0)) and (
self.index == 0 or self.y + self.image.get_rect().height < (
vehicles[self.direction][self.lane][self.index - 1].y - movingGap))):
self.y += self.speed
elif (self.direction == 'left'):
if (self.crossed == 0 and self.x < stopLines[self.direction]):
self.crossed = 1
if ((self.x >= self.stop or self.crossed == 1 or (currentGreen == 2 and currentYellow == 0)) and (
self.index == 0 or self.x > (
vehicles[self.direction][self.lane][self.index - 1].x + vehicles[self.direction][self.lane][
self.index - 1].image.get_rect().width + movingGap))):
self.x -= self.speed
elif (self.direction == 'up'):
if (self.crossed == 0 and self.y < stopLines[self.direction]):
self.crossed = 1
if ((self.y >= self.stop or self.crossed == 1 or (currentGreen == 3 and currentYellow == 0)) and (
self.index == 0 or self.y > (
vehicles[self.direction][self.lane][self.index - 1].y + vehicles[self.direction][self.lane][
self.index - 1].image.get_rect().height + movingGap))):
self.y -= self.speed
# Initialization of signals with default values
def initialize():
ts1 = TrafficSignal(0, defaultYellow, defaultGreen[0])
signals.append(ts1)
ts2 = TrafficSignal(ts1.red + ts1.yellow + ts1.green, defaultYellow, defaultGreen[1])
signals.append(ts2)
ts3 = TrafficSignal(defaultRed, defaultYellow, defaultGreen[2])
signals.append(ts3)
ts4 = TrafficSignal(defaultRed, defaultYellow, defaultGreen[3])
signals.append(ts4)
repeat()
def repeat():
global currentGreen, currentYellow, nextGreen
while (signals[currentGreen].green > 0): # while the timer of current green signal is not zero
updateValues()
time.sleep(1)
currentYellow = 1 # set yellow signal on
# reset stop coordinates of lanes and vehicles
for i in range(0, 3):
for vehicle in vehicles[directionNumbers[currentGreen]][i]:
vehicle.stop = defaultStop[directionNumbers[currentGreen]]
while (signals[currentGreen].yellow > 0): # while the timer of current yellow signal is not zero
updateValues()
time.sleep(1)
currentYellow = 0 # set yellow signal off
# reset all signal times of current signal to default times
signals[currentGreen].green = defaultGreen[currentGreen]
signals[currentGreen].yellow = defaultYellow
signals[currentGreen].red = defaultRed
currentGreen = nextGreen # set next signal as green signal
nextGreen = (currentGreen + 1) % noOfSignals # set next green signal
    signals[nextGreen].red = signals[currentGreen].yellow + signals[
        currentGreen].green  # red time of the next signal = (yellow + green) time of the now-current signal
repeat()
# Update values of the signal timers after every second
def updateValues():
for i in range(0, noOfSignals):
if (i == currentGreen):
if (currentYellow == 0):
signals[i].green -= 1
else:
signals[i].yellow -= 1
else:
signals[i].red -= 1
# Generating vehicles in the simulation
def generateVehicles():
daytime = 360
sleeptime = 0
while (True):
lane_number = 2 # original version: random.randint(1,2)
cartype = [60, 70, 80, 100]
dist = [50, 100]
temp1 = random.randint(0, 99)
temp2 = random.randint(0, 99)
direction_number = 0
if (temp1 < cartype[0]):
vehicle_type = 0
elif (temp1 < cartype[1]):
vehicle_type = 1
elif (temp1 < cartype[2]):
vehicle_type = 2
elif (temp1 < cartype[3]):
vehicle_type = 3
if (temp2 < dist[0]):
direction_number = 0
elif (temp2 < dist[1]):
direction_number = 3
if (daytime < 360):
sleeptime = 5
elif (daytime >= 360 and daytime < 480):
sleeptime = 2
elif (daytime >= 480 and daytime < 720):
sleeptime = 3
elif (daytime >= 720 and daytime < 840):
sleeptime = 2
elif (daytime >= 840 and daytime < 1080):
sleeptime = 3
elif (daytime >= 1080 and daytime < 1260):
sleeptime = 1
elif (daytime >= 1260):
sleeptime = 4
Vehicle(lane_number, vehicleTypes[vehicle_type], direction_number, directionNumbers[direction_number])
time.sleep(sleeptime)
daytime += sleeptime
def turnp(probability):
    # return True with the given probability (Bernoulli draw)
    return random.uniform(0, 1) <= probability
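# Illustrative use of turnp (it is not called elsewhere in this script):
#   if turnp(0.3):  # True with probability ~0.3
#       ...         # e.g. let the vehicle turn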
class Main:
thread1 = threading.Thread(name="initialization",target=initialize, args=()) # initialization
thread1.daemon = True
thread1.start()
# Colours
black = (0, 0, 0)
white = (255, 255, 255)
# Screensize
screenWidth = 1400
screenHeight = 800
screenSize = (screenWidth, screenHeight)
# Setting background image i.e. image of intersection
background = pygame.image.load('images/intersection.png')
screen = pygame.display.set_mode(screenSize)
pygame.display.set_caption("SIMULATION")
# Loading signal images and font
redSignal = pygame.image.load('images/signals/red.png')
yellowSignal = pygame.image.load('images/signals/yellow.png')
greenSignal = pygame.image.load('images/signals/green.png')
font = pygame.font.Font(None, 30)
thread2 = threading.Thread(name="generateVehicles",target=generateVehicles, args=()) # Generating vehicles
thread2.daemon = True
thread2.start()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
screen.blit(background,(0,0)) # display background in simulation
        for i in range(0,noOfSignals):  # display signal and set timer according to current status: green, yellow, or red
if(i==currentGreen):
if(currentYellow==1):
signals[i].signalText = signals[i].yellow
screen.blit(yellowSignal, signalCoods[i])
else:
signals[i].signalText = signals[i].green
screen.blit(greenSignal, signalCoods[i])
else:
if(signals[i].red<=10):
signals[i].signalText = signals[i].red
else:
signals[i].signalText = "---"
screen.blit(redSignal, signalCoods[i])
signalTexts = ["","","",""]
# display signal timer
for i in range(0,noOfSignals):
signalTexts[i] = font.render(str(signals[i].signalText), True, white, black)
screen.blit(signalTexts[i],signalTimerCoods[i])
# display the vehicles
for vehicle in simulation:
screen.blit(vehicle.image, [vehicle.x, vehicle.y])
vehicle.move()
pygame.display.update()
Main() | 41.407767 | 220 | 0.590074 | 1,522 | 12,795 | 4.931012 | 0.167543 | 0.023984 | 0.025583 | 0.027981 | 0.300333 | 0.243038 | 0.223584 | 0.192538 | 0.167089 | 0.132445 | 0 | 0.046994 | 0.279875 | 12,795 | 309 | 221 | 41.407767 | 0.767528 | 0.138335 | 0 | 0.195122 | 0 | 0 | 0.031136 | 0.008558 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036585 | false | 0 | 0.020325 | 0 | 0.130081 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5bc9bb9de777c853bd717ee97128cd3e2825f2c | 6,327 | py | Python | pylbm/mpi_topology.py | Mopolino8/pylbm | b457ccdf1e7a1009807bd1136a276886f81a9e7d | [
"BSD-3-Clause"
] | 106 | 2016-09-13T07:19:17.000Z | 2022-03-19T13:41:55.000Z | pylbm/mpi_topology.py | Mopolino8/pylbm | b457ccdf1e7a1009807bd1136a276886f81a9e7d | [
"BSD-3-Clause"
] | 53 | 2017-09-18T04:51:19.000Z | 2022-01-19T21:36:23.000Z | pylbm/mpi_topology.py | gouarin/pylbm | fd4419933e05b85be364232fddedfcb4f7275e1f | [
"BSD-3-Clause"
] | 33 | 2016-06-17T13:21:17.000Z | 2021-11-11T16:57:46.000Z | # Authors:
# Loic Gouarin <loic.gouarin@polytechnique.edu>
# Benjamin Graille <benjamin.graille@math.u-psud.fr>
#
# License: BSD 3 clause
"""
Module which implements a Cartesian MPI topology
"""
import numpy as np
import mpi4py.MPI as mpi
from .options import options
class MpiTopology:
"""
    Interface construction using an MPI topology.
Parameters
----------
dim : int
number of spatial dimensions (1, 2, or 3)
comm : comm
the default MPI communicator
period : list
boolean list that specifies if a direction is periodic or not.
Its size is dim.
Attributes
----------
dim : int
number of spatial dimensions (1, 2, or 3)
comm : comm
the communicator of the topology
split : tuple
number of processes in each direction
neighbors : list
list of the neighbors where we have to send and to receive messages
sendType : list
list of subarrays that defines the part of data to be send
sendTag : list
list of tags for the send messages
recvType : list
list of subarrays that defines the part of data to update during a receive message
recvTag : list
list of tags for the receive messages
Methods
-------
set_options :
defines command line options.
get_coords :
return the coords of the process in the MPI topology.
set_subarray :
create subarray for the send and receive message
update :
update a numpy array according to the subarrays and the topology.
"""
def __init__(self, dim, period, comm=mpi.COMM_WORLD):
self.dim = dim
self.set_options()
self.comm = comm
# if npx, npy and npz are all set to the default value (1)
# then Compute_dims performs the splitting of the domain
if self.npx == self.npy == self.npz == 1:
size = comm.Get_size()
split = mpi.Compute_dims(size, self.dim)
else:
split = (self.npx, self.npy, self.npz)
self.split = np.asarray(split[:self.dim])
self.cartcomm = comm.Create_cart(self.split, period)
def get_region_indices_(self, n, axis=0):
"""
1D region indices owned by each sub domain.
Parameters
----------
n : int
number of total discrete points for a given axis
axis : int
axis used in the MPI topology
Returns
-------
list
list of regions owned by each processes for a given axis
"""
region_indices = [0]
nproc = self.cartcomm.Get_topo()[0][axis]
for i in range(nproc):
region_indices.append(region_indices[-1] + n//nproc + ((n % nproc) > i))
return region_indices
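    # Hedged example: n=10 points split over nproc=3 ranks along one axis
    # yields region_indices == [0, 4, 7, 10], i.e. blocks of sizes 4, 3, 3.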
def get_region_indices(self, nx, ny=None, nz=None):
"""
Region indices owned by each sub domain.
Parameters
----------
nx : int
number of total discrete points in x direction
ny : int
number of total discrete points in y direction
default is None
nz : int
number of total discrete points in z direction
default is None
Returns
-------
list
list of regions owned by each processes
"""
region_indices = [self.get_region_indices_(nx, 0)]
if ny is not None:
region_indices.append(self.get_region_indices_(ny, 1))
if nz is not None:
region_indices.append(self.get_region_indices_(nz, 2))
return region_indices
def get_coords(self):
"""
return the coords of the process in the MPI topology
as a numpy array.
"""
rank = self.cartcomm.Get_rank()
return np.asarray(self.cartcomm.Get_coords(rank))
def get_region(self, nx, ny=None, nz=None):
"""
Region indices owned by the sub domain.
Parameters
----------
nx : int
number of total discrete points in x direction
ny : int
number of total discrete points in y direction
default is None
nz : int
number of total discrete points in z direction
default is None
Returns
-------
list
region owned by the process
"""
region_indices = self.get_region_indices(nx, ny, nz)
coords = self.get_coords()
region = []
for i in range(coords.size):
region.append([region_indices[i][coords[i]],
region_indices[i][coords[i] + 1]
])
return region
def set_options(self):
"""
defines command line options.
"""
self.npx = int(options().npx)
self.npy = int(options().npy)
self.npz = int(options().npz)
def get_directions(dim):
"""
Return an array with all the directions around.
Parameters
----------
dim : int
number of spatial dimensions (1, 2, or 3)
Returns
-------
ndarray
all the possible directions with a stencil of 1
Examples
--------
>>> get_directions(1)
array([[-1],
[ 0],
[ 1]])
>>> get_directions(2)
array([[-1, -1],
[-1, 0],
[-1, 1],
[ 0, -1],
[ 0, 0],
[ 0, 1],
[ 1, -1],
[ 1, 0],
[ 1, 1]], dtype=int32)
"""
common_direction = np.array([-1, 0, 1])
if dim == 1:
directions = common_direction[:, np.newaxis]
elif dim == 2:
common_direction = common_direction[np.newaxis, :]
directions = np.empty((9, 2), dtype=np.int32)
directions[:, 0] = np.repeat(common_direction, 3, axis=1).flatten()
directions[:, 1] = np.repeat(common_direction, 3, axis=0).flatten()
elif dim == 3:
common_direction = common_direction[np.newaxis, :]
directions = np.empty((27, 3), dtype=np.int32)
directions[:, 0] = np.repeat(common_direction, 9, axis=1).flatten()
directions[:, 1] = np.repeat(np.repeat(common_direction, 3, axis=0), 3).flatten()
directions[:, 2] = np.repeat(common_direction, 9, axis=0).flatten()
return directions
| 26.472803 | 89 | 0.5633 | 793 | 6,327 | 4.417402 | 0.211854 | 0.074222 | 0.031402 | 0.031973 | 0.475592 | 0.439052 | 0.39509 | 0.343134 | 0.329717 | 0.2495 | 0 | 0.018792 | 0.335546 | 6,327 | 238 | 90 | 26.584034 | 0.814462 | 0.460408 | 0 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116667 | false | 0 | 0.05 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5bd1c8d864738a7eee90d8736eddcad096e5f3d | 10,060 | py | Python | exact.py | jesgadiaz/ckc | 717e7289fff530ea5be4d6db94dc2936e355ed8c | [
"Apache-2.0"
] | 1 | 2020-02-20T10:01:03.000Z | 2020-02-20T10:01:03.000Z | exact.py | jesgadiaz/ckc | 717e7289fff530ea5be4d6db94dc2936e355ed8c | [
"Apache-2.0"
] | null | null | null | exact.py | jesgadiaz/ckc | 717e7289fff530ea5be4d6db94dc2936e355ed8c | [
"Apache-2.0"
] | 1 | 2019-12-05T05:30:50.000Z | 2019-12-05T05:30:50.000Z | from gurobipy import *
import math
import numpy as np
import heapq
def heap_sort(items):
heapq.heapify(items)
items[:] = [heapq.heappop(items) for i in range(len(items))]
return items
def createGraph(input_file, instance_format):
global n, m , k, matrix, ordered_sizes
if instance_format == 'orlib':
f = open(input_file, "r")
matrix = []
for i in range(0,n):
list = []
for j in range(0,n):
list.append(float("inf"))
matrix.append(list)
m = sum(1 for line in open(input_file))
#with open(input_file, "r") as f:
for i in range(0, m):
string = f.readline()
string = string.split()
            if string and string[0] != "EOF":  # skip blank lines and the EOF marker
v1 = int(string[0]) - 1
v2 = int(string[1]) - 1
weight = int(string[2])
matrix[v1][v2] = weight
matrix[v2][v1] = weight
f.close()
for i in range(0, n):
matrix[i][i] = 0
for i in range(0, n):
#print(i)
for j in range(0, n):
for l in range(0, n):
if matrix[i][j] == float("inf") or matrix[i][l] == float("inf"):
cost = float("inf")
else:
cost = matrix[i][j] + matrix[i][l]
if cost < matrix[j][l]:
matrix[j][l] = cost
ordered_sizes = []
for i in range(0, n):
for j in range(i, n):
ordered_sizes.append(matrix[i][j])
ordered_sizes = heap_sort(ordered_sizes)
elif instance_format == 'tsplib':
f = open(input_file, "r")
m = n
matrix = []
for i in range(0,n):
list = []
for j in range(0,n):
list.append(float("inf"))
matrix.append(list)
positions = []
for i in range(0, m):
string = f.readline()
string = string.split()
temp_position = []
temp_position.append(int(string[0])-1)
temp_position.append(float(string[1]))
temp_position.append(float(string[2]))
positions.append(temp_position)
for i in range(0, n):
for j in range(0, n):
dist_temp = math.sqrt(((positions[i][1] - positions[j][1]) * (positions[i][1] - positions[j][1])) + ((positions[i][2] - positions[j][2]) * (positions[i][2] - positions[j][2])))
matrix[i][j] = dist_temp
matrix[j][i] = dist_temp
f.close()
for i in range(0, n):
matrix[i][i] = 0
ordered_sizes = []
for i in range(0, n):
for j in range(i, n):
ordered_sizes.append(matrix[i][j])
ordered_sizes = heap_sort(ordered_sizes)
def run(r):
global total_runtime, k, runtime, num_centers, m, cap, input_file
prunedMatrix = []
for i in range(0,n):
list = []
for j in range(0,n):
list.append(float(0))
prunedMatrix.append(list)
for i in range(0,n):
for j in range(0,n):
if matrix[i][j] <= r:
prunedMatrix[i][j] = 1
try:
m = Model("mip1")
#******************************************************************************************************
m.setParam("MIPGap", 0.0);
#******************************************************************************************************
y = []
for i in range(n):
y.append(0)
for i in range(n):
y[i] = m.addVar(vtype=GRB.BINARY, name="y%s" % str(i+1))
m.setObjective(sum(y), GRB.MINIMIZE)
temp_list = np.array(prunedMatrix).T.tolist()
for i in range(n):
m.addConstr(sum(np.multiply(temp_list[i], y).tolist()) >= 1)
x = []
for i in range(n):
temp = []
for j in range(n):
temp.append(0)
x.append(temp)
for i in range(n):
for j in range(n):
x[i][j] = m.addVar(vtype=GRB.BINARY, name="x%s%s" % (str(i+1), str(j+1)))
temp_list_2 = np.array(x).T.tolist()
for i in range(n):
m.addConstr(sum(temp_list_2[i]) * y[i] <= L)
for i in range(n):
for j in range(n):
#m.addConstr(x[i][j] <= y[j] * prunedMatrix[i][j])
#******************************************************************************************************
m.addConstr(x[i][j] <= y[j] * prunedMatrix[i][j] * (1-y[i]))
#******************************************************************************************************
for i in range(n):
#m.addConstr(sum(x[i]) == 1)
#******************************************************************************************************
m.addConstr(sum(x[i]) == 1 * (1-y[i]))
#******************************************************************************************************
m.optimize()
runtime = m.Runtime
print("The run time is %f" % runtime)
print("Obj:", m.objVal)
#******************************************************************************************************
dom_set_size = 0
solution = []
assignment = []
center = 0
vertex_j = 1
vertex_i = 1
for v in m.getVars():
varName = v.varName
if varName[0] == 'y':
if v.x == 1.0:
dom_set_size = dom_set_size + 1
solution.append(varName[1:])
else:
                if vertex_j > n:  # wrapped past the last column: advance to the next row first
                    vertex_i = vertex_i + 1
                    vertex_j = 1
                if v.x == 1.0:
                    assignment.append([vertex_i, vertex_j])
                vertex_j = vertex_j + 1
print("Cap. dom. set cardinality: " + str(dom_set_size))
solution = [int(i) for i in solution]
#print("solution: " + str(solution))
#print("assignment: " + str(assignment))
print('{"instance": "%s",' % input_file)
print('"centers": [')
counter = 0
for center in solution:
counter = counter + 1
nodes = []
for node in assignment:
if node[1] == center:
nodes.append(node[0])
if counter == len(solution):
print('{ "center": ' + str(center) + ', "nodes": ' + str(nodes) + '}')
else:
print('{ "center": ' + str(center) + ', "nodes": ' + str(nodes) + '},')
print(']}')
#print('%s %g' % (v.varName, v.x))
#******************************************************************************************************
# {"instance": "/home/ckc/Escritorio/pr124.tsp",
# "outliers": [83,40,115,114],
# "centers": [ { "center": 59, "nodes": [28,32,33,34,35,54,57,58,59,60,61,64,65]},
# { "center": 102, "nodes": [101,102,103,104,105,106,107,108,109,110,111,112,113]},
# { "center": 8, "nodes": [8,9,10,11,12,13,14,15,16,46,47,48,49]},
# { "center": 79, "nodes": [77,78,79,91,92,93,94,95,96,97,98,99,123]},
# { "center": 6, "nodes": [0,1,2,3,4,5,6,7,26,27,29,30,31]},
# { "center": 36, "nodes": [19,20,21,22,23,24,25,36,37,38,39,55,56]},
# { "center": 16, "nodes": [17,18,40,41,42,43,44,45,50,51,52,53]},
# { "center": 96, "nodes": [72,73,74,75,76,80,116,117,118,119,120,121,122]},
# { "center": 89, "nodes": [84,85,86,87,88,89,90,100]},
# { "center": 64, "nodes": [62,63,66,67,68,69,70,71,81,82,83,114,115]}
# ]}
num_centers = dom_set_size
# num_centers = m.objVal
except GurobiError:
print("Error reported")
def binarySearch():
global total_runtime, k, runtime, num_centers, input_file
total_runtime = 0
not_done = True
upper = len(ordered_sizes) - 1
lower = 0
best_solution_size = float("inf")
while not_done:
#mid = math.ceil(lower + ((upper - lower)/2))
mid = math.ceil((upper + lower) /2)
mid_value = ordered_sizes[int(mid)]
if mid == upper:
not_done = False
run(mid_value)
total_runtime = total_runtime + runtime
else:
run(mid_value)
total_runtime = total_runtime + runtime
if num_centers <= k:
upper = mid
print("UPPER = MID")
if mid_value <= best_solution_size:
best_solution_size = mid_value
else:
lower = mid
print("LOWER = MID")
print("best solution size: " + str(best_solution_size))
print("total runtime: " + str(total_runtime))
if __name__ == "__main__":
global total_runtime, k, runtime, num_centers, L, n
if len(sys.argv) != 6:
print ("Wrong number of arguments")
print ("exact input_file_path n k L instance_format")
sys.exit()
input_file = sys.argv[1]
n = int(sys.argv[2])
k = int(sys.argv[3])
L = int(sys.argv[4])
instance_format = sys.argv[5]
createGraph(input_file, instance_format)
binarySearch()
| 37.677903 | 193 | 0.41004 | 1,152 | 10,060 | 3.488715 | 0.235243 | 0.057477 | 0.032844 | 0.057477 | 0.36427 | 0.329933 | 0.276437 | 0.233143 | 0.185369 | 0.185369 | 0 | 0.061296 | 0.375646 | 10,060 | 266 | 194 | 37.819549 | 0.57857 | 0.199006 | 0 | 0.348259 | 0 | 0 | 0.042515 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019901 | false | 0 | 0.019901 | 0 | 0.044776 | 0.074627 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5c851fb9a85bd589c9d3056d5470e792ff6484e | 2,551 | py | Python | war3structs/objects.py | sides/war3structs | 171c91240346e610e22cf10bab0c6d526996f855 | [
"MIT"
] | 10 | 2019-12-07T12:10:13.000Z | 2022-02-24T12:45:32.000Z | war3structs/objects.py | warlockbrawl/war3structs | 171c91240346e610e22cf10bab0c6d526996f855 | [
"MIT"
] | null | null | null | war3structs/objects.py | warlockbrawl/war3structs | 171c91240346e610e22cf10bab0c6d526996f855 | [
"MIT"
] | 3 | 2020-02-28T12:43:26.000Z | 2020-06-08T23:31:29.000Z | from construct import *
from .common import *
"""
Formats: w3u, w3t, w3b, w3h, w3d, w3a, w3q
Version: 1
The objects file contains data that the object editor would typically
manipulate. If dealing with abilities, doodads or upgrades, the
ObjectsWithVariationsFile is used instead of the ObjectsFile.
Optionally, the ObjectsBestFitFile can be used as well which tries to
parse the file with both formats--one should always fail when used
with the other, so it selects whichever didn't fail. Performance
should be really bad on this.
"""
class ObjectModificationTerminatorValidator(Validator):
def _validate(self, obj, ctx, path):
return obj in [b"\x00\x00\x00\x00", ctx._.new_object_id, ctx._.original_object_id]
ObjectModification = Struct(
"modification_id" / ByteId,
"value_type" / Enum(Integer, INT=0, REAL=1, UNREAL=2, STRING=3),
"value" / Switch(this.value_type, {
"INT" : Integer,
"REAL" : Float,
"UNREAL" : Float,
"STRING" : String
}),
"parent_object_id" / ObjectModificationTerminatorValidator(ByteId)
)
ObjectDefinition = Struct(
"original_object_id" / ByteId,
"new_object_id" / ByteId,
"modifications_count" / Integer,
"modifications" / Array(this.modifications_count, ObjectModification)
)
ObjectTable = Struct(
"objects_count" / Integer,
"objects" / Array(this.objects_count, ObjectDefinition)
)
ObjectsFile = Struct(
"version" / Integer,
"original_objects_table" / ObjectTable,
"custom_objects_table" / ObjectTable
)
ObjectModificationWithVariation = Struct(
"modification_id" / ByteId,
"value_type" / Enum(Integer, INT=0, REAL=1, UNREAL=2, STRING=3),
"variation" / Integer,
"ability_data_column" / Enum(Integer, A=0, B=1, C=2, D=3, F=4, G=5, H=6),
"value" / Switch(this.value_type, {
"INT" : Integer,
"REAL" : Float,
"UNREAL" : Float,
"STRING" : String
}),
"parent_object_id" / ObjectModificationTerminatorValidator(ByteId)
)
ObjectDefinitionWithVariations = Struct(
"original_object_id" / ByteId,
"new_object_id" / ByteId,
"modifications_count" / Integer,
"modifications" / Array(this.modifications_count, ObjectModificationWithVariation)
)
ObjectTableWithVariations = Struct(
"objects_count" / Integer,
"objects" / Array(this.objects_count, ObjectDefinitionWithVariations)
)
ObjectsWithVariationsFile = Struct(
"version" / Integer,
"original_objects_table" / ObjectTableWithVariations,
"custom_objects_table" / ObjectTableWithVariations
)
ObjectsBestFitFile = Select(ObjectsWithVariationsFile, ObjectsFile)
| 30.369048 | 86 | 0.73579 | 287 | 2,551 | 6.390244 | 0.428571 | 0.034896 | 0.030534 | 0.028353 | 0.430752 | 0.430752 | 0.387132 | 0.387132 | 0.387132 | 0.329335 | 0 | 0.014365 | 0.154057 | 2,551 | 83 | 87 | 30.73494 | 0.835496 | 0 | 0 | 0.466667 | 0 | 0 | 0.214391 | 0.021537 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016667 | false | 0 | 0.033333 | 0.016667 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5c8fc169ab19e9767386e1463980ba6e2c72681 | 3,078 | py | Python | lexicon.py | adamlek/swedish-lexical-blends | 5189bcc1680fda5ac32637dd63b895c091b56997 | [
"MIT"
] | null | null | null | lexicon.py | adamlek/swedish-lexical-blends | 5189bcc1680fda5ac32637dd63b895c091b56997 | [
"MIT"
] | null | null | null | lexicon.py | adamlek/swedish-lexical-blends | 5189bcc1680fda5ac32637dd63b895c091b56997 | [
"MIT"
] | null | null | null | import pickle
from collections import defaultdict
from helper_functions import format_lemma, get_blends_csv
from os import listdir
import networkx as nx
def saldo_obj(filename):
saldo = defaultdict(int)
with open(filename) as f:
for line in f:
if line.startswith('#'):
continue
line = line.split('\t')
pos = line[-2].upper()
lemma_id = line[0]
lemma = line[0].split('..')[0].lower()
mother = line[1]
father = line[2]
saldo[lemma] = (pos, father, mother, lemma_id)
return saldo
# def construct_network(saldo):
# G = nx.DiGraph()
# for k, (_, m, f, li) in saldo.items():
# if m not in G.nodes:
# G.add_node(m)
# if f not in G.nodes:
# G.add_node(m)
# if li not in G.nodes:
# G.add_node(li)
# if k not in G.nodes:
# G.add('_' + k)
# if G.has_edge(li, k):
# G[k][li]['weight'] += 1
# else:
# G.add_edge(k, li, weight=1)
# if G.jas
def get_candidates():
lexicon = 'saldo'
corpus = 'news'
candidate_folder = f'/home/adam/Documents/lexical_blends_project/{lexicon}_blends_candidates_noverlap_1/'
c_set = set()
for i, filename in enumerate(listdir(candidate_folder)):
blend = filename.split('_')[0]
print('### reading blend:', i, blend)
with open(candidate_folder+filename) as f:
for ln in f:
cw1, cw2 = ln.rstrip().split(',')
c_set.add(cw1)
c_set.add(cw2)
return c_set
def nst_obj(filename):
nst = defaultdict(int)
with open(filename, encoding='iso-8859-1') as f:
for i, line in enumerate(f):
if line.startswith('!') or line.startswith('-'):
continue
line = line.split(';')
seg = line[0]
pos = line[1]
sampa = line[11]
while '|' in pos:
pos = pos.split('|')[0]
nst[seg.lower()] = (pos, sampa)
return nst
if __name__ == '__main__':
#with open('/home/adam/Documents/lexical_blends_project/data/nst_lex.pickle', '+wb') as f:
# nst = nst_obj('/home/adam/data/NST_svensk_leksikon/swe030224NST.pron/swe030224NST.pron')
# pickle.dump(nst, f)
#with open('/home/adam/Documents/lexical_blends_project/data/saldo_lex.pickle', '+wb') as f:
# saldo = saldo_obj('/home/adam/data/saldo_2.3/saldo20v03.txt')
# pickle.dump(saldo, f)
with open('/home/adam/Documents/lexical_blends_project/data/nst_lex.pickle', 'rb') as f:
nst = pickle.load(f)
with open('/home/adam/Documents/lexical_blends_project/data/saldo_lex.pickle', 'rb') as f:
saldo = pickle.load(f)
c_set = get_candidates()
print(list(saldo.keys())[:100])
print(list(nst.keys())[:100])
n_set = set(nst.keys())
s_set = set(saldo.keys())
true = len(c_set.intersection(n_set))/len(c_set)
print(true)
| 29.596154 | 109 | 0.556855 | 414 | 3,078 | 3.987923 | 0.277778 | 0.033919 | 0.051484 | 0.072683 | 0.312538 | 0.264082 | 0.190188 | 0.17868 | 0.17868 | 0.152029 | 0 | 0.022263 | 0.299545 | 3,078 | 103 | 110 | 29.883495 | 0.743506 | 0.27258 | 0 | 0.033898 | 0 | 0 | 0.12291 | 0.095346 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050847 | false | 0 | 0.084746 | 0 | 0.186441 | 0.067797 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5cc7b20cfa963b9093b9d8a0f7b606c9c72c66a | 1,737 | py | Python | app.py | macloo/flask-form-exercise | e487c84cfe6fb995aa1615c2c6e3c6f1cef5a537 | [
"MIT"
] | null | null | null | app.py | macloo/flask-form-exercise | e487c84cfe6fb995aa1615c2c6e3c6f1cef5a537 | [
"MIT"
] | null | null | null | app.py | macloo/flask-form-exercise | e487c84cfe6fb995aa1615c2c6e3c6f1cef5a537 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, redirect, url_for
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import Required
import csv
app = Flask(__name__)
app.config['DEBUG'] = True
# Flask-WTF requires an encryption key - the string can be anything
app.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'
# Flask-Bootstrap requires this line
Bootstrap(app)
# ---------------------------------------------------------------------------
# with Flask-WTF, each web form is represented by a class
# "RestForm" can be changed; "(FlaskForm)" cannot
# see the route for "/" to see how this is used
class RestForm(FlaskForm):
restaurant = StringField('Restaurant name', validators=[Required()])
submit = SubmitField('Submit')
# Exercise:
# add: address, city, state, zip, phone, url, cuisine, price_range
# make price_range a select element with choice of $ to $$$$
# make all fields required except submit
# ---------------------------------------------------------------------------
# all Flask routes below
@app.route('/', methods=['GET', 'POST'])
def index():
form = RestForm()
# Exercise:
# Make the form write a new row into restaurants.csv
# with if form.validate_on_submit()
return render_template('index.html', form=form)
@app.route('/restaurants')
def restaurants():
csvfile = open('restaurants.csv', newline='')
myreader = csv.reader(csvfile, delimiter=',')
list_of_rows = []
for row in myreader:
list_of_rows.append(row)
csvfile.close()
return render_template('rest.html',rests=list_of_rows)
# keep this as is
if __name__ == '__main__':
app.run(debug=True)
| 31.017857 | 77 | 0.65688 | 213 | 1,737 | 5.220657 | 0.497653 | 0.024281 | 0.026978 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004087 | 0.154865 | 1,737 | 55 | 78 | 31.581818 | 0.753406 | 0.408751 | 0 | 0 | 0 | 0 | 0.129703 | 0.031683 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.464286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5cd738342d6d3e6c1d28d3273ebe5ae8755466f | 8,015 | py | Python | core_scripts/data_io/io_tools.py | tyanz/project-NN-Pytorch-scripts | 7e90df0f90b04088613d6efb667e147a366273fb | [
"BSD-3-Clause"
] | null | null | null | core_scripts/data_io/io_tools.py | tyanz/project-NN-Pytorch-scripts | 7e90df0f90b04088613d6efb667e147a366273fb | [
"BSD-3-Clause"
] | null | null | null | core_scripts/data_io/io_tools.py | tyanz/project-NN-Pytorch-scripts | 7e90df0f90b04088613d6efb667e147a366273fb | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
io_tools
Functions to load data
"""
from __future__ import absolute_import
import os
import sys
import json
import numpy as np
__author__ = "Xin Wang"
__email__ = "wangxin@nii.ac.jp"
__copyright__ = "Copyright 2020, Xin Wang"
def f_read_raw_mat(filename, col, data_format='f4', end='l'):
"""read_raw_mat(filename,col,data_format='float',end='l')
Read the binary data from filename
Return data, which is a (N, col) array
    filename: the name of the file, take care with '\\' in paths
    col: the number of columns of the data
    data_format: a numpy dtype code
                 default: 'f4', float32
                 see the numpy dtype documentation for more formats
    end: little endian 'l' or big endian 'b'?
         default: 'l'
    dependency: numpy
    Note: to read raw binary data in python, the question
          is how to interpret the binary data. We could use
          struct.unpack('f', read_data) to interpret the data
          as float, however, it is slow.
"""
f = open(filename,'rb')
if end=='l':
data_format = '<'+data_format
elif end=='b':
data_format = '>'+data_format
else:
data_format = '='+data_format
datatype = np.dtype((data_format,(col,)))
data = np.fromfile(f,dtype=datatype)
f.close()
if data.ndim == 2 and data.shape[1] == 1:
return data[:,0]
else:
return data
def f_read_raw_mat_length(filename, data_format='f4'):
"""f_read_raw_mat_length(filename,data_format='float',end='l')
Read length of data
"""
f = open(filename,'rb')
tmp = f.seek(0, 2)
bytes_num = f.tell()
f.close()
if data_format == 'f4':
return int(bytes_num / 4)
else:
return bytes_num
def f_read_htk(filename, data_format='f4', end='l'):
"""read_htk(filename, data_format='f4', end='l')
Read HTK File and return the data as numpy.array
filename: input file name
data_format: the data_format of the data
default: 'f4' float32
end: little endian 'l' or big endian 'b'?
default: 'l'
"""
if end=='l':
data_format = '<'+data_format
data_formatInt4 = '<i4'
data_formatInt2 = '<i2'
elif end=='b':
data_format = '>'+data_format
data_formatInt4 = '>i4'
data_formatInt2 = '>i2'
else:
data_format = '='+data_format
data_formatInt4 = '=i4'
data_formatInt2 = '=i2'
head_type = np.dtype([('nSample',data_formatInt4), ('Period',data_formatInt4),
('SampleSize',data_formatInt2), ('kind',data_formatInt2)])
f = open(filename,'rb')
head_info = np.fromfile(f,dtype=head_type,count=1)
"""if end=='l':
data_format = '<'+data_format
elif end=='b':
data_format = '>'+data_format
else:
data_format = '='+data_format
"""
if 'f' in data_format:
sample_size = int(head_info['SampleSize'][0]/4)
else:
print("Error in read_htk: input should be float32")
return False
datatype = np.dtype((data_format,(sample_size,)))
data = np.fromfile(f,dtype=datatype)
f.close()
return data
def f_read_htk_length(filename, data_format='f4', end='l'):
"""read_htk(filename, data_format='f4', end='l')
Read HTK File and return the data as numpy.array
filename: input file name
data_format: the data_format of the data
default: 'f4' float32
end: little endian 'l' or big endian 'b'?
default: 'l'
"""
if end=='l':
data_format = '<'+data_format
data_formatInt4 = '<i4'
data_formatInt2 = '<i2'
elif end=='b':
data_format = '>'+data_format
data_formatInt4 = '>i4'
data_formatInt2 = '>i2'
else:
data_format = '='+data_format
data_formatInt4 = '=i4'
data_formatInt2 = '=i2'
head_type = np.dtype([('nSample',data_formatInt4), ('Period',data_formatInt4),
('SampleSize',data_formatInt2), ('kind',data_formatInt2)])
f = open(filename,'rb')
head_info = np.fromfile(f,dtype=head_type,count=1)
f.close()
sample_size = int(head_info['SampleSize'][0]/4)
return sample_size
def f_write_raw_mat(data,filename,data_format='f4',end='l'):
"""write_raw_mat(data,filename,data_format='',end='l')
Write the binary data from filename.
Return True
data: np.array
    filename: the name of the file, take care with '\\' in paths
    data_format: a numpy dtype code
default: 'f4', float32
end: little endian 'l' or big endian 'b'?
default: '', only when data_format is specified, end
is effective
dependency: numpy
    Note: we could also write two for loops to write the data using
          f.write(data[a][b]), but that is too slow
"""
if not isinstance(data, np.ndarray):
print("Error write_raw_mat: input shoul be np.array")
return False
f = open(filename,'wb')
if len(data_format)>0:
if end=='l':
data_format = '<'+data_format
elif end=='b':
data_format = '>'+data_format
else:
data_format = '='+data_format
datatype = np.dtype(data_format)
temp_data = data.astype(datatype)
else:
temp_data = data
temp_data.tofile(f,'')
f.close()
return True
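# Minimal round-trip sketch for the raw-matrix helpers above (hedged
# example; 'tmp.bin' is a placeholder path):
#   data = np.random.randn(10, 3).astype(np.float32)
#   f_write_raw_mat(data, 'tmp.bin')
#   loaded = f_read_raw_mat('tmp.bin', 3)  # -> (10, 3) float32 array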
def f_write_htk(data,targetfile,sampPeriod=50000,sampKind=9,data_format='f4',end='l'):
"""
write_htk(data,targetfile,
sampPeriod=50000,sampKind=9,data_format='f4',end='l')
"""
if data.ndim==1:
nSamples, vDim = data.shape[0], 1
else:
nSamples, vDim = data.shape
if data_format=='f4':
        sampSize = vDim * 4
else:
        sampSize = vDim * 8
f = open(targetfile,'wb')
if len(data_format)>0:
if end=='l':
data_format1 = '<i4'
data_format2 = '<i2'
elif end=='b':
data_format1 = '>i4'
data_format2 = '>i2'
else:
data_format1 = '=i4'
data_format2 = '=i2'
        temp_data = np.array([nSamples, sampPeriod],
                             dtype=np.dtype(data_format1))  # header ints must use the 4-byte integer format
temp_data.tofile(f, '')
temp_data = np.array([sampSize, sampKind], dtype=np.dtype(data_format2))
temp_data.tofile(f, '')
if len(data_format)>0:
if end=='l':
data_format = '<'+data_format
elif end=='b':
data_format = '>'+data_format
else:
data_format = '='+data_format
datatype = np.dtype(data_format)
temp_data = data.astype(datatype)
else:
temp_data = data
temp_data.tofile(f, '')
f.close()
return True
def read_dic(file_path):
""" dic = read_dic(file_path)
Read a json file from file_path and return a dictionary
Args:
file_path: string, path to the file
Returns:
dic: a dictionary
"""
try:
data = json.load( open(file_path) )
except IOError:
print("Cannot find %s" % (file_path))
sys.exit(1)
except json.decoder.JSONDecodeError:
print("Cannot parse %s" % (file_path))
sys.exit(1)
return data
def write_dic(dic, file_path):
""" write_dic(dic, file_path)
Write a dictionary to file
Args:
dic: dictionary to be dumped
file_path: file to store the dictionary
"""
try:
json.dump(dic, open(file_path, 'w'))
except IOError:
print("Cannot write to %s " % (file_path))
sys.exit(1)
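# Round-trip sketch for the two JSON helpers (hedged; the path is a
# placeholder):
#   write_dic({'sr': 16000}, '/tmp/conf.json')
#   assert read_dic('/tmp/conf.json') == {'sr': 16000}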
def file_exist(file_path):
""" file_exit(file_path)
Whether file exists
"""
return os.path.isfile(file_path) or os.path.islink(file_path)
| 29.251825 | 86 | 0.572302 | 1,044 | 8,015 | 4.211686 | 0.177203 | 0.152377 | 0.076416 | 0.081874 | 0.622697 | 0.605413 | 0.538776 | 0.509666 | 0.46327 | 0.443712 | 0 | 0.019724 | 0.30418 | 8,015 | 273 | 87 | 29.358974 | 0.768693 | 0.283843 | 0 | 0.655844 | 0 | 0 | 0.073123 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058442 | false | 0 | 0.032468 | 0 | 0.168831 | 0.032468 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5cf36edc22c7f7e4d4a27e2cc587f5fe4069278 | 3,000 | py | Python | envs/flatland/utils/env_generators.py | BkyuChoi/HpicFlatland | bdbba7ce451eb72dc760993b96cec4772a08983c | [
"MIT"
] | 4 | 2021-01-15T10:49:33.000Z | 2021-12-31T08:11:35.000Z | envs/flatland/utils/env_generators.py | BkyuChoi/HpicFlatland | bdbba7ce451eb72dc760993b96cec4772a08983c | [
"MIT"
] | null | null | null | envs/flatland/utils/env_generators.py | BkyuChoi/HpicFlatland | bdbba7ce451eb72dc760993b96cec4772a08983c | [
"MIT"
] | null | null | null | import logging
import random
from typing import NamedTuple
from flatland.envs.malfunction_generators import malfunction_from_params
# from flatland.envs.rail_env import RailEnv
from envs.flatland.utils.gym_env_wrappers import FlatlandRenderWrapper as RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
MalfunctionParameters = NamedTuple('MalfunctionParameters', [('malfunction_rate', float), ('min_duration', int), ('max_duration', int)])
def random_sparse_env_small(random_seed, max_width, max_height, observation_builder):
random.seed(random_seed)
size = random.randint(0, 5)
width = 20 + size * 5
height = 20 + size * 5
nr_cities = 2 + size // 2 + random.randint(0, 2)
nr_trains = min(nr_cities * 5, 5 + random.randint(0, 5)) # , 10 + random.randint(0, 10))
max_rails_between_cities = 2
max_rails_in_cities = 3 + random.randint(0, size)
malfunction_rate = 30 + random.randint(0, 100)
malfunction_min_duration = 3 + random.randint(0, 7)
malfunction_max_duration = 20 + random.randint(0, 80)
rail_generator = sparse_rail_generator(max_num_cities=nr_cities, seed=random_seed, grid_mode=False,
max_rails_between_cities=max_rails_between_cities,
max_rails_in_city=max_rails_in_cities)
# new version:
# stochastic_data = MalfunctionParameters(malfunction_rate, malfunction_min_duration, malfunction_max_duration)
stochastic_data = {'malfunction_rate': malfunction_rate, 'min_duration': malfunction_min_duration,
'max_duration': malfunction_max_duration}
schedule_generator = sparse_schedule_generator({1.: 0.25, 1. / 2.: 0.25, 1. / 3.: 0.25, 1. / 4.: 0.25})
while width <= max_width and height <= max_height:
try:
env = RailEnv(width=width, height=height, rail_generator=rail_generator,
schedule_generator=schedule_generator, number_of_agents=nr_trains,
malfunction_generator_and_process_data=malfunction_from_params(stochastic_data),
obs_builder_object=observation_builder, remove_agents_at_target=False)
print("[{}] {}x{} {} cities {} trains, max {} rails between cities, max {} rails in cities. Malfunction rate {}, {} to {} steps.".format(
random_seed, width, height, nr_cities, nr_trains, max_rails_between_cities,
max_rails_in_cities, malfunction_rate, malfunction_min_duration, malfunction_max_duration
))
return env
except ValueError as e:
logging.error(f"Error: {e}")
width += 5
height += 5
logging.info("Try again with larger env: (w,h):", width, height)
logging.error(f"Unable to generate env with seed={random_seed}, max_width={max_height}, max_height={max_height}")
return None
| 50.847458 | 149 | 0.687 | 374 | 3,000 | 5.200535 | 0.262032 | 0.041131 | 0.057584 | 0.053985 | 0.167095 | 0.167095 | 0.128535 | 0.112596 | 0.05964 | 0.05964 | 0 | 0.025214 | 0.22 | 3,000 | 58 | 150 | 51.724138 | 0.805983 | 0.065 | 0 | 0 | 0 | 0.022727 | 0.128571 | 0.023929 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.159091 | 0 | 0.227273 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5cfab6e94dd6a1313e2d99802b5e11a6af2b20d | 616 | py | Python | api/permissions.py | andela-jmuli/wishlist | 39650f7545606aedfe0b32f39bcc883d9b38985c | [
"MIT"
] | 2 | 2017-10-07T09:26:46.000Z | 2019-01-20T01:34:13.000Z | api/permissions.py | mrmuli/wishlist | 39650f7545606aedfe0b32f39bcc883d9b38985c | [
"MIT"
] | null | null | null | api/permissions.py | mrmuli/wishlist | 39650f7545606aedfe0b32f39bcc883d9b38985c | [
"MIT"
] | null | null | null | from rest_framework import permissions
from models import Bucketlist
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Object-level permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
"""
Read permissions are allowed to any request,
so we'll always allow GET, HEAD or OPTIONS requests.
"""
if request.method in permissions.SAFE_METHODS:
return True
if isinstance(obj, Bucketlist):
return obj.created_by == request.user
else:
return obj
| 29.333333 | 73 | 0.652597 | 73 | 616 | 5.438356 | 0.712329 | 0.04534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.282468 | 616 | 20 | 74 | 30.8 | 0.89819 | 0.271104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.7 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5d01d592715818cd44140b5a297b191c7be3b94 | 1,438 | py | Python | mvp/presets.py | danbradham/mvp | 7471af9964ff789897792b23d59c597055d566f5 | [
"MIT"
] | 19 | 2016-02-26T18:43:31.000Z | 2021-04-10T18:29:29.000Z | mvp/presets.py | danbradham/mvp | 7471af9964ff789897792b23d59c597055d566f5 | [
"MIT"
] | null | null | null | mvp/presets.py | danbradham/mvp | 7471af9964ff789897792b23d59c597055d566f5 | [
"MIT"
] | 8 | 2015-12-14T15:10:09.000Z | 2021-06-12T04:20:36.000Z | # -*- coding: utf-8 -*-
import json
import glob
import os
from . import config
def get_presets():
'''Get a generator yielding preset name, data pairs'''
for path in config.PRESETS_PATH:
for f in glob.glob(os.path.join(path, '*.json')):
base = os.path.basename(f)
name = os.path.splitext(base)[0]
            with open(f, 'r') as fp:
                data = json.loads(fp.read())
yield name, data
def get_preset(name):
'''Get a preset by name'''
for n, s in get_presets():
if name == n:
return s
def find_preset(name):
'''Find the path to a given preset...'''
for path in config.PRESETS_PATH:
prospect = os.path.join(path, name + '.json')
if os.path.isfile(prospect):
return prospect
    raise ValueError('Could not find a preset named %s' % name)
def new_preset(name, data):
'''Create a new preset from viewport state data
:param name: Name of the preset
:param data: Viewport state dict
usage::
import mvp
active = mvp.Viewport.active()
mvp.new_preset('NewPreset1', active.get_state())
'''
preset_path = os.path.join(config.PRESETS_PATH[0], name + '.json')
with open(preset_path, 'w') as f:
f.write(json.dumps(data))
def del_preset(name):
preset_path = find_preset(name)
if os.path.exists(preset_path):
os.remove(preset_path)
| 21.787879 | 70 | 0.596662 | 205 | 1,438 | 4.102439 | 0.331707 | 0.049941 | 0.060642 | 0.035672 | 0.061831 | 0.061831 | 0 | 0 | 0 | 0 | 0 | 0.00385 | 0.277469 | 1,438 | 65 | 71 | 22.123077 | 0.805582 | 0.244089 | 0 | 0.066667 | 0 | 0 | 0.048216 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.133333 | 0 | 0.366667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5d3f1aaa2a4db3d649462b9a4b2872c64304957 | 911 | py | Python | wsdeval/formats/wordnet.py | frankier/finn-wsd-eval | 11671a7d87e16a9b45f5bea8a5db3d2f25f31d40 | [
"Apache-2.0"
] | null | null | null | wsdeval/formats/wordnet.py | frankier/finn-wsd-eval | 11671a7d87e16a9b45f5bea8a5db3d2f25f31d40 | [
"Apache-2.0"
] | 2 | 2018-09-22T08:38:23.000Z | 2019-03-22T13:11:34.000Z | wsdeval/formats/wordnet.py | frankier/finn-wsd-eval | 11671a7d87e16a9b45f5bea8a5db3d2f25f31d40 | [
"Apache-2.0"
] | null | null | null | import sys
from stiff.data.constants import UNI_POS_WN_MAP
from finntk.wordnet.reader import get_en_fi_maps
from finntk.wordnet.utils import pre_id_to_post, ss2pre
def lemmas_from_instance(wn, instance):
word = instance.attrib["lemma"]
pos = UNI_POS_WN_MAP[instance.attrib["pos"]]
lemmas = wn.lemmas(word, pos=pos)
return word, pos, lemmas
def write_lemma(keyout, inst_id, lemma):
fi2en, en2fi = get_en_fi_maps()
if lemma is None:
guess = "U"
else:
chosen_synset_fi_id = ss2pre(lemma.synset())
if chosen_synset_fi_id not in fi2en:
sys.stderr.write(
"No fi2en mapping found for {} ({})\n".format(
chosen_synset_fi_id, lemma
)
)
guess = "U"
else:
guess = pre_id_to_post(fi2en[chosen_synset_fi_id])
keyout.write("{} {}\n".format(inst_id, guess))
| 30.366667 | 62 | 0.626784 | 128 | 911 | 4.1875 | 0.40625 | 0.089552 | 0.104478 | 0.119403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010526 | 0.270033 | 911 | 29 | 63 | 31.413793 | 0.795489 | 0 | 0 | 0.16 | 0 | 0 | 0.058178 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.16 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5d7325ca98374479c7e719100f4da872f071b99 | 5,653 | py | Python | pdfs/Commands/WWW.py | tmearnest/sbd | 92e59ed6286ff7b6a036688db086e47951f07cdd | [
"MIT"
] | null | null | null | pdfs/Commands/WWW.py | tmearnest/sbd | 92e59ed6286ff7b6a036688db086e47951f07cdd | [
"MIT"
] | null | null | null | pdfs/Commands/WWW.py | tmearnest/sbd | 92e59ed6286ff7b6a036688db086e47951f07cdd | [
"MIT"
] | null | null | null | from .Command import Command
class WWW(Command):
command = 'www'
help = "Spin up http server"
def set_args(self, subparser):
subparser.add_argument("--port","-P", help="Port number to listen on", type=int, default=5000)
def run(self, args):
import logging
import mimetypes
import os
import flask
import jinja2
from ..Database import Database
from ..HTMLBib import bibContext, authorNorm
from ..Exceptions import UserException
from ..Bibtex import unicodeNorm
if not args.debug:
logging.getLogger('werkzeug').setLevel(logging.ERROR)
Database(dataDir=args.data_dir)
flaskApp = flask.Flask("pdfs")
flaskApp.jinja_env.trim_blocks = True
flaskApp.jinja_env.lstrip_blocks = True
flaskApp.jinja_loader=jinja2.PackageLoader("pdfs")
def mkTagList(db):
if db.tags:
return ' '.join('<a class="tags" href="/tag/{0}">{0}</a>'.format(t) for t in sorted(db.tags))
def keySort(xs):
return sorted(xs, key=lambda x: x.key())
def doSearch(tag=None, text=None, author=None, title=None):
db = Database(dataDir=args.data_dir)
ctx = dict(article_dir=os.path.basename(os.path.dirname(db.dataDir)),
tags=mkTagList(db))
if tag:
ctx['entries'] = bibContext(keySort(filter(lambda x: tag in x.tags, db.works)))
ctx['search'] = "tag:" + tag
elif text:
entries, searchData = [], []
for result in db.search(text, formatter="html"):
entries.append(result['entry'])
searchData.append(result)
bctx = bibContext(entries)
for c,r in zip(bctx,searchData):
c['searchTxt'] = dict(score=r['score'], frags=r['frags'])
ctx['entries'] = bctx[::-1]
ctx['search'] = "text:" + text
elif author:
def isAuth(e):
n, au, ed = set(), e.author(), e.editor()
if au:
n.update(authorNorm(x.split(', ')[0]) for x in au.split(' and '))
if ed:
n.update(authorNorm(x.split(', ')[0]) for x in ed.split(' and '))
return author in n
matches = keySort(filter(isAuth, db.works))
ctx['entries'] = bibContext(matches)
ctx['search'] = "author:" + author
elif title:
def m(x):
return title.lower() in unicodeNorm(x.title()).lower()
ctx['entries'] = bibContext(keySort(filter(m, db.works)))
ctx['search'] = "title:" + title
else:
ctx['entries'] = bibContext(keySort(db.works))
return ctx
@flaskApp.route('/')
def listFiles():
return flask.render_template('bibliography.html', **doSearch())
@flaskApp.route('/search')
def searchFiles():
query=flask.request.args.get('q', '')
queryType=flask.request.args.get('t', '')
if queryType == "text":
ctx = doSearch(text=query)
elif queryType == "author":
ctx = doSearch(author=query)
elif queryType == "title":
ctx = doSearch(title=query)
elif queryType == "tag":
ctx = doSearch(tag=query)
else:
raise RuntimeError("got bad query {}:{}".format(queryType, query))
return flask.render_template('bibliography.html', **ctx)
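        # Illustrative query URLs served by searchFiles above (hedged examples):
        #   /search?t=text&q=ribosome     full-text search with score fragments
        #   /search?t=author&q=smith      normalized author/editor match
        #   /search?t=title&q=folding     case-insensitive title substring match
        #   /search?t=tag&q=biology       tag filter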
@flaskApp.route('/author/<author>')
def listFilesByAuthor(author):
return flask.render_template('bibliography.html', **doSearch(author=author))
@flaskApp.route('/tag/<tag>')
def listFilesByTag(tag):
return flask.render_template('bibliography.html', **doSearch(tag=tag))
@flaskApp.route('/<key>.pdf')
def getPdf(key):
db = Database(dataDir=args.data_dir)
try:
pdfFile = next(filter(lambda x: x.key() == key, db.works)).files[0]
except StopIteration:
raise KeyError
            with open(os.path.join(db.dataDir, pdfFile), "rb") as f:
                resp = flask.make_response(f.read())
resp.content_type = 'application/pdf'
return resp
@flaskApp.route('/attachment/<string:key>-<int:idx>.<string:ext>')
def getAttached(key, idx, ext):
db = Database(dataDir=args.data_dir)
try:
attFile = next(filter(lambda x: x.key() == key, db.works)).files[idx]
except StopIteration:
raise KeyError
filePath = os.path.join(db.dataDir, attFile)
            with open(filePath, "rb") as f:
                resp = flask.make_response(f.read())
mime, _ = mimetypes.guess_type(filePath)
resp.content_type = mime or 'application/octet-stream'
return resp
@flaskApp.route('/<key>.bib')
def getBib(key):
db = Database(dataDir=args.data_dir)
e = db.find(key=key)
resp = flask.make_response(e.bibtex)
resp.content_type = 'text/plain'
return resp
try:
flaskApp.run(port=args.port)
except OSError as err:
if 'Address already in use' in str(err):
raise UserException("Port {} already in use.".format(args.port))
else:
raise
| 36.707792 | 109 | 0.5268 | 607 | 5,653 | 4.864909 | 0.29654 | 0.030816 | 0.032171 | 0.038943 | 0.211311 | 0.150356 | 0.12699 | 0.0447 | 0.0447 | 0.024382 | 0 | 0.003232 | 0.343181 | 5,653 | 153 | 110 | 36.947712 | 0.792082 | 0 | 0 | 0.137097 | 0 | 0 | 0.095348 | 0.016628 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112903 | false | 0 | 0.080645 | 0.040323 | 0.314516 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5da928182393c5f5c747b08972d2fb8f2ff9446 | 8,775 | py | Python | rss_temple/api/views/feed.py | murrple-1/rss_temple | 289197923b1e7d1213f1673d164337df17d7269b | [
"MIT"
] | null | null | null | rss_temple/api/views/feed.py | murrple-1/rss_temple | 289197923b1e7d1213f1673d164337df17d7269b | [
"MIT"
] | 8 | 2019-12-04T21:58:35.000Z | 2021-12-15T02:29:49.000Z | rss_temple/api/views/feed.py | murrple-1/rss_temple | 289197923b1e7d1213f1673d164337df17d7269b | [
"MIT"
] | null | null | null | from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseNotAllowed
from django.db import transaction
import requests
import ujson
from url_normalize import url_normalize
from api import models, query_utils, feed_handler, rss_requests, archived_feed_entry_util
from api.exceptions import QueryException
from api.context import Context
_OBJECT_NAME = 'feed'
def feed(request):
permitted_methods = {'GET'}
if request.method not in permitted_methods:
return HttpResponseNotAllowed(permitted_methods) # pragma: no cover
if request.method == 'GET':
return _feed_get(request)
def feeds_query(request):
permitted_methods = {'POST'}
if request.method not in permitted_methods:
return HttpResponseNotAllowed(permitted_methods) # pragma: no cover
if request.method == 'POST':
return _feeds_query_post(request)
def feed_subscribe(request):
permitted_methods = {'POST', 'PUT', 'DELETE'}
if request.method not in permitted_methods:
return HttpResponseNotAllowed(permitted_methods) # pragma: no cover
if request.method == 'POST':
return _feed_subscribe_post(request)
elif request.method == 'PUT':
return _feed_subscribe_put(request)
elif request.method == 'DELETE':
return _feed_subscribe_delete(request)
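# Fetch the feed over HTTP, parse it, and persist the Feed plus its entries in a single transaction.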
def _save_feed(url):
response = None
try:
response = rss_requests.get(url)
response.raise_for_status()
except requests.exceptions.RequestException:
raise QueryException('feed not found', 404)
with transaction.atomic():
d = feed_handler.text_2_d(response.text)
feed = feed_handler.d_feed_2_feed(d.feed, url)
feed.with_subscription_data()
feed.save()
feed_entries = []
for d_entry in d.get('entries', []):
feed_entry = None
try:
feed_entry = feed_handler.d_entry_2_feed_entry(d_entry)
except ValueError: # pragma: no cover
continue
feed_entry.feed = feed
feed_entries.append(feed_entry)
models.FeedEntry.objects.bulk_create(feed_entries)
return feed
def _feed_get(request):
context = Context()
context.parse_request(request)
context.parse_query_dict(request.GET)
url = request.GET.get('url')
if not url:
return HttpResponseBadRequest('\'url\' missing')
url = url_normalize(url)
field_maps = None
try:
fields = query_utils.get_fields__query_dict(request.GET)
field_maps = query_utils.get_field_maps(fields, _OBJECT_NAME)
except QueryException as e: # pragma: no cover
return HttpResponse(e.message, status=e.httpcode)
feed = None
try:
feed = models.Feed.annotate_subscription_data(
models.Feed.objects.all(), request.user).get(feed_url=url)
except models.Feed.DoesNotExist:
try:
feed = _save_feed(url)
except QueryException as e:
return HttpResponse(e.message, status=e.httpcode)
ret_obj = query_utils.generate_return_object(field_maps, feed, context)
content, content_type = query_utils.serialize_content(ret_obj)
return HttpResponse(content, content_type)
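# The POST body for a feeds query carries pagination, sort, search and field options;
# each is parsed defensively and any QueryException surfaces as an HTTP error response.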
def _feeds_query_post(request):
context = Context()
context.parse_request(request)
context.parse_query_dict(request.GET)
if not request.body:
return HttpResponseBadRequest('no HTTP body') # pragma: no cover
json_ = None
try:
json_ = ujson.loads(request.body)
except ValueError: # pragma: no cover
return HttpResponseBadRequest('HTTP body cannot be parsed')
if type(json_) is not dict:
return HttpResponseBadRequest('JSON body must be object') # pragma: no cover
count = None
try:
count = query_utils.get_count(json_)
except QueryException as e: # pragma: no cover
return HttpResponse(e.message, status=e.httpcode)
skip = None
try:
skip = query_utils.get_skip(json_)
except QueryException as e: # pragma: no cover
return HttpResponse(e.message, status=e.httpcode)
sort = None
try:
sort = query_utils.get_sort(json_, _OBJECT_NAME)
except QueryException as e: # pragma: no cover
return HttpResponse(e.message, status=e.httpcode)
search = None
try:
search = query_utils.get_search(context, json_, _OBJECT_NAME)
except QueryException as e: # pragma: no cover
return HttpResponse(e.message, status=e.httpcode)
field_maps = None
try:
fields = query_utils.get_fields__json(json_)
field_maps = query_utils.get_field_maps(fields, _OBJECT_NAME)
except QueryException as e: # pragma: no cover
return HttpResponse(e.message, status=e.httpcode)
return_objects = None
try:
return_objects = query_utils.get_return_objects(json_)
except QueryException as e: # pragma: no cover
return HttpResponse(e.message, status=e.httpcode)
return_total_count = None
try:
return_total_count = query_utils.get_return_total_count(json_)
except QueryException as e: # pragma: no cover
return HttpResponse(e.message, status=e.httpcode)
feeds = models.Feed.annotate_search_vectors(models.Feed.annotate_subscription_data(
models.Feed.objects.all(), request.user)).filter(*search)
ret_obj = {}
if return_objects:
objs = []
for feed in feeds.order_by(
*sort)[skip:skip + count]:
obj = query_utils.generate_return_object(
field_maps, feed, context)
objs.append(obj)
ret_obj['objects'] = objs
if return_total_count:
ret_obj['totalCount'] = feeds.count()
content, content_type = query_utils.serialize_content(ret_obj)
return HttpResponse(content, content_type)
def _feed_subscribe_post(request):
user = request.user
url = request.GET.get('url')
if not url:
return HttpResponseBadRequest('\'url\' missing')
url = url_normalize(url)
feed = None
try:
feed = models.Feed.objects.get(feed_url=url)
except models.Feed.DoesNotExist:
try:
feed = _save_feed(url)
except QueryException as e:
return HttpResponse(e.message, status=e.httpcode)
custom_title = request.GET.get('customtitle')
existing_subscription_list = list(models.SubscribedFeedUserMapping.objects.filter(
user=user).values_list('feed__feed_url', 'custom_feed_title'))
existing_feed_urls = frozenset(t[0] for t in existing_subscription_list)
existing_custom_titles = frozenset(
t[1] for t in existing_subscription_list if t[1] is not None)
if custom_title is not None and custom_title in existing_custom_titles:
return HttpResponse('custom title already used', status=409)
if feed.feed_url in existing_feed_urls:
return HttpResponse('user already subscribed', status=409)
read_mapping_generator = archived_feed_entry_util.read_mapping_generator_fn(
feed, user)
with transaction.atomic():
models.SubscribedFeedUserMapping.objects.create(
user=user, feed=feed, custom_feed_title=custom_title)
archived_feed_entry_util.mark_archived_entries(read_mapping_generator)
return HttpResponse(status=204)
def _feed_subscribe_put(request):
user = request.user
url = request.GET.get('url')
if not url:
return HttpResponseBadRequest('\'url\' missing')
url = url_normalize(url)
custom_title = request.GET.get('customtitle')
subscribed_feed_mapping = None
try:
subscribed_feed_mapping = models.SubscribedFeedUserMapping.objects.get(
user=user, feed__feed_url=url)
except models.SubscribedFeedUserMapping.DoesNotExist:
return HttpResponseNotFound('not subscribed')
if custom_title is not None:
if models.SubscribedFeedUserMapping.objects.exclude(uuid=subscribed_feed_mapping.uuid).filter(user=user, custom_feed_title=custom_title).exists():
return HttpResponse('custom title already used', status=409)
subscribed_feed_mapping.custom_feed_title = custom_title
subscribed_feed_mapping.save(update_fields=['custom_feed_title'])
return HttpResponse(status=204)
def _feed_subscribe_delete(request):
url = request.GET.get('url')
if not url:
return HttpResponseBadRequest('\'url\' missing')
url = url_normalize(url)
count, _ = models.SubscribedFeedUserMapping.objects.filter(
user=request.user, feed__feed_url=url).delete()
if count < 1:
return HttpResponseNotFound('user not subscribed')
return HttpResponse(status=204)
| 30.681818 | 154 | 0.692877 | 1,063 | 8,775 | 5.480715 | 0.132643 | 0.055613 | 0.033471 | 0.039478 | 0.527291 | 0.479231 | 0.442499 | 0.427738 | 0.410917 | 0.396842 | 0 | 0.004099 | 0.221538 | 8,775 | 285 | 155 | 30.789474 | 0.848778 | 0.028946 | 0 | 0.455882 | 0 | 0 | 0.043269 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044118 | false | 0 | 0.039216 | 0 | 0.259804 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5de3b649cde237814d8eb0d0baa2f698e762515 | 7,959 | py | Python | fempagno/modules/motoremesh.py | giovap95/metis-fem | f8a67698d1531a862e541f79229c0e4486edde6c | [
"MIT"
] | null | null | null | fempagno/modules/motoremesh.py | giovap95/metis-fem | f8a67698d1531a862e541f79229c0e4486edde6c | [
"MIT"
] | 2 | 2020-05-08T21:51:44.000Z | 2020-05-13T13:41:41.000Z | fempagno/modules/motoremesh.py | giovap95/metis-fem | f8a67698d1531a862e541f79229c0e4486edde6c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 27 18:54:01 2020
@author: giova
"""
import numpy as np
import sys
import meshio
# Creates a mesh class
class Mesh:
def __init__(self):
self.el_def = None
self.material = None
self.conn_table = None
self.cds_table = None
self.elements = None # globdof.shape[0]
self.nodes = None # max(max(globdof[:,-1]),max(globdof[:,-2]))+1
self.nodesperelem = None
self.dofspernode = None
self.totdofs = None
self.d = None # spatial dimensions
#---------------------------------------------------------------------------
# Functions below do not belong to mesh Class
#---------------------------------------------------------------------------
def el_mat(mesh,i):
""" Returns the material of the current element,
as defined in the material dictionary"""
el_mat = mesh.material[i]
return el_mat
def el_type(mesh, i):
""" Returns the element type of the current element""" #TODO: eliminate this function
el_type = mesh.elementType[i]
if el_type!=0 and el_type!=1:
print('\n','Element', i, 'ERROR! Element type not recognised')
sys.exit()
return el_type
def coordinates(mesh,i):
rows = mesh.conn_table[i]
cds = mesh.points[rows]
return cds
def NodesInElement(mesh,i):
NodesInElement=mesh.conn_table[i]
return NodesInElement
def get_key(my_dict,val):
""" Function to return key for any value. """
# This function returns the key if the first item in the array value
# of a dictionary is equal to val. If my_dict contains
# 'Fixed': array([667, 0]), get_key(my_dict,667) returns Fixed
for key, value in my_dict.items():
if val == value[0]:
return key
print("\n value",val,"doesn't exist as \'key\': array([value, 0]) in\n", my_dict)
sys.exit()
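# Read a GMSH .msh file via meshio and decorate the resulting object with the
# extra attributes (connectivity, materials, element types) that pyFEM expects.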
def GMSH(mesh_file):
sys.path.append("PRE")
# create a mesh object
mesh = meshio.read("D:/Documents/GitHub/metis-fem/fempagno/PRE/"+mesh_file+".msh")
# check if the mesh object contains attributes needed by pyFEM
# - pyFEM_MeshAttributes is a list of all the mesh attributes needed by pyFEM
# - we are going to reuse the attribute points and add the other attribute from pyFEM_MeshAttributes
pyFEM_MeshAttributes = ["d", "dofsNode", "elements", "elementMaterialTag", "elementType", "points"]
for attribute in pyFEM_MeshAttributes:
if attribute in dir(mesh):
if attribute == "points":
pass
else:
print("Error: meshio already contains the attribute",attribute)
print(" ...do something!")
sys.exit()
# add the missing attributes from pyFEM_MeshAttributes
# Note: it is assumed that the mesh is two-dimensional and that the
    # domain is discretized with triangular elements and that there are
    # two degrees of freedom per node (i.e., this is a plane equilibrium problem)
mesh.elements = 0
mesh.nodes = len(mesh.points)
mesh.dofspernode = 2
mesh.totdofs=mesh.nodes*mesh.dofspernode
mesh.d = 2
mesh.dofsNode = 2
mesh.conn_table = []
mesh.material = []
mesh.el_def = []
mesh.elementType = []
meshing = False
quad = False
try:
dummy = mesh.cell_data_dict['gmsh:physical']['quad']
quad = True
except KeyError:
# print("No quadrilateral elements in mesh")
pass
triangle = False
try:
dummy = mesh.cell_data_dict['gmsh:physical']['triangle']
triangle = True
except KeyError:
# print("No triangular elements in mesh")
pass
if quad:
meshing = True
quads = len(mesh.cell_data_dict["gmsh:physical"]["quad"])
mesh.elements += quads
for t in range(quads):
mesh.conn_table.append(mesh.cells_dict["quad"][t])
materialTag=mesh.cell_data_dict["gmsh:physical"]["quad"][t]
# we assume that a physical surface in 2D is only used to identify
# elements with the same material property.
# GMSH identifies a physical group by a tag and a name.
            # Tags are stored in cell_data_dict for each element.
# Tags and names are linked in field_data
# The function get_key returns the name (=key) for a given tag
key = get_key(mesh.field_data, materialTag)
mesh.material.append(key)
mesh.elementType.append('quad')
if triangle:
meshing = True
triangles = len(mesh.cell_data_dict["gmsh:physical"]["triangle"])
mesh.elements += triangles
for t in range(triangles):
mesh.conn_table.append(mesh.cells_dict["triangle"][t])
materialTag=mesh.cell_data_dict["gmsh:physical"]["triangle"][t]
# we assume that a physical surface in 2D is only used to identify
# elements with the same material property.
# GMSH identifies a physical group by a tag and a name.
            # Tags are stored in cell_data_dict for each element.
# Tags and names are linked in field_data
# The function get_key returns the name (=key) for a given tag
key = get_key(mesh.field_data, materialTag)
mesh.material.append(key)
mesh.elementType.append('triangle')
if not meshing:
print("something went wrong: could not extract mesh data")
sys.exit()
mesh.points = mesh.points[:, 0:mesh.d] #resize to the number of spatial dimensions in the problem
# TODO: ...check that all the necessary attributes have been defined in a correct manner
# library of the possible elements
mesh.element_lib = { 'spring' : {'stiffness matrix' : {'evaluation' : 'closed form',
'domain' : None,
'rule' : None,
'points' : None}},
'bar' : {'stiffness matrix' : {'evaluation' : 'numerical integration',
'domain' : 'line',
'rule' : 'Gauss Legendre',
'points' : 2}},
'triangle' : {'stiffness matrix' : {'evaluation' : 'numerical integration',
'domain' : 'triangle',
'rule' : 'Gauss Legendre',
'points' : 1}},
'quad' : {'stiffness matrix' : {'evaluation' : 'numerical integration',
'domain' : 'quad',
'rule' : 'Gauss Legendre',
'points' : 4}}
}
return mesh
| 33.1625 | 107 | 0.489257 | 819 | 7,959 | 4.678877 | 0.274725 | 0.016701 | 0.025052 | 0.025052 | 0.306628 | 0.29358 | 0.253653 | 0.211378 | 0.190501 | 0.169102 | 0 | 0.008058 | 0.407463 | 7,959 | 239 | 108 | 33.301255 | 0.804495 | 0.280437 | 0 | 0.191304 | 0 | 0 | 0.138163 | 0.007597 | 0 | 0 | 0 | 0.004184 | 0 | 1 | 0.06087 | false | 0.026087 | 0.026087 | 0 | 0.147826 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5e256e472aa9a2645b2b2a6d05bcb536688a4a9 | 1,116 | py | Python | pygame/key-event- get-changed-states/main-using-get_pressed.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 140 | 2017-02-21T22:49:04.000Z | 2022-03-22T17:51:58.000Z | pygame/key-event- get-changed-states/main-using-get_pressed.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 5 | 2017-12-02T19:55:00.000Z | 2021-09-22T23:18:39.000Z | pygame/key-event- get-changed-states/main-using-get_pressed.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 79 | 2017-01-25T10:53:33.000Z | 2022-03-11T16:13:57.000Z | #!/usr/bin/env python3
#
# https://stackoverflow.com/a/48034477/1832058
#
import pygame
pygame.init()
screen = pygame.display.set_mode((300, 200))
pressed = pygame.key.get_pressed()
clock = pygame.time.Clock()
is_running = True
while is_running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
is_running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
is_running = False
last_pressed = pressed
pressed = pygame.key.get_pressed()
# --- get only keys which changed state ---
changed = [idx for idx in range(len(pressed)) if pressed[idx] != last_pressed[idx]]
print(changed)
# or
changed = [idx for idx, (a, b) in enumerate(zip(last_pressed, pressed)) if a != b]
print(changed)
# --- True/False for all keys ---
changed = [pressed[idx] != last_pressed[idx] for idx in range(len(pressed))]
print(changed)
# or
changed = [a != b for a, b in zip(last_pressed, pressed)]
print(changed)
# ---
clock.tick(25)
pygame.quit()
| 21.461538 | 87 | 0.612007 | 150 | 1,116 | 4.466667 | 0.373333 | 0.08209 | 0.080597 | 0.056716 | 0.222388 | 0.077612 | 0.077612 | 0 | 0 | 0 | 0 | 0.028812 | 0.253584 | 1,116 | 51 | 88 | 21.882353 | 0.77551 | 0.135305 | 0 | 0.32 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.04 | 0 | 0.04 | 0.16 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5e25a60a68dadc256cd1d15a2436325f5c9ecdb | 828 | py | Python | epsagon/modules/sqlalchemy.py | Dryja/epsagon-python | 505b09268820593903afdce26e1bab7f64adc23b | [
"MIT"
] | 55 | 2018-09-30T11:46:01.000Z | 2022-03-15T13:37:26.000Z | epsagon/modules/sqlalchemy.py | Dryja/epsagon-python | 505b09268820593903afdce26e1bab7f64adc23b | [
"MIT"
] | 323 | 2018-10-04T15:42:08.000Z | 2022-02-20T11:26:40.000Z | epsagon/modules/sqlalchemy.py | Dryja/epsagon-python | 505b09268820593903afdce26e1bab7f64adc23b | [
"MIT"
] | 20 | 2018-10-11T14:47:16.000Z | 2022-01-20T11:07:29.000Z | """
sqlalchemy patcher module
"""
from __future__ import absolute_import
from epsagon.modules.general_wrapper import wrapper
from ..events.sqlalchemy import SqlAlchemyEventFactory
from ..utils import patch_once
def _wrapper(wrapped, instance, args, kwargs):
"""
General wrapper for sqlalchemy instrumentation.
:param wrapped: wrapt's wrapped
:param instance: wrapt's instance
:param args: wrapt's args
:param kwargs: wrapt's kwargs
:return: None
"""
return wrapper(SqlAlchemyEventFactory, wrapped, instance, args, kwargs)
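# Instrument SQLAlchemy session creation and teardown; patch_once presumably guards
# against instrumenting the same method twice if patch() is invoked repeatedly.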
def patch():
"""
patch module.
:return: None
"""
patch_once(
'sqlalchemy.orm.session',
'Session.__init__',
_wrapper
)
patch_once(
'sqlalchemy.orm.session',
'Session.close',
_wrapper
)
| 21.230769 | 75 | 0.665459 | 87 | 828 | 6.149425 | 0.367816 | 0.04486 | 0.071028 | 0.093458 | 0.134579 | 0.134579 | 0 | 0 | 0 | 0 | 0 | 0 | 0.237923 | 828 | 38 | 76 | 21.789474 | 0.847861 | 0.286232 | 0 | 0.352941 | 0 | 0 | 0.137996 | 0.083176 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.235294 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5e7f6b3dfe5867d1dd051b77e4c526e95d5eaf6 | 5,100 | py | Python | hg2git.py | LukasPersonal/hg-fast-export | 77a770c2b856a49f0d58a035cd9e300c8c0203ac | [
"MIT"
] | null | null | null | hg2git.py | LukasPersonal/hg-fast-export | 77a770c2b856a49f0d58a035cd9e300c8c0203ac | [
"MIT"
] | 1 | 2021-09-30T17:11:13.000Z | 2021-09-30T17:11:13.000Z | hg2git.py | LukasPersonal/hg-fast-export | 77a770c2b856a49f0d58a035cd9e300c8c0203ac | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# pylint: disable=E1101,E0602
# Copyright (c) 2007, 2008 Rocco Rutte <pdmef@gmx.net> and others.
# License: MIT <http://www.opensource.org/licenses/mit-license.php>
import os
import re
import subprocess
import sys
from mercurial import error as hgerror
from mercurial import hg, templatefilters, ui
from mercurial.scmutil import binnode, revsymbol
PY2 = sys.version_info.major < 3
if PY2:
str = unicode # noqa: F821
fsencode = lambda s: s.encode(sys.getfilesystemencoding()) # noqa: E731
else:
from os import fsencode
# default git branch name
cfg_master = b"master"
# default origin name
origin_name = b""
# silly regex to see if user field has email address
user_re = re.compile(b"([^<]+) (<[^>]*>)$")
# silly regex to clean out user names
user_clean_re = re.compile(b'^["]([^"]+)["]$')
def set_default_branch(name):
global cfg_master
cfg_master = name.encode("utf8") if not isinstance(name, bytes) else name
def set_origin_name(name):
global origin_name
origin_name = name
def setup_repo(url):
try:
myui = ui.ui(interactive=False)
except TypeError:
myui = ui.ui()
myui.setconfig(b"ui", b"interactive", b"off")
# Avoids a warning when the repository has obsolete markers
myui.setconfig(b"experimental", b"evolution.createmarkers", True)
return myui, hg.repository(myui, fsencode(url)).unfiltered()
def fixup_user(user, authors):
user = user.strip(b'"')
if authors is not None:
# if we have an authors table, try to get mapping
# by defaulting to the current value of 'user'
user = authors.get(user, user)
name, mail, m = b"", b"", user_re.match(user)
if m is None:
# if we don't have 'Name <mail>' syntax, extract name
# and mail from hg helpers. this seems to work pretty well.
# if email doesn't contain @, replace it with devnull@localhost
name = templatefilters.person(user)
mail = b"<%s>" % templatefilters.email(user)
if b"@" not in mail:
mail = b"<devnull@localhost>"
else:
# if we have 'Name <mail>' syntax, everything is fine :)
name, mail = m.group(1), m.group(2)
# remove any silly quoting from username
m2 = user_clean_re.match(name)
if m2 is not None:
name = m2.group(1)
return b"%s %s" % (name, mail)
def get_branch(name):
# 'HEAD' is the result of a bug in mutt's cvs->hg conversion,
# other CVS imports may need it, too
if name == b"HEAD" or name == b"default" or name == b"":
name = cfg_master
if origin_name:
return origin_name + b"/" + name
return name
def get_changeset(ui, repo, revision, authors={}, encoding=""):
# Starting with Mercurial 4.6 lookup no longer accepts raw hashes
# for lookups. Work around it by changing our behaviour depending on
# how it fails
try:
node = repo.lookup(revision)
except (TypeError, hgerror.ProgrammingError):
node = binnode(revsymbol(repo, b"%d" % revision)) # We were given a numeric rev
except hgerror.RepoLookupError:
node = revision # We got a raw hash
(manifest, user, (time, timezone), files, desc, extra) = repo.changelog.read(node)
if encoding:
user = user.decode(encoding).encode("utf8")
desc = desc.decode(encoding).encode("utf8")
tz = b"%+03d%02d" % (-timezone // 3600, ((-timezone % 3600) // 60))
branch = get_branch(extra.get(b"branch", b"master"))
return (
node,
manifest,
fixup_user(user, authors),
(time, tz),
files,
desc,
branch,
extra,
)
def mangle_key(key):
return key
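# Cache files hold one ":key value" pair per line, e.g. mapping hg changeset
# hashes to git marks between incremental runs.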
def load_cache(filename, get_key=mangle_key):
cache = {}
if not os.path.exists(filename):
return cache
f = open(filename, "rb")
linecount = 0
for line in f.readlines():
linecount += 1
fields = line.split(b" ")
if fields is None or not len(fields) == 2 or fields[0][0:1] != b":":
sys.stderr.write(
"Invalid file format in [%s], line %d\n" % (filename, linecount)
)
continue
# put key:value in cache, key without ^:
cache[get_key(fields[0][1:])] = fields[1].split(b"\n")[0]
f.close()
return cache
def save_cache(filename, cache):
f = open(filename, "wb")
for key, value in cache.items():
if not isinstance(key, bytes):
key = str(key).encode("utf8")
if not isinstance(value, bytes):
value = str(value).encode("utf8")
f.write(b":%s %s\n" % (key, value))
f.close()
def get_git_sha1(name, type="heads"):
try:
# use git-rev-parse to support packed refs
ref = "refs/%s/%s" % (type, name.decode("utf8"))
line = subprocess.check_output(
["git", "rev-parse", "--verify", "--quiet", ref.encode("utf8")]
)
if line is None or len(line) == 0:
return None
return line[0:40]
except subprocess.CalledProcessError:
return None
| 30.909091 | 88 | 0.612941 | 708 | 5,100 | 4.365819 | 0.372881 | 0.022646 | 0.011647 | 0.012941 | 0.016176 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018533 | 0.259412 | 5,100 | 164 | 89 | 31.097561 | 0.799841 | 0.218627 | 0 | 0.095652 | 0 | 0 | 0.07049 | 0.005811 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.069565 | 0.008696 | 0.252174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5e873bb87ab31b611a9db0a4399b14d3a1da37d | 331 | py | Python | l1-l100/7.py | ZucchiniY/Leetcode-cn | c26d080b7f8115a2edfc135742c1cad0105eccfc | [
"MIT"
] | null | null | null | l1-l100/7.py | ZucchiniY/Leetcode-cn | c26d080b7f8115a2edfc135742c1cad0105eccfc | [
"MIT"
] | null | null | null | l1-l100/7.py | ZucchiniY/Leetcode-cn | c26d080b7f8115a2edfc135742c1cad0105eccfc | [
"MIT"
] | null | null | null | """
The final result must respect the signed 32-bit range: Python's int is
arbitrary-precision (longer than in most languages), so the overflow check
has to be made explicitly.
"""
class Solution:
def reverse(self, x: int) -> int:
y = x
if x < 0:
y = -1 * x
y = str(y)[::-1]
if x < 0:
r = -1 * int(y)
else:
r = int(y)
        # signed 32-bit range is [-2**31, 2**31 - 1]; anything outside overflows to 0
        return r if -2**31 <= r <= 2**31 - 1 else 0
| 18.388889 | 55 | 0.432024 | 45 | 331 | 3.177778 | 0.511111 | 0.083916 | 0.055944 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.149733 | 0.435045 | 331 | 17 | 56 | 19.470588 | 0.614973 | 0.138973 | 0 | 0.181818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5ede57f6e5a4c4f286e2ced6355288e52909591 | 1,947 | py | Python | binding/python/test_cobra_perf.py | Picovoice/cobra | 2684f7e873930b66fa5cd114ee06434a63760160 | [
"Apache-2.0"
] | 26 | 2021-09-17T20:11:52.000Z | 2022-03-13T01:33:22.000Z | binding/python/test_cobra_perf.py | Picovoice/cobra | 2684f7e873930b66fa5cd114ee06434a63760160 | [
"Apache-2.0"
] | 4 | 2021-09-29T20:39:25.000Z | 2022-01-19T18:24:56.000Z | binding/python/test_cobra_perf.py | Picovoice/cobra | 2684f7e873930b66fa5cd114ee06434a63760160 | [
"Apache-2.0"
] | 3 | 2021-11-08T05:19:24.000Z | 2022-03-07T03:08:24.000Z | #
# Copyright 2022 Picovoice Inc.
#
# You may not use this file except in compliance with the license. A copy of the license is located in the "LICENSE"
# file accompanying this source.
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
import sys
import time
import unittest
from cobra import Cobra
from util import *
from test_util import *
class CobraPerformanceTestCase(unittest.TestCase):
ACCESS_KEY = sys.argv[1]
NUM_TEST_ITERATIONS = int(sys.argv[2])
PERFORMANCE_THRESHOLD_SEC = float(sys.argv[3])
def test_performance(self):
cobra = Cobra(access_key=sys.argv[1], library_path=pv_library_path('../..'))
audio = read_wav_file(
os.path.join(os.path.dirname(__file__), '../../res/audio/sample.wav'),
cobra.sample_rate)
num_frames = len(audio) // cobra.frame_length
perf_results = []
for i in range(self.NUM_TEST_ITERATIONS):
proc_time = 0
for j in range(num_frames):
frame = audio[j * cobra.frame_length:(j + 1) * cobra.frame_length]
start = time.time()
cobra.process(frame)
proc_time += time.time() - start
if i > 0:
perf_results.append(proc_time)
cobra.delete()
avg_perf = sum(perf_results) / self.NUM_TEST_ITERATIONS
print("Average performance: %s" % avg_perf)
self.assertLess(avg_perf, self.PERFORMANCE_THRESHOLD_SEC)
if __name__ == '__main__':
if len(sys.argv) != 4:
print("usage: test_cobra_perf.py ${ACCESS_KEY} ${NUM_TEST_INTERVALS} ${PERFORMANCE_THRESHOLD_SEC}")
exit(1)
unittest.main(argv=sys.argv[:1])
| 33 | 117 | 0.663585 | 262 | 1,947 | 4.729008 | 0.465649 | 0.048426 | 0.01937 | 0.025827 | 0.027441 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009434 | 0.237802 | 1,947 | 58 | 118 | 33.568966 | 0.825472 | 0.247047 | 0 | 0 | 0 | 0 | 0.104467 | 0.051546 | 0 | 0 | 0 | 0 | 0.028571 | 1 | 0.028571 | false | 0 | 0.171429 | 0 | 0.314286 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5edfa705d3471d7a0761238dd4b5b4398bf2f00 | 4,034 | py | Python | neurons/boltzmann/main.py | unconst/SimpleWord2Vec | d1af6993c1d6bca273a0c8d147132ee9867f5543 | [
"MIT"
] | 9 | 2019-12-18T10:20:15.000Z | 2021-03-18T00:07:28.000Z | neurons/boltzmann/main.py | unconst/SimpleWord2Vec | d1af6993c1d6bca273a0c8d147132ee9867f5543 | [
"MIT"
] | 5 | 2020-02-12T02:21:15.000Z | 2022-02-10T00:25:28.000Z | neurons/boltzmann/main.py | unconst/BitTensor | d1af6993c1d6bca273a0c8d147132ee9867f5543 | [
"MIT"
] | null | null | null | import bittensor
from config import Config
from metagraph import Metagraph
from dendrite import Dendrite
from nucleus import Nucleus
from neuron import Neuron
from Crypto.Hash import SHA256
from datetime import timedelta
import grpc
from loguru import logger
import pickle
import numpy as np
import random
import time
from timeloop import Timeloop
def set_timed_loops(tl, config, neuron, metagraph):
# Test self.
# @tl.job(interval=timedelta(seconds=1))
# def test():
# channel = grpc.insecure_channel(config.serve_address + ":" + config.port)
#
# for _ in range(100):
# # Inc message id.
# message_id = random.randint(0, 1000000)
#
# # Make request.
# spikes = np.array([['apples']])
# stub = bittensor.proto.bittensor_pb2_grpc.BittensorStub(channel)
#
# time_str = str(time.time())
# # Build hash.
# hash = SHA256.new()
# hash.update(config.identity.encode())
# hash.update(spikes.tobytes())
# hash.update(time_str.encode())
# message_hash = hash.digest()
#
# # Build request.
# request = bittensor.proto.bittensor_pb2.SpikeRequest()
# request.parent_id = config.identity
# request.message_id = message_hash
# request.payload = pickle.dumps(spikes, protocol=0)
#
# # Send Spike.
# try:
# response = stub.Spike(request)
# response = pickle.loads(response.payload).reshape(1, 128)
#
# except Exception as e:
# logger.error(str(e))
#
# # Make grad request.
# grad = np.zeros((1, 128))
# stub = bittensor.proto.bittensor_pb2_grpc.BittensorStub(channel)
#
# # Build hash.
# hash = SHA256.new()
# hash.update(config.identity.encode())
# hash.update(spikes.tobytes())
# hash.update(time_str.encode())
# message_hash = hash.digest()
#
# request = bittensor.proto.bittensor_pb2.GradeRequest()
# request.parent_id = config.identity
# request.message_id = message_hash
# request.payload = pickle.dumps(grad, protocol=0)
#
# # Send grade request.
# try:
# stub.Grade(request)
# except Exception as e:
# logger.error(str(e))
# Pull the updated graph state (Vertices, Edges, Weights)
@tl.job(interval=timedelta(seconds=7))
def pull_metagraph():
metagraph.pull_metagraph()
# Reselect channels.
@tl.job(interval=timedelta(seconds=10))
def connect():
neuron.connect()
# Apply a gradient step.
@tl.job(interval=timedelta(seconds=3))
def learn():
neuron.Learn()
def main():
config = Config()
metagraph = Metagraph(config)
dendrite = Dendrite(config, metagraph)
nucleus = Nucleus(config)
neuron = Neuron(config, dendrite, nucleus, metagraph)
neuron.serve()
# Start timed calls.
tl = Timeloop()
set_timed_loops(tl, config, neuron, metagraph)
tl.start(block=False)
logger.info('Started Timers.')
def tear_down(_config, _neuron, _dendrite, _nucleus, _metagraph):
logger.debug('tear down.')
del _neuron
del _dendrite
del _nucleus
del _metagraph
del _config
try:
logger.info('Begin wait on main...')
while True:
logger.debug('heartbeat')
time.sleep(100)
except KeyboardInterrupt:
logger.debug('Neuron stopped with keyboard interrupt.')
tear_down(config, neuron, dendrite, nucleus, metagraph)
except Exception as e:
logger.error('Neuron stopped with interrupt on error: ' + str(e))
tear_down(config, neuron, dendrite, nucleus, metagraph)
if __name__ == '__main__':
logger.debug("started neuron.")
main()
| 28.609929 | 83 | 0.589737 | 428 | 4,034 | 5.446262 | 0.306075 | 0.030888 | 0.022308 | 0.037752 | 0.424281 | 0.346203 | 0.333762 | 0.246246 | 0.1716 | 0.1716 | 0 | 0.014931 | 0.302677 | 4,034 | 140 | 84 | 28.814286 | 0.813722 | 0.463312 | 0 | 0.035088 | 0 | 0 | 0.074905 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.263158 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5ee0a1b58d18c70bdda71d41120500488aa7c66 | 897 | py | Python | miplearn/tests/__init__.py | GregorCH/MIPLearn | 28e2ba7c0133602fb361f8690bc7424869f68b43 | [
"BSD-3-Clause"
] | null | null | null | miplearn/tests/__init__.py | GregorCH/MIPLearn | 28e2ba7c0133602fb361f8690bc7424869f68b43 | [
"BSD-3-Clause"
] | null | null | null | miplearn/tests/__init__.py | GregorCH/MIPLearn | 28e2ba7c0133602fb361f8690bc7424869f68b43 | [
"BSD-3-Clause"
] | null | null | null | # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from miplearn import LearningSolver
from miplearn.problems.knapsack import KnapsackInstance
def get_test_pyomo_instances():
instances = [
KnapsackInstance(
weights=[23.0, 26.0, 20.0, 18.0],
prices=[505.0, 352.0, 458.0, 220.0],
capacity=67.0,
),
KnapsackInstance(
weights=[25.0, 30.0, 22.0, 18.0],
prices=[500.0, 365.0, 420.0, 150.0],
capacity=70.0,
),
]
models = [instance.to_model() for instance in instances]
solver = LearningSolver()
for i in range(len(instances)):
solver.solve(instances[i], models[i])
return instances, models
| 34.5 | 82 | 0.634337 | 115 | 897 | 4.913043 | 0.626087 | 0.042478 | 0.014159 | 0.035398 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.098361 | 0.251951 | 897 | 25 | 83 | 35.88 | 0.743666 | 0.244147 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5ee4943aac68235054c17ff1e0039fbf33c5e05 | 3,394 | py | Python | RPi/PBUtils.py | lefake/RPi-Arduino-PB-Communication | 8f827a4b8eaa331cd47d9a3f5bfa0414ec8c264f | [
"MIT"
] | null | null | null | RPi/PBUtils.py | lefake/RPi-Arduino-PB-Communication | 8f827a4b8eaa331cd47d9a3f5bfa0414ec8c264f | [
"MIT"
] | null | null | null | RPi/PBUtils.py | lefake/RPi-Arduino-PB-Communication | 8f827a4b8eaa331cd47d9a3f5bfa0414ec8c264f | [
"MIT"
] | null | null | null | from binascii import unhexlify
import threading
import time
# Serialization utils
class PBSerializationHandler:
def __init__(self, msg_obj):
self._msg_obj = msg_obj
def encode_msgs(self, ids, msgs):
msg = "<"
for id_msg, pb_msg in zip(ids, msgs):
msg += str(id_msg) + "|"
for byte in bytearray(pb_msg.SerializeToString()):
msg += str(hex(byte))[2:].zfill(2) # Remove \x and fill with 0 in front to always takes 2 digits
msg += ";"
msg += ">"
return msg
def encode_msg(self, id, msg):
return self.encode_msgs([id], [msg])
def deserialize(self, messages):
messages = messages.decode("ascii")
msg_array = messages[1:-1].split(';') # Remove < > characters and split sub-msgs
object_list = []
for msg in msg_array:
if len(msg) > 0:
msg_id, raw_msg = msg.split("|") # Find the id of the message
msg_id = int(msg_id)
obj = self._msg_obj[msg_id]
obj.ParseFromString(unhexlify(raw_msg))
object_list.append([msg_id, obj])
return object_list
# Serial communication utils
class ArduinoReadHandler(threading.Thread):
def __init__(self, sleeptime, readfunc):
self._sleeptime = sleeptime
self._readfunc = readfunc
threading.Thread.__init__(self)
self._runflag = threading.Event()
self._runflag.clear()
self._run = True
def run(self):
self._runflag.set()
self.worker()
def worker(self):
while self._run:
if self._runflag.is_set():
self._readfunc()
time.sleep(self._sleeptime)
def pause(self):
self._runflag.clear()
def resume(self):
self._runflag.set()
def running(self):
return self._runflag.is_set()
def kill(self):
self._run = False
class PBSerialHandler:
def __init__(self, serial, callback, msg_obj, sleeptime=0.01):
self._serial = serial
self._sleeptime = float(sleeptime)
self._callback = callback
self._interlock = False
self._response = None
self._serialization_handler = PBSerializationHandler(msg_obj)
self._worker = ArduinoReadHandler(self._sleeptime, self.read_callback)
self._worker.start()
def kill(self):
self._worker.kill()
def read_callback(self):
if not self._interlock:
self._interlock = True
try:
input = self._serial.read()
if input == b'<':
buffer = self._serial.read_until(b'>')
self._serial.flush()
self._response = b'<' + buffer
self._callback(self._response)
except Exception as e:
print("Read call back error " + str(e))
self._interlock = False
def write_pb_msg(self, id, msg):
self.write_pb_msgs([id], [msg])
def write_pb_msgs(self, ids, msgs):
encoded_msg = self._serialization_handler.encode_msgs(ids, msgs)
while self._interlock:
time.sleep(self._sleeptime)
self._interlock = True
self._serial.write(encoded_msg.encode("ascii"))
self._serial.flush()
self._interlock = False
| 27.819672 | 113 | 0.57749 | 389 | 3,394 | 4.784062 | 0.285347 | 0.041376 | 0.032241 | 0.013971 | 0.017195 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004316 | 0.317325 | 3,394 | 121 | 114 | 28.049587 | 0.798878 | 0.051267 | 0 | 0.170455 | 0 | 0 | 0.012449 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.034091 | 0.022727 | 0.295455 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5f0bb1e5a4c34450975dee81ee353ebb2c952b3 | 9,769 | py | Python | Old_Files/QuickPool_Old.py | PV-Lab/stability | d18da803a399a7c338b225b0d6adbdfe1b427707 | [
"MIT"
] | null | null | null | Old_Files/QuickPool_Old.py | PV-Lab/stability | d18da803a399a7c338b225b0d6adbdfe1b427707 | [
"MIT"
] | null | null | null | Old_Files/QuickPool_Old.py | PV-Lab/stability | d18da803a399a7c338b225b0d6adbdfe1b427707 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 4 18:07:03 2019
@author: NickT
"""
from pymatgen import MPRester
import pandas as pd
import multiprocessing as mp
import pickle
import tqdm
mat_api_key = '<ENTER API KEY>'
mpr = MPRester(mat_api_key)
print("Loading Compounds....")
with open('MPDatabase.pickle', 'rb') as file:
    all_compounds1 = pickle.load(file)
all_compounds = []
for compound in all_compounds1:
if compound['nsites'] == sum(compound['unit_cell_formula'].values()):
all_compounds.append(compound)
criteria = float(input("Enter Stable Phase Criteria in meV: "))
#def find_stable_phases(compound):
# '''
# find all compounds with e_above_hull within given range of zero
# '''
# if abs(compound['e_above_hull']) < criteria/1000:
# return compound
print('Finding Stable Phases....')
stable_phase = []
for compound in tqdm.tqdm(all_compounds): # keep compounds with e_above_hull within criteria (meV) of zero
if abs(compound['e_above_hull']) < criteria/1000:
stable_phase.append(compound)
#pool = mp.Pool(processes=1)
#
#stable_phase = list(tqdm.tqdm(pool.imap(find_stable_phases, all_compounds), total=86680))
######## COMPETING PHASE AND OXIDE CALCULATION ########
def find_comp(stable_oxides, compound_unit_cell, compound_formE, condition):
'''
    Finds a complementary oxide or competing-phase group and the associated total heat of oxidation
    args:
        stable_oxides = list of dictionaries of stable oxides or competing phases with
                        lower formation energy than the original material
        compound_unit_cell = dict of elements in the unit cell of the original compound
        compound_formE = formation energy of the original compound
        condition = string dictating whether it is for comp oxides or comp competing phases
    output:
        tuple: (list of dictionaries of predicted materials,
                combined formation energy of these materials (with appropriate ratios),
                difference between this combined formE and that of the original material,
                number of predicted materials,
                whether the decomposition search finished early (boolean))
    notes:
        intersect_rank: used to find the limiting element via the ratio of normalised
        stoichiometry between the original material and the oxide
'''
result = []
FinishEarly = False
#what if positive formE
orig_natoms = sum(compound_unit_cell.values())
compound_unit_cell1 = dict((a, b/orig_natoms) for a, b in compound_unit_cell.items()) #normalise stoichiometry
for oxide in stable_oxides:
oxide['el_weight'] = dict((a, b/oxide['nsites']) for a, b in oxide['unit_cell_formula'].items()) #normalise stoichiometry
if condition == 'Oxide':
del oxide['el_weight']['O']
oxide['ranker'] = dict((a, b/compound_unit_cell1[a]) for a, b in oxide['el_weight'].items()) #find greedy ranking parameter
oxide['ranking_no'] = sum(oxide['ranker'].values())
sort_oxides = sorted(stable_oxides, key = lambda oxide: (oxide['formation_energy_per_atom']/oxide['ranking_no']))
sort_oxides1 = sort_oxides[:]
total_formE = 0
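    # Greedy cover: repeatedly consume the phase with the best formation-energy-per-
    # coverage ranking until every element in the unit cell is accounted for.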
while sum(compound_unit_cell1.values()) != 0 and sort_oxides1 != []: #if all atoms in unit cell not yet accounted for
oxide = sort_oxides1[0]
intersection = list(set(oxide['elements']).intersection(compound_unit_cell1.keys()))
if intersection == []:
print(compound_unit_cell)
print(oxide['unit_cell_formula'])
print(oxide['nsites'])
intersect_rank = {}
for element in intersection:
intersect_rank[element] = compound_unit_cell1[element]/ oxide['el_weight'][element]
limiting_element = min(intersect_rank, key=intersect_rank.get) #find limiting element
ratio = intersect_rank[limiting_element] #(value)
used_up_elements = []
for element in intersection:
compound_unit_cell1[element] = compound_unit_cell1[element] - (ratio * oxide['el_weight'][element])
            if abs(compound_unit_cell1[element]) < 0.0001: # floating-point tolerance instead of an exact == 0 check
used_up_elements.append(element)
result.append(oxide)
sort_oxides1.remove(oxide)
total_formE += oxide['formation_energy_per_atom']*ratio
sort_oxides1 = [oxide for oxide in sort_oxides1 if
len(set(oxide['elements']).intersection(used_up_elements)) == 0]
#remove oxides in list which arent useful (dont have new elements)
        if sort_oxides1 == [] and abs(sum(compound_unit_cell1.values())) > 0.0001: # floating-point tolerance instead of an exact != 0 check
print(compound_unit_cell1)
FinishEarly = True
return (result, total_formE, total_formE-compound_formE, len(result), FinishEarly)
#### FOR TESTING FIND_OXIDES
ABCO4 = {'elements': ['A', 'B', 'C', 'O'], 'formation_energy_per_atom': -750, 'nsites':7,
'unit_cell_formula':{'A':1, 'B':1, 'C':1, 'O':4}}
AO = {'elements': ['A', 'O'], 'formation_energy_per_atom': -100, 'nsites':8,
'unit_cell_formula':{'A':4, 'O':4}}
BO2 = {'elements': ['B', 'O'], 'formation_energy_per_atom': -100, 'nsites':6,
'unit_cell_formula':{'B':2, 'O':4}}
C2O = {'elements': ['C', 'O'], 'formation_energy_per_atom': -300, 'nsites':24,
'unit_cell_formula':{'C':16, 'O':8}}
A2BO6 = {'elements': ['A', 'B', 'O'], 'formation_energy_per_atom': -380, 'nsites':9,
'unit_cell_formula':{'A':2, 'B':1, 'O':6}}
A2CO4 = {'elements': ['A', 'C', 'O'], 'formation_energy_per_atom': -620, 'nsites':63,
'unit_cell_formula':{'A':18, 'C':9, 'O':36}}
original = {'A':4, 'B':8, 'C':10}
listt = [ABCO4, AO, BO2, C2O, A2BO6, A2CO4]
find_comp(listt, original, -400, 'Oxide')
####
def Make_Property_Dict(compound):
'''
Function to be iterated over all compounds.
'''
PDict = {}
global stable_phase
if abs(compound['e_above_hull']) < criteria/1000: #if stable
#### FOR NUM PHASES
competing_phases_id_withform1 = []
competing_phase_no1 = 0
comp_listdict =[]
#### FOR NUM OXIDES
v_ratio2 = 0
oxide_no1 = 0
oxides_id_withform1 = []
v_ratio_id2 = 'n/a'
oxide_listdict = []
elements = compound['elements']
for i in stable_phase:
#### FOR NUM PHASES
if set(i['elements']).issubset(elements):
comp_listdict.append(i) #for find_comp
if i['formation_energy_per_atom'] < compound['formation_energy_per_atom']:
#find all other phases containing just those elements
competing_phase_no1 +=1
competing_phases_id_withform1.append(i['task_id'])
#### FOR NUM OXIDES
if 'O' in i['elements']:
el = i['elements'][:]
el.remove('O')
O = i['unit_cell_formula']['O']
if set(el).issubset(elements) and O != i['nsites']:
oxide_listdict.append(i) #for find_comp
if i['formation_energy_per_atom'] < compound['formation_energy_per_atom']:
oxide_no1 += 1
oxides_id_withform1.append(i['task_id'])
#### FOR NUM PHASES
PDict['task_id'] = compound['task_id']
PDict['Formula'] = compound['pretty_formula']
PDict['Bandgap /eV'] = compound['band_gap']
PDict['Competing Phase Number (with formation E correction)'] = competing_phase_no1
PDict['Competing Phase List (with formation E correction)'] = competing_phases_id_withform1
y = find_comp(comp_listdict, compound['unit_cell_formula'], compound['formation_energy_per_atom'], 'NotOx')
PDict['Complementary Competing Phase List'] = y[0]
PDict['Complementary Heat of Decomposition'] = y[1]
PDict['Lower Formation Energy Than Original Material'] = y[2]
PDict['Number of Complementary Phases'] = y[3]
PDict['Early Finish1'] = y[4]
#### FOR NUM OXIDES
PDict['Number of Oxides (with formation E correction)'] = oxide_no1
PDict['Oxide List (with formation E correction)'] = oxides_id_withform1
x = find_comp(oxide_listdict, compound['unit_cell_formula'], compound['formation_energy_per_atom'], 'Oxide')
PDict['Complementary Oxide List'] = x[0]
PDict['Complementary Heat of Oxidation'] = x[1]
PDict['Lower Formation Energy Than Original Material'] = x[2]
PDict['Number of Complementary Oxides'] = x[3]
PDict['Early Finish2'] = x[4]
v_ratio2 = 1000
for i in x[0]:
v2 = i['volume']/compound['volume']
if abs(v2 - 1) < abs(v_ratio2 - 1):
v_ratio2 = v2
v_ratio_id2 = i
PDict['Best Volume Ratio'] = v_ratio_id2
PDict['ID of Best Volume Ratio'] = v_ratio2
return PDict
if __name__ == '__main__':
pool = mp.Pool(processes=16)
print('Calculating Data....')
DictList = list(tqdm.tqdm(pool.imap(Make_Property_Dict, all_compounds), total=len(all_compounds)))
FinalDF = pd.DataFrame(DictList)
filename = 'FinalDF_' + str(criteria) + '.pckl'
    with open(filename, 'wb') as f:
        pickle.dump(FinalDF, f)
print('Done.')
| 37.003788 | 131 | 0.604873 | 1,192 | 9,769 | 4.758389 | 0.234899 | 0.026798 | 0.044429 | 0.054302 | 0.234133 | 0.165903 | 0.130642 | 0.108075 | 0.050071 | 0.050071 | 0 | 0.026038 | 0.272699 | 9,769 | 263 | 132 | 37.144487 | 0.772273 | 0.193776 | 0 | 0.041667 | 0 | 0 | 0.21372 | 0.045472 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013889 | false | 0 | 0.055556 | 0 | 0.083333 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5f19e16e2b08093649aea79bf01a6ebe7b3786c | 1,638 | py | Python | lhq_nn_lib/layers/loss.py | lhq1208/DL_lib | 53c99157efcc36f2288a82eedad09cdecda579e5 | [
"Apache-2.0"
] | null | null | null | lhq_nn_lib/layers/loss.py | lhq1208/DL_lib | 53c99157efcc36f2288a82eedad09cdecda579e5 | [
"Apache-2.0"
] | null | null | null | lhq_nn_lib/layers/loss.py | lhq1208/DL_lib | 53c99157efcc36f2288a82eedad09cdecda579e5 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from layers.activation_layer import *
from layers.gradient_check import *
def mean_square_error_loss(y_hat, y):
"""
MSE loss, loss=mean(y_hat-y)^2
:param y_hat: output of the network
:param y: input labels
:return: MSE loss
"""
loss = np.mean((y_hat - y) ** 2)
num_output = y.shape[1]
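    # Note: this gradient averages over the output dimension only; averaging over
    # the batch is assumed to happen downstream of this layer.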
d_loss = 2 * (y_hat - y) / num_output
return loss, d_loss
def cross_entropy_loss(y_hat, y):
"""
Cross entropy loss, loss = -sum(yi * log(y_hat))
:param y_hat: output of the network
:param y: input labels (one_hot)
:return: cross entropy loss
"""
loss = -np.sum(y * np.log(y_hat), axis=1)
# loss = np.mean(loss, axis=0)
d_loss = -y / y_hat
return loss, d_loss
def softmax_loss(x, y):
shifted_logits = x - np.max(x, axis=1, keepdims=True)
Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)
log_probs = shifted_logits - np.log(Z)
probs = np.exp(log_probs)
N = x.shape[0]
loss = -np.sum(log_probs[np.arange(N), y]) / N
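    # Gradient of softmax + cross-entropy: dL/dx_i = p_i - 1{i == y}, averaged over
    # the batch; the three lines below compute exactly this.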
dx = probs.copy()
dx[np.arange(N), y] -= 1
dx /= N
return loss, dx
if __name__ == '__main__':
np.random.seed(231)
num_classes, num_inputs = 10, 50
x = 0.001 * np.random.randn(num_inputs, num_classes)
y = np.random.randint(num_classes, size=num_inputs)
dx_num = eval_numerical_gradient(lambda x: softmax_loss(x, y)[0], x, verbose=False)
loss, dx = softmax_loss(x, y)
# Test softmax_loss function. Loss should be 2.3 and dx error should be 1e-8
print('\nTesting softmax_loss:')
print('loss: ', loss)
print('dx error: ', rel_error(dx_num, dx))
| 27.762712 | 87 | 0.632479 | 276 | 1,638 | 3.557971 | 0.304348 | 0.040733 | 0.025458 | 0.039715 | 0.14664 | 0.089613 | 0.089613 | 0.089613 | 0.089613 | 0.089613 | 0 | 0.020586 | 0.228938 | 1,638 | 58 | 88 | 28.241379 | 0.756928 | 0.218559 | 0 | 0.060606 | 0 | 0 | 0.03843 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.272727 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5f2b2946d7fb399e25a69e9d6f0ee5cf294595b | 2,518 | py | Python | clearmash/prep_entities_for_search.py | Beit-Hatfutsot/mojp-dbs-pipelines | 7bac0da9c1777351f40f422c664a7168b52e218a | [
"MIT"
] | 1 | 2017-06-21T11:36:01.000Z | 2017-06-21T11:36:01.000Z | clearmash/prep_entities_for_search.py | Beit-Hatfutsot/mojp-dbs-pipelines | 7bac0da9c1777351f40f422c664a7168b52e218a | [
"MIT"
] | 66 | 2017-05-09T11:48:50.000Z | 2018-01-02T11:57:26.000Z | clearmash/prep_entities_for_search.py | Beit-Hatfutsot/mojp-dbs-pipelines | 7bac0da9c1777351f40f422c664a7168b52e218a | [
"MIT"
] | 2 | 2017-04-25T09:07:15.000Z | 2017-06-15T10:35:36.000Z | from datapackage_pipelines.wrapper import ingest, spew
from datapackage_pipelines.utilities.resources import PROP_STREAMING
from bs4 import BeautifulSoup
parameters, datapackage, resources = ingest()
def get_resource():
for resource in resources:
for row in resource:
if row["collection"] == parameters["collection-name"] and row["display_allowed"]:
doc = row["parsed_doc"]
item = {"doc_id": "clearmash_{}".format(row["id"]),
"source": "clearmash",
"collection": parameters["collection-name"],
"title_he": doc.get("entity_name", {}).get("he", ""),
"title_en": doc.get("entity_name", {}).get("en", ""),
"content_html_he": doc.get("_c6_beit_hatfutsot_bh_base_template_description", {}).get("he", ""),
"content_html_en": doc.get("_c6_beit_hatfutsot_bh_base_template_description", {}).get("en", "")}
item.update(content_text_he=' '.join(BeautifulSoup(item["content_html_he"], "lxml").findAll(text=True)),
content_text_en=' '.join(BeautifulSoup(item["content_html_en"], "lxml").findAll(text=True)))
yield item
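# Output resource schema; the 'es:index': False entries presumably mark fields that
# should be stored but not full-text indexed by the downstream Elasticsearch loader.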
datapackage["resources"] = [{PROP_STREAMING: True,
"name": parameters["resource-name"],
"path": "{}.csv".format(parameters["resource-name"]),
"schema": {"fields": [{'name': 'doc_id', 'type': 'string', 'es:index': False},
{"name": "source", "type": "string", "es:index": False},
{"name": "collection", "type": "string", "es:index": False},
{"name": "title_he", "type": "string"},
{"name": "title_en", "type": "string"},
{"name": "content_html_he", "type": "string", "es:index": False},
{"name": "content_html_en", "type": "string", "es:index": False},
{"name": "content_text_he", "type": "string"},
{"name": "content_text_en", "type": "string"},],
"primaryKey": ["doc_id"]}}]
spew(datapackage, [get_resource()])
| 61.414634 | 120 | 0.470612 | 221 | 2,518 | 5.126697 | 0.285068 | 0.079435 | 0.052957 | 0.075022 | 0.303619 | 0.213592 | 0.144748 | 0.086496 | 0.086496 | 0.086496 | 0 | 0.00189 | 0.369738 | 2,518 | 40 | 121 | 62.95 | 0.712035 | 0 | 0 | 0 | 0 | 0 | 0.26251 | 0.037331 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.090909 | 0 | 0.121212 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5f5488490016c728274b70ed9f953908256998b | 1,788 | py | Python | reanalysis/mt_to_vcf.py | populationgenomics/automated-interpretation-pipeline | 64afbf396dcc2f4f2330cdfd0414238560910e93 | [
"MIT"
] | null | null | null | reanalysis/mt_to_vcf.py | populationgenomics/automated-interpretation-pipeline | 64afbf396dcc2f4f2330cdfd0414238560910e93 | [
"MIT"
] | 4 | 2022-03-28T06:28:01.000Z | 2022-03-31T00:16:02.000Z | reanalysis/mt_to_vcf.py | populationgenomics/automated-interpretation-pipeline | 64afbf396dcc2f4f2330cdfd0414238560910e93 | [
"MIT"
] | null | null | null | """
Takes an input MT, and extracts a VCF-format representation.
This is currently required as the end-to-end CPG pipeline doesn't currently
store intermediate files. To simulate workflows running on VCF files, we
have to regenerate a VCF representation from a MT.
Optional argument allows the specification of an 'additional header' file
When Hail extracts a VCF from a MT, it doesn't contain any custom field
definitions, e.g. 'VQSR' as a Filter field. This argument allows us to
specify additional lines which are required to make the final output valid
within the VCF specification
"""
from typing import Optional
from argparse import ArgumentParser
import hail as hl
from cpg_utils.hail_batch import init_batch
def main(input_mt: str, output_path: str, additional_header: Optional[str] = None):
"""
takes an input MT, and reads it out as a VCF
:param input_mt:
:param output_path:
:param additional_header: file containing lines to append to header
:return:
"""
init_batch()
matrix = hl.read_matrix_table(input_mt)
hl.export_vcf(
matrix,
output_path,
append_to_header=additional_header,
tabix=True,
)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument(
'--input',
type=str,
help='input MatrixTable path',
)
parser.add_argument('--output', type=str, help='path to write VCF out to')
parser.add_argument(
'--additional_header',
type=str,
help='path to file containing any additional header lines',
required=False,
default=None,
)
args = parser.parse_args()
main(
input_mt=args.input,
output_path=args.output,
additional_header=args.additional_header,
)
| 28.380952 | 83 | 0.69519 | 247 | 1,788 | 4.890688 | 0.404858 | 0.10596 | 0.042219 | 0.023179 | 0.056291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.229306 | 1,788 | 62 | 84 | 28.83871 | 0.876633 | 0.414989 | 0 | 0.117647 | 0 | 0 | 0.138034 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.117647 | 0 | 0.147059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5f5c14f40a001a742cdc89ea72f7c6f1ffa6230 | 3,787 | py | Python | asrtoolkit/wer.py | kaleko/greenkey-asrtoolkit | a729e25ae9c1c65b3c9f25438eb67dba8d03a730 | [
"Apache-2.0"
] | null | null | null | asrtoolkit/wer.py | kaleko/greenkey-asrtoolkit | a729e25ae9c1c65b3c9f25438eb67dba8d03a730 | [
"Apache-2.0"
] | 1 | 2020-02-07T19:20:07.000Z | 2020-02-07T19:27:19.000Z | asrtoolkit/wer.py | kaleko/greenkey-asrtoolkit | a729e25ae9c1c65b3c9f25438eb67dba8d03a730 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Python function for computing word error rates metric for Automatic Speech Recognition files
"""
import argparse
import re
import editdistance
from asrtoolkit.clean_formatting import clean_up
from asrtoolkit.data_structures.time_aligned_text import time_aligned_text
from asrtoolkit.file_utils.script_input_validation import assign_if_valid
# defines global regex for tagged noises and silence
re_tagged_nonspeech = re.compile(r"[\[<][A-Za-z #]*[\]>]")
# defines global list of non-silence noises (nsns) and a regex to remove them
nonsilence_noises = [
"noise",
"um",
"ah",
"er",
"umm",
"uh",
"mm",
"mn",
"mhm",
"mnh",
"huh",
"hmm",
]
re_nonsilence_noises = re.compile(r"\b({})\b".format(
"|".join(nonsilence_noises)))
def remove_nonsilence_noises(input_text):
"""
Removes nonsilence noises from a transcript
"""
return re.sub(re_nonsilence_noises, "", input_text)
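# For example, remove_nonsilence_noises("um hello uh world") returns
# " hello  world": only whole words are matched, and the leftover whitespace
# is assumed to be normalised later by clean_up.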
def wer(ref, hyp, remove_nsns=False):
"""
    Calculate word error rate between two strings or time_aligned_text objects
>>> wer("this is a cat", "this is a dog")
25.0
"""
    # accept time_aligned_text objects too
    if isinstance(ref, time_aligned_text):
        ref = ref.text()
    if isinstance(hyp, time_aligned_text):
        hyp = hyp.text()
# remove tagged noises and other nonspeech events
ref = re.sub(re_tagged_nonspeech, " ", ref)
hyp = re.sub(re_tagged_nonspeech, " ", hyp)
# optionally, remove non silence noises
if remove_nsns:
ref = remove_nonsilence_noises(ref)
hyp = remove_nonsilence_noises(hyp)
# clean punctuation, etc.
ref = clean_up(ref)
hyp = clean_up(hyp)
# calculate WER
return (100 * editdistance.eval(ref.split(" "), hyp.split(" ")) /
max(1, len(ref.split(" "))))
def cer(ref, hyp, remove_nsns=False):
"""
Calculate character error rate between two strings or time_aligned_text objects
>>> cer("this cat", "this bad")
25.0
"""
    # accept time_aligned_text objects too
    if isinstance(ref, time_aligned_text):
        ref = ref.text()
    if isinstance(hyp, time_aligned_text):
        hyp = hyp.text()
if remove_nsns:
ref = remove_nonsilence_noises(ref)
hyp = remove_nonsilence_noises(hyp)
ref = clean_up(ref)
hyp = clean_up(hyp)
# calculate per line CER
return 100 * editdistance.eval(ref, hyp) / max(1, len(ref))
def main():
parser = argparse.ArgumentParser(
description=
"Compares a reference and transcript file and calculates word error rate (WER) between these two files"
)
parser.add_argument(
"reference_file",
metavar="reference_file",
type=str,
help='reference "truth" file',
)
parser.add_argument(
"transcript_file",
metavar="transcript_file",
type=str,
help="transcript possibly containing errors",
)
parser.add_argument(
"--char-level",
help="calculate character error rate instead of word error rate",
action="store_true",
)
parser.add_argument(
"--ignore-nsns",
help="ignore non silence noises like um, uh, etc.",
action="store_true",
)
# parse arguments
args = parser.parse_args()
# read files from arguments
ref = assign_if_valid(args.reference_file)
hyp = assign_if_valid(args.transcript_file)
if ref is None or hyp is None:
print(
"Error with an input file. Please check all files exist and are accepted by ASRToolkit"
)
elif args.char_level:
print("CER: {:5.3f}%".format(cer(ref, hyp, args.ignore_nsns)))
else:
print("WER: {:5.3f}%".format(wer(ref, hyp, args.ignore_nsns)))
if __name__ == "__main__":
main()
| 25.587838 | 111 | 0.639028 | 491 | 3,787 | 4.749491 | 0.321792 | 0.04717 | 0.064322 | 0.037736 | 0.282161 | 0.201544 | 0.175815 | 0.175815 | 0.175815 | 0.145798 | 0 | 0.006298 | 0.245313 | 3,787 | 147 | 112 | 25.761905 | 0.809657 | 0.19831 | 0 | 0.288889 | 0 | 0 | 0.185724 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.066667 | 0 | 0.144444 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5f7fd3c0fafa269562c91d637943b7de22fa323 | 808 | py | Python | src/DeepNovelARG/fasta2kmers.py | gaarangoa/deeparg2.0 | e7777dbc71fa3c527b8b198c79ab8b42fb597d8f | [
"BSD-2-Clause"
] | 2 | 2020-10-29T04:28:45.000Z | 2021-03-20T09:49:26.000Z | src/DeepNovelARG/fasta2kmers.py | gaarangoa/deeparg2.0 | e7777dbc71fa3c527b8b198c79ab8b42fb597d8f | [
"BSD-2-Clause"
] | 4 | 2021-03-07T04:57:16.000Z | 2022-03-13T21:13:59.000Z | src/DeepNovelARG/fasta2kmers.py | gaarangoa/deeparg2.0 | e7777dbc71fa3c527b8b198c79ab8b42fb597d8f | [
"BSD-2-Clause"
] | 3 | 2020-12-01T09:21:20.000Z | 2021-02-24T15:05:02.000Z | import sys
from Bio import SeqIO
import re
import numpy as np
def split_genome(genome="ATCGATATACCA", k=3):
    # non-overlapping k-length chunks; any trailing remainder shorter than k
    # characters is dropped by re.findall
    return re.findall('.' * k, genome)
def generate_one_genome(genome='ATCGATATACCA', k=3):
    _genome = genome
    _sentence = split_genome(genome=_genome, k=k)
    return _sentence
def fasta2kmers(fasta_file, kmer, out_file):
'''
Convert a fasta file into a word/sentence file
'''
    # traverse the fasta file; context managers guarantee the output files are
    # flushed and closed
    with open(out_file + '.sentences', 'w') as fo, \
            open(out_file + '.headers', 'w') as fo2:
        for record in SeqIO.parse(fasta_file, 'fasta'):
            _genome = str(record.seq).upper()
            sentences = generate_one_genome(genome=_genome, k=kmer)
            fo.write(" ".join(sentences) + '\n')
            fo2.write(record.description + "\t" + str(len(sentences)) + '\n')
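# A minimal usage sketch (hypothetical file names): this writes
# 'out_prefix.sentences' with one space-separated k-mer sentence per record
# and 'out_prefix.headers' with matching "<description>\t<n kmers>" lines:
#
#   fasta2kmers('sequences.fasta', 3, 'out_prefix')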
| 23.085714 | 73 | 0.647277 | 110 | 808 | 4.6 | 0.454545 | 0.166008 | 0.067194 | 0.098814 | 0.102767 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007862 | 0.212871 | 808 | 34 | 74 | 23.764706 | 0.787736 | 0.087871 | 0 | 0 | 0 | 0 | 0.079277 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.222222 | 0.055556 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5fde087ff89b55704670fe7d181d26045924a03 | 9,762 | py | Python | src/reader/transformer_wrapper.py | isspek/Cross-Lingual-Cyberbullying | 710c136b9233f0be87af72e43e25722e73158c52 | [
"MIT"
] | 1 | 2022-01-12T15:36:30.000Z | 2022-01-12T15:36:30.000Z | src/reader/transformer_wrapper.py | isspek/Cross-Lingual-Cyberbullying | 710c136b9233f0be87af72e43e25722e73158c52 | [
"MIT"
] | null | null | null | src/reader/transformer_wrapper.py | isspek/Cross-Lingual-Cyberbullying | 710c136b9233f0be87af72e43e25722e73158c52 | [
"MIT"
] | null | null | null | from transformers import AutoTokenizer
from pathlib import Path
import torch
from src.reader.pan_hatespeech import AUTHOR_SEP, AUTHOR_ID
import numpy as np
from sklearn.model_selection import StratifiedKFold
from src.utils import RANDOM_SEED
from pathlib import Path
import xml.etree.ElementTree as ET
from transformers import AutoTokenizer
from torch.utils.data import TensorDataset
from tqdm import tqdm
class ExistTaskDataset(torch.utils.data.Dataset):
    # placeholder: the EXIST task dataset is not implemented yet
    pass
class PanHateSpeechTaskDataset(torch.utils.data.Dataset):
def __init__(self, files, tokenizer, max_seq_len, ground_truth=None, mode='joined'):
self.files = files
self.ground_truth = ground_truth
self.mode = mode
self.tokenizer = tokenizer
self.max_seq_len = max_seq_len
    @staticmethod
    def process_text(text):
        # map PAN-style anonymisation placeholders onto the tokenizer's
        # special tokens; '#USER#:' must be handled before '#USER#'
        text = text.replace('#URL#', "[URL]")
        text = text.replace('#HASHTAG#', "[HASHTAG]")
        text = text.replace('#USER#:', "[USER]")
        text = text.replace('#USER#', "[USER]")
        text = text.replace('RT', "[RT]")
        return text
def __getitem__(self, item):
selected_files = [self.files[item]]
tokenized_texts = []
labels = []
author_ids = []
for profile_file in selected_files:
tree = ET.parse(profile_file)
root = tree.getroot()
if self.ground_truth:
labels.append(self.ground_truth[profile_file.stem])
author_ids.append(profile_file.stem)
if self.mode == 'joined':
for child in root:
posts = []
for ch in child:
posts.append(ch.text)
content = ' '.join(posts)
content = PanHateSpeechTaskDataset.process_text(content)
tokenized_texts.append(content)
elif self.mode == 'joined_post_aware':
for child in root:
posts = []
for ch in child:
posts.append(f'[POSTSTART] {ch.text} [POSTEND]')
content = ' '.join(posts)
content = PanHateSpeechTaskDataset.process_text(content)
tokenized_texts.append(content)
elif self.mode == 'hierarchical':
posts = []
for child in root:
for ch in child:
posts.append(PanHateSpeechTaskDataset.process_text(ch.text))
tokenized_texts.append(posts)
if 'joined' in self.mode:
encoding = self.tokenizer.encode_plus(tokenized_texts[0], add_special_tokens=True,
# Add '[CLS]' and '[SEP]'
max_length=self.max_seq_len,
padding='max_length', # Pad & truncate all sentences.
truncation=True,
return_token_type_ids=False,
return_attention_mask=True, # Construct attn. masks.
return_tensors='pt' # Return pytorch tensors.
)
if self.ground_truth:
return dict(
input_ids=encoding['input_ids'],
attention_mask=encoding['attention_mask'],
labels=torch.LongTensor(labels),
text=tokenized_texts,
author_id=author_ids,
)
else:
return dict(
input_ids=encoding['input_ids'],
attention_mask=encoding['attention_mask'],
text=tokenized_texts,
author_id=author_ids,
)
else:
input_ids = []
attention_masks = []
for idx, tokenized_text in enumerate(tokenized_texts[0]):
encoding = self.tokenizer.encode_plus(tokenized_text, add_special_tokens=True,
# Add '[CLS]' and '[SEP]'
max_length=self.max_seq_len,
padding='max_length', # Pad & truncate all sentences.
truncation=True,
return_token_type_ids=False,
return_attention_mask=True, # Construct attn. masks.
return_tensors='pt' # Return pytorch tensors.
)
input_ids.append(encoding['input_ids'])
attention_masks.append(encoding['attention_mask'])
if self.ground_truth:
return dict(
input_ids=torch.stack(input_ids),
attention_mask=torch.stack(attention_masks),
labels=torch.LongTensor(labels),
text=tokenized_texts,
author_id=author_ids,
)
else:
return dict(
input_ids=torch.stack(input_ids),
attention_mask=torch.stack(attention_masks),
text=tokenized_texts,
author_id=author_ids,
)
def __len__(self):
return len(self.files)
class PANHateSpeechTaskDatasetWrapper:
def create_cv_folds(self):
kf = StratifiedKFold(n_splits=self.cv, random_state=RANDOM_SEED, shuffle=True)
train_folds = []
test_folds = []
for train_index, test_index in kf.split(self.profile_files, list(self.ground_truth.values())):
train_folds.append(train_index)
test_folds.append(test_index)
return train_folds, test_folds
SPECIAL_TOKENS = {
'joined': {'additional_special_tokens': ["[RT]", "[USER]", "[URL]", "[HASHTAG]"]},
'hierarchical': {'additional_special_tokens': ["[RT]", "[USER]", "[URL]", "[HASHTAG]"]},
'joined_post_aware': {
'additional_special_tokens': ["[RT]", "[USER]", "[URL]", "[HASHTAG]", "[POSTSTART]", "[POSTEND]"]}
}
def __init__(self, args):
self.cv = args.cv
self.tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
# self.tokenizer.save_pretrained(f'trained_models/{args.tokenizer}')
self.special_tokens_dict = PANHateSpeechTaskDatasetWrapper.SPECIAL_TOKENS[args.input_mode]
self.tokenizer.add_special_tokens(self.special_tokens_dict)
        if args.lang in ('en_es', 'es_en'):
data_path = Path(args.data)
lang_en = data_path / 'en'
files_en = np.asarray([path for path in lang_en.glob('*.xml')])
lang_es = data_path / 'es'
files_es = np.asarray([path for path in lang_es.glob('*.xml')])
self.profile_files = np.concatenate((files_en, files_es), axis=None)
labels_path_en = data_path / 'en' / 'truth.txt'
self.ground_truth = {}
with open(labels_path_en, 'r') as r:
labels = r.readlines()
for label in labels:
label = label.split(AUTHOR_SEP)
self.ground_truth[label[0]] = int(label[1])
labels_path_es = data_path / 'es' / 'truth.txt'
with open(labels_path_es, 'r') as r:
labels = r.readlines()
for label in labels:
label = label.split(AUTHOR_SEP)
self.ground_truth[label[0]] = int(label[1])
else:
data_path = Path(args.data)
self.profile_files = np.asarray([path for path in data_path.glob('*.xml')])
labels_path = data_path / 'truth.txt'
if labels_path.exists():
self.ground_truth = {}
with open(labels_path, 'r') as r:
labels = r.readlines()
for label in labels:
label = label.split(AUTHOR_SEP)
self.ground_truth[label[0]] = int(label[1])
else:
self.ground_truth = None
if self.cv:
train_folds, test_folds = self.create_cv_folds()
self.dataset = []
for idx, train_fold in enumerate(train_folds):
train_files = self.profile_files[train_fold]
test_files = self.profile_files[test_folds[idx]]
self.dataset.append(
(PanHateSpeechTaskDataset(train_files, max_seq_len=args.max_seq_len, tokenizer=self.tokenizer,
ground_truth=self.ground_truth, mode=args.input_mode),
PanHateSpeechTaskDataset(test_files, max_seq_len=args.max_seq_len, tokenizer=self.tokenizer,
ground_truth=self.ground_truth,
mode=args.input_mode)))
else:
            # TODO: handle test files, i.e. profile files shipped without labels
test_files = self.profile_files
self.dataset = PanHateSpeechTaskDataset(test_files, max_seq_len=args.max_seq_len, tokenizer=self.tokenizer,
ground_truth=self.ground_truth,
mode=args.input_mode)
DATA_LOADERS = {
'pan_hatespeech': PANHateSpeechTaskDatasetWrapper,
'exist': ExistTaskDataset
}
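# A hedged wiring sketch (the attribute names mirror those read in __init__;
# the tokenizer name and data path are hypothetical):
#
#   from argparse import Namespace
#   args = Namespace(cv=5, tokenizer='bert-base-multilingual-cased',
#                    input_mode='joined', lang='en', data='data/pan21/en',
#                    max_seq_len=512)
#   wrapper = DATA_LOADERS['pan_hatespeech'](args)
#   train_ds, test_ds = wrapper.dataset[0]  # first cross-validation fold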
| 41.896996 | 119 | 0.521205 | 959 | 9,762 | 5.062565 | 0.17414 | 0.045314 | 0.046344 | 0.01483 | 0.509784 | 0.464882 | 0.439135 | 0.383522 | 0.355922 | 0.355922 | 0 | 0.001336 | 0.386806 | 9,762 | 232 | 120 | 42.077586 | 0.809723 | 0.032166 | 0 | 0.418848 | 0 | 0 | 0.054678 | 0.007947 | 0 | 0 | 0 | 0.00431 | 0 | 1 | 0.031414 | false | 0.005236 | 0.062827 | 0.005236 | 0.151832 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5fe149f57cd02f1ac0914d38105b287b42a97c0 | 6,585 | py | Python | data/download.py | molokhovdmitry/placeholder | cc0a983af91fcbea3dcd7b9a16db471b000b5ff5 | [
"MIT"
] | null | null | null | data/download.py | molokhovdmitry/placeholder | cc0a983af91fcbea3dcd7b9a16db471b000b5ff5 | [
"MIT"
] | null | null | null | data/download.py | molokhovdmitry/placeholder | cc0a983af91fcbea3dcd7b9a16db471b000b5ff5 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2021 molokhovdmitry
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
This file downloads frames from streams in top categories of twitch.
Pseudocode:
while `Enter` is not pressed:
    1) Get top games from the twitch api, save them in a database.
    2) Get the game with the minimum number of saved frames.
    3) Get logins of streams that are live in that category.
    4) Download frames from 5 random streams,
       save frame info in the database.
"""
import random
import time
import requests
from threading import Thread
from pathlib import Path
from termcolor import colored
from streamlink import Streamlink
from data.download_functions import download_frames
from data.api import get_top_games, get_streams
from data.db_functions import (session_scope, get_game_count, update_games,
min_data_category, max_data_category, add_frame)
from config import DOWNLOAD_PATH, MAX_GAMES
data_path = Path.joinpath(Path(DOWNLOAD_PATH), "frames")
def update_data():
"""Updates data while no input (Enter not pressed)."""
# Start helper threads.
input_list = []
Thread(target=input_thread, args=(input_list, )).start()
print("Press Enter any time to stop downloading.")
Thread(target=info_thread, args=(input_list, )).start()
# Start a streamlink session.
streamlink_session = Streamlink()
# Start an api session.
api_session = requests.session()
downloaded_streams = 0
fail_count = 0
frame_count = 0
while not input_list:
# Add games if game limit is not exceeded.
with session_scope() as db_session:
game_count = get_game_count(db_session)
if game_count < MAX_GAMES:
games = get_top_games(api_session)
if not games:
print("Error. Could not get top games.")
continue
# Update the database with new games.
with session_scope() as db_session:
update_games(db_session, games)
# Get a category with the minimum number of frames.
with session_scope() as db_session:
game_id = min_data_category(db_session)[0]
# Get streams from the category.
streams = get_streams(api_session, game_id)
if not streams:
print("Error. Could not get streams.")
continue
# Update the category (download frames from 5 streams).
download_count = 0
download_attempts = 0
while streams and download_count < 5 and download_attempts < 10:
if input_list:
break
# Get a random stream.
stream = random.choice(list(streams))
streams.discard(stream)
# Download frames from a stream, update the database.
print(f"Downloading frames from '{stream}', gameID: {game_id}.")
download = False
for frame_path in download_frames(streamlink_session,
stream, game_id):
# Save a frame in the database.
with session_scope() as db_session:
add_frame(db_session, frame_path, game_id, stream)
download = True
frame_count += 1
download_count += download
download_attempts += 1
downloaded_streams += download
fail_count += not download
print_dataset_info()
print("Done.")
print(f"Downloaded {frame_count} frame(s) from {downloaded_streams} "
f"stream(s). Failed {fail_count} time(s).")
def input_thread(input_list):
"""Thread that waits for an input."""
input()
input_list.append(True)
print(colored("Interrupting. Please wait.", 'green'))
def info_thread(input_list):
"""
Thread that shows how much data is downloaded and min/max data categories
every `n` seconds.
"""
n = 300
print_dataset_info()
# Repeat every `n` seconds.
i = 0
while not input_list:
if i != n:
time.sleep(1)
i += 1
continue
i = 0
print_dataset_info()
def print_dataset_info():
"""Prints dataset info."""
# Print dataset size.
print(colored(dir_size(data_path), 'green'))
# Print the number of games.
with session_scope() as db_session:
game_count = get_game_count(db_session)
print(colored(f"{game_count} game(s)", 'green'))
    # Print categories with minimum and maximum number of frames.
print_min_max()
def dir_size(path):
"""Returns the size of `path` folder."""
files = list(path.glob('**/*'))
size = 0
for file in files:
if file.is_file():
size += file.stat().st_size
    # Convert bytes to GB (using 2**30 bytes per GB).
    size = size / 1073741824
return "Data size: " + '{:.2f}'.format(size) + " GB"
def print_min_max():
"""Prints categories with minumum and maximum number of frames."""
with session_scope() as db_session:
min_category = min_data_category(db_session)
max_category = max_data_category(db_session)
if min_category:
print(colored("Minimum: {} frame(s) in category {}."
.format(min_category[1], min_category[0]),
'green'))
print(colored("Maximum: {} frame(s) in category {}."
.format(max_category[1], max_category[0]),
'green'))
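# A minimal run sketch: this module expects config.py to define DOWNLOAD_PATH
# (e.g. '/data/twitch', a hypothetical path) and MAX_GAMES (e.g. 20); running
# the file directly starts the download loop via the __main__ guard below.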
if __name__ == "__main__":
update_data()
| 30.771028 | 79 | 0.639028 | 852 | 6,585 | 4.782864 | 0.265258 | 0.028712 | 0.023558 | 0.026503 | 0.149202 | 0.083436 | 0.07681 | 0.066748 | 0.048098 | 0.027975 | 0 | 0.0093 | 0.281549 | 6,585 | 213 | 80 | 30.915493 | 0.85204 | 0.288079 | 0 | 0.192308 | 0 | 0 | 0.105112 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.105769 | 0 | 0.173077 | 0.163462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f900890adc7949963a4aa7f524446cd1c7206958 | 385 | py | Python | Selenium_WebDriver/Find_Element_by_Css_Selector.py | w2k31984/Selenium_WebDriver_Python | 73e509813c6a5e508677920fa76c8cc56371134b | [
"MIT",
"MIT-0"
] | null | null | null | Selenium_WebDriver/Find_Element_by_Css_Selector.py | w2k31984/Selenium_WebDriver_Python | 73e509813c6a5e508677920fa76c8cc56371134b | [
"MIT",
"MIT-0"
] | null | null | null | Selenium_WebDriver/Find_Element_by_Css_Selector.py | w2k31984/Selenium_WebDriver_Python | 73e509813c6a5e508677920fa76c8cc56371134b | [
"MIT",
"MIT-0"
] | null | null | null | import time
from selenium import webdriver
def main():
driver = webdriver.Chrome(executable_path='chromedriver.exe')
driver.get('https://www.w3schools.com/')
#input()
time.sleep(5)
    driver.find_element_by_css_selector('#w3loginbtn').click()  # We get this by copying the element's CSS selector from the browser dev tools.
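    # '#w3loginbtn' is a CSS id selector: the leading '#' matches the element
    # whose id attribute is "w3loginbtn".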
input()
if __name__ == '__main__':
main()
#time.sleep(3) | 27.5 | 121 | 0.711688 | 49 | 385 | 5.326531 | 0.734694 | 0.068966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01227 | 0.153247 | 385 | 14 | 122 | 27.5 | 0.788344 | 0.2 | 0 | 0 | 0 | 0 | 0.199346 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f90152bc71aae7d837d6a1c5e96b7dfa9cb2abc6 | 1,158 | py | Python | time_test.py | zhfeing/graduation-project | e9020a4d7916874ad9d4bf0c9f7f1f82dcfea663 | [
"MIT"
] | null | null | null | time_test.py | zhfeing/graduation-project | e9020a4d7916874ad9d4bf0c9f7f1f82dcfea663 | [
"MIT"
] | 1 | 2019-04-12T06:25:36.000Z | 2019-04-12T06:26:06.000Z | time_test.py | zhfeing/graduation-project | e9020a4d7916874ad9d4bf0c9f7f1f82dcfea663 | [
"MIT"
] | null | null | null | import time
from model_zoo import load_model, resnet, googLeNet
import ensembel_model
import utils
import cv2
import numpy as np
import torch
from torch import nn
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(1)
# test resnet
version = 'resnet-tiny-n7'
new_model = resnet.my_resnet
# per-channel means used for normalization (presumably computed on the
# training set; note that cv2.imread loads channels in BGR order)
mean = np.array([[[[113.91022]],
                  [[123.0098]],
                  [[125.40064]]]], dtype=np.float32)
# model, create_new_model = load_model.load_model(
# version=version,
# new_model=new_model,
# just_weights=False,
# retrain=False,
# to_cuda=False
# )
model = ensembel_model.my_ensembel_model(False)
model.eval()
test_size = 20
time_cost = []
for i in range(test_size):
    img = cv2.imread("get_data/data_sample/{}.png".format(i))
    # HWC -> NCHW, then normalize with the precomputed mean and std
    img = img.transpose([2, 0, 1]).reshape([1, 3, 32, 32]).astype(np.float32)
    img = (img - mean) / 64.15484306
time_start = time.time()
x = torch.Tensor(img)
y = model(x).detach()
y = nn.Softmax(dim=1)(y).numpy()
time_end = time.time()
time_cost.append(time_end - time_start)
time_cost = np.array(time_cost)
# report mean and standard deviation of per-image latency in milliseconds
print(time_cost.mean() * 1000, time_cost.std() * 1000)
| 24.125 | 77 | 0.665803 | 175 | 1,158 | 4.222857 | 0.457143 | 0.064953 | 0.037889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064619 | 0.184801 | 1,158 | 47 | 78 | 24.638298 | 0.71822 | 0.145941 | 0 | 0 | 0 | 0 | 0.064286 | 0.027551 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.290323 | 0 | 0.290323 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f9098f375c9a6e85e1d5127cd965e29b7cdfa5a4 | 1,315 | py | Python | infra/bots/assets/skimage/create_and_upload.py | travisleithead/skia | 2092340a0edc25e9082ce9717643d12d901c3971 | [
"BSD-3-Clause"
] | 6,304 | 2015-01-05T23:45:12.000Z | 2022-03-31T09:48:13.000Z | infra/bots/assets/skimage/create_and_upload.py | travisleithead/skia | 2092340a0edc25e9082ce9717643d12d901c3971 | [
"BSD-3-Clause"
] | 67 | 2016-04-18T13:30:02.000Z | 2022-03-31T23:06:55.000Z | infra/bots/assets/skimage/create_and_upload.py | travisleithead/skia | 2092340a0edc25e9082ce9717643d12d901c3971 | [
"BSD-3-Clause"
] | 1,231 | 2015-01-05T03:17:39.000Z | 2022-03-31T22:54:58.000Z | #!/usr/bin/env python
#
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create the asset and upload it."""
import os
import subprocess
import sys
import tempfile
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
ASSET = os.path.basename(FILE_DIR)
def main():
sk = os.path.realpath(os.path.join(
FILE_DIR, os.pardir, os.pardir, os.pardir, os.pardir, 'bin', 'sk'))
if os.name == 'nt':
sk += '.exe'
if not os.path.isfile(sk):
raise Exception('`sk` not found at %s; maybe you need to run bin/fetch-sk?')
# CIPD is picky about where files are downloaded. Use a subdirectory of the
# asset dir rather than /tmp.
tmp_prefix = os.path.join(FILE_DIR, '.')
with tempfile.TemporaryDirectory(prefix=tmp_prefix) as tmp:
subprocess.check_call([sk, 'asset', 'download', ASSET, tmp], cwd=FILE_DIR)
# Allow the user to modify the contents of the target dir.
input('Previous SKImage contents have been downloaded. Please make '
'your desired changes in the following directory and press enter '
'to continue:\n%s\n' % tmp)
subprocess.check_call([sk, 'asset', 'upload', '--in', tmp, ASSET],
cwd=FILE_DIR)
if __name__ == '__main__':
main()
| 29.222222 | 80 | 0.670722 | 201 | 1,315 | 4.278607 | 0.517413 | 0.048837 | 0.034884 | 0.055814 | 0.144186 | 0.104651 | 0 | 0 | 0 | 0 | 0 | 0.003824 | 0.204563 | 1,315 | 44 | 81 | 29.886364 | 0.818356 | 0.255513 | 0 | 0 | 0 | 0 | 0.255959 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.173913 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f90ab2849b2e284d4af9d06d042c800ea64ec2f7 | 1,487 | py | Python | chapter/chapter5/multi-channels/basic.py | nealguo/gluon-study | 5a1bf7de8f6cc1a376ad85758bc24cf9488f17a1 | [
"Apache-2.0"
] | 1 | 2019-06-13T14:21:48.000Z | 2019-06-13T14:21:48.000Z | chapter/chapter5/multi-channels/basic.py | nealguo/gluon-study | 5a1bf7de8f6cc1a376ad85758bc24cf9488f17a1 | [
"Apache-2.0"
] | null | null | null | chapter/chapter5/multi-channels/basic.py | nealguo/gluon-study | 5a1bf7de8f6cc1a376ad85758bc24cf9488f17a1 | [
"Apache-2.0"
] | null | null | null | from mxnet import nd
# 2D cross-correlation
def corr2d(X, K):
h, w = K.shape
Y = nd.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
return Y
# 2D cross-correlation with a multi-channel input
def corr2d_multi_in(X, K):
    # First iterate along dimension 0 (the channel dimension) of X and K,
    # then use * to unpack the list of results as positional arguments of add_n and sum them
return nd.add_n(*[corr2d(x, k) for x, k in zip(X, K)])
# 2D cross-correlation with multi-channel input and multi-channel output
def corr2d_multi_in_out(X, K):
    # Iterate along dimension 0 (the output-channel dimension) of K,
    # cross-correlating each kernel with the input X,
    # then merge all results with the stack function
return nd.stack(*[corr2d_multi_in(X, k) for k in K])
# 2D cross-correlation with a 1x1 kernel over multi-channel input and output
def corr2d_multi_in_out_1x1(X, K):
c_i, h, w = X.shape
c_o = K.shape[0]
X = X.reshape((c_i, h * w))
K = K.reshape((c_o, c_i))
    # matrix multiplication of the fully connected layer
Y = nd.dot(K, X)
return Y.reshape((c_o, h, w))
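# A 1x1 convolution mixes channels at each spatial position without looking at
# neighbouring pixels, so it is equivalent to a fully connected layer applied
# independently at every position; that is why the single matrix product above
# suffices.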
if __name__ == '__main__':
X = nd.array([[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
K = nd.array([[[0, 1], [2, 3]], [[1, 2], [3, 4]]])
print(corr2d_multi_in(X, K))
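    # For this X and K, summing the per-channel cross-correlations gives
    # [[ 56.  72.]
    #  [104. 120.]]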
    # Stack kernel arrays K, K + 1 and K + 2 to build a kernel with 3 output channels
    # (K + 1 adds 1 to every element of K; likewise for K + 2)
K = nd.stack(K, K + 1, K + 2)
print(K.shape)
print(corr2d_multi_in_out(X, K))
    # For 1x1 convolution, corr2d_multi_in_out_1x1 and corr2d_multi_in_out are equivalent
X = nd.random.uniform(shape=(3, 3, 3))
K = nd.random.uniform(shape=(2, 3, 1, 1))
Y1 = corr2d_multi_in_out_1x1(X, K)
Y2 = corr2d_multi_in_out(X, K)
print((Y1 - Y2).norm().asscalar() < 1e-6)
| 26.087719 | 59 | 0.566241 | 271 | 1,487 | 2.944649 | 0.287823 | 0.030075 | 0.146617 | 0.120301 | 0.22807 | 0.16416 | 0.072682 | 0.02005 | 0 | 0 | 0 | 0.069519 | 0.245461 | 1,487 | 56 | 60 | 26.553571 | 0.639929 | 0.184264 | 0 | 0 | 0 | 0 | 0.006667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.03125 | 0.0625 | 0.28125 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |